From dd704470729630d185b1f16005dd96ee805da2e1 Mon Sep 17 00:00:00 2001
From: panqian099 <11823801+panqian099@user.noreply.gitee.com>
Date: Sat, 22 Oct 2022 14:30:33 +0000
Subject: [PATCH 1/3] 新建 潘倩-潘倩-基于WR-EL模型的小麦锈病识别研究
 (Create the directory 潘倩-潘倩-基于WR-EL模型的小麦锈病识别研究, "Research on
 wheat rust recognition based on the WR-EL model")
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 .../.keep | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 code/2022_autumn/潘倩-潘倩-基于WR-EL模型的小麦锈病识别研究/.keep

diff --git a/code/2022_autumn/潘倩-潘倩-基于WR-EL模型的小麦锈病识别研究/.keep b/code/2022_autumn/潘倩-潘倩-基于WR-EL模型的小麦锈病识别研究/.keep
new file mode 100644
index 0000000..e69de29
--
Gitee

From 78d40953301b96b1a9e0b0f9a52a8eb7dea34c71 Mon Sep 17 00:00:00 2001
From: panqian099 <11823801+panqian099@user.noreply.gitee.com>
Date: Sat, 22 Oct 2022 14:33:11 +0000
Subject: [PATCH 2/3] 潘倩-基于WR-EL模型的小麦锈病识别研究 ("Research on
 wheat rust recognition based on the WR-EL model")
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: panqian099 <11823801+panqian099@user.noreply.gitee.com>
---
 ...基于WR-EL模型的小麦锈病识别研究.rar | Bin 0 -> 55088 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 code/2022_autumn/潘倩-潘倩-基于WR-EL模型的小麦锈病识别研究/潘倩-基于WR-EL模型的小麦锈病识别研究.rar

diff --git a/code/2022_autumn/潘倩-潘倩-基于WR-EL模型的小麦锈病识别研究/潘倩-基于WR-EL模型的小麦锈病识别研究.rar b/code/2022_autumn/潘倩-潘倩-基于WR-EL模型的小麦锈病识别研究/潘倩-基于WR-EL模型的小麦锈病识别研究.rar
new file mode 100644
index 0000000000000000000000000000000000000000..32d43afa2f08fadb2be8f172278a1396c8205715
GIT binary patch
literal 55088
[... 55088 bytes of base85-encoded binary literal data for the .rar archive omitted ...]
z1pb%NjIZdynwft`UxHz)lK=PUtp6bTS425z^{Kz=T|1@;K>&qF00sJbfBmmOloiWY zDU4!(fzA^_!-3`HI#7dvzcN-;0fChr0TW%8b`5?#B{2O5SsDM=GtT(mZbbC|=g;7; ziCuuBm%YhXm6}gbT#2Jgz(0XnK_h^HIu1ZWfQxHi(fq$EdKd!&+j;;YYPH5a`Krk1 q|GkR-k8)N6ZqI_htdbIR$U&`v)@Ojt|37hN!uQZn1_cKL`riQYziV{> literal 0 HcmV?d00001 -- Gitee From ad2be063466fa76917055f1a996a36bcbc5db44a Mon Sep 17 00:00:00 2001 From: panqian099 <11823801+panqian099@user.noreply.gitee.com> Date: Sat, 22 Oct 2022 14:33:46 +0000 Subject: [PATCH 3/3] =?UTF-8?q?=E6=BD=98=E5=80=A9-=E5=9F=BA=E4=BA=8EWR-EL?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E7=9A=84=E5=B0=8F=E9=BA=A6=E9=94=88=E7=97=85?= =?UTF-8?q?=E8=AF=86=E5=88=AB=E7=A0=94=E7=A9=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: panqian099 <11823801+panqian099@user.noreply.gitee.com> --- .../.idea/WheatRustClassification-master.iml" | 11 + .../.idea/misc.xml" | 7 + .../.idea/modules.xml" | 8 + .../.idea/workspace.xml" | 156 +++++ .../Untitled-checkpoint(1).ipynb" | 331 ++++++++++ .../Untitled-checkpoint.ipynb" | 620 ++++++++++++++++++ .../.pytest_cache/CACHEDIR.TAG" | 4 + .../.pytest_cache/README.md" | 8 + .../README.md" | 127 ++++ .../Untitled.ipynb" | 620 ++++++++++++++++++ .../__pycache__/dataset.cpython-36.pyc" | Bin 0 -> 1827 bytes .../__pycache__/dataset.cpython-37(1).pyc" | Bin 0 -> 1842 bytes .../__pycache__/dataset.cpython-37.pyc" | Bin 0 -> 1842 bytes .../dataset.py" | 43 ++ .../ensemble_selection.py" | 169 +++++ .../generate_library_of_models.py" | 191 ++++++ .../prepare_dataset.py" | 38 ++ .../requirements.txt" | 8 + .../utils.py" | 163 +++++ .../utils.pyc" | Bin 0 -> 5586 bytes 20 files changed, 2504 insertions(+) create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/WheatRustClassification-master.iml" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/misc.xml" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/modules.xml" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/workspace.xml" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.ipynb_checkpoints/Untitled-checkpoint(1).ipynb" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.ipynb_checkpoints/Untitled-checkpoint.ipynb" create mode 100644 
"code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.pytest_cache/CACHEDIR.TAG" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.pytest_cache/README.md" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/README.md" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/Untitled.ipynb" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/__pycache__/dataset.cpython-36.pyc" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/__pycache__/dataset.cpython-37(1).pyc" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/__pycache__/dataset.cpython-37.pyc" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/dataset.py" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/ensemble_selection.py" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/generate_library_of_models.py" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/prepare_dataset.py" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/requirements.txt" create mode 100644 "code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/utils.py" create mode 100644 
"code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/utils.pyc" diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/WheatRustClassification-master.iml" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/WheatRustClassification-master.iml" new file mode 100644 index 0000000..2490720 --- /dev/null +++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/WheatRustClassification-master.iml" @@ -0,0 +1,11 @@ + + + + + + + + + + \ No newline at end of file diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/misc.xml" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/misc.xml" new file mode 100644 index 0000000..404edc0 --- /dev/null +++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/misc.xml" @@ -0,0 +1,7 @@ + + + + + + \ No newline at end of file diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/modules.xml" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/modules.xml" new file mode 100644 index 0000000..d2ee608 --- /dev/null +++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/modules.xml" @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/workspace.xml" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/workspace.xml" new file mode 100644 index 0000000..cf80099 --- /dev/null +++ 
"b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.idea/workspace.xml" @@ -0,0 +1,156 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1606914352714 + + + + \ No newline at end of file diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.ipynb_checkpoints/Untitled-checkpoint(1).ipynb" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.ipynb_checkpoints/Untitled-checkpoint(1).ipynb" new file mode 100644 index 0000000..7b2cb97 --- /dev/null +++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.ipynb_checkpoints/Untitled-checkpoint(1).ipynb" @@ -0,0 +1,331 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [], + "source": [ + "# %load dataset.py\n", + "from torch.utils.data import Dataset\n", + "from torchvision import transforms, utils\n", + "import numpy as np\n", + "from scipy import ndimage\n", + "import torch\n", + "from PIL import Image #图像处理库\n", + "\n", + "class ICLRDataset(Dataset):\n", + " def __init__(self, imgs, gts, split_type, index, transform, img_mix_enable = True):\n", + " if index is None:\n", + " self.imgs = imgs\n", + " self.gts = gts\n", + " else:\n", + " self.imgs = [imgs[i] for i in index] #图片集\n", + " self.gts = [gts[i] for i in index] \n", + " \n", + " self.split_type = split_type\n", + " self.transform = transform\n", + " self.img_mix_enable = img_mix_enable\n", + " \n", + " def __len__(self):\n", + " return len(self.imgs)\n", + " \n", + " def augment(self, img, y): \n", + " p = np.random.random(1) #生成0-1之间的一个1维数组\n", + " if p[0] > 0.5: #取出数组里的那个数跟0.5作比较\n", + " while True:\n", + " rnd_idx = np.random.randint(0, len(self.imgs)) #前闭后开,其实就是所有图片索引\n", + " if self.gts[rnd_idx] != y: #如果图片标签不是y就跳出---检查是不是有分错类的图片\n", + " break\n", + " rnd_crop = self.transform(Image.fromarray(self.imgs[rnd_idx])) #用于变换的图片集\n", + " d = 0.8\n", + " img = img * d + rnd_crop * (1 - d) #对图像进行混合和随机裁剪\n", + " return img\n", + "\n", + " def __getitem__(self, idx):\n", + " img = self.imgs[idx]\n", + " y = self.gts[idx]\n", + " img = Image.fromarray(img)\n", + " img = self.transform(img)\n", + " if (self.split_type == 'train') & self.img_mix_enable:\n", + " img = self.augment(img, y) \n", + " return img, y #增强训练集数据,返回增强后的图片和对应标签\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": {}, + "outputs": [], + "source": [ + "# %load utils.py\n", + "#用于训练、测试和读取数据集图像的使用函数\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "from torch.optim import lr_scheduler\n", + "import numpy as np\n", + "import torchvision\n", + "from torchvision import datasets, models, transforms\n", + "#import matplotlib.pyplot as plt\n", + "import time\n", + "import os\n", + "import copy\n", + "import torch.nn.functional as F\n", + "from PIL 
import Image, ExifTags\n",
+ "\n",
+ "def train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes, device, num_cycles, num_epochs_per_cycle):\n",
+ "    since = time.time() # record training start time\n",
+ "\n",
+ "    best_model_wts = copy.deepcopy(model.state_dict()) # initialize the best weights from the pre-trained model\n",
+ "    best_acc = 0.0\n",
+ "    best_loss = 1000000.0\n",
+ "    model_w_arr = []\n",
+ "    prob = torch.zeros((dataset_sizes['val'], 3), dtype = torch.float32).to(device) # accumulated validation predictions\n",
+ "    lbl = torch.zeros((dataset_sizes['val'],), dtype = torch.long).to(device) # validation labels\n",
+ "    for cycle in range(num_cycles):\n",
+ "        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)#, weight_decay = 0.0005)\n",
+ "        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, num_epochs_per_cycle*len(dataloaders['train'])) \n",
+ "        # cosine annealing of the learning rate within each snapshot cycle; T_max is the number of scheduler steps per cycle\n",
+ "        for epoch in range(num_epochs_per_cycle):\n",
+ "            #print('Cycle {}: Epoch {}/{}'.format(cycle, epoch, num_epochs_per_cycle - 1))\n",
+ "            #print('-' * 10)\n",
+ "\n",
+ "            # Each epoch has a training and validation phase\n",
+ "            for phase in ['train', 'val']:\n",
+ "                if phase == 'train':\n",
+ "                    model.train()  # Set model to training mode\n",
+ "                else:\n",
+ "                    model.eval()   # Set model to evaluate mode\n",
+ "\n",
+ "                running_loss = 0.0\n",
+ "                running_corrects = 0\n",
+ "                idx = 0\n",
+ "                # Iterate over data.\n",
+ "                for inputs, labels in dataloaders[phase]:\n",
+ "                    inputs = inputs.to(device)\n",
+ "                    labels = labels.to(device)\n",
+ "\n",
+ "                    # zero the parameter gradients\n",
+ "                    optimizer.zero_grad()\n",
+ "\n",
+ "                    # forward\n",
+ "                    # track gradient history only in the train phase\n",
+ "                    with torch.set_grad_enabled(phase == 'train'):\n",
+ "                        outputs = model(inputs)\n",
+ "                        _, preds = torch.max(outputs, 1)\n",
+ "                        if (epoch == num_epochs_per_cycle-1) and (phase == 'val'):\n",
+ "                            # on the last epoch of a cycle, accumulate softmax predictions for the snapshot ensemble\n",
+ "                            prob[idx:idx+inputs.shape[0]] += F.softmax(outputs, dim = 1)\n",
+ "                            lbl[idx:idx+inputs.shape[0]] = labels\n",
+ "                            idx += inputs.shape[0]\n",
+ "                        loss = criterion(outputs, labels)\n",
+ "                        # backward + optimize only if in training phase\n",
+ "                        if phase == 'train':\n",
+ "                            loss.backward()\n",
+ "                            optimizer.step()\n",
+ "                            scheduler.step()\n",
+ "                            #print(optimizer.param_groups[0]['lr'])\n",
+ "\n",
+ "                    # statistics\n",
+ "                    running_loss += loss.item() * inputs.size(0)\n",
+ "                    running_corrects += torch.sum(preds == labels.data)\n",
+ "\n",
+ "                epoch_loss = running_loss / dataset_sizes[phase]\n",
+ "                epoch_acc = running_corrects.double() / dataset_sizes[phase]\n",
+ "\n",
+ "                #print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n",
+ "                #    phase, epoch_loss, epoch_acc))\n",
+ "\n",
+ "                # deep copy the model with the best validation loss so far\n",
+ "                if phase == 'val' and epoch_loss < best_loss:\n",
+ "                    best_loss = epoch_loss\n",
+ "                    best_model_wts = copy.deepcopy(model.state_dict())\n",
+ "            #print()\n",
+ "        model_w_arr.append(copy.deepcopy(model.state_dict()))\n",
+ "\n",
+ "    prob /= num_cycles\n",
+ "    ensemble_loss = F.nll_loss(torch.log(prob), lbl) \n",
+ "    ensemble_loss = ensemble_loss.item()\n",
+ "    time_elapsed = time.time() - since\n",
+ "    #print('Training complete in {:.0f}m {:.0f}s'.format(\n",
+ "    #    time_elapsed // 60, time_elapsed % 60))\n",
+ "    #print('Ensemble Loss : {:4f}, Best val Loss: {:4f}'.format(ensemble_loss, best_loss))\n",
+ "\n",
+ "    # load the weights saved at the end of each cycle into the snapshot ensemble\n",
+ "    model_arr =[]\n",
+ "    for weights in model_w_arr:\n",
+ "        model.load_state_dict(weights) \n",
+ "        model_arr.append(model) \n",
+ "    return model_arr, ensemble_loss, best_loss, prob\n",
+ "\n",
+ "def test(models_arr, loader, device):\n",
+ "    res = np.zeros((610, 3), dtype = np.float32)\n",
+ "    for model in models_arr:\n",
+ "        model.eval()\n",
+ "        res_arr = []\n",
+ "        for inputs, _ in loader:\n",
+ "            inputs = inputs.to(device)\n",
+ "            # forward pass only; gradient tracking is disabled for inference\n",
+ "            with torch.set_grad_enabled(False):\n",
+ "                outputs = F.softmax(model(inputs), dim = 1) \n",
+ "            res_arr.append(outputs.detach().cpu().numpy())\n",
+ "        res_arr = np.concatenate(res_arr, axis = 0)\n",
+ "        res += res_arr\n",
+ "    return res / len(models_arr)\n",
+ "\n",
+ "def read_train_data(p):\n",
+ "    imgs = []\n",
+ "    labels = []\n",
+ "    for i, lbl in enumerate(os.listdir(p)):\n",
+ "        for fname in os.listdir(os.path.join(p, lbl)):\n",
+ "            #read image\n",
+ "            img = Image.open(os.path.join(p, lbl, fname))\n",
+ "            #rotate image back to its original view using the EXIF orientation tag\n",
+ "            try:\n",
+ "                exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)\n",
+ "                if exif['Orientation'] == 3:\n",
+ "                    img=img.rotate(180, expand=True)\n",
+ "                elif exif['Orientation'] == 6:\n",
+ "                    img=img.rotate(270, expand=True)\n",
+ "                elif exif['Orientation'] == 8:\n",
+ "                    img=img.rotate(90, expand=True)\n",
+ "            except Exception:\n",
+ "                pass # image has no usable EXIF data\n",
+ "            #resize all images to the same size\n",
+ "            img = np.array(img.convert('RGB').resize((512,512), Image.ANTIALIAS))\n",
+ "            imgs.append(img)\n",
+ "            labels.append(i)\n",
+ "    return imgs, labels\n",
+ "\n",
+ "def read_test_data(p):\n",
+ "    imgs = []\n",
+ "    labels = []\n",
+ "    ids = []\n",
+ "    for fname in os.listdir(p):\n",
+ "        #read image\n",
+ "        img = Image.open(os.path.join(p, fname))\n",
+ "        #rotate image back to its original view using the EXIF orientation tag\n",
+ "        try:\n",
+ "            if 'DMWVNR' not in fname:\n",
+ "                exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)\n",
+ "                if exif['Orientation'] == 3:\n",
+ "                    img=img.rotate(180, expand=True)\n",
+ "                elif exif['Orientation'] == 6:\n",
+ "                    img=img.rotate(270, expand=True)\n",
+ "                elif exif['Orientation'] == 8:\n",
+ "                    img=img.rotate(90, expand=True)\n",
+ "        except Exception:\n",
+ "            pass # image has no usable EXIF data\n",
+ "        #resize all images to the same size\n",
+ "        img = img.convert('RGB').resize((512,512), Image.ANTIALIAS)\n",
+ "        imgs.append(np.array(img.copy()))\n",
+ "        labels.append(0)\n",
+ "        ids.append(fname.split('.')[0])\n",
+ "        img.close()\n",
+ "    return imgs, labels, ids\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 63,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "ename": "AssertionError",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[1;31mAssertionError\u001b[0m                            Traceback (most recent call last)",
+ "... (user cell, line 12: args = parser.parse_args(); intermediate argparse frames elided) ...",
+ "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\argparse.py\u001b[0m in \u001b[0;36m_format_usage\u001b[1;34m(self, usage, actions, groups, prefix)\u001b[0m\n\u001b[1;32m--> 340\u001b[1;33m         assert ' '.join(pos_parts) == pos_usage\n",
+ "\u001b[1;31mAssertionError\u001b[0m: "
+ ]
+ }
+ ],
+ "source": [
+ "# %load prepare_dataset.py\n",
+ "# read the training and test data, remove duplicates from the training data, and save everything as numpy matrices\n",
+ "import numpy as np\n",
+ "import os\n",
+ "import argparse\n",
+ "from utils import read_train_data, read_test_data\n",
+ "\n",
+ "parser = argparse.ArgumentParser(description='Data preparation')\n",
+ "# note: the original version of this cell passed the Windows data paths as positional\n",
+ "# argument *names*, which is what raised the argparse AssertionError recorded in this\n",
+ "# cell's error output; named options with path defaults (as in the corrected script\n",
+ "# later in this notebook) fix the problem\n",
+ "parser.add_argument('--train_data_path', help='path to training data folder', default='D:/datasets/train', type=str)\n",
+ "parser.add_argument('--test_data_path', help='path to test data folder', default='D:/datasets/test', type=str)\n",
+ "parser.add_argument('--save_path', help='save path for training and test numpy matrices of images', default='D:/datasets/savepath', type=str)\n",
+ "args = parser.parse_args() # parse the arguments defined above\n",
+ "\n",
+ "#read training data\n",
+ "train_imgs, train_gts = read_train_data(args.train_data_path)\n",
+ "\n",
+ "#remove duplicate training images; if two copies carry different labels, drop both\n",
+ "idx_to_rmv = []\n",
+ "for i in range(len(train_imgs)-1):\n",
+ "    for j in range(i+1, len(train_imgs)):\n",
+ "        if np.all(train_imgs[i] == train_imgs[j]):\n",
+ "            idx_to_rmv.append(i)\n",
+ "            if train_gts[i] != train_gts[j]:\n",
+ "                idx_to_rmv.append(j)\n",
+ "\n",
+ "idx = [i for i in range(len(train_imgs)) if not(i in idx_to_rmv)]\n",
+ "print('unique train imgs:',len(idx))\n",
+ "\n",
+ "#save unique training imgs\n",
+ "np.save(os.path.join(args.save_path, 'unique_train_imgs_rot_fixed'), np.array(train_imgs)[idx])\n",
+ "np.save(os.path.join(args.save_path, 'unique_train_gts_rot_fixed'), np.array(train_gts)[idx])\n",
+ "\n",
+ "#read test data\n",
+ "test_imgs, test_gts, ids = read_test_data(args.test_data_path)\n",
+ "\n",
+ "#save test data\n",
+ "np.save(os.path.join(args.save_path, 'test_imgs_rot_fixed'), np.array(test_imgs))\n",
+ "np.save(os.path.join(args.save_path, 'test_gts'), np.array(test_gts))\n",
+ "np.save(os.path.join(args.save_path, 'ids'), np.array(ids))\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.ipynb_checkpoints/Untitled-checkpoint.ipynb" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.ipynb_checkpoints/Untitled-checkpoint.ipynb"
new file mode 100644
index 0000000..acb1078
--- /dev/null
+++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.ipynb_checkpoints/Untitled-checkpoint.ipynb"
@@ -0,0 +1,620 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# %load dataset.py\n",
+ "from torch.utils.data import Dataset\n",
+ "from torchvision import transforms, utils\n",
+ "import numpy as np\n",
+ "from scipy import ndimage\n",
+ "import torch\n",
+ "from PIL import Image # PIL for image handling\n",
+ "\n",
+ "class ICLRDataset(Dataset):\n",
+ "    def __init__(self, imgs, gts, split_type, index, transform, img_mix_enable = True):\n",
+ "        if index is None:\n",
+ "            self.imgs = imgs\n",
+ "            self.gts = gts\n",
+ "        else:\n",
+ "            self.imgs = [imgs[i] for i in index] # images selected for this split\n",
+ "            self.gts = [gts[i] for i in index]   # and their labels\n",
+ "\n",
+ "        self.split_type = split_type\n",
+ "        self.transform = transform\n",
+ "        self.img_mix_enable = img_mix_enable\n",
+ "\n",
+ "    def __len__(self):\n",
+ "        return len(self.imgs)\n",
+ "\n",
+ "    def augment(self, img, y): \n",
+ "        p = np.random.random(1) # one uniform random number in [0, 1)\n",
+ "        if p[0] > 0.5: # mix two images with probability 0.5\n",
+ "            while True:\n",
+ "                rnd_idx = np.random.randint(0, len(self.imgs)) # random index over all images\n",
+ "                if self.gts[rnd_idx] != y: # keep sampling until the image has a different label\n",
+ "                    break\n",
+ "            rnd_crop = self.transform(Image.fromarray(self.imgs[rnd_idx])) # transformed image to mix in\n",
+ "            d = 0.8\n",
+ "            img = img * d + rnd_crop * (1 - d) # blend the two images with 0.8/0.2 weights\n",
+ "        return img\n",
+ "\n",
+ "    def __getitem__(self, idx):\n",
+ "        img = self.imgs[idx]\n",
+ "        y = self.gts[idx]\n",
+ "        img = Image.fromarray(img)\n",
+ "        img = self.transform(img)\n",
+ "        if (self.split_type == 'train') and self.img_mix_enable:\n",
+ "            img = self.augment(img, y) \n",
+ "        return img, y # the (possibly mixed) training image and its label\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# %load utils.py\n",
+ "# utility functions for training, testing and reading the dataset images\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.optim as optim\n",
+ "from torch.optim import lr_scheduler\n",
+ "import numpy as np\n",
+ "import torchvision\n",
+ "from torchvision import datasets, models, transforms\n",
+ "#import matplotlib.pyplot as plt\n",
+ "import time\n",
+ "import os\n",
+ "import copy\n",
+ "import torch.nn.functional as F\n",
+ "from PIL import Image, ExifTags\n",
+ "\n",
+ "def train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes, device, num_cycles, num_epochs_per_cycle):\n",
+ "    since = time.time() # record training start time\n",
+ "\n",
+ "    best_model_wts = copy.deepcopy(model.state_dict()) # initialize the best weights from the pre-trained model\n",
+ "    best_acc = 0.0\n",
+ "    best_loss = 1000000.0\n",
+ "    model_w_arr = []\n",
+ "    prob = torch.zeros((dataset_sizes['val'], 3), dtype = torch.float32).to(device) # accumulated validation predictions\n",
+ "    lbl = torch.zeros((dataset_sizes['val'],), dtype = torch.long).to(device) # validation labels\n",
+ "    for cycle in range(num_cycles):\n",
+ "        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)#, weight_decay = 0.0005)\n",
+ "        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, num_epochs_per_cycle*len(dataloaders['train'])) \n",
+ "        # cosine annealing of the learning rate within each snapshot cycle; T_max is the number of scheduler steps per cycle\n",
+ "        for epoch in range(num_epochs_per_cycle):\n",
+ "            #print('Cycle {}: Epoch {}/{}'.format(cycle, epoch, num_epochs_per_cycle - 1))\n",
+ "            #print('-' * 10)\n",
+ "\n",
+ "            # Each epoch has a training and validation phase\n",
+ "            for phase in ['train', 'val']:\n",
+ "                if phase == 'train':\n",
+ "                    model.train()  # Set model to training mode\n",
+ "                else:\n",
+ "                    model.eval()   # Set model to evaluate mode\n",
+ "\n",
+ "                running_loss = 0.0\n",
+ "                running_corrects = 0\n",
+ "                idx = 0\n",
+ "                # Iterate over data.\n",
+ "                for inputs, labels in dataloaders[phase]:\n",
+ "                    inputs = inputs.to(device)\n",
+ "                    labels = labels.to(device)\n",
+ "\n",
+ "                    # zero the parameter gradients\n",
+ "                    optimizer.zero_grad()\n",
+ "\n",
+ "                    # forward\n",
+ "                    # track gradient history only in the train phase\n",
+ "                    with torch.set_grad_enabled(phase == 'train'):\n",
+ "                        outputs = model(inputs)\n",
+ "                        _, preds = torch.max(outputs, 1)\n",
+ "                        if (epoch == num_epochs_per_cycle-1) and (phase == 'val'):\n",
+ "                            # on the last epoch of a cycle, accumulate softmax predictions for the snapshot ensemble\n",
+ "                            prob[idx:idx+inputs.shape[0]] += F.softmax(outputs, dim = 1)\n",
+ "                            lbl[idx:idx+inputs.shape[0]] = labels\n",
+ "                            idx += inputs.shape[0]\n",
+ "                        loss = criterion(outputs, labels)\n",
+ "                        # backward + optimize only if in training phase\n",
+ "                        if phase == 'train':\n",
+ "                            loss.backward()\n",
+ "                            optimizer.step()\n",
+ "                            scheduler.step()\n",
+ "                            #print(optimizer.param_groups[0]['lr'])\n",
+ "\n",
+ "                    # statistics\n",
+ "                    running_loss += loss.item() * inputs.size(0)\n",
+ "                    running_corrects += torch.sum(preds == labels.data)\n",
+ "\n",
+ "                epoch_loss = running_loss / dataset_sizes[phase]\n",
+ "                epoch_acc = running_corrects.double() / dataset_sizes[phase]\n",
+ "\n",
+ "                #print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n",
+ "                #    phase, epoch_loss, epoch_acc))\n",
+ "\n",
+ "                # deep copy the model with the best validation loss so far\n",
+ "                if phase == 'val' and epoch_loss < best_loss:\n",
+ "                    best_loss = epoch_loss\n",
+ "                    best_model_wts = copy.deepcopy(model.state_dict())\n",
+ "            #print()\n",
+ "        model_w_arr.append(copy.deepcopy(model.state_dict()))\n",
+ "\n",
+ "    prob /= num_cycles\n",
+ "    ensemble_loss = F.nll_loss(torch.log(prob), lbl) \n",
+ "    ensemble_loss = ensemble_loss.item()\n",
+ "    time_elapsed = time.time() - since\n",
+ "    #print('Training complete in {:.0f}m {:.0f}s'.format(\n",
+ "    #    time_elapsed // 60, time_elapsed % 60))\n",
+ "    #print('Ensemble Loss : {:4f}, Best val Loss: {:4f}'.format(ensemble_loss, best_loss))\n",
+ "\n",
+ "    # load the weights saved at the end of each cycle into the snapshot ensemble\n",
+ "    model_arr =[]\n",
+ "    for weights in model_w_arr:\n",
+ "        model.load_state_dict(weights) \n",
+ "        model_arr.append(model) \n",
+ "    return model_arr, ensemble_loss, best_loss, prob\n",
+ "\n",
+ "def test(models_arr, loader, device):\n",
+ "    res = np.zeros((610, 3), dtype = np.float32)\n",
+ "    for model in models_arr:\n",
+ "        model.eval()\n",
+ "        res_arr = []\n",
+ "        for inputs, _ in loader:\n",
+ "            inputs = inputs.to(device)\n",
+ "            # forward pass only; gradient tracking is disabled for inference\n",
+ "            with torch.set_grad_enabled(False):\n",
+ "                outputs = F.softmax(model(inputs), dim = 1) \n",
+ "            res_arr.append(outputs.detach().cpu().numpy())\n",
+ "        res_arr = np.concatenate(res_arr, axis = 0)\n",
+ "        res += res_arr\n",
+ "    return res / len(models_arr)\n",
+ "\n",
+ "def read_train_data(p):\n",
+ "    imgs = []\n",
+ "    labels = []\n",
+ "    for i, lbl in enumerate(os.listdir(p)):\n",
+ "        for fname in os.listdir(os.path.join(p, lbl)):\n",
+ "            #read image\n",
+ "            img = Image.open(os.path.join(p, lbl, fname))\n",
+ "            #rotate image back to its original view using the EXIF orientation tag\n",
+ "            try:\n",
+ "                exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)\n",
+ "                if exif['Orientation'] == 3:\n",
+ "                    img=img.rotate(180, expand=True)\n",
+ "                elif exif['Orientation'] == 6:\n",
+ "                    img=img.rotate(270, expand=True)\n",
+ "                elif exif['Orientation'] == 8:\n",
+ "                    img=img.rotate(90, expand=True)\n",
+ "            except Exception:\n",
+ "                pass # image has no usable EXIF data\n",
+ "            #resize all images to the same size\n",
+ "            img = np.array(img.convert('RGB').resize((512,512), Image.ANTIALIAS))\n",
+ "            imgs.append(img)\n",
+ "            labels.append(i)\n",
+ "    return imgs, labels\n",
+ "\n",
+ "def read_test_data(p):\n",
+ "    imgs = []\n",
+ "    labels = []\n",
+ "    ids = []\n",
+ "    for fname in os.listdir(p):\n",
+ "        #read image\n",
+ "        img = Image.open(os.path.join(p, fname))\n",
+ "        #rotate image back to its original view using the EXIF orientation tag\n",
+ "        try:\n",
+ "            if 'DMWVNR' not in fname:\n",
+ "                exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)\n",
+ "                if exif['Orientation'] == 3:\n",
+ "                    img=img.rotate(180, expand=True)\n",
+ "                elif exif['Orientation'] == 6:\n",
+ "                    img=img.rotate(270, expand=True)\n",
+ "                elif exif['Orientation'] == 8:\n",
+ "                    img=img.rotate(90, expand=True)\n",
+ "        except Exception:\n",
+ "            pass # image has no usable EXIF data\n",
+ "        #resize all images to the same size\n",
+ "        img = img.convert('RGB').resize((512,512), Image.ANTIALIAS)\n",
+ "        imgs.append(np.array(img.copy()))\n",
+ "        labels.append(0)\n",
+ "        ids.append(fname.split('.')[0])\n",
+ "        img.close()\n",
+ "    return imgs, labels, ids\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "unique train imgs: 732\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "usage: ipykernel_launcher.py [-h] [--train_data_path TRAIN_DATA_PATH]\n",
+ "                             [--test_data_path TEST_DATA_PATH]\n",
+ "                             [--save_path SAVE_PATH]\n",
+ "ipykernel_launcher.py: error: unrecognized arguments: -f C:\\Users\\Administrator\\AppData\\Roaming\\jupyter\\runtime\\kernel-60e74fca-82ff-42d7-afc4-1d27b752461b.json\n"
+ ]
+ },
+ {
+ "ename": "SystemExit",
+ "evalue": "2",
+ "output_type": "error",
+ "traceback": [
+ "An exception has occurred, use %tb to see the full traceback.\n",
+ "\u001b[1;31mSystemExit\u001b[0m\u001b[1;31m:\u001b[0m 2\n"
+ ]
+ }
+ ],
+ "source": [
+ "# %load prepare_dataset.py\n",
+ "# read the training and test data, remove duplicates from the training data, and save everything as numpy matrices\n",
+ "# %run executes the standalone script (its print appears in the output above); the pasted\n",
+ "# copy below exits with SystemExit 2 because parse_args() rejects the Jupyter kernel's -f argument\n",
+ "%run prepare_dataset.py\n",
+ "import numpy as np\n",
+ "import os\n",
+ "import argparse\n",
+ "from utils import read_train_data, read_test_data\n",
+ "\n",
+ "parser = argparse.ArgumentParser(description='Data preparation')\n",
+ "parser.add_argument('--train_data_path', help='path to training data folder', default='D:/datasets/train', type=str)\n",
+ "parser.add_argument('--test_data_path', help='path to test data folder', default='D:/datasets/test', type=str)\n",
+ "parser.add_argument('--save_path', help='save path for training and test numpy matrices of images', default='D:/datasets/savepath', type=str)\n",
+ "args = parser.parse_args() # parse the arguments defined above\n",
+ "\n",
+ "#read training data\n",
+ "train_imgs, train_gts = read_train_data(args.train_data_path)\n",
+ "\n",
+ "#remove duplicate training images; if two copies carry different labels, drop both\n",
+ "idx_to_rmv = []\n",
+ "for i in range(len(train_imgs)-1):\n",
+ "    for j in range(i+1, len(train_imgs)):\n",
+ "        if np.all(train_imgs[i] == train_imgs[j]):\n",
+ "            idx_to_rmv.append(i)\n",
+ "            if train_gts[i] != train_gts[j]:\n",
+ "                idx_to_rmv.append(j)\n",
+ "\n",
+ "idx = [i for i in range(len(train_imgs)) if not(i in idx_to_rmv)]\n",
+ "print('unique train imgs:',len(idx))\n",
+ "\n",
+ "#save unique training imgs\n",
+ "np.save(os.path.join(args.save_path, 'unique_train_imgs_rot_fixed'), np.array(train_imgs)[idx])\n",
+ "np.save(os.path.join(args.save_path, 'unique_train_gts_rot_fixed'), np.array(train_gts)[idx])\n",
+ "\n",
+ "#read test data\n",
+ "test_imgs, test_gts, ids = read_test_data(args.test_data_path)\n",
+ "\n",
+ "#save test data\n",
+ "np.save(os.path.join(args.save_path, 'test_imgs_rot_fixed'), np.array(test_imgs))\n",
+ "np.save(os.path.join(args.save_path, 'test_gts'), np.array(test_gts))\n",
+ "np.save(os.path.join(args.save_path, 'ids'), np.array(ids))\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "unique train imgs: 732\n"
+ ]
+ }
+ ],
+ "source": [
+ "%run prepare_dataset.py"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "usage: ipykernel_launcher.py [-h] [--data_path DATA_PATH]\n",
+ "                             [--library_size LIBRARY_SIZE]\n",
+ "                             [--library_path LIBRARY_PATH]\n",
+ "ipykernel_launcher.py: error: unrecognized arguments: -f C:\\Users\\Administrator\\AppData\\Roaming\\jupyter\\runtime\\kernel-9ba0d6cf-6ce4-4517-8c53-84c6b0e19712.json\n"
+ ]
+ },
+ {
+ "ename": "SystemExit",
+ "evalue": "2",
+ "output_type": "error",
+ "traceback": [
+ "An exception has occurred, use %tb to see the full traceback.\n",
+ "\u001b[1;31mSystemExit\u001b[0m\u001b[1;31m:\u001b[0m 2\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\IPython\\core\\interactiveshell.py:3333: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.\n",
+ "  warn(\"To exit: use 'exit', 'quit', or Ctrl-D.\", stacklevel=1)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# %load generate_library_of_models.py\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.optim as optim\n",
+ "from torch.optim import lr_scheduler\n",
+ "import numpy as np\n",
+ "import torchvision\n",
+ "from torchvision import datasets, models, transforms\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import time\n",
+ "import os\n",
+ "import argparse\n",
+ "import copy\n",
+ "from sklearn.model_selection import StratifiedKFold\n",
+ "import datetime\n",
+ "from PIL import Image\n",
+ "import torch.nn.functional as F\n",
+ "\n",
+ "from dataset import ICLRDataset\n",
+ "from utils import train_model_snapshot, test\n",
+ "from sklearn.metrics import confusion_matrix\n",
+ "from hyperopt import hp, tpe, fmin, Trials\n",
+ "from collections import OrderedDict\n",
+ "\n",
+ "\n",
+ "def score(params):\n",
+ "    global test_prob, val_prob, trails_sc_arr, idx # globals shared across hyperopt trials\n",
+ "    print(params)\n",
+ "    k = 5 \n",
+ "    sss = StratifiedKFold(n_splits=k, shuffle = True, random_state=seed_arr[idx]) # train/validation index splits that preserve class ratios\n",
+ "    # (n_splits folds, at least 2; shuffle the samples of each class before splitting; random_state fixes the shuffle when shuffle is True)\n",
+ "\n",
+ "    #define trial data augmentations for the training set\n",
+ "    data_transforms = {\n",
+ "        'train': transforms.Compose([\n",
+ "            transforms.ColorJitter(contrast = params['contrast'], hue = params['hue'], brightness = params['brightness']),\n",
+ "            transforms.RandomAffine(degrees = params['degrees']),\n",
+ "            transforms.RandomResizedCrop(224),\n",
+ "            transforms.RandomHorizontalFlip(p = 0.5 if params['h_flip'] else 0.0), # flip horizontally with probability p\n",
+ "            transforms.RandomVerticalFlip(p = 0.5 if params['v_flip'] else 0.0),   # flip vertically with probability p\n",
+ "            transforms.ToTensor(),\n",
+ "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
+ "        ]),\n",
+ "        'val': transforms.Compose([\n",
+ "            transforms.Resize((params['val_img_size'], params['val_img_size'])),\n",
+ "            transforms.CenterCrop(224),\n",
+ "            transforms.ToTensor(),\n",
+ "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
+ "        ]),\n",
+ "    }\n",
+ "\n",
+ "    trail_test_prob = np.zeros((test_imgs.shape[0], 3), dtype = np.float32)\n",
+ "    trail_val_prob = torch.zeros((train_imgs.shape[0], 3), dtype = torch.float32).to(device)\n",
+ "\n",
+ "    sc_arr = []\n",
+ "    models_arr = []\n",
+ "    fold = 0\n",
+ "    #train a model for each split\n",
+ "    for train_index, val_index in sss.split(train_imgs, train_gts):\n",
+ "        #define dataset and loader for training and validation\n",
+ "        image_datasets = {'train': ICLRDataset(train_imgs, train_gts, 'train', train_index, data_transforms['train'], params['img_mix_enable']),\n",
+ "                          'val': ICLRDataset(train_imgs, train_gts, 'val', val_index, data_transforms['val'])}\n",
+ "\n",
+ "        dataloaders = {'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size=16, shuffle=True, num_workers=2),\n",
+ "                       'val': torch.utils.data.DataLoader(image_datasets['val'], batch_size=16, shuffle=False, num_workers=2)}\n",
+ "\n",
+ "        #create model instance\n",
+ "        model_ft = params['arch'](pretrained=True)\n",
+ "        try:\n",
+ "            num_ftrs = model_ft.fc.in_features\n",
+ "            model_ft.fc = nn.Linear(num_ftrs, 3)\n",
+ "        except AttributeError: # densenets expose .classifier instead of .fc\n",
+ "            num_ftrs = model_ft.classifier.in_features\n",
+ "            model_ft.classifier = nn.Linear(num_ftrs, 3)\n",
+ "        model_ft = model_ft.to(device)\n",
+ "\n",
+ "        criterion = nn.CrossEntropyLoss()\n",
+ "\n",
+ "        dataset_sizes = {x:len(image_datasets[x]) for x in ['train', 'val']}\n",
+ "\n",
+ "        model_ft_arr, ensemble_loss, _, fold_val_prob = train_model_snapshot(model_ft, criterion, params['lr'], dataloaders, dataset_sizes, device,\n",
+ "                                                                             num_cycles=params['num_cycles'], num_epochs_per_cycle=params['num_epochs_per_cycle'])\n",
+ "        models_arr.extend(model_ft_arr)\n",
+ "        fold += 1\n",
+ "        sc_arr.append(ensemble_loss)\n",
+ "        trail_val_prob[val_index] = fold_val_prob\n",
+ "\n",
+ "    #predict on test data using the average of the k-fold models\n",
+ "    image_datasets['test'] = ICLRDataset(test_imgs, test_gts, 'test', None, data_transforms['val'])\n",
+ "    test_loader = torch.utils.data.DataLoader(image_datasets['test'], batch_size=4,shuffle=False, num_workers=16)\n",
+ "    trail_test_prob = test(models_arr, test_loader, device)\n",
+ "\n",
+ "    print('mean val loss:', np.mean(sc_arr))\n",
+ "\n",
+ "    test_prob.append(trail_test_prob)\n",
+ "    val_prob.append(trail_val_prob)\n",
+ "\n",
+ "    #save validation and test predictions of this trial for the later ensemble-selection step\n",
+ "    np.save(os.path.join(args.library_path, 'val_prob_trail_%d'%(idx)), trail_val_prob.detach().cpu().numpy())\n",
+ "    np.save(os.path.join(args.library_path, 'test_prob_trail_%d'%(idx)), trail_test_prob)\n",
+ "    idx += 1\n",
+ "\n",
+ "    trails_sc_arr.append(np.mean(sc_arr))\n",
+ "\n",
+ "    torch.cuda.empty_cache()\n",
+ "    del models_arr\n",
+ "\n",
+ "    return np.mean(sc_arr)\n",
+ "\n",
+ "parser = argparse.ArgumentParser(description='Model library generation')\n",
+ "parser.add_argument('--data_path', help='path to training and test numpy matrices of images', default='.', type=str)\n",
+ "parser.add_argument('--library_size', help='number of models to be trained in the library of models', default=50, type=int)\n",
+ "parser.add_argument('--library_path', help='save path for validation and test predictions of the library of models', default='trails', type=str)\n",
+ "args = parser.parse_args()\n",
+ "\n",
+ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
+ "\n",
+ "torch.manual_seed(0)\n",
+ "np.random.seed(0)\n",
+ "\n",
+ "torch.backends.cudnn.deterministic = True\n",
+ "torch.backends.cudnn.benchmark = False\n",
+ "\n",
+ "#read train data; paths are resolved relative to --data_path (the original hard-coded\n",
+ "#'D:/datasets/savepath/...' here, and an absolute second argument makes os.path.join silently discard data_path)\n",
+ "train_imgs = np.load(os.path.join(args.data_path, 'unique_train_imgs_rot_fixed.npy'))\n",
+ "train_gts = np.load(os.path.join(args.data_path, 'unique_train_gts_rot_fixed.npy'))\n",
+ "\n",
+ "#read test data\n",
+ "test_imgs = np.load(os.path.join(args.data_path, 'test_imgs_rot_fixed.npy'))\n",
+ "test_gts = np.load(os.path.join(args.data_path, 'test_gts.npy'))\n",
+ "ids = np.load(os.path.join(args.data_path, 'ids.npy')).tolist()\n",
+ "\n",
+ "test_prob = []\n",
+ "val_prob = []\n",
+ "trails_sc_arr = []\n",
+ "\n",
+ "n_trails = args.library_size\n",
+ "seed_arr = np.random.randint(low=0, high=1000000, size=n_trails)\n",
+ "\n",
+ "#create search space for hyperparameter optimization\n",
+ "space = OrderedDict([('lr', hp.choice('lr', [i*0.001 for i in range(1,4)])),\n",
+ "                     ('num_cycles', hp.choice('num_cycles', range(3, 6))),\n",
+ "                     ('num_epochs_per_cycle', hp.choice('num_epochs_per_cycle', range(3, 6))),\n",
+ "                     ('arch', hp.choice('arch', [models.densenet201, models.densenet121, models.densenet169,\n",
+ "                                                 models.wide_resnet50_2, models.resnet152, \n",
+ "                                                 models.resnet101, models.resnet50, models.resnet34, models.resnet18])),\n",
+ "                     ('img_mix_enable', hp.choice('img_mix_enable', [True, False])),\n",
+ "                     ('v_flip', hp.choice('v_flip', [True, False])),\n",
+ "                     ('h_flip', hp.choice('h_flip', [True, False])),\n",
+ "                     ('degrees', hp.choice('degrees', range(1, 90))),\n",
+ "                     ('contrast', hp.uniform('contrast', 0.0, 0.3)),\n",
+ "                     ('hue', hp.uniform('hue', 0.0, 0.3)),\n",
+ "                     ('brightness', hp.uniform('brightness', 0.0, 0.3)),\n",
+ "                     ('val_img_size', hp.choice('val_img_size', range(224, 512, 24))),\n",
+ "                    ])\n",
+ "\n",
+ "trials = Trials()\n",
+ "\n",
+ "idx = 0\n",
+ "if not os.path.exists(args.library_path):\n",
+ "    os.mkdir(args.library_path)\n",
+ "\n",
+ "#use the tpe algorithm in hyperopt to generate a library of different models\n",
+ "best = fmin(fn=score,space=space,algo=tpe.suggest,max_evals=n_trails,trials=trials)\n",
+ "# fmin is the optimization loop that minimizes the objective over the search space\n",
+ "# (objective to minimize; search space; search algorithm driving the sequential search; maximum number of evaluations; Trials object)\n",
+ "print(best)\n",
+ "\n",
+ "np.save(os.path.join(args.library_path, 'scores.npy'), np.array(trails_sc_arr))\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{'arch': , 'brightness': 0.20568033410096712, 'contrast': 0.047925566574344027, 'degrees': 87, 'h_flip': True, 'hue': 0.2804254868966057, 'img_mix_enable': True, 'lr': 0.001, 'num_cycles': 5, 'num_epochs_per_cycle': 5, 'v_flip': True, 'val_img_size': 248}\n",
+ "  0%|          | 0/50 [00:00"
+ ]
+ },
"\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\fmin.py\u001b[0m in \u001b[0;36mfmin\u001b[1;34m(fn, space, algo, max_evals, timeout, loss_threshold, trials, rstate, allow_trials_fmin, pass_expr_memo_ctrl, catch_eval_exceptions, verbose, return_argmin, points_to_evaluate, max_queue_len, show_progressbar)\u001b[0m\n\u001b[0;32m 480\u001b[0m \u001b[0mcatch_eval_exceptions\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcatch_eval_exceptions\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 481\u001b[0m \u001b[0mreturn_argmin\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mreturn_argmin\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 482\u001b[1;33m \u001b[0mshow_progressbar\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mshow_progressbar\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 483\u001b[0m )\n\u001b[0;32m 484\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\base.py\u001b[0m in \u001b[0;36mfmin\u001b[1;34m(self, fn, space, algo, max_evals, timeout, loss_threshold, max_queue_len, rstate, verbose, pass_expr_memo_ctrl, catch_eval_exceptions, return_argmin, show_progressbar)\u001b[0m\n\u001b[0;32m 684\u001b[0m \u001b[0mcatch_eval_exceptions\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcatch_eval_exceptions\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 685\u001b[0m \u001b[0mreturn_argmin\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mreturn_argmin\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 686\u001b[1;33m \u001b[0mshow_progressbar\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mshow_progressbar\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 687\u001b[0m )\n\u001b[0;32m 688\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\fmin.py\u001b[0m in \u001b[0;36mfmin\u001b[1;34m(fn, space, algo, max_evals, timeout, loss_threshold, trials, rstate, allow_trials_fmin, pass_expr_memo_ctrl, catch_eval_exceptions, verbose, return_argmin, points_to_evaluate, max_queue_len, show_progressbar)\u001b[0m\n\u001b[0;32m 507\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 508\u001b[0m \u001b[1;31m# next line is where the fmin is actually executed\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 509\u001b[1;33m \u001b[0mrval\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mexhaust\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 510\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 511\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mreturn_argmin\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\fmin.py\u001b[0m in \u001b[0;36mexhaust\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 328\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mexhaust\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 329\u001b[0m \u001b[0mn_done\u001b[0m \u001b[1;33m=\u001b[0m 
\u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 330\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmax_evals\u001b[0m \u001b[1;33m-\u001b[0m \u001b[0mn_done\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mblock_until_done\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0masynchronous\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 331\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrefresh\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 332\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\fmin.py\u001b[0m in \u001b[0;36mrun\u001b[1;34m(self, N, block_until_done)\u001b[0m\n\u001b[0;32m 284\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 285\u001b[0m \u001b[1;31m# -- loop over trials and do the jobs directly\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 286\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mserial_evaluate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 287\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 288\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrefresh\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\fmin.py\u001b[0m in \u001b[0;36mserial_evaluate\u001b[1;34m(self, N)\u001b[0m\n\u001b[0;32m 163\u001b[0m \u001b[0mctrl\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mbase\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mCtrl\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcurrent_trial\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtrial\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 164\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 165\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdomain\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mspec\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mctrl\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 166\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 167\u001b[0m \u001b[0mlogger\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0merror\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"job exception: %s\"\u001b[0m \u001b[1;33m%\u001b[0m 
\u001b[0mstr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\base.py\u001b[0m in \u001b[0;36mevaluate\u001b[1;34m(self, config, ctrl, attach_attachments)\u001b[0m\n\u001b[0;32m 892\u001b[0m \u001b[0mprint_node_on_error\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrec_eval_print_node_on_error\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 893\u001b[0m )\n\u001b[1;32m--> 894\u001b[1;33m \u001b[0mrval\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpyll_rval\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 895\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 896\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrval\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mfloat\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mint\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumber\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\Desktop\\WheatRustClassification-master\\generate_library_of_models.py\u001b[0m in \u001b[0;36mscore\u001b[1;34m(params)\u001b[0m\n\u001b[0;32m 78\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 79\u001b[0m model_ft_arr, ensemble_loss, _, fold_val_prob = train_model_snapshot(model_ft, criterion, params['lr'], dataloaders, dataset_sizes, device,\n\u001b[1;32m---> 80\u001b[1;33m num_cycles=params['num_cycles'], num_epochs_per_cycle=params['num_epochs_per_cycle'])\n\u001b[0m\u001b[0;32m 81\u001b[0m \u001b[0mmodels_arr\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mextend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel_ft_arr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 82\u001b[0m \u001b[0mfold\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\Desktop\\WheatRustClassification-master\\utils.py\u001b[0m in \u001b[0;36mtrain_model_snapshot\u001b[1;34m(model, criterion, lr, dataloaders, dataset_sizes, device, num_cycles, num_epochs_per_cycle)\u001b[0m\n\u001b[0;32m 40\u001b[0m \u001b[0midx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 41\u001b[0m \u001b[1;31m# Iterate over data.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 42\u001b[1;33m \u001b[1;32mfor\u001b[0m \u001b[0minputs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlabels\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mdataloaders\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mphase\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 43\u001b[0m \u001b[0minputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0minputs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 44\u001b[0m \u001b[0mlabels\u001b[0m \u001b[1;33m=\u001b[0m 
\u001b[0mlabels\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\site-packages\\torch\\utils\\data\\dataloader.py\u001b[0m in \u001b[0;36m__iter__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 277\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0m_SingleProcessDataLoaderIter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 278\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 279\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0m_MultiProcessingDataLoaderIter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 280\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 281\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\site-packages\\torch\\utils\\data\\dataloader.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, loader)\u001b[0m\n\u001b[0;32m 717\u001b[0m \u001b[1;31m# before it starts, and __del__ tries to join but will get:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 718\u001b[0m \u001b[1;31m# AssertionError: can only join a started process.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 719\u001b[1;33m \u001b[0mw\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstart\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 720\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_index_queues\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mindex_queue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 721\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_workers\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mw\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\multiprocessing\\process.py\u001b[0m in \u001b[0;36mstart\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 110\u001b[0m \u001b[1;34m'daemonic processes are not allowed to have children'\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 111\u001b[0m \u001b[0m_cleanup\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 112\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_popen\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_Popen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 113\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_sentinel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_popen\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msentinel\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 114\u001b[0m \u001b[1;31m# Avoid a refcycle if the target function holds an 
indirect\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\multiprocessing\\context.py\u001b[0m in \u001b[0;36m_Popen\u001b[1;34m(process_obj)\u001b[0m\n\u001b[0;32m 221\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mstaticmethod\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 222\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_Popen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprocess_obj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 223\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0m_default_context\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_context\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mProcess\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_Popen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprocess_obj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 224\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 225\u001b[0m \u001b[1;32mclass\u001b[0m \u001b[0mDefaultContext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mBaseContext\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\multiprocessing\\context.py\u001b[0m in \u001b[0;36m_Popen\u001b[1;34m(process_obj)\u001b[0m\n\u001b[0;32m 320\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_Popen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprocess_obj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 321\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[1;33m.\u001b[0m\u001b[0mpopen_spawn_win32\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mPopen\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 322\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mPopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprocess_obj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 323\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 324\u001b[0m \u001b[1;32mclass\u001b[0m \u001b[0mSpawnContext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mBaseContext\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\multiprocessing\\popen_spawn_win32.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, process_obj)\u001b[0m\n\u001b[0;32m 44\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 45\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mprocess_obj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 46\u001b[1;33m \u001b[0mprep_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mspawn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_preparation_data\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprocess_obj\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_name\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 47\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 48\u001b[0m \u001b[1;31m# read end of pipe will be \"stolen\" by the child process\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\multiprocessing\\spawn.py\u001b[0m in 
\u001b[0;36mget_preparation_data\u001b[1;34m(name)\u001b[0m\n\u001b[0;32m 170\u001b[0m \u001b[1;31m# or through direct execution (or to leave it alone entirely)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 171\u001b[0m \u001b[0mmain_module\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msys\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodules\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'__main__'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 172\u001b[1;33m \u001b[0mmain_mod_name\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmain_module\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__spec__\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"name\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 173\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mmain_mod_name\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 174\u001b[0m \u001b[0md\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'init_main_from_name'\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmain_mod_name\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;31mAttributeError\u001b[0m: module '__main__' has no attribute '__spec__'" + ] + } + ], + "source": [ + "%run generate_library_of_models.py" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.pytest_cache/CACHEDIR.TAG" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.pytest_cache/CACHEDIR.TAG" new file mode 100644 index 0000000..381f03a --- /dev/null +++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.pytest_cache/CACHEDIR.TAG" @@ -0,0 +1,4 @@ +Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by pytest. 
+# For information about cache directory tags, see:
+#	http://www.bford.info/cachedir/spec.html
diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.pytest_cache/README.md" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.pytest_cache/README.md"
new file mode 100644
index 0000000..b10f023
--- /dev/null
+++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/.pytest_cache/README.md"
@@ -0,0 +1,8 @@
+# pytest cache directory #
+
+This directory contains data from the pytest's cache plugin,
+which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
+
+**Do not** commit this to version control.
+
+See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.
diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/README.md" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/README.md"
new file mode 100644
index 0000000..def227c
--- /dev/null
+++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/README.md"
@@ -0,0 +1,127 @@
+# Wheat Rust Classification from Ensemble Selection of CNNs
+
+Fourth place solution for the CGIAR Computer Vision for Crop Disease competition, organized by the CV4A workshop at ICLR 2020. The objective of the competition is to classify a given wheat image as healthy, infected with stem rust, or infected with leaf rust.
+
+## Summary of Approach
+
+Create an ensemble from a library of diverse models with different architectures and augmentations. All models are initially pre-trained on ImageNet and fine-tuned on the competition dataset. The models and augmentations are chosen automatically using hyperparameter optimization.
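+The "Ensemble Selection" in the title is the greedy, Caruana-style procedure of [7]: start from the best single model, then repeatedly add (with replacement) the library member that most reduces the validation loss. The sketch below is illustrative and not part of the original code; it assumes a hypothetical `val_probs` array of shape (N, D, 3) holding each model's validation class probabilities, and it combines members with an arithmetic mean for clarity, whereas ensemble_selection.py in this patch combines them with a geometric mean plus bagging.
+
+```
+import numpy as np
+
+def log_loss(y, p):
+    # mean negative log-likelihood of the true class
+    return -np.mean(np.log(p[np.arange(len(y)), y] + 1e-12))
+
+def greedy_ensemble_selection(val_probs, y, max_steps=20):
+    # val_probs: (N, D, 3) per-model probabilities, y: (D,) integer labels
+    chosen = [int(np.argmin([log_loss(y, p) for p in val_probs]))]  # best single model
+    for _ in range(max_steps):
+        mean_p = val_probs[chosen].mean(axis=0)
+        # score every candidate addition (with replacement)
+        scores = [log_loss(y, (mean_p * len(chosen) + p) / (len(chosen) + 1))
+                  for p in val_probs]
+        best = int(np.argmin(scores))
+        if scores[best] >= log_loss(y, mean_p):
+            break  # no candidate improves the ensemble any further
+        chosen.append(best)
+    return chosen
+```
+
+Using a geometric rather than arithmetic mean, as the actual ensemble_selection.py does, penalizes members that assign near-zero probability to the true class.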
+
+### Model Architectures
+
+The following architectures are included in the library of models:
+
+* ResNet [1]
+* ResNeXt [2]
+* WideResNet [3]
+* DenseNet [4]
+
+### Data Augmentations
+
+The following augmentations are included in the hyperparameter optimization search space:
+
+* Rotation
+* Random cropping and resizing
+* Horizontal flipping
+* Vertical flipping
+* Brightness augmentation
+* Hue augmentation
+* Contrast augmentation
+* Mixup augmentation [5] (a minimal sketch is given after the references)
+
+### Common Configuration
+
+The following configuration is applied to all trials in the hyperparameter optimization process:
+
+* Stochastic Gradient Descent (SGD) optimizer
+* Snapshot ensemble [6] (a minimal sketch is given after the references)
+* 5-fold training
+
+## Getting Started
+
+### Prerequisites
+
+Firstly, you need to have
+
+* Ubuntu 18.04
+* Python3
+* At least 11 GB GPU RAM
+
+Secondly, you need to download the challenge data and sample submission file by following the instructions [here](https://zindi.africa/competitions/iclr-workshop-challenge-1-cgiar-computer-vision-for-crop-disease/data).
+
+Thirdly, you need to install the dependencies by running:
+
+```
+pip3 install -r requirements.txt
+```
+
+### Project files
+
+* prepare_dataset.py: reads the training and test data, removes duplicates from the training data and saves everything in numpy matrices. It has the following arguments:
+
+    --train_data_path: path to training data folder
+
+    --test_data_path: path to test data folder
+
+    --save_path: save path for training and test numpy matrices of images
+
+* generate_library_of_models.py: generates a library of models with different architectures and augmentations through a hyperparameter optimization search. It has the following arguments:
+
+    --data_path: path to training and test numpy matrices of images
+
+    --library_size: number of models to be trained in the library of models
+
+    --library_path: save path for validation and test predictions of the library of models
+
+* ensemble_selection.py: applies the Ensemble Selection [7] algorithm to the generated library of models to find the ensemble with the lowest validation error, and uses it to create the final submission. It has the following arguments:
+
+    --train_data_path: path to training data folder
+
+    --data_path: path to training and test numpy matrices of images
+
+    --sample_sub_file_path: path to sample submission file
+
+    --library_size: number of models to be trained in the library of models
+
+    --library_path: save path for validation and test predictions of the library of models
+
+    --final_sub_file_save_path: save path for final submission file
+
+* dataset.py: has the dataset class for training and test data.
+
+* utils.py: utility functions for training, testing and reading dataset images.
+
+## Running
+
+### 1- Prepare dataset
+
+```
+python3 prepare_dataset.py
+```
+
+### 2- Generate the library of models
+
+```
+python3 generate_library_of_models.py
+```
+
+### 3- Create ensemble and generate submission file
+
+```
+python3 ensemble_selection.py
+```
+
+## References
+[1] He, Kaiming, et al. "Deep residual learning for image recognition." Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
+
+[2] Xie, Saining, et al. "Aggregated residual transformations for deep neural networks." Proceedings of the IEEE conference on computer vision and pattern recognition. 2017.
+
+[3] Zagoruyko, Sergey, and Nikos Komodakis. "Wide residual networks." arXiv preprint arXiv:1605.07146 (2016).
+
+[4] Huang, Gao, et al. "Densely connected convolutional networks." Proceedings of the IEEE conference on computer vision and pattern recognition. 2017.
+
+[5] Zhang, Hongyi, et al. "mixup: Beyond empirical risk minimization." arXiv preprint arXiv:1710.09412 (2017).
+
+[6] Huang, Gao, et al. "Snapshot ensembles: Train 1, get m for free." arXiv preprint arXiv:1704.00109 (2017).
+
+[7] Caruana, Rich, et al. "Ensemble selection from libraries of models." Proceedings of the twenty-first international conference on Machine learning. 2004.
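+The mixup-style augmentation [5] listed above blends a training image with a randomly drawn image from a different class while keeping the original label. A minimal sketch, assuming `imgs` and `gts` are in-memory arrays of images and labels; it mirrors `ICLRDataset.augment` in dataset.py below, which uses a fixed 0.8/0.2 blend, whereas classic mixup samples the weight from a Beta distribution and mixes the labels as well:
+
+```
+import numpy as np
+
+def mixup_style_augment(img, y, imgs, gts, d=0.8, p=0.5):
+    # with probability p, blend img with a random image of a different class
+    if np.random.random() >= p:
+        return img
+    while True:
+        j = np.random.randint(0, len(imgs))
+        if gts[j] != y:  # only mix across classes
+            break
+    return img * d + imgs[j] * (1 - d)  # dominant weight d keeps label y valid
+```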
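+Snapshot ensembling [6] trains a single network through several cosine-annealed learning-rate cycles and saves the weights at the end of each cycle, so one training run yields several ensemble members. A minimal PyTorch sketch, assuming a ready `model`, `criterion`, `loader` and `device`; `train_model_snapshot` in utils.py below follows the same pattern while additionally tracking validation probabilities:
+
+```
+import copy
+import torch.optim as optim
+from torch.optim import lr_scheduler
+
+def snapshot_train(model, criterion, loader, device,
+                   num_cycles=3, epochs_per_cycle=3, lr=0.01):
+    snapshots = []
+    for _ in range(num_cycles):
+        # restart SGD and the cosine schedule at the top of every cycle
+        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
+        scheduler = lr_scheduler.CosineAnnealingLR(
+            optimizer, T_max=epochs_per_cycle * len(loader))
+        for _ in range(epochs_per_cycle):
+            for inputs, labels in loader:
+                inputs, labels = inputs.to(device), labels.to(device)
+                optimizer.zero_grad()
+                loss = criterion(model(inputs), labels)
+                loss.backward()
+                optimizer.step()
+                scheduler.step()  # per-batch cosine annealing
+        snapshots.append(copy.deepcopy(model.state_dict()))  # snapshot at cycle end
+    return snapshots
+```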
diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/Untitled.ipynb" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/Untitled.ipynb"
new file mode 100644
index 0000000..c8c0a43
--- /dev/null
+++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/Untitled.ipynb"
@@ -0,0 +1,620 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# %load dataset.py\n",
+    "from torch.utils.data import Dataset\n",
+    "from torchvision import transforms, utils\n",
+    "import numpy as np\n",
+    "from scipy import ndimage\n",
+    "import torch\n",
+    "from PIL import Image  # image processing library\n",
+    "\n",
+    "class ICLRDataset(Dataset):\n",
+    "    def __init__(self, imgs, gts, split_type, index, transform, img_mix_enable = True):\n",
+    "        if index is None:\n",
+    "            self.imgs = imgs\n",
+    "            self.gts = gts\n",
+    "        else:\n",
+    "            self.imgs = [imgs[i] for i in index]  # images belonging to this split\n",
+    "            self.gts = [gts[i] for i in index]  # their ground-truth labels\n",
+    "        \n",
+    "        self.split_type = split_type\n",
+    "        self.transform = transform\n",
+    "        self.img_mix_enable = img_mix_enable\n",
+    "        \n",
+    "    def __len__(self):\n",
+    "        return len(self.imgs)\n",
+    "    \n",
+    "    def augment(self, img, y): \n",
+    "        p = np.random.random(1)  # one uniform sample in [0, 1)\n",
+    "        if p[0] > 0.5:  # apply image mixing with probability 0.5\n",
+    "            while True:\n",
+    "                rnd_idx = np.random.randint(0, len(self.imgs))  # random image index (half-open range)\n",
+    "                if self.gts[rnd_idx] != y:  # keep sampling until the label differs from y\n",
+    "                    break\n",
+    "            rnd_crop = self.transform(Image.fromarray(self.imgs[rnd_idx]))  # transformed image to blend in\n",
+    "            d = 0.8\n",
+    "            img = img * d + rnd_crop * (1 - d)  # mixup-style blend with fixed weight 0.8\n",
+    "        return img\n",
+    "\n",
+    "    def __getitem__(self, idx):\n",
+    "        img = self.imgs[idx]\n",
+    "        y = self.gts[idx]\n",
+    "        img = Image.fromarray(img)\n",
+    "        img = self.transform(img)\n",
+    "        if (self.split_type == 'train') & self.img_mix_enable:\n",
+    "            img = self.augment(img, y) \n",
+    "        return img, y  # (possibly augmented) training image and its label\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# %load utils.py\n",
+    "# Utility functions for training, testing and reading dataset images\n",
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "import torch.optim as optim\n",
+    "from torch.optim import lr_scheduler\n",
+    "import numpy as np\n",
+    "import torchvision\n",
+    "from torchvision import datasets, models, transforms\n",
+    "#import matplotlib.pyplot as plt\n",
+    "import time\n",
+    "import os\n",
+    "import copy\n",
+    "import torch.nn.functional as F\n",
+    "from PIL import Image, ExifTags\n",
+    "\n",
+    "def 
train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes, device, num_cycles, num_epochs_per_cycle):\n",
+    "    since = time.time()  # record the training start time\n",
+    "\n",
+    "    best_model_wts = copy.deepcopy(model.state_dict())  # best weights so far (starts from the pretrained weights)\n",
+    "    best_acc = 0.0\n",
+    "    best_loss = 1000000.0\n",
+    "    model_w_arr = []\n",
+    "    prob = torch.zeros((dataset_sizes['val'], 3), dtype = torch.float32).to(device)  # accumulated validation predictions\n",
+    "    lbl = torch.zeros((dataset_sizes['val'],), dtype = torch.long).to(device)  # validation labels\n",
+    "    for cycle in range(num_cycles):\n",
+    "        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)#, weight_decay = 0.0005)\n",
+    "        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, num_epochs_per_cycle*len(dataloaders['train'])) \n",
+    "        # cosine-annealed learning rate over one snapshot cycle (T_max = number of batches per cycle)\n",
+    "        for epoch in range(num_epochs_per_cycle):\n",
+    "            #print('Cycle {}: Epoch {}/{}'.format(cycle, epoch, num_epochs_per_cycle - 1))\n",
+    "            #print('-' * 10)\n",
+    "\n",
+    "            # Each epoch has a training and validation phase\n",
+    "            for phase in ['train', 'val']:\n",
+    "                if phase == 'train':\n",
+    "                    model.train()  # Set model to training mode\n",
+    "                else:\n",
+    "                    model.eval()   # Set model to evaluate mode\n",
+    "\n",
+    "                running_loss = 0.0\n",
+    "                running_corrects = 0\n",
+    "                idx = 0\n",
+    "                # Iterate over data.\n",
+    "                for inputs, labels in dataloaders[phase]:\n",
+    "                    inputs = inputs.to(device)\n",
+    "                    labels = labels.to(device)\n",
+    "\n",
+    "                    # zero the parameter gradients\n",
+    "                    optimizer.zero_grad()\n",
+    "\n",
+    "                    # forward\n",
+    "                    # track history if only in train\n",
+    "                    with torch.set_grad_enabled(phase == 'train'):\n",
+    "                        outputs = model(inputs)\n",
+    "                        _, preds = torch.max(outputs, 1)\n",
+    "                        if (epoch == num_epochs_per_cycle-1) and (phase == 'val'):\n",
+    "                            prob[idx:idx+inputs.shape[0]] += F.softmax(outputs, dim = 1)\n",
+    "                            lbl[idx:idx+inputs.shape[0]] = labels\n",
+    "                            idx += inputs.shape[0]\n",
+    "                        loss = criterion(outputs, labels)\n",
+    "                        # backward + optimize only if in training phase\n",
+    "                        if phase == 'train':\n",
+    "                            loss.backward()\n",
+    "                            optimizer.step()\n",
+    "                            scheduler.step()\n",
+    "                            #print(optimizer.param_groups[0]['lr'])\n",
+    "                    \n",
+    "                    # statistics\n",
+    "                    running_loss += loss.item() * inputs.size(0)\n",
+    "                    running_corrects += torch.sum(preds == labels.data)\n",
+    "\n",
+    "                epoch_loss = running_loss / dataset_sizes[phase]\n",
+    "                epoch_acc = running_corrects.double() / dataset_sizes[phase]\n",
+    "\n",
+    "                #print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n",
+    "                #    phase, epoch_loss, epoch_acc))\n",
+    "\n",
+    "                # deep copy the model\n",
+    "                if phase == 'val' and epoch_loss < best_loss:\n",
+    "                    best_loss = epoch_loss\n",
+    "                    best_model_wts = copy.deepcopy(model.state_dict())\n",
+    "            #print()\n",
+    "        model_w_arr.append(copy.deepcopy(model.state_dict()))\n",
+    "\n",
+    "    prob /= num_cycles\n",
+    "    ensemble_loss = F.nll_loss(torch.log(prob), lbl) \n",
+    "    ensemble_loss = ensemble_loss.item()\n",
+    "    time_elapsed = time.time() - since\n",
+    "    #print('Training complete in {:.0f}m {:.0f}s'.format(\n",
+    "    #    time_elapsed // 60, time_elapsed % 60))\n",
+    "    #print('Ensemble Loss : {:4f}, Best val Loss: {:4f}'.format(ensemble_loss, best_loss))\n",
+    "\n",
+    "    # rebuild one model per saved snapshot\n",
+    "    # (note: every entry references the same model object, so after this loop\n",
+    "    # all entries hold the weights that were loaded last)\n",
+    "    model_arr =[]\n",
+    "    for weights in model_w_arr:\n",
+    "        model.load_state_dict(weights) \n",
+    "        model_arr.append(model) \n",
+    "    return model_arr, ensemble_loss, best_loss, prob\n",
+    "\n",
+    "def test(models_arr, loader, device):\n",
+    "    res = np.zeros((610, 3), dtype = np.float32)  # hard-coded test-set size (610 images, 3 classes)\n",
+    "    for model 
in models_arr:\n",
+    "        model.eval()\n",
+    "        res_arr = []\n",
+    "        for inputs, _ in loader:\n",
+    "            inputs = inputs.to(device)\n",
+    "            # forward\n",
+    "            # track history if only in train\n",
+    "            with torch.set_grad_enabled(False):\n",
+    "                outputs = F.softmax(model(inputs), dim = 1)  \n",
+    "            res_arr.append(outputs.detach().cpu().numpy())\n",
+    "        res_arr = np.concatenate(res_arr, axis = 0)\n",
+    "        res += res_arr\n",
+    "    return res / len(models_arr)\n",
+    "\n",
+    "def read_train_data(p):\n",
+    "    imgs = []\n",
+    "    labels = []\n",
+    "    for i, lbl in enumerate(os.listdir(p)):\n",
+    "        for fname in os.listdir(os.path.join(p, lbl)):\n",
+    "            #read image\n",
+    "            img = Image.open(os.path.join(p, lbl, fname))\n",
+    "            #rotate image back to its original orientation (from EXIF metadata)\n",
+    "            try:\n",
+    "                exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)\n",
+    "                if exif['Orientation'] == 3:\n",
+    "                    img=img.rotate(180, expand=True)\n",
+    "                elif exif['Orientation'] == 6:\n",
+    "                    img=img.rotate(270, expand=True)\n",
+    "                elif exif['Orientation'] == 8:\n",
+    "                    img=img.rotate(90, expand=True)\n",
+    "            except:\n",
+    "                pass\n",
+    "            #resize all images to the same size\n",
+    "            img = np.array(img.convert('RGB').resize((512,512), Image.ANTIALIAS))\n",
+    "            imgs.append(img)\n",
+    "            labels.append(i)\n",
+    "    return imgs, labels\n",
+    "\n",
+    "def read_test_data(p):\n",
+    "    imgs = []\n",
+    "    labels = []\n",
+    "    ids = []\n",
+    "    for fname in os.listdir(p):\n",
+    "        #read image\n",
+    "        img = Image.open(os.path.join(p, fname))\n",
+    "        #rotate image back to its original orientation\n",
+    "        try:\n",
+    "            if not('DMWVNR' in fname):\n",
+    "                exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)\n",
+    "                if exif['Orientation'] == 3:\n",
+    "                    img=img.rotate(180, expand=True)\n",
+    "                elif exif['Orientation'] == 6:\n",
+    "                    img=img.rotate(270, expand=True)\n",
+    "                elif exif['Orientation'] == 8:\n",
+    "                    img=img.rotate(90, expand=True)\n",
+    "        except:\n",
+    "            pass\n",
+    "        #resize all images to the same size\n",
+    "        img = img.convert('RGB').resize((512,512), Image.ANTIALIAS)\n",
+    "        imgs.append(np.array(img.copy()))\n",
+    "        labels.append(0)\n",
+    "        ids.append(fname.split('.')[0])\n",
+    "        img.close()\n",
+    "    return imgs, labels, ids\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "unique train imgs: 732\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "usage: ipykernel_launcher.py [-h] [--train_data_path TRAIN_DATA_PATH]\n",
+      "                             [--test_data_path TEST_DATA_PATH]\n",
+      "                             [--save_path SAVE_PATH]\n",
+      "ipykernel_launcher.py: error: unrecognized arguments: -f C:\\Users\\Administrator\\AppData\\Roaming\\jupyter\\runtime\\kernel-60e74fca-82ff-42d7-afc4-1d27b752461b.json\n"
+     ]
+    },
+    {
+     "ename": "SystemExit",
+     "evalue": "2",
+     "output_type": "error",
+     "traceback": [
+      "An exception has occurred, use %tb to see the full traceback.\n",
+      "\u001b[1;31mSystemExit\u001b[0m\u001b[1;31m:\u001b[0m 2\n"
+     ]
+    }
+   ],
+   "source": [
+    "# %load prepare_dataset.py\n",
+    "# Read the training and test data, remove duplicates from the training data and save everything as numpy matrices\n",
+    "%run prepare_dataset.py\n",
+    "import numpy as np\n",
+    "import os\n",
+    "import argparse\n",
+    "from utils import read_train_data, read_test_data\n",
+    "\n",
+    "parser = argparse.ArgumentParser(description='Data preparation')\n",
+    "parser.add_argument('--train_data_path', help='path', default='D:/datasets/train', type=str)\n",
+    
"parser.add_argument('--test_data_path', help='path', default='D:/datasets/test', type=str)\n", + "parser.add_argument('--save_path', help='save', default='D:/datasets/savepath', type=str)\n", + "args = parser.parse_args() #获取参数,调用上面的属性\n", + "\n", + "#read training data\n", + "train_imgs, train_gts = read_train_data(args.train_data_path)\n", + "\n", + "#remove dublicate training imgs\n", + "idx_to_rmv = []\n", + "for i in range(len(train_imgs)-1):\n", + " for j in range(i+1, len(train_imgs)):\n", + " if np.all(train_imgs[i] == train_imgs[j]):\n", + " idx_to_rmv.append(i)\n", + " if train_gts[i] != train_gts[j]:\n", + " idx_to_rmv.append(j)\n", + "\n", + "idx = [i for i in range(len(train_imgs)) if not(i in idx_to_rmv)]\n", + "print('unique train imgs:',len(idx))\n", + "\n", + "#save unique training imgs\n", + "np.save(os.path.join(args.save_path, 'unique_train_imgs_rot_fixed'), np.array(train_imgs)[idx])\n", + "np.save(os.path.join(args.save_path, 'unique_train_gts_rot_fixed'), np.array(train_gts)[idx])\n", + "\n", + "#read test data\n", + "test_imgs, test_gts, ids = read_test_data(args.test_data_path)\n", + "\n", + "#save test data\n", + "np.save(os.path.join(args.save_path, 'test_imgs_rot_fixed'), np.array(test_imgs))\n", + "np.save(os.path.join(args.save_path, 'test_gts'), np.array(test_gts))\n", + "np.save(os.path.join(args.save_path, 'ids'), np.array(ids))\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "unique train imgs: 732\n" + ] + } + ], + "source": [ + "%run prepare_dataset.py" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "usage: ipykernel_launcher.py [-h] [--data_path DATA_PATH]\n", + " [--library_size LIBRARY_SIZE]\n", + " [--library_path LIBRARY_PATH]\n", + "ipykernel_launcher.py: error: unrecognized arguments: -f C:\\Users\\Administrator\\AppData\\Roaming\\jupyter\\runtime\\kernel-9ba0d6cf-6ce4-4517-8c53-84c6b0e19712.json\n" + ] + }, + { + "ename": "SystemExit", + "evalue": "2", + "output_type": "error", + "traceback": [ + "An exception has occurred, use %tb to see the full traceback.\n", + "\u001b[1;31mSystemExit\u001b[0m\u001b[1;31m:\u001b[0m 2\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\IPython\\core\\interactiveshell.py:3333: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.\n", + " warn(\"To exit: use 'exit', 'quit', or Ctrl-D.\", stacklevel=1)\n" + ] + } + ], + "source": [ + "# %load generate_library_of_models.py\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "from torch.optim import lr_scheduler\n", + "import numpy as np\n", + "import torchvision\n", + "from torchvision import datasets, models, transforms\n", + "import pandas as pd\n", + "import matplotlib.pyplot as plt\n", + "import time\n", + "import os\n", + "import argparse\n", + "import copy\n", + "from sklearn.model_selection import StratifiedKFold\n", + "import datetime\n", + "from PIL import Image\n", + "import torch.nn.functional as F\n", + "\n", + "from dataset import ICLRDataset\n", + "from utils import train_model_snapshot, test\n", + "from sklearn.metrics import confusion_matrix\n", + "from hyperopt import hp, tpe, fmin, Trials\n", + "from collections import OrderedDict\n", + "\n", + "\n", + "def score(params):\n", + " global test_prob, val_prob, 
trails_sc_arr, idx  # global state shared across hyperopt trials\n",
+    "    print(params)\n",
+    "    k = 5 \n",
+    "    sss = StratifiedKFold(n_splits=k, shuffle = True, random_state=seed_arr[idx])  # stratified train/validation splitter\n",
+    "    # (number of folds, at least 2; shuffle samples within each class before splitting; the seed fixes the shuffling order)\n",
+    "    \n",
+    "    #define trial data augmentations: augmentation + normalization for training, resize + normalization for validation\n",
+    "    data_transforms = {\n",
+    "        'train': transforms.Compose([\n",
+    "            transforms.ColorJitter(contrast = params['contrast'], hue = params['hue'], brightness = params['brightness']),\n",
+    "            transforms.RandomAffine(degrees = params['degrees']),\n",
+    "            transforms.RandomResizedCrop(224),\n",
+    "            transforms.RandomHorizontalFlip(p = 0.5 if params['h_flip'] else 0.0),  # flip horizontally with probability p\n",
+    "            transforms.RandomVerticalFlip(p = 0.5 if params['v_flip'] else 0.0),  # flip vertically with probability p\n",
+    "            transforms.ToTensor(),\n",
+    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
+    "        ]),\n",
+    "        'val': transforms.Compose([\n",
+    "            transforms.Resize((params['val_img_size'], params['val_img_size'])),\n",
+    "            transforms.CenterCrop(224),\n",
+    "            transforms.ToTensor(),\n",
+    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
+    "        ]),\n",
+    "    }\n",
+    "\n",
+    "    trail_test_prob = np.zeros((test_imgs.shape[0], 3), dtype = np.float32)\n",
+    "    trail_val_prob = torch.zeros((train_imgs.shape[0], 3), dtype = torch.float32).to(device)\n",
+    "    \n",
+    "    sc_arr = []\n",
+    "    models_arr = []\n",
+    "    fold = 0\n",
+    "    #train a model for each split\n",
+    "    for train_index, val_index in sss.split(train_imgs, train_gts):\n",
+    "        #define dataset and loader for training and validation\n",
+    "        image_datasets = {'train': ICLRDataset(train_imgs, train_gts, 'train', train_index, data_transforms['train'], params['img_mix_enable']),\n",
+    "\t    'val': ICLRDataset(train_imgs, train_gts, 'val', val_index, data_transforms['val'])}\n",
+    "\n",
+    "        dataloaders = {'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size=16, shuffle=True, num_workers=2),\n",
+    "            'val': torch.utils.data.DataLoader(image_datasets['val'], batch_size=16, shuffle=False, num_workers=2)}\n",
+    "\n",
+    "        #create model instance\n",
+    "        model_ft = params['arch'](pretrained=True)  # ImageNet-pretrained backbone\n",
+    "        try:\n",
+    "            num_ftrs = model_ft.fc.in_features\n",
+    "            model_ft.fc = nn.Linear(num_ftrs, 3)\n",
+    "        except:\n",
+    "            num_ftrs = model_ft.classifier.in_features\n",
+    "            model_ft.classifier = nn.Linear(num_ftrs, 3)\n",
+    "        model_ft = model_ft.to(device)\n",
+    "\n",
+    "        criterion = nn.CrossEntropyLoss()\n",
+    "\n",
+    "        dataset_sizes = {x:len(image_datasets[x]) for x in ['train', 'val']}\n",
+    "        \n",
+    "        model_ft_arr, ensemble_loss, _, fold_val_prob = train_model_snapshot(model_ft, criterion, params['lr'], dataloaders, dataset_sizes, device,\n",
+    "            num_cycles=params['num_cycles'], num_epochs_per_cycle=params['num_epochs_per_cycle'])\n",
+    "        models_arr.extend(model_ft_arr)\n",
+    "        fold += 1\n",
+    "        sc_arr.append(ensemble_loss)\n",
+    "        trail_val_prob[val_index] = fold_val_prob\n",
+    "    \n",
+    "    #predict on test data using the average of the k-fold models\n",
+    "    image_datasets['test'] = ICLRDataset(test_imgs, test_gts, 'test', None, data_transforms['val'])\n",
+    "    test_loader = torch.utils.data.DataLoader(image_datasets['test'], batch_size=4,shuffle=False, num_workers=16)\n",
+    "    trail_test_prob = test(models_arr, test_loader, device)\n",
+    "\n",
+    "    print('mean val loss:', np.mean(sc_arr))\n",
+    "\n",
+    "    test_prob.append(trail_test_prob)\n",
+    "    val_prob.append(trail_val_prob)\n",
+    "\n",
+    "    #save validation 
and test results for further processing\n",
+    "    np.save(os.path.join(args.library_path, 'val_prob_trail_%d'%(idx)), trail_val_prob.detach().cpu().numpy())\n",
+    "    np.save(os.path.join(args.library_path, 'test_prob_trail_%d'%(idx)), trail_test_prob)\n",
+    "    idx += 1\n",
+    "    \n",
+    "    trails_sc_arr.append(np.mean(sc_arr))\n",
+    "\n",
+    "    torch.cuda.empty_cache()\n",
+    "    del models_arr\n",
+    "\n",
+    "    return np.mean(sc_arr)\n",
+    "\n",
+    "parser = argparse.ArgumentParser(description='Data preparation')\n",
+    "parser.add_argument('--data_path', help='path to training and test numpy matrices of images', default='.', type=str)\n",
+    "parser.add_argument('--library_size', help='number of models to be trained in the library of models', default=50, type=int)\n",
+    "parser.add_argument('--library_path', help='save path for validation and test predictions of the library of models', default='trails', type=str)\n",
+    "args = parser.parse_args()\n",
+    "\n",
+    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
+    "\n",
+    "torch.manual_seed(0)\n",
+    "np.random.seed(0)\n",
+    "\n",
+    "torch.backends.cudnn.deterministic = True\n",
+    "torch.backends.cudnn.benchmark = False\n",
+    "\n",
+    "#read train data (note: the absolute path below bypasses --data_path, since os.path.join discards args.data_path when the second argument is absolute)\n",
+    "train_imgs = np.load(os.path.join(args.data_path, 'D:/datasets/savepath/unique_train_imgs_rot_fixed.npy'))\n",
+    "train_gts = np.load(os.path.join(args.data_path, 'unique_train_gts_rot_fixed.npy'))\n",
+    "\n",
+    "#read test data\n",
+    "test_imgs = np.load(os.path.join(args.data_path, 'test_imgs_rot_fixed.npy'))\n",
+    "test_gts = np.load(os.path.join(args.data_path, 'test_gts.npy'))\n",
+    "ids = np.load(os.path.join(args.data_path, 'ids.npy')).tolist()\n",
+    "\n",
+    "test_prob = []\n",
+    "val_prob = []\n",
+    "trails_sc_arr = []\n",
+    "\n",
+    "n_trails = args.library_size\n",
+    "seed_arr = np.random.randint(low=0, high=1000000, size=n_trails)\n",
+    "\n",
+    "#create search space for hyperparameter optimization\n",
+    "space = OrderedDict([('lr', hp.choice('lr', [i*0.001 for i in range(1,4)])),\n",
+    "        ('num_cycles', hp.choice('num_cycles', range(3, 6))),\n",
+    "        ('num_epochs_per_cycle', hp.choice('num_epochs_per_cycle', range(3, 6))),\n",
+    "        ('arch', hp.choice('arch', [models.densenet201, models.densenet121, models.densenet169,\n",
+    "            models.wide_resnet50_2, models.resnet152, \n",
+    "            models.resnet101, models.resnet50, models.resnet34, models.resnet18])),\n",
+    "        ('img_mix_enable', hp.choice('img_mix_enable', [True, False])),\n",
+    "        ('v_flip', hp.choice('v_flip', [True, False])),\n",
+    "        ('h_flip', hp.choice('h_flip', [True, False])),\n",
+    "        ('degrees', hp.choice('degrees', range(1, 90))),\n",
+    "        ('contrast', hp.uniform('contrast', 0.0, 0.3)),\n",
+    "        ('hue', hp.uniform('hue', 0.0, 0.3)),\n",
+    "        ('brightness', hp.uniform('brightness', 0.0, 0.3)),\n",
+    "        ('val_img_size', hp.choice('val_img_size', range(224, 512, 24))),\n",
+    "        ])\n",
+    "\n",
+    "trials = Trials()\n",
+    "\n",
+    "idx = 0\n",
+    "if not os.path.exists(args.library_path):\n",
+    "    os.mkdir(args.library_path)\n",
+    "\n",
+    "#use the TPE algorithm in hyperopt to generate a library of different models\n",
+    "best = fmin(fn=score,space=space,algo=tpe.suggest,max_evals=n_trails,trials=trials)\n",
+    "#fmin iterates over the candidate configurations to minimize the objective function\n",
+    "#(objective to minimize; search space; search algorithm that drives the sequential exploration of the hyperparameter space; maximum number of evaluations; Trials object)\n",
+    "print(best)\n",
+    "\n",
+    "np.save(os.path.join(args.library_path, 'scores.npy'), np.array(trails_sc_arr))\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {
"scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'arch': , 'brightness': 0.006903174616102503, 'contrast': 0.10395561286019817, 'degrees': 14, 'h_flip': True, 'hue': 0.12930405533670436, 'img_mix_enable': False, 'lr': 0.001, 'num_cycles': 5, 'num_epochs_per_cycle': 5, 'v_flip': True, 'val_img_size': 416}\n", + " 0%| | 0/100 [00:00\u001b[1;34m\u001b[0m\n\u001b[0;32m 161\u001b[0m \u001b[1;31m#use tpe algorithm in hyperopt to generate a library of differnet models\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 162\u001b[0m \u001b[1;31m#best = fmin(fn=score,space=space,algo=tpe.suggest,max_evals=n_trails,trials=trials)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 163\u001b[1;33m \u001b[0mbest\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mfmin\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mscore\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mspace\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mspace\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0malgo\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtpe\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msuggest\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mmax_evals\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m100\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtrials\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtrials\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 164\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mbest\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 165\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\fmin.py\u001b[0m in \u001b[0;36mfmin\u001b[1;34m(fn, space, algo, max_evals, timeout, loss_threshold, trials, rstate, allow_trials_fmin, pass_expr_memo_ctrl, catch_eval_exceptions, verbose, return_argmin, points_to_evaluate, max_queue_len, show_progressbar)\u001b[0m\n\u001b[0;32m 480\u001b[0m \u001b[0mcatch_eval_exceptions\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcatch_eval_exceptions\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 481\u001b[0m \u001b[0mreturn_argmin\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mreturn_argmin\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 482\u001b[1;33m \u001b[0mshow_progressbar\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mshow_progressbar\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 483\u001b[0m )\n\u001b[0;32m 484\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\base.py\u001b[0m in \u001b[0;36mfmin\u001b[1;34m(self, fn, space, algo, max_evals, timeout, loss_threshold, max_queue_len, rstate, verbose, pass_expr_memo_ctrl, catch_eval_exceptions, return_argmin, show_progressbar)\u001b[0m\n\u001b[0;32m 684\u001b[0m \u001b[0mcatch_eval_exceptions\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcatch_eval_exceptions\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 685\u001b[0m \u001b[0mreturn_argmin\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mreturn_argmin\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 686\u001b[1;33m 
\u001b[0mshow_progressbar\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mshow_progressbar\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 687\u001b[0m )\n\u001b[0;32m 688\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\fmin.py\u001b[0m in \u001b[0;36mfmin\u001b[1;34m(fn, space, algo, max_evals, timeout, loss_threshold, trials, rstate, allow_trials_fmin, pass_expr_memo_ctrl, catch_eval_exceptions, verbose, return_argmin, points_to_evaluate, max_queue_len, show_progressbar)\u001b[0m\n\u001b[0;32m 507\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 508\u001b[0m \u001b[1;31m# next line is where the fmin is actually executed\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 509\u001b[1;33m \u001b[0mrval\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mexhaust\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 510\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 511\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mreturn_argmin\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\fmin.py\u001b[0m in \u001b[0;36mexhaust\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 328\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mexhaust\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 329\u001b[0m \u001b[0mn_done\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 330\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmax_evals\u001b[0m \u001b[1;33m-\u001b[0m \u001b[0mn_done\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mblock_until_done\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0masynchronous\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 331\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrefresh\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 332\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\fmin.py\u001b[0m in \u001b[0;36mrun\u001b[1;34m(self, N, block_until_done)\u001b[0m\n\u001b[0;32m 284\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 285\u001b[0m \u001b[1;31m# -- loop over trials and do the jobs directly\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 286\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mserial_evaluate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 287\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 288\u001b[0m 
\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrefresh\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\fmin.py\u001b[0m in \u001b[0;36mserial_evaluate\u001b[1;34m(self, N)\u001b[0m\n\u001b[0;32m 163\u001b[0m \u001b[0mctrl\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mbase\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mCtrl\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrials\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcurrent_trial\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtrial\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 164\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 165\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdomain\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mevaluate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mspec\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mctrl\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 166\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 167\u001b[0m \u001b[0mlogger\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0merror\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"job exception: %s\"\u001b[0m \u001b[1;33m%\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\AppData\\Roaming\\Python\\Python37\\site-packages\\hyperopt\\base.py\u001b[0m in \u001b[0;36mevaluate\u001b[1;34m(self, config, ctrl, attach_attachments)\u001b[0m\n\u001b[0;32m 892\u001b[0m \u001b[0mprint_node_on_error\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrec_eval_print_node_on_error\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 893\u001b[0m )\n\u001b[1;32m--> 894\u001b[1;33m \u001b[0mrval\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpyll_rval\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 895\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 896\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrval\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mfloat\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mint\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumber\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\Desktop\\WheatRustClassification-master\\generate_library_of_models.py\u001b[0m in \u001b[0;36mscore\u001b[1;34m(params)\u001b[0m\n\u001b[0;32m 78\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 79\u001b[0m model_ft_arr, ensemble_loss, _, fold_val_prob = train_model_snapshot(model_ft, criterion, params['lr'], dataloaders, dataset_sizes, device,\n\u001b[1;32m---> 80\u001b[1;33m num_cycles=params['num_cycles'], 
num_epochs_per_cycle=params['num_epochs_per_cycle'])\n\u001b[0m\u001b[0;32m 81\u001b[0m \u001b[0mmodels_arr\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mextend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel_ft_arr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 82\u001b[0m \u001b[0mfold\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32m~\\Desktop\\WheatRustClassification-master\\utils.py\u001b[0m in \u001b[0;36mtrain_model_snapshot\u001b[1;34m(model, criterion, lr, dataloaders, dataset_sizes, device, num_cycles, num_epochs_per_cycle)\u001b[0m\n\u001b[0;32m 40\u001b[0m \u001b[0midx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 41\u001b[0m \u001b[1;31m# Iterate over data.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 42\u001b[1;33m \u001b[1;32mfor\u001b[0m \u001b[0minputs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlabels\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mdataloaders\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mphase\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 43\u001b[0m \u001b[0minputs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0minputs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 44\u001b[0m \u001b[0mlabels\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mlabels\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\site-packages\\torch\\utils\\data\\dataloader.py\u001b[0m in \u001b[0;36m__iter__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 277\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0m_SingleProcessDataLoaderIter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 278\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 279\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0m_MultiProcessingDataLoaderIter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 280\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 281\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\site-packages\\torch\\utils\\data\\dataloader.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, loader)\u001b[0m\n\u001b[0;32m 717\u001b[0m \u001b[1;31m# before it starts, and __del__ tries to join but will get:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 718\u001b[0m \u001b[1;31m# AssertionError: can only join a started process.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 719\u001b[1;33m \u001b[0mw\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstart\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 720\u001b[0m 
\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_index_queues\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mindex_queue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 721\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_workers\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mw\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\multiprocessing\\process.py\u001b[0m in \u001b[0;36mstart\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 110\u001b[0m \u001b[1;34m'daemonic processes are not allowed to have children'\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 111\u001b[0m \u001b[0m_cleanup\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 112\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_popen\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_Popen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 113\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_sentinel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_popen\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msentinel\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 114\u001b[0m \u001b[1;31m# Avoid a refcycle if the target function holds an indirect\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\multiprocessing\\context.py\u001b[0m in \u001b[0;36m_Popen\u001b[1;34m(process_obj)\u001b[0m\n\u001b[0;32m 221\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mstaticmethod\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 222\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_Popen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprocess_obj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 223\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0m_default_context\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_context\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mProcess\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_Popen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprocess_obj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 224\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 225\u001b[0m \u001b[1;32mclass\u001b[0m \u001b[0mDefaultContext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mBaseContext\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\multiprocessing\\context.py\u001b[0m in \u001b[0;36m_Popen\u001b[1;34m(process_obj)\u001b[0m\n\u001b[0;32m 320\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_Popen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprocess_obj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 321\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[1;33m.\u001b[0m\u001b[0mpopen_spawn_win32\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mPopen\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 322\u001b[1;33m 
\u001b[1;32mreturn\u001b[0m \u001b[0mPopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprocess_obj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 323\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 324\u001b[0m \u001b[1;32mclass\u001b[0m \u001b[0mSpawnContext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mBaseContext\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\multiprocessing\\popen_spawn_win32.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, process_obj)\u001b[0m\n\u001b[0;32m 44\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 45\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mprocess_obj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 46\u001b[1;33m \u001b[0mprep_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mspawn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_preparation_data\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mprocess_obj\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_name\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 47\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 48\u001b[0m \u001b[1;31m# read end of pipe will be \"stolen\" by the child process\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mC:\\ProgramData\\Anaconda3\\lib\\multiprocessing\\spawn.py\u001b[0m in \u001b[0;36mget_preparation_data\u001b[1;34m(name)\u001b[0m\n\u001b[0;32m 170\u001b[0m \u001b[1;31m# or through direct execution (or to leave it alone entirely)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 171\u001b[0m \u001b[0mmain_module\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msys\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodules\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'__main__'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 172\u001b[1;33m \u001b[0mmain_mod_name\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmain_module\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__spec__\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"name\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 173\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mmain_mod_name\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 174\u001b[0m \u001b[0md\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'init_main_from_name'\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmain_mod_name\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;31mAttributeError\u001b[0m: module '__main__' has no attribute '__spec__'" + ] + } + ], + "source": [ + "%run generate_library_of_models.py" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + 
"name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/__pycache__/dataset.cpython-36.pyc" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/__pycache__/dataset.cpython-36.pyc" new file mode 100644 index 0000000000000000000000000000000000000000..fb3841946c3e952ba38f0a23c969491afe094e13 GIT binary patch literal 1827 zcmb7FOK&7K5cZ>A^T-m|A-h@NfRL$)bbi^cI!T9;?}!X! zcu8cK(>YJNl0PSLbOqK)Ol`QEnpEc|X0~lqTI<0`RoeO!Q+{`mZD zd$TOG$wt+9>j64}9tGrL;|@Q4A&R;%Li9JwQI?i^3&8cJ{}+mLc^Qpr!BMJ>kx|Qs zW)Y9-qILzud(fBe#J=jF8a=daJubl;b3V>(>!xt*1E54y#Z=^V`g57bq4jiL4sJIp zG-&;zmibhzqPh1`EU0y+7i-Z08(qhYPHJ3DXhZ`Vu^#QQ0MJG6T@>BWff+_Fcc5zw z(m-ZU=$t|U&;!UNFZ0?R7*q8D%->SuD6pXrutJCr(EwT^^y1E?*s~UPG`J<(p)E!pL%LM-EcEZG6MyA;?3!t%{)MU&cRAL^QAWn8rlRs()bd_$D~Idkr{0;HvixXLsuw<(4qbPbol#k*sh+&Ez9b$ z4dA-VQDs9Mi`pm*@=m5)b^_4Z)$g*R09{}*-?&pjFg1Fbo zj2m{s8wUKK&bvXt&?#zyZK8Hz3>(&P;G5w%V89qVus)P=QOD683NU*05ei2J*6yNS zKLr$f*IdY{`WUA^L4l>b5Uz$hLJV_Ln7k6=GnmqNeF$wYf2+X4#iRQbb+aDc-y^s} za1H9T%7w6PA*zv_l!&`RoK4blF{AL=QfnyCHT5})TTdKvcN*&xzch-7w!0CJx(c=N zid{7$mA&e_x4)0ilS1-s$EklQG(27kQ=l*x)~_ek_*`L1t*^5JF#MSTcFQe4+}l@< bK(ziYXKb{&h-M4xRHJ`gx(YYEi=F)o99M^J literal 0 HcmV?d00001 diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/__pycache__/dataset.cpython-37(1).pyc" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/__pycache__/dataset.cpython-37(1).pyc" new file mode 100644 index 0000000000000000000000000000000000000000..0fbc904d20541e8000ff00da46a22a48026ce44a GIT binary patch literal 1842 zcmb7FOK%)S5bnpk_F-el;=}=@NO15TSaC*{W3VN#WLc486f`olhUwYOcxPtU-Mz@R zW>2=1TsgwYKJr8O6MaJBt;GJ}-4R;cg=*)DPZ5owSx<68-w*JKAh2F;rmCVazkb&s$ zqLAXs#o!=p0qGvFjKoxun8}u8l3&u8OHcZjB=%+z#J-GV1HJ>XWO`^u*@0uG0tWHPecyk%!!QcXNzJ3p?o)EBw5u&$IjMAjgn;H4aLYK+h}^|*j&%-J}zjoTiw%Ycq3^HY&k$uC9L z4XvlMqJOtgp+oEEmCR1n3cC9c*$idpdNvmWu+buC3{vB4LL(Z`h;?X(1t6Vw-Urbw z99V3`avMlvk_L)=OfM+(0^NtQ@*=C;hFz*Y0{a~_4gwnr0W*a77#*M|LeC#P+|t=o z>{}ez#{rPr(STrJzbok3YdYmMbmAJHp$qe-Bv3EhQ+0+Bcue~)=vmsYc zUyzfhuOQ;9z=T&3&O+NY`tqeW4Qg5k9a8%e-cLw}JSJ0GV{ZQb4W_P;4S-<${e1Ap z?>KIrcN>;fV;jK9m!r~#_|7Y%Fv)wBa@E0HY@@G6WujDa)(ss@mP4H_J+pKihpLhy zm#1-rLaIh%OXYuXOeTXetIQK{=S~};ZFm^%&}As&D)m5mi0pht>QIMecvz7fvU?x^ z9`2}48Ay=#8kus#u6WG=AJAQS3+WgT8se;Ai>Mv2gf*)<;LY&eXMh;nw?4FSUUj2; z5McJ|6J!ny?A=+vJ_D)iEwGTM>Qgj*h74PI##|4#gcxKdH(4pf7huwOdI*9sa0gpkbREqE)plXGmZU; zUm96No9&24ZH3zIl3jt@D}CK_zkwcJPYS`eEvNo9*Km0$Y;=Wnuzocu$7c#lYJHvN nAj5wRWH;UN7rT4P0f^q;7mSTIX4!0FpK6S+O;_NAw~^StRd02=1TsgwYKJr8O6MaJBt;GJ}-4R;cg=*)DPZ5owSx<68-w*JKAh2F;rmCVazkb&s$ zqLAXs#o!=p0qGvFjKoxun8}u8l3&u8OHcZjB=%+z#J-GV1HJ>XWO`^u*@0uG0tWHPecyk%!!QcXNzJ3p?o)EBw5u&$IjMAjgn;H4aLYK+h}^|*j&%-J}zjoTiw%Ycq3^HY&k$uC9L 
z4XvlMqJOtgp+oEEmCR1n3cC9c*$idpdNvmWu+buC3{vB4LL(Z`h;?X(1t6Vw-Urbw
z99V3`avMlvk_L)=OfM+(0^NtQ@*=C;hFz*Y0{a~_4gwnr0W*a77#*M|LeC#P+|t=o
z>{}ez#{rPr(STrJzbok3YdYmMbmAJHp$qe-Bv3EhQ+0+Bcue~)=vmsYc
zUyzfhuOQ;9z=T&3&O+NY`tqeW4Qg5k9a8%e-cLw}JSJ0GV{ZQb4W_P;4S-<${e1Ap
z?>KIrcN>;fV;jK9m!r~#_|7Y%Fv)wBa@E0HY@@G6WujDa)(ss@mP4H_J+pKihpLhy
zm#1-rLaIh%OXYuXOeTXetIQK{=S~};ZFm^%&}As&D)m5mi0pht>QIMecvz7fvU?x^
z9`2}48Ay=#8kus#u6WG=AJAQS3+WgT8se;Ai>Mv2gf*)<;LY&eXMh;nw?4FSUUj2;
z5McJ|6J!ny?A=+vJ_D)iEwGTM>Qgj*h74PI##|4#gcxKdH(4pf7huwOdI*9sa0gpkbREqE)plXGmZU;
zUm96No9&24ZH3zIl3jt@D}CK_zkwcJPYS`eEvNo9*Km0$Y;=Wnuzocu$7c#lYJHvN
nAj5wRWH;UN7rT4P0f^q;7mSTIX4!0FpK6S+O;_NAw~^StRd0 0.5:
+        while True:
+            rnd_idx = np.random.randint(0, len(self.imgs))
+            if self.gts[rnd_idx] != y:
+                break
+        rnd_crop = self.transform(Image.fromarray(self.imgs[rnd_idx]))
+        d = 0.8
+        img = img * d + rnd_crop * (1 - d)
+        return img
+
+    def __getitem__(self, idx):
+        img = self.imgs[idx]
+        y = self.gts[idx]
+        img = Image.fromarray(img)
+        img = self.transform(img)
+        if (self.split_type == 'train') & self.img_mix_enable:
+            img = self.augment(img, y)
+        return img, y
\ No newline at end of file
diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/ensemble_selection.py" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/ensemble_selection.py"
new file mode 100644
index 0000000..9da4b64
--- /dev/null
+++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/ensemble_selection.py"
@@ -0,0 +1,169 @@
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from torch.optim import lr_scheduler
+import numpy as np
+import torchvision
+from torchvision import datasets, models, transforms
+import pandas as pd
+#import matplotlib.pyplot as plt
+import time
+import os
+import copy
+import argparse
+from sklearn.model_selection import StratifiedKFold
+import datetime
+from PIL import Image
+
+import torch.nn.functional as F
+
+def cross_entropy(y, p):
+    p /= p.sum(1).reshape(-1,1)
+    return F.nll_loss(torch.log(torch.tensor(p)), torch.tensor(y)).numpy()
+
+def weighted_cross_entropy(y, p):
+    p /= p.sum(1).reshape(-1,1)
+    w_arr = np.array([0.53, 0.3, 0.0])
+    return np.sum([F.nll_loss(torch.log(torch.tensor(p[y==c])), torch.tensor(y[y==c])).numpy()*w_arr[c] for c in range(3)])
+
+class ensembleSelection:
+
+    def __init__(self, metric):
+        self.metric = metric
+
+    def _compare(self, sc1, sc2):
+        if sc1 < sc2:
+            return True
+        return False
+
+    def _initialize(self, X_p, y):
+        """
+        This function finds the index of the best validation probability
+        """
+        current_sc = self.metric(y, X_p[0])
+        ind = 0
+        for i in range(1, X_p.shape[0]):
+            print(i)
+            sc = self.metric(y, X_p[i])
+            print(sc)
+            if self._compare(sc, current_sc):
+                current_sc = sc
+                ind = i
+        return ind, current_sc
+
+    def es_with_replacement(self, X_p, Xtest_p, y):
+        best_ind, best_sc = self._initialize(X_p, y)
+        current_sc = best_sc
+        sumP = np.copy(X_p[best_ind])
+        sumP_test = np.copy(Xtest_p[best_ind])
+        i = 1
+        # find the best combination of the input models' results
+        while True:
+            i += 1
+            ind = -1
+            for m in range(X_p.shape[0]):
+                # check whether adding model m to the current combination improves the score
+                sc = self.metric(y, (sumP*X_p[m])**(1/i))
+                if self._compare(sc, current_sc):
+                    current_sc = sc
+                    ind = m
+            if ind > -1:
+                sumP *= X_p[ind]
+                sumP_test *= Xtest_p[ind]
+            else:
+                break
+        sumP = sumP**(1/(i-1))
+        sumP_test = sumP_test**(1/(i-1))
+
+        sumP /= sumP.sum(1).reshape(-1,1)
+        sumP_test /= sumP_test.sum(1).reshape(-1,1)
+
+        return current_sc, sumP, sumP_test
+
+    def es_with_bagging(self, X_p, Xtest_p, y, f = 0.5, n_bags = 20):
+        list_of_indices = [i for i in range(X_p.shape[0])]
+        bag_size = int(f*X_p.shape[0])
+        sumP = None
+        sumP_test = None
+        for i in range(n_bags):
+            # create a random subset (bag) of models
+            rng = np.copy(list_of_indices)
+            np.random.shuffle(rng)
+            rng = rng[:bag_size]
+            # find the best combination within the bag
+            sc, p, ptest = self.es_with_replacement(X_p[rng], Xtest_p[rng], y)
+            print('bag: %d, sc: %f'%(i, sc))
+            if sumP is None:
+                sumP = p
+                sumP_test = ptest
+            else:
+                sumP *= p
+                sumP_test *= ptest
+
+        # combine the results of all bags with a geometric mean
+        sumP = sumP**(1/n_bags)
+        sumP_test = sumP_test**(1/n_bags)
+
+        sumP /= sumP.sum(1).reshape(-1,1)
+        sumP_test /= sumP_test.sum(1).reshape(-1,1)
+
+        sumP[sumP < 1e-6] = 1e-6
+        sumP_test[sumP_test < 1e-6] = 1e-6
+
+        final_sc = self.metric(y, sumP)
+        print('avg sc: %f'%(final_sc))
+        return (final_sc, sumP, sumP_test)
+
+parser = argparse.ArgumentParser(description='Ensemble selection')
+parser.add_argument('--train_data_path', help='path to training data folder', default='train_data', type=str)
+parser.add_argument('--data_path', help='path to training and test numpy matrices of images', default='.', type=str)
+parser.add_argument('--sample_sub_file_path', help='path to sample submission file', default='.', type=str)
+parser.add_argument('--library_size', help='number of models to be trained in the library of models', default=50, type=int)
+parser.add_argument('--library_path', help='save path for validation and test predictions of the library of models', default='trails', type=str)
+parser.add_argument('--final_sub_file_save_path', help='save path for final submission file', default='.', type=str)
+args = parser.parse_args()
+
+np.random.seed(4321)
+
+n = args.library_size
+
+#read training ground truth
+train_gts = np.load(os.path.join(args.data_path, 'unique_train_gts_rot_fixed.npy'))
+
+#read the validation probabilities on training data produced by the automatic hyperopt trials
+#and stack them into a matrix of shape (N, D, 3), where N is the number of models and D is the data size
+train_prob = np.array([np.load(os.path.join(args.library_path, 'val_prob_trail_%d.npy'%(i))) for i in range(n)])
+
+#read the test probabilities produced by the hyperopt trials
+#and stack them into a matrix of shape (N, D, 3), where N is the number of models and D is the data size
+test_prob = np.array([np.load(os.path.join(args.library_path, 'test_prob_trail_%d.npy'%(i))) for i in range(n)])
+
+ids = np.load(os.path.join(args.data_path, 'ids.npy')).tolist()
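+# Illustrative sanity check (an assumption, not part of the original script):
+# the stacked matrices should be (N, D, 3) with rows summing to ~1, e.g.
+#   assert train_prob.shape == (n, len(train_gts), 3)
+#   assert np.allclose(train_prob.sum(axis=2), 1.0, atol=1e-3)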
+
+#use the ensemble selection algorithm to find the best combination of models under a geometric average
+es_obj = ensembleSelection(cross_entropy)
+sc, es_train_prob, es_test_prob = es_obj.es_with_bagging(train_prob, test_prob, train_gts, n_bags = 10, f = 0.65)
+
+#detect samples predicted as healthy wheat with high confidence
+idx = (np.max(es_test_prob, 1) > 0.7) & (np.argmax(es_test_prob, 1) == 2)
+
+#create another ensemble that puts more weight on the leaf and stem rust classes
+es_obj = ensembleSelection(weighted_cross_entropy)
+sc, es_train_prob, es_test_prob = es_obj.es_with_bagging(train_prob, test_prob, train_gts, n_bags = 10, f = 0.65)
+
+#restore full confidence for the samples confidently predicted as healthy wheat
+es_test_prob[idx, 0] = 1e-6
+es_test_prob[idx, 1] = 1e-6
+es_test_prob[idx, 2] = 1.0
+
+#create the submission file
+sub = pd.read_csv(os.path.join(args.sample_sub_file_path, 'sample_submission.csv'))
+sub['ID'] = ids
+lbl_names = os.listdir(args.train_data_path)
+for i, name in enumerate(lbl_names):
+    sub[name] = es_test_prob[:,i].tolist()
+sub.to_csv(os.path.join(args.final_sub_file_save_path, 'final_sub.csv'), index = False)
+
diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/generate_library_of_models.py" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/generate_library_of_models.py"
new file mode 100644
index 0000000..d397095
--- /dev/null
+++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/generate_library_of_models.py"
@@ -0,0 +1,191 @@
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from torch.optim import lr_scheduler
+import numpy as np
+from torchvision import datasets, models, transforms
+
+import pandas as pd
+import time
+import os
+import argparse
+import copy
+from sklearn.model_selection import StratifiedKFold
+import datetime
+from PIL import Image
+import torch.nn.functional as F
+
+from dataset import ICLRDataset
+from utils import train_model_snapshot, test
+from sklearn.metrics import confusion_matrix
+from hyperopt import hp, tpe, fmin, Trials
+from collections import OrderedDict
+
+
+def score(params):
+    global test_prob, val_prob, trials_sc_arr, idx
+    print(params)
+    k = 5
+    sss = StratifiedKFold(n_splits=k, shuffle = True, random_state=seed_arr[idx])
+    #define this trial's data augmentations
+    data_transforms = {
+        'train': transforms.Compose([
+            transforms.ColorJitter(contrast = params['contrast'], hue = params['hue'], brightness = params['brightness']),
+            transforms.RandomAffine(degrees = params['degrees']),
+            transforms.RandomResizedCrop(224),
+            transforms.RandomHorizontalFlip(p = 0.5 if params['h_flip'] else 0.0),
+            transforms.RandomVerticalFlip(p = 0.5 if params['v_flip'] else 0.0),
+            transforms.ToTensor(),
+            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+        ]),
+        'val': transforms.Compose([
+            transforms.Resize((params['val_img_size'], params['val_img_size'])),
+            transforms.CenterCrop(224),
+            transforms.ToTensor(),
+            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+        ]),
+    }
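+    # For intuition, a hypothetical point sampled by hyperopt from the search
+    # space defined below could look like
+    #   params = {'lr': 0.002, 'num_cycles': 4, 'num_epochs_per_cycle': 3,
+    #             'arch': models.resnet101, 'img_mix_enable': True, ...}
+    # score() trains k snapshot ensembles for that point and returns the mean
+    # validation loss, which hyperopt minimizes.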
+    trail_test_prob = np.zeros((test_imgs.shape[0], 3), dtype = np.float32)
+    trail_val_prob = torch.zeros((train_imgs.shape[0], 3), dtype = torch.float32).to(device)
+
+    sc_arr = []
+    models_arr = []
+    fold = 0
+    #train one snapshot ensemble per cross-validation split
+    for train_index, val_index in sss.split(train_imgs, train_gts):
+        #define dataset and loader for training and validation
+        image_datasets = {'train': ICLRDataset(train_imgs, train_gts, 'train', train_index, data_transforms['train'], params['img_mix_enable']),
+                          'val': ICLRDataset(train_imgs, train_gts, 'val', val_index, data_transforms['val'])}
+
+        dataloaders = {'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size=16, shuffle=True, num_workers=0),
+                       'val': torch.utils.data.DataLoader(image_datasets['val'], batch_size=16, shuffle=False, num_workers=0)}
+
+        #create a model instance and replace its classification head with a 3-class layer
+        model_ft = params['arch'](pretrained=True)
+        try:
+            num_ftrs = model_ft.fc.in_features
+            model_ft.fc = nn.Linear(num_ftrs, 3)
+        except AttributeError:
+            num_ftrs = model_ft.classifier.in_features
+            model_ft.classifier = nn.Linear(num_ftrs, 3)
+        model_ft = model_ft.to(device)
+
+        criterion = nn.CrossEntropyLoss()
+
+        dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
+
+        model_ft_arr, ensemble_loss, _, fold_val_prob = train_model_snapshot(model_ft, criterion, params['lr'], dataloaders, dataset_sizes, device,
+                                                                             num_cycles=params['num_cycles'], num_epochs_per_cycle=params['num_epochs_per_cycle'])
+        models_arr.extend(model_ft_arr)
+        fold += 1  # advance the k-fold cross-validation counter
+        sc_arr.append(ensemble_loss)
+        trail_val_prob[val_index] = fold_val_prob
+
+    #predict on test data using the average of the k-fold models
+    image_datasets['test'] = ICLRDataset(test_imgs, test_gts, 'test', None, data_transforms['val'])
+    test_loader = torch.utils.data.DataLoader(image_datasets['test'], batch_size=4, shuffle=False, num_workers=0)
+    trail_test_prob = test(models_arr, test_loader, device)
+
+    print('mean val loss:', np.mean(sc_arr))
+
+    test_prob.append(trail_test_prob)
+    val_prob.append(trail_val_prob)
+
+    #save validation and test predictions for the ensemble selection step
+    np.save(os.path.join(args.library_path, 'val_prob_trail_%d'%(idx)), trail_val_prob.detach().cpu().numpy())
+    np.save(os.path.join(args.library_path, 'test_prob_trail_%d'%(idx)), trail_test_prob)
+    idx += 1
+
+    trials_sc_arr.append(np.mean(sc_arr))
+
+    torch.cuda.empty_cache()
+    del models_arr
+
+    return np.mean(sc_arr)
+
+parser = argparse.ArgumentParser(description='Generate a library of models')
+parser.add_argument('--data_path', help='path to training and test numpy matrices of images', default='.', type=str)
+parser.add_argument('--library_size', help='number of models to be trained in the library of models', default=50, type=int)
+parser.add_argument('--library_path', help='save path for validation and test predictions of the library of models', default='trails', type=str)
+args = parser.parse_args()
+
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+torch.manual_seed(0)
+np.random.seed(0)
+
+torch.backends.cudnn.deterministic = True
+torch.backends.cudnn.benchmark = False
+
+#read train data
+train_imgs = np.load(os.path.join(args.data_path, 'unique_train_imgs_rot_fixed.npy'))
+train_gts = np.load(os.path.join(args.data_path, 'unique_train_gts_rot_fixed.npy'))
+
+#read test data
+test_imgs = np.load(os.path.join(args.data_path, 'test_imgs_rot_fixed.npy'))
+test_gts = np.load(os.path.join(args.data_path, 'test_gts.npy'))
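+# Assumed layout (matching what prepare_dataset.py saves): the image matrices
+# are uint8 arrays of shape (D, 512, 512, 3) and the gt arrays hold integer
+# class ids 0-2; ensemble_selection.py treats class 2 as healthy wheat.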
+ids = np.load(os.path.join(args.data_path, 'ids.npy')).tolist()
+
+test_prob = []
+val_prob = []
+trials_sc_arr = []
+
+n_trials = args.library_size
+seed_arr = np.random.randint(low=0, high=1000000, size=n_trials)
+
+#create the search space for hyperparameter optimization
+space = OrderedDict([('lr', hp.choice('lr', [i*0.001 for i in range(1,4)])),
+                     ('num_cycles', hp.choice('num_cycles', range(3, 6))),
+                     ('num_epochs_per_cycle', hp.choice('num_epochs_per_cycle', range(3, 6))),
+                     ('arch', hp.choice('arch', [models.densenet201, models.densenet169,
+                                                 models.resnet152, models.resnet101,
+                                                 models.vgg16])),
+                     ('img_mix_enable', hp.choice('img_mix_enable', [True, False])),
+                     ('v_flip', hp.choice('v_flip', [True, False])),
+                     ('h_flip', hp.choice('h_flip', [True, False])),
+                     ('degrees', hp.choice('degrees', range(1, 90))),
+                     ('contrast', hp.uniform('contrast', 0.0, 0.3)),
+                     ('hue', hp.uniform('hue', 0.0, 0.3)),
+                     ('brightness', hp.uniform('brightness', 0.0, 0.3)),
+                     ('val_img_size', hp.choice('val_img_size', range(224, 512, 24))),
+                     ])
+
+trials = Trials()
+
+idx = 0
+if not os.path.exists(args.library_path):
+    os.mkdir(args.library_path)
+
+#use the TPE algorithm in hyperopt to generate a library of different models
+best = fmin(fn=score, space=space, algo=tpe.suggest, max_evals=n_trials, trials=trials)
+
+np.save(os.path.join(args.library_path, 'scores.npy'), np.array(trials_sc_arr))
diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/prepare_dataset.py" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/prepare_dataset.py"
new file mode 100644
index 0000000..f88d16b
--- /dev/null
+++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/prepare_dataset.py"
@@ -0,0 +1,38 @@
+import numpy as np
+import os
+import argparse
+from utils import read_train_data, read_test_data
+
+parser = argparse.ArgumentParser(description='Data preparation')
+parser.add_argument('--train_data_path', help='path to the raw training image folder', default='C:/Users/x2/Desktop/pq/wheat rust/train', type=str)
+parser.add_argument('--test_data_path', help='path to the raw test image folder', default='C:/Users/x2/Desktop/pq/wheat rust/test', type=str)
+parser.add_argument('--save_path', help='save path for the prepared numpy matrices', default='C:/Users/x2/Desktop/pq/wheat rust/savepath', type=str)
+args = parser.parse_args()
+
+#read training data
+train_imgs, train_gts = read_train_data(args.train_data_path)
+
+#remove duplicate training images; when two copies carry different labels, drop both
+idx_to_rmv = []
+for i in range(len(train_imgs)-1):
+    for j in range(i+1, len(train_imgs)):
+        if np.all(train_imgs[i] == train_imgs[j]):
+            idx_to_rmv.append(i)
+            if train_gts[i] != train_gts[j]:
+                idx_to_rmv.append(j)
+
+idx = [i for i in range(len(train_imgs)) if i not in idx_to_rmv]
+print('unique train imgs:', len(idx))
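+#A faster dedup sketch (an assumption, not used by this script) would hash each
+#image once instead of the O(n^2) pairwise scan above:
+#   seen = {}
+#   for k, im in enumerate(train_imgs):
+#       seen.setdefault(im.tobytes(), []).append(k)
+#   dup_groups = [g for g in seen.values() if len(g) > 1]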
+
+#save unique training images
+np.save(os.path.join(args.save_path, 'unique_train_imgs_rot_fixed'), np.array(train_imgs)[idx])
+np.save(os.path.join(args.save_path, 'unique_train_gts_rot_fixed'), np.array(train_gts)[idx])
+
+#read test data
+test_imgs, test_gts, ids = read_test_data(args.test_data_path)
+
+#save test data
+np.save(os.path.join(args.save_path, 'test_imgs_rot_fixed'), np.array(test_imgs))
+np.save(os.path.join(args.save_path, 'test_gts'), np.array(test_gts))
+np.save(os.path.join(args.save_path, 'ids'), np.array(ids))
+
diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/requirements.txt" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/requirements.txt"
new file mode 100644
index 0000000..3ada6af
--- /dev/null
+++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/requirements.txt"
@@ -0,0 +1,8 @@
+hyperopt==0.2.3
+pandas==1.0.1
+torch==1.4.0
+scipy==1.4.1
+torchvision==0.5.0
+numpy==1.18.1
+Pillow==7.0.0
+scikit_learn==0.22.2.post1
diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/utils.py" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/utils.py"
new file mode 100644
index 0000000..ec9737d
--- /dev/null
+++ "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/utils.py"
@@ -0,0 +1,163 @@
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from torch.optim import lr_scheduler
+import numpy as np
+import torchvision
+from torchvision import datasets, models, transforms
+import time
+import os
+import copy
+import torch.nn.functional as F
+from PIL import Image, ExifTags
+
+def train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes, device, num_cycles, num_epochs_per_cycle):
+    since = time.time()
+
+    best_model_wts = copy.deepcopy(model.state_dict())
+    best_loss = float('inf')
+    model_w_arr = []
+    prob = torch.zeros((dataset_sizes['val'], 3), dtype = torch.float32).to(device)
+    lbl = torch.zeros((dataset_sizes['val'],), dtype = torch.long).to(device)
+    for cycle in range(num_cycles):
+        #restart SGD with cosine annealing at the start of every cycle (snapshot ensembling)
+        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
+        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, num_epochs_per_cycle*len(dataloaders['train']))
+        for epoch in range(num_epochs_per_cycle):
+            # Each epoch has a training and a validation phase
+            for phase in ['train', 'val']:
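+                # Each phase makes one full pass over its loader; during the
+                # last epoch of a cycle, the val pass below also accumulates
+                # softmax outputs into `prob` for the snapshot ensemble loss.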
+                if phase == 'train':
+                    model.train()  # Set model to training mode
+                else:
+                    model.eval()   # Set model to evaluate mode
+
+                running_loss = 0.0
+                running_corrects = 0
+                idx = 0
+                # Iterate over data.
+                for inputs, labels in dataloaders[phase]:
+                    inputs = inputs.to(device)
+                    labels = labels.to(device)
+
+                    # zero the parameter gradients
+                    optimizer.zero_grad()
+
+                    # forward; track history only in train
+                    with torch.set_grad_enabled(phase == 'train'):
+                        outputs = model(inputs)
+                        _, preds = torch.max(outputs, 1)
+                        if (epoch == num_epochs_per_cycle-1) and (phase == 'val'):
+                            #accumulate this snapshot's validation probabilities
+                            prob[idx:idx+inputs.shape[0]] += F.softmax(outputs, dim = 1)
+                            lbl[idx:idx+inputs.shape[0]] = labels
+                            idx += inputs.shape[0]
+                        loss = criterion(outputs, labels)
+                        # backward + optimize only if in training phase
+                        if phase == 'train':
+                            loss.backward()
+                            optimizer.step()
+                            scheduler.step()
+
+                    # statistics
+                    running_loss += loss.item() * inputs.size(0)
+                    running_corrects += torch.sum(preds == labels.data)
+
+                epoch_loss = running_loss / dataset_sizes[phase]
+                epoch_acc = running_corrects.double() / dataset_sizes[phase]
+
+                # keep the weights of the best validation epoch
+                if phase == 'val' and epoch_loss < best_loss:
+                    best_loss = epoch_loss
+                    best_model_wts = copy.deepcopy(model.state_dict())
+        model_w_arr.append(copy.deepcopy(model.state_dict()))
+
+    prob /= num_cycles
+    ensemble_loss = F.nll_loss(torch.log(prob), lbl)
+    ensemble_loss = ensemble_loss.item()
+    time_elapsed = time.time() - since
+
+    # materialize one model per snapshot; each snapshot needs its own copy,
+    # otherwise every entry would alias the same module and end up holding
+    # only the last cycle's weights
+    model_arr = []
+    for weights in model_w_arr:
+        snapshot = copy.deepcopy(model)
+        snapshot.load_state_dict(weights)
+        model_arr.append(snapshot)
+    return model_arr, ensemble_loss, best_loss, prob
+
+def test(models_arr, loader, device):
+    #average softmax predictions over all models in the array
+    res = np.zeros((len(loader.dataset), 3), dtype = np.float32)
+    for model in models_arr:
+        model.eval()
+        res_arr = []
+        for inputs, _ in loader:
+            inputs = inputs.to(device)
+            with torch.set_grad_enabled(False):
+                outputs = F.softmax(model(inputs), dim = 1)
+            res_arr.append(outputs.detach().cpu().numpy())
+        res_arr = np.concatenate(res_arr, axis = 0)
+        res += res_arr
+    return res / len(models_arr)
+
+def read_train_data(p):
+    imgs = []
+    labels = []
+    for i, lbl in enumerate(os.listdir(p)):
+        for fname in os.listdir(os.path.join(p, lbl)):
+            #read image
+            img = Image.open(os.path.join(p, lbl, fname))
+            #rotate image back to its original orientation using EXIF metadata
+            try:
+                exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)
+                if exif['Orientation'] == 3:
+                    img = img.rotate(180, expand=True)
+                elif exif['Orientation'] == 6:
+                    img = img.rotate(270, expand=True)
+                elif exif['Orientation'] == 8:
+                    img = img.rotate(90, expand=True)
+            except (AttributeError, KeyError):
+                pass
+            #resize all images to the same size
+            img = np.array(img.convert('RGB').resize((512,512), Image.ANTIALIAS))
+            imgs.append(img)
+            labels.append(i)
+    return imgs, labels
+
+def read_test_data(p):
+    imgs = []
+    labels = []
+    ids = []
+    for fname in os.listdir(p):
+        #read image
+        img = Image.open(os.path.join(p, fname))
+        #rotate image back to its original orientation using EXIF metadata
+        try:
+            if not ('DMWVNR' in fname):
+                exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)
+                if exif['Orientation'] == 3:
+                    img = img.rotate(180, expand=True)
+                elif exif['Orientation'] == 6:
+                    img = img.rotate(270, expand=True)
+                elif exif['Orientation'] == 8:
+                    img = img.rotate(90, expand=True)
+        except (AttributeError, KeyError):
+            pass
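+        # Note on the EXIF handling above: orientation tags 3/6/8 mean the
+        # image is stored rotated by 180/270/90 degrees; filenames containing
+        # 'DMWVNR' are assumed to already be upright, so they are skipped.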
+        #resize all images to the same size
+        img = img.convert('RGB').resize((512,512), Image.ANTIALIAS)
+        imgs.append(np.array(img.copy()))
+        labels.append(0)
+        ids.append(fname.split('.')[0])
+        img.close()
+    return imgs, labels, ids
diff --git "a/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/utils.pyc" "b/code/2022_autumn/\346\275\230\345\200\251-\346\275\230\345\200\251-\345\237\272\344\272\216WR-EL\346\250\241\345\236\213\347\232\204\345\260\217\351\272\246\351\224\210\347\227\205\350\257\206\345\210\253\347\240\224\347\251\266/utils.pyc"
new file mode 100644
index 0000000000000000000000000000000000000000..863636974968572fee27f88e1d9430217988b249
GIT binary patch
literal 5586
zcmcIoOK&5`5w0Fe)cgJ3r?iF>8Jk@=$zcJ*u&u1T#sao%&)Sy3NXv{kLu#nu3^hI6
zr5rK{;5{V=ACgNh$tAZSr{oXh5ag2lfgq<`f&e)=H(!$Pt0tvgV~<{uQ|#%k?y9b?
z`l?!`|ISQS?4KXjWbo(V_boj3pCBQ=MiPgk#!RlW<%c>)hId?Fno%{vZDKwhfxa
zG)R(HZS6P;J4uvpm)JWF5(Y(Jqrw(tytJ|~N^CL7!8}N9JHf+qCZxytvU~V*uRkZ)
zS#wIL%rupO6?>3_v*cv2KQG0MG-stZFTF7-=A>7Wa6&e)!1Cu%c0sx?>zZS3%{8yT
zDBW2PJW#vgL5z6)CFw13+KDImhM}-|3Nz`XN?yl%%Nq=2>YjUyqHR-KC^@`DXX)fx5G~iQ`q4F1h7fQzhH2Q|xZCQF75>5$rLkX9p
ze_48q+$}oSsqsr5wJ)jmXwOJ*RliU}HQiXNm1L_}l_r$IT5xJCXUv_^o6xNX<9(g~
zCVS4O-@#T>Xk|DXGicWTb^Ag9o{AJpa
zCv^vg?XBo`u`u=rlU?P0Fn9;%K7GsVbMlB9d!>Js+|6sc&am&AYUHf&mGrNSi^s9i^czmnKOy$*gsX
zXAR&LjwlPl>V+wt^=TD_cHK=ahsZq6QlCl{Qy`SYnXnKL@H{D0EmU%uA&efywMea*
zc3RciSuMfHGI;qKwX@o>t+u1wjW|=pDQ3)qmcx!(mgi9o
zy3uRnu&cI?({^W&7?a?LiIGm2bqej~8CTueU+NgyC
b>q~Fp;3*Jpl#t!oZxIg3sB_aC~h1Vb{kDmV(+1K_qBtMEl%uUCu|||ECR5|@`K&T
o)lU8VE;G@3cq8EyGdYeT#su(fkVI53gCMAL8pildmDksGt}4}oRCeS#&M?JWa|`Z
ro9zY*3V6|ZI;ZI*)&U|OJ>^4c$1!vcxz_P%#|ibR?)j|u9#b(%Vy4-Hk0@)ja)UG
-mF!Q
@ORP&WFz&JG)(}-`lHgdOfS2KCz|8R(ns-7W<`f&4>;_XiEB+zP2*p@DF!atM5zD{
trk#!p!lGqah4rTByKPu%?%5OCV<*O>%Qiq)7NF6sv`T1vZ_Ey!PzKikkLnxbxLx2
9jK2@ps3K9E3Qb}i;sY;3bsCv+Ghkf03`$k-Pp$X6)nOpc(>`iii2s}C!G0oua!e@YTMdV+?yH=G7
0qUMsBb2K+=Phz#gA+Z^^Y9ydcMspx%B)1U>%MB-Fsa@c
aI)|rByVM%rvnT;?O6_%vT#^+#j{`2OehDRU^2WEl_(r(f^^pjrTc=zLtX3&l0PXU
>SuLR92I@7Ea?a5d}S@){ypTwY-n=MhoWJy-njBD7z)OEC}W@IL#36Di7_TM@+t5W
UQb3`SG3S-&awLA22tM*BMX7oTp$~CH;6aB=vK|?dnk==AP7+5c&zUSB}k1&Q5rm!
M{x%PraZi*@e3Dp|gV1M9
Lu>5peRO~4-TONa6nnNQLl0n31GO5~-yQbF{%dS<-FnKxjouNr8Y-7sy^urGgqKGs
pIkO%O_W2h{T7I5_Pb$T_O`r|xnbskEAF{sN@m3SHc)28p$ZX&s^7(9&%TB#L>OeR
ePRm|n@a_|hxZKd1aJ-*;efjfKLsWLlC$3vBmgfbW0HSKe*q#q0T1F2*#@%|Ifj^l
n=LrpU7FXr-zcL7IPw!f+~ud!FVFeL6bt?`>m`5zli;3u;_iFziQ&Q
NHUT($tej-4#f)}4kAA5V2yCwPGWcCtD$<1wi!M?^A6mb|o<}5)L0*Vm
^}gazfHxF^z|SDdeh0*XK-sK%*8l?3&%nTSg@J1*d}quh6d2nZ;MXcluRAErYsa_K
)Rk