From a17de876f1ba0a3fe9d4193ff79832fb40018638 Mon Sep 17 00:00:00 2001 From: dirkmc Date: Thu, 6 Apr 2023 19:27:13 +0800 Subject: [PATCH] feat: option to cleanup data for offline deals after add piece (#1341) --- api/api.go | 28 ++++---- api/proxy_gen.go | 8 +-- build/openrpc/boost.json.gz | Bin 5489 -> 5504 bytes cmd/boostd/import_data.go | 15 ++++- db/deals.go | 1 + db/fixtures.go | 7 +- .../20230330111514_deals_cleanup_data.sql | 10 +++ .../20230330111524_deals_cleanup_data.go | 23 +++++++ .../deals_announce_to_ipni_test.go | 33 ++++++---- .../deals_cleanup_data_test.go | 61 ++++++++++++++++++ documentation/en/api-v1-methods.md | 5 +- gql/resolver_piece.go | 2 +- gql/schema.graphql | 1 + itests/dummydeal_offline_test.go | 2 +- node/impl/boost.go | 4 +- react/src/DealDetail.js | 4 ++ react/src/gql.js | 2 + storagemarket/deal_execution.go | 4 +- storagemarket/provider.go | 12 ++-- storagemarket/provider_offline_test.go | 33 +++++++++- storagemarket/provider_test.go | 59 +++++++++++------ storagemarket/storagespace/storagespace.go | 2 +- storagemarket/types/deal_state.go | 3 + 23 files changed, 253 insertions(+), 66 deletions(-) create mode 100644 db/migrations/20230330111514_deals_cleanup_data.sql create mode 100644 db/migrations/20230330111524_deals_cleanup_data.go create mode 100644 db/migrations_tests/deals_cleanup_data_test.go diff --git a/api/api.go b/api/api.go index d6d7441e0..cc067d49c 100644 --- a/api/api.go +++ b/api/api.go @@ -34,20 +34,20 @@ type Boost interface { Net // MethodGroup: Boost - BoostIndexerAnnounceAllDeals(ctx context.Context) error //perm:admin - BoostOfflineDealWithData(ctx context.Context, dealUuid uuid.UUID, filePath string) (*ProviderDealRejectionInfo, error) //perm:admin - BoostDeal(ctx context.Context, dealUuid uuid.UUID) (*smtypes.ProviderDealState, error) //perm:admin - BoostDealBySignedProposalCid(ctx context.Context, proposalCid cid.Cid) (*smtypes.ProviderDealState, error) //perm:admin - BoostDummyDeal(context.Context, smtypes.DealParams) (*ProviderDealRejectionInfo, error) //perm:admin - BoostDagstoreRegisterShard(ctx context.Context, key string) error //perm:admin - BoostDagstoreDestroyShard(ctx context.Context, key string) error //perm:admin - BoostDagstoreInitializeShard(ctx context.Context, key string) error //perm:admin - BoostDagstoreInitializeAll(ctx context.Context, params DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) //perm:admin - BoostDagstoreRecoverShard(ctx context.Context, key string) error //perm:admin - BoostDagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin - BoostDagstorePiecesContainingMultihash(ctx context.Context, mh multihash.Multihash) ([]cid.Cid, error) //perm:read - BoostDagstoreListShards(ctx context.Context) ([]DagstoreShardInfo, error) //perm:admin - BoostMakeDeal(context.Context, smtypes.DealParams) (*ProviderDealRejectionInfo, error) //perm:write + BoostIndexerAnnounceAllDeals(ctx context.Context) error //perm:admin + BoostOfflineDealWithData(ctx context.Context, dealUuid uuid.UUID, filePath string, delAfterImport bool) (*ProviderDealRejectionInfo, error) //perm:admin + BoostDeal(ctx context.Context, dealUuid uuid.UUID) (*smtypes.ProviderDealState, error) //perm:admin + BoostDealBySignedProposalCid(ctx context.Context, proposalCid cid.Cid) (*smtypes.ProviderDealState, error) //perm:admin + BoostDummyDeal(context.Context, smtypes.DealParams) (*ProviderDealRejectionInfo, error) //perm:admin + BoostDagstoreRegisterShard(ctx context.Context, key string) error //perm:admin + 
BoostDagstoreDestroyShard(ctx context.Context, key string) error //perm:admin + BoostDagstoreInitializeShard(ctx context.Context, key string) error //perm:admin + BoostDagstoreInitializeAll(ctx context.Context, params DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) //perm:admin + BoostDagstoreRecoverShard(ctx context.Context, key string) error //perm:admin + BoostDagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin + BoostDagstorePiecesContainingMultihash(ctx context.Context, mh multihash.Multihash) ([]cid.Cid, error) //perm:read + BoostDagstoreListShards(ctx context.Context) ([]DagstoreShardInfo, error) //perm:admin + BoostMakeDeal(context.Context, smtypes.DealParams) (*ProviderDealRejectionInfo, error) //perm:write // MethodGroup: Blockstore BlockstoreGet(ctx context.Context, c cid.Cid) ([]byte, error) //perm:read diff --git a/api/proxy_gen.go b/api/proxy_gen.go index e32b865cc..31b9e6183 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -70,7 +70,7 @@ type BoostStruct struct { BoostMakeDeal func(p0 context.Context, p1 smtypes.DealParams) (*ProviderDealRejectionInfo, error) `perm:"write"` - BoostOfflineDealWithData func(p0 context.Context, p1 uuid.UUID, p2 string) (*ProviderDealRejectionInfo, error) `perm:"admin"` + BoostOfflineDealWithData func(p0 context.Context, p1 uuid.UUID, p2 string, p3 bool) (*ProviderDealRejectionInfo, error) `perm:"admin"` DealsConsiderOfflineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"` @@ -452,14 +452,14 @@ func (s *BoostStub) BoostMakeDeal(p0 context.Context, p1 smtypes.DealParams) (*P return nil, ErrNotSupported } -func (s *BoostStruct) BoostOfflineDealWithData(p0 context.Context, p1 uuid.UUID, p2 string) (*ProviderDealRejectionInfo, error) { +func (s *BoostStruct) BoostOfflineDealWithData(p0 context.Context, p1 uuid.UUID, p2 string, p3 bool) (*ProviderDealRejectionInfo, error) { if s.Internal.BoostOfflineDealWithData == nil { return nil, ErrNotSupported } - return s.Internal.BoostOfflineDealWithData(p0, p1, p2) + return s.Internal.BoostOfflineDealWithData(p0, p1, p2, p3) } -func (s *BoostStub) BoostOfflineDealWithData(p0 context.Context, p1 uuid.UUID, p2 string) (*ProviderDealRejectionInfo, error) { +func (s *BoostStub) BoostOfflineDealWithData(p0 context.Context, p1 uuid.UUID, p2 string, p3 bool) (*ProviderDealRejectionInfo, error) { return nil, ErrNotSupported } diff --git a/build/openrpc/boost.json.gz b/build/openrpc/boost.json.gz index e35629efce54dd66f2e63447d0838a38ca825b3a..fb230f3d9fc21ad3a349b0056256cff7b9752f47 100644 GIT binary patch delta 5422 zcmV+}718SPDu64H7=H-?LOy2phlF%n?xwwYq20ZA+sv!5MbO5`t|ijY4FCHZNw)Ea zWXr~+DfMor1tT3FN#|QfM~|lKLF{{$d1@ZCyX}G5G7+A-=INtJ5p|$>YF@f@L5%CG zp>cLKGF#?7^a*k??=fh1U-?$oT(!(Oq%+qh=IN(L6N5SH`F}fdpyeXmKC`Ink0JjX zeTQbtv>~y4=?q@t*V9R(xjF;!GOX!&T3ybaPK;L)$J>&5+{!G!-Ang&R(8s_@<7fQ#n}LxdT7ITJ zqzfM#FERX@{GMg zP|JmD5r2ke600|e&K48HhjW)g!$J7YY?%w+F;C4Ir5-um-@oNe+Lk-tp9bvr%JW?| z>H8DLR{Imjo$Sv6q0rw49@_VO_hI>ew-C2)-T%4l4vx*$s+Hy8bH}yr2z7n<2GVjI zoan0JUklmoA+q<37oTn>OPb5C34l4=Dj=2O(tk9r&y7ceX(}dcKDU?}BO79hrbt5M zLu4C1qd~$k1_pu7^p$8cLHJgz6Q-eTg|>krXl-Le8GlS&e-3S9vNVA4r%(+o0~kZs zhL-W-+YDJV17RC6I%n^gFC2<2_UkJTSa;Bl-om(O&j8pqfbovCngKFvC;9Ht+-LI! 
zrhm)s)(-;wQ%izg=RhF|Smz6H z%h}}u66(9l@eKHONvWU$qmmF}iT8wLkO~GBM7fYCAsup1-MXX*C|k?I6>W`s;D0P& zW$RYP9q$)TmOFxnF_{(VLC5*IY5$3g{LLAm|eazCvqGzaJrR*X!A2N@i%# zJpz;OebgTe4(;jUyWjgdCH=`9*nb22p@-0|{nc!l)Hb#v1)5rqNA=Z5jT*rUe~X2@V6JFi+|7?S4^R?%;vN?NTXLBg1*0KN~FKR5Gs zdm79;!iXZ^pzm-~-Ian=kNQdhtyP0%5pI;&E-JD9p#~pZB0@PCly&OlK!3E9+6Yfw zZI!l$t*_eH3#u!&#Au#E!7+4;)LYQziIp1#l;O)U;9t(}0dl~^f#$PObpprCFT1bj z4hI_ieTAR}31dZoFv7QyN4^2OD`Bd1-#iKfL`}I=2%;d`vOxLbT+tAQmMRN`yzy>b zbxP(=Pmi8|6I&c>KCBY#%zwl-EAo(B46egqY2jBGAWMq8TEa=v$=aa+T*i{xEqORN z(vXMO&~opgzp2q52UCy5ABR}0xt~I{vGsS+)<06ElO!qO7BK*u6Mqa&UDO`~Su^=3 zk!|RI7wP|4mHWU+0fOvgXplPuj*7Y$3uL!HejHuw882RvIh#n*z5()+4F(q->l7Xh zUXRE-N8#K6CL9M+2*O``R+Yd5b^0 z1QY0pSsPQ})7PGB%^28&&Oyt3jqT)vF?D@#3$J`+!7JzoKboBw^r7oGfI^>P!haBm zq#m!r=VNq>fncUb^8=@fXNj4>lsP65X0j`OZ{>cNTBT0 zwM;=V{Vtn-`x!u(4UfS%>@ym=T1*^7W}Hx&Mr)ZOLz#U)X_^0?P6?zT!CgXpOK0J? z;S5@Lo{Jc{9mK!pG0tn>&!T?~DQ9A^psv|6-=8ljB-hY_=$^@ula~2AAe6O2@P19} zXBfK+Y{3ui=<5AQJY5wfdnZF@X%N}1pVX{LXS@gVY5|)=8&LR) zqB&gmTF3Ab^e@F*Dx*qFj)YZ9^bQf5LmTlA6NG`ke2p#FW0g5MY z-8YC=u3F|sqb=-|d0ke}m4&dDw>s1k$nLT)iR+`>FimDHVtPjhzqP(8jiN-Cl!Xyj zXFAw_%2iw0wHe8ItKpDFvAf%xGN+n3!cD0}(1c9coHzE}cH1iq+z+n6@svmVuI$?FK~}Uk8V{ zol;u7DhNBYUn*(~ec=@86~(?+mOo1#3wxe_u71Cx%1{nn;PWNZ^Uw~Jr)+~k&-<4^ zD$ZZtFdQIxXT1>u`JkGN=U*-3xd~h}fr}<^(F87q*RKCRcf9bfVZT*YfRckkgxOdxQeJ#@WgxK`sk_wuSUyaRWzq7xi;eJUR~d|!PoaBKvP2i$nT zjR)L#z>Npoc)*PZ+<3r^2i$nTjR#!D&FJ@Y+>C$$*|PnG=#1;(@*al-O5t&T;g;cb zgQM{+!|NGrWj6CeBv{nvlE{b1hQtQKM$jBI-o`=WHQ{>QA75Y~hE4AW)ktn6_0CG_ zLq(ZF9;6$(n6PyV1W}Y}R3Tzn3{PIXu&5q=G2EJp)N2i3EABVYK85;d%wE1Y8~DDgnC{dX=N!5bTo>?6IPn znPiFT60R9wwGr#qMXL<;=diQc&~=)wPZU+k66^U9-v_bMnrK@Num<{;1KtLWD~Hes zc`@|ysSufejz|}ysGO*hqSq1tH!9EGWb`@tW_%eu%K4kNMTi+Z+O5U?8 zhoQRpK%>UF9AND=&6Rj>>5hY@xOY?ByEcaIDKZ*4QI8?5J5X2zQnk|~HguH9j#{uc z&3e=wz}BY^Xpi<@L9Xn$1tI4IVBF{sx;2TKK$Xyj#BznrX$X^m+63=)L=8#Y@_tupV9z4X53hN-iukYR9Sby0H1|^Txh@qC~5u&PdC^b17T0IhT zs@bF3>2PEf*C5Cq2p21(bs~4Y$~w(c6jlx~+4=9EtkS}NZk^N`^c6W6k<^U#i>j-M zI0%Kl@WUp7MATIhanSW@ixkq^Q5%wBXLW_Vk&c20>pPq;X{ZcD5rVjSxB|;L#FWnf zulL>WW*&~Pn|iCY+cw-T!O>gLG?9bA3eLc(WQGUu()(@ zv+vOWvAS8^tWvu#B}~p3EtD%JmRNJ?wKow3$bOBMT~c5xw(_!@Vxl!HG0b(RsPI{t za$xa)gCj*c>eNoOdR;T>t_ak ze37`TmzzS!#ralk%4HdG`i$9&RTusm63-PzWmOQ|9f~d-6nO~zdf(uh#^Kp@wya%M z>pH0enxWO~U+GVkcQ$?2YxSs9Eld}GDPfo847r!9e}F;Wtt7ufagbud?mNjisr%Q^=5 z8hlfE=#f1UIAhr)ld>r#va}SYpCNk-`#BVM7thb<;?u#aj&CIxB;-YSOV`?GUEeK# zvcBqGgp0z9_1U-8O)Q|?VFTw^=x?(BT=;&5E-v9M zu$EW8OI^!#!aJX;dZ!6WdFG0LQ8FP~&RufKOM7mbBTcVM*&VN>pekuSK7G>IW?oV# z0Lp|R1ZP*C-bA6VE9fKF=2)vYYZ7hE(?KWt zuVniAMq2tO-KS{c=uI5`*~ihfEXt%5(3*USka;wxu0ID*hoKqB`te}d9 z8kbEBV}{*UWmJ6DH1B{li){94iZ@RNJx46FA}2|3TNCr#3#>tZ)c2(WNU^~dN73un zFo?sSb`Xu~xIF6RR>K)@UhiOBSnq%<6WOVL9R*Na@SuVwNFluSVt!3YgIzCa(1@DL zqrT$4Dp7L$99VY?ubhO5_f!d%!Ws#2Ae%~%6hdmjvAqG@7K5Pe_@;1NHFQ}!OkOb?kE7zuG$PcU;BCYb2 zCB&>nByFQ(5Y@lK$P=+IU5lSp5ihxhPM8_OOsT|Kt5)qhovWLY23hr8neR+sV76{WtL0?PZL{Q}_r&CGc#8GWj z&ab9&1odU+HhR3B*i^rIc* zP88>T*90v-Rb^lCsTSrHpSH-l5v~p6HjMks8TYuOIFWO`z!c5lcrhVM!q>MH=VrR+ zoX6qCZPt^X6c`&BG|2ifodL@C{2&GshrJzPvCW4tp#WR3{TY+d6dQjn);dt+iwhru zT*GNeriyixv(p@SpM;#c`EW!Rs&h>({Sl%OuOi z0{AQQ%=|cqJ~V(24eU~51*rk>%COwU0%A&xsp}hbj));9y=}Hk5BOjqrfli6$)BuV zc`iYGhJ&9Ii{l+;mLWBT35?hmYYrEHkp_GK3(tWJo_{&*9Cmws^R~sh^Vb3A+IKzZ zQ^Z}y=I-P#Xi@Wa-7@{GMg zP|Kxj5r2ke605g}&X*IzhYOcN!$J7YY?({nG0)67r5-tZ{d&fmv@Lh>dK$3bE6;b? 
zq`#gpwt78r+{x<&AQbwqfrnmuzWcEHzZt~snfpIi-NA{uUbnJ5eBrp(9igrd-$GiB zgA-j<{A(e*Jw*0_@#6FCWJPoNH32Y(TLq+2Tz{Iz^||qAFipjT&F280~qgFt2rR^c9QQN&DU(+ zz<+ero%{e61ikOXo)1<%&>PT6kKDn9_m}(d&0DVAh4vXuRle-{r5@UYmnU~h_H!}>vhe`-n4>l`X10qcAT zW}ICvA)&sz8qa}mmy`-BFe(WlmiRzO2B~0BL6i%L64D_D)tyUnLT+!CJ2Y=2I zR<>?s-0@-QWVs`F7?W9%9(0_aoAwX88npj*xCO<(9ZKk&d*xvZPGL8EC34ka5^AoS zE&*N6YtM{eYtonddJ;q>uAjVBF`s;XG=GCDu7GA>070KX@C{lE`r{aZyI#*GQ!+<` z?lGAB=%fB%aAZ%HKm6XeDd|rZz<(at4?Tot_BXR-j+d4N$tD2zhAotNW`0?AIz5Xo z8~+H+_3drTL=*-bpBu*WVvim}m?49O?7VJ?VMv}wSVg~uDrvoz1PNoJ0{A{y{oKsk z?P)OY2qTJsgMPqmbyo^fJ?bk3v{nt4MYvI7`>4eFM;d%^g$U(jP}ZrF1Aoy{Y9l;# zwN=^%w!Ug(FQ~5A5~Fzv1;@}WQg1<dE?!= z>Xgi#o*upUF19!}d{`yenSY6GR^%bM7~F)x(!#GWK$aAFwS<$TleI?yxQr#WU-EEp ztRW9?pyl2}e_NwJ4yGQ9KMt{0b3cV@W9#put$(aaCrMJm3^4%P6Mqa&UDO`~Su^=3 zk!|RIAL;)@mHWU+0fOvgXplPuj*7aMOJuh{eHvXJ7%yIu1)E6Hz5(`=4F(q-=M)|d zUXRE-$KwB2lE1?LcVnn7z{5xYJsh;m1(QJr6CKkM+2)zvq_)hV57_|DDeKbd9KPaz zK6W~1{Li10aRwBBCHq{5q1_S?c`DM`VW-z^nZHAyS;IDG)nNVwV&++x``R+Yd5b^0 z0u$(nSsPQ}(>I=L%^BFk&SA@ZgYD#lF?D?~gV#Q?;5GDvAI(k-`p|V8K%vht;XepO zQjgc+^D&xXAeiaV{K%=Q+cGZ!1?E{c=WM-hnIrP=>C{1g7$!%04WZ8_**xrC_I`8S zKi>X$`0?AH|GkIr{zJXDgLC44`qzc^$HTjeU&q~di)O?(5czt)WKeR zIt(N)G2bfHuzZrhIg%}X4MGUrsu)K|+C>B#K+Y0LcgbV?u%sp|^j8J&mUhI44$c`jnK zbr}De$0cukKa2hiq?}{HlDcNg{BW_Nkla8EqI)JZPFv=$fKb*7!TSwipJVJUu?0W6 zqw5bN@pN63*`17&r9ou(c~Y|`5voCt;QKl`G3DBSpYtBfg#~O5Z9w5GiWYFwYZI`O z%bOBom#|%l-hwe7%>}d(|1d!q_^UVAa&3ko;~4@6oc1%g3;Hgen@!&!_PB1DTaC7` zQ|5J9p;Z>bMt15DLm-FCz9g<&azieewTS5u9sD-Bqcn;Vy-*fLTs7%nD_2ux-Q6CxMCbg3!1RpapbcH}#SCw1(jUDtDT8(L^LvAA$&U>-N7#NJ}KqW|~BPH`? zqB)~fC17G!f(%5c({!jQt(bKBgc?+fw_?h;>{8_33iT&$NfAHJ~0C_`)vyHB0T%tslY^bmW zoBTw+NezC)C$YePpC3_R=*vW&tX)r?r_;rr@yfny_RJmAIyZam<|18zLv#sh9V z;Kl=PJmAIyZam;JZbrYK<7Na5$d>I7MCV)&m-jd%PzsOnwhXTu9a-PT{gAwWygpAv zJ~B2EhJSPBcNb@VH?-*GPOp^FJyw?G0!nLQOEgvIdWMf{5D zvKPaxsaU(#0CrNhuPG9qCfb3b2Ayn;0*9q0l8r;Fk6`0acSx=&H1Vr ztt+vfpYVMUo2-eptqfcZL@NWk3zAi! z-w^DR5Nua*wt+8$*jPh0zx~WXq~v|Gau})`05ocS%K_G2+gpkEj%xjXH$|?SBGMbv=y=YO)xwj{1r$Mg=KsX` z(22BGXN#Sx3mNJSOY9AQOC0BI>c;4xwg_z@%$iuEOrW+pdnkv{u*lx9$VuK-a*P&g ziUk)ytcgAJL&DXWLxFz78hgVUr+J&!V@T_WI?KSQm|znmF6IQy;B$HMmtnhenr{Nk z&pE){$cn2$N0A8d9{6{V4gt0xKUWkTmnkJGR8Uls!Uh!3piMh}Y#f{o@F4Wt{By5J zt*TzTtt`nIPIv@>p|?|!qL`xMO5kPE`eQ<=cPq*A-#TqSSWLaV{Yr(9tLFE|(C#%h z%-<%Lx{5px!GdogJtucnWRd78-y(lWgh{;ME!(27e_$q`le@f)^EmKYP8+4L-O0vy z!FVNw>x6$45hs}VyjbNhaPs?H(MnhaIvDHG7yLUIKq|>0TCrr%1Q1F zc&+Lcs@Ln4zKD;ytmI5gP(|ssAEMz%k$)msWVB$s6c)>XLl^m`f(T=jiB?wCb`SMm;LH$^i?ySelOwlFbmrh*(Qg1cC5gB1b5umOg z^2vydA?7u;#P{Fbr8S3=YpmWwLck1WfA}|@L;o6o@;((WB@3s3^Di^i#uUtisTF?u z{Ahji(( zT=A~NnoF;li6}t!Yqac=f>yDWm)*<~tzn5_ZaPJU#>$l6iZ6FrBRN4xlJN)%sD>CP z8VSsQ1l?>Oc1V{@6CCr@@0A8b0;yK!lAf}c`uMuH7DX~nt7OeyolhAYE7B=9dUb>? 
zm#HUOB^yQwMrCp&gT_3d$lB|f9jSNZ8Xk%lRWv@bquY`Bp+H+=f~Z?+nITOb;T_cx z-jt0Uip%KgnbC9G{&k6M!hwp*O~V`3Hc!HT?T?CfS+pP(`-^LfMevOgKyWu~+i~fV zEEsODr-UxN!)}jTm=xH6g8Wuv2atJkiKX|aD(zK1Yd4TUpI>vw7yJfJ`Cx#!<9*aSrMBTjmY;&a;tXWu4FO0dl~^fxo(bX26$;t8cj}gj}3&)uvpQ5vR|8 zn7vqa;cp=ETwzpJ1;O2+=z>3yhrn;k4X#BTp5J84+Eul#6Up*&Y8DZ6A1!mUzy5o~3@rsQ~2a{Sqs9M70I zCD$Zox0Jsbc3P4(Hi~!0w3AfeT0JU%m1j>{EgEIN(@Ir~v|?tL9w`E-c7ckH{j2y%Z3g1{U za-dnO@o7uoX$)5qanR|b7kdCJ#|3u1dmYRbVv^3`vW@}10pC>~dSp)o&R8~o$)s#b zi7YLJ>1W8^!hR0L-R1N1x%hPOs^dEe1_^l)-io#HSvPmNpKh*}7vZAtB7632eH#lX zci6!BmG#@~Ke-e*V{S0Bl?mL4@FIMXaH%=cd2W+PI$*sRqr%G zDbHLnN+v|hxl2xYY0ph_r0I2kDZAs96jUXx$EQyk+s;c01wffFgy8IIn`C8z0iit5 zX&XusEweQeLDj+`=^ePLOCa~Lw#fn9NJ-gVZA?p+STU|pD+Qgl-6M=Nsg{GW5$`(9 zYtEM3FSp=Saj;$(xYrP~m9Oj&#QV!#0S9=f9cxx}=}#7Jj@2&%Uct5t=%qV$PS+c%J~x!Ja!fdtHx z6f}^$x1M7n@&Pp^4VsdF22DwW`ef5#ZmE@V<&p+tRY?Obf!O?!_63@aFy&$=^`K{f z32G1L0Aa2LTZY%?kFJ4jLp!npT84KddtNBD#yLe1$_lDzsBziEFlN~8R7S;TP4fa) zv&d$zrg-yY&~wBxD{_(qw=*%%y}%koeP23&6dUYt6uoW@gE;zsX$R4$j;rHdZZ(|o z_Vo_Nh4l`&GLfC?*HHk)1rI7{f)v6#FXq>jG}!l&292n>I_@j(s}d#0FMxHo^vX$? zcu$pJDXftY2ePdMNg<>b9J@==$&)k9A^pFB?2i-`g}7}*-aFjFdV)~Z$ePUrfzq(N4FS0+8yYWsX8Q`f8SB*oUDj+~o4+p-ua z3RXo@_&sNmGe(iIfD8!a~nP0O>C-P zJ-qfj1Iw?@X0HF3@u?Q( z6`yvA#-;1nBwFV;F#mPlTN3@KW)6j!%ipe zIQLJEjt8BV86H*R--Y+=0m>%kI^pWJUsiRJcje56xHp?D89wix@(sHG4*&rF|A9{i Jp!f#}0RWkiYs~-v diff --git a/cmd/boostd/import_data.go b/cmd/boostd/import_data.go index 72c12dc85..92aed947e 100644 --- a/cmd/boostd/import_data.go +++ b/cmd/boostd/import_data.go @@ -17,6 +17,13 @@ var importDataCmd = &cli.Command{ Name: "import-data", Usage: "Import data for offline deal made with Boost", ArgsUsage: " or ", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "delete-after-import", + Usage: "whether to delete the data for the offline deal after the deal has been added to a sector", + Value: false, + }, + }, Action: func(cctx *cli.Context) error { if cctx.Args().Len() < 2 { return fmt.Errorf("must specify proposal CID / deal UUID and file path") @@ -58,7 +65,12 @@ var importDataCmd = &cli.Command{ defer closer() // If the user has supplied a signed proposal cid + deleteAfterImport := cctx.Bool("delete-after-import") if proposalCid != nil { + if deleteAfterImport { + return fmt.Errorf("legacy deal data cannot be automatically deleted after import (only new deals)") + } + // Look up the deal in the boost database deal, err := napi.BoostDealBySignedProposalCid(cctx.Context, *proposalCid) if err != nil { @@ -74,6 +86,7 @@ var importDataCmd = &cli.Command{ if err != nil { return fmt.Errorf("couldnt import v1.1.0 deal, or find boost deal: %w", err) } + fmt.Printf("Offline deal import for v1.1.0 deal %s scheduled for execution\n", proposalCid.String()) return nil } @@ -83,7 +96,7 @@ var importDataCmd = &cli.Command{ } // Deal proposal by deal uuid (v1.2.0 deal) - rej, err := napi.BoostOfflineDealWithData(cctx.Context, dealUuid, filePath) + rej, err := napi.BoostOfflineDealWithData(cctx.Context, dealUuid, filePath, deleteAfterImport) if err != nil { return fmt.Errorf("failed to execute offline deal: %w", err) } diff --git a/db/deals.go b/db/deals.go index ef365e5eb..9e31a6550 100644 --- a/db/deals.go +++ b/db/deals.go @@ -62,6 +62,7 @@ func newDealAccessor(db *sql.DB, deal *types.ProviderDealState) *dealAccessor { "PieceSize": &fielddef.FieldDef{F: &deal.ClientDealProposal.Proposal.PieceSize}, "VerifiedDeal": &fielddef.FieldDef{F: &deal.ClientDealProposal.Proposal.VerifiedDeal}, "IsOffline": &fielddef.FieldDef{F: &deal.IsOffline}, + "CleanupData": &fielddef.FieldDef{F: 
&deal.CleanupData}, "ClientAddress": &fielddef.AddrFieldDef{F: &deal.ClientDealProposal.Proposal.Client}, "ProviderAddress": &fielddef.AddrFieldDef{F: &deal.ClientDealProposal.Proposal.Provider}, "Label": &fielddef.LabelFieldDef{F: &deal.ClientDealProposal.Proposal.Label}, diff --git a/db/fixtures.go b/db/fixtures.go index 956c5efe4..75566c59b 100644 --- a/db/fixtures.go +++ b/db/fixtures.go @@ -54,9 +54,10 @@ func GenerateNDeals(count int) ([]types.ProviderDealState, error) { return nil, err } deal := types.ProviderDealState{ - DealUuid: uuid.New(), - CreatedAt: time.Now(), - IsOffline: true, + DealUuid: uuid.New(), + CreatedAt: time.Now(), + IsOffline: true, + CleanupData: false, ClientDealProposal: market.ClientDealProposal{ Proposal: market.DealProposal{ PieceCID: testutil.GenerateCid(), diff --git a/db/migrations/20230330111514_deals_cleanup_data.sql b/db/migrations/20230330111514_deals_cleanup_data.sql new file mode 100644 index 000000000..dc63f0144 --- /dev/null +++ b/db/migrations/20230330111514_deals_cleanup_data.sql @@ -0,0 +1,10 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE Deals + ADD CleanupData BOOL; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +SELECT 'down SQL query'; +-- +goose StatementEnd diff --git a/db/migrations/20230330111524_deals_cleanup_data.go b/db/migrations/20230330111524_deals_cleanup_data.go new file mode 100644 index 000000000..24c02ffb8 --- /dev/null +++ b/db/migrations/20230330111524_deals_cleanup_data.go @@ -0,0 +1,23 @@ +package migrations + +import ( + "database/sql" + "github.com/pressly/goose/v3" +) + +func init() { + goose.AddMigration(upDealsCleanupData, downDealsCleanupData) +} + +func upDealsCleanupData(tx *sql.Tx) error { + _, err := tx.Exec("UPDATE Deals SET CleanupData = NOT IsOffline") + if err != nil { + return err + } + return nil +} + +func downDealsCleanupData(tx *sql.Tx) error { + // This code is executed when the migration is rolled back. 
+ return nil +} diff --git a/db/migrations_tests/deals_announce_to_ipni_test.go b/db/migrations_tests/deals_announce_to_ipni_test.go index 086235fcc..2a766e828 100644 --- a/db/migrations_tests/deals_announce_to_ipni_test.go +++ b/db/migrations_tests/deals_announce_to_ipni_test.go @@ -27,20 +27,31 @@ func TestDealAnnounceToIPNI(t *testing.T) { deals, err := db.GenerateNDeals(1) req.NoError(err) - // Insert the deals in DB - err = dealsDB.Insert(ctx, &deals[0]) - require.NoError(t, err) + // Insert the deal into the DB + deal := deals[0] + _, err = sqldb.Exec(`INSERT INTO Deals ("ID", "CreatedAt", "DealProposalSignature", "PieceCID", "PieceSize", + "VerifiedDeal", "IsOffline", "ClientAddress", "ProviderAddress","Label", "StartEpoch", "EndEpoch", + "StoragePricePerEpoch", "ProviderCollateral", "ClientCollateral", "ClientPeerID", "DealDataRoot", + "InboundFilePath", "TransferType", "TransferParams", "TransferSize", "ChainDealID", "PublishCID", + "SectorID", "Offset", "Length", "Checkpoint", "CheckpointAt", "Error", "Retry", "SignedProposalCID", + "FastRetrieval") + VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)`, + deal.DealUuid, deal.CreatedAt, []byte("test"), deal.ClientDealProposal.Proposal.PieceCID.String(), + deal.ClientDealProposal.Proposal.PieceSize, deal.ClientDealProposal.Proposal.VerifiedDeal, deal.IsOffline, + deal.ClientDealProposal.Proposal.Client.String(), deal.ClientDealProposal.Proposal.Provider.String(), "test", + deal.ClientDealProposal.Proposal.StartEpoch, deal.ClientDealProposal.Proposal.EndEpoch, deal.ClientDealProposal.Proposal.StoragePricePerEpoch.Uint64(), + deal.ClientDealProposal.Proposal.ProviderCollateral.Int64(), deal.ClientDealProposal.Proposal.ClientCollateral.Uint64(), deal.ClientPeerID.String(), + deal.DealDataRoot.String(), deal.InboundFilePath, deal.Transfer.Type, deal.Transfer.Params, deal.Transfer.Size, deal.ChainDealID, + deal.PublishCID.String(), deal.SectorID, deal.Offset, deal.Length, deal.Checkpoint.String(), deal.CheckpointAt, deal.Err, deal.Retry, []byte("test"), + deal.FastRetrieval) - // Get deal state - dealState, err := dealsDB.ByID(ctx, deals[0].DealUuid) - require.NoError(t, err) - require.False(t, dealState.AnnounceToIPNI) + req.NoError(err) - //Run migration - req.NoError(goose.UpByOne(sqldb, ".")) + // Run migration + req.NoError(goose.Up(sqldb, ".")) - // Check the deal state again - dealState, err = dealsDB.ByID(ctx, deals[0].DealUuid) + // Get the deal state + dealState, err := dealsDB.ByID(ctx, deals[0].DealUuid) require.NoError(t, err) require.True(t, dealState.AnnounceToIPNI) } diff --git a/db/migrations_tests/deals_cleanup_data_test.go b/db/migrations_tests/deals_cleanup_data_test.go new file mode 100644 index 000000000..c3767c823 --- /dev/null +++ b/db/migrations_tests/deals_cleanup_data_test.go @@ -0,0 +1,61 @@ +package migrations_tests + +import ( + "context" + "testing" + + "github.com/filecoin-project/boost/db" + "github.com/filecoin-project/boost/db/migrations" + "github.com/pressly/goose/v3" + "github.com/stretchr/testify/require" +) + +func TestDealCleanupData(t *testing.T) { + req := require.New(t) + ctx := context.Background() + + sqldb := db.CreateTestTmpDB(t) + req.NoError(db.CreateAllBoostTables(ctx, sqldb, sqldb)) + + // Run migrations up to the one that adds the CleanupData field to Deals + goose.SetBaseFS(migrations.EmbedMigrations) + req.NoError(goose.SetDialect("sqlite3")) + req.NoError(goose.UpTo(sqldb, ".", 20230330111514)) + + // Generate 1 deal + dealsDB := db.NewDealsDB(sqldb) + deals, 
err := db.GenerateNDeals(1) + req.NoError(err) + + // Insert the deal into the DB + deal := deals[0] + deal.IsOffline = false + _, err = sqldb.Exec(`INSERT INTO Deals ("ID", "CreatedAt", "DealProposalSignature", "PieceCID", "PieceSize", + "VerifiedDeal", "IsOffline", "ClientAddress", "ProviderAddress","Label", "StartEpoch", "EndEpoch", + "StoragePricePerEpoch", "ProviderCollateral", "ClientCollateral", "ClientPeerID", "DealDataRoot", + "InboundFilePath", "TransferType", "TransferParams", "TransferSize", "ChainDealID", "PublishCID", + "SectorID", "Offset", "Length", "Checkpoint", "CheckpointAt", "Error", "Retry", "SignedProposalCID", + "FastRetrieval", "AnnounceToIPNI") + VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)`, + deal.DealUuid, deal.CreatedAt, []byte("test"), deal.ClientDealProposal.Proposal.PieceCID.String(), + deal.ClientDealProposal.Proposal.PieceSize, deal.ClientDealProposal.Proposal.VerifiedDeal, deal.IsOffline, + deal.ClientDealProposal.Proposal.Client.String(), deal.ClientDealProposal.Proposal.Provider.String(), "test", + deal.ClientDealProposal.Proposal.StartEpoch, deal.ClientDealProposal.Proposal.EndEpoch, deal.ClientDealProposal.Proposal.StoragePricePerEpoch.Uint64(), + deal.ClientDealProposal.Proposal.ProviderCollateral.Int64(), deal.ClientDealProposal.Proposal.ClientCollateral.Uint64(), deal.ClientPeerID.String(), + deal.DealDataRoot.String(), deal.InboundFilePath, deal.Transfer.Type, deal.Transfer.Params, deal.Transfer.Size, deal.ChainDealID, + deal.PublishCID.String(), deal.SectorID, deal.Offset, deal.Length, deal.Checkpoint.String(), deal.CheckpointAt, deal.Err, deal.Retry, []byte("test"), + deal.FastRetrieval, deal.AnnounceToIPNI) + + req.NoError(err) + + // Run migration + req.NoError(goose.Up(sqldb, ".")) + + // Get the deal state + dealState, err := dealsDB.ByID(ctx, deals[0].DealUuid) + require.NoError(t, err) + + // Expect CleanupData to be true because the migration should set + // CleanupData to be the opposite of IsOffline + require.True(t, dealState.CleanupData) +} diff --git a/documentation/en/api-v1-methods.md b/documentation/en/api-v1-methods.md index b752e996a..d355958e9 100644 --- a/documentation/en/api-v1-methods.md +++ b/documentation/en/api-v1-methods.md @@ -380,6 +380,7 @@ Response: } }, "IsOffline": true, + "CleanupData": true, "ClientPeerID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "DealDataRoot": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -447,6 +448,7 @@ Response: } }, "IsOffline": true, + "CleanupData": true, "ClientPeerID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "DealDataRoot": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -601,7 +603,8 @@ Inputs: ```json [ "07070707-0707-0707-0707-070707070707", - "string value" + "string value", + true ] ``` diff --git a/gql/resolver_piece.go b/gql/resolver_piece.go index 55597f62f..b4f8038d2 100644 --- a/gql/resolver_piece.go +++ b/gql/resolver_piece.go @@ -185,7 +185,7 @@ func (r *resolver) PieceStatus(ctx context.Context, args struct{ PieceCid string } bd.Message = dl.Checkpoint.String() - // Only check the unseal state if the deal has already been added to a piece + // Only check the unseal state if the deal has already been added to a sector st := &sealStatus{IsUnsealed: false} if dl.Checkpoint >= dealcheckpoints.AddedPiece { isUnsealed, err := r.sa.IsUnsealed(ctx, dl.SectorID, dl.Offset.Unpadded(), dl.Length.Unpadded()) diff --git a/gql/schema.graphql b/gql/schema.graphql index 
f6780f96d..411c08071 100644 --- a/gql/schema.graphql +++ b/gql/schema.graphql @@ -70,6 +70,7 @@ type Deal { ChainDealID: Uint64! PublishCid: String! IsOffline: Boolean! + CleanupData: Boolean! Transfer: TransferParams! TransferSamples: [TransferPoint]! IsTransferStalled: Boolean! diff --git a/itests/dummydeal_offline_test.go b/itests/dummydeal_offline_test.go index ecb87070b..d4302d842 100644 --- a/itests/dummydeal_offline_test.go +++ b/itests/dummydeal_offline_test.go @@ -38,7 +38,7 @@ func TestDummydealOffline(t *testing.T) { res := dealRes.Result require.NoError(t, err) require.True(t, res.Accepted) - res, err = f.Boost.BoostOfflineDealWithData(context.Background(), offlineDealUuid, carFilepath) + res, err = f.Boost.BoostOfflineDealWithData(context.Background(), offlineDealUuid, carFilepath, false) require.NoError(t, err) require.True(t, res.Accepted) err = f.WaitForDealAddedToSector(offlineDealUuid) diff --git a/node/impl/boost.go b/node/impl/boost.go index 7353b9327..5d2927966 100644 --- a/node/impl/boost.go +++ b/node/impl/boost.go @@ -145,8 +145,8 @@ func (sm *BoostAPI) BoostIndexerAnnounceAllDeals(ctx context.Context) error { return sm.IndexProvider.IndexerAnnounceAllDeals(ctx) } -func (sm *BoostAPI) BoostOfflineDealWithData(ctx context.Context, dealUuid uuid.UUID, filePath string) (*api.ProviderDealRejectionInfo, error) { - res, err := sm.StorageProvider.ImportOfflineDealData(ctx, dealUuid, filePath) +func (sm *BoostAPI) BoostOfflineDealWithData(ctx context.Context, dealUuid uuid.UUID, filePath string, delAfterImport bool) (*api.ProviderDealRejectionInfo, error) { + res, err := sm.StorageProvider.ImportOfflineDealData(ctx, dealUuid, filePath, delAfterImport) return res, err } diff --git a/react/src/DealDetail.js b/react/src/DealDetail.js index cc68126a2..9d895d3f5 100644 --- a/react/src/DealDetail.js +++ b/react/src/DealDetail.js @@ -267,6 +267,10 @@ export function DealDetail(props) { Inbound File Path {deal.InboundFilePath} + + Delete After Add Piece + {deal.CleanupData ? 'Yes' : 'No'} + {deal.Sector.ID > 0 ? 
( <> diff --git a/react/src/gql.js b/react/src/gql.js index 58377b889..450190ac6 100644 --- a/react/src/gql.js +++ b/react/src/gql.js @@ -81,6 +81,7 @@ const DealsListQuery = gql` AnnounceToIPNI KeepUnsealedCopy IsOffline + CleanupData Err Retry Message @@ -166,6 +167,7 @@ const DealSubscription = gql` ChainDealID PublishCid IsOffline + CleanupData Checkpoint CheckpointAt AnnounceToIPNI diff --git a/storagemarket/deal_execution.go b/storagemarket/deal_execution.go index c37ad3a83..ba2d145d9 100644 --- a/storagemarket/deal_execution.go +++ b/storagemarket/deal_execution.go @@ -217,9 +217,9 @@ func (p *Provider) execDealUptoAddPiece(ctx context.Context, deal *types.Provide } // as deal has already been handed to the sealer, we can remove the inbound file and reclaim the tagged space - if !deal.IsOffline { + if deal.CleanupData { _ = os.Remove(deal.InboundFilePath) - p.dealLogger.Infow(deal.DealUuid, "removed inbound file as deal handed to sealer", "path", deal.InboundFilePath) + p.dealLogger.Infow(deal.DealUuid, "removed piece data from disk as deal has been added to a sector", "path", deal.InboundFilePath) } if err := p.untagStorageSpaceAfterSealing(ctx, deal); err != nil { // If there's an error untagging storage space we should still try to continue, diff --git a/storagemarket/provider.go b/storagemarket/provider.go index e6aa5bd3f..79faa2eeb 100644 --- a/storagemarket/provider.go +++ b/storagemarket/provider.go @@ -239,10 +239,10 @@ func (p *Provider) GetAsk() *storagemarket.SignedStorageAsk { // ImportOfflineDealData is called when the Storage Provider imports data for // an offline deal (the deal must already have been proposed by the client) -func (p *Provider) ImportOfflineDealData(ctx context.Context, dealUuid uuid.UUID, filePath string) (pi *api.ProviderDealRejectionInfo, err error) { - p.dealLogger.Infow(dealUuid, "import data for offline deal", "filepath", filePath) +func (p *Provider) ImportOfflineDealData(ctx context.Context, dealUuid uuid.UUID, filePath string, delAfterImport bool) (pi *api.ProviderDealRejectionInfo, err error) { + p.dealLogger.Infow(dealUuid, "import data for offline deal", "filepath", filePath, "delete after import", delAfterImport) - // db should already have a deal with this uuid as the deal proposal should have been agreed before hand + // db should already have a deal with this uuid as the deal proposal should have been made beforehand ds, err := p.dealsDB.ByID(p.ctx, dealUuid) if err != nil { if errors.Is(err, sql.ErrNoRows) { @@ -258,6 +258,7 @@ func (p *Provider) ImportOfflineDealData(ctx context.Context, dealUuid uuid.UUID } ds.InboundFilePath = filePath + ds.CleanupData = delAfterImport resp, err := p.checkForDealAcceptance(ctx, ds, true) if err != nil { @@ -265,12 +266,12 @@ func (p *Provider) ImportOfflineDealData(ctx context.Context, dealUuid uuid.UUID return nil, fmt.Errorf("failed to send deal for acceptance: %w", err) } - // if there was an error, we don't return a rejection reason + // if there was an error, we just return the error message (there is no rejection reason) if resp.err != nil { return nil, fmt.Errorf("failed to accept deal: %w", resp.err) } - // return rejection reason as provider has rejected the deal. 
+ // return rejection reason as provider has rejected the deal if !resp.ri.Accepted { p.dealLogger.Infow(dealUuid, "deal execution rejected by provider", "reason", resp.ri.Reason) return resp.ri, nil @@ -297,6 +298,7 @@ func (p *Provider) ExecuteDeal(ctx context.Context, dp *types.DealParams, client DealDataRoot: dp.DealDataRoot, Transfer: dp.Transfer, IsOffline: dp.IsOffline, + CleanupData: !dp.IsOffline, Retry: smtypes.DealRetryAuto, FastRetrieval: !dp.RemoveUnsealedCopy, AnnounceToIPNI: !dp.SkipIPNIAnnounce, diff --git a/storagemarket/provider_offline_test.go b/storagemarket/provider_offline_test.go index 46a395d39..dc50b37c9 100644 --- a/storagemarket/provider_offline_test.go +++ b/storagemarket/provider_offline_test.go @@ -2,6 +2,7 @@ package storagemarket import ( "context" + "fmt" "testing" "github.com/libp2p/go-libp2p/core/peer" @@ -31,7 +32,7 @@ func TestSimpleOfflineDealHappy(t *testing.T) { td.waitForAndAssert(t, ctx, dealcheckpoints.Accepted) // import data for offline deal - require.NoError(t, td.executeAndSubscribeImportOfflineDeal()) + require.NoError(t, td.executeAndSubscribeImportOfflineDeal(false)) // unblock commp -> wait for Transferred checkpoint td.unblockCommp() @@ -79,8 +80,36 @@ func TestOfflineDealInsufficientProviderFunds(t *testing.T) { // expect that when the deal data is imported, the import will fail because // there are not enough funds for the deal - pi, err = td.ph.Provider.ImportOfflineDealData(context.Background(), td.params.DealUUID, td.carv2FilePath) + pi, err = td.ph.Provider.ImportOfflineDealData(context.Background(), td.params.DealUUID, td.carv2FilePath, false) require.NoError(t, err) require.False(t, pi.Accepted) require.Contains(t, pi.Reason, "insufficient funds") } + +func TestOfflineDealDataCleanup(t *testing.T) { + ctx := context.Background() + + for _, delAfterImport := range []bool{true, false} { + t.Run(fmt.Sprintf("delete after import: %t", delAfterImport), func(t *testing.T) { + harness := NewHarness(t) + harness.Start(t, ctx) + defer harness.Stop() + + // first make an offline deal proposal + td := harness.newDealBuilder(t, 1, withOfflineDeal()).withAllMinerCallsNonBlocking().build() + + // execute deal + require.NoError(t, td.executeAndSubscribe()) + + // wait for Accepted checkpoint + td.waitForAndAssert(t, ctx, dealcheckpoints.Accepted) + + // import the deal data + require.NoError(t, td.executeAndSubscribeImportOfflineDeal(delAfterImport)) + + // check whether the deal data was removed after add piece + td.waitForAndAssert(t, ctx, dealcheckpoints.AddedPiece) + harness.EventuallyAssertNoTagged(t, ctx) + }) + } +} diff --git a/storagemarket/provider_test.go b/storagemarket/provider_test.go index 3c06b0796..624aec6ab 100644 --- a/storagemarket/provider_test.go +++ b/storagemarket/provider_test.go @@ -592,7 +592,7 @@ func TestOfflineDealRestartAfterManualRecoverableErrors(t *testing.T) { require.NoError(t, err) // execute deal - err = td.executeAndSubscribeImportOfflineDeal() + err = td.executeAndSubscribeImportOfflineDeal(false) require.NoError(t, err) // expect recoverable error with retry type Manual @@ -1218,10 +1218,11 @@ func (h *ProviderHarness) AssertEventuallyDealCleanedup(t *testing.T, ctx contex return false } - // the deal inbound file should no longer exist if it is an online deal - if !dp.IsOffline { + // the deal inbound file should no longer exist if it is an online deal, + // or if it is an offline deal with the delete after import flag set + if dbState.CleanupData { _, statErr := os.Stat(dbState.InboundFilePath) - return 
statErr != nil + return os.IsNotExist(statErr) } return true }, 5*time.Second, 200*time.Millisecond) @@ -1822,11 +1823,18 @@ func (ph *ProviderHarness) newDealBuilder(t *testing.T, seed int, opts ...dealPr RemoveUnsealedCopy: true, } + // Create a copy of the car file so that if the original car file gets + // cleaned up after the deal is added to a sector, we still have a copy + // we can use to compare with the contents of the unsealed file. + carFileCopyPath := carFilePath + ".copy" + err = copyFile(carFilePath, carFileCopyPath) + require.NoError(tbuilder.t, err) td := &testDeal{ - ph: tbuilder.ph, - params: dealParams, - carv2FilePath: carFilePath, - carv2FileName: name, + ph: tbuilder.ph, + params: dealParams, + carv2FilePath: carFilePath, + carv2CopyFilePath: carFileCopyPath, + carv2FileName: name, } publishCid := testutil.GenerateCid() @@ -2058,18 +2066,19 @@ func (tbuilder *testDealBuilder) buildAnnounce() *testDealBuilder { } type testDeal struct { - ph *ProviderHarness - params *types.DealParams - carv2FilePath string - carv2FileName string - stubOutput *smtestutil.StubbedMinerOutput - sub event.Subscription + ph *ProviderHarness + params *types.DealParams + carv2FilePath string + carv2FileName string + carv2CopyFilePath string + stubOutput *smtestutil.StubbedMinerOutput + sub event.Subscription tBuilder *testDealBuilder } -func (td *testDeal) executeAndSubscribeImportOfflineDeal() error { - pi, err := td.ph.Provider.ImportOfflineDealData(context.Background(), td.params.DealUUID, td.carv2FilePath) +func (td *testDeal) executeAndSubscribeImportOfflineDeal(delAfterImport bool) error { + pi, err := td.ph.Provider.ImportOfflineDealData(context.Background(), td.params.DealUUID, td.carv2FilePath, delAfterImport) if err != nil { return err } @@ -2200,7 +2209,7 @@ func (td *testDeal) waitForAndAssert(t *testing.T, ctx context.Context, cp dealc case dealcheckpoints.PublishConfirmed: td.ph.AssertPublishConfirmed(t, ctx, td.params, td.stubOutput) case dealcheckpoints.AddedPiece: - td.ph.AssertPieceAdded(t, ctx, td.params, td.stubOutput, td.carv2FilePath) + td.ph.AssertPieceAdded(t, ctx, td.params, td.stubOutput, td.carv2CopyFilePath) case dealcheckpoints.IndexedAndAnnounced: td.ph.AssertDealIndexed(t, ctx, td.params, td.stubOutput) default: @@ -2229,7 +2238,7 @@ func (td *testDeal) unblockAddPiece() { } func (td *testDeal) assertPieceAdded(t *testing.T, ctx context.Context) { - td.ph.AssertPieceAdded(t, ctx, td.params, td.stubOutput, td.carv2FilePath) + td.ph.AssertPieceAdded(t, ctx, td.params, td.stubOutput, td.carv2CopyFilePath) } func (td *testDeal) assertDealFailedTransferNonRecoverable(t *testing.T, ctx context.Context, errStr string) { @@ -2278,3 +2287,17 @@ type mockSignatureVerifier struct { func (m *mockSignatureVerifier) VerifySignature(ctx context.Context, sig acrypto.Signature, addr address.Address, input []byte) (bool, error) { return m.valid, m.err } + +func copyFile(source string, dest string) error { + input, err := os.ReadFile(source) + if err != nil { + return err + } + + err = os.WriteFile(dest, input, 0644) + if err != nil { + return err + } + + return nil +} diff --git a/storagemarket/storagespace/storagespace.go b/storagemarket/storagespace/storagespace.go index 521e9b8b4..7ba73ec14 100644 --- a/storagemarket/storagespace/storagespace.go +++ b/storagemarket/storagespace/storagespace.go @@ -13,7 +13,7 @@ type Status struct { TotalAvailable uint64 // The number of bytes reserved for accepted deals Tagged uint64 - // The number of bytes that have been downloaded and are 
waiting to be added to a piece + // The number of bytes that have been downloaded and are waiting to be added to a sector Staged uint64 // The number of bytes that are not tagged Free uint64 diff --git a/storagemarket/types/deal_state.go b/storagemarket/types/deal_state.go index 092536b0d..d109ec05c 100644 --- a/storagemarket/types/deal_state.go +++ b/storagemarket/types/deal_state.go @@ -24,6 +24,9 @@ type ProviderDealState struct { // IsOffline is true for offline deals i.e. deals where the actual data to be stored by the SP is sent out of band // and not via an online data transfer. IsOffline bool + // CleanupData indicates whether to remove the data for a deal after the deal has been added to a sector. + // This is always true for online deals, and can be set as a flag for offline deals. + CleanupData bool // ClientPeerID is the Clients libp2p Peer ID. ClientPeerID peer.ID
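
For reviewers and integrators, here is a minimal sketch of how a caller might use the new delAfterImport parameter on BoostOfflineDealWithData. This helper is not part of the patch: the importOfflineDeal name is hypothetical, and it assumes an api.Boost client has already been constructed elsewhere (e.g. over JSON-RPC).

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/boost/api"
	"github.com/google/uuid"
)

// importOfflineDeal imports the CAR file for an offline deal that was already
// proposed to the provider, and asks boostd to delete the file from disk once
// the deal has been added to a sector (delAfterImport = true).
func importOfflineDeal(ctx context.Context, boost api.Boost, dealUuid uuid.UUID, carPath string) error {
	// The final boolean is the delAfterImport flag introduced by this patch.
	rej, err := boost.BoostOfflineDealWithData(ctx, dealUuid, carPath, true)
	if err != nil {
		return fmt.Errorf("importing data for deal %s: %w", dealUuid, err)
	}
	if !rej.Accepted {
		return fmt.Errorf("deal %s rejected: %s", dealUuid, rej.Reason)
	}
	fmt.Printf("offline deal %s scheduled for execution; data will be removed after AddPiece\n", dealUuid)
	return nil
}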
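On the command line, the same behaviour is exposed through the flag added in cmd/boostd/import_data.go, roughly: boostd import-data --delete-after-import <deal uuid> <file path>. The flag defaults to false and is rejected for legacy proposal-CID imports. For existing rows, the migration sets CleanupData = NOT IsOffline, so online deals keep being cleaned up after AddPiece while previously imported offline deal data is left on disk.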