diff --git a/bitswap/internal/defaults/defaults.go b/bitswap/internal/defaults/defaults.go
index 6f7c2e745..1b0de2497 100644
--- a/bitswap/internal/defaults/defaults.go
+++ b/bitswap/internal/defaults/defaults.go
@@ -24,4 +24,7 @@ const (
 	// provideCollector even before they are actually provided.
 	// TODO: Does this need to be this large givent that?
 	HasBlockBufferSize = 256
+
+	// MaxQueuedWantlistEntriesPerPeer is the maximum number of wantlist entries we are willing to keep in memory per peer.
+	MaxQueuedWantlistEntriesPerPeer = 1024
 )
diff --git a/bitswap/options.go b/bitswap/options.go
index 4d5c4b40c..1e2e09018 100644
--- a/bitswap/options.go
+++ b/bitswap/options.go
@@ -29,6 +29,10 @@ func MaxOutstandingBytesPerPeer(count int) Option {
 	return Option{server.MaxOutstandingBytesPerPeer(count)}
 }
 
+func MaxQueuedWantlistEntriesPerPeer(count uint) Option {
+	return Option{server.MaxQueuedWantlistEntriesPerPeer(count)}
+}
+
 func TaskWorkerCount(count int) Option {
 	return Option{server.TaskWorkerCount(count)}
 }
diff --git a/bitswap/server/internal/decision/engine.go b/bitswap/server/internal/decision/engine.go
index f21553f96..29ac1aa2a 100644
--- a/bitswap/server/internal/decision/engine.go
+++ b/bitswap/server/internal/decision/engine.go
@@ -4,6 +4,7 @@ package decision
 import (
 	"context"
 	"fmt"
+	"math/bits"
 	"sync"
 	"time"
 
@@ -147,9 +148,6 @@ type Engine struct {
 
 	lock sync.RWMutex // protects the fields immediately below
 
-	// ledgerMap lists block-related Ledgers by their Partner key.
-	ledgerMap map[peer.ID]*ledger
-
 	// peerLedger saves which peers are waiting for a Cid
 	peerLedger *peerLedger
 
@@ -187,6 +185,8 @@ type Engine struct {
 
 	bstoreWorkerCount          int
 	maxOutstandingBytesPerPeer int
+
+	maxQueuedWantlistEntriesPerPeer uint
 }
 
 // TaskInfo represents the details of a request from a peer.
@@ -270,6 +270,15 @@ func WithMaxOutstandingBytesPerPeer(count int) Option {
 	}
 }
 
+// WithMaxQueuedWantlistEntriesPerPeer limits how many individual wantlist entries each peer is allowed to queue.
+// If a peer sends us more than this, the newest entries are truncated.
+// It defaults to defaults.MaxQueuedWantlistEntriesPerPeer.
+func WithMaxQueuedWantlistEntriesPerPeer(count uint) Option {
+	return func(e *Engine) {
+		e.maxQueuedWantlistEntriesPerPeer = count
+	}
+}
+
 func WithSetSendDontHave(send bool) Option {
 	return func(e *Engine) {
 		e.sendDontHaves = send
@@ -330,7 +339,6 @@ func newEngine(
 	opts ...Option,
 ) *Engine {
 	e := &Engine{
-		ledgerMap:                  make(map[peer.ID]*ledger),
 		scoreLedger:                NewDefaultScoreLedger(),
 		bstoreWorkerCount:          defaults.BitswapEngineBlockstoreWorkerCount,
 		maxOutstandingBytesPerPeer: defaults.BitswapMaxOutstandingBytesPerPeer,
@@ -348,6 +356,7 @@ func newEngine(
 		targetMessageSize:               defaultTargetMessageSize,
 		tagQueued:                       fmt.Sprintf(tagFormat, "queued", uuid.New().String()),
 		tagUseful:                       fmt.Sprintf(tagFormat, "useful", uuid.New().String()),
+		maxQueuedWantlistEntriesPerPeer: defaults.MaxQueuedWantlistEntriesPerPeer,
 	}
 
 	for _, opt := range opts {
@@ -450,13 +459,10 @@ func (e *Engine) onPeerRemoved(p peer.ID) {
 
 // WantlistForPeer returns the list of keys that the given peer has asked for
 func (e *Engine) WantlistForPeer(p peer.ID) []wl.Entry {
-	partner := e.findOrCreate(p)
-
-	partner.lk.Lock()
-	entries := partner.wantList.Entries()
-	partner.lk.Unlock()
+	e.lock.RLock()
+	defer e.lock.RUnlock()
 
-	return entries
+	return e.peerLedger.WantlistForPeer(p)
 }
 
 // LedgerForPeer returns aggregated data communication with a given peer.
@@ -605,12 +611,7 @@ func (e *Engine) Peers() []peer.ID {
 	e.lock.RLock()
 	defer e.lock.RUnlock()
 
-	response := make([]peer.ID, 0, len(e.ledgerMap))
-
-	for _, ledger := range e.ledgerMap {
-		response = append(response, ledger.Partner)
-	}
-	return response
+	return e.peerLedger.CollectPeerIDs()
 }
 
 // MessageReceived is called when a message is received from a remote peer.
@@ -659,33 +660,34 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap
 	}
 
 	e.lock.Lock()
-	for _, entry := range wants {
-		e.peerLedger.Wants(p, entry.Cid)
-	}
-	for _, entry := range cancels {
-		e.peerLedger.CancelWant(p, entry.Cid)
-	}
-	e.lock.Unlock()
 
-	// Get the ledger for the peer
-	l := e.findOrCreate(p)
-	l.lk.Lock()
-	defer l.lk.Unlock()
-
-	// If the peer sent a full wantlist, replace the ledger's wantlist
 	if m.Full() {
-		l.wantList = wl.New()
+		e.peerLedger.ClearPeerWantlist(p)
 	}
 
-	var activeEntries []peertask.Task
+	s := uint(e.peerLedger.WantlistSizeForPeer(p))
+	if wouldBe := s + uint(len(wants)); wouldBe > e.maxQueuedWantlistEntriesPerPeer {
+		log.Debugw("wantlist overflow", "local", e.self, "remote", p, "would be", wouldBe)
+		// truncate wantlist to avoid overflow
+		available, o := bits.Sub(e.maxQueuedWantlistEntriesPerPeer, s, 0)
+		if o != 0 {
+			available = 0
+		}
+		wants = wants[:available]
+	}
 
-	// Remove cancelled blocks from the queue
+	for _, entry := range wants {
+		e.peerLedger.Wants(p, entry.Entry)
+	}
 	for _, entry := range cancels {
 		log.Debugw("Bitswap engine <- cancel", "local", e.self, "from", p, "cid", entry.Cid)
-		if l.CancelWant(entry.Cid) {
+		if e.peerLedger.CancelWant(p, entry.Cid) {
 			e.peerRequestQueue.Remove(entry.Cid, p)
 		}
 	}
+	e.lock.Unlock()
+
+	var activeEntries []peertask.Task
 
 	// Cancel a block operation
 	sendDontHave := func(entry bsmsg.Entry) {
@@ -724,9 +726,6 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap
 		c := entry.Cid
 		blockSize, found := blockSizes[entry.Cid]
 
-		// Add each want-have / want-block to the ledger
-		l.Wants(c, entry.Priority, entry.WantType)
-
 		// If the block was not found
 		if !found {
 			log.Debugw("Bitswap engine: block not found", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave)
@@ -763,7 +762,7 @@ func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwap
 
 	// Push entries onto the request queue
 	if len(activeEntries) > 0 {
-		e.peerRequestQueue.PushTasks(p, activeEntries...)
+		e.peerRequestQueue.PushTasksTruncated(e.maxQueuedWantlistEntriesPerPeer, p, activeEntries...)
 		e.updateMetrics()
 	}
 }
@@ -809,14 +808,10 @@ func (e *Engine) ReceivedBlocks(from peer.ID, blks []blocks.Block) {
 		return
 	}
 
-	l := e.findOrCreate(from)
 
-	// Record how many bytes were received in the ledger
-	l.lk.Lock()
-	defer l.lk.Unlock()
 	for _, blk := range blks {
 		log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData()))
-		e.scoreLedger.AddToReceivedBytes(l.Partner, len(blk.RawData()))
+		e.scoreLedger.AddToReceivedBytes(from, len(blk.RawData()))
 	}
 }
 
@@ -835,7 +830,6 @@ func (e *Engine) NotifyNewBlocks(blks []blocks.Block) {
 
 	// Check each peer to see if it wants one of the blocks we received
 	var work bool
-	missingWants := make(map[peer.ID][]cid.Cid)
 	for _, b := range blks {
 		k := b.Cid()
 
@@ -843,26 +837,7 @@ func (e *Engine) NotifyNewBlocks(blks []blocks.Block) {
 		peers := e.peerLedger.Peers(k)
 		e.lock.RUnlock()
 
-		for _, p := range peers {
-			e.lock.RLock()
-			ledger, ok := e.ledgerMap[p]
-			e.lock.RUnlock()
-
-			if !ok {
-				// This can happen if the peer has disconnected while we're processing this list.
-				log.Debugw("failed to find peer in ledger", "peer", p)
-				missingWants[p] = append(missingWants[p], k)
-				continue
-			}
-			ledger.lk.RLock()
-			entry, ok := ledger.WantListContains(k)
-			ledger.lk.RUnlock()
-			if !ok {
-				// This can happen if the peer has canceled their want while we're processing this message.
-				log.Debugw("wantlist index doesn't match peer's wantlist", "peer", p)
-				missingWants[p] = append(missingWants[p], k)
-				continue
-			}
+		for _, entry := range peers {
 			work = true
 
 			blockSize := blockSizes[k]
@@ -873,8 +848,8 @@ func (e *Engine) NotifyNewBlocks(blks []blocks.Block) {
 				entrySize = bsmsg.BlockPresenceSize(k)
 			}
 
-			e.peerRequestQueue.PushTasks(p, peertask.Task{
-				Topic:    entry.Cid,
+			e.peerRequestQueue.PushTasksTruncated(e.maxQueuedWantlistEntriesPerPeer, entry.Peer, peertask.Task{
+				Topic:    k,
 				Priority: int(entry.Priority),
 				Work:     entrySize,
 				Data: &taskData{
@@ -888,30 +863,6 @@ func (e *Engine) NotifyNewBlocks(blks []blocks.Block) {
 		}
 	}
 
-	// If we found missing wants (e.g., because the peer disconnected, we have some races here)
-	// remove them from the list. Unfortunately, we still have to re-check because the user
-	// could have re-connected in the meantime.
-	if len(missingWants) > 0 {
-		e.lock.Lock()
-		for p, wl := range missingWants {
-			if ledger, ok := e.ledgerMap[p]; ok {
-				ledger.lk.RLock()
-				for _, k := range wl {
-					if _, has := ledger.WantListContains(k); has {
-						continue
-					}
-					e.peerLedger.CancelWant(p, k)
-				}
-				ledger.lk.RUnlock()
-			} else {
-				for _, k := range wl {
-					e.peerLedger.CancelWant(p, k)
-				}
-			}
-		}
-		e.lock.Unlock()
-	}
-
 	if work {
 		e.signalNewWork()
 	}
@@ -926,21 +877,20 @@ func (e *Engine) NotifyNewBlocks(blks []blocks.Block) {
 // MessageSent is called when a message has successfully been sent out, to record
 // changes.
 func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) {
-	l := e.findOrCreate(p)
-	l.lk.Lock()
-	defer l.lk.Unlock()
+	e.lock.Lock()
+	defer e.lock.Unlock()
 
 	// Remove sent blocks from the want list for the peer
 	for _, block := range m.Blocks() {
-		e.scoreLedger.AddToSentBytes(l.Partner, len(block.RawData()))
-		l.wantList.RemoveType(block.Cid(), pb.Message_Wantlist_Block)
+		e.scoreLedger.AddToSentBytes(p, len(block.RawData()))
+		e.peerLedger.CancelWantWithType(p, block.Cid(), pb.Message_Wantlist_Block)
 	}
 
 	// Remove sent block presences from the want list for the peer
 	for _, bp := range m.BlockPresences() {
 		// Don't record sent data. We reserve that for data blocks.
 		if bp.Type == pb.Message_Have {
-			l.wantList.RemoveType(bp.Cid, pb.Message_Wantlist_Have)
+			e.peerLedger.CancelWantWithType(p, bp.Cid, pb.Message_Wantlist_Have)
 		}
 	}
 }
@@ -951,31 +901,17 @@ func (e *Engine) PeerConnected(p peer.ID) {
 	e.lock.Lock()
 	defer e.lock.Unlock()
 
-	_, ok := e.ledgerMap[p]
-	if !ok {
-		e.ledgerMap[p] = newLedger(p)
-	}
-
 	e.scoreLedger.PeerConnected(p)
 }
 
 // PeerDisconnected is called when a peer disconnects.
 func (e *Engine) PeerDisconnected(p peer.ID) {
+	e.peerRequestQueue.Clear(p)
+
 	e.lock.Lock()
 	defer e.lock.Unlock()
 
-	ledger, ok := e.ledgerMap[p]
-	if ok {
-		ledger.lk.RLock()
-		entries := ledger.Entries()
-		ledger.lk.RUnlock()
-
-		for _, entry := range entries {
-			e.peerLedger.CancelWant(p, entry.Cid)
-		}
-	}
-	delete(e.ledgerMap, p)
-
+	e.peerLedger.PeerDisconnected(p)
 	e.scoreLedger.PeerDisconnected(p)
 }
 
@@ -994,29 +930,6 @@ func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 {
 	return e.LedgerForPeer(p).Recv
 }
 
-// ledger lazily instantiates a ledger
-func (e *Engine) findOrCreate(p peer.ID) *ledger {
-	// Take a read lock (as it's less expensive) to check if we have a ledger
-	// for the peer
-	e.lock.RLock()
-	l, ok := e.ledgerMap[p]
-	e.lock.RUnlock()
-	if ok {
-		return l
-	}
-
-	// There's no ledger, so take a write lock, then check again and create the
-	// ledger if necessary
-	e.lock.Lock()
-	defer e.lock.Unlock()
-	l, ok = e.ledgerMap[p]
-	if !ok {
-		l = newLedger(p)
-		e.ledgerMap[p] = l
-	}
-	return l
-}
-
 func (e *Engine) signalNewWork() {
 	// Signal task generation to restart (if stopped!)
 	select {
diff --git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go
index 35d35b195..9fd3b1b07 100644
--- a/bitswap/server/internal/decision/engine_test.go
+++ b/bitswap/server/internal/decision/engine_test.go
@@ -146,7 +146,7 @@ func TestConsistentAccounting(t *testing.T) {
 	}
 }
 
-func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) {
+func TestPeerIsAddedToPeersWhenMessageSent(t *testing.T) {
 	test.Flaky(t)
 
 	ctx, cancel := context.WithCancel(context.Background())
@@ -156,17 +156,15 @@ func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) {
 
 	m := message.New(true)
 
-	sanfrancisco.Engine.MessageSent(seattle.Peer, m)
+	// We need to request something for it to add us as a partner.
+	m.AddEntry(blocks.NewBlock([]byte("Hæ")).Cid(), 0, pb.Message_Wantlist_Block, true)
+	seattle.Engine.MessageReceived(ctx, sanfrancisco.Peer, m)
 
 	if seattle.Peer == sanfrancisco.Peer {
 		t.Fatal("Sanity Check: Peers have same Key!")
 	}
 
-	if !peerIsPartner(seattle.Peer, sanfrancisco.Engine) {
-		t.Fatal("Peer wasn't added as a Partner")
-	}
-
 	if !peerIsPartner(sanfrancisco.Peer, seattle.Engine) {
 		t.Fatal("Peer wasn't added as a Partner")
 	}
@@ -1053,10 +1051,6 @@ func TestWantlistForPeer(t *testing.T) {
 	if len(entries) != 4 {
 		t.Fatal("expected wantlist to contain all wants from parter")
 	}
-	if entries[0].Priority != 4 || entries[1].Priority != 3 || entries[2].Priority != 2 || entries[3].Priority != 1 {
-		t.Fatal("expected wantlist to be sorted")
-	}
-
 }
 
 func TestTaskComparator(t *testing.T) {
diff --git a/bitswap/server/internal/decision/peer_ledger.go b/bitswap/server/internal/decision/peer_ledger.go
index c22322b28..102dad4c4 100644
--- a/bitswap/server/internal/decision/peer_ledger.go
+++ b/bitswap/server/internal/decision/peer_ledger.go
@@ -1,28 +1,80 @@
 package decision
 
 import (
+	wl "github.com/ipfs/go-libipfs/bitswap/client/wantlist"
+	pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
+
 	"github.com/ipfs/go-cid"
 	"github.com/libp2p/go-libp2p/core/peer"
 )
 
 type peerLedger struct {
-	cids map[cid.Cid]map[peer.ID]struct{}
+	// these two maps are inversions of each other
+	peers map[peer.ID]map[cid.Cid]entry
+	cids  map[cid.Cid]map[peer.ID]entry
 }
 
 func newPeerLedger() *peerLedger {
-	return &peerLedger{cids: make(map[cid.Cid]map[peer.ID]struct{})}
+	return &peerLedger{
+		peers: make(map[peer.ID]map[cid.Cid]entry),
+		cids:  make(map[cid.Cid]map[peer.ID]entry),
+	}
 }
 
-func (l *peerLedger) Wants(p peer.ID, k cid.Cid) {
-	m, ok := l.cids[k]
+func (l *peerLedger) Wants(p peer.ID, e wl.Entry) {
+	cids, ok := l.peers[p]
+	if !ok {
+		cids = make(map[cid.Cid]entry)
+		l.peers[p] = cids
+	}
+	cids[e.Cid] = entry{e.Priority, e.WantType}
+
+	m, ok := l.cids[e.Cid]
+	if !ok {
+		m = make(map[peer.ID]entry)
+		l.cids[e.Cid] = m
+	}
+	m[p] = entry{e.Priority, e.WantType}
+}
+
+// CancelWant removes the want from the peer's wantlist and returns true if the peer was known to the ledger.
+func (l *peerLedger) CancelWant(p peer.ID, k cid.Cid) bool {
+	wants, ok := l.peers[p]
+	if !ok {
+		return false
+	}
+	delete(wants, k)
+	if len(wants) == 0 {
+		delete(l.peers, p)
+	}
+
+	l.removePeerFromCid(p, k)
+	return true
+}
+
+// CancelWantWithType will not cancel a pending want-block if we only sent a HAVE message.
+func (l *peerLedger) CancelWantWithType(p peer.ID, k cid.Cid, typ pb.Message_Wantlist_WantType) {
+	wants, ok := l.peers[p]
 	if !ok {
-		m = make(map[peer.ID]struct{})
-		l.cids[k] = m
+		return
+	}
+	e, ok := wants[k]
+	if !ok {
+		return
 	}
-	m[p] = struct{}{}
+	if typ == pb.Message_Wantlist_Have && e.WantType == pb.Message_Wantlist_Block {
+		return
+	}
+
+	delete(wants, k)
+	if len(wants) == 0 {
+		delete(l.peers, p)
+	}
+
+	l.removePeerFromCid(p, k)
 }
 
-func (l *peerLedger) CancelWant(p peer.ID, k cid.Cid) {
+func (l *peerLedger) removePeerFromCid(p peer.ID, k cid.Cid) {
 	m, ok := l.cids[k]
 	if !ok {
 		return
@@ -33,14 +85,72 @@
 	}
 }
 
-func (l *peerLedger) Peers(k cid.Cid) []peer.ID {
+type entryForPeer struct {
+	Peer peer.ID
+	entry
+}
+
+type entry struct {
+	Priority int32
+	WantType pb.Message_Wantlist_WantType
+}
+
+func (l *peerLedger) Peers(k cid.Cid) []entryForPeer {
 	m, ok := l.cids[k]
 	if !ok {
 		return nil
 	}
-	peers := make([]peer.ID, 0, len(m))
-	for p := range m {
+	peers := make([]entryForPeer, 0, len(m))
+	for p, e := range m {
+		peers = append(peers, entryForPeer{p, e})
+	}
+	return peers
+}
+
+func (l *peerLedger) CollectPeerIDs() []peer.ID {
+	peers := make([]peer.ID, 0, len(l.peers))
+	for p := range l.peers {
 		peers = append(peers, p)
 	}
 	return peers
 }
+
+func (l *peerLedger) WantlistSizeForPeer(p peer.ID) int {
+	return len(l.peers[p])
+}
+
+func (l *peerLedger) WantlistForPeer(p peer.ID) []wl.Entry {
+	cids, ok := l.peers[p]
+	if !ok {
+		return nil
+	}
+
+	entries := make([]wl.Entry, 0, len(cids))
+	for c, e := range cids {
+		entries = append(entries, wl.Entry{
+			Cid:      c,
+			Priority: e.Priority,
+			WantType: e.WantType,
+		})
+	}
+	return entries
+}
+
+// ClearPeerWantlist does not make an effort to fully remove the wantlist from memory.
+// This is intended for when the peer is still connected and the map capacity can
+// be reused. If the memory should be freed, use PeerDisconnected instead.
+func (l *peerLedger) ClearPeerWantlist(p peer.ID) {
+	cids, ok := l.peers[p]
+	if !ok {
+		return
+	}
+
+	for c := range cids {
+		l.removePeerFromCid(p, c)
+	}
+}
+
+func (l *peerLedger) PeerDisconnected(p peer.ID) {
+	l.ClearPeerWantlist(p)
+	delete(l.peers, p)
+}
diff --git a/bitswap/server/server.go b/bitswap/server/server.go
index 136ae3df9..424456036 100644
--- a/bitswap/server/server.go
+++ b/bitswap/server/server.go
@@ -210,6 +210,16 @@ func MaxOutstandingBytesPerPeer(count int) Option {
 	}
 }
 
+// MaxQueuedWantlistEntriesPerPeer limits how many individual wantlist entries each peer is allowed to queue.
+// If a peer sends us more than this, the newest entries are truncated.
+// It defaults to defaults.MaxQueuedWantlistEntriesPerPeer.
+func MaxQueuedWantlistEntriesPerPeer(count uint) Option {
+	o := decision.WithMaxQueuedWantlistEntriesPerPeer(count)
+	return func(bs *Server) {
+		bs.engineOptions = append(bs.engineOptions, o)
+	}
+}
+
 // HasBlockBufferSize configure how big the new blocks buffer should be.
 func HasBlockBufferSize(count int) Option {
 	if count < 0 {
diff --git a/examples/go.sum b/examples/go.sum
index a336c0ec3..49755f76c 100644
--- a/examples/go.sum
+++ b/examples/go.sum
@@ -420,8 +420,8 @@ github.com/ipfs/go-ipfs-files v0.3.0/go.mod h1:xAUtYMwB+iu/dtf6+muHNSFQCJG2dSiSt
 github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs=
 github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A=
 github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY=
-github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY=
 github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY=
+github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE=
 github.com/ipfs/go-ipfs-redirects-file v0.1.1 h1:Io++k0Vf/wK+tfnhEh63Yte1oQK5VGT2hIEYpD0Rzx8=
 github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk=
 github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY=
@@ -473,7 +473,7 @@ github.com/ipfs/go-path v0.3.0 h1:tkjga3MtpXyM5v+3EbRvOHEoo+frwi4oumw5K+KYWyA=
 github.com/ipfs/go-path v0.3.0/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I=
 github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U=
 github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU=
-github.com/ipfs/go-peertaskqueue v0.8.0 h1:JyNO144tfu9bx6Hpo119zvbEL9iQ760FHOiJYsUjqaU=
+github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg=
 github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw=
 github.com/ipfs/go-unixfs v0.3.1 h1:LrfED0OGfG98ZEegO4/xiprx2O+yS+krCMQSp7zLVv8=
 github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o=
diff --git a/go.mod b/go.mod
index ade83a31a..53d1eec4e 100644
--- a/go.mod
+++ b/go.mod
@@ -33,7 +33,7 @@ require (
 	github.com/ipfs/go-metrics-interface v0.0.1
 	github.com/ipfs/go-namesys v0.7.0
 	github.com/ipfs/go-path v0.3.0
-	github.com/ipfs/go-peertaskqueue v0.8.0
+	github.com/ipfs/go-peertaskqueue v0.8.1
 	github.com/ipfs/go-unixfs v0.3.1
 	github.com/ipfs/go-unixfsnode v1.5.1
 	github.com/ipfs/interface-go-ipfs-core v0.10.0
@@ -83,7 +83,7 @@ require (
 	github.com/ipfs/go-block-format v0.1.1 // indirect
 	github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect
 	github.com/ipfs/go-ipfs-files v0.3.0 // indirect
-	github.com/ipfs/go-ipfs-pq v0.0.2 // indirect
+	github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
 	github.com/ipfs/go-ipld-cbor v0.0.6 // indirect
 	github.com/ipfs/go-ipld-legacy v0.1.1 // indirect
 	github.com/ipfs/go-verifcid v0.0.2 // indirect
diff --git a/go.sum b/go.sum
index 9def56b73..da272b67c 100644
--- a/go.sum
+++ b/go.sum
@@ -425,8 +425,9 @@ github.com/ipfs/go-ipfs-files v0.3.0/go.mod h1:xAUtYMwB+iu/dtf6+muHNSFQCJG2dSiSt
 github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs=
 github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A=
 github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY=
-github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY=
 github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY=
+github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE=
+github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4=
 github.com/ipfs/go-ipfs-redirects-file v0.1.1 h1:Io++k0Vf/wK+tfnhEh63Yte1oQK5VGT2hIEYpD0Rzx8=
 github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk=
 github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY=
@@ -479,8 +480,8 @@ github.com/ipfs/go-path v0.3.0 h1:tkjga3MtpXyM5v+3EbRvOHEoo+frwi4oumw5K+KYWyA=
 github.com/ipfs/go-path v0.3.0/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I=
 github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U=
 github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU=
-github.com/ipfs/go-peertaskqueue v0.8.0 h1:JyNO144tfu9bx6Hpo119zvbEL9iQ760FHOiJYsUjqaU=
-github.com/ipfs/go-peertaskqueue v0.8.0/go.mod h1:cz8hEnnARq4Du5TGqiWKgMr/BOSQ5XOgMOh1K5YYKKM=
+github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg=
+github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU=
 github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw=
 github.com/ipfs/go-unixfs v0.3.1 h1:LrfED0OGfG98ZEegO4/xiprx2O+yS+krCMQSp7zLVv8=
 github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o=
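
Example usage (an illustrative sketch, not part of the patch itself): the new MaxQueuedWantlistEntriesPerPeer option is passed to bitswap.New like any other Option. The helper function name and the blockstore/network import paths below are assumptions made for the example; only the option and its 1024 default come from this change.

package example

import (
	"context"

	"github.com/ipfs/go-libipfs/bitswap"
	bsnet "github.com/ipfs/go-libipfs/bitswap/network"

	blockstore "github.com/ipfs/go-ipfs-blockstore"
)

// newLimitedBitswap constructs a Bitswap node that keeps at most 1024 queued
// wantlist entries per peer; wants beyond that limit are truncated by the server.
func newLimitedBitswap(ctx context.Context, net bsnet.BitSwapNetwork, bstore blockstore.Blockstore) *bitswap.Bitswap {
	return bitswap.New(ctx, net, bstore,
		bitswap.MaxQueuedWantlistEntriesPerPeer(1024),
	)
}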