This repository has been archived by the owner on Feb 1, 2023. It is now read-only.

Merge pull request #84 from ipfs/fix/reduce-alloc-2
remove allocations round two
Stebalien authored Feb 27, 2019
2 parents 6e3e040 + 8d357ff commit ee93aa8
Showing 6 changed files with 38 additions and 17 deletions.
29 changes: 25 additions & 4 deletions messagequeue/messagequeue.go
@@ -2,6 +2,7 @@ package messagequeue
 
 import (
 	"context"
+	"sync"
 	"time"
 
 	bsmsg "github.com/ipfs/go-bitswap/message"
@@ -43,7 +44,7 @@ type MessageQueue struct {
 }
 
 type messageRequest struct {
-	entries []*bsmsg.Entry
+	entries []bsmsg.Entry
 	ses     uint64
 }
 
@@ -65,9 +66,9 @@ func New(ctx context.Context, p peer.ID, network MessageNetwork) *MessageQueue {
 }
 
 // AddMessage adds new entries to an outgoing message for a given session.
-func (mq *MessageQueue) AddMessage(entries []*bsmsg.Entry, ses uint64) {
+func (mq *MessageQueue) AddMessage(entries []bsmsg.Entry, ses uint64) {
 	select {
-	case mq.newRequests <- &messageRequest{entries, ses}:
+	case mq.newRequests <- newMessageRequest(entries, ses):
 	case <-mq.ctx.Done():
 	}
 }
@@ -123,8 +124,28 @@ func (mq *MessageQueue) runQueue() {
 	}
 }
 
+// We allocate a bunch of these so use a pool.
+var messageRequestPool = sync.Pool{
+	New: func() interface{} {
+		return new(messageRequest)
+	},
+}
+
+func newMessageRequest(entries []bsmsg.Entry, session uint64) *messageRequest {
+	mr := messageRequestPool.Get().(*messageRequest)
+	mr.entries = entries
+	mr.ses = session
+	return mr
+}
+
+func returnMessageRequest(mr *messageRequest) {
+	*mr = messageRequest{}
+	messageRequestPool.Put(mr)
+}
+
 func (mr *messageRequest) handle(mq *MessageQueue) {
 	mq.addEntries(mr.entries, mr.ses)
+	returnMessageRequest(mr)
 }
 
 func (wr *wantlistRequest) handle(mq *MessageQueue) {
@@ -140,7 +161,7 @@ func (wr *wantlistRequest) handle(mq *MessageQueue) {
 	}
 }
 
-func (mq *MessageQueue) addEntries(entries []*bsmsg.Entry, ses uint64) {
+func (mq *MessageQueue) addEntries(entries []bsmsg.Entry, ses uint64) {
 	for _, e := range entries {
 		if e.Cancel {
 			if mq.wl.Remove(e.Cid, ses) {
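
The pool added above follows the standard sync.Pool recycling pattern: Get a recycled (or freshly zeroed) *messageRequest, fill in its fields, and, once the request has been handled, reset it to the zero value before Put so the pool does not keep the old entries slice alive. Below is a minimal, self-contained sketch of the same pattern; the request type and its fields are illustrative stand-ins, not part of this package.

package main

import (
	"fmt"
	"sync"
)

// request is a hypothetical stand-in for messageRequest.
type request struct {
	entries []int
	ses     uint64
}

// requestPool hands out recycled *request values so the hot path does not
// allocate a fresh struct for every call, mirroring messageRequestPool above.
var requestPool = sync.Pool{
	New: func() interface{} { return new(request) },
}

func newRequest(entries []int, ses uint64) *request {
	r := requestPool.Get().(*request) // reuse a pooled struct when available
	r.entries = entries
	r.ses = ses
	return r
}

func returnRequest(r *request) {
	*r = request{} // drop references (e.g. the entries slice) before pooling
	requestPool.Put(r)
}

func main() {
	r := newRequest([]int{1, 2, 3}, 42)
	fmt.Println(len(r.entries), r.ses) // handle the request
	returnRequest(r)                   // then hand the struct back to the pool
}

Resetting before Put matters: a pooled object that still references its old slice would pin that memory until the object is reused, undercutting the allocation savings the pool is meant to buy.
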
4 changes: 2 additions & 2 deletions peermanager/peermanager.go
@@ -19,7 +19,7 @@ var (
 
 // PeerQueue provides a queue of messages to be sent for a single peer.
 type PeerQueue interface {
-	AddMessage(entries []*bsmsg.Entry, ses uint64)
+	AddMessage(entries []bsmsg.Entry, ses uint64)
 	Startup()
 	AddWantlist(initialWants *wantlist.SessionTrackedWantlist)
 	Shutdown()
@@ -108,7 +108,7 @@ func (pm *PeerManager) Disconnected(p peer.ID) {
 
 // SendMessage is called to send a message to all or some peers in the pool;
 // if targets is nil, it sends to all.
-func (pm *PeerManager) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) {
+func (pm *PeerManager) SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) {
 	if len(targets) == 0 {
 		pm.peerQueuesLk.RLock()
 		for _, p := range pm.peerQueues {
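
The signature change from []*bsmsg.Entry to []bsmsg.Entry that runs through these interfaces is the PR's other allocation win: with a slice of values, every entry lives inline in the slice's single backing array, whereas a slice of pointers needs roughly one extra heap allocation per &Entry{} on top of the pointer array, and each of those objects is extra work for the garbage collector. A rough sketch of the difference, using a hypothetical entry type rather than bsmsg.Entry:

package main

import "fmt"

// entry is a hypothetical stand-in for bsmsg.Entry.
type entry struct {
	cancel bool
	key    string
}

func main() {
	keys := []string{"a", "b", "c"}

	// Pointer slice: one backing array of pointers plus one heap allocation
	// per &entry{} — on the order of len(keys)+1 allocations.
	ptrs := make([]*entry, 0, len(keys))
	for _, k := range keys {
		ptrs = append(ptrs, &entry{key: k})
	}

	// Value slice: the entries are stored inline in a single backing array —
	// one allocation regardless of len(keys).
	vals := make([]entry, 0, len(keys))
	for _, k := range keys {
		vals = append(vals, entry{key: k})
	}

	fmt.Println(len(ptrs), len(vals))
}

The trade-off is that appending and ranging now copy Entry values instead of pointers, which is fine here because the consumers (addEntries, the fake peers in the tests) only read the fields.
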
6 changes: 3 additions & 3 deletions peermanager/peermanager_test.go
@@ -15,7 +15,7 @@ import (
 
 type messageSent struct {
 	p       peer.ID
-	entries []*bsmsg.Entry
+	entries []bsmsg.Entry
 	ses     uint64
 }
 
@@ -27,7 +27,7 @@ type fakePeer struct {
 func (fp *fakePeer) Startup()  {}
 func (fp *fakePeer) Shutdown() {}
 
-func (fp *fakePeer) AddMessage(entries []*bsmsg.Entry, ses uint64) {
+func (fp *fakePeer) AddMessage(entries []bsmsg.Entry, ses uint64) {
 	fp.messagesSent <- messageSent{fp.p, entries, ses}
 }
 func (fp *fakePeer) AddWantlist(initialWants *wantlist.SessionTrackedWantlist) {}
@@ -44,7 +44,7 @@ func collectAndCheckMessages(
 	ctx context.Context,
 	t *testing.T,
 	messagesSent <-chan messageSent,
-	entries []*bsmsg.Entry,
+	entries []bsmsg.Entry,
 	ses uint64,
 	timeout time.Duration) []peer.ID {
 	var peersReceived []peer.ID
6 changes: 3 additions & 3 deletions testutil/testutil.go
@@ -50,11 +50,11 @@ func GenerateWantlist(n int, ses uint64) *wantlist.SessionTrackedWantlist {
 }
 
 // GenerateMessageEntries makes fake bitswap message entries.
-func GenerateMessageEntries(n int, isCancel bool) []*bsmsg.Entry {
-	bsmsgs := make([]*bsmsg.Entry, 0, n)
+func GenerateMessageEntries(n int, isCancel bool) []bsmsg.Entry {
+	bsmsgs := make([]bsmsg.Entry, 0, n)
 	for i := 0; i < n; i++ {
 		prioritySeq++
-		msg := &bsmsg.Entry{
+		msg := bsmsg.Entry{
 			Entry:  wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq),
 			Cancel: isCancel,
 		}
8 changes: 4 additions & 4 deletions wantmanager/wantmanager.go
@@ -25,7 +25,7 @@ const (
 type PeerHandler interface {
 	Disconnected(p peer.ID)
 	Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist)
-	SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64)
+	SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64)
 }
 
 type wantMessage interface {
@@ -187,9 +187,9 @@ func (wm *WantManager) run() {
 }
 
 func (wm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []peer.ID, cancel bool, ses uint64) {
-	entries := make([]*bsmsg.Entry, 0, len(ks))
+	entries := make([]bsmsg.Entry, 0, len(ks))
 	for i, k := range ks {
-		entries = append(entries, &bsmsg.Entry{
+		entries = append(entries, bsmsg.Entry{
 			Cancel: cancel,
 			Entry:  wantlist.NewRefEntry(k, maxPriority-i),
 		})
@@ -202,7 +202,7 @@ func (wm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []p
 }
 
 type wantSet struct {
-	entries []*bsmsg.Entry
+	entries []bsmsg.Entry
 	targets []peer.ID
 	from    uint64
 }
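
WantManager.addEntries above is where entries are actually built per request, so it is the natural place to check the effect of the pointer-to-value change. A sketch of how the allocation difference could be measured with the standard testing package; the entry type, the batch size of 64, and the benchmark names are illustrative only and not taken from this PR.

package entrybench

import "testing"

// entry is a hypothetical stand-in for bsmsg.Entry.
type entry struct {
	cancel bool
	prio   int
}

// Package-level sinks keep the built slices live so the compiler cannot
// optimize the allocations away.
var (
	sinkPtr []*entry
	sinkVal []entry
)

func BenchmarkPointerEntries(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		entries := make([]*entry, 0, 64)
		for j := 0; j < 64; j++ {
			entries = append(entries, &entry{prio: j})
		}
		sinkPtr = entries
	}
}

func BenchmarkValueEntries(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		entries := make([]entry, 0, 64)
		for j := 0; j < 64; j++ {
			entries = append(entries, entry{prio: j})
		}
		sinkVal = entries
	}
}

Running these with go test -bench . -benchmem would be expected to show roughly one allocation per entry plus one for the backing array in the pointer version, versus a single allocation in the value version, though the exact figures depend on the compiler's escape analysis.
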
2 changes: 1 addition & 1 deletion wantmanager/wantmanager_test.go
@@ -19,7 +19,7 @@ type fakePeerHandler struct {
 	lastWantSet wantSet
 }
 
-func (fph *fakePeerHandler) SendMessage(entries []*bsmsg.Entry, targets []peer.ID, from uint64) {
+func (fph *fakePeerHandler) SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64) {
 	fph.lk.Lock()
 	fph.lastWantSet = wantSet{entries, targets, from}
 	fph.lk.Unlock()
