codec/proto: reuse of marshal byte buffers #3167

Merged · 8 commits · Dec 20, 2019
Changes from 7 commits
9 changes: 7 additions & 2 deletions Documentation/encoding.md
@@ -8,8 +8,13 @@ into bytes and vice-versa for the purposes of network transmission.
## Codecs (Serialization and Deserialization)

A `Codec` contains code to serialize a message into a byte slice (`Marshal`) and
deserialize a byte slice back into a message (`Unmarshal`). `Codec`s are
registered by name into a global registry maintained in the `encoding` package.
deserialize a byte slice back into a message (`Unmarshal`). Optionally, a
`ReturnBuffer` method may also be implemented so that the byte slice returned
by `Marshal` can be reused; note that this is an experimental feature whose
API is still in flux.

`Codec`s are registered by name into a global registry maintained in the
`encoding` package.
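
A minimal sketch of a codec implementing the optional `ReturnBuffer` method
(the `pooledCodec` type, its name string, and its `sync.Pool` strategy are
illustrative assumptions, not part of this change):

```go
package pooledcodec

import (
	"fmt"
	"sync"

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc/encoding"
)

// Name is the name this illustrative codec registers under.
const Name = "pooledproto"

func init() {
	encoding.RegisterCodec(pooledCodec{})
}

// bufPool holds byte slices handed back through ReturnBuffer so later
// Marshal calls can reuse them.
var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, 64) },
}

type pooledCodec struct{}

func (pooledCodec) Marshal(v interface{}) ([]byte, error) {
	msg, ok := v.(proto.Message)
	if !ok {
		return nil, fmt.Errorf("failed to marshal: %v is not a proto.Message", v)
	}
	// proto.Buffer appends to the supplied slice, so a pooled zero-length
	// slice with spare capacity avoids a fresh allocation on most calls.
	buf := proto.NewBuffer(bufPool.Get().([]byte))
	if err := buf.Marshal(msg); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func (pooledCodec) Unmarshal(data []byte, v interface{}) error {
	msg, ok := v.(proto.Message)
	if !ok {
		return fmt.Errorf("failed to unmarshal: %v is not a proto.Message", v)
	}
	return proto.Unmarshal(data, msg)
}

// ReturnBuffer is called once gRPC no longer needs the slice returned by
// Marshal; truncating to zero length keeps the capacity for reuse.
func (pooledCodec) ReturnBuffer(buf []byte) {
	bufPool.Put(buf[:0])
}

func (pooledCodec) Name() string { return Name }
```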

### Implementing a `Codec`

16 changes: 16 additions & 0 deletions codec.go
@@ -31,6 +31,22 @@ type baseCodec interface {
Unmarshal(data []byte, v interface{}) error
}

// A reusableBaseCodec is exactly like a baseCodec, but additionally requires
// a ReturnBuffer method. Once a Marshal caller is done with the returned byte
// buffer, it can return the buffer to the encoding library for reuse via this
// method.
type reusableBaseCodec interface {
baseCodec
// ReturnBuffer may be called with the byte buffer returned by Marshal once
// gRPC is done with the buffer.
//
// gRPC will not call ReturnBuffer if any of the following is true:
// 1. Stats handlers are used.
// 2. Binlogs are enabled.
ReturnBuffer(buf []byte)
}
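
For illustration, code that holds a baseCodec can detect this optional
capability with a type assertion; the returnToCodec helper below is a
hypothetical sketch, not part of this change:

```go
// returnToCodec hands buf back to the codec, but only if the codec opted in
// by implementing the optional ReturnBuffer method.
func returnToCodec(c baseCodec, buf []byte) {
	if rc, ok := c.(reusableBaseCodec); ok {
		rc.ReturnBuffer(buf)
	}
}
```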

var _ baseCodec = Codec(nil)
var _ baseCodec = encoding.Codec(nil)

5 changes: 5 additions & 0 deletions encoding/encoding.go
@@ -75,6 +75,11 @@ func GetCompressor(name string) Compressor {
// Codec defines the interface gRPC uses to encode and decode messages. Note
// that implementations of this interface must be thread safe; a Codec's
// methods can be called from concurrent goroutines.
//
// Optionally, if a ReturnBuffer(buf []byte) method is implemented, gRPC may
// call it to hand back the byte slice it received from Marshal once it is
// done with it. The codec may then reuse this byte slice in a future Marshal
// operation to reduce the application's memory footprint.
type Codec interface {
// Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error)
68 changes: 35 additions & 33 deletions encoding/proto/proto.go
@@ -21,7 +21,6 @@
package proto

import (
"math"
"sync"

"github.com/golang/protobuf/proto"
@@ -38,29 +37,16 @@ func init() {
// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
type codec struct{}

type cachedProtoBuffer struct {
lastMarshaledSize uint32
proto.Buffer
}

func capToMaxInt32(val int) uint32 {
if val > math.MaxInt32 {
return uint32(math.MaxInt32)
}
return uint32(val)
}

func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
func marshal(v interface{}, pb *proto.Buffer) ([]byte, error) {
protoMsg := v.(proto.Message)
newSlice := make([]byte, 0, cb.lastMarshaledSize)
newSlice := returnBufferPool.Get().([]byte)

cb.SetBuf(newSlice)
cb.Reset()
if err := cb.Marshal(protoMsg); err != nil {
pb.SetBuf(newSlice)
pb.Reset()
if err := pb.Marshal(protoMsg); err != nil {
return nil, err
}
out := cb.Bytes()
cb.lastMarshaledSize = capToMaxInt32(len(out))
out := pb.Bytes()
return out, nil
}

@@ -70,12 +56,12 @@ func (codec) Marshal(v interface{}) ([]byte, error) {
return pm.Marshal()
}

cb := protoBufferPool.Get().(*cachedProtoBuffer)
out, err := marshal(v, cb)
pb := protoBufferPool.Get().(*proto.Buffer)
out, err := marshal(v, pb)

// put back buffer and lose the ref to the slice
cb.SetBuf(nil)
protoBufferPool.Put(cb)
pb.SetBuf(nil)
protoBufferPool.Put(pb)
return out, err
}

@@ -88,23 +74,39 @@ func (codec) Unmarshal(data []byte, v interface{}) error {
return pu.Unmarshal(data)
}

cb := protoBufferPool.Get().(*cachedProtoBuffer)
cb.SetBuf(data)
err := cb.Unmarshal(protoMsg)
cb.SetBuf(nil)
protoBufferPool.Put(cb)
pb := protoBufferPool.Get().(*proto.Buffer)
pb.SetBuf(data)
err := pb.Unmarshal(protoMsg)
pb.SetBuf(nil)
protoBufferPool.Put(pb)
return err
}

func (codec) ReturnBuffer(data []byte) {
// Set the length of the buffer to zero so that future appends start from
// the zeroth byte instead of appending to the previous, stale data.
//
// Note that sync.Pool with non-pointer objects (slices, in this case)
// causes small allocations on Put because of how interface{} works under
// the hood. This isn't a problem for us, however, because we care about
// the size of allocations more than their count. Ideally, we'd use a
// bytes.Buffer as the Marshal return value to remove even that allocation,
// but we can't change the Marshal interface at this point.
returnBufferPool.Put(data[:0])
}
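
The data[:0] re-slice above keeps the backing array while resetting the
length, so later appends overwrite the stale bytes instead of growing the
buffer; a standalone illustration:

```go
buf := make([]byte, 0, 32)
buf = append(buf, 1, 2, 3) // len(buf) == 3, cap(buf) == 32
buf = buf[:0]              // len(buf) == 0, cap(buf) == 32, same backing array
buf = append(buf, 9)       // reuses the array: no new allocation, no stale data
```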

func (codec) Name() string {
return Name
}

var protoBufferPool = &sync.Pool{
New: func() interface{} {
return &cachedProtoBuffer{
Buffer: proto.Buffer{},
lastMarshaledSize: 16,
}
return &proto.Buffer{}
},
}

var returnBufferPool = &sync.Pool{
New: func() interface{} {
return make([]byte, 0, 16)
},
}
50 changes: 50 additions & 0 deletions encoding/proto/proto_test.go
@@ -127,3 +127,53 @@ func TestStaggeredMarshalAndUnmarshalUsingSamePool(t *testing.T) {
}
}
}

func TestBufferReuse(t *testing.T) {
c := codec{}

marshal := func(toMarshal []byte) []byte {
protoIn := &codec_perf.Buffer{Body: toMarshal}
b, err := c.Marshal(protoIn)
if err != nil {
t.Errorf("codec.Marshal(%v) failed: %v", protoIn, err)
}
// We cannot expect the actual pointer to be the same because sync.Pool
// may be cleared during GC pauses.
bc := append([]byte(nil), b...)
c.ReturnBuffer(b)
return bc
}

unmarshal := func(b []byte) []byte {
protoOut := &codec_perf.Buffer{}
if err := c.Unmarshal(b, protoOut); err != nil {
t.Errorf("codec.Unarshal(%v) failed: %v", protoOut, err)
}
return protoOut.GetBody()
}

check := func(in []byte, out []byte) {
if len(in) != len(out) {
t.Errorf("unequal lengths: len(in=%v)=%d, len(out=%v)=%d", in, len(in), out, len(out))
}

for i := 0; i < len(in); i++ {
if in[i] != out[i] {
t.Errorf("unequal values: in[%d] = %v, out[%d] = %v", i, in[i], i, out[i])
}
}
}

// To test that the returned buffer does not have unexpected data at the end,
// we use a second input that is smaller than the first.
in1 := []byte{1, 2, 3}
b1 := marshal(in1)
in2 := []byte{4, 5}
b2 := marshal(in2)

out1 := unmarshal(b1)
out2 := unmarshal(b2)

check(in1, out1)
check(in2, out2)
}
18 changes: 16 additions & 2 deletions internal/leakcheck/leakcheck.go
@@ -25,6 +25,7 @@ import (
"runtime"
"sort"
"strings"
"sync/atomic"
"time"
)

@@ -74,11 +75,24 @@ func ignore(g string) bool {
return false
}

var lastStacktraceSize uint32 = 4 << 10

// interestingGoroutines returns all goroutines we care about for the purpose of
// leak checking. It excludes testing or runtime ones.
func interestingGoroutines() (gs []string) {
buf := make([]byte, 2<<20)
buf = buf[:runtime.Stack(buf, true)]
n := atomic.LoadUint32(&lastStacktraceSize)
buf := make([]byte, n)
for {
nb := uint32(runtime.Stack(buf, true))
if nb < uint32(len(buf)) {
buf = buf[:nb]
break
}
n <<= 1
buf = make([]byte, n)
}
atomic.StoreUint32(&lastStacktraceSize, n)

for _, g := range strings.Split(string(buf), "\n\n") {
if !ignore(g) {
gs = append(gs, g)
33 changes: 24 additions & 9 deletions internal/transport/controlbuf.go
@@ -34,17 +34,18 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
}

type itemNode struct {
it interface{}
next *itemNode
it interface{}
onDequeue func()
next *itemNode
}

type itemList struct {
head *itemNode
tail *itemNode
}

func (il *itemList) enqueue(i interface{}) {
n := &itemNode{it: i}
func (il *itemList) enqueue(i interface{}, onDequeue func()) {
n := &itemNode{it: i, onDequeue: onDequeue}
if il.tail == nil {
il.head, il.tail = n, n
return
@@ -63,11 +64,14 @@ func (il *itemList) dequeue() interface{} {
if il.head == nil {
return nil
}
i := il.head.it
i, onDequeue := il.head.it, il.head.onDequeue
il.head = il.head.next
if il.head == nil {
il.tail = nil
}
if onDequeue != nil {
onDequeue()
}
return i
}
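
To illustrate the new onDequeue hook (the item values below are made up), a
callback attached at enqueue time fires exactly when its item leaves the
list:

```go
var il itemList
done := false
il.enqueue("headers", nil)                 // no callback for this item
il.enqueue("data", func() { done = true }) // callback runs on dequeue

_ = il.dequeue() // returns "headers"; done is still false
_ = il.dequeue() // returns "data"; done is now true
```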

@@ -136,6 +140,7 @@ type dataFrame struct {
// onEachWrite is called every time
// a part of d is written out.
onEachWrite func()
rb *ReturnBuffer
}

func (*dataFrame) isTransportResponseFrame() bool { return false }
@@ -329,7 +334,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b
wakeUp = true
c.consumerWaiting = false
}
c.list.enqueue(it)
c.list.enqueue(it, nil)
if it.isTransportResponseFrame() {
c.transportResponseFrames++
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
@@ -616,7 +621,7 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {

if str.state != empty { // either active or waiting on stream quota.
// add it to str's list of items.
str.itl.enqueue(h)
str.itl.enqueue(h, nil)
return nil
}
if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
@@ -631,7 +636,7 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
itl: &itemList{},
wq: h.wq,
}
str.itl.enqueue(h)
str.itl.enqueue(h, nil)
return l.originateStream(str)
}

@@ -702,7 +707,11 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error {
}
// If we got data for a stream it means that
// stream was originated and the headers were sent out.
str.itl.enqueue(df)
var onDequeue func()
if df.rb != nil {
onDequeue = df.rb.Done
}
str.itl.enqueue(df, onDequeue)
if str.state == empty {
str.state = active
l.activeStreams.enqueue(str)
@@ -726,6 +735,12 @@ func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequ
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
c.onWrite()
if str, ok := l.estdStreams[c.streamID]; ok {
// Dequeue all items from the stream's item list. This would call any pending onDequeue functions.
if str.state == active {
for !str.itl.isEmpty() {
str.itl.dequeue()
}
}
// On the server side it could be a trailers-only response or
// a RST_STREAM before stream initialization thus the stream might
// not be established yet.
4 changes: 4 additions & 0 deletions internal/transport/http2_client.go
@@ -834,6 +834,7 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
rb: opts.ReturnBuffer,
}
if hdr != nil || data != nil { // If it's not an empty data frame.
// Add some data to grpc message header so that we can equally
@@ -850,6 +851,9 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
return err
}
}
if df.rb != nil {
df.rb.Add(1)
}
return t.controlBuf.put(df)
}
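
The ReturnBuffer type referenced through opts.ReturnBuffer is defined
elsewhere in this PR and is not shown in this excerpt; judging from the
Add(1) call here and the Done callback hooked up as onDequeue in the
controlbuf, it behaves like a small reference counter, roughly along these
lines (a sketch under that assumption, not the actual definition):

```go
package transport

import "sync/atomic"

// ReturnBuffer (sketch) counts the pending writes that still reference a
// marshaled buffer and releases the buffer once the count reaches zero.
type ReturnBuffer struct {
	refs     int32
	returnFn func() // e.g. func() { codec.ReturnBuffer(buf) }
}

// Add records n more pending uses of the buffer.
func (rb *ReturnBuffer) Add(n int32) {
	atomic.AddInt32(&rb.refs, n)
}

// Done marks one pending use as finished; the final Done returns the buffer.
func (rb *ReturnBuffer) Done() {
	if atomic.AddInt32(&rb.refs, -1) == 0 {
		rb.returnFn()
	}
}
```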

4 changes: 4 additions & 0 deletions internal/transport/http2_server.go
@@ -912,6 +912,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
h: hdr,
d: data,
onEachWrite: t.setResetPingStrikes,
rb: opts.ReturnBuffer,
}
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
select {
@@ -921,6 +922,9 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
}
return ContextErr(s.ctx.Err())
}
if df.rb != nil {
df.rb.Add(1)
}
return t.controlBuf.put(df)
}

Expand Down