From 431e3c201447a1a8b5ecb46f10cba0f3c5bb10c9 Mon Sep 17 00:00:00 2001 From: Rusty Conover Date: Mon, 27 Jan 2020 14:03:39 -0500 Subject: [PATCH 1/3] test: Remove sequential/test-https-keep-alive-large-write.js Remove a test that made a flawed assumption that a single large buffer write can be interrupted by a timeout event. --- .../test-https-keep-alive-large-write.js | 47 ------------------- 1 file changed, 47 deletions(-) delete mode 100644 test/sequential/test-https-keep-alive-large-write.js diff --git a/test/sequential/test-https-keep-alive-large-write.js b/test/sequential/test-https-keep-alive-large-write.js deleted file mode 100644 index 79381ba8735756..00000000000000 --- a/test/sequential/test-https-keep-alive-large-write.js +++ /dev/null @@ -1,47 +0,0 @@ -'use strict'; -const common = require('../common'); -if (!common.hasCrypto) - common.skip('missing crypto'); -const fixtures = require('../common/fixtures'); -const https = require('https'); - -// This test assesses whether long-running writes can complete -// or timeout because the socket is not aware that the backing -// stream is still writing. 
- -const writeSize = 30000000; -let socket; - -const server = https.createServer({ - key: fixtures.readKey('agent1-key.pem'), - cert: fixtures.readKey('agent1-cert.pem') -}, common.mustCall((req, res) => { - const content = Buffer.alloc(writeSize, 0x44); - - res.writeHead(200, { - 'Content-Type': 'application/octet-stream', - 'Content-Length': content.length.toString(), - 'Vary': 'Accept-Encoding' - }); - - socket = res.socket; - const onTimeout = socket._onTimeout; - socket._onTimeout = common.mustCallAtLeast(() => onTimeout.call(socket), 1); - res.write(content); - res.end(); -})); -server.on('timeout', common.mustNotCall()); - -server.listen(0, common.mustCall(() => { - https.get({ - path: '/', - port: server.address().port, - rejectUnauthorized: false - }, (res) => { - res.once('data', () => { - socket._onTimeout(); - res.on('data', () => {}); - }); - res.on('end', () => server.close()); - }); -})); From 7f1e0f8370708257252c9a296dd68474c1855984 Mon Sep 17 00:00:00 2001 From: Rusty Conover Date: Tue, 25 Feb 2020 23:12:51 -0500 Subject: [PATCH 2/3] test: change test to not be sensitive to buffer send size Change the test to not be sensitive to the buffer size causing TCP resets to be received by the client causing the test to fail. The test now reads the entire expected buffer and then checks for the expected event to fire. 
--- .../test-tls-close-event-after-write.js | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/test/parallel/test-tls-close-event-after-write.js b/test/parallel/test-tls-close-event-after-write.js index 31ebc897b14758..57c79e2e5ab72d 100644 --- a/test/parallel/test-tls-close-event-after-write.js +++ b/test/parallel/test-tls-close-event-after-write.js @@ -12,23 +12,22 @@ const tls = require('tls'); const fixtures = require('../common/fixtures'); let cconn = null; let sconn = null; +let read_len = 0; +const buffer_size = 1024 * 1024; function test() { if (cconn && sconn) { cconn.resume(); sconn.resume(); - sconn.end(Buffer.alloc(1024 * 1024)); - cconn.end(); + sconn.end(Buffer.alloc(buffer_size)); } } const server = tls.createServer({ key: fixtures.readKey('agent1-key.pem'), cert: fixtures.readKey('agent1-cert.pem') -}, function(c) { - c.on('close', function() { - server.close(); - }); +}, (c) => { + c.on('close', common.mustCall(() => server.close())); sconn = c; test(); }).listen(0, common.mustCall(function() { @@ -36,6 +35,12 @@ const server = tls.createServer({ rejectUnauthorized: false }, common.mustCall(function() { cconn = this; + cconn.on('data', (d) => { + read_len += d.length; + if (read_len === buffer_size) { + cconn.end(); + } + }); test(); })); })); From 32617643d54b7abe7e577a6503dc6bcb1cacc0e9 Mon Sep 17 00:00:00 2001 From: Rusty Conover Date: Fri, 24 Jan 2020 12:44:26 -0500 Subject: [PATCH 3/3] tls: Reduce memory copying and number of BIO buffer allocations Avoid copying buffers before passing to SSL_write if there are zero length buffers involved. Only copy the data when the buffer has a non zero length. Send a memory allocation hint to the crypto BIO about how much memory will likely be needed to be allocated by the next call to SSL_write. This makes a single allocation rather than the BIO allocating a buffer for each 16k TLS segment written. 
This solves a problem with large buffers written over TLS triggering V8's GC. --- benchmark/tls/throughput.js | 2 +- src/node_crypto_bio.cc | 7 +++++++ src/node_crypto_bio.h | 16 ++++++++++++++++ src/tls_wrap.cc | 30 ++++++++++++++++++++++++++---- 4 files changed, 50 insertions(+), 5 deletions(-) diff --git a/benchmark/tls/throughput.js b/benchmark/tls/throughput.js index a8f2d19649d04a..727d20e460008d 100644 --- a/benchmark/tls/throughput.js +++ b/benchmark/tls/throughput.js @@ -3,7 +3,7 @@ const common = require('../common.js'); const bench = common.createBenchmark(main, { dur: [5], type: ['buf', 'asc', 'utf'], - size: [2, 1024, 1024 * 1024] + size: [2, 1024, 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024] }); const fixtures = require('../../test/common/fixtures'); diff --git a/src/node_crypto_bio.cc b/src/node_crypto_bio.cc index fc143043ba56b1..55f5e8a5a37650 100644 --- a/src/node_crypto_bio.cc +++ b/src/node_crypto_bio.cc @@ -438,6 +438,13 @@ void NodeBIO::TryAllocateForWrite(size_t hint) { kThroughputBufferLength; if (len < hint) len = hint; + + // If there is a one time allocation size hint, use it. + if (allocate_hint_ > len) { + len = allocate_hint_; + allocate_hint_ = 0; + } + Buffer* next = new Buffer(env_, len); if (w == nullptr) { diff --git a/src/node_crypto_bio.h b/src/node_crypto_bio.h index 5de943806a9642..333a50848c7efd 100644 --- a/src/node_crypto_bio.h +++ b/src/node_crypto_bio.h @@ -96,6 +96,21 @@ class NodeBIO : public MemoryRetainer { return length_; } + // Provide a hint about the size of the next pending set of writes. TLS + // writes records of a maximum length of 16k of data plus a 5-byte header, + // a MAC (up to 20 bytes for SSLv3, TLS 1.0, TLS 1.1, and up to 32 bytes + // for TLS 1.2), and padding if a block cipher is used. If there is a + // large write this will result in potentially many buffers being + // allocated and gc'ed which can cause long pauses. 
By providing a
+  // guess about the amount of buffer space that will be needed in the
+  // next allocation this overhead is removed.
+  inline void set_allocate_tls_hint(size_t size) {
+    constexpr size_t kThreshold = 16 * 1024;
+    if (size >= kThreshold) {
+      allocate_hint_ = (size / kThreshold + 1) * (kThreshold + 5 + 32);
+    }
+  }
+
   inline void set_eof_return(int num) {
     eof_return_ = num;
   }
@@ -164,6 +179,7 @@ class NodeBIO : public MemoryRetainer {
   Environment* env_ = nullptr;
   size_t initial_ = kInitialBufferLength;
   size_t length_ = 0;
+  size_t allocate_hint_ = 0;
   int eof_return_ = -1;
   Buffer* read_head_ = nullptr;
   Buffer* write_head_ = nullptr;
diff --git a/src/tls_wrap.cc b/src/tls_wrap.cc
index 82274fde6db0c1..2f8da61f647f44 100644
--- a/src/tls_wrap.cc
+++ b/src/tls_wrap.cc
@@ -587,6 +587,7 @@ void TLSWrap::ClearIn() {
   AllocatedBuffer data = std::move(pending_cleartext_input_);
 
   crypto::MarkPopErrorOnReturn mark_pop_error_on_return;
+  crypto::NodeBIO::FromBIO(enc_out_)->set_allocate_tls_hint(data.size());
   int written = SSL_write(ssl_.get(), data.data(), data.size());
   Debug(this, "Writing %zu bytes, written = %d", data.size(), written);
   CHECK(written == -1 || written == static_cast<int>(data.size()));
@@ -701,8 +702,15 @@ int TLSWrap::DoWrite(WriteWrap* w,
 
   size_t length = 0;
   size_t i;
-  for (i = 0; i < count; i++)
+  size_t nonempty_i = 0;
+  size_t nonempty_count = 0;
+  for (i = 0; i < count; i++) {
     length += bufs[i].len;
+    if (bufs[i].len > 0) {
+      nonempty_i = i;
+      nonempty_count += 1;
+    }
+  }
 
   // We want to trigger a Write() on the underlying stream to drive the stream
   // system, but don't want to encrypt empty buffers into a TLS frame, so see
@@ -747,20 +755,34 @@ int TLSWrap::DoWrite(WriteWrap* w,
   crypto::MarkPopErrorOnReturn mark_pop_error_on_return;
 
   int written = 0;
-  if (count != 1) {
+
+  // It is common for zero length buffers to be written;
+  // don't copy data if there is one buffer with data
+  // and one or more zero length buffers. 
+  // _http_outgoing.js writes a zero length buffer
+  // in OutgoingMessage.prototype.end. If there was a large amount
+  // of data supplied to end() there is no sense allocating
+  // and copying it when it could just be used.
+
+  if (nonempty_count != 1) {
     data = env()->AllocateManaged(length);
     size_t offset = 0;
     for (i = 0; i < count; i++) {
       memcpy(data.data() + offset, bufs[i].base, bufs[i].len);
       offset += bufs[i].len;
     }
+
+    crypto::NodeBIO::FromBIO(enc_out_)->set_allocate_tls_hint(length);
     written = SSL_write(ssl_.get(), data.data(), length);
   } else {
     // Only one buffer: try to write directly, only store if it fails
-    written = SSL_write(ssl_.get(), bufs[0].base, bufs[0].len);
+    uv_buf_t* buf = &bufs[nonempty_i];
+    crypto::NodeBIO::FromBIO(enc_out_)->set_allocate_tls_hint(buf->len);
+    written = SSL_write(ssl_.get(), buf->base, buf->len);
+
     if (written == -1) {
       data = env()->AllocateManaged(length);
-      memcpy(data.data(), bufs[0].base, bufs[0].len);
+      memcpy(data.data(), buf->base, buf->len);
     }
   }