From c60cd67e1c689a60f0a32ecd2bfd199552a3c75e Mon Sep 17 00:00:00 2001 From: Richard Lau Date: Tue, 16 Apr 2024 00:23:18 +0100 Subject: [PATCH 01/41] test: skip test for dynamically linked OpenSSL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As a temporary measure to unblock the CI, skip the RSA implicit rejection test when Node.js is built against a dynamically linked OpenSSL. PR-URL: https://github.com/nodejs/node/pull/52542 Refs: https://github.com/nodejs/node/issues/52537 Refs: https://github.com/nodejs-private/node-private/pull/525 Refs: https://hackerone.com/reports/2269177 Reviewed-By: Michaël Zasso Reviewed-By: Antoine du Hamel Reviewed-By: Luigi Pinca Reviewed-By: Michael Dawson --- test/parallel/test-crypto-rsa-dsa.js | 32 ++++++++++++++++------------ 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/test/parallel/test-crypto-rsa-dsa.js b/test/parallel/test-crypto-rsa-dsa.js index 438037acc867c2..ecda345989789d 100644 --- a/test/parallel/test-crypto-rsa-dsa.js +++ b/test/parallel/test-crypto-rsa-dsa.js @@ -223,20 +223,24 @@ function test_rsa(padding, encryptOaepHash, decryptOaepHash) { if (padding === constants.RSA_PKCS1_PADDING) { - assert.throws(() => { - crypto.privateDecrypt({ - key: rsaKeyPem, - padding: padding, - oaepHash: decryptOaepHash - }, encryptedBuffer); - }, { code: 'ERR_INVALID_ARG_VALUE' }); - assert.throws(() => { - crypto.privateDecrypt({ - key: rsaPkcs8KeyPem, - padding: padding, - oaepHash: decryptOaepHash - }, encryptedBuffer); - }, { code: 'ERR_INVALID_ARG_VALUE' }); + // TODO(richardlau): see if it's possible to determine implicit rejection + // support when dynamically linked against OpenSSL. + if (!process.config.variables.node_shared_openssl) { + assert.throws(() => { + crypto.privateDecrypt({ + key: rsaKeyPem, + padding: padding, + oaepHash: decryptOaepHash + }, encryptedBuffer); + }, { code: 'ERR_INVALID_ARG_VALUE' }); + assert.throws(() => { + crypto.privateDecrypt({ + key: rsaPkcs8KeyPem, + padding: padding, + oaepHash: decryptOaepHash + }, encryptedBuffer); + }, { code: 'ERR_INVALID_ARG_VALUE' }); + } } else { let decryptedBuffer = crypto.privateDecrypt({ key: rsaKeyPem, From 678641f470e64b42b3c0bc45270d0561ebb3c269 Mon Sep 17 00:00:00 2001 From: Bo Anderson Date: Tue, 2 Apr 2024 22:55:25 +0100 Subject: [PATCH 02/41] deps: V8: cherry-pick d15d49b09dc7 Original commit message: Make bitfields only as wide as necessary for enums clang now complains when a BitField for an enum is too wide. We could suppress this, but it seems kind of useful from an uninformed distance, so I made a few bitfields smaller instead. (For AddressingMode, since its size is target-dependent, I added an explicit underlying type to the enum instead, which suppresses the diag on a per-enum basis.) This is without any understanding of the code I'm touching. Especially the change in v8-internal.h feels a bit risky to me. 
Bug: chromium:1348574 Change-Id: I73395de593045036b72dadf4e3147b5f7e13c958 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3794708 Commit-Queue: Nico Weber Reviewed-by: Leszek Swirski Reviewed-by: Hannes Payer Auto-Submit: Nico Weber Cr-Commit-Position: refs/heads/main@{#82109} Refs: https://github.com/v8/v8/commit/d15d49b09dc7aef9edcc4cf6a0cb2b77a0db203f PR-URL: https://github.com/nodejs/node/pull/52337 Fixes: https://github.com/nodejs/node/issues/52230 Reviewed-By: Rafael Gonzaga --- common.gypi | 2 +- deps/v8/src/ast/ast.h | 2 +- deps/v8/src/base/bit-field.h | 5 +++++ deps/v8/src/compiler/backend/instruction-codes.h | 4 ++-- deps/v8/src/compiler/backend/instruction.h | 4 ++-- deps/v8/src/maglev/maglev-ir.h | 2 +- deps/v8/src/wasm/wasm-code-manager.h | 2 +- 7 files changed, 13 insertions(+), 8 deletions(-) diff --git a/common.gypi b/common.gypi index 38471d4639eb5e..ec92c9df4c1ea2 100644 --- a/common.gypi +++ b/common.gypi @@ -36,7 +36,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.36', + 'v8_embedder_string': '-node.37', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index 971a2b0ec1321e..be1fe09ee438ad 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -999,7 +999,7 @@ class Literal final : public Expression { friend class AstNodeFactory; friend Zone; - using TypeField = Expression::NextBitField; + using TypeField = Expression::NextBitField; Literal(int smi, int position) : Expression(position, kLiteral), smi_(smi) { bit_field_ = TypeField::update(bit_field_, kSmi); diff --git a/deps/v8/src/base/bit-field.h b/deps/v8/src/base/bit-field.h index 63142a20fa2c29..9605c41c14f8ce 100644 --- a/deps/v8/src/base/bit-field.h +++ b/deps/v8/src/base/bit-field.h @@ -40,6 +40,11 @@ class BitField final { static constexpr U kNumValues = U{1} << kSize; // Value for the field with all bits set. + // If clang complains + // "constexpr variable 'kMax' must be initialized by a constant expression" + // on this line, then you're creating a BitField for an enum with more bits + // than needed for the enum values. Either reduce the BitField size, + // or give the enum an explicit underlying type. static constexpr T kMax = static_cast(kNumValues - 1); template diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h index b06b522287f2d1..19cb21d041b897 100644 --- a/deps/v8/src/compiler/backend/instruction-codes.h +++ b/deps/v8/src/compiler/backend/instruction-codes.h @@ -195,7 +195,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, V(None) \ TARGET_ADDRESSING_MODE_LIST(V) -enum AddressingMode { +enum AddressingMode : uint8_t { #define DECLARE_ADDRESSING_MODE(Name) kMode_##Name, ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE) #undef DECLARE_ADDRESSING_MODE @@ -306,7 +306,7 @@ using MiscField = base::BitField; // LaneSizeField and AccessModeField are helper types to encode/decode a lane // size, an access mode, or both inside the overlapping MiscField. using LaneSizeField = base::BitField; -using AccessModeField = base::BitField; +using AccessModeField = base::BitField; // TODO(turbofan): {HasMemoryAccessMode} is currently only used to guard // decoding (in CodeGenerator and InstructionScheduler). Encoding (in // InstructionSelector) is not yet guarded. 
There are in fact instructions for
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 89394b2c2427d3..66a6232c32a8d0 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -586,8 +586,8 @@ class LocationOperand : public InstructionOperand {
   }
   STATIC_ASSERT(KindField::kSize == 3);
-  using LocationKindField = base::BitField64;
-  using RepresentationField = base::BitField64;
+  using LocationKindField = base::BitField64;
+  using RepresentationField = LocationKindField::Next;
   using IndexField = base::BitField64;
 };
diff --git a/deps/v8/src/maglev/maglev-ir.h b/deps/v8/src/maglev/maglev-ir.h
index 1f7c5471de025d..9ff1a3085790c8 100644
--- a/deps/v8/src/maglev/maglev-ir.h
+++ b/deps/v8/src/maglev/maglev-ir.h
@@ -196,7 +196,7 @@ class OpProperties {
   }
   constexpr bool is_pure() const {
-    return (bitfield_ | kPureMask) == kPureValue;
+    return (bitfield_ & kPureMask) == kPureValue;
   }
   constexpr bool is_required_when_unused() const {
     return can_write() || non_memory_side_effects();
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 137c3074d503f0..c6e878a1db508e 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -474,7 +474,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
   int trap_handler_index_ = -1;
   // Bits encoded in {flags_}:
-  using KindField = base::BitField8;
+  using KindField = base::BitField8;
   using ExecutionTierField = KindField::Next;
   using ForDebuggingField = ExecutionTierField::Next;

From 6689a9848803fc47b0334a887e3764433d50b63d Mon Sep 17 00:00:00 2001
From: Kumar Rishav
Date: Tue, 2 Apr 2024 22:01:55 +0000
Subject: [PATCH 03/41] http: remove closeIdleConnections function while
 calling server close

Corrects a backporting mistake from
https://github.com/nodejs/node/pull/50194. closeIdleConnections should
not be called during server.close() in Node.js v18. This behavior is for
Node.js v19 and above.
Fixes: https://github.com/nodejs/node/issues/52330 Fixes: https://github.com/nodejs/node/issues/51677 PR-URL: https://github.com/nodejs/node/pull/52336 Refs: https://github.com/nodejs/node/pull/50194 Reviewed-By: Nitzan Uziely --- lib/_http_server.js | 1 - test/parallel/test-http-server-close-idle.js | 1 + test/parallel/test-https-server-close-idle.js | 1 + 3 files changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/_http_server.js b/lib/_http_server.js index 0dbf11ce079156..1f90b2237f6699 100644 --- a/lib/_http_server.js +++ b/lib/_http_server.js @@ -506,7 +506,6 @@ function setupConnectionsTracking() { } function httpServerPreClose(server) { - server.closeIdleConnections(); clearInterval(server[kConnectionsCheckingInterval]); } diff --git a/test/parallel/test-http-server-close-idle.js b/test/parallel/test-http-server-close-idle.js index 36e9752d36b528..361ccf990fabcc 100644 --- a/test/parallel/test-http-server-close-idle.js +++ b/test/parallel/test-http-server-close-idle.js @@ -42,6 +42,7 @@ server.listen(0, function() { assert(response.startsWith('HTTP/1.1 200 OK\r\nConnection: keep-alive')); assert.strictEqual(connections, 2); + server.closeIdleConnections(); server.close(common.mustCall()); // Check that only the idle connection got closed diff --git a/test/parallel/test-https-server-close-idle.js b/test/parallel/test-https-server-close-idle.js index 49b525dd05f117..7f093c47cd8609 100644 --- a/test/parallel/test-https-server-close-idle.js +++ b/test/parallel/test-https-server-close-idle.js @@ -52,6 +52,7 @@ server.listen(0, function() { assert(response.startsWith('HTTP/1.1 200 OK\r\nConnection: keep-alive')); assert.strictEqual(connections, 2); + server.closeIdleConnections(); server.close(common.mustCall()); // Check that only the idle connection got closed From 5186e453d9f5e3e711020ea0ece93b4be667f06e Mon Sep 17 00:00:00 2001 From: Luigi Pinca Date: Sat, 9 Sep 2023 14:44:31 +0200 Subject: [PATCH 04/41] test: deflake test-http-regr-gh-2928 Hard code the value of the host parameter to `common.localhostIPv4` in `server.listen()` and `net.connect()`. This 1. ensures that the client `socket._handle` is not reinitialized during connection due to the family autodetection algorithm, preventing `parser.consume()` from being called with an invalid `socket._handle` parameter. 2. works around an issue in the FreeBSD 12 machine where the stress test is run where some sockets get stuck after connection. PR-URL: https://github.com/nodejs/node/pull/49574 Backport-PR-URL: https://github.com/nodejs/node/pull/52384 Closes: https://github.com/nodejs/node/pull/49565 Fixes: https://github.com/nodejs/node/issues/49564 Reviewed-By: Yagiz Nizipli Reviewed-By: James M Snell --- test/sequential/test-http-regr-gh-2928.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/sequential/test-http-regr-gh-2928.js b/test/sequential/test-http-regr-gh-2928.js index 149aaeb6e6e420..25476e0453c53b 100644 --- a/test/sequential/test-http-regr-gh-2928.js +++ b/test/sequential/test-http-regr-gh-2928.js @@ -25,7 +25,7 @@ function execAndClose() { const parser = parsers.pop(); parser.initialize(HTTPParser.RESPONSE, {}); - const socket = net.connect(common.PORT); + const socket = net.connect(common.PORT, common.localhostIPv4); socket.on('error', (e) => { // If SmartOS and ECONNREFUSED, then retry. See // https://github.com/nodejs/node/issues/2663. 
@@ -57,7 +57,7 @@ const server = net.createServer(function(c) { c.end('HTTP/1.1 200 OK\r\n\r\n', function() { c.destroySoon(); }); -}).listen(common.PORT, execAndClose); +}).listen(common.PORT, common.localhostIPv4, execAndClose); process.on('exit', function() { assert.strictEqual(gotResponses, COUNT); From 5cec2efc31e42fb9f2581ff424ee7cae881f2cd0 Mon Sep 17 00:00:00 2001 From: Luigi Pinca Date: Fri, 20 Oct 2023 14:56:44 +0200 Subject: [PATCH 05/41] test: reduce the number of requests and parsers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The maximum number of parsers in the free list is set to 1000. However the test does not need to use this maximum. Reduce it to 50. Refs: https://github.com/nodejs/node/pull/50228#issuecomment-1768293624 PR-URL: https://github.com/nodejs/node/pull/50240 Backport-PR-URL: https://github.com/nodejs/node/pull/52384 Fixes: https://github.com/nodejs/node/issues/49564 Reviewed-By: Yagiz Nizipli Reviewed-By: Vinícius Lourenço Claro Cardoso --- test/sequential/test-http-regr-gh-2928.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/sequential/test-http-regr-gh-2928.js b/test/sequential/test-http-regr-gh-2928.js index 25476e0453c53b..f6a9e1603288a4 100644 --- a/test/sequential/test-http-regr-gh-2928.js +++ b/test/sequential/test-http-regr-gh-2928.js @@ -8,6 +8,8 @@ const httpCommon = require('_http_common'); const { HTTPParser } = require('_http_common'); const net = require('net'); +httpCommon.parsers.max = 50; + const COUNT = httpCommon.parsers.max + 1; const parsers = new Array(COUNT); From 5e93eae9729dfbf7ad188b92a176385e2971edc5 Mon Sep 17 00:00:00 2001 From: marco-ippolito Date: Thu, 28 Mar 2024 19:16:33 +0100 Subject: [PATCH 06/41] doc: add release key for marco-ippolito PR-URL: https://github.com/nodejs/node/pull/52257 Refs: https://github.com/nodejs/Release/issues/984 Reviewed-By: Antoine du Hamel Reviewed-By: Luigi Pinca Reviewed-By: Richard Lau Reviewed-By: Ruy Adorno Reviewed-By: Rafael Gonzaga Reviewed-By: Matteo Collina --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 62432860a15041..438d970948e1eb 100644 --- a/README.md +++ b/README.md @@ -745,6 +745,8 @@ Primary GPG keys for Node.js Releasers (some Releasers sign with subkeys): `74F12602B6F1C4E913FAA37AD3A89613643B6201` * **Juan José Arboleda** <> `DD792F5973C6DE52C432CBDAC77ABFA00DDBF2B7` +* **Marco Ippolito** <> + `CC68F5A3106FF448322E48ED27F5E38D5B0A215F` * **Michaël Zasso** <> `8FCCA13FEF1D0C2E91008E09770F7A9A5AE15600` * **Myles Borins** <> @@ -766,6 +768,7 @@ gpg --keyserver hkps://keys.openpgp.org --recv-keys 4ED778F539E3634C779C87C6D706 gpg --keyserver hkps://keys.openpgp.org --recv-keys 141F07595B7B3FFE74309A937405533BE57C7D57 gpg --keyserver hkps://keys.openpgp.org --recv-keys 74F12602B6F1C4E913FAA37AD3A89613643B6201 gpg --keyserver hkps://keys.openpgp.org --recv-keys DD792F5973C6DE52C432CBDAC77ABFA00DDBF2B7 +gpg --keyserver hkps://keys.openpgp.org --recv-keys CC68F5A3106FF448322E48ED27F5E38D5B0A215F gpg --keyserver hkps://keys.openpgp.org --recv-keys 8FCCA13FEF1D0C2E91008E09770F7A9A5AE15600 gpg --keyserver hkps://keys.openpgp.org --recv-keys C4F0DFFF4E8C1A8236409D08E73BC641CC11F4C8 gpg --keyserver hkps://keys.openpgp.org --recv-keys 890C08DB8579162FEE0DF9DB8BEAB4DFCF555EF4 From e28316da54dabfbf569c7cc50b82eff1a1bea9e3 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Thu, 2 Nov 2023 07:22:34 +0000 Subject: [PATCH 07/41] deps: update nghttp2 to 1.58.0 MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/50441 Reviewed-By: Michaël Zasso Reviewed-By: Marco Ippolito Reviewed-By: James M Snell Reviewed-By: Luigi Pinca --- deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h | 4 ++-- deps/nghttp2/lib/nghttp2_map.c | 2 ++ deps/nghttp2/lib/nghttp2_map.h | 2 ++ deps/nghttp2/lib/nghttp2_time.c | 14 +++++++------- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h index f56954e7fded45..f38fe2b0478ff3 100644 --- a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h +++ b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h @@ -29,7 +29,7 @@ * @macro * Version number of the nghttp2 library release */ -#define NGHTTP2_VERSION "1.57.0" +#define NGHTTP2_VERSION "1.58.0" /** * @macro @@ -37,6 +37,6 @@ * release. This is a 24 bit number with 8 bits for major number, 8 bits * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203. */ -#define NGHTTP2_VERSION_NUM 0x013900 +#define NGHTTP2_VERSION_NUM 0x013a00 #endif /* NGHTTP2VER_H */ diff --git a/deps/nghttp2/lib/nghttp2_map.c b/deps/nghttp2/lib/nghttp2_map.c index 5f63fc2bb87e99..0aaaf29155cd02 100644 --- a/deps/nghttp2/lib/nghttp2_map.c +++ b/deps/nghttp2/lib/nghttp2_map.c @@ -126,6 +126,7 @@ static void map_bucket_set_data(nghttp2_map_bucket *bkt, uint32_t hash, bkt->data = data; } +#ifndef WIN32 void nghttp2_map_print_distance(nghttp2_map *map) { uint32_t i; size_t idx; @@ -145,6 +146,7 @@ void nghttp2_map_print_distance(nghttp2_map *map) { distance(map->tablelen, map->tablelenbits, bkt, idx)); } } +#endif /* !WIN32 */ static int insert(nghttp2_map_bucket *table, uint32_t tablelen, uint32_t tablelenbits, uint32_t hash, diff --git a/deps/nghttp2/lib/nghttp2_map.h b/deps/nghttp2/lib/nghttp2_map.h index d90245aab74c97..236d28296e31da 100644 --- a/deps/nghttp2/lib/nghttp2_map.h +++ b/deps/nghttp2/lib/nghttp2_map.h @@ -131,6 +131,8 @@ size_t nghttp2_map_size(nghttp2_map *map); int nghttp2_map_each(nghttp2_map *map, int (*func)(void *data, void *ptr), void *ptr); +#ifndef WIN32 void nghttp2_map_print_distance(nghttp2_map *map); +#endif /* !WIN32 */ #endif /* NGHTTP2_MAP_H */ diff --git a/deps/nghttp2/lib/nghttp2_time.c b/deps/nghttp2/lib/nghttp2_time.c index 2a5f1a6ff524df..897556fe2c1a51 100644 --- a/deps/nghttp2/lib/nghttp2_time.c +++ b/deps/nghttp2/lib/nghttp2_time.c @@ -32,7 +32,7 @@ # include #endif /* HAVE_SYSINFOAPI_H */ -#ifndef HAVE_GETTICKCOUNT64 +#if !defined(HAVE_GETTICKCOUNT64) || defined(__CYGWIN__) static uint64_t time_now_sec(void) { time_t t = time(NULL); @@ -42,9 +42,11 @@ static uint64_t time_now_sec(void) { return (uint64_t)t; } -#endif /* HAVE_GETTICKCOUNT64 */ +#endif /* !HAVE_GETTICKCOUNT64 || __CYGWIN__ */ -#ifdef HAVE_CLOCK_GETTIME +#if defined(HAVE_GETTICKCOUNT64) && !defined(__CYGWIN__) +uint64_t nghttp2_time_now_sec(void) { return GetTickCount64() / 1000; } +#elif defined(HAVE_CLOCK_GETTIME) uint64_t nghttp2_time_now_sec(void) { struct timespec tp; int rv = clock_gettime(CLOCK_MONOTONIC, &tp); @@ -55,8 +57,6 @@ uint64_t nghttp2_time_now_sec(void) { return (uint64_t)tp.tv_sec; } -#elif defined(HAVE_GETTICKCOUNT64) -uint64_t nghttp2_time_now_sec(void) { return GetTickCount64() / 1000; } -#else /* !HAVE_CLOCK_GETTIME && !HAVE_GETTICKCOUNT64 */ +#else /* (!HAVE_CLOCK_GETTIME || __CYGWIN__) && !HAVE_GETTICKCOUNT64 */ uint64_t nghttp2_time_now_sec(void) { return time_now_sec(); } -#endif /* !HAVE_CLOCK_GETTIME && 
!HAVE_GETTICKCOUNT64 */ +#endif /* (!HAVE_CLOCK_GETTIME || __CYGWIN__) && !HAVE_GETTICKCOUNT64 */ From 3c9dbbf4d4666b3aa860e7836435776bb19fa24c Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Thu, 1 Feb 2024 17:48:53 +0200 Subject: [PATCH 08/41] deps: update nghttp2 to 1.59.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/51581 Reviewed-By: Michaël Zasso Reviewed-By: Matteo Collina --- deps/nghttp2/lib/CMakeLists.txt | 2 +- deps/nghttp2/lib/Makefile.am | 4 +- deps/nghttp2/lib/Makefile.in | 22 +-- deps/nghttp2/lib/Makefile.msvc | 2 +- deps/nghttp2/lib/includes/nghttp2/nghttp2.h | 137 ++++++++++++++++-- .../nghttp2/lib/includes/nghttp2/nghttp2ver.h | 4 +- .../lib/{nghttp2_npn.c => nghttp2_alpn.c} | 29 +++- .../lib/{nghttp2_npn.h => nghttp2_alpn.h} | 6 +- deps/nghttp2/lib/nghttp2_extpri.c | 6 + deps/nghttp2/lib/nghttp2_session.c | 29 +++- deps/nghttp2/lib/nghttp2_time.c | 19 +-- deps/nghttp2/nghttp2.gyp | 2 +- 12 files changed, 209 insertions(+), 53 deletions(-) rename deps/nghttp2/lib/{nghttp2_npn.c => nghttp2_alpn.c} (65%) rename deps/nghttp2/lib/{nghttp2_npn.h => nghttp2_alpn.h} (94%) diff --git a/deps/nghttp2/lib/CMakeLists.txt b/deps/nghttp2/lib/CMakeLists.txt index 7adba3a3ffa2da..4180748f209ff0 100644 --- a/deps/nghttp2/lib/CMakeLists.txt +++ b/deps/nghttp2/lib/CMakeLists.txt @@ -14,7 +14,7 @@ set(NGHTTP2_SOURCES nghttp2_stream.c nghttp2_outbound_item.c nghttp2_session.c nghttp2_submit.c nghttp2_helper.c - nghttp2_npn.c + nghttp2_alpn.c nghttp2_hd.c nghttp2_hd_huffman.c nghttp2_hd_huffman_data.c nghttp2_version.c nghttp2_priority_spec.c diff --git a/deps/nghttp2/lib/Makefile.am b/deps/nghttp2/lib/Makefile.am index c3ace4029a69b8..1168c1e6135661 100644 --- a/deps/nghttp2/lib/Makefile.am +++ b/deps/nghttp2/lib/Makefile.am @@ -41,7 +41,7 @@ OBJECTS = nghttp2_pq.c nghttp2_map.c nghttp2_queue.c \ nghttp2_stream.c nghttp2_outbound_item.c \ nghttp2_session.c nghttp2_submit.c \ nghttp2_helper.c \ - nghttp2_npn.c \ + nghttp2_alpn.c \ nghttp2_hd.c nghttp2_hd_huffman.c nghttp2_hd_huffman_data.c \ nghttp2_version.c \ nghttp2_priority_spec.c \ @@ -60,7 +60,7 @@ HFILES = nghttp2_pq.h nghttp2_int.h nghttp2_map.h nghttp2_queue.h \ nghttp2_frame.h \ nghttp2_buf.h \ nghttp2_session.h nghttp2_helper.h nghttp2_stream.h nghttp2_int.h \ - nghttp2_npn.h \ + nghttp2_alpn.h \ nghttp2_submit.h nghttp2_outbound_item.h \ nghttp2_net.h \ nghttp2_hd.h nghttp2_hd_huffman.h \ diff --git a/deps/nghttp2/lib/Makefile.in b/deps/nghttp2/lib/Makefile.in index 0b95613bc21808..81d285390957c5 100644 --- a/deps/nghttp2/lib/Makefile.in +++ b/deps/nghttp2/lib/Makefile.in @@ -153,7 +153,7 @@ am__objects_1 = am__objects_2 = nghttp2_pq.lo nghttp2_map.lo nghttp2_queue.lo \ nghttp2_frame.lo nghttp2_buf.lo nghttp2_stream.lo \ nghttp2_outbound_item.lo nghttp2_session.lo nghttp2_submit.lo \ - nghttp2_helper.lo nghttp2_npn.lo nghttp2_hd.lo \ + nghttp2_helper.lo nghttp2_alpn.lo nghttp2_hd.lo \ nghttp2_hd_huffman.lo nghttp2_hd_huffman_data.lo \ nghttp2_version.lo nghttp2_priority_spec.lo nghttp2_option.lo \ nghttp2_callbacks.lo nghttp2_mem.lo nghttp2_http.lo \ @@ -183,15 +183,15 @@ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/depcomp am__maybe_remake_depfiles = depfiles -am__depfiles_remade = ./$(DEPDIR)/nghttp2_buf.Plo \ - ./$(DEPDIR)/nghttp2_callbacks.Plo \ +am__depfiles_remade = ./$(DEPDIR)/nghttp2_alpn.Plo \ + ./$(DEPDIR)/nghttp2_buf.Plo ./$(DEPDIR)/nghttp2_callbacks.Plo \ 
./$(DEPDIR)/nghttp2_debug.Plo ./$(DEPDIR)/nghttp2_extpri.Plo \ ./$(DEPDIR)/nghttp2_frame.Plo ./$(DEPDIR)/nghttp2_hd.Plo \ ./$(DEPDIR)/nghttp2_hd_huffman.Plo \ ./$(DEPDIR)/nghttp2_hd_huffman_data.Plo \ ./$(DEPDIR)/nghttp2_helper.Plo ./$(DEPDIR)/nghttp2_http.Plo \ ./$(DEPDIR)/nghttp2_map.Plo ./$(DEPDIR)/nghttp2_mem.Plo \ - ./$(DEPDIR)/nghttp2_npn.Plo ./$(DEPDIR)/nghttp2_option.Plo \ + ./$(DEPDIR)/nghttp2_option.Plo \ ./$(DEPDIR)/nghttp2_outbound_item.Plo \ ./$(DEPDIR)/nghttp2_pq.Plo \ ./$(DEPDIR)/nghttp2_priority_spec.Plo \ @@ -502,7 +502,7 @@ OBJECTS = nghttp2_pq.c nghttp2_map.c nghttp2_queue.c \ nghttp2_stream.c nghttp2_outbound_item.c \ nghttp2_session.c nghttp2_submit.c \ nghttp2_helper.c \ - nghttp2_npn.c \ + nghttp2_alpn.c \ nghttp2_hd.c nghttp2_hd_huffman.c nghttp2_hd_huffman_data.c \ nghttp2_version.c \ nghttp2_priority_spec.c \ @@ -521,7 +521,7 @@ HFILES = nghttp2_pq.h nghttp2_int.h nghttp2_map.h nghttp2_queue.h \ nghttp2_frame.h \ nghttp2_buf.h \ nghttp2_session.h nghttp2_helper.h nghttp2_stream.h nghttp2_int.h \ - nghttp2_npn.h \ + nghttp2_alpn.h \ nghttp2_submit.h nghttp2_outbound_item.h \ nghttp2_net.h \ nghttp2_hd.h nghttp2_hd_huffman.h \ @@ -621,6 +621,7 @@ mostlyclean-compile: distclean-compile: -rm -f *.tab.c +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nghttp2_alpn.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nghttp2_buf.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nghttp2_callbacks.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nghttp2_debug.Plo@am__quote@ # am--include-marker @@ -633,7 +634,6 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nghttp2_http.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nghttp2_map.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nghttp2_mem.Plo@am__quote@ # am--include-marker -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nghttp2_npn.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nghttp2_option.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nghttp2_outbound_item.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nghttp2_pq.Plo@am__quote@ # am--include-marker @@ -906,7 +906,8 @@ clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-recursive - -rm -f ./$(DEPDIR)/nghttp2_buf.Plo + -rm -f ./$(DEPDIR)/nghttp2_alpn.Plo + -rm -f ./$(DEPDIR)/nghttp2_buf.Plo -rm -f ./$(DEPDIR)/nghttp2_callbacks.Plo -rm -f ./$(DEPDIR)/nghttp2_debug.Plo -rm -f ./$(DEPDIR)/nghttp2_extpri.Plo @@ -918,7 +919,6 @@ distclean: distclean-recursive -rm -f ./$(DEPDIR)/nghttp2_http.Plo -rm -f ./$(DEPDIR)/nghttp2_map.Plo -rm -f ./$(DEPDIR)/nghttp2_mem.Plo - -rm -f ./$(DEPDIR)/nghttp2_npn.Plo -rm -f ./$(DEPDIR)/nghttp2_option.Plo -rm -f ./$(DEPDIR)/nghttp2_outbound_item.Plo -rm -f ./$(DEPDIR)/nghttp2_pq.Plo @@ -977,7 +977,8 @@ install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive - -rm -f ./$(DEPDIR)/nghttp2_buf.Plo + -rm -f ./$(DEPDIR)/nghttp2_alpn.Plo + -rm -f ./$(DEPDIR)/nghttp2_buf.Plo -rm -f ./$(DEPDIR)/nghttp2_callbacks.Plo -rm -f ./$(DEPDIR)/nghttp2_debug.Plo -rm -f ./$(DEPDIR)/nghttp2_extpri.Plo @@ -989,7 +990,6 @@ maintainer-clean: maintainer-clean-recursive -rm -f ./$(DEPDIR)/nghttp2_http.Plo -rm -f ./$(DEPDIR)/nghttp2_map.Plo -rm -f ./$(DEPDIR)/nghttp2_mem.Plo - -rm -f 
./$(DEPDIR)/nghttp2_npn.Plo -rm -f ./$(DEPDIR)/nghttp2_option.Plo -rm -f ./$(DEPDIR)/nghttp2_outbound_item.Plo -rm -f ./$(DEPDIR)/nghttp2_pq.Plo diff --git a/deps/nghttp2/lib/Makefile.msvc b/deps/nghttp2/lib/Makefile.msvc index 611b39d0b1d95e..752389e0fc485a 100644 --- a/deps/nghttp2/lib/Makefile.msvc +++ b/deps/nghttp2/lib/Makefile.msvc @@ -74,7 +74,7 @@ NGHTTP2_SRC := nghttp2_pq.c \ nghttp2_session.c \ nghttp2_submit.c \ nghttp2_helper.c \ - nghttp2_npn.c \ + nghttp2_alpn.c \ nghttp2_hd.c \ nghttp2_hd_huffman.c \ nghttp2_hd_huffman_data.c \ diff --git a/deps/nghttp2/lib/includes/nghttp2/nghttp2.h b/deps/nghttp2/lib/includes/nghttp2/nghttp2.h index fa22081c517497..7910db230aae3e 100644 --- a/deps/nghttp2/lib/includes/nghttp2/nghttp2.h +++ b/deps/nghttp2/lib/includes/nghttp2/nghttp2.h @@ -1997,7 +1997,7 @@ typedef int (*nghttp2_on_extension_chunk_recv_callback)( * ``NULL``. The |*payload| is available as ``frame->ext.payload`` in * :type:`nghttp2_on_frame_recv_callback`. Therefore if application * can free that memory inside :type:`nghttp2_on_frame_recv_callback` - * callback. Of course, application has a liberty not ot use + * callback. Of course, application has a liberty not to use * |*payload|, and do its own mechanism to process extension frames. * * To abort processing this extension frame, return @@ -4958,6 +4958,55 @@ NGHTTP2_EXTERN int nghttp2_session_change_extpri_stream_priority( nghttp2_session *session, int32_t stream_id, const nghttp2_extpri *extpri, int ignore_client_signal); +/** + * @function + * + * Stores the stream priority of the existing stream denoted by + * |stream_id| in the object pointed by |extpri|. This function is + * meant to be used by server for :rfc:`9218` extensible + * prioritization scheme. + * + * If |session| is initialized as client, this function returns + * :enum:`nghttp2_error.NGHTTP2_ERR_INVALID_STATE`. + * + * If + * :enum:`nghttp2_settings_id.NGHTTP2_SETTINGS_NO_RFC7540_PRIORITIES` + * of value of 1 is not submitted via `nghttp2_submit_settings()`, + * this function does nothing and returns 0. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :enum:`nghttp2_error.NGHTTP2_ERR_INVALID_STATE` + * The |session| is initialized as client. + * :enum:`nghttp2_error.NGHTTP2_ERR_INVALID_ARGUMENT` + * |stream_id| is zero; or a stream denoted by |stream_id| is not + * found. + */ +NGHTTP2_EXTERN int nghttp2_session_get_extpri_stream_priority( + nghttp2_session *session, nghttp2_extpri *extpri, int32_t stream_id); + +/** + * @function + * + * Parses Priority header field value pointed by |value| of length + * |len|, and stores the result in the object pointed by |extpri|. + * Priority header field is defined in :rfc:`9218`. + * + * This function does not initialize the object pointed by |extpri| + * before storing the result. It only assigns the values that the + * parser correctly extracted to fields. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :enum:`nghttp2_error.NGHTTP2_ERR_INVALID_ARGUMENT` + * Failed to parse the header field value. + */ +NGHTTP2_EXTERN int nghttp2_extpri_parse_priority(nghttp2_extpri *extpri, + const uint8_t *value, + size_t len); + /** * @function * @@ -4973,11 +5022,14 @@ NGHTTP2_EXTERN int nghttp2_nv_compare_name(const nghttp2_nv *lhs, /** * @function * - * A helper function for dealing with NPN in client side or ALPN in - * server side. The |in| contains peer's protocol list in preferable - * order. 
The format of |in| is length-prefixed and not - * null-terminated. For example, ``h2`` and - * ``http/1.1`` stored in |in| like this:: + * .. warning:: + * + * Deprecated. Use `nghttp2_select_alpn` instead. + * + * A helper function for dealing with ALPN in server side. The |in| + * contains peer's protocol list in preferable order. The format of + * |in| is length-prefixed and not null-terminated. For example, + * ``h2`` and ``http/1.1`` stored in |in| like this:: * * in[0] = 2 * in[1..2] = "h2" @@ -5002,20 +5054,18 @@ NGHTTP2_EXTERN int nghttp2_nv_compare_name(const nghttp2_nv *lhs, * * For ALPN, refer to https://tools.ietf.org/html/rfc7301 * - * See http://technotes.googlecode.com/git/nextprotoneg.html for more - * details about NPN. - * - * For NPN, to use this method you should do something like:: + * To use this method you should do something like:: * - * static int select_next_proto_cb(SSL* ssl, - * unsigned char **out, + * static int alpn_select_proto_cb(SSL* ssl, + * const unsigned char **out, * unsigned char *outlen, * const unsigned char *in, * unsigned int inlen, * void *arg) * { * int rv; - * rv = nghttp2_select_next_protocol(out, outlen, in, inlen); + * rv = nghttp2_select_next_protocol((unsigned char**)out, outlen, + * in, inlen); * if (rv == -1) { * return SSL_TLSEXT_ERR_NOACK; * } @@ -5025,7 +5075,7 @@ NGHTTP2_EXTERN int nghttp2_nv_compare_name(const nghttp2_nv *lhs, * return SSL_TLSEXT_ERR_OK; * } * ... - * SSL_CTX_set_next_proto_select_cb(ssl_ctx, select_next_proto_cb, my_obj); + * SSL_CTX_set_alpn_select_cb(ssl_ctx, alpn_select_proto_cb, my_obj); * */ NGHTTP2_EXTERN int nghttp2_select_next_protocol(unsigned char **out, @@ -5033,6 +5083,65 @@ NGHTTP2_EXTERN int nghttp2_select_next_protocol(unsigned char **out, const unsigned char *in, unsigned int inlen); +/** + * @function + * + * A helper function for dealing with ALPN in server side. The |in| + * contains peer's protocol list in preferable order. The format of + * |in| is length-prefixed and not null-terminated. For example, + * ``h2`` and ``http/1.1`` stored in |in| like this:: + * + * in[0] = 2 + * in[1..2] = "h2" + * in[3] = 8 + * in[4..11] = "http/1.1" + * inlen = 12 + * + * The selection algorithm is as follows: + * + * 1. If peer's list contains HTTP/2 protocol the library supports, + * it is selected and returns 1. The following step is not taken. + * + * 2. If peer's list contains ``http/1.1``, this function selects + * ``http/1.1`` and returns 0. The following step is not taken. + * + * 3. This function selects nothing and returns -1 (So called + * non-overlap case). In this case, |out| and |outlen| are left + * untouched. + * + * Selecting ``h2`` means that ``h2`` is written into |*out| and its + * length (which is 2) is assigned to |*outlen|. + * + * For ALPN, refer to https://tools.ietf.org/html/rfc7301 + * + * To use this method you should do something like:: + * + * static int alpn_select_proto_cb(SSL* ssl, + * const unsigned char **out, + * unsigned char *outlen, + * const unsigned char *in, + * unsigned int inlen, + * void *arg) + * { + * int rv; + * rv = nghttp2_select_alpn(out, outlen, in, inlen); + * if (rv == -1) { + * return SSL_TLSEXT_ERR_NOACK; + * } + * if (rv == 1) { + * ((MyType*)arg)->http2_selected = 1; + * } + * return SSL_TLSEXT_ERR_OK; + * } + * ... 
+ * SSL_CTX_set_alpn_select_cb(ssl_ctx, alpn_select_proto_cb, my_obj); + * + */ +NGHTTP2_EXTERN int nghttp2_select_alpn(const unsigned char **out, + unsigned char *outlen, + const unsigned char *in, + unsigned int inlen); + /** * @function * diff --git a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h index f38fe2b0478ff3..518755bbab7914 100644 --- a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h +++ b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h @@ -29,7 +29,7 @@ * @macro * Version number of the nghttp2 library release */ -#define NGHTTP2_VERSION "1.58.0" +#define NGHTTP2_VERSION "1.59.0" /** * @macro @@ -37,6 +37,6 @@ * release. This is a 24 bit number with 8 bits for major number, 8 bits * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203. */ -#define NGHTTP2_VERSION_NUM 0x013a00 +#define NGHTTP2_VERSION_NUM 0x013b00 #endif /* NGHTTP2VER_H */ diff --git a/deps/nghttp2/lib/nghttp2_npn.c b/deps/nghttp2/lib/nghttp2_alpn.c similarity index 65% rename from deps/nghttp2/lib/nghttp2_npn.c rename to deps/nghttp2/lib/nghttp2_alpn.c index d1384c80758d9b..33c5885f8d889f 100644 --- a/deps/nghttp2/lib/nghttp2_npn.c +++ b/deps/nghttp2/lib/nghttp2_alpn.c @@ -22,13 +22,13 @@ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#include "nghttp2_npn.h" +#include "nghttp2_alpn.h" #include -static int select_next_protocol(unsigned char **out, unsigned char *outlen, - const unsigned char *in, unsigned int inlen, - const char *key, unsigned int keylen) { +static int select_alpn(const unsigned char **out, unsigned char *outlen, + const unsigned char *in, unsigned int inlen, + const char *key, unsigned int keylen) { unsigned int i; for (i = 0; i + keylen <= inlen; i += (unsigned int)(in[i] + 1)) { if (memcmp(&in[i], key, keylen) == 0) { @@ -45,12 +45,25 @@ static int select_next_protocol(unsigned char **out, unsigned char *outlen, int nghttp2_select_next_protocol(unsigned char **out, unsigned char *outlen, const unsigned char *in, unsigned int inlen) { - if (select_next_protocol(out, outlen, in, inlen, NGHTTP2_PROTO_ALPN, - NGHTTP2_PROTO_ALPN_LEN) == 0) { + if (select_alpn((const unsigned char **)out, outlen, in, inlen, + NGHTTP2_PROTO_ALPN, NGHTTP2_PROTO_ALPN_LEN) == 0) { return 1; } - if (select_next_protocol(out, outlen, in, inlen, NGHTTP2_HTTP_1_1_ALPN, - NGHTTP2_HTTP_1_1_ALPN_LEN) == 0) { + if (select_alpn((const unsigned char **)out, outlen, in, inlen, + NGHTTP2_HTTP_1_1_ALPN, NGHTTP2_HTTP_1_1_ALPN_LEN) == 0) { + return 0; + } + return -1; +} + +int nghttp2_select_alpn(const unsigned char **out, unsigned char *outlen, + const unsigned char *in, unsigned int inlen) { + if (select_alpn(out, outlen, in, inlen, NGHTTP2_PROTO_ALPN, + NGHTTP2_PROTO_ALPN_LEN) == 0) { + return 1; + } + if (select_alpn(out, outlen, in, inlen, NGHTTP2_HTTP_1_1_ALPN, + NGHTTP2_HTTP_1_1_ALPN_LEN) == 0) { return 0; } return -1; diff --git a/deps/nghttp2/lib/nghttp2_npn.h b/deps/nghttp2/lib/nghttp2_alpn.h similarity index 94% rename from deps/nghttp2/lib/nghttp2_npn.h rename to deps/nghttp2/lib/nghttp2_alpn.h index c6f1c04b683594..09810fd821490a 100644 --- a/deps/nghttp2/lib/nghttp2_npn.h +++ b/deps/nghttp2/lib/nghttp2_alpn.h @@ -22,8 +22,8 @@ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -#ifndef NGHTTP2_NPN_H -#define NGHTTP2_NPN_H +#ifndef NGHTTP2_ALPN_H +#define NGHTTP2_ALPN_H #ifdef HAVE_CONFIG_H # include @@ -31,4 +31,4 @@ #include -#endif /* NGHTTP2_NPN_H */ +#endif /* NGHTTP2_ALPN_H */ diff --git a/deps/nghttp2/lib/nghttp2_extpri.c b/deps/nghttp2/lib/nghttp2_extpri.c index 3fd9b78163e323..ba0263e7c8eb28 100644 --- a/deps/nghttp2/lib/nghttp2_extpri.c +++ b/deps/nghttp2/lib/nghttp2_extpri.c @@ -24,6 +24,7 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "nghttp2_extpri.h" +#include "nghttp2_http.h" uint8_t nghttp2_extpri_to_uint8(const nghttp2_extpri *extpri) { return (uint8_t)((uint32_t)extpri->inc << 7 | extpri->urgency); @@ -33,3 +34,8 @@ void nghttp2_extpri_from_uint8(nghttp2_extpri *extpri, uint8_t u8extpri) { extpri->urgency = nghttp2_extpri_uint8_urgency(u8extpri); extpri->inc = nghttp2_extpri_uint8_inc(u8extpri); } + +int nghttp2_extpri_parse_priority(nghttp2_extpri *extpri, const uint8_t *value, + size_t len) { + return nghttp2_http_parse_priority(extpri, value, len); +} diff --git a/deps/nghttp2/lib/nghttp2_session.c b/deps/nghttp2/lib/nghttp2_session.c index ec5024d0f2168e..ce21caf9d7bb9d 100644 --- a/deps/nghttp2/lib/nghttp2_session.c +++ b/deps/nghttp2/lib/nghttp2_session.c @@ -3302,7 +3302,7 @@ static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session, } if (rv == NGHTTP2_ERR_HEADER_COMP) { - /* If header compression error occurred, should terminiate + /* If header compression error occurred, should terminate connection. */ rv = nghttp2_session_terminate_session(session, NGHTTP2_INTERNAL_ERROR); @@ -8366,3 +8366,30 @@ int nghttp2_session_change_extpri_stream_priority( return session_update_stream_priority(session, stream, nghttp2_extpri_to_uint8(&extpri)); } + +int nghttp2_session_get_extpri_stream_priority(nghttp2_session *session, + nghttp2_extpri *extpri, + int32_t stream_id) { + nghttp2_stream *stream; + + if (!session->server) { + return NGHTTP2_ERR_INVALID_STATE; + } + + if (session->pending_no_rfc7540_priorities != 1) { + return 0; + } + + if (stream_id == 0) { + return NGHTTP2_ERR_INVALID_ARGUMENT; + } + + stream = nghttp2_session_get_stream_raw(session, stream_id); + if (!stream) { + return NGHTTP2_ERR_INVALID_ARGUMENT; + } + + nghttp2_extpri_from_uint8(extpri, stream->extpri); + + return 0; +} diff --git a/deps/nghttp2/lib/nghttp2_time.c b/deps/nghttp2/lib/nghttp2_time.c index 897556fe2c1a51..947b5449e5ac6d 100644 --- a/deps/nghttp2/lib/nghttp2_time.c +++ b/deps/nghttp2/lib/nghttp2_time.c @@ -24,13 +24,11 @@ */ #include "nghttp2_time.h" -#ifdef HAVE_TIME_H -# include -#endif /* HAVE_TIME_H */ +#ifdef HAVE_WINDOWS_H +# include +#endif /* HAVE_WINDOWS_H */ -#ifdef HAVE_SYSINFOAPI_H -# include -#endif /* HAVE_SYSINFOAPI_H */ +#include #if !defined(HAVE_GETTICKCOUNT64) || defined(__CYGWIN__) static uint64_t time_now_sec(void) { @@ -46,7 +44,8 @@ static uint64_t time_now_sec(void) { #if defined(HAVE_GETTICKCOUNT64) && !defined(__CYGWIN__) uint64_t nghttp2_time_now_sec(void) { return GetTickCount64() / 1000; } -#elif defined(HAVE_CLOCK_GETTIME) +#elif defined(HAVE_CLOCK_GETTIME) && defined(HAVE_DECL_CLOCK_MONOTONIC) && \ + HAVE_DECL_CLOCK_MONOTONIC uint64_t nghttp2_time_now_sec(void) { struct timespec tp; int rv = clock_gettime(CLOCK_MONOTONIC, &tp); @@ -57,6 +56,8 @@ uint64_t nghttp2_time_now_sec(void) { return (uint64_t)tp.tv_sec; } -#else /* (!HAVE_CLOCK_GETTIME || __CYGWIN__) && !HAVE_GETTICKCOUNT64 */ +#else /* (!HAVE_CLOCK_GETTIME || !HAVE_DECL_CLOCK_MONOTONIC) && \ + (!HAVE_GETTICKCOUNT64 || 
__CYGWIN__)) */ uint64_t nghttp2_time_now_sec(void) { return time_now_sec(); } -#endif /* (!HAVE_CLOCK_GETTIME || __CYGWIN__) && !HAVE_GETTICKCOUNT64 */ +#endif /* (!HAVE_CLOCK_GETTIME || !HAVE_DECL_CLOCK_MONOTONIC) && \ + (!HAVE_GETTICKCOUNT64 || __CYGWIN__)) */ diff --git a/deps/nghttp2/nghttp2.gyp b/deps/nghttp2/nghttp2.gyp index 87909c5f296a2c..2601342f9e6f75 100644 --- a/deps/nghttp2/nghttp2.gyp +++ b/deps/nghttp2/nghttp2.gyp @@ -13,7 +13,7 @@ 'lib/nghttp2_http.c', 'lib/nghttp2_map.c', 'lib/nghttp2_mem.c', - 'lib/nghttp2_npn.c', + 'lib/nghttp2_alpn.c', 'lib/nghttp2_option.c', 'lib/nghttp2_outbound_item.c', 'lib/nghttp2_pq.c', From 1b6fa706205c6abcc89dd9b091ccb954b17d5e7d Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Tue, 5 Mar 2024 02:45:17 +0200 Subject: [PATCH 09/41] deps: update nghttp2 to 1.60.0 PR-URL: https://github.com/nodejs/node/pull/51948 Reviewed-By: Luigi Pinca Reviewed-By: Marco Ippolito Reviewed-By: Rafael Gonzaga --- deps/nghttp2/lib/CMakeLists.txt | 69 +- deps/nghttp2/lib/Makefile.in | 6 +- deps/nghttp2/lib/includes/Makefile.in | 6 +- deps/nghttp2/lib/includes/nghttp2/nghttp2.h | 1246 +++++++++++++++-- .../nghttp2/lib/includes/nghttp2/nghttp2ver.h | 4 +- deps/nghttp2/lib/nghttp2_buf.c | 4 +- deps/nghttp2/lib/nghttp2_buf.h | 2 +- deps/nghttp2/lib/nghttp2_callbacks.c | 28 + deps/nghttp2/lib/nghttp2_callbacks.h | 45 +- deps/nghttp2/lib/nghttp2_hd.c | 114 +- deps/nghttp2/lib/nghttp2_hd.h | 20 +- deps/nghttp2/lib/nghttp2_hd_huffman.c | 8 +- deps/nghttp2/lib/nghttp2_outbound_item.c | 26 + deps/nghttp2/lib/nghttp2_outbound_item.h | 27 +- deps/nghttp2/lib/nghttp2_session.c | 332 +++-- deps/nghttp2/lib/nghttp2_submit.c | 125 +- deps/nghttp2/lib/nghttp2_submit.h | 6 + 17 files changed, 1706 insertions(+), 362 deletions(-) diff --git a/deps/nghttp2/lib/CMakeLists.txt b/deps/nghttp2/lib/CMakeLists.txt index 4180748f209ff0..211c8e4340da12 100644 --- a/deps/nghttp2/lib/CMakeLists.txt +++ b/deps/nghttp2/lib/CMakeLists.txt @@ -31,6 +31,8 @@ set(NGHTTP2_SOURCES ) set(NGHTTP2_RES "") +set(STATIC_LIB "nghttp2_static") +set(SHARED_LIB "nghttp2") if(WIN32) configure_file( @@ -41,40 +43,61 @@ if(WIN32) set(NGHTTP2_RES ${CMAKE_CURRENT_BINARY_DIR}/version.rc) endif() +set(EXPORT_SET "${PROJECT_NAME}-targets") + # Public shared library -if(ENABLE_SHARED_LIB) - add_library(nghttp2 SHARED ${NGHTTP2_SOURCES} ${NGHTTP2_RES}) - set_target_properties(nghttp2 PROPERTIES +if(BUILD_SHARED_LIBS) + add_library(${SHARED_LIB} SHARED ${NGHTTP2_SOURCES} ${NGHTTP2_RES}) + + set_target_properties(${SHARED_LIB} PROPERTIES COMPILE_FLAGS "${WARNCFLAGS}" VERSION ${LT_VERSION} SOVERSION ${LT_SOVERSION} C_VISIBILITY_PRESET hidden ) - target_include_directories(nghttp2 INTERFACE - "${CMAKE_CURRENT_BINARY_DIR}/includes" - "${CMAKE_CURRENT_SOURCE_DIR}/includes" + + target_include_directories(${SHARED_LIB} INTERFACE + $ + $ + $ ) - install(TARGETS nghttp2 - ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" - LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" - RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}") + install(TARGETS ${SHARED_LIB} EXPORT ${EXPORT_SET}) + list(APPEND nghttp2_exports ${SHARED_LIB}) endif() -if(HAVE_CUNIT OR ENABLE_STATIC_LIB) - # Static library (for unittests because of symbol visibility) - add_library(nghttp2_static STATIC ${NGHTTP2_SOURCES}) - set_target_properties(nghttp2_static PROPERTIES - COMPILE_FLAGS "${WARNCFLAGS}" - VERSION ${LT_VERSION} SOVERSION ${LT_SOVERSION} - ARCHIVE_OUTPUT_NAME nghttp2${STATIC_LIB_SUFFIX} - ) - target_compile_definitions(nghttp2_static PUBLIC 
"-DNGHTTP2_STATICLIB") - if(ENABLE_STATIC_LIB) - install(TARGETS nghttp2_static - DESTINATION "${CMAKE_INSTALL_LIBDIR}") - endif() +# Static library (for unittests because of symbol visibility) +add_library(${STATIC_LIB} STATIC ${NGHTTP2_SOURCES}) + +set_target_properties(${STATIC_LIB} PROPERTIES + COMPILE_FLAGS "${WARNCFLAGS}" + VERSION ${LT_VERSION} SOVERSION ${LT_SOVERSION} + ARCHIVE_OUTPUT_NAME nghttp2${STATIC_LIB_SUFFIX} +) + +target_include_directories(${STATIC_LIB} INTERFACE + $ + $ + $ +) + +target_compile_definitions(${STATIC_LIB} PUBLIC "-DNGHTTP2_STATICLIB") + +if(BUILD_STATIC_LIBS) + install(TARGETS ${STATIC_LIB} EXPORT ${EXPORT_SET}) + list(APPEND nghttp2_exports ${STATIC_LIB}) endif() +if(BUILD_SHARED_LIBS) + set(LIB_SELECTED ${SHARED_LIB}) +else() + set(LIB_SELECTED ${STATIC_LIB}) +endif() + +add_library(${PROJECT_NAME}::nghttp2 ALIAS ${LIB_SELECTED}) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/libnghttp2.pc" DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") + +install(EXPORT ${EXPORT_SET} + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} + NAMESPACE ${PROJECT_NAME}::) diff --git a/deps/nghttp2/lib/Makefile.in b/deps/nghttp2/lib/Makefile.in index 81d285390957c5..53ca2403de3552 100644 --- a/deps/nghttp2/lib/Makefile.in +++ b/deps/nghttp2/lib/Makefile.in @@ -306,8 +306,6 @@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CSCOPE = @CSCOPE@ CTAGS = @CTAGS@ -CUNIT_CFLAGS = @CUNIT_CFLAGS@ -CUNIT_LIBS = @CUNIT_LIBS@ CXX = @CXX@ CXX1XCXXFLAGS = @CXX1XCXXFLAGS@ CXXCPP = @CXXCPP@ @@ -345,6 +343,10 @@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBBPF_CFLAGS = @LIBBPF_CFLAGS@ LIBBPF_LIBS = @LIBBPF_LIBS@ +LIBBROTLIDEC_CFLAGS = @LIBBROTLIDEC_CFLAGS@ +LIBBROTLIDEC_LIBS = @LIBBROTLIDEC_LIBS@ +LIBBROTLIENC_CFLAGS = @LIBBROTLIENC_CFLAGS@ +LIBBROTLIENC_LIBS = @LIBBROTLIENC_LIBS@ LIBCARES_CFLAGS = @LIBCARES_CFLAGS@ LIBCARES_LIBS = @LIBCARES_LIBS@ LIBEVENT_OPENSSL_CFLAGS = @LIBEVENT_OPENSSL_CFLAGS@ diff --git a/deps/nghttp2/lib/includes/Makefile.in b/deps/nghttp2/lib/includes/Makefile.in index 3de90d7bef3e3a..eaab6b209030c5 100644 --- a/deps/nghttp2/lib/includes/Makefile.in +++ b/deps/nghttp2/lib/includes/Makefile.in @@ -211,8 +211,6 @@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CSCOPE = @CSCOPE@ CTAGS = @CTAGS@ -CUNIT_CFLAGS = @CUNIT_CFLAGS@ -CUNIT_LIBS = @CUNIT_LIBS@ CXX = @CXX@ CXX1XCXXFLAGS = @CXX1XCXXFLAGS@ CXXCPP = @CXXCPP@ @@ -250,6 +248,10 @@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBBPF_CFLAGS = @LIBBPF_CFLAGS@ LIBBPF_LIBS = @LIBBPF_LIBS@ +LIBBROTLIDEC_CFLAGS = @LIBBROTLIDEC_CFLAGS@ +LIBBROTLIDEC_LIBS = @LIBBROTLIDEC_LIBS@ +LIBBROTLIENC_CFLAGS = @LIBBROTLIENC_CFLAGS@ +LIBBROTLIENC_LIBS = @LIBBROTLIENC_LIBS@ LIBCARES_CFLAGS = @LIBCARES_CFLAGS@ LIBCARES_LIBS = @LIBCARES_LIBS@ LIBEVENT_OPENSSL_CFLAGS = @LIBEVENT_OPENSSL_CFLAGS@ diff --git a/deps/nghttp2/lib/includes/nghttp2/nghttp2.h b/deps/nghttp2/lib/includes/nghttp2/nghttp2.h index 7910db230aae3e..889176097dd31d 100644 --- a/deps/nghttp2/lib/includes/nghttp2/nghttp2.h +++ b/deps/nghttp2/lib/includes/nghttp2/nghttp2.h @@ -51,6 +51,7 @@ extern "C" { #endif /* !defined(_MSC_VER) || (_MSC_VER >= 1800) */ #include #include +#include #include @@ -71,6 +72,13 @@ extern "C" { # endif /* !BUILDING_NGHTTP2 */ #endif /* !defined(WIN32) */ +/** + * @typedef + * + * :type:`nghttp2_ssize` is a signed counterpart of size_t. + */ +typedef ptrdiff_t nghttp2_ssize; + /** * @macro * @@ -168,6 +176,12 @@ typedef struct { /** * @macro * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. 
Consider migrating to :rfc:`9218` extensible + * prioritization scheme. + * * The default weight of stream dependency. */ #define NGHTTP2_DEFAULT_WEIGHT 16 @@ -175,6 +189,12 @@ typedef struct { /** * @macro * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. + * * The maximum weight of stream dependency. */ #define NGHTTP2_MAX_WEIGHT 256 @@ -182,6 +202,12 @@ typedef struct { /** * @macro * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. + * * The minimum weight of stream dependency. */ #define NGHTTP2_MIN_WEIGHT 1 @@ -255,7 +281,7 @@ typedef enum { */ NGHTTP2_ERR_UNSUPPORTED_VERSION = -503, /** - * Used as a return value from :type:`nghttp2_send_callback`, + * Used as a return value from :type:`nghttp2_send_callback2`, * :type:`nghttp2_recv_callback` and * :type:`nghttp2_send_data_callback` to indicate that the operation * would block. @@ -275,9 +301,9 @@ typedef enum { NGHTTP2_ERR_EOF = -507, /** * Used as a return value from - * :func:`nghttp2_data_source_read_callback` to indicate that data + * :func:`nghttp2_data_source_read_callback2` to indicate that data * transfer is postponed. See - * :func:`nghttp2_data_source_read_callback` for details. + * :func:`nghttp2_data_source_read_callback2` for details. */ NGHTTP2_ERR_DEFERRED = -508, /** @@ -830,7 +856,7 @@ typedef struct { * @union * * This union represents the some kind of data source passed to - * :type:`nghttp2_data_source_read_callback`. + * :type:`nghttp2_data_source_read_callback2`. */ typedef union { /** @@ -847,7 +873,7 @@ typedef union { * @enum * * The flags used to set in |data_flags| output parameter in - * :type:`nghttp2_data_source_read_callback`. + * :type:`nghttp2_data_source_read_callback2`. */ typedef enum { /** @@ -861,8 +887,8 @@ typedef enum { /** * Indicates that END_STREAM flag must not be set even if * NGHTTP2_DATA_FLAG_EOF is set. Usually this flag is used to send - * trailer fields with `nghttp2_submit_request()` or - * `nghttp2_submit_response()`. + * trailer fields with `nghttp2_submit_request2()` or + * `nghttp2_submit_response2()`. */ NGHTTP2_DATA_FLAG_NO_END_STREAM = 0x02, /** @@ -872,9 +898,15 @@ typedef enum { NGHTTP2_DATA_FLAG_NO_COPY = 0x04 } nghttp2_data_flag; +#ifndef NGHTTP2_NO_SSIZE_T /** * @functypedef * + * .. warning:: + * + * Deprecated. Use :type:`nghttp2_data_source_read_callback2` + * instead. + * * Callback function invoked when the library wants to read data from * the |source|. The read data is sent in the stream |stream_id|. * The implementation of this function must read at most |length| @@ -939,9 +971,83 @@ typedef ssize_t (*nghttp2_data_source_read_callback)( nghttp2_session *session, int32_t stream_id, uint8_t *buf, size_t length, uint32_t *data_flags, nghttp2_data_source *source, void *user_data); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @functypedef + * + * Callback function invoked when the library wants to read data from + * the |source|. The read data is sent in the stream |stream_id|. + * The implementation of this function must read at most |length| + * bytes of data from |source| (or possibly other places) and store + * them in |buf| and return number of data stored in |buf|. If EOF is + * reached, set :enum:`nghttp2_data_flag.NGHTTP2_DATA_FLAG_EOF` flag + * in |*data_flags|. 
+ * + * Sometime it is desirable to avoid copying data into |buf| and let + * application to send data directly. To achieve this, set + * :enum:`nghttp2_data_flag.NGHTTP2_DATA_FLAG_NO_COPY` to + * |*data_flags| (and possibly other flags, just like when we do + * copy), and return the number of bytes to send without copying data + * into |buf|. The library, seeing + * :enum:`nghttp2_data_flag.NGHTTP2_DATA_FLAG_NO_COPY`, will invoke + * :type:`nghttp2_send_data_callback`. The application must send + * complete DATA frame in that callback. + * + * If this callback is set by `nghttp2_submit_request2()`, + * `nghttp2_submit_response2()` or `nghttp2_submit_headers()` and + * `nghttp2_submit_data2()` with flag parameter + * :enum:`nghttp2_flag.NGHTTP2_FLAG_END_STREAM` set, and + * :enum:`nghttp2_data_flag.NGHTTP2_DATA_FLAG_EOF` flag is set to + * |*data_flags|, DATA frame will have END_STREAM flag set. Usually, + * this is expected behaviour and all are fine. One exception is send + * trailer fields. You cannot send trailer fields after sending frame + * with END_STREAM set. To avoid this problem, one can set + * :enum:`nghttp2_data_flag.NGHTTP2_DATA_FLAG_NO_END_STREAM` along + * with :enum:`nghttp2_data_flag.NGHTTP2_DATA_FLAG_EOF` to signal the + * library not to set END_STREAM in DATA frame. Then application can + * use `nghttp2_submit_trailer()` to send trailer fields. + * `nghttp2_submit_trailer()` can be called inside this callback. + * + * If the application wants to postpone DATA frames (e.g., + * asynchronous I/O, or reading data blocks for long time), it is + * achieved by returning :enum:`nghttp2_error.NGHTTP2_ERR_DEFERRED` + * without reading any data in this invocation. The library removes + * DATA frame from the outgoing queue temporarily. To move back + * deferred DATA frame to outgoing queue, call + * `nghttp2_session_resume_data()`. + * + * By default, |length| is limited to 16KiB at maximum. If peer + * allows larger frames, application can enlarge transmission buffer + * size. See :type:`nghttp2_data_source_read_length_callback` for + * more details. + * + * If the application just wants to return from + * `nghttp2_session_send()` or `nghttp2_session_mem_send2()` without + * sending anything, return :enum:`nghttp2_error.NGHTTP2_ERR_PAUSE`. + * + * In case of error, there are 2 choices. Returning + * :enum:`nghttp2_error.NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE` will + * close the stream by issuing RST_STREAM with + * :enum:`nghttp2_error_code.NGHTTP2_INTERNAL_ERROR`. If a different + * error code is desirable, use `nghttp2_submit_rst_stream()` with a + * desired error code and then return + * :enum:`nghttp2_error.NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE`. + * Returning :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE` will + * signal the entire session failure. + */ +typedef nghttp2_ssize (*nghttp2_data_source_read_callback2)( + nghttp2_session *session, int32_t stream_id, uint8_t *buf, size_t length, + uint32_t *data_flags, nghttp2_data_source *source, void *user_data); + +#ifndef NGHTTP2_NO_SSIZE_T /** * @struct * + * .. warning:: + * + * Deprecated. Use :type:`nghttp2_data_provider2` instead. + * * This struct represents the data source and the way to read a chunk * of data from it. */ @@ -956,6 +1062,25 @@ typedef struct { nghttp2_data_source_read_callback read_callback; } nghttp2_data_provider; +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @struct + * + * This struct represents the data source and the way to read a chunk + * of data from it. 
+ */ +typedef struct { + /** + * The data source. + */ + nghttp2_data_source source; + /** + * The callback function to read a chunk of data from the |source|. + */ + nghttp2_data_source_read_callback2 read_callback; +} nghttp2_data_provider2; + /** * @struct * @@ -1008,6 +1133,12 @@ typedef enum { /** * @struct * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. + * * The structure to specify stream dependency. */ typedef struct { @@ -1042,6 +1173,12 @@ typedef struct { */ size_t padlen; /** + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. + * * The priority specification */ nghttp2_priority_spec pri_spec; @@ -1062,6 +1199,12 @@ typedef struct { /** * @struct * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. + * * The PRIORITY frame. It has the following members: */ typedef struct { @@ -1305,9 +1448,14 @@ typedef union { nghttp2_extension ext; } nghttp2_frame; +#ifndef NGHTTP2_NO_SSIZE_T /** * @functypedef * + * .. warning:: + * + * Deprecated. Use :type:`nghttp2_send_callback2` instead. + * * Callback function invoked when |session| wants to send data to the * remote peer. The implementation of this function must send at most * |length| bytes of data stored in |data|. The |flags| is currently @@ -1340,6 +1488,44 @@ typedef ssize_t (*nghttp2_send_callback)(nghttp2_session *session, const uint8_t *data, size_t length, int flags, void *user_data); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @functypedef + * + * Callback function invoked when |session| wants to send data to the + * remote peer. The implementation of this function must send at most + * |length| bytes of data stored in |data|. The |flags| is currently + * not used and always 0. It must return the number of bytes sent if + * it succeeds. If it cannot send any single byte without blocking, + * it must return :enum:`nghttp2_error.NGHTTP2_ERR_WOULDBLOCK`. For + * other errors, it must return + * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. The + * |user_data| pointer is the third argument passed in to the call to + * `nghttp2_session_client_new()` or `nghttp2_session_server_new()`. + * + * This callback is required if the application uses + * `nghttp2_session_send()` to send data to the remote endpoint. If + * the application uses solely `nghttp2_session_mem_send2()` instead, + * this callback function is unnecessary. + * + * To set this callback to :type:`nghttp2_session_callbacks`, use + * `nghttp2_session_callbacks_set_send_callback2()`. + * + * .. note:: + * + * The |length| may be very small. If that is the case, and + * application disables Nagle algorithm (``TCP_NODELAY``), then just + * writing |data| to the network stack leads to very small packet, + * and it is very inefficient. An application should be responsible + * to buffer up small chunks of data as necessary to avoid this + * situation. 
+ */ +typedef nghttp2_ssize (*nghttp2_send_callback2)(nghttp2_session *session, + const uint8_t *data, + size_t length, int flags, + void *user_data); + /** * @functypedef * @@ -1370,7 +1556,7 @@ typedef ssize_t (*nghttp2_send_callback)(nghttp2_session *session, * error; if partial frame data has already sent, it is impossible to * send another data in that state, and all we can do is tear down * connection). When data is fully processed, but application wants - * to make `nghttp2_session_mem_send()` or `nghttp2_session_send()` + * to make `nghttp2_session_mem_send2()` or `nghttp2_session_send()` * return immediately without processing next frames, return * :enum:`nghttp2_error.NGHTTP2_ERR_PAUSE`. If application decided to * reset this stream, return @@ -1387,9 +1573,14 @@ typedef int (*nghttp2_send_data_callback)(nghttp2_session *session, nghttp2_data_source *source, void *user_data); +#ifndef NGHTTP2_NO_SSIZE_T /** * @functypedef * + * .. warning:: + * + * Deprecated. Use :type:`nghttp2_recv_callback2` instead. + * * Callback function invoked when |session| wants to receive data from * the remote peer. The implementation of this function must read at * most |length| bytes of data and store it in |buf|. The |flags| is @@ -1417,11 +1608,43 @@ typedef ssize_t (*nghttp2_recv_callback)(nghttp2_session *session, uint8_t *buf, size_t length, int flags, void *user_data); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @functypedef + * + * Callback function invoked when |session| wants to receive data from + * the remote peer. The implementation of this function must read at + * most |length| bytes of data and store it in |buf|. The |flags| is + * currently not used and always 0. It must return the number of + * bytes written in |buf| if it succeeds. If it cannot read any + * single byte without blocking, it must return + * :enum:`nghttp2_error.NGHTTP2_ERR_WOULDBLOCK`. If it gets EOF + * before it reads any single byte, it must return + * :enum:`nghttp2_error.NGHTTP2_ERR_EOF`. For other errors, it must + * return :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. + * Returning 0 is treated as + * :enum:`nghttp2_error.NGHTTP2_ERR_WOULDBLOCK`. The |user_data| + * pointer is the third argument passed in to the call to + * `nghttp2_session_client_new()` or `nghttp2_session_server_new()`. + * + * This callback is required if the application uses + * `nghttp2_session_recv()` to receive data from the remote endpoint. + * If the application uses solely `nghttp2_session_mem_recv2()` + * instead, this callback function is unnecessary. + * + * To set this callback to :type:`nghttp2_session_callbacks`, use + * `nghttp2_session_callbacks_set_recv_callback2()`. + */ +typedef nghttp2_ssize (*nghttp2_recv_callback2)(nghttp2_session *session, + uint8_t *buf, size_t length, + int flags, void *user_data); + /** * @functypedef * * Callback function invoked by `nghttp2_session_recv()` and - * `nghttp2_session_mem_recv()` when a frame is received. The + * `nghttp2_session_mem_recv2()` when a frame is received. The * |user_data| pointer is the third argument passed in to the call to * `nghttp2_session_client_new()` or `nghttp2_session_server_new()`. * @@ -1439,8 +1662,8 @@ typedef ssize_t (*nghttp2_recv_callback)(nghttp2_session *session, uint8_t *buf, * * The implementation of this function must return 0 if it succeeds. 
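[Editorial note: a minimal sketch of conforming nghttp2_send_callback2 and nghttp2_recv_callback2 implementations, assuming |user_data| points to a non-blocking POSIX socket descriptor; that convention is an assumption of this example, not something the library requires.]

#include <errno.h>
#include <unistd.h>
#include <nghttp2/nghttp2.h>

/* Assumes |user_data| is a pointer to a non-blocking socket fd (an int). */
static nghttp2_ssize my_send_callback2(nghttp2_session *session,
                                       const uint8_t *data, size_t length,
                                       int flags, void *user_data) {
  int fd = *(int *)user_data;
  ssize_t n;
  (void)session;
  (void)flags;
  n = write(fd, data, length);
  if (n < 0) {
    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return NGHTTP2_ERR_WOULDBLOCK;
    return NGHTTP2_ERR_CALLBACK_FAILURE;
  }
  return (nghttp2_ssize)n;
}

static nghttp2_ssize my_recv_callback2(nghttp2_session *session, uint8_t *buf,
                                       size_t length, int flags,
                                       void *user_data) {
  int fd = *(int *)user_data;
  ssize_t n;
  (void)session;
  (void)flags;
  n = read(fd, buf, length);
  if (n < 0) {
    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return NGHTTP2_ERR_WOULDBLOCK;
    return NGHTTP2_ERR_CALLBACK_FAILURE;
  }
  if (n == 0)
    return NGHTTP2_ERR_EOF; /* peer closed the connection */
  return (nghttp2_ssize)n;
}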
* If nonzero value is returned, it is treated as fatal error and - * `nghttp2_session_recv()` and `nghttp2_session_mem_recv()` functions - * immediately return + * `nghttp2_session_recv()` and `nghttp2_session_mem_recv2()` + * functions immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. * * To set this callback to :type:`nghttp2_session_callbacks`, use @@ -1454,7 +1677,7 @@ typedef int (*nghttp2_on_frame_recv_callback)(nghttp2_session *session, * @functypedef * * Callback function invoked by `nghttp2_session_recv()` and - * `nghttp2_session_mem_recv()` when an invalid non-DATA frame is + * `nghttp2_session_mem_recv2()` when an invalid non-DATA frame is * received. The error is indicated by the |lib_error_code|, which is * one of the values defined in :type:`nghttp2_error`. When this * callback function is invoked, the library automatically submits @@ -1468,8 +1691,8 @@ typedef int (*nghttp2_on_frame_recv_callback)(nghttp2_session *session, * * The implementation of this function must return 0 if it succeeds. * If nonzero is returned, it is treated as fatal error and - * `nghttp2_session_recv()` and `nghttp2_session_mem_recv()` functions - * immediately return + * `nghttp2_session_recv()` and `nghttp2_session_mem_recv2()` + * functions immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. * * To set this callback to :type:`nghttp2_session_callbacks`, use @@ -1492,19 +1715,19 @@ typedef int (*nghttp2_on_invalid_frame_recv_callback)( * argument passed in to the call to `nghttp2_session_client_new()` or * `nghttp2_session_server_new()`. * - * If the application uses `nghttp2_session_mem_recv()`, it can return - * :enum:`nghttp2_error.NGHTTP2_ERR_PAUSE` to make - * `nghttp2_session_mem_recv()` return without processing further + * If the application uses `nghttp2_session_mem_recv2()`, it can + * return :enum:`nghttp2_error.NGHTTP2_ERR_PAUSE` to make + * `nghttp2_session_mem_recv2()` return without processing further * input bytes. The memory by pointed by the |data| is retained until - * `nghttp2_session_mem_recv()` or `nghttp2_session_recv()` is called. - * The application must retain the input bytes which was used to - * produce the |data| parameter, because it may refer to the memory + * `nghttp2_session_mem_recv2()` or `nghttp2_session_recv()` is + * called. The application must retain the input bytes which was used + * to produce the |data| parameter, because it may refer to the memory * region included in the input bytes. * * The implementation of this function must return 0 if it succeeds. * If nonzero is returned, it is treated as fatal error, and - * `nghttp2_session_recv()` and `nghttp2_session_mem_recv()` functions - * immediately return + * `nghttp2_session_recv()` and `nghttp2_session_mem_recv2()` + * functions immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. * * To set this callback to :type:`nghttp2_session_callbacks`, use @@ -1531,8 +1754,8 @@ typedef int (*nghttp2_on_data_chunk_recv_callback)(nghttp2_session *session, * If there is a fatal error while executing this callback, the * implementation should return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`, which makes - * `nghttp2_session_send()` and `nghttp2_session_mem_send()` functions - * immediately return + * `nghttp2_session_send()` and `nghttp2_session_mem_send2()` + * functions immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. 
* * If the other value is returned, it is treated as if @@ -1556,8 +1779,8 @@ typedef int (*nghttp2_before_frame_send_callback)(nghttp2_session *session, * * The implementation of this function must return 0 if it succeeds. * If nonzero is returned, it is treated as fatal error and - * `nghttp2_session_send()` and `nghttp2_session_mem_send()` functions - * immediately return + * `nghttp2_session_send()` and `nghttp2_session_mem_send2()` + * functions immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. * * To set this callback to :type:`nghttp2_session_callbacks`, use @@ -1579,8 +1802,8 @@ typedef int (*nghttp2_on_frame_send_callback)(nghttp2_session *session, * * The implementation of this function must return 0 if it succeeds. * If nonzero is returned, it is treated as fatal error and - * `nghttp2_session_send()` and `nghttp2_session_mem_send()` functions - * immediately return + * `nghttp2_session_send()` and `nghttp2_session_mem_send2()` + * functions immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. * * `nghttp2_session_get_stream_user_data()` can be used to get @@ -1601,7 +1824,7 @@ typedef int (*nghttp2_on_frame_not_send_callback)(nghttp2_session *session, * The reason of closure is indicated by the |error_code|. The * |error_code| is usually one of :enum:`nghttp2_error_code`, but that * is not guaranteed. The stream_user_data, which was specified in - * `nghttp2_submit_request()` or `nghttp2_submit_headers()`, is still + * `nghttp2_submit_request2()` or `nghttp2_submit_headers()`, is still * available in this function. The |user_data| pointer is the third * argument passed in to the call to `nghttp2_session_client_new()` or * `nghttp2_session_server_new()`. @@ -1610,8 +1833,8 @@ typedef int (*nghttp2_on_frame_not_send_callback)(nghttp2_session *session, * * The implementation of this function must return 0 if it succeeds. * If nonzero is returned, it is treated as fatal error and - * `nghttp2_session_recv()`, `nghttp2_session_mem_recv()`, - * `nghttp2_session_send()`, and `nghttp2_session_mem_send()` + * `nghttp2_session_recv()`, `nghttp2_session_mem_recv2()`, + * `nghttp2_session_send()`, and `nghttp2_session_mem_send2()` * functions immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. * @@ -1681,7 +1904,7 @@ typedef int (*nghttp2_on_stream_close_callback)(nghttp2_session *session, * value is returned, it is treated as if * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE` is returned. If * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE` is returned, - * `nghttp2_session_mem_recv()` function will immediately return + * `nghttp2_session_mem_recv2()` function will immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. * * To set this callback to :type:`nghttp2_session_callbacks`, use @@ -1726,11 +1949,11 @@ typedef int (*nghttp2_on_begin_headers_callback)(nghttp2_session *session, * performs validation based on HTTP Messaging rule, which is briefly * explained in :ref:`http-messaging` section. * - * If the application uses `nghttp2_session_mem_recv()`, it can return - * :enum:`nghttp2_error.NGHTTP2_ERR_PAUSE` to make - * `nghttp2_session_mem_recv()` return without processing further + * If the application uses `nghttp2_session_mem_recv2()`, it can + * return :enum:`nghttp2_error.NGHTTP2_ERR_PAUSE` to make + * `nghttp2_session_mem_recv2()` return without processing further * input bytes. 
The memory pointed by |frame|, |name| and |value| - * parameters are retained until `nghttp2_session_mem_recv()` or + * parameters are retained until `nghttp2_session_mem_recv2()` or * `nghttp2_session_recv()` is called. The application must retain * the input bytes which was used to produce these parameters, because * it may refer to the memory region included in the input bytes. @@ -1757,8 +1980,8 @@ typedef int (*nghttp2_on_begin_headers_callback)(nghttp2_session *session, * nonzero value is returned, it is treated as * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. If * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE` is returned, - * `nghttp2_session_recv()` and `nghttp2_session_mem_recv()` functions - * immediately return + * `nghttp2_session_recv()` and `nghttp2_session_mem_recv2()` + * functions immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. * * To set this callback to :type:`nghttp2_session_callbacks`, use @@ -1873,9 +2096,15 @@ typedef int (*nghttp2_on_invalid_header_callback2)( nghttp2_session *session, const nghttp2_frame *frame, nghttp2_rcbuf *name, nghttp2_rcbuf *value, uint8_t flags, void *user_data); +#ifndef NGHTTP2_NO_SSIZE_T /** * @functypedef * + * .. warning:: + * + * Deprecated. Use :type:`nghttp2_select_padding_callback2` + * instead. + * * Callback function invoked when the library asks application how * many padding bytes are required for the transmission of the * |frame|. The application must choose the total length of payload @@ -1896,9 +2125,39 @@ typedef ssize_t (*nghttp2_select_padding_callback)(nghttp2_session *session, size_t max_payloadlen, void *user_data); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @functypedef + * + * Callback function invoked when the library asks application how + * many padding bytes are required for the transmission of the + * |frame|. The application must choose the total length of payload + * including padded bytes in range [frame->hd.length, max_payloadlen], + * inclusive. Choosing number not in this range will be treated as + * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. Returning + * ``frame->hd.length`` means no padding is added. Returning + * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE` will make + * `nghttp2_session_send()` and `nghttp2_session_mem_send2()` + * functions immediately return + * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. + * + * To set this callback to :type:`nghttp2_session_callbacks`, use + * `nghttp2_session_callbacks_set_select_padding_callback2()`. + */ +typedef nghttp2_ssize (*nghttp2_select_padding_callback2)( + nghttp2_session *session, const nghttp2_frame *frame, size_t max_payloadlen, + void *user_data); + +#ifndef NGHTTP2_NO_SSIZE_T /** * @functypedef * + * .. warning:: + * + * Deprecated. Use + * :type:`nghttp2_data_source_read_length_callback2` instead. + * * Callback function invoked when library wants to get max length of * data to send data to the remote peer. The implementation of this * function should return a value in the following range. [1, @@ -1926,6 +2185,38 @@ typedef ssize_t (*nghttp2_data_source_read_length_callback)( int32_t session_remote_window_size, int32_t stream_remote_window_size, uint32_t remote_max_frame_size, void *user_data); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @functypedef + * + * Callback function invoked when library wants to get max length of + * data to send data to the remote peer. The implementation of this + * function should return a value in the following range. 
[1, + * min(|session_remote_window_size|, |stream_remote_window_size|, + * |remote_max_frame_size|)]. If a value greater than this range is + * returned than the max allow value will be used. Returning a value + * smaller than this range is treated as + * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. The + * |frame_type| is provided for future extensibility and identifies + * the type of frame (see :type:`nghttp2_frame_type`) for which to get + * the length for. Currently supported frame types are: + * :enum:`nghttp2_frame_type.NGHTTP2_DATA`. + * + * This callback can be used to control the length in bytes for which + * :type:`nghttp2_data_source_read_callback` is allowed to send to the + * remote endpoint. This callback is optional. Returning + * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE` will signal the + * entire session failure. + * + * To set this callback to :type:`nghttp2_session_callbacks`, use + * `nghttp2_session_callbacks_set_data_source_read_length_callback2()`. + */ +typedef nghttp2_ssize (*nghttp2_data_source_read_length_callback2)( + nghttp2_session *session, uint8_t frame_type, int32_t stream_id, + int32_t session_remote_window_size, int32_t stream_remote_window_size, + uint32_t remote_max_frame_size, void *user_data); + /** * @functypedef * @@ -1942,8 +2233,8 @@ typedef ssize_t (*nghttp2_data_source_read_length_callback)( * * The implementation of this function must return 0 if it succeeds. * If nonzero value is returned, it is treated as fatal error and - * `nghttp2_session_recv()` and `nghttp2_session_mem_recv()` functions - * immediately return + * `nghttp2_session_recv()` and `nghttp2_session_mem_recv2()` + * functions immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. * * To set this callback to :type:`nghttp2_session_callbacks`, use @@ -1967,8 +2258,8 @@ typedef int (*nghttp2_on_begin_frame_callback)(nghttp2_session *session, * * If fatal error occurred, application should return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. In this case, - * `nghttp2_session_recv()` and `nghttp2_session_mem_recv()` functions - * immediately return + * `nghttp2_session_recv()` and `nghttp2_session_mem_recv2()` + * functions immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. If the other * values are returned, currently they are treated as * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. @@ -2005,8 +2296,8 @@ typedef int (*nghttp2_on_extension_chunk_recv_callback)( * * If fatal error occurred, application should return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. In this case, - * `nghttp2_session_recv()` and `nghttp2_session_mem_recv()` functions - * immediately return + * `nghttp2_session_recv()` and `nghttp2_session_mem_recv2()` + * functions immediately return * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. If the other * values are returned, currently they are treated as * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. @@ -2016,9 +2307,15 @@ typedef int (*nghttp2_unpack_extension_callback)(nghttp2_session *session, const nghttp2_frame_hd *hd, void *user_data); +#ifndef NGHTTP2_NO_SSIZE_T /** * @functypedef * + * .. warning:: + * + * Deprecated. Use :type:`nghttp2_pack_extension_callback2` + * instead. + * * Callback function invoked when library asks the application to pack * extension payload in its wire format. The frame header will be * packed by library. Application must pack payload only. 
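[Editorial note: the nghttp2_data_source_read_length_callback2 documented above could be sketched roughly as follows; MAX_CHUNK is an arbitrary application-chosen cap invented for this example.]

#include <nghttp2/nghttp2.h>

#define MAX_CHUNK (64 * 1024) /* arbitrary application limit for the sketch */

static nghttp2_ssize my_read_length_callback2(
    nghttp2_session *session, uint8_t frame_type, int32_t stream_id,
    int32_t session_remote_window_size, int32_t stream_remote_window_size,
    uint32_t remote_max_frame_size, void *user_data) {
  int32_t len = MAX_CHUNK;
  (void)session;
  (void)frame_type;
  (void)stream_id;
  (void)user_data;

  /* Keep the return value within the documented range:
   * [1, min(session window, stream window, remote max frame size)]. */
  if (len > session_remote_window_size)
    len = session_remote_window_size;
  if (len > stream_remote_window_size)
    len = stream_remote_window_size;
  if (len > (int32_t)remote_max_frame_size)
    len = (int32_t)remote_max_frame_size;
  if (len < 1)
    len = 1;
  return (nghttp2_ssize)len;
}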
@@ -2049,18 +2346,53 @@ typedef ssize_t (*nghttp2_pack_extension_callback)(nghttp2_session *session, const nghttp2_frame *frame, void *user_data); +#endif /* NGHTTP2_NO_SSIZE_T */ + /** * @functypedef * - * Callback function invoked when library provides the error message - * intended for human consumption. This callback is solely for - * debugging purpose. The |msg| is typically NULL-terminated string + * Callback function invoked when library asks the application to pack + * extension payload in its wire format. The frame header will be + * packed by library. Application must pack payload only. + * ``frame->ext.payload`` is the object passed to + * `nghttp2_submit_extension()` as payload parameter. Application + * must pack extension payload to the |buf| of its capacity |len| + * bytes. The |len| is at least 16KiB. + * + * The implementation of this function should return the number of + * bytes written into |buf| when it succeeds. + * + * To abort processing this extension frame, return + * :enum:`nghttp2_error.NGHTTP2_ERR_CANCEL`, and + * :type:`nghttp2_on_frame_not_send_callback` will be invoked. + * + * If fatal error occurred, application should return + * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. In this case, + * `nghttp2_session_send()` and `nghttp2_session_mem_send2()` + * functions immediately return + * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. If the other + * values are returned, currently they are treated as + * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. If the return + * value is strictly larger than |len|, it is treated as + * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE`. + */ +typedef nghttp2_ssize (*nghttp2_pack_extension_callback2)( + nghttp2_session *session, uint8_t *buf, size_t len, + const nghttp2_frame *frame, void *user_data); + +/** + * @functypedef + * + * .. warning:: + * + * Deprecated. Use :type:`nghttp2_error_callback2` instead. + * + * Callback function invoked when library provides the error message + * intended for human consumption. This callback is solely for + * debugging purpose. The |msg| is typically NULL-terminated string * of length |len|. |len| does not include the sentinel NULL * character. * - * This function is deprecated. The new application should use - * :type:`nghttp2_error_callback2`. - * * The format of error message may change between nghttp2 library * versions. The application should not depend on the particular * format. @@ -2143,9 +2475,15 @@ nghttp2_session_callbacks_new(nghttp2_session_callbacks **callbacks_ptr); NGHTTP2_EXTERN void nghttp2_session_callbacks_del(nghttp2_session_callbacks *callbacks); +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use `nghttp2_session_callbacks_set_send_callback2()` + * with :type:`nghttp2_send_callback2` instead. + * * Sets callback function invoked when a session wants to send data to * the remote peer. This callback is not necessary if the application * uses solely `nghttp2_session_mem_send()` to serialize data to @@ -2154,9 +2492,28 @@ nghttp2_session_callbacks_del(nghttp2_session_callbacks *callbacks); NGHTTP2_EXTERN void nghttp2_session_callbacks_set_send_callback( nghttp2_session_callbacks *cbs, nghttp2_send_callback send_callback); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Sets callback function invoked when a session wants to send data to + * the remote peer. This callback is not necessary if the application + * uses solely `nghttp2_session_mem_send2()` to serialize data to + * transmit. 
+ */ +NGHTTP2_EXTERN void nghttp2_session_callbacks_set_send_callback2( + nghttp2_session_callbacks *cbs, nghttp2_send_callback2 send_callback); + +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use `nghttp2_session_callbacks_set_recv_callback2()` + * with :type:`nghttp2_recv_callback2` instead. + * * Sets callback function invoked when the a session wants to receive * data from the remote peer. This callback is not necessary if the * application uses solely `nghttp2_session_mem_recv()` to process @@ -2165,11 +2522,24 @@ NGHTTP2_EXTERN void nghttp2_session_callbacks_set_send_callback( NGHTTP2_EXTERN void nghttp2_session_callbacks_set_recv_callback( nghttp2_session_callbacks *cbs, nghttp2_recv_callback recv_callback); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Sets callback function invoked when the a session wants to receive + * data from the remote peer. This callback is not necessary if the + * application uses solely `nghttp2_session_mem_recv2()` to process + * received data. + */ +NGHTTP2_EXTERN void nghttp2_session_callbacks_set_recv_callback2( + nghttp2_session_callbacks *cbs, nghttp2_recv_callback2 recv_callback); + /** * @function * * Sets callback function invoked by `nghttp2_session_recv()` and - * `nghttp2_session_mem_recv()` when a frame is received. + * `nghttp2_session_mem_recv2()` when a frame is received. */ NGHTTP2_EXTERN void nghttp2_session_callbacks_set_on_frame_recv_callback( nghttp2_session_callbacks *cbs, @@ -2179,7 +2549,7 @@ NGHTTP2_EXTERN void nghttp2_session_callbacks_set_on_frame_recv_callback( * @function * * Sets callback function invoked by `nghttp2_session_recv()` and - * `nghttp2_session_mem_recv()` when an invalid non-DATA frame is + * `nghttp2_session_mem_recv2()` when an invalid non-DATA frame is * received. */ NGHTTP2_EXTERN void @@ -2290,9 +2660,16 @@ NGHTTP2_EXTERN void nghttp2_session_callbacks_set_on_invalid_header_callback2( nghttp2_session_callbacks *cbs, nghttp2_on_invalid_header_callback2 on_invalid_header_callback2); +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use + * `nghttp2_session_callbacks_set_select_padding_callback2()` with + * :type:`nghttp2_select_padding_callback2` instead. + * * Sets callback function invoked when the library asks application * how many padding bytes are required for the transmission of the * given frame. @@ -2301,9 +2678,29 @@ NGHTTP2_EXTERN void nghttp2_session_callbacks_set_select_padding_callback( nghttp2_session_callbacks *cbs, nghttp2_select_padding_callback select_padding_callback); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Sets callback function invoked when the library asks application + * how many padding bytes are required for the transmission of the + * given frame. + */ +NGHTTP2_EXTERN void nghttp2_session_callbacks_set_select_padding_callback2( + nghttp2_session_callbacks *cbs, + nghttp2_select_padding_callback2 select_padding_callback); + +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use + * `nghttp2_session_callbacks_set_data_source_read_length_callback2()` + * with :type:`nghttp2_data_source_read_length_callback2` instead. + * * Sets callback function determine the length allowed in * :type:`nghttp2_data_source_read_callback`. 
*/ @@ -2312,6 +2709,19 @@ nghttp2_session_callbacks_set_data_source_read_length_callback( nghttp2_session_callbacks *cbs, nghttp2_data_source_read_length_callback data_source_read_length_callback); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Sets callback function determine the length allowed in + * :type:`nghttp2_data_source_read_callback2`. + */ +NGHTTP2_EXTERN void +nghttp2_session_callbacks_set_data_source_read_length_callback2( + nghttp2_session_callbacks *cbs, + nghttp2_data_source_read_length_callback2 data_source_read_length_callback); + /** * @function * @@ -2326,15 +2736,22 @@ NGHTTP2_EXTERN void nghttp2_session_callbacks_set_on_begin_frame_callback( * * Sets callback function invoked when * :enum:`nghttp2_data_flag.NGHTTP2_DATA_FLAG_NO_COPY` is used in - * :type:`nghttp2_data_source_read_callback` to avoid data copy. + * :type:`nghttp2_data_source_read_callback2` to avoid data copy. */ NGHTTP2_EXTERN void nghttp2_session_callbacks_set_send_data_callback( nghttp2_session_callbacks *cbs, nghttp2_send_data_callback send_data_callback); +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use + * `nghttp2_session_callbacks_set_pack_extension_callback2()` with + * :type:`nghttp2_pack_extension_callback2` instead. + * * Sets callback function invoked when the library asks the * application to pack extension frame payload in wire format. */ @@ -2342,6 +2759,18 @@ NGHTTP2_EXTERN void nghttp2_session_callbacks_set_pack_extension_callback( nghttp2_session_callbacks *cbs, nghttp2_pack_extension_callback pack_extension_callback); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Sets callback function invoked when the library asks the + * application to pack extension frame payload in wire format. + */ +NGHTTP2_EXTERN void nghttp2_session_callbacks_set_pack_extension_callback2( + nghttp2_session_callbacks *cbs, + nghttp2_pack_extension_callback2 pack_extension_callback); + /** * @function * @@ -2366,12 +2795,15 @@ nghttp2_session_callbacks_set_on_extension_chunk_recv_callback( /** * @function * + * .. warning:: + * + * Deprecated. Use + * `nghttp2_session_callbacks_set_error_callback2()` with + * :type:`nghttp2_error_callback2` instead. + * * Sets callback function invoked when library tells error message to * the application. * - * This function is deprecated. The new application should use - * `nghttp2_session_callbacks_set_error_callback2()`. - * * If both :type:`nghttp2_error_callback` and * :type:`nghttp2_error_callback2` are set, the latter takes * precedence. @@ -2568,7 +3000,7 @@ nghttp2_option_set_peer_max_concurrent_streams(nghttp2_option *option, * * If this option is not used or used with zero value, if MAGIC does * not match :macro:`NGHTTP2_CLIENT_MAGIC`, `nghttp2_session_recv()` - * and `nghttp2_session_mem_recv()` will return error + * and `nghttp2_session_mem_recv2()` will return error * :enum:`nghttp2_error.NGHTTP2_ERR_BAD_CLIENT_MAGIC`, which is fatal * error. */ @@ -2781,7 +3213,7 @@ nghttp2_option_set_stream_reset_rate_limit(nghttp2_option *option, * does not store |callbacks|. The |user_data| is an arbitrary user * supplied data, which will be passed to the callback functions. * - * The :type:`nghttp2_send_callback` must be specified. If the + * The :type:`nghttp2_send_callback2` must be specified. If the * application code uses `nghttp2_session_recv()`, the * :type:`nghttp2_recv_callback` must be specified. The other members * of |callbacks| can be ``NULL``. 
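[Editorial note: a sketch of how the *2 callback setters above might be combined when creating a client session. my_send_callback2 and my_recv_callback2 are the hypothetical callbacks sketched earlier; any conforming implementations would do.]

#include <nghttp2/nghttp2.h>

extern nghttp2_ssize my_send_callback2(nghttp2_session *, const uint8_t *,
                                       size_t, int, void *);
extern nghttp2_ssize my_recv_callback2(nghttp2_session *, uint8_t *, size_t,
                                       int, void *);

static int create_client_session(nghttp2_session **session, int *fd) {
  nghttp2_session_callbacks *callbacks;
  int rv;

  rv = nghttp2_session_callbacks_new(&callbacks);
  if (rv != 0)
    return rv;

  /* Register the nghttp2_ssize-based callbacks. */
  nghttp2_session_callbacks_set_send_callback2(callbacks, my_send_callback2);
  nghttp2_session_callbacks_set_recv_callback2(callbacks, my_recv_callback2);

  /* In this sketch the socket descriptor doubles as user_data. */
  rv = nghttp2_session_client_new(session, callbacks, fd);
  nghttp2_session_callbacks_del(callbacks);
  return rv;
}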
@@ -2807,7 +3239,7 @@ nghttp2_session_client_new(nghttp2_session **session_ptr, * does not store |callbacks|. The |user_data| is an arbitrary user * supplied data, which will be passed to the callback functions. * - * The :type:`nghttp2_send_callback` must be specified. If the + * The :type:`nghttp2_send_callback2` must be specified. If the * application code uses `nghttp2_session_recv()`, the * :type:`nghttp2_recv_callback` must be specified. The other members * of |callbacks| can be ``NULL``. @@ -2943,7 +3375,7 @@ NGHTTP2_EXTERN void nghttp2_session_del(nghttp2_session *session); * This function retrieves the highest prioritized frame from the * outbound queue and sends it to the remote peer. It does this as * many times as possible until the user callback - * :type:`nghttp2_send_callback` returns + * :type:`nghttp2_send_callback2` returns * :enum:`nghttp2_error.NGHTTP2_ERR_WOULDBLOCK`, the outbound queue * becomes empty or flow control is triggered (remote window size * becomes depleted or maximum number of concurrent streams is @@ -2973,7 +3405,7 @@ NGHTTP2_EXTERN void nghttp2_session_del(nghttp2_session *session); * :type:`nghttp2_on_frame_not_send_callback` is invoked. Abort * the following steps. * - * 8. :type:`nghttp2_send_callback` is invoked one or more times to + * 8. :type:`nghttp2_send_callback2` is invoked one or more times to * send the frame. * * 9. :type:`nghttp2_on_frame_send_callback` is invoked. @@ -2992,9 +3424,14 @@ NGHTTP2_EXTERN void nghttp2_session_del(nghttp2_session *session); */ NGHTTP2_EXTERN int nghttp2_session_send(nghttp2_session *session); +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use `nghttp2_session_mem_send2()` instead. + * * Returns the serialized data to send. * * This function behaves like `nghttp2_session_send()` except that it @@ -3034,6 +3471,50 @@ NGHTTP2_EXTERN int nghttp2_session_send(nghttp2_session *session); NGHTTP2_EXTERN ssize_t nghttp2_session_mem_send(nghttp2_session *session, const uint8_t **data_ptr); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Returns the serialized data to send. + * + * This function behaves like `nghttp2_session_send()` except that it + * does not use :type:`nghttp2_send_callback2` to transmit data. + * Instead, it assigns the pointer to the serialized data to the + * |*data_ptr| and returns its length. The other callbacks are called + * in the same way as they are in `nghttp2_session_send()`. + * + * If no data is available to send, this function returns 0. + * + * This function may not return all serialized data in one invocation. + * To get all data, call this function repeatedly until it returns 0 + * or one of negative error codes. + * + * The assigned |*data_ptr| is valid until the next call of + * `nghttp2_session_mem_send2()` or `nghttp2_session_send()`. + * + * The caller must send all data before sending the next chunk of + * data. + * + * This function returns the length of the data pointed by the + * |*data_ptr| if it succeeds, or one of the following negative error + * codes: + * + * :enum:`nghttp2_error.NGHTTP2_ERR_NOMEM` + * Out of memory. + * + * .. note:: + * + * This function may produce very small byte string. If that is the + * case, and application disables Nagle algorithm (``TCP_NODELAY``), + * then writing this small chunk leads to very small packet, and it + * is very inefficient. An application should be responsible to + * buffer up small chunks of data as necessary to avoid this + * situation. 
+ */ +NGHTTP2_EXTERN nghttp2_ssize +nghttp2_session_mem_send2(nghttp2_session *session, const uint8_t **data_ptr); + /** * @function * @@ -3104,9 +3585,14 @@ NGHTTP2_EXTERN ssize_t nghttp2_session_mem_send(nghttp2_session *session, */ NGHTTP2_EXTERN int nghttp2_session_recv(nghttp2_session *session); +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use `nghttp2_session_mem_recv2()` instead. + * * Processes data |in| as an input from the remote endpoint. The * |inlen| indicates the number of bytes to receive in the |in|. * @@ -3145,6 +3631,49 @@ NGHTTP2_EXTERN ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, size_t inlen); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Processes data |in| as an input from the remote endpoint. The + * |inlen| indicates the number of bytes to receive in the |in|. + * + * This function behaves like `nghttp2_session_recv()` except that it + * does not use :type:`nghttp2_recv_callback` to receive data; the + * |in| is the only data for the invocation of this function. If all + * bytes are processed, this function returns. The other callbacks + * are called in the same way as they are in `nghttp2_session_recv()`. + * + * In the current implementation, this function always tries to + * processes |inlen| bytes of input data unless either an error occurs or + * :enum:`nghttp2_error.NGHTTP2_ERR_PAUSE` is returned from + * :type:`nghttp2_on_header_callback` or + * :type:`nghttp2_on_data_chunk_recv_callback`. If + * :enum:`nghttp2_error.NGHTTP2_ERR_PAUSE` is used, the return value + * includes the number of bytes which was used to produce the data or + * frame for the callback. + * + * This function returns the number of processed bytes, or one of the + * following negative error codes: + * + * :enum:`nghttp2_error.NGHTTP2_ERR_NOMEM` + * Out of memory. + * :enum:`nghttp2_error.NGHTTP2_ERR_CALLBACK_FAILURE` + * The callback function failed. + * :enum:`nghttp2_error.NGHTTP2_ERR_BAD_CLIENT_MAGIC` + * Invalid client magic was detected. This error only returns + * when |session| was configured as server and + * `nghttp2_option_set_no_recv_client_magic()` is not used with + * nonzero value. + * :enum:`nghttp2_error.NGHTTP2_ERR_FLOODED` + * Flooding was detected in this HTTP/2 session, and it must be + * closed. This is most likely caused by misbehaviour of peer. + */ +NGHTTP2_EXTERN nghttp2_ssize nghttp2_session_mem_recv2(nghttp2_session *session, + const uint8_t *in, + size_t inlen); + /** * @function * @@ -3190,7 +3719,7 @@ NGHTTP2_EXTERN int nghttp2_session_want_write(nghttp2_session *session); * @function * * Returns stream_user_data for the stream |stream_id|. The - * stream_user_data is provided by `nghttp2_submit_request()`, + * stream_user_data is provided by `nghttp2_submit_request2()`, * `nghttp2_submit_headers()` or * `nghttp2_session_set_stream_user_data()`. Unless it is set using * `nghttp2_session_set_stream_user_data()`, if the stream is @@ -3626,6 +4155,13 @@ NGHTTP2_EXTERN int nghttp2_session_consume_stream(nghttp2_session *session, /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. In the future release after the end of + * 2024, this function will always return 0 without doing anything. + * * Changes priority of existing stream denoted by |stream_id|. The * new priority specification is |pri_spec|. 
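[Editorial note: a sketch of a callback-free I/O step built on nghttp2_session_mem_send2() and nghttp2_session_mem_recv2(). The transport_write()/transport_read() helpers are hypothetical and assumed to move the full buffer or fail.]

#include <sys/types.h>
#include <nghttp2/nghttp2.h>

/* Hypothetical transport helpers; return bytes moved, or -1 on error. */
extern ssize_t transport_write(const uint8_t *data, size_t len);
extern ssize_t transport_read(uint8_t *buf, size_t len);

/* Drains the outbound queue, then feeds one chunk of input to the session. */
static int io_step(nghttp2_session *session) {
  uint8_t inbuf[4096];
  ssize_t nread;

  for (;;) {
    const uint8_t *data;
    nghttp2_ssize datalen = nghttp2_session_mem_send2(session, &data);
    if (datalen < 0)
      return -1; /* e.g. NGHTTP2_ERR_NOMEM */
    if (datalen == 0)
      break; /* nothing left to send */
    /* The whole chunk must be sent before the next mem_send2 call. */
    if (transport_write(data, (size_t)datalen) < 0)
      return -1;
  }

  nread = transport_read(inbuf, sizeof(inbuf));
  if (nread < 0)
    return -1;
  if (nread > 0) {
    nghttp2_ssize rv = nghttp2_session_mem_recv2(session, inbuf, (size_t)nread);
    if (rv < 0)
      return -1; /* fatal library or callback error */
  }
  return 0;
}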
* @@ -3665,6 +4201,13 @@ nghttp2_session_change_stream_priority(nghttp2_session *session, /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. In the future release after the end of + * 2024, this function will always return 0 without doing anything. + * * Creates idle stream with the given |stream_id|, and priority * |pri_spec|. * @@ -3715,10 +4258,6 @@ nghttp2_session_create_idle_stream(nghttp2_session *session, int32_t stream_id, /** * @function * - * Performs post-process of HTTP Upgrade request. This function can - * be called from both client and server, but the behavior is very - * different in each other. - * * .. warning:: * * This function is deprecated in favor of @@ -3730,6 +4269,10 @@ nghttp2_session_create_idle_stream(nghttp2_session *session, int32_t stream_id, * HEAD is used in request, the length of response body must be 0 * regardless of value included in content-length header field. * + * Performs post-process of HTTP Upgrade request. This function can + * be called from both client and server, but the behavior is very + * different in each other. + * * If called from client side, the |settings_payload| must be the * value sent in ``HTTP2-Settings`` header field and must be decoded * by base64url decoder. The |settings_payloadlen| is the length of @@ -3809,9 +4352,14 @@ NGHTTP2_EXTERN int nghttp2_session_upgrade2(nghttp2_session *session, int head_request, void *stream_user_data); +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use `nghttp2_pack_settings_payload2()` instead. + * * Serializes the SETTINGS values |iv| in the |buf|. The size of the * |buf| is specified by |buflen|. The number of entries in the |iv| * array is given by |niv|. The required space in |buf| for the |niv| @@ -3833,6 +4381,32 @@ NGHTTP2_EXTERN int nghttp2_session_upgrade2(nghttp2_session *session, NGHTTP2_EXTERN ssize_t nghttp2_pack_settings_payload( uint8_t *buf, size_t buflen, const nghttp2_settings_entry *iv, size_t niv); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Serializes the SETTINGS values |iv| in the |buf|. The size of the + * |buf| is specified by |buflen|. The number of entries in the |iv| + * array is given by |niv|. The required space in |buf| for the |niv| + * entries is ``6*niv`` bytes and if the given buffer is too small, an + * error is returned. This function is used mainly for creating a + * SETTINGS payload to be sent with the ``HTTP2-Settings`` header + * field in an HTTP Upgrade request. The data written in |buf| is NOT + * base64url encoded and the application is responsible for encoding. + * + * This function returns the number of bytes written in |buf|, or one + * of the following negative error codes: + * + * :enum:`nghttp2_error.NGHTTP2_ERR_INVALID_ARGUMENT` + * The |iv| contains duplicate settings ID or invalid value. + * + * :enum:`nghttp2_error.NGHTTP2_ERR_INSUFF_BUFSIZE` + * The provided |buflen| size is too small to hold the output. + */ +NGHTTP2_EXTERN nghttp2_ssize nghttp2_pack_settings_payload2( + uint8_t *buf, size_t buflen, const nghttp2_settings_entry *iv, size_t niv); + /** * @function * @@ -3854,6 +4428,12 @@ NGHTTP2_EXTERN const char *nghttp2_http2_strerror(uint32_t error_code); /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. 
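[Editorial note: a sketch of producing an ``HTTP2-Settings`` value with nghttp2_pack_settings_payload2(). The base64url_encode() helper is hypothetical; the library leaves the encoding to the application.]

#include <nghttp2/nghttp2.h>

/* Hypothetical base64url encoder supplied by the application. */
extern size_t base64url_encode(char *dst, size_t dstlen, const uint8_t *src,
                               size_t srclen);

/* Serializes one SETTINGS entry for the HTTP2-Settings header field. */
static nghttp2_ssize make_http2_settings(char *out, size_t outlen) {
  nghttp2_settings_entry iv[1] = {
      {NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 100}};
  uint8_t payload[64];
  nghttp2_ssize payloadlen;

  payloadlen = nghttp2_pack_settings_payload2(payload, sizeof(payload), iv,
                                              sizeof(iv) / sizeof(iv[0]));
  if (payloadlen < 0)
    return payloadlen; /* invalid entry or buffer too small */

  /* The raw payload is NOT base64url encoded by the library. */
  return (nghttp2_ssize)base64url_encode(out, outlen, payload,
                                         (size_t)payloadlen);
}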
+ * * Initializes |pri_spec| with the |stream_id| of the stream to depend * on with |weight| and its exclusive flag. If |exclusive| is * nonzero, exclusive flag is set. @@ -3868,6 +4448,12 @@ NGHTTP2_EXTERN void nghttp2_priority_spec_init(nghttp2_priority_spec *pri_spec, /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. + * * Initializes |pri_spec| with the default values. The default values * are: stream_id = 0, weight = :macro:`NGHTTP2_DEFAULT_WEIGHT` and * exclusive = 0. @@ -3878,18 +4464,29 @@ nghttp2_priority_spec_default_init(nghttp2_priority_spec *pri_spec); /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. + * * Returns nonzero if the |pri_spec| is filled with default values. */ NGHTTP2_EXTERN int nghttp2_priority_spec_check_default(const nghttp2_priority_spec *pri_spec); +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use `nghttp2_submit_request2()` instead. + * * Submits HEADERS frame and optionally one or more DATA frames. * - * The |pri_spec| is priority specification of this request. ``NULL`` - * means the default priority (see + * The |pri_spec| is a deprecated priority specification of this + * request. ``NULL`` means the default priority (see * `nghttp2_priority_spec_default_init()`). To specify the priority, * use `nghttp2_priority_spec_init()`. If |pri_spec| is not ``NULL``, * this function will copy its data members. @@ -3970,11 +4567,107 @@ NGHTTP2_EXTERN int32_t nghttp2_submit_request( const nghttp2_nv *nva, size_t nvlen, const nghttp2_data_provider *data_prd, void *stream_user_data); +#endif /* NGHTTP2_NO_SSIZE_T */ + /** * @function * - * Submits response HEADERS frame and optionally one or more DATA - * frames against the stream |stream_id|. + * Submits HEADERS frame and optionally one or more DATA frames. + * + * The |pri_spec| is a deprecated priority specification of this + * request. ``NULL`` means the default priority (see + * `nghttp2_priority_spec_default_init()`). To specify the priority, + * use `nghttp2_priority_spec_init()`. If |pri_spec| is not ``NULL``, + * this function will copy its data members. In the future release + * after the end of 2024, this function will ignore |pri_spec| and + * behave as if ``NULL`` is given. + * + * The ``pri_spec->weight`` must be in [:macro:`NGHTTP2_MIN_WEIGHT`, + * :macro:`NGHTTP2_MAX_WEIGHT`], inclusive. If ``pri_spec->weight`` + * is strictly less than :macro:`NGHTTP2_MIN_WEIGHT`, it becomes + * :macro:`NGHTTP2_MIN_WEIGHT`. If it is strictly greater than + * :macro:`NGHTTP2_MAX_WEIGHT`, it becomes + * :macro:`NGHTTP2_MAX_WEIGHT`. + * + * If + * :enum:`nghttp2_settings_id.NGHTTP2_SETTINGS_NO_RFC7540_PRIORITIES` + * of value of 1 is received by a remote endpoint, |pri_spec| is + * ignored, and treated as if ``NULL`` is specified. + * + * The |nva| is an array of name/value pair :type:`nghttp2_nv` with + * |nvlen| elements. The application is responsible to include + * required pseudo-header fields (header field whose name starts with + * ":") in |nva| and must place pseudo-headers before regular header + * fields. + * + * This function creates copies of all name/value pairs in |nva|. It + * also lower-cases all names in |nva|. The order of elements in + * |nva| is preserved. 
For header fields with + * :enum:`nghttp2_nv_flag.NGHTTP2_NV_FLAG_NO_COPY_NAME` and + * :enum:`nghttp2_nv_flag.NGHTTP2_NV_FLAG_NO_COPY_VALUE` are set, + * header field name and value are not copied respectively. With + * :enum:`nghttp2_nv_flag.NGHTTP2_NV_FLAG_NO_COPY_NAME`, application + * is responsible to pass header field name in lowercase. The + * application should maintain the references to them until + * :type:`nghttp2_on_frame_send_callback` or + * :type:`nghttp2_on_frame_not_send_callback` is called. + * + * HTTP/2 specification has requirement about header fields in the + * request HEADERS. See the specification for more details. + * + * If |data_prd| is not ``NULL``, it provides data which will be sent + * in subsequent DATA frames. In this case, a method that allows + * request message bodies + * (https://tools.ietf.org/html/rfc7231#section-4) must be specified + * with ``:method`` key in |nva| (e.g. ``POST``). This function does + * not take ownership of the |data_prd|. The function copies the + * members of the |data_prd|. If |data_prd| is ``NULL``, HEADERS have + * END_STREAM set. The |stream_user_data| is data associated to the + * stream opened by this request and can be an arbitrary pointer, + * which can be retrieved later by + * `nghttp2_session_get_stream_user_data()`. + * + * This function returns assigned stream ID if it succeeds, or one of + * the following negative error codes: + * + * :enum:`nghttp2_error.NGHTTP2_ERR_NOMEM` + * Out of memory. + * :enum:`nghttp2_error.NGHTTP2_ERR_STREAM_ID_NOT_AVAILABLE` + * No stream ID is available because maximum stream ID was + * reached. + * :enum:`nghttp2_error.NGHTTP2_ERR_INVALID_ARGUMENT` + * Trying to depend on itself (new stream ID equals + * ``pri_spec->stream_id``). + * :enum:`nghttp2_error.NGHTTP2_ERR_PROTO` + * The |session| is server session. + * + * .. warning:: + * + * This function returns assigned stream ID if it succeeds. But + * that stream is not created yet. The application must not submit + * frame to that stream ID before + * :type:`nghttp2_before_frame_send_callback` is called for this + * frame. This means `nghttp2_session_get_stream_user_data()` does + * not work before the callback. But + * `nghttp2_session_set_stream_user_data()` handles this situation + * specially, and it can set data to a stream during this period. + * + */ +NGHTTP2_EXTERN int32_t nghttp2_submit_request2( + nghttp2_session *session, const nghttp2_priority_spec *pri_spec, + const nghttp2_nv *nva, size_t nvlen, const nghttp2_data_provider2 *data_prd, + void *stream_user_data); + +#ifndef NGHTTP2_NO_SSIZE_T +/** + * @function + * + * .. warning:: + * + * Deprecated. Use `nghttp2_submit_response2()` instead. + * + * Submits response HEADERS frame and optionally one or more DATA + * frames against the stream |stream_id|. * * The |nva| is an array of name/value pair :type:`nghttp2_nv` with * |nvlen| elements. The application is responsible to include @@ -4039,6 +4732,77 @@ nghttp2_submit_response(nghttp2_session *session, int32_t stream_id, const nghttp2_nv *nva, size_t nvlen, const nghttp2_data_provider *data_prd); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Submits response HEADERS frame and optionally one or more DATA + * frames against the stream |stream_id|. + * + * The |nva| is an array of name/value pair :type:`nghttp2_nv` with + * |nvlen| elements. 
The application is responsible to include + * required pseudo-header fields (header field whose name starts with + * ":") in |nva| and must place pseudo-headers before regular header + * fields. + * + * This function creates copies of all name/value pairs in |nva|. It + * also lower-cases all names in |nva|. The order of elements in + * |nva| is preserved. For header fields with + * :enum:`nghttp2_nv_flag.NGHTTP2_NV_FLAG_NO_COPY_NAME` and + * :enum:`nghttp2_nv_flag.NGHTTP2_NV_FLAG_NO_COPY_VALUE` are set, + * header field name and value are not copied respectively. With + * :enum:`nghttp2_nv_flag.NGHTTP2_NV_FLAG_NO_COPY_NAME`, application + * is responsible to pass header field name in lowercase. The + * application should maintain the references to them until + * :type:`nghttp2_on_frame_send_callback` or + * :type:`nghttp2_on_frame_not_send_callback` is called. + * + * HTTP/2 specification has requirement about header fields in the + * response HEADERS. See the specification for more details. + * + * If |data_prd| is not ``NULL``, it provides data which will be sent + * in subsequent DATA frames. This function does not take ownership + * of the |data_prd|. The function copies the members of the + * |data_prd|. If |data_prd| is ``NULL``, HEADERS will have + * END_STREAM flag set. + * + * This method can be used as normal HTTP response and push response. + * When pushing a resource using this function, the |session| must be + * configured using `nghttp2_session_server_new()` or its variants and + * the target stream denoted by the |stream_id| must be reserved using + * `nghttp2_submit_push_promise()`. + * + * To send non-final response headers (e.g., HTTP status 101), don't + * use this function because this function half-closes the outbound + * stream. Instead, use `nghttp2_submit_headers()` for this purpose. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :enum:`nghttp2_error.NGHTTP2_ERR_NOMEM` + * Out of memory. + * :enum:`nghttp2_error.NGHTTP2_ERR_INVALID_ARGUMENT` + * The |stream_id| is 0. + * :enum:`nghttp2_error.NGHTTP2_ERR_DATA_EXIST` + * DATA or HEADERS has been already submitted and not fully + * processed yet. Normally, this does not happen, but when + * application wrongly calls `nghttp2_submit_response2()` twice, + * this may happen. + * :enum:`nghttp2_error.NGHTTP2_ERR_PROTO` + * The |session| is client session. + * + * .. warning:: + * + * Calling this function twice for the same stream ID may lead to + * program crash. It is generally considered to a programming error + * to commit response twice. + */ +NGHTTP2_EXTERN int +nghttp2_submit_response2(nghttp2_session *session, int32_t stream_id, + const nghttp2_nv *nva, size_t nvlen, + const nghttp2_data_provider2 *data_prd); + /** * @function * @@ -4064,22 +4828,23 @@ nghttp2_submit_response(nghttp2_session *session, int32_t stream_id, * DATA without END_STREAM flat set. The library does not enforce * this requirement, and applications should do this for themselves. * If `nghttp2_submit_trailer()` is called before any response HEADERS - * submission (usually by `nghttp2_submit_response()`), the content of - * |nva| will be sent as response headers, which will result in error. + * submission (usually by `nghttp2_submit_response2()`), the content + * of |nva| will be sent as response headers, which will result in + * error. 
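[Editorial note: a sketch of answering a request with nghttp2_submit_response2(). The MAKE_NV macro and send_response() are invented for the example; |data_prd| may be the nghttp2_data_provider2 sketched earlier, or NULL for an empty body.]

#include <nghttp2/nghttp2.h>

/* Builds an nghttp2_nv from string literals (sketch only). */
#define MAKE_NV(NAME, VALUE)                                                   \
  {(uint8_t *)NAME, (uint8_t *)VALUE, sizeof(NAME) - 1, sizeof(VALUE) - 1,     \
   NGHTTP2_NV_FLAG_NONE}

static int send_response(nghttp2_session *session, int32_t stream_id,
                         const nghttp2_data_provider2 *data_prd) {
  nghttp2_nv hdrs[] = {
      MAKE_NV(":status", "200"),
      MAKE_NV("content-type", "text/plain"),
  };
  return nghttp2_submit_response2(session, stream_id, hdrs,
                                  sizeof(hdrs) / sizeof(hdrs[0]), data_prd);
}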
* * This function has the same effect with `nghttp2_submit_headers()`, * with flags = :enum:`nghttp2_flag.NGHTTP2_FLAG_END_STREAM` and both * pri_spec and stream_user_data to NULL. * - * To submit trailer fields after `nghttp2_submit_response()` is + * To submit trailer fields after `nghttp2_submit_response2()` is * called, the application has to specify - * :type:`nghttp2_data_provider` to `nghttp2_submit_response()`. - * Inside of :type:`nghttp2_data_source_read_callback`, when setting + * :type:`nghttp2_data_provider2` to `nghttp2_submit_response2()`. + * Inside of :type:`nghttp2_data_source_read_callback2`, when setting * :enum:`nghttp2_data_flag.NGHTTP2_DATA_FLAG_EOF`, also set * :enum:`nghttp2_data_flag.NGHTTP2_DATA_FLAG_NO_END_STREAM`. After * that, the application can send trailer fields using * `nghttp2_submit_trailer()`. `nghttp2_submit_trailer()` can be used - * inside :type:`nghttp2_data_source_read_callback`. + * inside :type:`nghttp2_data_source_read_callback2`. * * This function returns 0 if it succeeds and |stream_id| is -1. * Otherwise, this function returns 0 if it succeeds, or one of the @@ -4114,11 +4879,13 @@ NGHTTP2_EXTERN int nghttp2_submit_trailer(nghttp2_session *session, * assigned stream ID will be returned. Otherwise, specify stream ID * in |stream_id|. * - * The |pri_spec| is priority specification of this request. ``NULL`` - * means the default priority (see + * The |pri_spec| is a deprecated priority specification of this + * request. ``NULL`` means the default priority (see * `nghttp2_priority_spec_default_init()`). To specify the priority, * use `nghttp2_priority_spec_init()`. If |pri_spec| is not ``NULL``, - * this function will copy its data members. + * this function will copy its data members. In the future release + * after the end of 2024, this function will ignore |pri_spec| and + * behave as if ``NULL`` is given. * * The ``pri_spec->weight`` must be in [:macro:`NGHTTP2_MIN_WEIGHT`, * :macro:`NGHTTP2_MAX_WEIGHT`], inclusive. If ``pri_spec->weight`` @@ -4156,8 +4923,8 @@ NGHTTP2_EXTERN int nghttp2_submit_trailer(nghttp2_session *session, * * This function is low-level in a sense that the application code can * specify flags directly. For usual HTTP request, - * `nghttp2_submit_request()` is useful. Likewise, for HTTP response, - * prefer `nghttp2_submit_response()`. + * `nghttp2_submit_request2()` is useful. Likewise, for HTTP + * response, prefer `nghttp2_submit_response2()`. * * This function returns newly assigned stream ID if it succeeds and * |stream_id| is -1. Otherwise, this function returns 0 if it @@ -4192,9 +4959,14 @@ NGHTTP2_EXTERN int32_t nghttp2_submit_headers( const nghttp2_priority_spec *pri_spec, const nghttp2_nv *nva, size_t nvlen, void *stream_user_data); +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use `nghttp2_submit_data2()` instead. + * * Submits one or more DATA frames to the stream |stream_id|. The * data to be sent are provided by |data_prd|. If |flags| contains * :enum:`nghttp2_flag.NGHTTP2_FLAG_END_STREAM`, the last DATA frame @@ -4237,19 +5009,73 @@ NGHTTP2_EXTERN int nghttp2_submit_data(nghttp2_session *session, uint8_t flags, int32_t stream_id, const nghttp2_data_provider *data_prd); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Submits one or more DATA frames to the stream |stream_id|. The + * data to be sent are provided by |data_prd|. If |flags| contains + * :enum:`nghttp2_flag.NGHTTP2_FLAG_END_STREAM`, the last DATA frame + * has END_STREAM flag set. 
+ * + * This function does not take ownership of the |data_prd|. The + * function copies the members of the |data_prd|. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :enum:`nghttp2_error.NGHTTP2_ERR_NOMEM` + * Out of memory. + * :enum:`nghttp2_error.NGHTTP2_ERR_DATA_EXIST` + * DATA or HEADERS has been already submitted and not fully + * processed yet. + * :enum:`nghttp2_error.NGHTTP2_ERR_INVALID_ARGUMENT` + * The |stream_id| is 0. + * :enum:`nghttp2_error.NGHTTP2_ERR_STREAM_CLOSED` + * The stream was already closed; or the |stream_id| is invalid. + * + * .. note:: + * + * Currently, only one DATA or HEADERS is allowed for a stream at a + * time. Submitting these frames more than once before first DATA + * or HEADERS is finished results in + * :enum:`nghttp2_error.NGHTTP2_ERR_DATA_EXIST` error code. The + * earliest callback which tells that previous frame is done is + * :type:`nghttp2_on_frame_send_callback`. In side that callback, + * new data can be submitted using `nghttp2_submit_data2()`. Of + * course, all data except for last one must not have + * :enum:`nghttp2_flag.NGHTTP2_FLAG_END_STREAM` flag set in |flags|. + * This sounds a bit complicated, and we recommend to use + * `nghttp2_submit_request2()` and `nghttp2_submit_response2()` to + * avoid this cascading issue. The experience shows that for HTTP + * use, these two functions are enough to implement both client and + * server. + */ +NGHTTP2_EXTERN int nghttp2_submit_data2(nghttp2_session *session, uint8_t flags, + int32_t stream_id, + const nghttp2_data_provider2 *data_prd); + /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. In the future release after the end of + * 2024, this function will always return 0 without doing anything. + * * Submits PRIORITY frame to change the priority of stream |stream_id| * to the priority specification |pri_spec|. * * The |flags| is currently ignored and should be * :enum:`nghttp2_flag.NGHTTP2_FLAG_NONE`. * - * The |pri_spec| is priority specification of this request. ``NULL`` - * is not allowed for this function. To specify the priority, use - * `nghttp2_priority_spec_init()`. This function will copy its data - * members. + * The |pri_spec| is a deprecated priority specification of this + * request. ``NULL`` is not allowed for this function. To specify the + * priority, use `nghttp2_priority_spec_init()`. This function will + * copy its data members. * * The ``pri_spec->weight`` must be in [:macro:`NGHTTP2_MIN_WEIGHT`, * :macro:`NGHTTP2_MAX_WEIGHT`], inclusive. If ``pri_spec->weight`` @@ -4429,7 +5255,7 @@ NGHTTP2_EXTERN int nghttp2_submit_settings(nghttp2_session *session, * The client side is not allowed to use this function. * * To submit response headers and data, use - * `nghttp2_submit_response()`. + * `nghttp2_submit_response2()`. * * This function returns assigned promised stream ID if it succeeds, * or one of the following negative error codes: @@ -4568,10 +5394,11 @@ nghttp2_session_get_last_proc_stream_id(nghttp2_session *session); * reasons are: session is server; stream ID has been spent; GOAWAY * has been sent or received. * - * The application can call `nghttp2_submit_request()` without - * consulting this function. In that case, `nghttp2_submit_request()` - * may return error. Or, request is failed to sent, and - * :type:`nghttp2_on_stream_close_callback` is called. 
+ * The application can call `nghttp2_submit_request2()` without + * consulting this function. In that case, + * `nghttp2_submit_request2()` may return error. Or, request is + * failed to sent, and :type:`nghttp2_on_stream_close_callback` is + * called. */ NGHTTP2_EXTERN int nghttp2_session_check_request_allowed(nghttp2_session *session); @@ -4673,11 +5500,11 @@ nghttp2_session_set_local_window_size(nghttp2_session *session, uint8_t flags, * Application can pass arbitrary frame flags and stream ID in |flags| * and |stream_id| respectively. The |payload| is opaque pointer, and * it can be accessible though ``frame->ext.payload`` in - * :type:`nghttp2_pack_extension_callback`. The library will not own + * :type:`nghttp2_pack_extension_callback2`. The library will not own * passed |payload| pointer. * - * The application must set :type:`nghttp2_pack_extension_callback` - * using `nghttp2_session_callbacks_set_pack_extension_callback()`. + * The application must set :type:`nghttp2_pack_extension_callback2` + * using `nghttp2_session_callbacks_set_pack_extension_callback2()`. * * The application should retain the memory pointed by |payload| until * the transmission of extension frame is done (which is indicated by @@ -4685,7 +5512,7 @@ nghttp2_session_set_local_window_size(nghttp2_session *session, uint8_t flags, * (which is indicated by :type:`nghttp2_on_frame_not_send_callback`). * If application does not touch this memory region after packing it * into a wire format, application can free it inside - * :type:`nghttp2_pack_extension_callback`. + * :type:`nghttp2_pack_extension_callback2`. * * The standard HTTP/2 frame cannot be sent with this function, so * |type| must be strictly grater than 0x9. Otherwise, this function @@ -4696,7 +5523,7 @@ nghttp2_session_set_local_window_size(nghttp2_session *session, uint8_t flags, * negative error codes: * * :enum:`nghttp2_error.NGHTTP2_ERR_INVALID_STATE` - * If :type:`nghttp2_pack_extension_callback` is not set. + * If :type:`nghttp2_pack_extension_callback2` is not set. * :enum:`nghttp2_error.NGHTTP2_ERR_INVALID_ARGUMENT` * If |type| specifies standard HTTP/2 frame type. The frame * types in the rage [0x0, 0x9], both inclusive, are standard @@ -5317,9 +6144,14 @@ NGHTTP2_EXTERN int nghttp2_hd_deflate_change_table_size(nghttp2_hd_deflater *deflater, size_t settings_max_dynamic_table_size); +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use `nghttp2_hd_deflate_hd2()` instead. + * * Deflates the |nva|, which has the |nvlen| name/value pairs, into * the |buf| of length |buflen|. * @@ -5349,10 +6181,48 @@ NGHTTP2_EXTERN ssize_t nghttp2_hd_deflate_hd(nghttp2_hd_deflater *deflater, const nghttp2_nv *nva, size_t nvlen); +#endif /* NGHTTP2_NO_SSIZE_T */ + /** * @function * * Deflates the |nva|, which has the |nvlen| name/value pairs, into + * the |buf| of length |buflen|. + * + * If |buf| is not large enough to store the deflated header block, + * this function fails with + * :enum:`nghttp2_error.NGHTTP2_ERR_INSUFF_BUFSIZE`. The caller + * should use `nghttp2_hd_deflate_bound()` to know the upper bound of + * buffer size required to deflate given header name/value pairs. + * + * Once this function fails, subsequent call of this function always + * returns :enum:`nghttp2_error.NGHTTP2_ERR_HEADER_COMP`. + * + * After this function returns, it is safe to delete the |nva|. 
+ * + * This function returns the number of bytes written to |buf| if it + * succeeds, or one of the following negative error codes: + * + * :enum:`nghttp2_error.NGHTTP2_ERR_NOMEM` + * Out of memory. + * :enum:`nghttp2_error.NGHTTP2_ERR_HEADER_COMP` + * Deflation process has failed. + * :enum:`nghttp2_error.NGHTTP2_ERR_INSUFF_BUFSIZE` + * The provided |buflen| size is too small to hold the output. + */ +NGHTTP2_EXTERN nghttp2_ssize +nghttp2_hd_deflate_hd2(nghttp2_hd_deflater *deflater, uint8_t *buf, + size_t buflen, const nghttp2_nv *nva, size_t nvlen); + +#ifndef NGHTTP2_NO_SSIZE_T +/** + * @function + * + * .. warning:: + * + * Deprecated. Use `nghttp2_hd_deflate_hd_vec2()` instead. + * + * Deflates the |nva|, which has the |nvlen| name/value pairs, into * the |veclen| size of buf vector |vec|. The each size of buffer * must be set in len field of :type:`nghttp2_vec`. If and only if * one chunk is filled up completely, next chunk will be used. If @@ -5383,6 +6253,40 @@ NGHTTP2_EXTERN ssize_t nghttp2_hd_deflate_hd_vec(nghttp2_hd_deflater *deflater, const nghttp2_nv *nva, size_t nvlen); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Deflates the |nva|, which has the |nvlen| name/value pairs, into + * the |veclen| size of buf vector |vec|. The each size of buffer + * must be set in len field of :type:`nghttp2_vec`. If and only if + * one chunk is filled up completely, next chunk will be used. If + * |vec| is not large enough to store the deflated header block, this + * function fails with + * :enum:`nghttp2_error.NGHTTP2_ERR_INSUFF_BUFSIZE`. The caller + * should use `nghttp2_hd_deflate_bound()` to know the upper bound of + * buffer size required to deflate given header name/value pairs. + * + * Once this function fails, subsequent call of this function always + * returns :enum:`nghttp2_error.NGHTTP2_ERR_HEADER_COMP`. + * + * After this function returns, it is safe to delete the |nva|. + * + * This function returns the number of bytes written to |vec| if it + * succeeds, or one of the following negative error codes: + * + * :enum:`nghttp2_error.NGHTTP2_ERR_NOMEM` + * Out of memory. + * :enum:`nghttp2_error.NGHTTP2_ERR_HEADER_COMP` + * Deflation process has failed. + * :enum:`nghttp2_error.NGHTTP2_ERR_INSUFF_BUFSIZE` + * The provided |buflen| size is too small to hold the output. + */ +NGHTTP2_EXTERN nghttp2_ssize nghttp2_hd_deflate_hd_vec2( + nghttp2_hd_deflater *deflater, const nghttp2_vec *vec, size_t veclen, + const nghttp2_nv *nva, size_t nvlen); + /** * @function * @@ -5496,7 +6400,7 @@ NGHTTP2_EXTERN void nghttp2_hd_inflate_del(nghttp2_hd_inflater *inflater); * This function must not be called while header block is being * inflated. In other words, this function must be called after * initialization of |inflater|, but before calling - * `nghttp2_hd_inflate_hd2()`, or after + * `nghttp2_hd_inflate_hd3()`, or after * `nghttp2_hd_inflate_end_headers()`. Otherwise, * `NGHTTP2_ERR_INVALID_STATE` was returned. * @@ -5534,6 +6438,7 @@ typedef enum { NGHTTP2_HD_INFLATE_EMIT = 0x02 } nghttp2_hd_inflate_flag; +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * @@ -5621,9 +6526,16 @@ NGHTTP2_EXTERN ssize_t nghttp2_hd_inflate_hd(nghttp2_hd_inflater *inflater, int *inflate_flags, uint8_t *in, size_t inlen, int in_final); +#endif /* NGHTTP2_NO_SSIZE_T */ + +#ifndef NGHTTP2_NO_SSIZE_T /** * @function * + * .. warning:: + * + * Deprecated. Use `nghttp2_hd_inflate_hd3()` instead. + * * Inflates name/value block stored in |in| with length |inlen|. This * function performs decompression. 
For each successful emission of * header name/value pair, @@ -5710,6 +6622,95 @@ NGHTTP2_EXTERN ssize_t nghttp2_hd_inflate_hd2(nghttp2_hd_inflater *inflater, const uint8_t *in, size_t inlen, int in_final); +#endif /* NGHTTP2_NO_SSIZE_T */ + +/** + * @function + * + * Inflates name/value block stored in |in| with length |inlen|. This + * function performs decompression. For each successful emission of + * header name/value pair, + * :enum:`nghttp2_hd_inflate_flag.NGHTTP2_HD_INFLATE_EMIT` is set in + * |*inflate_flags| and name/value pair is assigned to the |nv_out| + * and the function returns. The caller must not free the members of + * |nv_out|. + * + * The |nv_out| may include pointers to the memory region in the |in|. + * The caller must retain the |in| while the |nv_out| is used. + * + * The application should call this function repeatedly until the + * ``(*inflate_flags) & NGHTTP2_HD_INFLATE_FINAL`` is nonzero and + * return value is non-negative. If that happens, all given input + * data (|inlen| bytes) are processed successfully. Then the + * application must call `nghttp2_hd_inflate_end_headers()` to prepare + * for the next header block input. + * + * In other words, if |in_final| is nonzero, and this function returns + * |inlen|, you can assert that + * :enum:`nghttp2_hd_inflate_final.NGHTTP2_HD_INFLATE_FINAL` is set in + * |*inflate_flags|. + * + * The caller can feed complete compressed header block. It also can + * feed it in several chunks. The caller must set |in_final| to + * nonzero if the given input is the last block of the compressed + * header. + * + * This function returns the number of bytes processed if it succeeds, + * or one of the following negative error codes: + * + * :enum:`nghttp2_error.NGHTTP2_ERR_NOMEM` + * Out of memory. + * :enum:`nghttp2_error.NGHTTP2_ERR_HEADER_COMP` + * Inflation process has failed. + * :enum:`nghttp2_error.NGHTTP2_ERR_BUFFER_ERROR` + * The header field name or value is too large. + * + * Example follows:: + * + * int inflate_header_block(nghttp2_hd_inflater *hd_inflater, + * uint8_t *in, size_t inlen, int final) + * { + * nghttp2_ssize rv; + * + * for(;;) { + * nghttp2_nv nv; + * int inflate_flags = 0; + * + * rv = nghttp2_hd_inflate_hd3(hd_inflater, &nv, &inflate_flags, + * in, inlen, final); + * + * if(rv < 0) { + * fprintf(stderr, "inflate failed with error code %td", rv); + * return -1; + * } + * + * in += rv; + * inlen -= rv; + * + * if(inflate_flags & NGHTTP2_HD_INFLATE_EMIT) { + * fwrite(nv.name, nv.namelen, 1, stderr); + * fprintf(stderr, ": "); + * fwrite(nv.value, nv.valuelen, 1, stderr); + * fprintf(stderr, "\n"); + * } + * if(inflate_flags & NGHTTP2_HD_INFLATE_FINAL) { + * nghttp2_hd_inflate_end_headers(hd_inflater); + * break; + * } + * if((inflate_flags & NGHTTP2_HD_INFLATE_EMIT) == 0 && + * inlen == 0) { + * break; + * } + * } + * + * return 0; + * } + * + */ +NGHTTP2_EXTERN nghttp2_ssize nghttp2_hd_inflate_hd3( + nghttp2_hd_inflater *inflater, nghttp2_nv *nv_out, int *inflate_flags, + const uint8_t *in, size_t inlen, int in_final); + /** * @function * @@ -5783,8 +6784,8 @@ typedef struct nghttp2_stream nghttp2_stream; * `nghttp2_session_get_root_stream()`) if 0 is given in |stream_id|. * * Unless |stream_id| == 0, the returned pointer is valid until next - * call of `nghttp2_session_send()`, `nghttp2_session_mem_send()`, - * `nghttp2_session_recv()`, and `nghttp2_session_mem_recv()`. + * call of `nghttp2_session_send()`, `nghttp2_session_mem_send2()`, + * `nghttp2_session_recv()`, and `nghttp2_session_mem_recv2()`. 
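A short sketch of the pointer-validity rule above in practice: look a stream up, inspect its state, and do not hold the pointer across the next `nghttp2_session_mem_send2()` or `nghttp2_session_mem_recv2()` call. The `stream_is_open` helper and its notion of "open" are assumptions for illustration:

    #include <nghttp2/nghttp2.h>

    /* Returns nonzero if |stream_id| exists and we can still send on
       it.  The pointer returned by nghttp2_session_find_stream() is
       not stored anywhere because it becomes stale after the next
       mem_send2()/mem_recv2() call. */
    static int stream_is_open(nghttp2_session *session, int32_t stream_id) {
      nghttp2_stream *stream =
          nghttp2_session_find_stream(session, stream_id);

      if (stream == NULL) {
        return 0;
      }

      switch (nghttp2_stream_get_state(stream)) {
      case NGHTTP2_STREAM_STATE_CLOSED:
      case NGHTTP2_STREAM_STATE_HALF_CLOSED_LOCAL:
        return 0;
      default:
        return 1;
      }
    }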
*/ NGHTTP2_EXTERN nghttp2_stream * nghttp2_session_find_stream(nghttp2_session *session, int32_t stream_id); @@ -5838,6 +6839,12 @@ nghttp2_stream_get_state(nghttp2_stream *stream); /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. + * * Returns root of dependency tree, which is imaginary stream with * stream ID 0. The returned pointer is valid until |session| is * freed by `nghttp2_session_del()`. @@ -5848,6 +6855,13 @@ nghttp2_session_get_root_stream(nghttp2_session *session); /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. In the future release after the end of + * 2024, this function will always return NULL. + * * Returns the parent stream of |stream| in dependency tree. Returns * NULL if there is no such stream. */ @@ -5859,6 +6873,13 @@ NGHTTP2_EXTERN int32_t nghttp2_stream_get_stream_id(nghttp2_stream *stream); /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. In the future release after the end of + * 2024, this function will always return NULL. + * * Returns the next sibling stream of |stream| in dependency tree. * Returns NULL if there is no such stream. */ @@ -5868,6 +6889,13 @@ nghttp2_stream_get_next_sibling(nghttp2_stream *stream); /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. In the future release after the end of + * 2024, this function will always return NULL. + * * Returns the previous sibling stream of |stream| in dependency tree. * Returns NULL if there is no such stream. */ @@ -5877,6 +6905,13 @@ nghttp2_stream_get_previous_sibling(nghttp2_stream *stream); /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. In the future release after the end of + * 2024, this function will always return NULL. + * * Returns the first child stream of |stream| in dependency tree. * Returns NULL if there is no such stream. */ @@ -5886,6 +6921,14 @@ nghttp2_stream_get_first_child(nghttp2_stream *stream); /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. In the future release after the end of + * 2024, this function will always return + * :macro:`NGHTTP2_DEFAULT_WEIGHT`. + * * Returns dependency weight to the parent stream of |stream|. */ NGHTTP2_EXTERN int32_t nghttp2_stream_get_weight(nghttp2_stream *stream); @@ -5893,6 +6936,13 @@ NGHTTP2_EXTERN int32_t nghttp2_stream_get_weight(nghttp2_stream *stream); /** * @function * + * .. warning:: + * + * Deprecated. :rfc:`7540` priorities are deprecated by + * :rfc:`9113`. Consider migrating to :rfc:`9218` extensible + * prioritization scheme. In the future release after the end of + * 2024, this function will always return 0. + * * Returns the sum of the weight for |stream|'s children. 
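Since the deprecation notes above point at :rfc:`9218`, here is a hedged sketch of the replacement path: a client signals urgency with a PRIORITY_UPDATE frame through `nghttp2_submit_priority_update()`. The `set_urgency` helper and the `u=3,i` field value are illustrative only:

    #include <nghttp2/nghttp2.h>

    /* Ask the peer to treat |stream_id| as urgency 3, incremental.
       Returns 0 on success or a negative nghttp2 error code. */
    static int set_urgency(nghttp2_session *session, int32_t stream_id) {
      static const uint8_t field_value[] = "u=3,i";

      return nghttp2_submit_priority_update(session, NGHTTP2_FLAG_NONE,
                                            stream_id, field_value,
                                            sizeof(field_value) - 1);
    }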
*/ NGHTTP2_EXTERN int32_t diff --git a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h index 518755bbab7914..d38b89adc6268a 100644 --- a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h +++ b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h @@ -29,7 +29,7 @@ * @macro * Version number of the nghttp2 library release */ -#define NGHTTP2_VERSION "1.59.0" +#define NGHTTP2_VERSION "1.60.0" /** * @macro @@ -37,6 +37,6 @@ * release. This is a 24 bit number with 8 bits for major number, 8 bits * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203. */ -#define NGHTTP2_VERSION_NUM 0x013b00 +#define NGHTTP2_VERSION_NUM 0x013c00 #endif /* NGHTTP2VER_H */ diff --git a/deps/nghttp2/lib/nghttp2_buf.c b/deps/nghttp2/lib/nghttp2_buf.c index a32844712e4493..101035f923ec64 100644 --- a/deps/nghttp2/lib/nghttp2_buf.c +++ b/deps/nghttp2/lib/nghttp2_buf.c @@ -430,7 +430,7 @@ int nghttp2_bufs_orb_hold(nghttp2_bufs *bufs, uint8_t b) { return 0; } -ssize_t nghttp2_bufs_remove(nghttp2_bufs *bufs, uint8_t **out) { +nghttp2_ssize nghttp2_bufs_remove(nghttp2_bufs *bufs, uint8_t **out) { size_t len; nghttp2_buf_chain *chain; nghttp2_buf *buf; @@ -462,7 +462,7 @@ ssize_t nghttp2_bufs_remove(nghttp2_bufs *bufs, uint8_t **out) { *out = res; - return (ssize_t)len; + return (nghttp2_ssize)len; } size_t nghttp2_bufs_remove_copy(nghttp2_bufs *bufs, uint8_t *out) { diff --git a/deps/nghttp2/lib/nghttp2_buf.h b/deps/nghttp2/lib/nghttp2_buf.h index 45f62f16e271dc..95ff3706a232a3 100644 --- a/deps/nghttp2/lib/nghttp2_buf.h +++ b/deps/nghttp2/lib/nghttp2_buf.h @@ -349,7 +349,7 @@ int nghttp2_bufs_orb_hold(nghttp2_bufs *bufs, uint8_t b); * NGHTTP2_ERR_NOMEM * Out of memory */ -ssize_t nghttp2_bufs_remove(nghttp2_bufs *bufs, uint8_t **out); +nghttp2_ssize nghttp2_bufs_remove(nghttp2_bufs *bufs, uint8_t **out); /* * Copies all data stored in |bufs| to |out|. 
This function assumes diff --git a/deps/nghttp2/lib/nghttp2_callbacks.c b/deps/nghttp2/lib/nghttp2_callbacks.c index 3c38214859b17a..1776f7d276b79f 100644 --- a/deps/nghttp2/lib/nghttp2_callbacks.c +++ b/deps/nghttp2/lib/nghttp2_callbacks.c @@ -45,11 +45,21 @@ void nghttp2_session_callbacks_set_send_callback( cbs->send_callback = send_callback; } +void nghttp2_session_callbacks_set_send_callback2( + nghttp2_session_callbacks *cbs, nghttp2_send_callback2 send_callback) { + cbs->send_callback2 = send_callback; +} + void nghttp2_session_callbacks_set_recv_callback( nghttp2_session_callbacks *cbs, nghttp2_recv_callback recv_callback) { cbs->recv_callback = recv_callback; } +void nghttp2_session_callbacks_set_recv_callback2( + nghttp2_session_callbacks *cbs, nghttp2_recv_callback2 recv_callback) { + cbs->recv_callback2 = recv_callback; +} + void nghttp2_session_callbacks_set_on_frame_recv_callback( nghttp2_session_callbacks *cbs, nghttp2_on_frame_recv_callback on_frame_recv_callback) { @@ -128,12 +138,24 @@ void nghttp2_session_callbacks_set_select_padding_callback( cbs->select_padding_callback = select_padding_callback; } +void nghttp2_session_callbacks_set_select_padding_callback2( + nghttp2_session_callbacks *cbs, + nghttp2_select_padding_callback2 select_padding_callback) { + cbs->select_padding_callback2 = select_padding_callback; +} + void nghttp2_session_callbacks_set_data_source_read_length_callback( nghttp2_session_callbacks *cbs, nghttp2_data_source_read_length_callback data_source_read_length_callback) { cbs->read_length_callback = data_source_read_length_callback; } +void nghttp2_session_callbacks_set_data_source_read_length_callback2( + nghttp2_session_callbacks *cbs, nghttp2_data_source_read_length_callback2 + data_source_read_length_callback) { + cbs->read_length_callback2 = data_source_read_length_callback; +} + void nghttp2_session_callbacks_set_on_begin_frame_callback( nghttp2_session_callbacks *cbs, nghttp2_on_begin_frame_callback on_begin_frame_callback) { @@ -152,6 +174,12 @@ void nghttp2_session_callbacks_set_pack_extension_callback( cbs->pack_extension_callback = pack_extension_callback; } +void nghttp2_session_callbacks_set_pack_extension_callback2( + nghttp2_session_callbacks *cbs, + nghttp2_pack_extension_callback2 pack_extension_callback) { + cbs->pack_extension_callback2 = pack_extension_callback; +} + void nghttp2_session_callbacks_set_unpack_extension_callback( nghttp2_session_callbacks *cbs, nghttp2_unpack_extension_callback unpack_extension_callback) { diff --git a/deps/nghttp2/lib/nghttp2_callbacks.h b/deps/nghttp2/lib/nghttp2_callbacks.h index 61e51fa53638de..a611f485481e7c 100644 --- a/deps/nghttp2/lib/nghttp2_callbacks.h +++ b/deps/nghttp2/lib/nghttp2_callbacks.h @@ -35,20 +35,34 @@ * Callback functions. */ struct nghttp2_session_callbacks { + /** + * Deprecated. Use send_callback2 instead. Callback function + * invoked when the session wants to send data to the remote peer. + * This callback is not necessary if the application uses solely + * `nghttp2_session_mem_send()` to serialize data to transmit. + */ + nghttp2_send_callback send_callback; /** * Callback function invoked when the session wants to send data to * the remote peer. This callback is not necessary if the - * application uses solely `nghttp2_session_mem_send()` to serialize - * data to transmit. + * application uses solely `nghttp2_session_mem_send2()` to + * serialize data to transmit. */ - nghttp2_send_callback send_callback; + nghttp2_send_callback2 send_callback2; + /** + * Deprecated. 
Use recv_callback2 instead. Callback function + * invoked when the session wants to receive data from the remote + * peer. This callback is not necessary if the application uses + * solely `nghttp2_session_mem_recv()` to process received data. + */ + nghttp2_recv_callback recv_callback; /** * Callback function invoked when the session wants to receive data * from the remote peer. This callback is not necessary if the - * application uses solely `nghttp2_session_mem_recv()` to process + * application uses solely `nghttp2_session_mem_recv2()` to process * received data. */ - nghttp2_recv_callback recv_callback; + nghttp2_recv_callback2 recv_callback2; /** * Callback function invoked by `nghttp2_session_recv()` when a * frame is received. @@ -99,23 +113,40 @@ struct nghttp2_session_callbacks { */ nghttp2_on_invalid_header_callback on_invalid_header_callback; nghttp2_on_invalid_header_callback2 on_invalid_header_callback2; + /** + * Deprecated. Use select_padding_callback2 instead. Callback + * function invoked when the library asks application how many + * padding bytes are required for the transmission of the given + * frame. + */ + nghttp2_select_padding_callback select_padding_callback; /** * Callback function invoked when the library asks application how * many padding bytes are required for the transmission of the given * frame. */ - nghttp2_select_padding_callback select_padding_callback; + nghttp2_select_padding_callback2 select_padding_callback2; /** - * The callback function used to determine the length allowed in + * Deprecated. Use read_length_callback2 instead. The callback + * function used to determine the length allowed in * `nghttp2_data_source_read_callback()` */ nghttp2_data_source_read_length_callback read_length_callback; + /** + * The callback function used to determine the length allowed in + * `nghttp2_data_source_read_callback2()` + */ + nghttp2_data_source_read_length_callback2 read_length_callback2; /** * Sets callback function invoked when a frame header is received. */ nghttp2_on_begin_frame_callback on_begin_frame_callback; nghttp2_send_data_callback send_data_callback; + /** + * Deprecated. Use pack_extension_callback2 instead. + */ nghttp2_pack_extension_callback pack_extension_callback; + nghttp2_pack_extension_callback2 pack_extension_callback2; nghttp2_unpack_extension_callback unpack_extension_callback; nghttp2_on_extension_chunk_recv_callback on_extension_chunk_recv_callback; nghttp2_error_callback error_callback; diff --git a/deps/nghttp2/lib/nghttp2_hd.c b/deps/nghttp2/lib/nghttp2_hd.c index 8a2bda64c1fe46..1b0c71331ae61f 100644 --- a/deps/nghttp2/lib/nghttp2_hd.c +++ b/deps/nghttp2/lib/nghttp2_hd.c @@ -850,9 +850,10 @@ static size_t encode_length(uint8_t *buf, size_t n, size_t prefix) { * in the next call will be stored in |*shift_ptr|) and returns number * of bytes processed, or returns -1, indicating decoding error. 
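For readers unfamiliar with the encoding that `decode_length()` handles, below is a simplified, non-resumable sketch of the :rfc:`7541` section 5.1 prefix-integer decoding; the real function additionally carries the |shift| and |fin| state so that it can resume across buffer boundaries. `decode_prefix_int` is illustrative, not library code:

    #include <stddef.h>
    #include <stdint.h>

    /* Decode an HPACK prefix integer from |in| of length |inlen| with
       an N-bit prefix.  Returns the number of bytes consumed, or -1
       on truncated input or when the value would not fit this
       sketch's uint32_t range. */
    static int decode_prefix_int(uint32_t *res, const uint8_t *in,
                                 size_t inlen, size_t prefix) {
      uint32_t k = (uint32_t)((1u << prefix) - 1);
      uint32_t n;
      size_t i, shift;

      if (inlen == 0) {
        return -1;
      }

      n = in[0] & k;
      if (n != k) {
        *res = n;
        return 1;
      }

      for (i = 1, shift = 0; i < inlen; ++i, shift += 7) {
        if (shift > 21) {
          return -1; /* keep the sketch within uint32_t */
        }
        n += (uint32_t)(in[i] & 0x7f) << shift;
        if ((in[i] & 0x80) == 0) {
          *res = n;
          return (int)(i + 1);
        }
      }

      return -1; /* continuation bytes ran past the buffer */
    }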
*/ -static ssize_t decode_length(uint32_t *res, size_t *shift_ptr, int *fin, - uint32_t initial, size_t shift, const uint8_t *in, - const uint8_t *last, size_t prefix) { +static nghttp2_ssize decode_length(uint32_t *res, size_t *shift_ptr, int *fin, + uint32_t initial, size_t shift, + const uint8_t *in, const uint8_t *last, + size_t prefix) { uint32_t k = (uint8_t)((1 << prefix) - 1); uint32_t n = initial; const uint8_t *start = in; @@ -871,7 +872,7 @@ static ssize_t decode_length(uint32_t *res, size_t *shift_ptr, int *fin, if (++in == last) { *res = n; - return (ssize_t)(in - start); + return (nghttp2_ssize)(in - start); } } @@ -906,12 +907,12 @@ static ssize_t decode_length(uint32_t *res, size_t *shift_ptr, int *fin, if (in == last) { *res = n; - return (ssize_t)(in - start); + return (nghttp2_ssize)(in - start); } *res = n; *fin = 1; - return (ssize_t)(in + 1 - start); + return (nghttp2_ssize)(in + 1 - start); } static int emit_table_size(nghttp2_bufs *bufs, size_t table_size) { @@ -1164,7 +1165,7 @@ static int add_hd_table_incremental(nghttp2_hd_context *context, } typedef struct { - ssize_t index; + nghttp2_ssize index; /* Nonzero if both name and value are matched. */ int name_value_match; } search_result; @@ -1213,8 +1214,8 @@ static search_result search_hd_table(nghttp2_hd_context *context, return res; } - res.index = - (ssize_t)(context->next_seq - 1 - ent->seq + NGHTTP2_STATIC_TABLE_LENGTH); + res.index = (nghttp2_ssize)(context->next_seq - 1 - ent->seq + + NGHTTP2_STATIC_TABLE_LENGTH); res.name_value_match = exact_match; return res; @@ -1343,7 +1344,7 @@ static int deflate_nv(nghttp2_hd_deflater *deflater, nghttp2_bufs *bufs, const nghttp2_nv *nv) { int rv; search_result res; - ssize_t idx; + nghttp2_ssize idx; int indexing_mode; int32_t token; nghttp2_mem *mem; @@ -1379,7 +1380,7 @@ static int deflate_nv(nghttp2_hd_deflater *deflater, nghttp2_bufs *bufs, if (res.name_value_match) { - DEBUGF("deflatehd: name/value match index=%zd\n", idx); + DEBUGF("deflatehd: name/value match index=%td\n", idx); rv = emit_indexed_block(bufs, (size_t)idx); if (rv != 0) { @@ -1390,7 +1391,7 @@ static int deflate_nv(nghttp2_hd_deflater *deflater, nghttp2_bufs *bufs, } if (res.index != -1) { - DEBUGF("deflatehd: name match index=%zd\n", res.index); + DEBUGF("deflatehd: name match index=%td\n", res.index); } if (indexing_mode == NGHTTP2_HD_WITH_INDEXING) { @@ -1491,6 +1492,12 @@ int nghttp2_hd_deflate_hd_bufs(nghttp2_hd_deflater *deflater, ssize_t nghttp2_hd_deflate_hd(nghttp2_hd_deflater *deflater, uint8_t *buf, size_t buflen, const nghttp2_nv *nv, size_t nvlen) { + return (ssize_t)nghttp2_hd_deflate_hd2(deflater, buf, buflen, nv, nvlen); +} + +nghttp2_ssize nghttp2_hd_deflate_hd2(nghttp2_hd_deflater *deflater, + uint8_t *buf, size_t buflen, + const nghttp2_nv *nv, size_t nvlen) { nghttp2_bufs bufs; int rv; nghttp2_mem *mem; @@ -1517,12 +1524,18 @@ ssize_t nghttp2_hd_deflate_hd(nghttp2_hd_deflater *deflater, uint8_t *buf, return rv; } - return (ssize_t)buflen; + return (nghttp2_ssize)buflen; } ssize_t nghttp2_hd_deflate_hd_vec(nghttp2_hd_deflater *deflater, const nghttp2_vec *vec, size_t veclen, const nghttp2_nv *nv, size_t nvlen) { + return (ssize_t)nghttp2_hd_deflate_hd_vec2(deflater, vec, veclen, nv, nvlen); +} + +nghttp2_ssize nghttp2_hd_deflate_hd_vec2(nghttp2_hd_deflater *deflater, + const nghttp2_vec *vec, size_t veclen, + const nghttp2_nv *nv, size_t nvlen) { nghttp2_bufs bufs; int rv; nghttp2_mem *mem; @@ -1550,7 +1563,7 @@ ssize_t nghttp2_hd_deflate_hd_vec(nghttp2_hd_deflater *deflater, 
return rv; } - return (ssize_t)buflen; + return (nghttp2_ssize)buflen; } size_t nghttp2_hd_deflate_bound(nghttp2_hd_deflater *deflater, @@ -1643,10 +1656,11 @@ static void hd_inflate_set_huffman_encoded(nghttp2_hd_inflater *inflater, * NGHTTP2_ERR_HEADER_COMP * Integer decoding failed */ -static ssize_t hd_inflate_read_len(nghttp2_hd_inflater *inflater, int *rfin, - const uint8_t *in, const uint8_t *last, - size_t prefix, size_t maxlen) { - ssize_t rv; +static nghttp2_ssize hd_inflate_read_len(nghttp2_hd_inflater *inflater, + int *rfin, const uint8_t *in, + const uint8_t *last, size_t prefix, + size_t maxlen) { + nghttp2_ssize rv; uint32_t out; *rfin = 0; @@ -1684,10 +1698,10 @@ static ssize_t hd_inflate_read_len(nghttp2_hd_inflater *inflater, int *rfin, * NGHTTP2_ERR_HEADER_COMP * Huffman decoding failed */ -static ssize_t hd_inflate_read_huff(nghttp2_hd_inflater *inflater, - nghttp2_buf *buf, const uint8_t *in, - const uint8_t *last) { - ssize_t readlen; +static nghttp2_ssize hd_inflate_read_huff(nghttp2_hd_inflater *inflater, + nghttp2_buf *buf, const uint8_t *in, + const uint8_t *last) { + nghttp2_ssize readlen; int fin = 0; if ((size_t)(last - in) >= inflater->left) { last = in + inflater->left; @@ -1721,14 +1735,15 @@ static ssize_t hd_inflate_read_huff(nghttp2_hd_inflater *inflater, * NGHTTP2_ERR_HEADER_COMP * Header decompression failed */ -static ssize_t hd_inflate_read(nghttp2_hd_inflater *inflater, nghttp2_buf *buf, - const uint8_t *in, const uint8_t *last) { +static nghttp2_ssize hd_inflate_read(nghttp2_hd_inflater *inflater, + nghttp2_buf *buf, const uint8_t *in, + const uint8_t *last) { size_t len = nghttp2_min((size_t)(last - in), inflater->left); buf->last = nghttp2_cpymem(buf->last, in, len); inflater->left -= len; - return (ssize_t)len; + return (nghttp2_ssize)len; } /* @@ -1843,7 +1858,15 @@ ssize_t nghttp2_hd_inflate_hd(nghttp2_hd_inflater *inflater, nghttp2_nv *nv_out, ssize_t nghttp2_hd_inflate_hd2(nghttp2_hd_inflater *inflater, nghttp2_nv *nv_out, int *inflate_flags, const uint8_t *in, size_t inlen, int in_final) { - ssize_t rv; + return (nghttp2_ssize)nghttp2_hd_inflate_hd3(inflater, nv_out, inflate_flags, + in, inlen, in_final); +} + +nghttp2_ssize nghttp2_hd_inflate_hd3(nghttp2_hd_inflater *inflater, + nghttp2_nv *nv_out, int *inflate_flags, + const uint8_t *in, size_t inlen, + int in_final) { + nghttp2_ssize rv; nghttp2_hd_nv hd_nv; rv = nghttp2_hd_inflate_hd_nv(inflater, &hd_nv, inflate_flags, in, inlen, @@ -1866,11 +1889,11 @@ ssize_t nghttp2_hd_inflate_hd2(nghttp2_hd_inflater *inflater, return rv; } -ssize_t nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, - nghttp2_hd_nv *nv_out, int *inflate_flags, - const uint8_t *in, size_t inlen, - int in_final) { - ssize_t rv = 0; +nghttp2_ssize nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, + nghttp2_hd_nv *nv_out, + int *inflate_flags, const uint8_t *in, + size_t inlen, int in_final) { + nghttp2_ssize rv = 0; const uint8_t *first = in; const uint8_t *last = in + inlen; int rfin = 0; @@ -1992,7 +2015,7 @@ ssize_t nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, inflater->state = NGHTTP2_HD_STATE_OPCODE; *inflate_flags |= NGHTTP2_HD_INFLATE_EMIT; - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); } else { inflater->index = inflater->left; --inflater->index; @@ -2050,7 +2073,7 @@ ssize_t nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, in += rv; - DEBUGF("inflatehd: %zd bytes read\n", rv); + DEBUGF("inflatehd: %td bytes read\n", rv); if (inflater->left) { DEBUGF("inflatehd: 
still %zu bytes to go\n", inflater->left); @@ -2072,7 +2095,7 @@ ssize_t nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, in += rv; - DEBUGF("inflatehd: %zd bytes read\n", rv); + DEBUGF("inflatehd: %td bytes read\n", rv); if (inflater->left) { DEBUGF("inflatehd: still %zu bytes to go\n", inflater->left); @@ -2138,7 +2161,7 @@ ssize_t nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, in += rv; - DEBUGF("inflatehd: %zd bytes read\n", rv); + DEBUGF("inflatehd: %td bytes read\n", rv); if (inflater->left) { DEBUGF("inflatehd: still %zu bytes to go\n", inflater->left); @@ -2162,18 +2185,18 @@ ssize_t nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, inflater->state = NGHTTP2_HD_STATE_OPCODE; *inflate_flags |= NGHTTP2_HD_INFLATE_EMIT; - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); case NGHTTP2_HD_STATE_READ_VALUE: rv = hd_inflate_read(inflater, &inflater->valuebuf, in, last); if (rv < 0) { - DEBUGF("inflatehd: value read failure %zd: %s\n", rv, + DEBUGF("inflatehd: value read failure %td: %s\n", rv, nghttp2_strerror((int)rv)); goto fail; } in += rv; - DEBUGF("inflatehd: %zd bytes read\n", rv); + DEBUGF("inflatehd: %td bytes read\n", rv); if (inflater->left) { DEBUGF("inflatehd: still %zu bytes to go\n", inflater->left); @@ -2196,7 +2219,7 @@ ssize_t nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, inflater->state = NGHTTP2_HD_STATE_OPCODE; *inflate_flags |= NGHTTP2_HD_INFLATE_EMIT; - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); } } @@ -2216,7 +2239,7 @@ ssize_t nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, } *inflate_flags |= NGHTTP2_HD_INFLATE_FINAL; } - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); almost_ok: if (in_final) { @@ -2226,10 +2249,10 @@ ssize_t nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, goto fail; } - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); fail: - DEBUGF("inflatehd: error return %zd\n", rv); + DEBUGF("inflatehd: error return %td\n", rv); inflater->ctx.bad = 1; return rv; @@ -2297,9 +2320,10 @@ int nghttp2_hd_emit_table_size(nghttp2_bufs *bufs, size_t table_size) { return emit_table_size(bufs, table_size); } -ssize_t nghttp2_hd_decode_length(uint32_t *res, size_t *shift_ptr, int *fin, - uint32_t initial, size_t shift, uint8_t *in, - uint8_t *last, size_t prefix) { +nghttp2_ssize nghttp2_hd_decode_length(uint32_t *res, size_t *shift_ptr, + int *fin, uint32_t initial, size_t shift, + uint8_t *in, uint8_t *last, + size_t prefix) { return decode_length(res, shift_ptr, fin, initial, shift, in, last, prefix); } diff --git a/deps/nghttp2/lib/nghttp2_hd.h b/deps/nghttp2/lib/nghttp2_hd.h index 6de0052aaea6cd..38a31a83c3891d 100644 --- a/deps/nghttp2/lib/nghttp2_hd.h +++ b/deps/nghttp2/lib/nghttp2_hd.h @@ -357,9 +357,10 @@ void nghttp2_hd_inflate_free(nghttp2_hd_inflater *inflater); * that return values and semantics are the same as * nghttp2_hd_inflate_hd(). 
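A small lifecycle sketch for the deflater/inflater pair whose calls share the semantics noted above; `hd_roundtrip_setup` is an assumed helper name and 4096 is simply the HTTP/2 default dynamic-table size:

    #include <nghttp2/nghttp2.h>

    /* Create a deflater/inflater pair for use with
       nghttp2_hd_deflate_hd2() and nghttp2_hd_inflate_hd3().  Returns
       0 on success; on failure nothing is leaked. */
    static int hd_roundtrip_setup(nghttp2_hd_deflater **deflater,
                                  nghttp2_hd_inflater **inflater) {
      int rv = nghttp2_hd_deflate_new(deflater, 4096);

      if (rv != 0) {
        return rv;
      }

      rv = nghttp2_hd_inflate_new(inflater);
      if (rv != 0) {
        nghttp2_hd_deflate_del(*deflater);
      }

      return rv;
    }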
*/ -ssize_t nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, - nghttp2_hd_nv *nv_out, int *inflate_flags, - const uint8_t *in, size_t inlen, int in_final); +nghttp2_ssize nghttp2_hd_inflate_hd_nv(nghttp2_hd_inflater *inflater, + nghttp2_hd_nv *nv_out, + int *inflate_flags, const uint8_t *in, + size_t inlen, int in_final); /* For unittesting purpose */ int nghttp2_hd_emit_indname_block(nghttp2_bufs *bufs, size_t index, @@ -376,9 +377,10 @@ int nghttp2_hd_emit_table_size(nghttp2_bufs *bufs, size_t table_size); nghttp2_hd_nv nghttp2_hd_table_get(nghttp2_hd_context *context, size_t index); /* For unittesting purpose */ -ssize_t nghttp2_hd_decode_length(uint32_t *res, size_t *shift_ptr, int *fin, - uint32_t initial, size_t shift, uint8_t *in, - uint8_t *last, size_t prefix); +nghttp2_ssize nghttp2_hd_decode_length(uint32_t *res, size_t *shift_ptr, + int *fin, uint32_t initial, size_t shift, + uint8_t *in, uint8_t *last, + size_t prefix); /* Huffman encoding/decoding functions */ @@ -427,9 +429,9 @@ void nghttp2_hd_huff_decode_context_init(nghttp2_hd_huff_decode_context *ctx); * NGHTTP2_ERR_HEADER_COMP * Decoding process has failed. */ -ssize_t nghttp2_hd_huff_decode(nghttp2_hd_huff_decode_context *ctx, - nghttp2_buf *buf, const uint8_t *src, - size_t srclen, int fin); +nghttp2_ssize nghttp2_hd_huff_decode(nghttp2_hd_huff_decode_context *ctx, + nghttp2_buf *buf, const uint8_t *src, + size_t srclen, int fin); /* * nghttp2_hd_huff_decode_failure_state returns nonzero if |ctx| diff --git a/deps/nghttp2/lib/nghttp2_hd_huffman.c b/deps/nghttp2/lib/nghttp2_hd_huffman.c index ac90f49c44f147..959053f774eda3 100644 --- a/deps/nghttp2/lib/nghttp2_hd_huffman.c +++ b/deps/nghttp2/lib/nghttp2_hd_huffman.c @@ -107,9 +107,9 @@ void nghttp2_hd_huff_decode_context_init(nghttp2_hd_huff_decode_context *ctx) { ctx->fstate = NGHTTP2_HUFF_ACCEPTED; } -ssize_t nghttp2_hd_huff_decode(nghttp2_hd_huff_decode_context *ctx, - nghttp2_buf *buf, const uint8_t *src, - size_t srclen, int final) { +nghttp2_ssize nghttp2_hd_huff_decode(nghttp2_hd_huff_decode_context *ctx, + nghttp2_buf *buf, const uint8_t *src, + size_t srclen, int final) { const uint8_t *end = src + srclen; nghttp2_huff_decode node = {ctx->fstate, 0}; const nghttp2_huff_decode *t = &node; @@ -136,7 +136,7 @@ ssize_t nghttp2_hd_huff_decode(nghttp2_hd_huff_decode_context *ctx, return NGHTTP2_ERR_HEADER_COMP; } - return (ssize_t)srclen; + return (nghttp2_ssize)srclen; } int nghttp2_hd_huff_decode_failure_state(nghttp2_hd_huff_decode_context *ctx) { diff --git a/deps/nghttp2/lib/nghttp2_outbound_item.c b/deps/nghttp2/lib/nghttp2_outbound_item.c index 2a3041db195355..a9e9f7693eda8c 100644 --- a/deps/nghttp2/lib/nghttp2_outbound_item.c +++ b/deps/nghttp2/lib/nghttp2_outbound_item.c @@ -27,6 +27,32 @@ #include #include +nghttp2_data_provider_wrap * +nghttp2_data_provider_wrap_v1(nghttp2_data_provider_wrap *dpw, + const nghttp2_data_provider *data_prd) { + if (!data_prd) { + return NULL; + } + + dpw->version = NGHTTP2_DATA_PROVIDER_V1; + dpw->data_prd.v1 = *data_prd; + + return dpw; +} + +nghttp2_data_provider_wrap * +nghttp2_data_provider_wrap_v2(nghttp2_data_provider_wrap *dpw, + const nghttp2_data_provider2 *data_prd) { + if (!data_prd) { + return NULL; + } + + dpw->version = NGHTTP2_DATA_PROVIDER_V2; + dpw->data_prd.v2 = *data_prd; + + return dpw; +} + void nghttp2_outbound_item_init(nghttp2_outbound_item *item) { item->cycle = 0; item->qnext = NULL; diff --git a/deps/nghttp2/lib/nghttp2_outbound_item.h b/deps/nghttp2/lib/nghttp2_outbound_item.h index 
bd4611b551bbbd..4e91750088f809 100644 --- a/deps/nghttp2/lib/nghttp2_outbound_item.h +++ b/deps/nghttp2/lib/nghttp2_outbound_item.h @@ -33,9 +33,32 @@ #include "nghttp2_frame.h" #include "nghttp2_mem.h" +#define NGHTTP2_DATA_PROVIDER_V1 1 +#define NGHTTP2_DATA_PROVIDER_V2 2 + +typedef struct nghttp2_data_provider_wrap { + int version; + union { + struct { + nghttp2_data_source source; + void *read_callback; + }; + nghttp2_data_provider v1; + nghttp2_data_provider2 v2; + } data_prd; +} nghttp2_data_provider_wrap; + +nghttp2_data_provider_wrap * +nghttp2_data_provider_wrap_v1(nghttp2_data_provider_wrap *dpw, + const nghttp2_data_provider *data_prd); + +nghttp2_data_provider_wrap * +nghttp2_data_provider_wrap_v2(nghttp2_data_provider_wrap *dpw, + const nghttp2_data_provider2 *data_prd); + /* struct used for HEADERS and PUSH_PROMISE frame */ typedef struct { - nghttp2_data_provider data_prd; + nghttp2_data_provider_wrap dpw; void *stream_user_data; /* error code when request HEADERS is canceled by RST_STREAM while it is in queue. */ @@ -50,7 +73,7 @@ typedef struct { /** * The data to be sent for this DATA frame. */ - nghttp2_data_provider data_prd; + nghttp2_data_provider_wrap dpw; /** * The flags of DATA frame. We use separate flags here and * nghttp2_data frame. The latter contains flags actually sent to diff --git a/deps/nghttp2/lib/nghttp2_session.c b/deps/nghttp2/lib/nghttp2_session.c index ce21caf9d7bb9d..226cdd59e8e98e 100644 --- a/deps/nghttp2/lib/nghttp2_session.c +++ b/deps/nghttp2/lib/nghttp2_session.c @@ -39,6 +39,7 @@ #include "nghttp2_extpri.h" #include "nghttp2_time.h" #include "nghttp2_debug.h" +#include "nghttp2_submit.h" /* * Returns non-zero if the number of outgoing opened streams is larger @@ -2103,10 +2104,9 @@ static int session_predicate_priority_update_send(nghttp2_session *session, /* Take into account settings max frame size and both connection-level flow control here */ -static ssize_t -nghttp2_session_enforce_flow_control_limits(nghttp2_session *session, - nghttp2_stream *stream, - ssize_t requested_window_size) { +static nghttp2_ssize nghttp2_session_enforce_flow_control_limits( + nghttp2_session *session, nghttp2_stream *stream, + nghttp2_ssize requested_window_size) { DEBUGF("send: remote windowsize connection=%d, remote maxframsize=%u, " "stream(id %d)=%d\n", session->remote_window_size, session->remote_settings.max_frame_size, @@ -2126,12 +2126,12 @@ nghttp2_session_enforce_flow_control_limits(nghttp2_session *session, */ static size_t nghttp2_session_next_data_read(nghttp2_session *session, nghttp2_stream *stream) { - ssize_t window_size; + nghttp2_ssize window_size; window_size = nghttp2_session_enforce_flow_control_limits( session, stream, NGHTTP2_DATA_PAYLOADLEN); - DEBUGF("send: available window=%zd\n", window_size); + DEBUGF("send: available window=%td\n", window_size); return window_size > 0 ? 
(size_t)window_size : 0; } @@ -2186,29 +2186,33 @@ static int nghttp2_session_predicate_data_send(nghttp2_session *session, return NGHTTP2_ERR_INVALID_STREAM_STATE; } -static ssize_t session_call_select_padding(nghttp2_session *session, - const nghttp2_frame *frame, - size_t max_payloadlen) { - ssize_t rv; +static nghttp2_ssize session_call_select_padding(nghttp2_session *session, + const nghttp2_frame *frame, + size_t max_payloadlen) { + nghttp2_ssize rv; + size_t max_paddedlen; - if (frame->hd.length >= max_payloadlen) { - return (ssize_t)frame->hd.length; + if (frame->hd.length >= max_payloadlen || + (!session->callbacks.select_padding_callback2 && + !session->callbacks.select_padding_callback)) { + return (nghttp2_ssize)frame->hd.length; } - if (session->callbacks.select_padding_callback) { - size_t max_paddedlen; - - max_paddedlen = - nghttp2_min(frame->hd.length + NGHTTP2_MAX_PADLEN, max_payloadlen); + max_paddedlen = + nghttp2_min(frame->hd.length + NGHTTP2_MAX_PADLEN, max_payloadlen); - rv = session->callbacks.select_padding_callback( + if (session->callbacks.select_padding_callback2) { + rv = session->callbacks.select_padding_callback2( session, frame, max_paddedlen, session->user_data); - if (rv < (ssize_t)frame->hd.length || rv > (ssize_t)max_paddedlen) { - return NGHTTP2_ERR_CALLBACK_FAILURE; - } - return rv; + } else { + rv = (nghttp2_ssize)session->callbacks.select_padding_callback( + session, frame, max_paddedlen, session->user_data); + } + if (rv < (nghttp2_ssize)frame->hd.length || + rv > (nghttp2_ssize)max_paddedlen) { + return NGHTTP2_ERR_CALLBACK_FAILURE; } - return (ssize_t)frame->hd.length; + return rv; } /* Add padding to HEADERS or PUSH_PROMISE. We use @@ -2216,7 +2220,7 @@ static ssize_t session_call_select_padding(nghttp2_session *session, frame->push_promise has also padlen in the same position. 
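A hedged example of the callback this code dispatches to: a `nghttp2_select_padding_callback2` that rounds the payload up to a multiple of 256 bytes and clamps to the limit the library passes in. The rounding policy is an assumption for illustration:

    #include <nghttp2/nghttp2.h>

    /* Pad frames to a 256-byte boundary, never exceeding the
       |max_payloadlen| the library computed for this frame. */
    static nghttp2_ssize select_padding_cb(nghttp2_session *session,
                                           const nghttp2_frame *frame,
                                           size_t max_payloadlen,
                                           void *user_data) {
      size_t padded = (frame->hd.length + 255) & ~(size_t)255;

      (void)session;
      (void)user_data;

      if (padded > max_payloadlen) {
        padded = max_payloadlen;
      }

      return (nghttp2_ssize)padded;
    }

    /* Registered with:
       nghttp2_session_callbacks_set_select_padding_callback2(
           cbs, select_padding_cb); */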
*/ static int session_headers_add_pad(nghttp2_session *session, nghttp2_frame *frame) { - ssize_t padded_payloadlen; + nghttp2_ssize padded_payloadlen; nghttp2_active_outbound_item *aob; nghttp2_bufs *framebufs; size_t padlen; @@ -2237,7 +2241,7 @@ static int session_headers_add_pad(nghttp2_session *session, padlen = (size_t)padded_payloadlen - frame->hd.length; - DEBUGF("send: padding selected: payloadlen=%zd, padlen=%zu\n", + DEBUGF("send: padding selected: payloadlen=%td, padlen=%zu\n", padded_payloadlen, padlen); nghttp2_frame_add_pad(framebufs, &frame->hd, padlen, 0); @@ -2257,18 +2261,24 @@ static size_t session_estimate_headers_payload(nghttp2_session *session, static int session_pack_extension(nghttp2_session *session, nghttp2_bufs *bufs, nghttp2_frame *frame) { - ssize_t rv; + nghttp2_ssize rv; nghttp2_buf *buf; size_t buflen; size_t framelen; - assert(session->callbacks.pack_extension_callback); + assert(session->callbacks.pack_extension_callback2 || + session->callbacks.pack_extension_callback); buf = &bufs->head->buf; buflen = nghttp2_min(nghttp2_buf_avail(buf), NGHTTP2_MAX_PAYLOADLEN); - rv = session->callbacks.pack_extension_callback(session, buf->last, buflen, - frame, session->user_data); + if (session->callbacks.pack_extension_callback2) { + rv = session->callbacks.pack_extension_callback2(session, buf->last, buflen, + frame, session->user_data); + } else { + rv = (nghttp2_ssize)session->callbacks.pack_extension_callback( + session, buf->last, buflen, frame, session->user_data); + } if (rv == NGHTTP2_ERR_CANCEL) { return (int)rv; } @@ -2451,7 +2461,7 @@ static int session_prep_frame(nghttp2_session *session, return rv; } - DEBUGF("send: before padding, HEADERS serialized in %zd bytes\n", + DEBUGF("send: before padding, HEADERS serialized in %zu bytes\n", nghttp2_bufs_len(&session->aob.framebufs)); rv = session_headers_add_pad(session, frame); @@ -2460,7 +2470,7 @@ static int session_prep_frame(nghttp2_session *session, return rv; } - DEBUGF("send: HEADERS finally serialized in %zd bytes\n", + DEBUGF("send: HEADERS finally serialized in %zu bytes\n", nghttp2_bufs_len(&session->aob.framebufs)); if (frame->headers.cat == NGHTTP2_HCAT_REQUEST) { @@ -2877,7 +2887,7 @@ static int session_after_frame_sent1(nghttp2_session *session) { /* Call on_frame_send_callback after nghttp2_stream_detach_item(), so that application can issue - nghttp2_submit_data() in the callback. */ + nghttp2_submit_data2() in the callback. */ if (session->callbacks.on_frame_send_callback) { rv = session_call_on_frame_send(session, frame); if (nghttp2_is_fatal(rv)) { @@ -2949,15 +2959,17 @@ static int session_after_frame_sent1(nghttp2_session *session) { } /* We assume aux_data is a pointer to nghttp2_headers_aux_data */ aux_data = &item->aux_data.headers; - if (aux_data->data_prd.read_callback) { - /* nghttp2_submit_data() makes a copy of aux_data->data_prd */ - rv = nghttp2_submit_data(session, NGHTTP2_FLAG_END_STREAM, - frame->hd.stream_id, &aux_data->data_prd); + if (aux_data->dpw.data_prd.read_callback) { + /* nghttp2_submit_data_shared() makes a copy of + aux_data->dpw */ + rv = nghttp2_submit_data_shared(session, NGHTTP2_FLAG_END_STREAM, + frame->hd.stream_id, &aux_data->dpw); if (nghttp2_is_fatal(rv)) { return rv; } - /* TODO nghttp2_submit_data() may fail if stream has already - DATA frame item. We might have to handle it here. */ + /* TODO nghttp2_submit_data_shared() may fail if stream has + already DATA frame item. We might have to handle it + here. 
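For the data-provider path serviced here, a sketch of a `nghttp2_data_source_read_callback2` that streams a fixed in-memory body and signals EOF; the `fixed_body` struct and `fixed_read_cb` name are illustrative assumptions:

    #include <string.h>
    #include <nghttp2/nghttp2.h>

    typedef struct {
      const uint8_t *data;
      size_t len;
    } fixed_body;

    /* Copy up to |length| bytes from the buffer placed in source->ptr
       and set NGHTTP2_DATA_FLAG_EOF once it is exhausted. */
    static nghttp2_ssize fixed_read_cb(nghttp2_session *session,
                                       int32_t stream_id, uint8_t *buf,
                                       size_t length, uint32_t *data_flags,
                                       nghttp2_data_source *source,
                                       void *user_data) {
      fixed_body *body = source->ptr;
      size_t n = body->len < length ? body->len : length;

      (void)session;
      (void)stream_id;
      (void)user_data;

      memcpy(buf, body->data, n);
      body->data += n;
      body->len -= n;

      if (body->len == 0) {
        *data_flags |= NGHTTP2_DATA_FLAG_EOF;
      }

      return (nghttp2_ssize)n;
    }

    /* Wired up when submitting a response, for example:
       nghttp2_data_provider2 prd = {{.ptr = &body}, fixed_read_cb};
       nghttp2_submit_response2(session, stream_id, nva, nvlen, &prd); */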
*/ } return 0; } @@ -2978,14 +2990,15 @@ static int session_after_frame_sent1(nghttp2_session *session) { } /* We assume aux_data is a pointer to nghttp2_headers_aux_data */ aux_data = &item->aux_data.headers; - if (aux_data->data_prd.read_callback) { - rv = nghttp2_submit_data(session, NGHTTP2_FLAG_END_STREAM, - frame->hd.stream_id, &aux_data->data_prd); + if (aux_data->dpw.data_prd.read_callback) { + rv = nghttp2_submit_data_shared(session, NGHTTP2_FLAG_END_STREAM, + frame->hd.stream_id, &aux_data->dpw); if (nghttp2_is_fatal(rv)) { return rv; } - /* TODO nghttp2_submit_data() may fail if stream has already - DATA frame item. We might have to handle it here. */ + /* TODO nghttp2_submit_data_shared() may fail if stream has + already DATA frame item. We might have to handle it + here. */ } return 0; default: @@ -3144,7 +3157,7 @@ static void session_after_frame_sent2(nghttp2_session *session) { aux_data = &item->aux_data.data; /* On EOF, we have already detached data. Please note that - application may issue nghttp2_submit_data() in + application may issue nghttp2_submit_data2() in on_frame_send_callback (call from session_after_frame_sent1), which attach data to stream. We don't want to detach it. */ if (aux_data->eof) { @@ -3191,7 +3204,7 @@ static int session_call_send_data(nghttp2_session *session, aux_data = &item->aux_data.data; rv = session->callbacks.send_data_callback(session, frame, buf->pos, length, - &aux_data->data_prd.source, + &aux_data->dpw.data_prd.source, session->user_data); switch (rv) { @@ -3205,9 +3218,9 @@ static int session_call_send_data(nghttp2_session *session, } } -static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session, - const uint8_t **data_ptr, - int fast_cb) { +static nghttp2_ssize nghttp2_session_mem_send_internal(nghttp2_session *session, + const uint8_t **data_ptr, + int fast_cb) { int rv; nghttp2_active_outbound_item *aob; nghttp2_bufs *framebufs; @@ -3385,7 +3398,7 @@ static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session, } } - DEBUGF("send: start transmitting frame type=%u, length=%zd\n", + DEBUGF("send: start transmitting frame type=%u, length=%td\n", framebufs->cur->buf.pos[3], framebufs->cur->buf.last - framebufs->cur->buf.pos); @@ -3425,7 +3438,7 @@ static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session, everything, we will adjust it. 
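On the application side, the transmit path above is normally driven by a loop like the following sketch; the `flush_session` helper and plain blocking `write()` transport are assumptions, and a real implementation would buffer partially written data instead of failing:

    #include <unistd.h>
    #include <nghttp2/nghttp2.h>

    /* Pull serialized frames out of the session with
       nghttp2_session_mem_send2() and write them to socket |fd|. */
    static int flush_session(nghttp2_session *session, int fd) {
      const uint8_t *data;
      nghttp2_ssize datalen;

      while ((datalen = nghttp2_session_mem_send2(session, &data)) > 0) {
        ssize_t n = write(fd, data, (size_t)datalen);

        if (n < 0 || n != datalen) {
          return -1; /* real code keeps the unsent tail */
        }
      }

      return datalen < 0 ? (int)datalen : 0;
    }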
*/ buf->pos += datalen; - return (ssize_t)datalen; + return (nghttp2_ssize)datalen; } case NGHTTP2_OB_SEND_NO_COPY: { nghttp2_stream *stream; @@ -3502,7 +3515,7 @@ static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session, buf->pos += datalen; - return (ssize_t)datalen; + return (nghttp2_ssize)datalen; } } } @@ -3510,8 +3523,13 @@ static ssize_t nghttp2_session_mem_send_internal(nghttp2_session *session, ssize_t nghttp2_session_mem_send(nghttp2_session *session, const uint8_t **data_ptr) { + return (ssize_t)nghttp2_session_mem_send2(session, data_ptr); +} + +nghttp2_ssize nghttp2_session_mem_send2(nghttp2_session *session, + const uint8_t **data_ptr) { int rv; - ssize_t len; + nghttp2_ssize len; *data_ptr = NULL; @@ -3528,7 +3546,7 @@ ssize_t nghttp2_session_mem_send(nghttp2_session *session, rv = session_after_frame_sent1(session); if (rv < 0) { assert(nghttp2_is_fatal(rv)); - return (ssize_t)rv; + return (nghttp2_ssize)rv; } } @@ -3537,8 +3555,8 @@ ssize_t nghttp2_session_mem_send(nghttp2_session *session, int nghttp2_session_send(nghttp2_session *session) { const uint8_t *data = NULL; - ssize_t datalen; - ssize_t sentlen; + nghttp2_ssize datalen; + nghttp2_ssize sentlen; nghttp2_bufs *framebufs; framebufs = &session->aob.framebufs; @@ -3548,8 +3566,13 @@ int nghttp2_session_send(nghttp2_session *session) { if (datalen <= 0) { return (int)datalen; } - sentlen = session->callbacks.send_callback(session, data, (size_t)datalen, - 0, session->user_data); + if (session->callbacks.send_callback2) { + sentlen = session->callbacks.send_callback2( + session, data, (size_t)datalen, 0, session->user_data); + } else { + sentlen = (nghttp2_ssize)session->callbacks.send_callback( + session, data, (size_t)datalen, 0, session->user_data); + } if (sentlen < 0) { if (sentlen == NGHTTP2_ERR_WOULDBLOCK) { /* Transmission canceled. Rewind the offset */ @@ -3564,11 +3587,17 @@ int nghttp2_session_send(nghttp2_session *session) { } } -static ssize_t session_recv(nghttp2_session *session, uint8_t *buf, - size_t len) { - ssize_t rv; - rv = session->callbacks.recv_callback(session, buf, len, 0, - session->user_data); +static nghttp2_ssize session_recv(nghttp2_session *session, uint8_t *buf, + size_t len) { + nghttp2_ssize rv; + + if (session->callbacks.recv_callback2) { + rv = session->callbacks.recv_callback2(session, buf, len, 0, + session->user_data); + } else { + rv = (nghttp2_ssize)session->callbacks.recv_callback(session, buf, len, 0, + session->user_data); + } if (rv > 0) { if ((size_t)rv > len) { return NGHTTP2_ERR_CALLBACK_FAILURE; @@ -3870,7 +3899,7 @@ static int session_inflate_handle_invalid_connection(nghttp2_session *session, static int inflate_header_block(nghttp2_session *session, nghttp2_frame *frame, size_t *readlen_ptr, uint8_t *in, size_t inlen, int final, int call_header_cb) { - ssize_t proclen; + nghttp2_ssize proclen; int rv; int inflate_flags; nghttp2_hd_nv nv; @@ -3923,7 +3952,7 @@ static int inflate_header_block(nghttp2_session *session, nghttp2_frame *frame, inlen -= (size_t)proclen; *readlen_ptr += (size_t)proclen; - DEBUGF("recv: proclen=%zd\n", proclen); + DEBUGF("recv: proclen=%td\n", proclen); if (call_header_cb && (inflate_flags & NGHTTP2_HD_INFLATE_EMIT)) { rv = 0; @@ -5763,7 +5792,7 @@ static int inbound_frame_handle_pad(nghttp2_inbound_frame *iframe, * Computes number of padding based on flags. This function returns * the calculated length if it succeeds, or -1. 
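Since the code that follows is the inbound path, here is a companion sketch of feeding transport bytes into `nghttp2_session_mem_recv2()`; the `feed_session` helper is illustrative and real code would distinguish a callback pause from a fatal error more carefully:

    #include <nghttp2/nghttp2.h>

    /* Feed |buflen| received bytes into the session.  Returns 0 when
       everything was consumed, 1 when a callback paused processing,
       or a negative nghttp2 error code on fatal failure. */
    static int feed_session(nghttp2_session *session, const uint8_t *buf,
                            size_t buflen) {
      nghttp2_ssize rv = nghttp2_session_mem_recv2(session, buf, buflen);

      if (rv < 0) {
        return (int)rv;
      }

      return (size_t)rv == buflen ? 0 : 1;
    }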
*/ -static ssize_t inbound_frame_compute_pad(nghttp2_inbound_frame *iframe) { +static nghttp2_ssize inbound_frame_compute_pad(nghttp2_inbound_frame *iframe) { size_t padlen; /* 1 for Pad Length field */ @@ -5778,7 +5807,7 @@ static ssize_t inbound_frame_compute_pad(nghttp2_inbound_frame *iframe) { iframe->padlen = padlen; - return (ssize_t)padlen; + return (nghttp2_ssize)padlen; } /* @@ -5787,9 +5816,9 @@ static ssize_t inbound_frame_compute_pad(nghttp2_inbound_frame *iframe) { * |payloadleft| does not include |readlen|. If padding was started * strictly before this data chunk, this function returns -1. */ -static ssize_t inbound_frame_effective_readlen(nghttp2_inbound_frame *iframe, - size_t payloadleft, - size_t readlen) { +static nghttp2_ssize +inbound_frame_effective_readlen(nghttp2_inbound_frame *iframe, + size_t payloadleft, size_t readlen) { size_t trail_padlen = nghttp2_frame_trail_padlen(&iframe->frame, iframe->padlen); @@ -5799,19 +5828,24 @@ static ssize_t inbound_frame_effective_readlen(nghttp2_inbound_frame *iframe, if (readlen < padlen) { return -1; } - return (ssize_t)(readlen - padlen); + return (nghttp2_ssize)(readlen - padlen); } - return (ssize_t)(readlen); + return (nghttp2_ssize)(readlen); } static const uint8_t static_in[] = {0}; ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, size_t inlen) { + return (ssize_t)nghttp2_session_mem_recv2(session, in, inlen); +} + +nghttp2_ssize nghttp2_session_mem_recv2(nghttp2_session *session, + const uint8_t *in, size_t inlen) { const uint8_t *first, *last; nghttp2_inbound_frame *iframe = &session->iframe; size_t readlen; - ssize_t padlen; + nghttp2_ssize padlen; int rv; int busy = 0; nghttp2_frame_hd cont_hd; @@ -5841,7 +5875,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (!nghttp2_session_want_read(session)) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } for (;;) { @@ -5871,7 +5905,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); } if (iframe->sbuf.pos[3] != NGHTTP2_SETTINGS || @@ -5893,7 +5927,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } iframe->state = NGHTTP2_IB_READ_HEAD; @@ -5908,7 +5942,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); } nghttp2_frame_unpack_frame_hd(&iframe->frame.hd, iframe->sbuf.pos); @@ -5929,7 +5963,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } switch (iframe->frame.hd.type) { @@ -5944,7 +5978,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, rv = session_on_data_received_fail_fast(session); if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } if (rv == NGHTTP2_ERR_IGN_PAYLOAD) { DEBUGF("recv: DATA not allowed stream_id=%d\n", @@ -5966,7 +6000,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, if (nghttp2_is_fatal(rv)) { return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } if (rv == 1) { @@ -5993,7 +6027,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session 
*session, const uint8_t *in, if (nghttp2_is_fatal(rv)) { return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } if (rv == 1) { @@ -6036,7 +6070,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { @@ -6137,7 +6171,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, if (nghttp2_is_fatal(rv)) { return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } iframe->iv = nghttp2_mem_malloc(mem, sizeof(nghttp2_settings_entry) * @@ -6175,7 +6209,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, if (nghttp2_is_fatal(rv)) { return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } if (rv == 1) { @@ -6235,7 +6269,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; default: DEBUGF("recv: extension frame\n"); @@ -6346,7 +6380,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, if (nghttp2_is_fatal(rv)) { return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } if (iframe->payloadleft < 4) { @@ -6404,11 +6438,11 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, in += readlen; iframe->payloadleft -= readlen; - DEBUGF("recv: readlen=%zu, payloadleft=%zu, left=%zd\n", readlen, + DEBUGF("recv: readlen=%zu, payloadleft=%zu, left=%zu\n", readlen, iframe->payloadleft, nghttp2_buf_mark_avail(&iframe->sbuf)); if (nghttp2_buf_mark_avail(&iframe->sbuf)) { - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); } switch (iframe->frame.hd.type) { @@ -6424,7 +6458,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, if (nghttp2_is_fatal(rv)) { return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } iframe->frame.headers.padlen = (size_t)padlen; @@ -6451,7 +6485,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { @@ -6481,7 +6515,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } } @@ -6495,7 +6529,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } session_inbound_frame_reset(session); @@ -6513,7 +6547,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, if (nghttp2_is_fatal(rv)) { return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } iframe->frame.push_promise.padlen = (size_t)padlen; @@ -6539,7 +6573,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, busy = 1; if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { @@ -6568,7 +6602,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } session_inbound_frame_reset(session); @@ -6603,7 +6637,7 
@@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } session_inbound_frame_reset(session); @@ -6661,7 +6695,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, break; case NGHTTP2_IB_READ_HEADER_BLOCK: case NGHTTP2_IB_IGN_HEADER_BLOCK: { - ssize_t data_readlen; + nghttp2_ssize data_readlen; size_t trail_padlen; int final; #ifdef DEBUGBUILD @@ -6705,14 +6739,14 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } if (rv == NGHTTP2_ERR_PAUSE) { in += hd_proclen; iframe->payloadleft -= hd_proclen; - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); } if (rv == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE) { @@ -6819,7 +6853,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, assert(iframe->state == NGHTTP2_IB_IGN_ALL); - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; case NGHTTP2_IB_READ_SETTINGS: DEBUGF("recv: [IB_READ_SETTINGS]\n"); @@ -6849,7 +6883,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } session_inbound_frame_reset(session); @@ -6883,7 +6917,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } session_inbound_frame_reset(session); @@ -6903,7 +6937,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, in += readlen; if (nghttp2_buf_mark_avail(&iframe->sbuf)) { - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); } nghttp2_frame_unpack_frame_hd(&cont_hd, iframe->sbuf.pos); @@ -6925,7 +6959,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } /* CONTINUATION won't bear NGHTTP2_PADDED flag */ @@ -6961,7 +6995,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, iframe->payloadleft, nghttp2_buf_mark_avail(&iframe->sbuf)); if (nghttp2_buf_mark_avail(&iframe->sbuf)) { - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); } /* Pad Length field is subject to flow control */ @@ -6971,7 +7005,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } /* Pad Length field is consumed immediately */ @@ -6983,7 +7017,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } stream = nghttp2_session_get_stream(session, iframe->frame.hd.stream_id); @@ -7006,7 +7040,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, if (nghttp2_is_fatal(rv)) { return rv; } - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } iframe->frame.data.padlen = (size_t)padlen; @@ -7033,7 +7067,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, iframe->payloadleft); if (readlen > 0) { - ssize_t data_readlen; + nghttp2_ssize data_readlen; rv = nghttp2_session_update_recv_connection_window_size(session, readlen); @@ 
-7042,7 +7076,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } rv = nghttp2_session_update_recv_stream_window_size( @@ -7061,7 +7095,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, data_readlen = 0; } - padlen = (ssize_t)readlen - data_readlen; + padlen = (nghttp2_ssize)readlen - data_readlen; if (padlen > 0) { /* Padding is considered as "consumed" immediately */ @@ -7073,11 +7107,11 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } } - DEBUGF("recv: data_readlen=%zd\n", data_readlen); + DEBUGF("recv: data_readlen=%td\n", data_readlen); if (data_readlen > 0) { if (session_enforce_http_messaging(session)) { @@ -7092,7 +7126,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_DATA) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } } @@ -7111,7 +7145,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, session, iframe->frame.hd.flags, iframe->frame.hd.stream_id, in - readlen, (size_t)data_readlen, session->user_data); if (rv == NGHTTP2_ERR_PAUSE) { - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); } if (nghttp2_is_fatal(rv)) { @@ -7153,7 +7187,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } if (session->opt_flags & NGHTTP2_OPTMASK_NO_AUTO_WINDOW_UPDATE) { @@ -7166,7 +7200,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } } } @@ -7179,7 +7213,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, break; case NGHTTP2_IB_IGN_ALL: - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; case NGHTTP2_IB_READ_EXTENSION_PAYLOAD: DEBUGF("recv: [IB_READ_EXTENSION_PAYLOAD]\n"); @@ -7274,7 +7308,7 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } if (iframe->state == NGHTTP2_IB_IGN_ALL) { - return (ssize_t)inlen; + return (nghttp2_ssize)inlen; } session_inbound_frame_reset(session); @@ -7291,16 +7325,17 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, assert(in == last); - return (ssize_t)(in - first); + return (nghttp2_ssize)(in - first); } int nghttp2_session_recv(nghttp2_session *session) { uint8_t buf[NGHTTP2_INBOUND_BUFFER_LENGTH]; while (1) { - ssize_t readlen; + nghttp2_ssize readlen; readlen = session_recv(session, buf, sizeof(buf)); if (readlen > 0) { - ssize_t proclen = nghttp2_session_mem_recv(session, buf, (size_t)readlen); + nghttp2_ssize proclen = + nghttp2_session_mem_recv2(session, buf, (size_t)readlen); if (proclen < 0) { return (int)proclen; } @@ -7642,8 +7677,8 @@ int nghttp2_session_pack_data(nghttp2_session *session, nghttp2_bufs *bufs, nghttp2_stream *stream) { int rv; uint32_t data_flags; - ssize_t payloadlen; - ssize_t padded_payloadlen; + nghttp2_ssize payloadlen; + nghttp2_ssize padded_payloadlen; nghttp2_buf *buf; size_t max_payloadlen; @@ -7651,19 +7686,26 @@ int nghttp2_session_pack_data(nghttp2_session *session, nghttp2_bufs *bufs, buf = &bufs->cur->buf; - if 
(session->callbacks.read_length_callback) { - - payloadlen = session->callbacks.read_length_callback( - session, frame->hd.type, stream->stream_id, session->remote_window_size, - stream->remote_window_size, session->remote_settings.max_frame_size, - session->user_data); + if (session->callbacks.read_length_callback2 || + session->callbacks.read_length_callback) { + if (session->callbacks.read_length_callback2) { + payloadlen = session->callbacks.read_length_callback2( + session, frame->hd.type, stream->stream_id, + session->remote_window_size, stream->remote_window_size, + session->remote_settings.max_frame_size, session->user_data); + } else { + payloadlen = (nghttp2_ssize)session->callbacks.read_length_callback( + session, frame->hd.type, stream->stream_id, + session->remote_window_size, stream->remote_window_size, + session->remote_settings.max_frame_size, session->user_data); + } - DEBUGF("send: read_length_callback=%zd\n", payloadlen); + DEBUGF("send: read_length_callback=%td\n", payloadlen); payloadlen = nghttp2_session_enforce_flow_control_limits(session, stream, payloadlen); - DEBUGF("send: read_length_callback after flow control=%zd\n", payloadlen); + DEBUGF("send: read_length_callback after flow control=%td\n", payloadlen); if (payloadlen <= 0) { return NGHTTP2_ERR_CALLBACK_FAILURE; @@ -7679,9 +7721,9 @@ int nghttp2_session_pack_data(nghttp2_session *session, nghttp2_bufs *bufs, DEBUGF("send: realloc buffer failed rv=%d", rv); /* If reallocation failed, old buffers are still in tact. So use safe limit. */ - payloadlen = (ssize_t)datamax; + payloadlen = (nghttp2_ssize)datamax; - DEBUGF("send: use safe limit payloadlen=%zd", payloadlen); + DEBUGF("send: use safe limit payloadlen=%td", payloadlen); } else { assert(&session->aob.framebufs == bufs); @@ -7695,9 +7737,23 @@ int nghttp2_session_pack_data(nghttp2_session *session, nghttp2_bufs *bufs, assert(nghttp2_buf_avail(buf) >= datamax); data_flags = NGHTTP2_DATA_FLAG_NONE; - payloadlen = aux_data->data_prd.read_callback( - session, frame->hd.stream_id, buf->pos, datamax, &data_flags, - &aux_data->data_prd.source, session->user_data); + switch (aux_data->dpw.version) { + case NGHTTP2_DATA_PROVIDER_V1: + payloadlen = (nghttp2_ssize)aux_data->dpw.data_prd.v1.read_callback( + session, frame->hd.stream_id, buf->pos, datamax, &data_flags, + &aux_data->dpw.data_prd.source, session->user_data); + + break; + case NGHTTP2_DATA_PROVIDER_V2: + payloadlen = aux_data->dpw.data_prd.v2.read_callback( + session, frame->hd.stream_id, buf->pos, datamax, &data_flags, + &aux_data->dpw.data_prd.source, session->user_data); + + break; + default: + assert(0); + abort(); + } if (payloadlen == NGHTTP2_ERR_DEFERRED || payloadlen == NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE || @@ -8275,7 +8331,7 @@ int nghttp2_session_change_stream_priority( /* We don't intentionally call nghttp2_session_adjust_idle_stream() so that idle stream created by this function, and existing ones are kept for application. We will adjust number of idle stream - in nghttp2_session_mem_send or nghttp2_session_mem_recv is + in nghttp2_session_mem_send2 or nghttp2_session_mem_recv2 is called. */ return 0; } @@ -8313,7 +8369,7 @@ int nghttp2_session_create_idle_stream(nghttp2_session *session, /* We don't intentionally call nghttp2_session_adjust_idle_stream() so that idle stream created by this function, and existing ones are kept for application. 
We will adjust number of idle stream - in nghttp2_session_mem_send or nghttp2_session_mem_recv is + in nghttp2_session_mem_send2 or nghttp2_session_mem_recv2 is called. */ return 0; } diff --git a/deps/nghttp2/lib/nghttp2_submit.c b/deps/nghttp2/lib/nghttp2_submit.c index f5554eb56494ed..f947969cd99c1f 100644 --- a/deps/nghttp2/lib/nghttp2_submit.c +++ b/deps/nghttp2/lib/nghttp2_submit.c @@ -68,7 +68,7 @@ static int32_t submit_headers_shared(nghttp2_session *session, uint8_t flags, int32_t stream_id, const nghttp2_priority_spec *pri_spec, nghttp2_nv *nva_copy, size_t nvlen, - const nghttp2_data_provider *data_prd, + const nghttp2_data_provider_wrap *dpw, void *stream_user_data) { int rv; uint8_t flags_copy; @@ -87,8 +87,8 @@ static int32_t submit_headers_shared(nghttp2_session *session, uint8_t flags, nghttp2_outbound_item_init(item); - if (data_prd != NULL && data_prd->read_callback != NULL) { - item->aux_data.headers.data_prd = *data_prd; + if (dpw != NULL && dpw->data_prd.read_callback != NULL) { + item->aux_data.headers.dpw = *dpw; } item->aux_data.headers.stream_user_data = stream_user_data; @@ -143,7 +143,7 @@ static int32_t submit_headers_shared_nva(nghttp2_session *session, uint8_t flags, int32_t stream_id, const nghttp2_priority_spec *pri_spec, const nghttp2_nv *nva, size_t nvlen, - const nghttp2_data_provider *data_prd, + const nghttp2_data_provider_wrap *dpw, void *stream_user_data) { int rv; nghttp2_nv *nva_copy; @@ -165,7 +165,7 @@ static int32_t submit_headers_shared_nva(nghttp2_session *session, } return submit_headers_shared(session, flags, stream_id, ©_pri_spec, - nva_copy, nvlen, data_prd, stream_user_data); + nva_copy, nvlen, dpw, stream_user_data); } int nghttp2_submit_trailer(nghttp2_session *session, int32_t stream_id, @@ -740,9 +740,9 @@ int nghttp2_submit_priority_update(nghttp2_session *session, uint8_t flags, } static uint8_t set_request_flags(const nghttp2_priority_spec *pri_spec, - const nghttp2_data_provider *data_prd) { + const nghttp2_data_provider_wrap *dpw) { uint8_t flags = NGHTTP2_FLAG_NONE; - if (data_prd == NULL || data_prd->read_callback == NULL) { + if (dpw == NULL || dpw->data_prd.read_callback == NULL) { flags |= NGHTTP2_FLAG_END_STREAM; } @@ -753,11 +753,11 @@ static uint8_t set_request_flags(const nghttp2_priority_spec *pri_spec, return flags; } -int32_t nghttp2_submit_request(nghttp2_session *session, - const nghttp2_priority_spec *pri_spec, - const nghttp2_nv *nva, size_t nvlen, - const nghttp2_data_provider *data_prd, - void *stream_user_data) { +static int32_t submit_request_shared(nghttp2_session *session, + const nghttp2_priority_spec *pri_spec, + const nghttp2_nv *nva, size_t nvlen, + const nghttp2_data_provider_wrap *dpw, + void *stream_user_data) { uint8_t flags; int rv; @@ -775,23 +775,47 @@ int32_t nghttp2_submit_request(nghttp2_session *session, pri_spec = NULL; } - flags = set_request_flags(pri_spec, data_prd); + flags = set_request_flags(pri_spec, dpw); return submit_headers_shared_nva(session, flags, -1, pri_spec, nva, nvlen, - data_prd, stream_user_data); + dpw, stream_user_data); +} + +int32_t nghttp2_submit_request(nghttp2_session *session, + const nghttp2_priority_spec *pri_spec, + const nghttp2_nv *nva, size_t nvlen, + const nghttp2_data_provider *data_prd, + void *stream_user_data) { + nghttp2_data_provider_wrap dpw; + + return submit_request_shared(session, pri_spec, nva, nvlen, + nghttp2_data_provider_wrap_v1(&dpw, data_prd), + stream_user_data); +} + +int32_t nghttp2_submit_request2(nghttp2_session *session, + const 
nghttp2_priority_spec *pri_spec, + const nghttp2_nv *nva, size_t nvlen, + const nghttp2_data_provider2 *data_prd, + void *stream_user_data) { + nghttp2_data_provider_wrap dpw; + + return submit_request_shared(session, pri_spec, nva, nvlen, + nghttp2_data_provider_wrap_v2(&dpw, data_prd), + stream_user_data); } -static uint8_t set_response_flags(const nghttp2_data_provider *data_prd) { +static uint8_t set_response_flags(const nghttp2_data_provider_wrap *dpw) { uint8_t flags = NGHTTP2_FLAG_NONE; - if (data_prd == NULL || data_prd->read_callback == NULL) { + if (dpw == NULL || dpw->data_prd.read_callback == NULL) { flags |= NGHTTP2_FLAG_END_STREAM; } return flags; } -int nghttp2_submit_response(nghttp2_session *session, int32_t stream_id, - const nghttp2_nv *nva, size_t nvlen, - const nghttp2_data_provider *data_prd) { +static int submit_response_shared(nghttp2_session *session, int32_t stream_id, + const nghttp2_nv *nva, size_t nvlen, + const nghttp2_data_provider_wrap *dpw) { uint8_t flags; if (stream_id <= 0) { @@ -802,14 +826,32 @@ int nghttp2_submit_response(nghttp2_session *session, int32_t stream_id, return NGHTTP2_ERR_PROTO; } - flags = set_response_flags(data_prd); + flags = set_response_flags(dpw); return submit_headers_shared_nva(session, flags, stream_id, NULL, nva, nvlen, - data_prd, NULL); + dpw, NULL); } -int nghttp2_submit_data(nghttp2_session *session, uint8_t flags, - int32_t stream_id, - const nghttp2_data_provider *data_prd) { +int nghttp2_submit_response(nghttp2_session *session, int32_t stream_id, + const nghttp2_nv *nva, size_t nvlen, + const nghttp2_data_provider *data_prd) { + nghttp2_data_provider_wrap dpw; + + return submit_response_shared(session, stream_id, nva, nvlen, + nghttp2_data_provider_wrap_v1(&dpw, data_prd)); +} + +int nghttp2_submit_response2(nghttp2_session *session, int32_t stream_id, + const nghttp2_nv *nva, size_t nvlen, + const nghttp2_data_provider2 *data_prd) { + nghttp2_data_provider_wrap dpw; + + return submit_response_shared(session, stream_id, nva, nvlen, + nghttp2_data_provider_wrap_v2(&dpw, data_prd)); +} + +int nghttp2_submit_data_shared(nghttp2_session *session, uint8_t flags, + int32_t stream_id, + const nghttp2_data_provider_wrap *dpw) { int rv; nghttp2_outbound_item *item; nghttp2_frame *frame; @@ -832,7 +874,7 @@ int nghttp2_submit_data(nghttp2_session *session, uint8_t flags, frame = &item->frame; aux_data = &item->aux_data.data; - aux_data->data_prd = *data_prd; + aux_data->dpw = *dpw; aux_data->eof = 0; aux_data->flags = nflags; @@ -848,9 +890,37 @@ int nghttp2_submit_data(nghttp2_session *session, uint8_t flags, return 0; } +int nghttp2_submit_data(nghttp2_session *session, uint8_t flags, + int32_t stream_id, + const nghttp2_data_provider *data_prd) { + nghttp2_data_provider_wrap dpw; + + assert(data_prd); + + return nghttp2_submit_data_shared( + session, flags, stream_id, nghttp2_data_provider_wrap_v1(&dpw, data_prd)); +} + +int nghttp2_submit_data2(nghttp2_session *session, uint8_t flags, + int32_t stream_id, + const nghttp2_data_provider2 *data_prd) { + nghttp2_data_provider_wrap dpw; + + assert(data_prd); + + return nghttp2_submit_data_shared( + session, flags, stream_id, nghttp2_data_provider_wrap_v2(&dpw, data_prd)); +} + ssize_t nghttp2_pack_settings_payload(uint8_t *buf, size_t buflen, const nghttp2_settings_entry *iv, size_t niv) { + return (ssize_t)nghttp2_pack_settings_payload2(buf, buflen, iv, niv); +} + +nghttp2_ssize nghttp2_pack_settings_payload2(uint8_t *buf, size_t buflen, + const nghttp2_settings_entry *iv, + 
size_t niv) { if (!nghttp2_iv_check(iv, niv)) { return NGHTTP2_ERR_INVALID_ARGUMENT; } @@ -859,7 +929,7 @@ ssize_t nghttp2_pack_settings_payload(uint8_t *buf, size_t buflen, return NGHTTP2_ERR_INSUFF_BUFSIZE; } - return (ssize_t)nghttp2_frame_pack_settings_payload(buf, iv, niv); + return (nghttp2_ssize)nghttp2_frame_pack_settings_payload(buf, iv, niv); } int nghttp2_submit_extension(nghttp2_session *session, uint8_t type, @@ -875,7 +945,8 @@ int nghttp2_submit_extension(nghttp2_session *session, uint8_t type, return NGHTTP2_ERR_INVALID_ARGUMENT; } - if (!session->callbacks.pack_extension_callback) { + if (!session->callbacks.pack_extension_callback2 && + !session->callbacks.pack_extension_callback) { return NGHTTP2_ERR_INVALID_STATE; } diff --git a/deps/nghttp2/lib/nghttp2_submit.h b/deps/nghttp2/lib/nghttp2_submit.h index 74d702fbcf077e..96781d2a274515 100644 --- a/deps/nghttp2/lib/nghttp2_submit.h +++ b/deps/nghttp2/lib/nghttp2_submit.h @@ -31,4 +31,10 @@ #include +typedef struct nghttp2_data_provider_wrap nghttp2_data_provider_wrap; + +int nghttp2_submit_data_shared(nghttp2_session *session, uint8_t flags, + int32_t stream_id, + const nghttp2_data_provider_wrap *dpw); + #endif /* NGHTTP2_SUBMIT_H */ From a9f3b9d9d1cf2022e9aaca60362f5366fd85baa6 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Wed, 17 Apr 2024 00:22:12 +0300 Subject: [PATCH 10/41] deps: update nghttp2 to 1.61.0 PR-URL: https://github.com/nodejs/node/pull/52395 Reviewed-By: Marco Ippolito Reviewed-By: Luigi Pinca Reviewed-By: Mohammed Keyvanzadeh Reviewed-By: Michael Dawson Reviewed-By: Rafael Gonzaga --- deps/nghttp2/lib/CMakeLists.txt | 30 ++++++---- deps/nghttp2/lib/Makefile.in | 1 - deps/nghttp2/lib/includes/Makefile.in | 1 - deps/nghttp2/lib/includes/nghttp2/nghttp2.h | 18 +++++- .../nghttp2/lib/includes/nghttp2/nghttp2ver.h | 4 +- deps/nghttp2/lib/nghttp2_helper.c | 2 + deps/nghttp2/lib/nghttp2_option.c | 5 ++ deps/nghttp2/lib/nghttp2_option.h | 5 ++ deps/nghttp2/lib/nghttp2_session.c | 59 +++++++++++++------ deps/nghttp2/lib/nghttp2_session.h | 10 ++++ 10 files changed, 100 insertions(+), 35 deletions(-) diff --git a/deps/nghttp2/lib/CMakeLists.txt b/deps/nghttp2/lib/CMakeLists.txt index 211c8e4340da12..fda8dcb7fc7f2a 100644 --- a/deps/nghttp2/lib/CMakeLists.txt +++ b/deps/nghttp2/lib/CMakeLists.txt @@ -34,6 +34,10 @@ set(NGHTTP2_RES "") set(STATIC_LIB "nghttp2_static") set(SHARED_LIB "nghttp2") +if(BUILD_SHARED_LIBS AND BUILD_STATIC_LIBS AND MSVC AND NOT STATIC_LIB_SUFFIX) + set(STATIC_LIB_SUFFIX "_static") +endif() + if(WIN32) configure_file( version.rc.in @@ -66,23 +70,23 @@ if(BUILD_SHARED_LIBS) endif() # Static library (for unittests because of symbol visibility) -add_library(${STATIC_LIB} STATIC ${NGHTTP2_SOURCES}) +if(BUILD_STATIC_LIBS) + add_library(${STATIC_LIB} STATIC ${NGHTTP2_SOURCES}) -set_target_properties(${STATIC_LIB} PROPERTIES - COMPILE_FLAGS "${WARNCFLAGS}" - VERSION ${LT_VERSION} SOVERSION ${LT_SOVERSION} - ARCHIVE_OUTPUT_NAME nghttp2${STATIC_LIB_SUFFIX} -) + set_target_properties(${STATIC_LIB} PROPERTIES + COMPILE_FLAGS "${WARNCFLAGS}" + VERSION ${LT_VERSION} SOVERSION ${LT_SOVERSION} + ARCHIVE_OUTPUT_NAME nghttp2${STATIC_LIB_SUFFIX} + ) -target_include_directories(${STATIC_LIB} INTERFACE - $ - $ - $ -) + target_include_directories(${STATIC_LIB} INTERFACE + $ + $ + $ + ) -target_compile_definitions(${STATIC_LIB} PUBLIC "-DNGHTTP2_STATICLIB") + target_compile_definitions(${STATIC_LIB} PUBLIC "-DNGHTTP2_STATICLIB") -if(BUILD_STATIC_LIBS) install(TARGETS ${STATIC_LIB} EXPORT 
${EXPORT_SET}) list(APPEND nghttp2_exports ${STATIC_LIB}) endif() diff --git a/deps/nghttp2/lib/Makefile.in b/deps/nghttp2/lib/Makefile.in index 53ca2403de3552..a5653128884f26 100644 --- a/deps/nghttp2/lib/Makefile.in +++ b/deps/nghttp2/lib/Makefile.in @@ -327,7 +327,6 @@ EXTRABPFCFLAGS = @EXTRABPFCFLAGS@ EXTRACFLAG = @EXTRACFLAG@ EXTRA_DEFS = @EXTRA_DEFS@ FGREP = @FGREP@ -FILECMD = @FILECMD@ GREP = @GREP@ HAVE_CXX14 = @HAVE_CXX14@ INSTALL = @INSTALL@ diff --git a/deps/nghttp2/lib/includes/Makefile.in b/deps/nghttp2/lib/includes/Makefile.in index eaab6b209030c5..47f46764652434 100644 --- a/deps/nghttp2/lib/includes/Makefile.in +++ b/deps/nghttp2/lib/includes/Makefile.in @@ -232,7 +232,6 @@ EXTRABPFCFLAGS = @EXTRABPFCFLAGS@ EXTRACFLAG = @EXTRACFLAG@ EXTRA_DEFS = @EXTRA_DEFS@ FGREP = @FGREP@ -FILECMD = @FILECMD@ GREP = @GREP@ HAVE_CXX14 = @HAVE_CXX14@ INSTALL = @INSTALL@ diff --git a/deps/nghttp2/lib/includes/nghttp2/nghttp2.h b/deps/nghttp2/lib/includes/nghttp2/nghttp2.h index 889176097dd31d..92c3ccc6e4855a 100644 --- a/deps/nghttp2/lib/includes/nghttp2/nghttp2.h +++ b/deps/nghttp2/lib/includes/nghttp2/nghttp2.h @@ -466,7 +466,12 @@ typedef enum { * exhaustion on server side to send these frames forever and does * not read network. */ - NGHTTP2_ERR_FLOODED = -904 + NGHTTP2_ERR_FLOODED = -904, + /** + * When a local endpoint receives too many CONTINUATION frames + * following a HEADER frame. + */ + NGHTTP2_ERR_TOO_MANY_CONTINUATIONS = -905, } nghttp2_error; /** @@ -3205,6 +3210,17 @@ NGHTTP2_EXTERN void nghttp2_option_set_stream_reset_rate_limit(nghttp2_option *option, uint64_t burst, uint64_t rate); +/** + * @function + * + * This function sets the maximum number of CONTINUATION frames + * following an incoming HEADER frame. If more than those frames are + * received, the remote endpoint is considered to be misbehaving and + * session will be closed. The default value is 8. + */ +NGHTTP2_EXTERN void nghttp2_option_set_max_continuations(nghttp2_option *option, + size_t val); + /** * @function * diff --git a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h index d38b89adc6268a..a21d6a1605e010 100644 --- a/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h +++ b/deps/nghttp2/lib/includes/nghttp2/nghttp2ver.h @@ -29,7 +29,7 @@ * @macro * Version number of the nghttp2 library release */ -#define NGHTTP2_VERSION "1.60.0" +#define NGHTTP2_VERSION "1.61.0" /** * @macro @@ -37,6 +37,6 @@ * release. This is a 24 bit number with 8 bits for major number, 8 bits * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203. 
*/ -#define NGHTTP2_VERSION_NUM 0x013c00 +#define NGHTTP2_VERSION_NUM 0x013d00 #endif /* NGHTTP2VER_H */ diff --git a/deps/nghttp2/lib/nghttp2_helper.c b/deps/nghttp2/lib/nghttp2_helper.c index 93dd4754b7f304..b3563d98e0d910 100644 --- a/deps/nghttp2/lib/nghttp2_helper.c +++ b/deps/nghttp2/lib/nghttp2_helper.c @@ -336,6 +336,8 @@ const char *nghttp2_strerror(int error_code) { "closed"; case NGHTTP2_ERR_TOO_MANY_SETTINGS: return "SETTINGS frame contained more than the maximum allowed entries"; + case NGHTTP2_ERR_TOO_MANY_CONTINUATIONS: + return "Too many CONTINUATION frames following a HEADER frame"; default: return "Unknown error code"; } diff --git a/deps/nghttp2/lib/nghttp2_option.c b/deps/nghttp2/lib/nghttp2_option.c index 43d4e952291ba4..53144b9b75c289 100644 --- a/deps/nghttp2/lib/nghttp2_option.c +++ b/deps/nghttp2/lib/nghttp2_option.c @@ -150,3 +150,8 @@ void nghttp2_option_set_stream_reset_rate_limit(nghttp2_option *option, option->stream_reset_burst = burst; option->stream_reset_rate = rate; } + +void nghttp2_option_set_max_continuations(nghttp2_option *option, size_t val) { + option->opt_set_mask |= NGHTTP2_OPT_MAX_CONTINUATIONS; + option->max_continuations = val; +} diff --git a/deps/nghttp2/lib/nghttp2_option.h b/deps/nghttp2/lib/nghttp2_option.h index 2259e1849d810f..c89cb97f8bb685 100644 --- a/deps/nghttp2/lib/nghttp2_option.h +++ b/deps/nghttp2/lib/nghttp2_option.h @@ -71,6 +71,7 @@ typedef enum { NGHTTP2_OPT_SERVER_FALLBACK_RFC7540_PRIORITIES = 1 << 13, NGHTTP2_OPT_NO_RFC9113_LEADING_AND_TRAILING_WS_VALIDATION = 1 << 14, NGHTTP2_OPT_STREAM_RESET_RATE_LIMIT = 1 << 15, + NGHTTP2_OPT_MAX_CONTINUATIONS = 1 << 16, } nghttp2_option_flag; /** @@ -98,6 +99,10 @@ struct nghttp2_option { * NGHTTP2_OPT_MAX_SETTINGS */ size_t max_settings; + /** + * NGHTTP2_OPT_MAX_CONTINUATIONS + */ + size_t max_continuations; /** * Bitwise OR of nghttp2_option_flag to determine that which fields * are specified. 
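The documentation block added to nghttp2.h above describes the new nghttp2_option_set_max_continuations() knob and the NGHTTP2_ERR_TOO_MANY_CONTINUATIONS error it can trigger. A minimal sketch of how an embedder might raise the limit, assuming only the long-standing nghttp2 option and session constructors; the callbacks object and the value 16 are illustrative, not taken from this patch:

#include <nghttp2/nghttp2.h>

/* Sketch only: create a client session whose CONTINUATION cap is raised
 * from the new default of 8 to 16 frames per header block. */
static int create_session_with_continuation_cap(
    nghttp2_session **session_ptr, const nghttp2_session_callbacks *callbacks,
    void *user_data) {
  nghttp2_option *option;
  int rv = nghttp2_option_new(&option);
  if (rv != 0) {
    return rv;
  }

  /* Receiving more than 16 CONTINUATION frames after a HEADERS frame will
   * now fail the session with NGHTTP2_ERR_TOO_MANY_CONTINUATIONS. */
  nghttp2_option_set_max_continuations(option, 16);

  rv = nghttp2_session_client_new2(session_ptr, callbacks, user_data, option);

  /* The session copies the option values it needs during creation, so the
   * option object can be released immediately. */
  nghttp2_option_del(option);
  return rv;
}

Sessions created without the option fall back to NGHTTP2_DEFAULT_MAX_CONTINUATIONS (8), as set in session_new() in the nghttp2_session.c hunk below.
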
diff --git a/deps/nghttp2/lib/nghttp2_session.c b/deps/nghttp2/lib/nghttp2_session.c index 226cdd59e8e98e..004a4dffaa7903 100644 --- a/deps/nghttp2/lib/nghttp2_session.c +++ b/deps/nghttp2/lib/nghttp2_session.c @@ -497,6 +497,7 @@ static int session_new(nghttp2_session **session_ptr, (*session_ptr)->max_send_header_block_length = NGHTTP2_MAX_HEADERSLEN; (*session_ptr)->max_outbound_ack = NGHTTP2_DEFAULT_MAX_OBQ_FLOOD_ITEM; (*session_ptr)->max_settings = NGHTTP2_DEFAULT_MAX_SETTINGS; + (*session_ptr)->max_continuations = NGHTTP2_DEFAULT_MAX_CONTINUATIONS; if (option) { if ((option->opt_set_mask & NGHTTP2_OPT_NO_AUTO_WINDOW_UPDATE) && @@ -585,6 +586,10 @@ static int session_new(nghttp2_session **session_ptr, option->stream_reset_burst, option->stream_reset_rate); } + + if (option->opt_set_mask & NGHTTP2_OPT_MAX_CONTINUATIONS) { + (*session_ptr)->max_continuations = option->max_continuations; + } } rv = nghttp2_hd_deflate_init2(&(*session_ptr)->hd_deflater, @@ -979,7 +984,14 @@ static int session_attach_stream_item(nghttp2_session *session, return 0; } - return session_ob_data_push(session, stream); + rv = session_ob_data_push(session, stream); + if (rv != 0) { + nghttp2_stream_detach_item(stream); + + return rv; + } + + return 0; } static void session_detach_stream_item(nghttp2_session *session, @@ -1309,9 +1321,11 @@ nghttp2_stream *nghttp2_session_open_stream(nghttp2_session *session, assert((stream->flags & NGHTTP2_STREAM_FLAG_NO_RFC7540_PRIORITIES) || nghttp2_stream_in_dep_tree(stream)); + nghttp2_session_detach_idle_stream(session, stream); + if (nghttp2_stream_in_dep_tree(stream)) { assert(!(stream->flags & NGHTTP2_STREAM_FLAG_NO_RFC7540_PRIORITIES)); - nghttp2_session_detach_idle_stream(session, stream); + rv = nghttp2_stream_dep_remove(stream); if (rv != 0) { return NULL; @@ -1471,6 +1485,21 @@ int nghttp2_session_close_stream(nghttp2_session *session, int32_t stream_id, DEBUGF("stream: stream(%p)=%d close\n", stream, stream->stream_id); + /* We call on_stream_close_callback even if stream->state is + NGHTTP2_STREAM_INITIAL. This will happen while sending request + HEADERS, a local endpoint receives RST_STREAM for that stream. It + may be PROTOCOL_ERROR, but without notifying stream closure will + hang the stream in a local endpoint. + */ + + if (session->callbacks.on_stream_close_callback) { + if (session->callbacks.on_stream_close_callback( + session, stream_id, error_code, session->user_data) != 0) { + + return NGHTTP2_ERR_CALLBACK_FAILURE; + } + } + if (stream->item) { nghttp2_outbound_item *item; @@ -1488,21 +1517,6 @@ int nghttp2_session_close_stream(nghttp2_session *session, int32_t stream_id, } } - /* We call on_stream_close_callback even if stream->state is - NGHTTP2_STREAM_INITIAL. This will happen while sending request - HEADERS, a local endpoint receives RST_STREAM for that stream. It - may be PROTOCOL_ERROR, but without notifying stream closure will - hang the stream in a local endpoint. 
- */ - - if (session->callbacks.on_stream_close_callback) { - if (session->callbacks.on_stream_close_callback( - session, stream_id, error_code, session->user_data) != 0) { - - return NGHTTP2_ERR_CALLBACK_FAILURE; - } - } - is_my_stream_id = nghttp2_session_is_my_stream_id(session, stream_id); /* pushed streams which is not opened yet is not counted toward max @@ -1559,6 +1573,11 @@ int nghttp2_session_destroy_stream(nghttp2_session *session, } } + if (stream->queued && + (stream->flags & NGHTTP2_STREAM_FLAG_NO_RFC7540_PRIORITIES)) { + session_ob_data_remove(session, stream); + } + nghttp2_map_remove(&session->streams, stream->stream_id); nghttp2_stream_free(stream); nghttp2_mem_free(mem, stream); @@ -6812,6 +6831,8 @@ nghttp2_ssize nghttp2_session_mem_recv2(nghttp2_session *session, } } session_inbound_frame_reset(session); + + session->num_continuations = 0; } break; } @@ -6933,6 +6954,10 @@ nghttp2_ssize nghttp2_session_mem_recv2(nghttp2_session *session, } #endif /* DEBUGBUILD */ + if (++session->num_continuations > session->max_continuations) { + return NGHTTP2_ERR_TOO_MANY_CONTINUATIONS; + } + readlen = inbound_frame_buf_read(iframe, in, last); in += readlen; diff --git a/deps/nghttp2/lib/nghttp2_session.h b/deps/nghttp2/lib/nghttp2_session.h index b119329a04da45..ef8f7b27d67261 100644 --- a/deps/nghttp2/lib/nghttp2_session.h +++ b/deps/nghttp2/lib/nghttp2_session.h @@ -110,6 +110,10 @@ typedef struct { #define NGHTTP2_DEFAULT_STREAM_RESET_BURST 1000 #define NGHTTP2_DEFAULT_STREAM_RESET_RATE 33 +/* The default max number of CONTINUATION frames following an incoming + HEADER frame. */ +#define NGHTTP2_DEFAULT_MAX_CONTINUATIONS 8 + /* Internal state when receiving incoming frame */ typedef enum { /* Receiving frame header */ @@ -290,6 +294,12 @@ struct nghttp2_session { size_t max_send_header_block_length; /* The maximum number of settings accepted per SETTINGS frame. */ size_t max_settings; + /* The maximum number of CONTINUATION frames following an incoming + HEADER frame. */ + size_t max_continuations; + /* The number of CONTINUATION frames following an incoming HEADER + frame. This variable is reset when END_HEADERS flag is seen. */ + size_t num_continuations; /* Next Stream ID. Made unsigned int to detect >= (1 << 31). */ uint32_t next_stream_id; /* The last stream ID this session initiated. For client session, From b56f66e2508bd673b3cea14f216fd3398b4e0e4f Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Sat, 9 Mar 2024 16:40:15 +0200 Subject: [PATCH 11/41] deps: update simdutf to 4.0.9 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/51655 Reviewed-By: Luigi Pinca Reviewed-By: Marco Ippolito Reviewed-By: Rafael Gonzaga Reviewed-By: Ulises Gascón --- deps/simdutf/simdutf.cpp | 74 +++++++++++++++++++--------------------- deps/simdutf/simdutf.h | 8 ++--- 2 files changed, 40 insertions(+), 42 deletions(-) diff --git a/deps/simdutf/simdutf.cpp b/deps/simdutf/simdutf.cpp index dc58d9ab5c3637..d9b854a8cc1a3a 100644 --- a/deps/simdutf/simdutf.cpp +++ b/deps/simdutf/simdutf.cpp @@ -1,4 +1,4 @@ -/* auto-generated on 2023-12-01 13:59:01 -0500. Do not edit! */ +/* auto-generated on 2024-01-29 10:40:15 -0500. Do not edit! */ /* begin file src/simdutf.cpp */ #include "simdutf.h" /* begin file src/implementation.cpp */ @@ -1151,7 +1151,7 @@ namespace icelake { // We should not get warnings while including yet we do // under some versions of GCC. 
// If the x86intrin.h header has uninitialized values that are problematic, -// it is a GCC issue, we want to ignore these warnigns. +// it is a GCC issue, we want to ignore these warnings. SIMDUTF_DISABLE_GCC_WARNING(-Wuninitialized) #endif @@ -1568,7 +1568,7 @@ class implementation final : public simdutf::implementation { // We should not get warnings while including yet we do // under some versions of GCC. // If the x86intrin.h header has uninitialized values that are problematic, -// it is a GCC issue, we want to ignore these warnigns. +// it is a GCC issue, we want to ignore these warnings. SIMDUTF_DISABLE_GCC_WARNING(-Wuninitialized) #endif @@ -2498,7 +2498,7 @@ class implementation final : public simdutf::implementation { // We should not get warnings while including yet we do // under some versions of GCC. // If the x86intrin.h header has uninitialized values that are problematic, -// it is a GCC issue, we want to ignore these warnigns. +// it is a GCC issue, we want to ignore these warnings. SIMDUTF_DISABLE_GCC_WARNING(-Wuninitialized) #endif @@ -11655,7 +11655,7 @@ inline result convert_with_errors(const char* buf, size_t len, char16_t* utf16_o * * The caller is responsible to ensure that len > 0. * - * If the error is believed to have occured prior to 'buf', the count value contain in the result + * If the error is believed to have occurred prior to 'buf', the count value contain in the result * will be SIZE_T - 1, SIZE_T - 2, or SIZE_T - 3. */ template @@ -11934,7 +11934,7 @@ inline result convert_with_errors(const char* buf, size_t len, char32_t* utf32_o * * The caller is responsible to ensure that len > 0. * - * If the error is believed to have occured prior to 'buf', the count value contain in the result + * If the error is believed to have occurred prior to 'buf', the count value contain in the result * will be SIZE_T - 1, SIZE_T - 2, or SIZE_T - 3. */ inline result rewind_and_convert_with_errors(size_t prior_bytes, const char* buf, size_t len, char32_t* utf32_output) { @@ -16084,11 +16084,11 @@ using namespace simd; // 8 bytes when calling convert_masked_utf8_to_utf32. If you skip the last 16 bytes, // and if the data is valid, then it is entirely safe because 16 UTF-8 bytes generate // much more than 8 bytes. However, you cannot generally assume that you have valid - // UTF-8 input, so we are going to go back from the end counting 4 leading bytes, + // UTF-8 input, so we are going to go back from the end counting 8 leading bytes, // to give us a good margin. size_t leading_byte = 0; size_t margin = size; - for(; margin > 0 && leading_byte < 4; margin--) { + for(; margin > 0 && leading_byte < 8; margin--) { leading_byte += (int8_t(in[margin-1]) > -65); } // If the input is long enough, then we have that margin-1 is the fourth last leading byte. @@ -16158,11 +16158,11 @@ using namespace simd; // 8 bytes when calling convert_masked_utf8_to_utf32. If you skip the last 16 bytes, // and if the data is valid, then it is entirely safe because 16 UTF-8 bytes generate // much more than 8 bytes. However, you cannot generally assume that you have valid - // UTF-8 input, so we are going to go back from the end counting 4 leading bytes, + // UTF-8 input, so we are going to go back from the end counting 8 leading bytes, // to give us a good margin. 
size_t leading_byte = 0; size_t margin = size; - for(; margin > 0 && leading_byte < 4; margin--) { + for(; margin > 0 && leading_byte < 8; margin--) { leading_byte += (int8_t(in[margin-1]) > -65); } // If the input is long enough, then we have that margin-1 is the fourth last leading byte. @@ -17930,7 +17930,7 @@ simdutf_really_inline bool process_block_utf8_to_utf16(const char *&in, char16_t __m512i indexofthirdlastbytes = _mm512_add_epi16(mask_ffffffff, indexofsecondlastbytes); // indices of the second last bytes __m512i thirdlastbyte = _mm512_maskz_mov_epi8(m34, - clearedbytes); // only those that are the third last byte of a sequece + clearedbytes); // only those that are the third last byte of a sequence __m512i thirdlastbytes = _mm512_maskz_permutexvar_epi8(0x5555555555555555, indexofthirdlastbytes, thirdlastbyte); // the third last bytes (of three byte sequences, hi // surrogate) @@ -17992,7 +17992,7 @@ simdutf_really_inline bool process_block_utf8_to_utf16(const char *&in, char16_t __m512i indexofthirdlastbytes = _mm512_add_epi16(mask_ffffffff, indexofsecondlastbytes); // indices of the second last bytes __m512i thirdlastbyte = _mm512_maskz_mov_epi8(m34, - clearedbytes); // only those that are the third last byte of a sequece + clearedbytes); // only those that are the third last byte of a sequence __m512i thirdlastbytes = _mm512_maskz_permutexvar_epi8(0x5555555555555555, indexofthirdlastbytes, thirdlastbyte); // the third last bytes (of three byte sequences, hi // surrogate) @@ -18048,7 +18048,7 @@ simdutf_really_inline bool process_block_utf8_to_utf16(const char *&in, char16_t } // Fast path 2: all ASCII or 2 byte __mmask64 continuation_or_ascii = (tail == SIMDUTF_FULL) ? _knot_mask64(m234) : _kand_mask64(_knot_mask64(m234), b); - // on top of -0xc0 we substract -2 which we get back later of the + // on top of -0xc0 we subtract -2 which we get back later of the // continuation byte tags __m512i leading2byte = _mm512_maskz_sub_epi8(m234, input, mask_c2c2c2c2); __mmask64 leading = tail == (tail == SIMDUTF_FULL) ? _kor_mask64(m1, m234) : _kand_mask64(_kor_mask64(m1, m234), b); // first bytes of each sequence @@ -18296,7 +18296,7 @@ __m512i rotate_by_N_epi8(const __m512i input) { stored at separate 32-bit lanes. For each lane we have also a character class (`char_class), given in form - 0x8080800N, where N is 4 higest bits from the leading byte; 0x80 resets + 0x8080800N, where N is 4 highest bits from the leading byte; 0x80 resets corresponding bytes during pshufb. */ simdutf_really_inline __m512i expanded_utf8_to_utf32(__m512i char_class, __m512i utf8) { @@ -19214,7 +19214,7 @@ simdutf_really_inline size_t process_block_from_utf8_to_latin1(const char *buf, // _mm512_storeu_si512((__m512i *)latin_output, output); I tried using // _mm512_storeu_si512 and have the next process_block start from the // "written_out" point but the compiler shuffles memory in such a way that it - // is signifcantly slower... + // is significantly slower... // **************************** _mm512_mask_storeu_epi8((__m512i *)latin_output, store_mask, output); @@ -22028,10 +22028,9 @@ simdutf_unused simdutf_really_inline simd8 must_be_continuation(const simd } simdutf_really_inline simd8 must_be_2_3_continuation(const simd8 prev2, const simd8 prev3) { - simd8 is_third_byte = prev2.saturating_sub(0b11100000u-1); // Only 111_____ will be > 0 - simd8 is_fourth_byte = prev3.saturating_sub(0b11110000u-1); // Only 1111____ will be > 0 - // Caller requires a bool (all 1's). 
All values resulting from the subtraction will be <= 64, so signed comparison is fine. - return simd8(is_third_byte | is_fourth_byte) > int8_t(0); + simd8 is_third_byte = prev2.saturating_sub(0xe0u-0x80); // Only 111_____ will be > 0x80 + simd8 is_fourth_byte = prev3.saturating_sub(0xf0u-0x80); // Only 1111____ will be > 0x80 + return simd8(is_third_byte | is_fourth_byte); } /* begin file src/haswell/avx2_detect_encodings.cpp */ @@ -25495,11 +25494,11 @@ using namespace simd; // 8 bytes when calling convert_masked_utf8_to_utf32. If you skip the last 16 bytes, // and if the data is valid, then it is entirely safe because 16 UTF-8 bytes generate // much more than 8 bytes. However, you cannot generally assume that you have valid - // UTF-8 input, so we are going to go back from the end counting 4 leading bytes, + // UTF-8 input, so we are going to go back from the end counting 8 leading bytes, // to give us a good margin. size_t leading_byte = 0; size_t margin = size; - for(; margin > 0 && leading_byte < 4; margin--) { + for(; margin > 0 && leading_byte < 8; margin--) { leading_byte += (int8_t(in[margin-1]) > -65); } // If the input is long enough, then we have that margin-1 is the fourth last leading byte. @@ -25569,11 +25568,11 @@ using namespace simd; // 8 bytes when calling convert_masked_utf8_to_utf32. If you skip the last 16 bytes, // and if the data is valid, then it is entirely safe because 16 UTF-8 bytes generate // much more than 8 bytes. However, you cannot generally assume that you have valid - // UTF-8 input, so we are going to go back from the end counting 4 leading bytes, + // UTF-8 input, so we are going to go back from the end counting 8 leading bytes, // to give us a good margin. size_t leading_byte = 0; size_t margin = size; - for(; margin > 0 && leading_byte < 4; margin--) { + for(; margin > 0 && leading_byte < 8; margin--) { leading_byte += (int8_t(in[margin-1]) > -65); } // If the input is long enough, then we have that margin-1 is the fourth last leading byte. @@ -26887,10 +26886,10 @@ simdutf_unused simdutf_really_inline simd8 must_be_continuation(const simd } simdutf_really_inline simd8 must_be_2_3_continuation(const simd8 prev2, const simd8 prev3) { - simd8 is_third_byte = prev2.saturating_sub(0b11100000u-1); // Only 111_____ will be > 0 - simd8 is_fourth_byte = prev3.saturating_sub(0b11110000u-1); // Only 1111____ will be > 0 + simd8 is_third_byte = prev2.saturating_sub(0xe0u-0x80); // Only 111_____ will be >= 0x80 + simd8 is_fourth_byte = prev3.saturating_sub(0xf0u-0x80); // Only 1111____ will be >= 0x80 // Caller requires a bool (all 1's). All values resulting from the subtraction will be <= 64, so signed comparison is fine. - return simd8(is_third_byte | is_fourth_byte) > int8_t(0); + return simd8(is_third_byte | is_fourth_byte); } } // unnamed namespace @@ -27867,11 +27866,11 @@ using namespace simd; // 8 bytes when calling convert_masked_utf8_to_utf32. If you skip the last 16 bytes, // and if the data is valid, then it is entirely safe because 16 UTF-8 bytes generate // much more than 8 bytes. However, you cannot generally assume that you have valid - // UTF-8 input, so we are going to go back from the end counting 4 leading bytes, + // UTF-8 input, so we are going to go back from the end counting 8 leading bytes, // to give us a good margin. 
size_t leading_byte = 0; size_t margin = size; - for(; margin > 0 && leading_byte < 4; margin--) { + for(; margin > 0 && leading_byte < 8; margin--) { leading_byte += (int8_t(in[margin-1]) > -65); } // If the input is long enough, then we have that margin-1 is the fourth last leading byte. @@ -27941,11 +27940,11 @@ using namespace simd; // 8 bytes when calling convert_masked_utf8_to_utf32. If you skip the last 16 bytes, // and if the data is valid, then it is entirely safe because 16 UTF-8 bytes generate // much more than 8 bytes. However, you cannot generally assume that you have valid - // UTF-8 input, so we are going to go back from the end counting 4 leading bytes, + // UTF-8 input, so we are going to go back from the end counting 8 leading bytes, // to give us a good margin. size_t leading_byte = 0; size_t margin = size; - for(; margin > 0 && leading_byte < 4; margin--) { + for(; margin > 0 && leading_byte < 8; margin--) { leading_byte += (int8_t(in[margin-1]) > -65); } // If the input is long enough, then we have that margin-1 is the fourth last leading byte. @@ -28407,10 +28406,9 @@ simdutf_unused simdutf_really_inline simd8 must_be_continuation(const simd } simdutf_really_inline simd8 must_be_2_3_continuation(const simd8 prev2, const simd8 prev3) { - simd8 is_third_byte = prev2.saturating_sub(0b11100000u-1); // Only 111_____ will be > 0 - simd8 is_fourth_byte = prev3.saturating_sub(0b11110000u-1); // Only 1111____ will be > 0 - // Caller requires a bool (all 1's). All values resulting from the subtraction will be <= 64, so signed comparison is fine. - return simd8(is_third_byte | is_fourth_byte) > int8_t(0); + simd8 is_third_byte = prev2.saturating_sub(0xe0u-0x80); // Only 111_____ will be >= 0x80 + simd8 is_fourth_byte = prev3.saturating_sub(0xf0u-0x80); // Only 1111____ will be >= 0x80 + return simd8(is_third_byte | is_fourth_byte); } /* begin file src/westmere/internal/loader.cpp */ @@ -31874,11 +31872,11 @@ using namespace simd; // 8 bytes when calling convert_masked_utf8_to_utf32. If you skip the last 16 bytes, // and if the data is valid, then it is entirely safe because 16 UTF-8 bytes generate // much more than 8 bytes. However, you cannot generally assume that you have valid - // UTF-8 input, so we are going to go back from the end counting 4 leading bytes, + // UTF-8 input, so we are going to go back from the end counting 8 leading bytes, // to give us a good margin. size_t leading_byte = 0; size_t margin = size; - for(; margin > 0 && leading_byte < 4; margin--) { + for(; margin > 0 && leading_byte < 8; margin--) { leading_byte += (int8_t(in[margin-1]) > -65); } // If the input is long enough, then we have that margin-1 is the fourth last leading byte. @@ -31948,11 +31946,11 @@ using namespace simd; // 8 bytes when calling convert_masked_utf8_to_utf32. If you skip the last 16 bytes, // and if the data is valid, then it is entirely safe because 16 UTF-8 bytes generate // much more than 8 bytes. However, you cannot generally assume that you have valid - // UTF-8 input, so we are going to go back from the end counting 4 leading bytes, + // UTF-8 input, so we are going to go back from the end counting 8 leading bytes, // to give us a good margin. size_t leading_byte = 0; size_t margin = size; - for(; margin > 0 && leading_byte < 4; margin--) { + for(; margin > 0 && leading_byte < 8; margin--) { leading_byte += (int8_t(in[margin-1]) > -65); } // If the input is long enough, then we have that margin-1 is the fourth last leading byte. 
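The repeated hunks above widen the scalar tail margin of the SIMD UTF-8 converters from 4 to 8 leading bytes. As a standalone sketch of the backward scan those comments describe (the helper name is invented for illustration; simdutf inlines this logic directly inside its conversion routines):

#include <stddef.h>
#include <stdint.h>

/* Walk backwards from the end of the buffer until 8 non-continuation
 * (leading) bytes have been seen.  A UTF-8 continuation byte has the form
 * 0b10xxxxxx, i.e. its value as a signed 8-bit integer is <= -65, so
 * (int8_t)b > -65 holds exactly for leading bytes (ASCII or multi-byte
 * lead bytes). */
static size_t utf8_tail_margin(const char *in, size_t size) {
  size_t leading_byte = 0;
  size_t margin = size;
  for (; margin > 0 && leading_byte < 8; margin--) {
    leading_byte += (int8_t)in[margin - 1] > -65;
  }
  /* Bytes in [margin, size) are left for the scalar fallback, so the
   * vectorised loop never stops in the middle of a multi-byte sequence. */
  return margin;
}

Counting back 8 leading bytes instead of 4 simply leaves a larger safety margin for possibly invalid input; as the surrounding comments note, valid input is unaffected because 16 UTF-8 bytes always produce well over 8 output code units.
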
diff --git a/deps/simdutf/simdutf.h b/deps/simdutf/simdutf.h index f4db9217e2a946..b0466f52d9d742 100644 --- a/deps/simdutf/simdutf.h +++ b/deps/simdutf/simdutf.h @@ -1,4 +1,4 @@ -/* auto-generated on 2023-12-01 13:59:01 -0500. Do not edit! */ +/* auto-generated on 2024-01-29 10:40:15 -0500. Do not edit! */ /* begin file include/simdutf.h */ #ifndef SIMDUTF_H #define SIMDUTF_H @@ -567,7 +567,7 @@ SIMDUTF_DISABLE_UNDESIRED_WARNINGS #define SIMDUTF_SIMDUTF_VERSION_H /** The version of simdutf being used (major.minor.revision) */ -#define SIMDUTF_VERSION "4.0.8" +#define SIMDUTF_VERSION "4.0.9" namespace simdutf { enum { @@ -582,7 +582,7 @@ enum { /** * The revision (major.minor.REVISION) of simdutf being used. */ - SIMDUTF_VERSION_REVISION = 8 + SIMDUTF_VERSION_REVISION = 9 }; } // namespace simdutf @@ -874,7 +874,7 @@ simdutf_really_inline simdutf_warn_unused simdutf::encoding_type autodetect_enco * E.g., if the input might be UTF-16LE or UTF-8, this function returns * the value (simdutf::encoding_type::UTF8 | simdutf::encoding_type::UTF16_LE). * - * Overriden by each implementation. + * Overridden by each implementation. * * @param input the string to analyze. * @param length the length of the string in bytes. From be30309ea0581324f89c1ce2c6f6aa7af85cb0f9 Mon Sep 17 00:00:00 2001 From: Daniel Lemire Date: Wed, 20 Mar 2024 13:44:36 -0400 Subject: [PATCH 12/41] deps: update simdutf to 5.0.0 PR-URL: https://github.com/nodejs/node/pull/52138 Reviewed-By: Yagiz Nizipli Reviewed-By: Michael Dawson Reviewed-By: Marco Ippolito Reviewed-By: Rafael Gonzaga --- deps/simdutf/simdutf.cpp | 4532 ++++++++++++++++++++++++++++++++++++-- deps/simdutf/simdutf.h | 185 +- 2 files changed, 4522 insertions(+), 195 deletions(-) diff --git a/deps/simdutf/simdutf.cpp b/deps/simdutf/simdutf.cpp index d9b854a8cc1a3a..8452ff3896c4da 100644 --- a/deps/simdutf/simdutf.cpp +++ b/deps/simdutf/simdutf.cpp @@ -1,4 +1,4 @@ -/* auto-generated on 2024-01-29 10:40:15 -0500. Do not edit! */ +/* auto-generated on 2024-03-18 10:58:28 -0400. Do not edit! */ /* begin file src/simdutf.cpp */ #include "simdutf.h" /* begin file src/implementation.cpp */ @@ -140,7 +140,10 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf32_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf16_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf8_length_from_latin1(const char * input, size_t length) const noexcept; - + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; }; } // namespace arm64 @@ -179,6 +182,20 @@ simdutf_really_inline int count_ones(uint64_t input_num) { return vaddv_u8(vcnt_u8(vcreate_u8(input_num))); } +#if SIMDUTF_NEED_TRAILING_ZEROES +simdutf_really_inline int trailing_zeroes(uint64_t input_num) { +#ifdef SIMDUTF_REGULAR_VISUAL_STUDIO + unsigned long ret; + // Search the mask data from least significant bit (LSB) + // to the most significant bit (MSB) for a set bit (1). 
+ _BitScanForward64(&ret, input_num); + return (int)ret; +#else // SIMDUTF_REGULAR_VISUAL_STUDIO + return __builtin_ctzll(input_num); +#endif // SIMDUTF_REGULAR_VISUAL_STUDIO +} +#endif + } // unnamed namespace } // namespace arm64 } // namespace simdutf @@ -1340,6 +1357,10 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf32_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf16_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf8_length_from_latin1(const char * input, size_t length) const noexcept; + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; }; } // namespace icelake @@ -1385,6 +1406,16 @@ simdutf_really_inline long long int count_ones(uint64_t input_num) { } #endif +#if SIMDUTF_NEED_TRAILING_ZEROES +simdutf_really_inline int trailing_zeroes(uint64_t input_num) { +#if SIMDUTF_REGULAR_VISUAL_STUDIO + return (int)_tzcnt_u64(input_num); +#else // SIMDUTF_REGULAR_VISUAL_STUDIO + return __builtin_ctzll(input_num); +#endif // SIMDUTF_REGULAR_VISUAL_STUDIO +} +#endif + } // unnamed namespace } // namespace icelake } // namespace simdutf @@ -1547,6 +1578,10 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf32_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf16_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf8_length_from_latin1(const char * input, size_t length) const noexcept; + simdutf_warn_unused virtual size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; + simdutf_warn_unused virtual result base64_to_binary(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused virtual size_t base64_length_from_binary(size_t length) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; }; } // namespace haswell @@ -1656,6 +1691,16 @@ simdutf_really_inline long long int count_ones(uint64_t input_num) { } #endif +#if SIMDUTF_NEED_TRAILING_ZEROES +simdutf_inline int trailing_zeroes(uint64_t input_num) { +#if SIMDUTF_REGULAR_VISUAL_STUDIO + return (int)_tzcnt_u64(input_num); +#else // SIMDUTF_REGULAR_VISUAL_STUDIO + return __builtin_ctzll(input_num); +#endif // SIMDUTF_REGULAR_VISUAL_STUDIO +} +#endif + } // unnamed namespace } // namespace haswell } // namespace simdutf @@ -2478,6 +2523,10 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf32_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf16_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf8_length_from_latin1(const char * input, size_t length) const noexcept; + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; }; } // namespace 
westmere @@ -2564,6 +2613,18 @@ simdutf_really_inline long long int count_ones(uint64_t input_num) { } #endif +#if SIMDUTF_NEED_TRAILING_ZEROES +simdutf_really_inline int trailing_zeroes(uint64_t input_num) { +#if SIMDUTF_REGULAR_VISUAL_STUDIO + unsigned long ret; + _BitScanForward64(&ret, input_num); + return (int)ret; +#else // SIMDUTF_REGULAR_VISUAL_STUDIO + return __builtin_ctzll(input_num); +#endif // SIMDUTF_REGULAR_VISUAL_STUDIO +} +#endif + } // unnamed namespace } // namespace westmere } // namespace simdutf @@ -3397,6 +3458,10 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf8_length_from_utf32(const char32_t * input, size_t length) const noexcept; simdutf_warn_unused size_t utf16_length_from_utf32(const char32_t * input, size_t length) const noexcept; simdutf_warn_unused size_t utf32_length_from_utf8(const char * input, size_t length) const noexcept; + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; }; } // namespace ppc64 @@ -3954,6 +4019,278 @@ template struct simd8x64 { #endif // SIMDUTF_PPC64_H /* end file src/simdutf/ppc64.h */ +/* begin file src/simdutf/rvv.h */ +#ifndef SIMDUTF_RVV_H +#define SIMDUTF_RVV_H + +#ifdef SIMDUTF_FALLBACK_H +#error "rvv.h must be included before fallback.h" +#endif + + +#define SIMDUTF_CAN_ALWAYS_RUN_RVV SIMDUTF_IS_RVV + +#ifndef SIMDUTF_IMPLEMENTATION_RVV +#define SIMDUTF_IMPLEMENTATION_RVV (SIMDUTF_CAN_ALWAYS_RUN_RVV || (SIMDUTF_IS_RISCV64 && SIMDUTF_HAS_RVV_INTRINSICS && SIMDUTF_HAS_RVV_TARGET_REGION)) +#endif + +#if SIMDUTF_IMPLEMENTATION_RVV + +#if SIMDUTF_CAN_ALWAYS_RUN_RVV +#define SIMDUTF_TARGET_RVV +#else +#define SIMDUTF_TARGET_RVV SIMDUTF_TARGET_REGION("arch=+v") +#endif +#if !SIMDUTF_IS_ZVBB && SIMDUTF_HAS_ZVBB_INTRINSICS +#define SIMDUTF_TARGET_ZVBB SIMDUTF_TARGET_REGION("arch=+v,+zvbb") +#endif + +namespace simdutf { +namespace rvv { +} // namespace rvv +} // namespace simdutf + +/* begin file src/simdutf/rvv/implementation.h */ +#ifndef SIMDUTF_RVV_IMPLEMENTATION_H +#define SIMDUTF_RVV_IMPLEMENTATION_H + + +namespace simdutf { +namespace rvv { + +namespace { +using namespace simdutf; +} // namespace + +class implementation final : public simdutf::implementation { +public: + simdutf_really_inline implementation() + : simdutf::implementation("rvv", "RISC-V Vector Extension", + internal::instruction_set::RVV) + , _supports_zvbb(internal::detect_supported_architectures() & internal::instruction_set::ZVBB) + {} + simdutf_warn_unused int detect_encodings(const char *buf, size_t len) const noexcept final; + simdutf_warn_unused bool validate_utf8(const char *buf, size_t len) const noexcept final; + simdutf_warn_unused result validate_utf8_with_errors(const char *buf, size_t len) const noexcept final; + simdutf_warn_unused bool validate_ascii(const char *buf, size_t len) const noexcept final; + simdutf_warn_unused result validate_ascii_with_errors(const char *buf, size_t len) const noexcept final; + simdutf_warn_unused bool validate_utf16le(const char16_t *buf, size_t len) const noexcept final; + simdutf_warn_unused bool validate_utf16be(const char16_t *buf, size_t len) const noexcept final; + simdutf_warn_unused result 
validate_utf16le_with_errors(const char16_t *buf, size_t len) const noexcept final; + simdutf_warn_unused result validate_utf16be_with_errors(const char16_t *buf, size_t len) const noexcept final; + simdutf_warn_unused bool validate_utf32(const char32_t *buf, size_t len) const noexcept final; + simdutf_warn_unused result validate_utf32_with_errors(const char32_t *buf, size_t len) const noexcept final; + simdutf_warn_unused size_t convert_latin1_to_utf8(const char *buf, size_t len, char *utf8_output) const noexcept final; + simdutf_warn_unused size_t convert_latin1_to_utf16le(const char *buf, size_t len, char16_t *utf16_buffer) const noexcept final; + simdutf_warn_unused size_t convert_latin1_to_utf16be(const char *buf, size_t len, char16_t *utf16_buffer) const noexcept final; + simdutf_warn_unused size_t convert_latin1_to_utf32(const char *buf, size_t len, char32_t *utf32_output) const noexcept final; + simdutf_warn_unused size_t convert_utf8_to_latin1(const char *buf, size_t len, char *latin1_output) const noexcept final; + simdutf_warn_unused result convert_utf8_to_latin1_with_errors(const char *buf, size_t len, char *latin1_buffer) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf8_to_latin1(const char *buf, size_t len, char *latin1_output) const noexcept final; + simdutf_warn_unused size_t convert_utf8_to_utf16le(const char *buf, size_t len, char16_t *utf16_output) const noexcept final; + simdutf_warn_unused size_t convert_utf8_to_utf16be(const char *buf, size_t len, char16_t *utf16_output) const noexcept final; + simdutf_warn_unused result convert_utf8_to_utf16le_with_errors(const char *buf, size_t len, char16_t *utf16_output) const noexcept final; + simdutf_warn_unused result convert_utf8_to_utf16be_with_errors(const char *buf, size_t len, char16_t *utf16_output) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf8_to_utf16le(const char *buf, size_t len, char16_t *utf16_buffer) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf8_to_utf16be(const char *buf, size_t len, char16_t *utf16_buffer) const noexcept final; + simdutf_warn_unused size_t convert_utf8_to_utf32(const char *buf, size_t len, char32_t *utf32_output) const noexcept final; + simdutf_warn_unused result convert_utf8_to_utf32_with_errors(const char *buf, size_t len, char32_t *utf32_output) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf8_to_utf32(const char *buf, size_t len, char32_t *utf32_buffer) const noexcept final; + simdutf_warn_unused size_t convert_utf16le_to_latin1(const char16_t *buf, size_t len, char *latin1_buffer) const noexcept final; + simdutf_warn_unused size_t convert_utf16be_to_latin1(const char16_t *buf, size_t len, char *latin1_buffer) const noexcept final; + simdutf_warn_unused result convert_utf16le_to_latin1_with_errors(const char16_t *buf, size_t len, char *latin1_buffer) const noexcept final; + simdutf_warn_unused result convert_utf16be_to_latin1_with_errors(const char16_t *buf, size_t len, char *latin1_buffer) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf16le_to_latin1(const char16_t *buf, size_t len, char *latin1_buffer) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf16be_to_latin1(const char16_t *buf, size_t len, char *latin1_buffer) const noexcept final; + simdutf_warn_unused size_t convert_utf16le_to_utf8(const char16_t *buf, size_t len, char *utf8_buffer) const noexcept final; + simdutf_warn_unused size_t convert_utf16be_to_utf8(const char16_t *buf, size_t len, char 
*utf8_buffer) const noexcept final; + simdutf_warn_unused result convert_utf16le_to_utf8_with_errors(const char16_t *buf, size_t len, char *utf8_buffer) const noexcept final; + simdutf_warn_unused result convert_utf16be_to_utf8_with_errors(const char16_t *buf, size_t len, char *utf8_buffer) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf16le_to_utf8(const char16_t *buf, size_t len, char *utf8_buffer) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf16be_to_utf8(const char16_t *buf, size_t len, char *utf8_buffer) const noexcept final; + simdutf_warn_unused size_t convert_utf32_to_utf8(const char32_t *buf, size_t len, char *utf8_buffer) const noexcept final; + simdutf_warn_unused result convert_utf32_to_utf8_with_errors(const char32_t *buf, size_t len, char *utf8_buffer) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf32_to_utf8(const char32_t *buf, size_t len, char *utf8_buffer) const noexcept final; + simdutf_warn_unused size_t convert_utf32_to_latin1(const char32_t *buf, size_t len, char *latin1_output) const noexcept final; + simdutf_warn_unused result convert_utf32_to_latin1_with_errors(const char32_t *buf, size_t len, char *latin1_output) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf32_to_latin1(const char32_t *buf, size_t len, char *latin1_output) const noexcept final; + simdutf_warn_unused size_t convert_utf32_to_utf16le(const char32_t *buf, size_t len, char16_t *utf16_buffer) const noexcept final; + simdutf_warn_unused size_t convert_utf32_to_utf16be(const char32_t *buf, size_t len, char16_t *utf16_buffer) const noexcept final; + simdutf_warn_unused result convert_utf32_to_utf16le_with_errors(const char32_t *buf, size_t len, char16_t *utf16_buffer) const noexcept final; + simdutf_warn_unused result convert_utf32_to_utf16be_with_errors(const char32_t *buf, size_t len, char16_t *utf16_buffer) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf32_to_utf16le(const char32_t *buf, size_t len, char16_t *utf16_buffer) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf32_to_utf16be(const char32_t *buf, size_t len, char16_t *utf16_buffer) const noexcept final; + simdutf_warn_unused size_t convert_utf16le_to_utf32(const char16_t *buf, size_t len, char32_t *utf32_buffer) const noexcept final; + simdutf_warn_unused size_t convert_utf16be_to_utf32(const char16_t *buf, size_t len, char32_t *utf32_buffer) const noexcept final; + simdutf_warn_unused result convert_utf16le_to_utf32_with_errors(const char16_t *buf, size_t len, char32_t *utf32_buffer) const noexcept final; + simdutf_warn_unused result convert_utf16be_to_utf32_with_errors(const char16_t *buf, size_t len, char32_t *utf32_buffer) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf16le_to_utf32(const char16_t *buf, size_t len, char32_t *utf32_buffer) const noexcept final; + simdutf_warn_unused size_t convert_valid_utf16be_to_utf32(const char16_t *buf, size_t len, char32_t *utf32_buffer) const noexcept final; + void change_endianness_utf16(const char16_t *buf, size_t len, char16_t *output) const noexcept final; + simdutf_warn_unused size_t count_utf16le(const char16_t *buf, size_t len) const noexcept; + simdutf_warn_unused size_t count_utf16be(const char16_t *buf, size_t len) const noexcept; + simdutf_warn_unused size_t count_utf8(const char *buf, size_t len) const noexcept; + simdutf_warn_unused size_t utf8_length_from_utf16le(const char16_t *buf, size_t len) const noexcept; + simdutf_warn_unused size_t 
utf8_length_from_utf16be(const char16_t *buf, size_t len) const noexcept; + simdutf_warn_unused size_t utf32_length_from_utf16le(const char16_t *buf, size_t len) const noexcept; + simdutf_warn_unused size_t utf32_length_from_utf16be(const char16_t *buf, size_t len) const noexcept; + simdutf_warn_unused size_t utf16_length_from_utf8(const char *buf, size_t len) const noexcept; + simdutf_warn_unused size_t utf8_length_from_utf32(const char32_t *buf, size_t len) const noexcept; + simdutf_warn_unused size_t utf16_length_from_utf32(const char32_t *buf, size_t len) const noexcept; + simdutf_warn_unused size_t utf32_length_from_utf8(const char *buf, size_t len) const noexcept; + simdutf_warn_unused size_t latin1_length_from_utf8(const char *buf, size_t len) const noexcept; + simdutf_warn_unused size_t latin1_length_from_utf16(size_t len) const noexcept; + simdutf_warn_unused size_t latin1_length_from_utf32(size_t len) const noexcept; + simdutf_warn_unused size_t utf32_length_from_latin1(size_t len) const noexcept; + simdutf_warn_unused size_t utf16_length_from_latin1(size_t len) const noexcept; + simdutf_warn_unused size_t utf8_length_from_latin1(const char *buf, size_t len) const noexcept; + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; +private: + const bool _supports_zvbb; + +#if SIMDUTF_IS_ZVBB + bool supports_zvbb() const { return true; } +#elif SIMDUTF_HAS_ZVBB_INTRINSICS + bool supports_zvbb() const { return _supports_zvbb; } +#else + bool supports_zvbb() const { return false; } +#endif +}; + +} // namespace rvv +} // namespace simdutf + +#endif // SIMDUTF_RVV_IMPLEMENTATION_H +/* end file src/simdutf/rvv/implementation.h */ +/* begin file src/simdutf/rvv/begin.h */ +// redefining SIMDUTF_IMPLEMENTATION to "rvv" +// #define SIMDUTF_IMPLEMENTATION rvv + +#if SIMDUTF_CAN_ALWAYS_RUN_RVV +// nothing needed. 
+#else +SIMDUTF_TARGET_RVV +#endif +/* end file src/simdutf/rvv/begin.h */ +/* begin file src/simdutf/rvv/intrinsics.h */ +#ifndef SIMDUTF_RVV_INTRINSICS_H +#define SIMDUTF_RVV_INTRINSICS_H + + +#include + +#if __riscv_v_intrinsic >= 1000000 || __GCC__ >= 14 +#define simdutf_vrgather_u8m1x2(tbl, idx) __riscv_vcreate_v_u8m1_u8m2( \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m2_u8m1(idx, 0), __riscv_vsetvlmax_e8m1()), \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m2_u8m1(idx, 1), __riscv_vsetvlmax_e8m1())); + +#define simdutf_vrgather_u8m1x4(tbl, idx) __riscv_vcreate_v_u8m1_u8m4( \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m4_u8m1(idx, 0), __riscv_vsetvlmax_e8m1()), \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m4_u8m1(idx, 1), __riscv_vsetvlmax_e8m1()), \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m4_u8m1(idx, 2), __riscv_vsetvlmax_e8m1()), \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m4_u8m1(idx, 3), __riscv_vsetvlmax_e8m1())); +#else +// This has worse codegen on gcc +#define simdutf_vrgather_u8m1x2(tbl, idx) \ + __riscv_vset_v_u8m1_u8m2(__riscv_vlmul_ext_v_u8m1_u8m2( \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m2_u8m1(idx, 0), __riscv_vsetvlmax_e8m1())), 1, \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m2_u8m1(idx, 1), __riscv_vsetvlmax_e8m1())) + +#define simdutf_vrgather_u8m1x4(tbl, idx) \ + __riscv_vset_v_u8m1_u8m4(__riscv_vset_v_u8m1_u8m4(\ + __riscv_vset_v_u8m1_u8m4(__riscv_vlmul_ext_v_u8m1_u8m4( \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m4_u8m1(idx, 0), __riscv_vsetvlmax_e8m1())), 1, \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m4_u8m1(idx, 1), __riscv_vsetvlmax_e8m1())), 2, \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m4_u8m1(idx, 2), __riscv_vsetvlmax_e8m1())), 3, \ + __riscv_vrgather_vv_u8m1(tbl, __riscv_vget_v_u8m4_u8m1(idx, 3), __riscv_vsetvlmax_e8m1())) +#endif + +/* Zvbb adds dedicated support for endianness swaps with vrev8, but if we can't + * use that, we have to emulate it with the standard V extension. + * Using LMUL=1 vrgathers could be faster than the srl+macc variant, but that + * would increase register pressure, and vrgather implementations performance + * varies a lot. 
*/ +enum class simdutf_ByteFlip { NONE, V, ZVBB }; + +template +simdutf_really_inline static uint16_t simdutf_byteflip(uint16_t v) { + if (method != simdutf_ByteFlip::NONE) + return (uint16_t)((v*1u) << 8 | (v*1u) >> 8); + return v; +} + +#ifdef SIMDUTF_TARGET_ZVBB +SIMDUTF_UNTARGET_REGION +SIMDUTF_TARGET_ZVBB +#endif + +template +simdutf_really_inline static vuint16m1_t simdutf_byteflip(vuint16m1_t v, size_t vl) { +#if SIMDUTF_HAS_ZVBB_INTRINSICS + if (method == simdutf_ByteFlip::ZVBB) + return __riscv_vrev8_v_u16m1(v, vl); +#endif + if (method == simdutf_ByteFlip::V) + return __riscv_vmacc_vx_u16m1(__riscv_vsrl_vx_u16m1(v, 8, vl), 0x100, v, vl); + return v; +} + +template +simdutf_really_inline static vuint16m2_t simdutf_byteflip(vuint16m2_t v, size_t vl) { +#if SIMDUTF_HAS_ZVBB_INTRINSICS + if (method == simdutf_ByteFlip::ZVBB) + return __riscv_vrev8_v_u16m2(v, vl); +#endif + if (method == simdutf_ByteFlip::V) + return __riscv_vmacc_vx_u16m2(__riscv_vsrl_vx_u16m2(v, 8, vl), 0x100, v, vl); + return v; +} + +template +simdutf_really_inline static vuint16m4_t simdutf_byteflip(vuint16m4_t v, size_t vl) { +#if SIMDUTF_HAS_ZVBB_INTRINSICS + if (method == simdutf_ByteFlip::ZVBB) + return __riscv_vrev8_v_u16m4(v, vl); +#endif + if (method == simdutf_ByteFlip::V) + return __riscv_vmacc_vx_u16m4(__riscv_vsrl_vx_u16m4(v, 8, vl), 0x100, v, vl); + return v; +} + +template +simdutf_really_inline static vuint16m8_t simdutf_byteflip(vuint16m8_t v, size_t vl) { +#if SIMDUTF_HAS_ZVBB_INTRINSICS + if (method == simdutf_ByteFlip::ZVBB) + return __riscv_vrev8_v_u16m8(v, vl); +#endif + if (method == simdutf_ByteFlip::V) + return __riscv_vmacc_vx_u16m8(__riscv_vsrl_vx_u16m8(v, 8, vl), 0x100, v, vl); + return v; +} + +#ifdef SIMDUTF_TARGET_ZVBB +SIMDUTF_UNTARGET_REGION +SIMDUTF_TARGET_RVV +#endif + +#endif // SIMDUTF_RVV_INTRINSICS_H +/* end file src/simdutf/rvv/intrinsics.h */ +/* begin file src/simdutf/rvv/end.h */ +#if SIMDUTF_CAN_ALWAYS_RUN_RVV +// nothing needed. +#else +SIMDUTF_UNTARGET_REGION +#endif + +/* end file src/simdutf/rvv/end.h */ + +#endif // SIMDUTF_IMPLEMENTATION_RVV + +#endif // SIMDUTF_RVV_H +/* end file src/simdutf/rvv.h */ /* begin file src/simdutf/fallback.h */ #ifndef SIMDUTF_FALLBACK_H #define SIMDUTF_FALLBACK_H @@ -3963,7 +4300,7 @@ template struct simd8x64 { // Default Fallback to on unless a builtin implementation has already been selected. 
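
A quick aside on the non-Zvbb path of the simdutf_byteflip helpers above: without vrev8, the 16-bit byte swap is built from a logical shift right by 8 plus a multiply-accumulate by 0x100, relying on 16-bit wrap-around. The scalar C++ sketch below is purely illustrative (it is not simdutf source) and shows why (v >> 8) + v * 0x100, computed modulo 2^16, equals the usual (v << 8) | (v >> 8) swap.

#include <cassert>
#include <cstdint>

// Scalar analogue of the srl+macc byte swap used when Zvbb is unavailable:
// v * 0x100 wraps in 16 bits, leaving the low byte of v in the high position,
// and v >> 8 brings the high byte down, so the sum is bswap16(v).
static std::uint16_t byteflip_scalar(std::uint16_t v) {
  return static_cast<std::uint16_t>((v >> 8) + static_cast<std::uint16_t>(v * 0x100u));
}

int main() {
  assert(byteflip_scalar(0x1234) == 0x3412);  // same result as (v << 8) | (v >> 8)
  return 0;
}
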
#ifndef SIMDUTF_IMPLEMENTATION_FALLBACK -#if SIMDUTF_CAN_ALWAYS_RUN_ARM64 || SIMDUTF_CAN_ALWAYS_RUN_ICELAKE || SIMDUTF_CAN_ALWAYS_RUN_HASWELL || SIMDUTF_CAN_ALWAYS_RUN_WESTMERE || SIMDUTF_CAN_ALWAYS_RUN_PPC64 +#if SIMDUTF_CAN_ALWAYS_RUN_ARM64 || SIMDUTF_CAN_ALWAYS_RUN_ICELAKE || SIMDUTF_CAN_ALWAYS_RUN_HASWELL || SIMDUTF_CAN_ALWAYS_RUN_WESTMERE || SIMDUTF_CAN_ALWAYS_RUN_PPC64 || SIMDUTF_CAN_ALWAYS_RUN_RVV #define SIMDUTF_IMPLEMENTATION_FALLBACK 0 #else #define SIMDUTF_IMPLEMENTATION_FALLBACK 1 @@ -4075,8 +4412,12 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t latin1_length_from_utf32(size_t length) const noexcept; simdutf_warn_unused size_t utf32_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf16_length_from_latin1(size_t length) const noexcept; - simdutf_warn_unused size_t utf8_length_from_latin1(const char * input, size_t length) const noexcept;}; - + simdutf_warn_unused size_t utf8_length_from_latin1(const char * input, size_t length) const noexcept; + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; +}; } // namespace fallback } // namespace simdutf @@ -4121,7 +4462,7 @@ namespace simdutf { namespace scalar { namespace { namespace utf8 { -#if SIMDUTF_IMPLEMENTATION_FALLBACK +#if SIMDUTF_IMPLEMENTATION_FALLBACK || SIMDUTF_IMPLEMENTATION_RVV // only used by the fallback kernel. // credit: based on code from Google Fuchsia (Apache Licensed) inline simdutf_warn_unused bool validate(const char *buf, size_t len) noexcept { @@ -4486,6 +4827,13 @@ simdutf_warn_unused encoding_type implementation::autodetect_encoding(const char } namespace internal { +// When there is a single implementation, we should not pay a price + // for dispatching to the best implementation. We should just use the + // one we have. This is a compile-time check. + #define SIMDUTF_SINGLE_IMPLEMENTATION (SIMDUTF_IMPLEMENTATION_ICELAKE \ + + SIMDUTF_IMPLEMENTATION_HASWELL + SIMDUTF_IMPLEMENTATION_WESTMERE \ + + SIMDUTF_IMPLEMENTATION_ARM64 + SIMDUTF_IMPLEMENTATION_PPC64 \ + + SIMDUTF_IMPLEMENTATION_FALLBACK == 1) // Static array of known implementations. We're hoping these get baked into the executable // without requiring a static initializer. 
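
The SIMDUTF_SINGLE_IMPLEMENTATION macro introduced above adds the per-kernel 0/1 flags and compares the sum to 1, so a build that compiles in exactly one kernel can skip runtime CPU dispatch entirely. A minimal, self-contained sketch of the same pattern follows; it is illustrative only, and the DEMO_* macro names are made up rather than taken from simdutf.

#include <cstdio>

// Each feature macro is defined to 0 or 1, so their sum counts enabled kernels.
#define DEMO_KERNEL_A 0
#define DEMO_KERNEL_B 1
#define DEMO_KERNEL_FALLBACK 0
#define DEMO_SINGLE_KERNEL \
  (DEMO_KERNEL_A + DEMO_KERNEL_B + DEMO_KERNEL_FALLBACK == 1)

int main() {
#if DEMO_SINGLE_KERNEL
  std::puts("exactly one kernel compiled in: dispatch resolved at compile time");
#else
  std::puts("multiple kernels compiled in: runtime CPU detection required");
#endif
  return 0;
}
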
@@ -4521,6 +4869,12 @@ static const ppc64::implementation* get_ppc64_singleton() { return &ppc64_singleton; } #endif +#if SIMDUTF_IMPLEMENTATION_RVV +static const rvv::implementation* get_rvv_singleton() { + static const rvv::implementation rvv_singleton{}; + return &rvv_singleton; +} +#endif #if SIMDUTF_IMPLEMENTATION_FALLBACK static const fallback::implementation* get_fallback_singleton() { static const fallback::implementation fallback_singleton{}; @@ -4528,6 +4882,30 @@ static const fallback::implementation* get_fallback_singleton() { } #endif +#if SIMDUTF_SINGLE_IMPLEMENTATION +static const implementation* get_single_implementation() { + return +#if SIMDUTF_IMPLEMENTATION_ICELAKE + get_icelake_singleton(); +#endif +#if SIMDUTF_IMPLEMENTATION_HASWELL + get_haswell_singleton(); +#endif +#if SIMDUTF_IMPLEMENTATION_WESTMERE + get_westmere_singleton(); +#endif +#if SIMDUTF_IMPLEMENTATION_ARM64 + get_arm64_singleton(); +#endif +#if SIMDUTF_IMPLEMENTATION_PPC64 + get_ppc64_singleton(); +#endif +#if SIMDUTF_IMPLEMENTATION_FALLBACK + get_fallback_singleton(); +#endif +} +#endif + /** * @private Detects best supported implementation on first use, and sets it */ @@ -4837,6 +5215,22 @@ class detect_best_supported_implementation_on_first_use final : public implement return set_best()->utf32_length_from_utf8(buf, len); } + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept override { + return set_best()->maximal_binary_length_from_base64(input, length); + } + + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept override { + return set_best()->base64_to_binary(input, length, output); + } + + simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept override { + return set_best()->base64_length_from_binary(length); + } + + size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept override { + return set_best()->binary_to_base64(input, length, output); + } + simdutf_really_inline detect_best_supported_implementation_on_first_use() noexcept : implementation("best_supported_detector", "Detects the best supported implementation and sets it", 0) {} private: @@ -4860,6 +5254,9 @@ static const std::initializer_list& get_available_implem #if SIMDUTF_IMPLEMENTATION_PPC64 get_ppc64_singleton(), #endif +#if SIMDUTF_IMPLEMENTATION_RVV + get_rvv_singleton(), +#endif #if SIMDUTF_IMPLEMENTATION_FALLBACK get_fallback_singleton(), #endif @@ -5152,7 +5549,7 @@ class unsupported_implementation final : public implementation { return 0; } - simdutf_warn_unused size_t utf32_length_from_latin1(size_t) const noexcept override { + simdutf_warn_unused size_t utf32_length_from_latin1(size_t) const noexcept override { return 0; } @@ -5174,6 +5571,22 @@ class unsupported_implementation final : public implementation { return 0; } + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char *, size_t) const noexcept override { + return 0; + } + + simdutf_warn_unused result base64_to_binary(const char *, size_t, char*) const noexcept override { + return result(error_code::OTHER, 0); + } + + simdutf_warn_unused size_t base64_length_from_binary(size_t) const noexcept override { + return 0; + } + + size_t binary_to_base64(const char *, size_t, char*) const noexcept override { + return 0; + } + unsupported_implementation() : implementation("unsupported", "Unsupported CPU (no detected SIMD instructions)", 0) {} }; @@ -5232,22 +5645,40 @@ 
SIMDUTF_DLLIMPORTEXPORT const internal::available_implementation_list& get_avail * The active implementation. */ SIMDUTF_DLLIMPORTEXPORT internal::atomic_ptr& get_active_implementation() { +#if SIMDUTF_SINGLE_IMPLEMENTATION + // skip runtime detection + static internal::atomic_ptr active_implementation{internal::get_single_implementation()}; + return active_implementation; +#else static const internal::detect_best_supported_implementation_on_first_use detect_best_supported_implementation_on_first_use_singleton; static internal::atomic_ptr active_implementation{&detect_best_supported_implementation_on_first_use_singleton}; return active_implementation; +#endif } + +#if SIMDUTF_SINGLE_IMPLEMENTATION +const implementation * get_default_implementation() { + return internal::get_single_implementation(); +} +#else +internal::atomic_ptr& get_default_implementation() { + return get_active_implementation(); +} +#endif +#define SIMDUTF_GET_CURRENT_IMPLEMENTION + simdutf_warn_unused bool validate_utf8(const char *buf, size_t len) noexcept { - return get_active_implementation()->validate_utf8(buf, len); + return get_default_implementation()->validate_utf8(buf, len); } simdutf_warn_unused result validate_utf8_with_errors(const char *buf, size_t len) noexcept { - return get_active_implementation()->validate_utf8_with_errors(buf, len); + return get_default_implementation()->validate_utf8_with_errors(buf, len); } simdutf_warn_unused bool validate_ascii(const char *buf, size_t len) noexcept { - return get_active_implementation()->validate_ascii(buf, len); + return get_default_implementation()->validate_ascii(buf, len); } simdutf_warn_unused result validate_ascii_with_errors(const char *buf, size_t len) noexcept { - return get_active_implementation()->validate_ascii_with_errors(buf, len); + return get_default_implementation()->validate_ascii_with_errors(buf, len); } simdutf_warn_unused size_t convert_utf8_to_utf16(const char * input, size_t length, char16_t* utf16_output) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5257,31 +5688,31 @@ simdutf_warn_unused size_t convert_utf8_to_utf16(const char * input, size_t leng #endif } simdutf_warn_unused size_t convert_latin1_to_utf8(const char * buf, size_t len, char* utf8_output) noexcept { - return get_active_implementation()->convert_latin1_to_utf8(buf, len,utf8_output); + return get_default_implementation()->convert_latin1_to_utf8(buf, len,utf8_output); } simdutf_warn_unused size_t convert_latin1_to_utf16le(const char * buf, size_t len, char16_t* utf16_output) noexcept { - return get_active_implementation()->convert_latin1_to_utf16le(buf, len, utf16_output); + return get_default_implementation()->convert_latin1_to_utf16le(buf, len, utf16_output); } simdutf_warn_unused size_t convert_latin1_to_utf16be(const char * buf, size_t len, char16_t* utf16_output) noexcept{ - return get_active_implementation()->convert_latin1_to_utf16be(buf, len, utf16_output); + return get_default_implementation()->convert_latin1_to_utf16be(buf, len, utf16_output); } simdutf_warn_unused size_t convert_latin1_to_utf32(const char * buf, size_t len, char32_t * latin1_output) noexcept { - return get_active_implementation()->convert_latin1_to_utf32(buf, len,latin1_output); + return get_default_implementation()->convert_latin1_to_utf32(buf, len,latin1_output); } simdutf_warn_unused size_t convert_utf8_to_latin1(const char * buf, size_t len, char* latin1_output) noexcept { - return get_active_implementation()->convert_utf8_to_latin1(buf, len,latin1_output); + return 
get_default_implementation()->convert_utf8_to_latin1(buf, len,latin1_output); } simdutf_warn_unused result convert_utf8_to_latin1_with_errors(const char* buf, size_t len, char* latin1_output) noexcept { - return get_active_implementation()->convert_utf8_to_latin1_with_errors(buf, len, latin1_output); + return get_default_implementation()->convert_utf8_to_latin1_with_errors(buf, len, latin1_output); } simdutf_warn_unused size_t convert_valid_utf8_to_latin1(const char * buf, size_t len, char* latin1_output) noexcept { - return get_active_implementation()->convert_valid_utf8_to_latin1(buf, len,latin1_output); + return get_default_implementation()->convert_valid_utf8_to_latin1(buf, len,latin1_output); } simdutf_warn_unused size_t convert_utf8_to_utf16le(const char * input, size_t length, char16_t* utf16_output) noexcept { - return get_active_implementation()->convert_utf8_to_utf16le(input, length, utf16_output); + return get_default_implementation()->convert_utf8_to_utf16le(input, length, utf16_output); } simdutf_warn_unused size_t convert_utf8_to_utf16be(const char * input, size_t length, char16_t* utf16_output) noexcept { - return get_active_implementation()->convert_utf8_to_utf16be(input, length, utf16_output); + return get_default_implementation()->convert_utf8_to_utf16be(input, length, utf16_output); } simdutf_warn_unused result convert_utf8_to_utf16_with_errors(const char * input, size_t length, char16_t* utf16_output) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5291,16 +5722,16 @@ simdutf_warn_unused result convert_utf8_to_utf16_with_errors(const char * input, #endif } simdutf_warn_unused result convert_utf8_to_utf16le_with_errors(const char * input, size_t length, char16_t* utf16_output) noexcept { - return get_active_implementation()->convert_utf8_to_utf16le_with_errors(input, length, utf16_output); + return get_default_implementation()->convert_utf8_to_utf16le_with_errors(input, length, utf16_output); } simdutf_warn_unused result convert_utf8_to_utf16be_with_errors(const char * input, size_t length, char16_t* utf16_output) noexcept { - return get_active_implementation()->convert_utf8_to_utf16be_with_errors(input, length, utf16_output); + return get_default_implementation()->convert_utf8_to_utf16be_with_errors(input, length, utf16_output); } simdutf_warn_unused size_t convert_utf8_to_utf32(const char * input, size_t length, char32_t* utf32_output) noexcept { - return get_active_implementation()->convert_utf8_to_utf32(input, length, utf32_output); + return get_default_implementation()->convert_utf8_to_utf32(input, length, utf32_output); } simdutf_warn_unused result convert_utf8_to_utf32_with_errors(const char * input, size_t length, char32_t* utf32_output) noexcept { - return get_active_implementation()->convert_utf8_to_utf32_with_errors(input, length, utf32_output); + return get_default_implementation()->convert_utf8_to_utf32_with_errors(input, length, utf32_output); } simdutf_warn_unused bool validate_utf16(const char16_t * buf, size_t len) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5310,10 +5741,10 @@ simdutf_warn_unused bool validate_utf16(const char16_t * buf, size_t len) noexce #endif } simdutf_warn_unused bool validate_utf16le(const char16_t * buf, size_t len) noexcept { - return get_active_implementation()->validate_utf16le(buf, len); + return get_default_implementation()->validate_utf16le(buf, len); } simdutf_warn_unused bool validate_utf16be(const char16_t * buf, size_t len) noexcept { - return get_active_implementation()->validate_utf16be(buf, len); + return 
get_default_implementation()->validate_utf16be(buf, len); } simdutf_warn_unused result validate_utf16_with_errors(const char16_t * buf, size_t len) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5323,16 +5754,16 @@ simdutf_warn_unused result validate_utf16_with_errors(const char16_t * buf, size #endif } simdutf_warn_unused result validate_utf16le_with_errors(const char16_t * buf, size_t len) noexcept { - return get_active_implementation()->validate_utf16le_with_errors(buf, len); + return get_default_implementation()->validate_utf16le_with_errors(buf, len); } simdutf_warn_unused result validate_utf16be_with_errors(const char16_t * buf, size_t len) noexcept { - return get_active_implementation()->validate_utf16be_with_errors(buf, len); + return get_default_implementation()->validate_utf16be_with_errors(buf, len); } simdutf_warn_unused bool validate_utf32(const char32_t * buf, size_t len) noexcept { - return get_active_implementation()->validate_utf32(buf, len); + return get_default_implementation()->validate_utf32(buf, len); } simdutf_warn_unused result validate_utf32_with_errors(const char32_t * buf, size_t len) noexcept { - return get_active_implementation()->validate_utf32_with_errors(buf, len); + return get_default_implementation()->validate_utf32_with_errors(buf, len); } simdutf_warn_unused size_t convert_valid_utf8_to_utf16(const char * input, size_t length, char16_t* utf16_buffer) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5342,13 +5773,13 @@ simdutf_warn_unused size_t convert_valid_utf8_to_utf16(const char * input, size_ #endif } simdutf_warn_unused size_t convert_valid_utf8_to_utf16le(const char * input, size_t length, char16_t* utf16_buffer) noexcept { - return get_active_implementation()->convert_valid_utf8_to_utf16le(input, length, utf16_buffer); + return get_default_implementation()->convert_valid_utf8_to_utf16le(input, length, utf16_buffer); } simdutf_warn_unused size_t convert_valid_utf8_to_utf16be(const char * input, size_t length, char16_t* utf16_buffer) noexcept { - return get_active_implementation()->convert_valid_utf8_to_utf16be(input, length, utf16_buffer); + return get_default_implementation()->convert_valid_utf8_to_utf16be(input, length, utf16_buffer); } simdutf_warn_unused size_t convert_valid_utf8_to_utf32(const char * input, size_t length, char32_t* utf32_buffer) noexcept { - return get_active_implementation()->convert_valid_utf8_to_utf32(input, length, utf32_buffer); + return get_default_implementation()->convert_valid_utf8_to_utf32(input, length, utf32_buffer); } simdutf_warn_unused size_t convert_utf16_to_utf8(const char16_t * buf, size_t len, char* utf8_buffer) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5372,28 +5803,28 @@ simdutf_warn_unused size_t convert_latin1_to_utf16(const char * buf, size_t len, #endif } simdutf_warn_unused size_t convert_utf16be_to_latin1(const char16_t * buf, size_t len, char* latin1_buffer) noexcept { - return get_active_implementation()->convert_utf16be_to_latin1(buf, len, latin1_buffer); + return get_default_implementation()->convert_utf16be_to_latin1(buf, len, latin1_buffer); } simdutf_warn_unused size_t convert_utf16le_to_latin1(const char16_t * buf, size_t len, char* latin1_buffer) noexcept { - return get_active_implementation()->convert_utf16le_to_latin1(buf, len, latin1_buffer); + return get_default_implementation()->convert_utf16le_to_latin1(buf, len, latin1_buffer); } simdutf_warn_unused size_t convert_valid_utf16be_to_latin1(const char16_t * buf, size_t len, char* latin1_buffer) noexcept { - return 
get_active_implementation()->convert_valid_utf16be_to_latin1(buf, len, latin1_buffer); + return get_default_implementation()->convert_valid_utf16be_to_latin1(buf, len, latin1_buffer); } simdutf_warn_unused size_t convert_valid_utf16le_to_latin1(const char16_t * buf, size_t len, char* latin1_buffer) noexcept { - return get_active_implementation()->convert_valid_utf16le_to_latin1(buf, len, latin1_buffer); + return get_default_implementation()->convert_valid_utf16le_to_latin1(buf, len, latin1_buffer); } simdutf_warn_unused result convert_utf16le_to_latin1_with_errors(const char16_t * buf, size_t len, char* latin1_buffer) noexcept { - return get_active_implementation()->convert_utf16le_to_latin1_with_errors(buf, len, latin1_buffer); + return get_default_implementation()->convert_utf16le_to_latin1_with_errors(buf, len, latin1_buffer); } simdutf_warn_unused result convert_utf16be_to_latin1_with_errors(const char16_t * buf, size_t len, char* latin1_buffer) noexcept { - return get_active_implementation()->convert_utf16be_to_latin1_with_errors(buf, len, latin1_buffer); + return get_default_implementation()->convert_utf16be_to_latin1_with_errors(buf, len, latin1_buffer); } simdutf_warn_unused size_t convert_utf16le_to_utf8(const char16_t * buf, size_t len, char* utf8_buffer) noexcept { - return get_active_implementation()->convert_utf16le_to_utf8(buf, len, utf8_buffer); + return get_default_implementation()->convert_utf16le_to_utf8(buf, len, utf8_buffer); } simdutf_warn_unused size_t convert_utf16be_to_utf8(const char16_t * buf, size_t len, char* utf8_buffer) noexcept { - return get_active_implementation()->convert_utf16be_to_utf8(buf, len, utf8_buffer); + return get_default_implementation()->convert_utf16be_to_utf8(buf, len, utf8_buffer); } simdutf_warn_unused result convert_utf16_to_utf8_with_errors(const char16_t * buf, size_t len, char* utf8_buffer) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5410,10 +5841,10 @@ simdutf_warn_unused result convert_utf16_to_latin1_with_errors(const char16_t * #endif } simdutf_warn_unused result convert_utf16le_to_utf8_with_errors(const char16_t * buf, size_t len, char* utf8_buffer) noexcept { - return get_active_implementation()->convert_utf16le_to_utf8_with_errors(buf, len, utf8_buffer); + return get_default_implementation()->convert_utf16le_to_utf8_with_errors(buf, len, utf8_buffer); } simdutf_warn_unused result convert_utf16be_to_utf8_with_errors(const char16_t * buf, size_t len, char* utf8_buffer) noexcept { - return get_active_implementation()->convert_utf16be_to_utf8_with_errors(buf, len, utf8_buffer); + return get_default_implementation()->convert_utf16be_to_utf8_with_errors(buf, len, utf8_buffer); } simdutf_warn_unused size_t convert_valid_utf16_to_utf8(const char16_t * buf, size_t len, char* utf8_buffer) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5430,19 +5861,19 @@ simdutf_warn_unused size_t convert_valid_utf16_to_latin1(const char16_t * buf, s #endif } simdutf_warn_unused size_t convert_valid_utf16le_to_utf8(const char16_t * buf, size_t len, char* utf8_buffer) noexcept { - return get_active_implementation()->convert_valid_utf16le_to_utf8(buf, len, utf8_buffer); + return get_default_implementation()->convert_valid_utf16le_to_utf8(buf, len, utf8_buffer); } simdutf_warn_unused size_t convert_valid_utf16be_to_utf8(const char16_t * buf, size_t len, char* utf8_buffer) noexcept { - return get_active_implementation()->convert_valid_utf16be_to_utf8(buf, len, utf8_buffer); + return get_default_implementation()->convert_valid_utf16be_to_utf8(buf, len, utf8_buffer); } 
simdutf_warn_unused size_t convert_utf32_to_utf8(const char32_t * buf, size_t len, char* utf8_buffer) noexcept { - return get_active_implementation()->convert_utf32_to_utf8(buf, len, utf8_buffer); + return get_default_implementation()->convert_utf32_to_utf8(buf, len, utf8_buffer); } simdutf_warn_unused result convert_utf32_to_utf8_with_errors(const char32_t * buf, size_t len, char* utf8_buffer) noexcept { - return get_active_implementation()->convert_utf32_to_utf8_with_errors(buf, len, utf8_buffer); + return get_default_implementation()->convert_utf32_to_utf8_with_errors(buf, len, utf8_buffer); } simdutf_warn_unused size_t convert_valid_utf32_to_utf8(const char32_t * buf, size_t len, char* utf8_buffer) noexcept { - return get_active_implementation()->convert_valid_utf32_to_utf8(buf, len, utf8_buffer); + return get_default_implementation()->convert_valid_utf32_to_utf8(buf, len, utf8_buffer); } simdutf_warn_unused size_t convert_utf32_to_utf16(const char32_t * buf, size_t len, char16_t* utf16_buffer) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5452,13 +5883,13 @@ simdutf_warn_unused size_t convert_utf32_to_utf16(const char32_t * buf, size_t l #endif } simdutf_warn_unused size_t convert_utf32_to_latin1(const char32_t * input, size_t length, char* latin1_output) noexcept { - return get_active_implementation()->convert_utf32_to_latin1(input, length, latin1_output); + return get_default_implementation()->convert_utf32_to_latin1(input, length, latin1_output); } simdutf_warn_unused size_t convert_utf32_to_utf16le(const char32_t * buf, size_t len, char16_t* utf16_buffer) noexcept { - return get_active_implementation()->convert_utf32_to_utf16le(buf, len, utf16_buffer); + return get_default_implementation()->convert_utf32_to_utf16le(buf, len, utf16_buffer); } simdutf_warn_unused size_t convert_utf32_to_utf16be(const char32_t * buf, size_t len, char16_t* utf16_buffer) noexcept { - return get_active_implementation()->convert_utf32_to_utf16be(buf, len, utf16_buffer); + return get_default_implementation()->convert_utf32_to_utf16be(buf, len, utf16_buffer); } simdutf_warn_unused result convert_utf32_to_utf16_with_errors(const char32_t * buf, size_t len, char16_t* utf16_buffer) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5468,10 +5899,10 @@ simdutf_warn_unused result convert_utf32_to_utf16_with_errors(const char32_t * b #endif } simdutf_warn_unused result convert_utf32_to_utf16le_with_errors(const char32_t * buf, size_t len, char16_t* utf16_buffer) noexcept { - return get_active_implementation()->convert_utf32_to_utf16le_with_errors(buf, len, utf16_buffer); + return get_default_implementation()->convert_utf32_to_utf16le_with_errors(buf, len, utf16_buffer); } simdutf_warn_unused result convert_utf32_to_utf16be_with_errors(const char32_t * buf, size_t len, char16_t* utf16_buffer) noexcept { - return get_active_implementation()->convert_utf32_to_utf16be_with_errors(buf, len, utf16_buffer); + return get_default_implementation()->convert_utf32_to_utf16be_with_errors(buf, len, utf16_buffer); } simdutf_warn_unused size_t convert_valid_utf32_to_utf16(const char32_t * buf, size_t len, char16_t* utf16_buffer) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5481,10 +5912,10 @@ simdutf_warn_unused size_t convert_valid_utf32_to_utf16(const char32_t * buf, si #endif } simdutf_warn_unused size_t convert_valid_utf32_to_utf16le(const char32_t * buf, size_t len, char16_t* utf16_buffer) noexcept { - return get_active_implementation()->convert_valid_utf32_to_utf16le(buf, len, utf16_buffer); + return 
get_default_implementation()->convert_valid_utf32_to_utf16le(buf, len, utf16_buffer); } simdutf_warn_unused size_t convert_valid_utf32_to_utf16be(const char32_t * buf, size_t len, char16_t* utf16_buffer) noexcept { - return get_active_implementation()->convert_valid_utf32_to_utf16be(buf, len, utf16_buffer); + return get_default_implementation()->convert_valid_utf32_to_utf16be(buf, len, utf16_buffer); } simdutf_warn_unused size_t convert_utf16_to_utf32(const char16_t * buf, size_t len, char32_t* utf32_buffer) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5494,10 +5925,10 @@ simdutf_warn_unused size_t convert_utf16_to_utf32(const char16_t * buf, size_t l #endif } simdutf_warn_unused size_t convert_utf16le_to_utf32(const char16_t * buf, size_t len, char32_t* utf32_buffer) noexcept { - return get_active_implementation()->convert_utf16le_to_utf32(buf, len, utf32_buffer); + return get_default_implementation()->convert_utf16le_to_utf32(buf, len, utf32_buffer); } simdutf_warn_unused size_t convert_utf16be_to_utf32(const char16_t * buf, size_t len, char32_t* utf32_buffer) noexcept { - return get_active_implementation()->convert_utf16be_to_utf32(buf, len, utf32_buffer); + return get_default_implementation()->convert_utf16be_to_utf32(buf, len, utf32_buffer); } simdutf_warn_unused result convert_utf16_to_utf32_with_errors(const char16_t * buf, size_t len, char32_t* utf32_buffer) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5507,10 +5938,10 @@ simdutf_warn_unused result convert_utf16_to_utf32_with_errors(const char16_t * b #endif } simdutf_warn_unused result convert_utf16le_to_utf32_with_errors(const char16_t * buf, size_t len, char32_t* utf32_buffer) noexcept { - return get_active_implementation()->convert_utf16le_to_utf32_with_errors(buf, len, utf32_buffer); + return get_default_implementation()->convert_utf16le_to_utf32_with_errors(buf, len, utf32_buffer); } simdutf_warn_unused result convert_utf16be_to_utf32_with_errors(const char16_t * buf, size_t len, char32_t* utf32_buffer) noexcept { - return get_active_implementation()->convert_utf16be_to_utf32_with_errors(buf, len, utf32_buffer); + return get_default_implementation()->convert_utf16be_to_utf32_with_errors(buf, len, utf32_buffer); } simdutf_warn_unused size_t convert_valid_utf16_to_utf32(const char16_t * buf, size_t len, char32_t* utf32_buffer) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5520,13 +5951,13 @@ simdutf_warn_unused size_t convert_valid_utf16_to_utf32(const char16_t * buf, si #endif } simdutf_warn_unused size_t convert_valid_utf16le_to_utf32(const char16_t * buf, size_t len, char32_t* utf32_buffer) noexcept { - return get_active_implementation()->convert_valid_utf16le_to_utf32(buf, len, utf32_buffer); + return get_default_implementation()->convert_valid_utf16le_to_utf32(buf, len, utf32_buffer); } simdutf_warn_unused size_t convert_valid_utf16be_to_utf32(const char16_t * buf, size_t len, char32_t* utf32_buffer) noexcept { - return get_active_implementation()->convert_valid_utf16be_to_utf32(buf, len, utf32_buffer); + return get_default_implementation()->convert_valid_utf16be_to_utf32(buf, len, utf32_buffer); } void change_endianness_utf16(const char16_t * input, size_t length, char16_t * output) noexcept { - get_active_implementation()->change_endianness_utf16(input, length, output); + get_default_implementation()->change_endianness_utf16(input, length, output); } simdutf_warn_unused size_t count_utf16(const char16_t * input, size_t length) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5536,25 +5967,25 @@ simdutf_warn_unused size_t count_utf16(const 
char16_t * input, size_t length) no #endif } simdutf_warn_unused size_t count_utf16le(const char16_t * input, size_t length) noexcept { - return get_active_implementation()->count_utf16le(input, length); + return get_default_implementation()->count_utf16le(input, length); } simdutf_warn_unused size_t count_utf16be(const char16_t * input, size_t length) noexcept { - return get_active_implementation()->count_utf16be(input, length); + return get_default_implementation()->count_utf16be(input, length); } simdutf_warn_unused size_t count_utf8(const char * input, size_t length) noexcept { - return get_active_implementation()->count_utf8(input, length); + return get_default_implementation()->count_utf8(input, length); } simdutf_warn_unused size_t latin1_length_from_utf8(const char * buf, size_t len) noexcept { - return get_active_implementation()->latin1_length_from_utf8(buf, len); + return get_default_implementation()->latin1_length_from_utf8(buf, len); } simdutf_warn_unused size_t latin1_length_from_utf16(size_t len) noexcept { - return get_active_implementation()->latin1_length_from_utf16(len); + return get_default_implementation()->latin1_length_from_utf16(len); } simdutf_warn_unused size_t latin1_length_from_utf32(size_t len) noexcept { - return get_active_implementation()->latin1_length_from_utf32(len); + return get_default_implementation()->latin1_length_from_utf32(len); } simdutf_warn_unused size_t utf8_length_from_latin1(const char * buf, size_t len) noexcept { - return get_active_implementation()->utf8_length_from_latin1(buf, len); + return get_default_implementation()->utf8_length_from_latin1(buf, len); } simdutf_warn_unused size_t utf8_length_from_utf16(const char16_t * input, size_t length) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5564,10 +5995,10 @@ simdutf_warn_unused size_t utf8_length_from_utf16(const char16_t * input, size_t #endif } simdutf_warn_unused size_t utf8_length_from_utf16le(const char16_t * input, size_t length) noexcept { - return get_active_implementation()->utf8_length_from_utf16le(input, length); + return get_default_implementation()->utf8_length_from_utf16le(input, length); } simdutf_warn_unused size_t utf8_length_from_utf16be(const char16_t * input, size_t length) noexcept { - return get_active_implementation()->utf8_length_from_utf16be(input, length); + return get_default_implementation()->utf8_length_from_utf16be(input, length); } simdutf_warn_unused size_t utf32_length_from_utf16(const char16_t * input, size_t length) noexcept { #if SIMDUTF_IS_BIG_ENDIAN @@ -5577,31 +6008,48 @@ simdutf_warn_unused size_t utf32_length_from_utf16(const char16_t * input, size_ #endif } simdutf_warn_unused size_t utf32_length_from_utf16le(const char16_t * input, size_t length) noexcept { - return get_active_implementation()->utf32_length_from_utf16le(input, length); + return get_default_implementation()->utf32_length_from_utf16le(input, length); } simdutf_warn_unused size_t utf32_length_from_utf16be(const char16_t * input, size_t length) noexcept { - return get_active_implementation()->utf32_length_from_utf16be(input, length); + return get_default_implementation()->utf32_length_from_utf16be(input, length); } simdutf_warn_unused size_t utf16_length_from_utf8(const char * input, size_t length) noexcept { - return get_active_implementation()->utf16_length_from_utf8(input, length); + return get_default_implementation()->utf16_length_from_utf8(input, length); } simdutf_warn_unused size_t utf16_length_from_latin1(size_t length) noexcept { - return 
get_active_implementation()->utf16_length_from_latin1(length); + return get_default_implementation()->utf16_length_from_latin1(length); } simdutf_warn_unused size_t utf8_length_from_utf32(const char32_t * input, size_t length) noexcept { - return get_active_implementation()->utf8_length_from_utf32(input, length); + return get_default_implementation()->utf8_length_from_utf32(input, length); } simdutf_warn_unused size_t utf16_length_from_utf32(const char32_t * input, size_t length) noexcept { - return get_active_implementation()->utf16_length_from_utf32(input, length); + return get_default_implementation()->utf16_length_from_utf32(input, length); } simdutf_warn_unused size_t utf32_length_from_utf8(const char * input, size_t length) noexcept { - return get_active_implementation()->utf32_length_from_utf8(input, length); + return get_default_implementation()->utf32_length_from_utf8(input, length); +} + +simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) noexcept { + return get_default_implementation()->maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) noexcept { + return get_default_implementation()->base64_to_binary(input, length, output); } + +simdutf_warn_unused size_t base64_length_from_binary(size_t length) noexcept { + return get_default_implementation()->base64_length_from_binary(length); +} + +size_t binary_to_base64(const char * input, size_t length, char* output) noexcept { + return get_default_implementation()->binary_to_base64(input, length, output); +} + simdutf_warn_unused simdutf::encoding_type autodetect_encoding(const char * buf, size_t length) noexcept { - return get_active_implementation()->autodetect_encoding(buf, length); + return get_default_implementation()->autodetect_encoding(buf, length); } simdutf_warn_unused int detect_encodings(const char * buf, size_t length) noexcept { - return get_active_implementation()->detect_encodings(buf, length); + return get_default_implementation()->detect_encodings(buf, length); } const implementation * builtin_implementation() { static const implementation * builtin_impl = get_available_implementations()[SIMDUTF_STRINGIFY(SIMDUTF_BUILTIN_IMPLEMENTATION)]; @@ -5667,7 +6115,7 @@ encoding_type check_bom(const uint8_t* byte, size_t length) { return encoding_type::UTF16_BE; } else if (length >= 4 && byte[0] == 0x00 and byte[1] == 0x00 and byte[2] == 0xfe and byte[3] == 0xff) { return encoding_type::UTF32_BE; - } else if (length >= 4 && byte[0] == 0xef and byte[1] == 0xbb and byte[3] == 0xbf) { + } else if (length >= 4 && byte[0] == 0xef and byte[1] == 0xbb and byte[2] == 0xbf) { return encoding_type::UTF8; } return encoding_type::unspecified; @@ -5703,6 +6151,420 @@ namespace simdutf { /* end file src/error.cpp */ // The large tables should be included once and they // should not depend on a kernel. 
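
The tables that follow back the scalar base64 paths. The three 256-entry encode tables e0, e1 and e2 are laid out so that a raw input byte can index them directly: e0[b] is the base64 alphabet at b >> 2, while e1[b] and e2[b] repeat the 64-character alphabet four times (alphabet[b & 63]); the d0 to d3 tables appear to hold pre-shifted little-endian decode values, with 0x01ffffff marking invalid input bytes. As a hedged illustration of how tables shaped like e0/e1/e2 are typically used (this sketch is not simdutf's fallback code), here is a scalar encoder for one 3-byte group:

#include <cstdint>
#include <cstdio>

static const char alphabet[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

// Encode one 3-byte group into 4 base64 characters. The index expressions match
// what e0/e1/e2 lookups compute: e0[b] == alphabet[b >> 2], e1[b] == e2[b] == alphabet[b & 63].
static void encode_triplet(const std::uint8_t in[3], char out[4]) {
  out[0] = alphabet[in[0] >> 2];                            // e0[in[0]]
  out[1] = alphabet[((in[0] & 0x03) << 4) | (in[1] >> 4)];  // e1[mixed bits of bytes 0 and 1]
  out[2] = alphabet[((in[1] & 0x0f) << 2) | (in[2] >> 6)];  // e1[mixed bits of bytes 1 and 2]
  out[3] = alphabet[in[2] & 0x3f];                          // e2[in[2]]
}

int main() {
  const std::uint8_t in[3] = {'M', 'a', 'n'};
  char out[5] = {0};
  encode_triplet(in, out);
  std::printf("%s\n", out);  // prints "TWFu"
  return 0;
}
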
+/* begin file src/tables/base64_tables.h */ +#ifndef SIMDUTF_BASE64_TABLES_H +#define SIMDUTF_BASE64_TABLES_H +#include +#include + +namespace simdutf { +namespace { +namespace tables { +namespace base64 { + +const char e0[256] = { + 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'C', 'C', 'C', 'C', 'D', 'D', 'D', + 'D', 'E', 'E', 'E', 'E', 'F', 'F', 'F', 'F', 'G', 'G', 'G', 'G', 'H', 'H', + 'H', 'H', 'I', 'I', 'I', 'I', 'J', 'J', 'J', 'J', 'K', 'K', 'K', 'K', 'L', + 'L', 'L', 'L', 'M', 'M', 'M', 'M', 'N', 'N', 'N', 'N', 'O', 'O', 'O', 'O', + 'P', 'P', 'P', 'P', 'Q', 'Q', 'Q', 'Q', 'R', 'R', 'R', 'R', 'S', 'S', 'S', + 'S', 'T', 'T', 'T', 'T', 'U', 'U', 'U', 'U', 'V', 'V', 'V', 'V', 'W', 'W', + 'W', 'W', 'X', 'X', 'X', 'X', 'Y', 'Y', 'Y', 'Y', 'Z', 'Z', 'Z', 'Z', 'a', + 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'd', 'd', 'd', 'd', + 'e', 'e', 'e', 'e', 'f', 'f', 'f', 'f', 'g', 'g', 'g', 'g', 'h', 'h', 'h', + 'h', 'i', 'i', 'i', 'i', 'j', 'j', 'j', 'j', 'k', 'k', 'k', 'k', 'l', 'l', + 'l', 'l', 'm', 'm', 'm', 'm', 'n', 'n', 'n', 'n', 'o', 'o', 'o', 'o', 'p', + 'p', 'p', 'p', 'q', 'q', 'q', 'q', 'r', 'r', 'r', 'r', 's', 's', 's', 's', + 't', 't', 't', 't', 'u', 'u', 'u', 'u', 'v', 'v', 'v', 'v', 'w', 'w', 'w', + 'w', 'x', 'x', 'x', 'x', 'y', 'y', 'y', 'y', 'z', 'z', 'z', 'z', '0', '0', + '0', '0', '1', '1', '1', '1', '2', '2', '2', '2', '3', '3', '3', '3', '4', + '4', '4', '4', '5', '5', '5', '5', '6', '6', '6', '6', '7', '7', '7', '7', + '8', '8', '8', '8', '9', '9', '9', '9', '+', '+', '+', '+', '/', '/', '/', + '/'}; + +const char e1[256] = { + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', + 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', + 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', + 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', + 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', + 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', + '4', '5', '6', '7', '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', + 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', + 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', + 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', + '/'}; + +const char e2[256] = { + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', + 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', + 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', + 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', + 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', + 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', + '4', '5', '6', 
'7', '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', + 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', + 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', + 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', + '/'}; + +const int8_t decoding_table[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -1, -1, -2, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 62, -1, 62, -1, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, + 63, -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1}; + +/* SPECIAL DECODE TABLES FOR LITTLE ENDIAN CPUS */ + +const uint32_t d0[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x000000f8, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x000000fc, + 0x000000d0, 0x000000d4, 0x000000d8, 0x000000dc, 0x000000e0, 0x000000e4, + 0x000000e8, 0x000000ec, 0x000000f0, 0x000000f4, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00000004, 0x00000008, 0x0000000c, 0x00000010, 0x00000014, 0x00000018, + 0x0000001c, 0x00000020, 0x00000024, 0x00000028, 0x0000002c, 0x00000030, + 0x00000034, 0x00000038, 0x0000003c, 0x00000040, 0x00000044, 0x00000048, + 0x0000004c, 0x00000050, 0x00000054, 0x00000058, 0x0000005c, 0x00000060, + 0x00000064, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x00000068, 0x0000006c, 0x00000070, 0x00000074, 0x00000078, + 0x0000007c, 0x00000080, 0x00000084, 0x00000088, 0x0000008c, 0x00000090, + 0x00000094, 0x00000098, 0x0000009c, 0x000000a0, 0x000000a4, 0x000000a8, + 0x000000ac, 0x000000b0, 0x000000b4, 0x000000b8, 0x000000bc, 0x000000c0, + 0x000000c4, 0x000000c8, 0x000000cc, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 
0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; + +const uint32_t d1[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x0000e003, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x0000f003, + 0x00004003, 0x00005003, 0x00006003, 0x00007003, 0x00008003, 0x00009003, + 0x0000a003, 0x0000b003, 0x0000c003, 0x0000d003, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00001000, 0x00002000, 0x00003000, 0x00004000, 0x00005000, 0x00006000, + 0x00007000, 0x00008000, 0x00009000, 0x0000a000, 0x0000b000, 0x0000c000, + 0x0000d000, 0x0000e000, 0x0000f000, 0x00000001, 0x00001001, 0x00002001, + 0x00003001, 0x00004001, 0x00005001, 0x00006001, 0x00007001, 0x00008001, + 0x00009001, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x0000a001, 0x0000b001, 0x0000c001, 0x0000d001, 0x0000e001, + 0x0000f001, 0x00000002, 0x00001002, 0x00002002, 0x00003002, 0x00004002, + 0x00005002, 0x00006002, 0x00007002, 0x00008002, 0x00009002, 0x0000a002, + 0x0000b002, 0x0000c002, 0x0000d002, 0x0000e002, 0x0000f002, 0x00000003, + 0x00001003, 0x00002003, 0x00003003, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 
0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; + +const uint32_t d2[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x00800f00, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00c00f00, + 0x00000d00, 0x00400d00, 0x00800d00, 0x00c00d00, 0x00000e00, 0x00400e00, + 0x00800e00, 0x00c00e00, 0x00000f00, 0x00400f00, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00400000, 0x00800000, 0x00c00000, 0x00000100, 0x00400100, 0x00800100, + 0x00c00100, 0x00000200, 0x00400200, 0x00800200, 0x00c00200, 0x00000300, + 0x00400300, 0x00800300, 0x00c00300, 0x00000400, 0x00400400, 0x00800400, + 0x00c00400, 0x00000500, 0x00400500, 0x00800500, 0x00c00500, 0x00000600, + 0x00400600, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x00800600, 0x00c00600, 0x00000700, 0x00400700, 0x00800700, + 0x00c00700, 0x00000800, 0x00400800, 0x00800800, 0x00c00800, 0x00000900, + 0x00400900, 0x00800900, 0x00c00900, 0x00000a00, 0x00400a00, 0x00800a00, + 0x00c00a00, 0x00000b00, 0x00400b00, 0x00800b00, 0x00c00b00, 0x00000c00, + 0x00400c00, 0x00800c00, 0x00c00c00, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 
0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; + +const uint32_t d3[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x003e0000, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x003f0000, + 0x00340000, 0x00350000, 0x00360000, 0x00370000, 0x00380000, 0x00390000, + 0x003a0000, 0x003b0000, 0x003c0000, 0x003d0000, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00010000, 0x00020000, 0x00030000, 0x00040000, 0x00050000, 0x00060000, + 0x00070000, 0x00080000, 0x00090000, 0x000a0000, 0x000b0000, 0x000c0000, + 0x000d0000, 0x000e0000, 0x000f0000, 0x00100000, 0x00110000, 0x00120000, + 0x00130000, 0x00140000, 0x00150000, 0x00160000, 0x00170000, 0x00180000, + 0x00190000, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x001a0000, 0x001b0000, 0x001c0000, 0x001d0000, 0x001e0000, + 0x001f0000, 0x00200000, 0x00210000, 0x00220000, 0x00230000, 0x00240000, + 0x00250000, 0x00260000, 0x00270000, 0x00280000, 0x00290000, 0x002a0000, + 0x002b0000, 0x002c0000, 0x002d0000, 0x002e0000, 0x002f0000, 0x00300000, + 0x00310000, 0x00320000, 0x00330000, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 
0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; +const uint64_t thintable_epi8[256] = { + 0x0706050403020100, 0x0007060504030201, 0x0007060504030200, + 0x0000070605040302, 0x0007060504030100, 0x0000070605040301, + 0x0000070605040300, 0x0000000706050403, 0x0007060504020100, + 0x0000070605040201, 0x0000070605040200, 0x0000000706050402, + 0x0000070605040100, 0x0000000706050401, 0x0000000706050400, + 0x0000000007060504, 0x0007060503020100, 0x0000070605030201, + 0x0000070605030200, 0x0000000706050302, 0x0000070605030100, + 0x0000000706050301, 0x0000000706050300, 0x0000000007060503, + 0x0000070605020100, 0x0000000706050201, 0x0000000706050200, + 0x0000000007060502, 0x0000000706050100, 0x0000000007060501, + 0x0000000007060500, 0x0000000000070605, 0x0007060403020100, + 0x0000070604030201, 0x0000070604030200, 0x0000000706040302, + 0x0000070604030100, 0x0000000706040301, 0x0000000706040300, + 0x0000000007060403, 0x0000070604020100, 0x0000000706040201, + 0x0000000706040200, 0x0000000007060402, 0x0000000706040100, + 0x0000000007060401, 0x0000000007060400, 0x0000000000070604, + 0x0000070603020100, 0x0000000706030201, 0x0000000706030200, + 0x0000000007060302, 0x0000000706030100, 0x0000000007060301, + 0x0000000007060300, 0x0000000000070603, 0x0000000706020100, + 0x0000000007060201, 0x0000000007060200, 0x0000000000070602, + 0x0000000007060100, 0x0000000000070601, 0x0000000000070600, + 0x0000000000000706, 0x0007050403020100, 0x0000070504030201, + 0x0000070504030200, 0x0000000705040302, 0x0000070504030100, + 0x0000000705040301, 0x0000000705040300, 0x0000000007050403, + 0x0000070504020100, 0x0000000705040201, 0x0000000705040200, + 0x0000000007050402, 0x0000000705040100, 0x0000000007050401, + 0x0000000007050400, 0x0000000000070504, 0x0000070503020100, + 0x0000000705030201, 0x0000000705030200, 0x0000000007050302, + 0x0000000705030100, 0x0000000007050301, 0x0000000007050300, + 0x0000000000070503, 0x0000000705020100, 0x0000000007050201, + 0x0000000007050200, 0x0000000000070502, 0x0000000007050100, + 0x0000000000070501, 0x0000000000070500, 0x0000000000000705, + 0x0000070403020100, 0x0000000704030201, 0x0000000704030200, + 0x0000000007040302, 0x0000000704030100, 0x0000000007040301, + 0x0000000007040300, 0x0000000000070403, 0x0000000704020100, + 0x0000000007040201, 0x0000000007040200, 0x0000000000070402, + 0x0000000007040100, 0x0000000000070401, 0x0000000000070400, + 0x0000000000000704, 0x0000000703020100, 0x0000000007030201, + 0x0000000007030200, 0x0000000000070302, 0x0000000007030100, + 0x0000000000070301, 0x0000000000070300, 0x0000000000000703, + 0x0000000007020100, 0x0000000000070201, 0x0000000000070200, + 0x0000000000000702, 0x0000000000070100, 0x0000000000000701, + 0x0000000000000700, 0x0000000000000007, 0x0006050403020100, + 0x0000060504030201, 0x0000060504030200, 0x0000000605040302, + 0x0000060504030100, 0x0000000605040301, 0x0000000605040300, + 0x0000000006050403, 0x0000060504020100, 0x0000000605040201, + 0x0000000605040200, 0x0000000006050402, 0x0000000605040100, + 0x0000000006050401, 0x0000000006050400, 0x0000000000060504, + 0x0000060503020100, 0x0000000605030201, 0x0000000605030200, + 0x0000000006050302, 0x0000000605030100, 0x0000000006050301, + 0x0000000006050300, 
0x0000000000060503, 0x0000000605020100, + 0x0000000006050201, 0x0000000006050200, 0x0000000000060502, + 0x0000000006050100, 0x0000000000060501, 0x0000000000060500, + 0x0000000000000605, 0x0000060403020100, 0x0000000604030201, + 0x0000000604030200, 0x0000000006040302, 0x0000000604030100, + 0x0000000006040301, 0x0000000006040300, 0x0000000000060403, + 0x0000000604020100, 0x0000000006040201, 0x0000000006040200, + 0x0000000000060402, 0x0000000006040100, 0x0000000000060401, + 0x0000000000060400, 0x0000000000000604, 0x0000000603020100, + 0x0000000006030201, 0x0000000006030200, 0x0000000000060302, + 0x0000000006030100, 0x0000000000060301, 0x0000000000060300, + 0x0000000000000603, 0x0000000006020100, 0x0000000000060201, + 0x0000000000060200, 0x0000000000000602, 0x0000000000060100, + 0x0000000000000601, 0x0000000000000600, 0x0000000000000006, + 0x0000050403020100, 0x0000000504030201, 0x0000000504030200, + 0x0000000005040302, 0x0000000504030100, 0x0000000005040301, + 0x0000000005040300, 0x0000000000050403, 0x0000000504020100, + 0x0000000005040201, 0x0000000005040200, 0x0000000000050402, + 0x0000000005040100, 0x0000000000050401, 0x0000000000050400, + 0x0000000000000504, 0x0000000503020100, 0x0000000005030201, + 0x0000000005030200, 0x0000000000050302, 0x0000000005030100, + 0x0000000000050301, 0x0000000000050300, 0x0000000000000503, + 0x0000000005020100, 0x0000000000050201, 0x0000000000050200, + 0x0000000000000502, 0x0000000000050100, 0x0000000000000501, + 0x0000000000000500, 0x0000000000000005, 0x0000000403020100, + 0x0000000004030201, 0x0000000004030200, 0x0000000000040302, + 0x0000000004030100, 0x0000000000040301, 0x0000000000040300, + 0x0000000000000403, 0x0000000004020100, 0x0000000000040201, + 0x0000000000040200, 0x0000000000000402, 0x0000000000040100, + 0x0000000000000401, 0x0000000000000400, 0x0000000000000004, + 0x0000000003020100, 0x0000000000030201, 0x0000000000030200, + 0x0000000000000302, 0x0000000000030100, 0x0000000000000301, + 0x0000000000000300, 0x0000000000000003, 0x0000000000020100, + 0x0000000000000201, 0x0000000000000200, 0x0000000000000002, + 0x0000000000000100, 0x0000000000000001, 0x0000000000000000, + 0x0000000000000000, +}; + +const uint8_t pshufb_combine_table[272] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0xff, 0xff, 0xff, 0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, 0xff, 0xff, 0x00, 0x01, 0x02, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x01, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; + +const unsigned char BitsSetTable256mul2[256] = { + 0, 2, 2, 4, 2, 4, 4, 6, 2, 4, 4, 6, 4, 6, 6, 8, 2, 4, 4, + 6, 4, 6, 6, 8, 4, 6, 6, 8, 6, 8, 8, 10, 2, 4, 4, 6, 4, 6, + 6, 8, 4, 6, 6, 8, 6, 8, 8, 10, 4, 6, 6, 8, 6, 8, 8, 10, 6, + 8, 8, 10, 8, 10, 10, 12, 2, 4, 4, 6, 4, 6, 6, 8, 4, 6, 6, 8, + 6, 8, 8, 10, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, 10, 8, 10, 10, + 12, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, 10, 8, 10, 10, 12, 6, 8, + 8, 10, 8, 10, 
10, 12, 8, 10, 10, 12, 10, 12, 12, 14, 2, 4, 4, 6, 4, + 6, 6, 8, 4, 6, 6, 8, 6, 8, 8, 10, 4, 6, 6, 8, 6, 8, 8, 10, + 6, 8, 8, 10, 8, 10, 10, 12, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, + 10, 8, 10, 10, 12, 6, 8, 8, 10, 8, 10, 10, 12, 8, 10, 10, 12, 10, 12, + 12, 14, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, 10, 8, 10, 10, 12, 6, + 8, 8, 10, 8, 10, 10, 12, 8, 10, 10, 12, 10, 12, 12, 14, 6, 8, 8, 10, + 8, 10, 10, 12, 8, 10, 10, 12, 10, 12, 12, 14, 8, 10, 10, 12, 10, 12, 12, + 14, 10, 12, 12, 14, 12, 14, 14, 16}; + +const uint8_t to_base64_value[] = { + 255, 255, 255, 255, 255, 255, 255, 255, 255, 64, 64, 255, 255, 64, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 64, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 62, 255, + 255, 255, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255, + 255, 255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 255, 255, 255, 255, 255, 255, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255}; +} // namespace base64 +} // namespace tables +} // unnamed namespace +} // namespace simdutf + +#endif // SIMDUTF_BASE64_TABLES_H +/* end file src/tables/base64_tables.h */ /* begin file src/tables/utf8_to_utf16_tables.h */ #ifndef SIMDUTF_UTF8_TO_UTF16_TABLES_H #define SIMDUTF_UTF8_TO_UTF16_TABLES_H @@ -10707,7 +11569,12 @@ inline size_t latin1_length_from_utf32(size_t len) { return len; // a utf32 codepoint will always represent 1 latin1 character } - +inline simdutf_warn_unused uint32_t swap_bytes(const uint32_t word) { + return ((word >> 24) & 0xff) | // move byte 3 to byte 0 + ((word << 8) & 0xff0000) | // move byte 1 to byte 2 + ((word >> 8) & 0xff00) | // move byte 2 to byte 1 + ((word << 24) & 0xff000000); // byte 0 to byte 3 +} } // utf32 namespace } // unnamed namespace @@ -10750,84 +11617,261 @@ inline size_t utf16_length_from_latin1(size_t len) { #endif /* end file src/scalar/latin1.h */ +/* begin file src/scalar/base64.h */ +#ifndef SIMDUTF_BASE64_H +#define SIMDUTF_BASE64_H -/* begin file src/scalar/utf32_to_utf8/valid_utf32_to_utf8.h */ -#ifndef SIMDUTF_VALID_UTF32_TO_UTF8_H -#define SIMDUTF_VALID_UTF32_TO_UTF8_H - +#include +#include +#include namespace simdutf { namespace scalar { namespace { -namespace utf32_to_utf8 { +namespace base64 { -#if SIMDUTF_IMPLEMENTATION_FALLBACK || SIMDUTF_IMPLEMENTATION_PPC64 -// only used by the fallback and POWER kernel -inline size_t convert_valid(const char32_t* buf, size_t len, char* utf8_output) { - const uint32_t *data = reinterpret_cast(buf); - size_t pos = 0; - char* start{utf8_output}; - while (pos < len) { - // try to convert the next block of 2 ASCII characters - if (pos + 2 <= len) { // if it is safe to read 8 more bytes, check that they are ascii - uint64_t v; - ::memcpy(&v, data + 
pos, sizeof(uint64_t)); - if ((v & 0xFFFFFF80FFFFFF80) == 0) { - *utf8_output++ = char(buf[pos]); - *utf8_output++ = char(buf[pos+1]); - pos += 2; - continue; +// Returns true upon success. The destination buffer must be large enough and is +// incremented by the number of bytes written and src is incremented by the number of bytes read. +// This functions assumes that the padding (=) has been removed. +result base64_tail_decode(char *dst, const char *src, size_t length) { + const char *srcend = src + length; + const char *srcinit = src; + const char *dstinit = dst; + + uint32_t x; + size_t idx; + uint8_t buffer[4]; + while (true) { + while (src + 4 <= srcend && + (x = tables::base64::d0[uint8_t(src[0])] | tables::base64::d1[uint8_t(src[1])] | + tables::base64::d2[uint8_t(src[2])] | tables::base64::d3[uint8_t(src[3])]) < 0x01FFFFFF) { + if(match_system(endianness::BIG)) { + x = scalar::utf32::swap_bytes(x); + } + std::memcpy(dst, &x, 3); // optimization opportunity: copy 4 bytes + dst += 3; + src += 4; + } + idx = 0; + // we need at least four characters. + while (idx < 4 && src < srcend) { + char c = *src; + uint8_t code = tables::base64::to_base64_value[uint8_t(c)]; + buffer[idx] = uint8_t(code); + if (code <= 63) { + idx++; + } else if (code > 64) { + return {INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; } + src++; + } + if (idx != 4) { + if (idx == 2) { + uint32_t triple = + (uint32_t(buffer[0]) << 3 * 6) + (uint32_t(buffer[1]) << 2 * 6); + if(match_system(endianness::BIG)) { + triple <<= 8; + std::memcpy(dst, &triple, 1); + } else { + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 1); + } + dst += 1; + + } else if (idx == 3) { + uint32_t triple = (uint32_t(buffer[0]) << 3 * 6) + + (uint32_t(buffer[1]) << 2 * 6) + + (uint32_t(buffer[2]) << 1 * 6); + if(match_system(endianness::BIG)) { + triple <<= 8; + std::memcpy(dst, &triple, 2); + } else { + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 2); + } + dst += 2; + } else if (idx == 1) { + return {BASE64_INPUT_REMAINDER, size_t(dst - dstinit)}; + } + return {SUCCESS, size_t(dst - dstinit)}; } - uint32_t word = data[pos]; - if((word & 0xFFFFFF80)==0) { - // will generate one UTF-8 bytes - *utf8_output++ = char(word); - pos++; - } else if((word & 0xFFFFF800)==0) { - // will generate two UTF-8 bytes - // we have 0b110XXXXX 0b10XXXXXX - *utf8_output++ = char((word>>6) | 0b11000000); - *utf8_output++ = char((word & 0b111111) | 0b10000000); - pos++; - } else if((word & 0xFFFF0000)==0) { - // will generate three UTF-8 bytes - // we have 0b1110XXXX 0b10XXXXXX 0b10XXXXXX - *utf8_output++ = char((word>>12) | 0b11100000); - *utf8_output++ = char(((word>>6) & 0b111111) | 0b10000000); - *utf8_output++ = char((word & 0b111111) | 0b10000000); - pos++; + + uint32_t triple = + (uint32_t(buffer[0]) << 3 * 6) + (uint32_t(buffer[1]) << 2 * 6) + + (uint32_t(buffer[2]) << 1 * 6) + (uint32_t(buffer[3]) << 0 * 6); + if(match_system(endianness::BIG)) { + triple <<= 8; + std::memcpy(dst, &triple, 3); } else { - // will generate four UTF-8 bytes - // we have 0b11110XXX 0b10XXXXXX 0b10XXXXXX 0b10XXXXXX - *utf8_output++ = char((word>>18) | 0b11110000); - *utf8_output++ = char(((word>>12) & 0b111111) | 0b10000000); - *utf8_output++ = char(((word>>6) & 0b111111) | 0b10000000); - *utf8_output++ = char((word & 0b111111) | 0b10000000); - pos ++; + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 3); } + dst += 3; } - return utf8_output - start; } 
-#endif // SIMDUTF_IMPLEMENTATION_FALLBACK || SIMDUTF_IMPLEMENTATION_PPC64 -} // utf32_to_utf8 namespace +// Returns the number of bytes written. The destination buffer must be large +// enough. It will add padding (=) if needed. +size_t tail_encode_base64(char *dst, const char *src, size_t srclen) { + char *out = dst; + size_t i = 0; + uint8_t t1, t2, t3; + for (; i + 2 < srclen; i += 3) { + t1 = (uint8_t)src[i]; + t2 = (uint8_t)src[i + 1]; + t3 = (uint8_t)src[i + 2]; + *out++ = tables::base64::e0[t1]; + *out++ = tables::base64::e1[((t1 & 0x03) << 4) | ((t2 >> 4) & 0x0F)]; + *out++ = tables::base64::e1[((t2 & 0x0F) << 2) | ((t3 >> 6) & 0x03)]; + *out++ = tables::base64::e2[t3]; + } + switch (srclen - i) { + case 0: + break; + case 1: + t1 = (uint8_t)src[i]; + *out++ = tables::base64::e0[t1]; + *out++ = tables::base64::e1[(t1 & 0x03) << 4]; + *out++ = '='; + *out++ = '='; + break; + default: /* case 2 */ + t1 = (uint8_t)src[i]; + t2 = (uint8_t)src[i + 1]; + *out++ = tables::base64::e0[t1]; + *out++ = tables::base64::e1[((t1 & 0x03) << 4) | ((t2 >> 4) & 0x0F)]; + *out++ = tables::base64::e2[(t2 & 0x0F) << 2]; + *out++ = '='; + } + return (size_t)(out - dst); +} + +simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) noexcept { + // We follow https://infra.spec.whatwg.org/#forgiving-base64-decode + size_t padding = 0; + if(length > 0) { + if(input[length - 1] == '=') { + padding++; + if(length > 1 && input[length - 2] == '=') { + padding++; + } + } + } + size_t actual_length = length - padding; + if(actual_length % 4 == 0) { + return actual_length / 4 * 3; + } + // if we have a valid input, then the remainder must be 2 or 3 adding one or two extra bytes. + return actual_length / 4 * 3 + (actual_length %4) - 1; +} + +simdutf_warn_unused simdutf_really_inline result base64_to_binary(const char * input, size_t length, char* output) noexcept { + if(length > 0 && input[length - 1] == '=') { + length -= 1; + if(length > 0 && input[length - 1] == '=') { + length -= 1; + } + } + if(length == 0) { + return {SUCCESS, 0}; + } + return base64_tail_decode(output, input, length); +} + +simdutf_warn_unused size_t base64_length_from_binary(size_t length) noexcept { + return (length + 2)/3 * 4; // We use padding to make the length a multiple of 4. 
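+  // Worked example (illustrative only, derived from the two formulas above):
+  // decoding "SGVsbG8=" starts from length 8 with one '=' of padding, so
+  // maximal_binary_length_from_base64 computes 7 / 4 * 3 + (7 % 4) - 1 = 5
+  // bytes ("Hello"), and re-encoding those 5 bytes needs (5 + 2) / 3 * 4 = 8
+  // base64 characters again.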
+} + +simdutf_really_inline size_t binary_to_base64(const char * input, size_t length, char* output) noexcept { + return tail_encode_base64(output, input, length); +} +} // namespace base64 } // unnamed namespace } // namespace scalar } // namespace simdutf #endif -/* end file src/scalar/utf32_to_utf8/valid_utf32_to_utf8.h */ -/* begin file src/scalar/utf32_to_utf8/utf32_to_utf8.h */ -#ifndef SIMDUTF_UTF32_TO_UTF8_H -#define SIMDUTF_UTF32_TO_UTF8_H +/* end file src/scalar/base64.h */ + +/* begin file src/scalar/utf32_to_utf8/valid_utf32_to_utf8.h */ +#ifndef SIMDUTF_VALID_UTF32_TO_UTF8_H +#define SIMDUTF_VALID_UTF32_TO_UTF8_H namespace simdutf { namespace scalar { namespace { namespace utf32_to_utf8 { -inline size_t convert(const char32_t* buf, size_t len, char* utf8_output) { - const uint32_t *data = reinterpret_cast(buf); +#if SIMDUTF_IMPLEMENTATION_FALLBACK || SIMDUTF_IMPLEMENTATION_PPC64 +// only used by the fallback and POWER kernel +inline size_t convert_valid(const char32_t* buf, size_t len, char* utf8_output) { + const uint32_t *data = reinterpret_cast(buf); + size_t pos = 0; + char* start{utf8_output}; + while (pos < len) { + // try to convert the next block of 2 ASCII characters + if (pos + 2 <= len) { // if it is safe to read 8 more bytes, check that they are ascii + uint64_t v; + ::memcpy(&v, data + pos, sizeof(uint64_t)); + if ((v & 0xFFFFFF80FFFFFF80) == 0) { + *utf8_output++ = char(buf[pos]); + *utf8_output++ = char(buf[pos+1]); + pos += 2; + continue; + } + } + uint32_t word = data[pos]; + if((word & 0xFFFFFF80)==0) { + // will generate one UTF-8 bytes + *utf8_output++ = char(word); + pos++; + } else if((word & 0xFFFFF800)==0) { + // will generate two UTF-8 bytes + // we have 0b110XXXXX 0b10XXXXXX + *utf8_output++ = char((word>>6) | 0b11000000); + *utf8_output++ = char((word & 0b111111) | 0b10000000); + pos++; + } else if((word & 0xFFFF0000)==0) { + // will generate three UTF-8 bytes + // we have 0b1110XXXX 0b10XXXXXX 0b10XXXXXX + *utf8_output++ = char((word>>12) | 0b11100000); + *utf8_output++ = char(((word>>6) & 0b111111) | 0b10000000); + *utf8_output++ = char((word & 0b111111) | 0b10000000); + pos++; + } else { + // will generate four UTF-8 bytes + // we have 0b11110XXX 0b10XXXXXX 0b10XXXXXX 0b10XXXXXX + *utf8_output++ = char((word>>18) | 0b11110000); + *utf8_output++ = char(((word>>12) & 0b111111) | 0b10000000); + *utf8_output++ = char(((word>>6) & 0b111111) | 0b10000000); + *utf8_output++ = char((word & 0b111111) | 0b10000000); + pos ++; + } + } + return utf8_output - start; +} +#endif // SIMDUTF_IMPLEMENTATION_FALLBACK || SIMDUTF_IMPLEMENTATION_PPC64 + +} // utf32_to_utf8 namespace +} // unnamed namespace +} // namespace scalar +} // namespace simdutf + +#endif +/* end file src/scalar/utf32_to_utf8/valid_utf32_to_utf8.h */ +/* begin file src/scalar/utf32_to_utf8/utf32_to_utf8.h */ +#ifndef SIMDUTF_UTF32_TO_UTF8_H +#define SIMDUTF_UTF32_TO_UTF8_H + +namespace simdutf { +namespace scalar { +namespace { +namespace utf32_to_utf8 { + +inline size_t convert(const char32_t* buf, size_t len, char* utf8_output) { + const uint32_t *data = reinterpret_cast(buf); size_t pos = 0; char* start{utf8_output}; while (pos < len) { @@ -12156,7 +13200,7 @@ inline size_t convert(const char* buf, size_t len, char* latin_output) { // range check - uint32_t code_point = (leading_byte & 0b00011111) << 6 | (data[pos + 1] & 0b00111111); // assembles the Unicode code point from the two bytes. 
It does this by discarding the leading 110 and 10 bits from the two bytes, shifting the remaining bits of the first byte, and then combining the results with a bitwise OR operation. if (code_point < 0x80 || 0xFF < code_point) { - return 0; // We only care about the range 129-255 which is Non-ASCII latin1 characters. A code_point beneath 0x80 is invalid as it's already covered by bytes whose leading bit is zero. + return 0; // We only care about the range 129-255 which is Non-ASCII latin1 characters. A code_point beneath 0x80 is invalid as it's already covered by bytes whose leading bit is zero. } *latin_output++ = char(code_point); pos += 2; @@ -12307,7 +13351,7 @@ inline size_t convert(const char16_t* buf, size_t len, char* latin_output) { // Only copy to latin_output if there were no errors std::memcpy(latin_output, temp_output.data(), len); - + return current_write - temp_output.data(); } @@ -14535,8 +15579,9 @@ std::pair arm_convert_utf32_to_utf8(const char32_t* buf, const uint16x8_t v_c080 = vmovq_n_u16((uint16_t)0xc080); uint16x8_t forbidden_bytemask = vmovq_n_u16(0x0); + const size_t safety_margin = 12; // to avoid overruns, see issue https://github.com/simdutf/simdutf/issues/92 - while (buf + 8 < end) { + while (buf + 16 + safety_margin < end) { uint32x4_t in = vld1q_u32(reinterpret_cast(buf)); uint32x4_t nextin = vld1q_u32(reinterpret_cast(buf+4)); @@ -14768,8 +15813,9 @@ std::pair arm_convert_utf32_to_utf8_with_errors(const char32_t* b const char32_t* end = buf + len; const uint16x8_t v_c080 = vmovq_n_u16((uint16_t)0xc080); + const size_t safety_margin = 12; // to avoid overruns, see issue https://github.com/simdutf/simdutf/issues/92 - while (buf + 8 < end) { + while (buf + 16 + safety_margin < end) { uint32x4_t in = vld1q_u32(reinterpret_cast(buf)); uint32x4_t nextin = vld1q_u32(reinterpret_cast(buf+4)); @@ -15111,6 +16157,395 @@ std::pair arm_convert_utf32_to_utf16_with_errors(const char32 return std::make_pair(result(error_code::SUCCESS, buf - start), reinterpret_cast(utf16_output)); } /* end file src/arm64/arm_convert_utf32_to_utf16.cpp */ +/* begin file src/arm64/arm_base64.cpp */ +/** + * References and further reading: + * + * Wojciech Muła, Daniel Lemire, Base64 encoding and decoding at almost the + * speed of a memory copy, Software: Practice and Experience 50 (2), 2020. + * https://arxiv.org/abs/1910.05109 + * + * Wojciech Muła, Daniel Lemire, Faster Base64 Encoding and Decoding using AVX2 + * Instructions, ACM Transactions on the Web 12 (3), 2018. + * https://arxiv.org/abs/1704.00605 + * + * Simon Josefsson. 2006. The Base16, Base32, and Base64 Data Encodings. + * https://tools.ietf.org/html/rfc4648. (2006). Internet Engineering Task Force, + * Request for Comments: 4648. + * + * Alfred Klomp. 2014a. Fast Base64 encoding/decoding with SSE vectorization. + * http://www.alfredklomp.com/programming/sse-base64/. (2014). + * + * Alfred Klomp. 2014b. Fast Base64 stream encoder/decoder in C99, with SIMD + * acceleration. https://github.com/aklomp/base64. (2014). + * + * Hanson Char. 2014. A Fast and Correct Base 64 Codec. (2014). + * https://aws.amazon.com/blogs/developer/a-fast-and-correct-base-64-codec/ + * + * Nick Kopp. 2013. Base64 Encoding on a GPU. + * https://www.codeproject.com/Articles/276993/Base-Encoding-on-a-GPU. (2013). 
+ */ + +size_t encode_base64(char *dst, const char *src, size_t srclen) { + // credit: Wojciech Muła + uint8_t *out = (uint8_t *)dst; + constexpr static uint8_t source_table[64] = { + 'A', 'Q', 'g', 'w', 'B', 'R', 'h', 'x', 'C', 'S', 'i', 'y', 'D', + 'T', 'j', 'z', 'E', 'U', 'k', '0', 'F', 'V', 'l', '1', 'G', 'W', + 'm', '2', 'H', 'X', 'n', '3', 'I', 'Y', 'o', '4', 'J', 'Z', 'p', + '5', 'K', 'a', 'q', '6', 'L', 'b', 'r', '7', 'M', 'c', 's', '8', + 'N', 'd', 't', '9', 'O', 'e', 'u', '+', 'P', 'f', 'v', '/', + }; + const uint8x16_t v3f = vdupq_n_u8(0x3f); + const uint8x16x4_t table = vld4q_u8(source_table); + size_t i = 0; + for (; i + 16 * 3 <= srclen; i += 16 * 3) { + const uint8x16x3_t in = vld3q_u8((const uint8_t *)src + i); + uint8x16x4_t result; + result.val[0] = vshrq_n_u8(in.val[0], 2); + result.val[1] = + vandq_u8(vsliq_n_u8(vshrq_n_u8(in.val[1], 4), in.val[0], 4), v3f); + result.val[2] = + vandq_u8(vsliq_n_u8(vshrq_n_u8(in.val[2], 6), in.val[1], 2), v3f); + result.val[3] = vandq_u8(in.val[2], v3f); + result.val[0] = vqtbl4q_u8(table, result.val[0]); + result.val[1] = vqtbl4q_u8(table, result.val[1]); + result.val[2] = vqtbl4q_u8(table, result.val[2]); + result.val[3] = vqtbl4q_u8(table, result.val[3]); + vst4q_u8(out, result); + out += 64; + } + out += scalar::base64::tail_encode_base64((char *)out, src + i, srclen - i); + + return size_t((char *)out - dst); +} + +static inline void compress(uint8x16_t data, uint16_t mask, char *output) { + if (mask == 0) { + vst1q_u8((uint8_t *)output, data); + return; + } + uint8_t mask1 = uint8_t(mask); // least significant 8 bits + uint8_t mask2 = uint8_t(mask >> 8); // most significant 8 bits + uint64x2_t compactmasku64 = {tables::base64::thintable_epi8[mask1], + tables::base64::thintable_epi8[mask2]}; + uint8x16_t compactmask = vreinterpretq_u8_u64(compactmasku64); +#ifdef SIMDUTF_REGULAR_VISUAL_STUDIO + const uint8x16_t off = + simdutf_make_uint8x16_t(0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8, 8, 8, 8, 8, 8); +#else + const uint8x16_t off = {0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 8, 8, 8, 8, 8, 8}; +#endif + + compactmask = vaddq_u8(compactmask, off); + uint8x16_t pruned = vqtbl1q_u8(data, compactmask); + + int pop1 = tables::base64::BitsSetTable256mul2[mask1]; + // then load the corresponding mask, what it does is to write + // only the first pop1 bytes from the first 8 bytes, and then + // it fills in with the bytes from the second 8 bytes + some filling + // at the end. + compactmask = vld1q_u8(tables::base64::pshufb_combine_table + pop1 * 8); + uint8x16_t answer = vqtbl1q_u8(pruned, compactmask); + vst1q_u8((uint8_t *)output, answer); +} + +struct block64 { + uint8x16_t chunks[4]; +}; +static_assert(sizeof(block64) == 64, "block64 is not 64 bytes"); +uint64_t to_base64_mask(block64 *b, bool *error) { + uint8x16_t v0f = vdupq_n_u8(0xf); + + uint8x16_t lo_nibbles0 = vandq_u8(b->chunks[0], v0f); + uint8x16_t lo_nibbles1 = vandq_u8(b->chunks[1], v0f); + uint8x16_t lo_nibbles2 = vandq_u8(b->chunks[2], v0f); + uint8x16_t lo_nibbles3 = vandq_u8(b->chunks[3], v0f); + // Needed by the decoding step. 
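+  // (The high nibbles extracted below serve double duty: they index lut_hi
+  //  during validation and later index roll_lut to translate each ASCII
+  //  character to its 6-bit value in place.)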
+ uint8x16_t hi_nibbles0 = vshrq_n_u8(b->chunks[0], 4); + uint8x16_t hi_nibbles1 = vshrq_n_u8(b->chunks[1], 4); + uint8x16_t hi_nibbles2 = vshrq_n_u8(b->chunks[2], 4); + uint8x16_t hi_nibbles3 = vshrq_n_u8(b->chunks[3], 4); +#ifdef SIMDUTF_REGULAR_VISUAL_STUDIO + const uint8x16_t lut_lo = + simdutf_make_uint8x16_t(0x3a, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, + 0x70, 0x61, 0xe1, 0xb4, 0xf4, 0xe5, 0xf4, 0xb4); +#else + const uint8x16_t lut_lo = {0x3a, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, + 0x70, 0x61, 0xe1, 0xb4, 0xf4, 0xe5, 0xf4, 0xb4}; +#endif + uint8x16_t lo0 = vqtbl1q_u8(lut_lo, lo_nibbles0); + uint8x16_t lo1 = vqtbl1q_u8(lut_lo, lo_nibbles1); + uint8x16_t lo2 = vqtbl1q_u8(lut_lo, lo_nibbles2); + uint8x16_t lo3 = vqtbl1q_u8(lut_lo, lo_nibbles3); +#ifdef SIMDUTF_REGULAR_VISUAL_STUDIO + const uint8x16_t lut_hi = + simdutf_make_uint8x16_t(0x11, 0x20, 0x42, 0x80, 0x8, 0x4, 0x8, 0x4, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20); +#else + const uint8x16_t lut_hi = {0x11, 0x20, 0x42, 0x80, 0x8, 0x4, 0x8, 0x4, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}; +#endif + uint8x16_t hi0 = vqtbl1q_u8(lut_hi, hi_nibbles0); + uint8x16_t hi1 = vqtbl1q_u8(lut_hi, hi_nibbles1); + uint8x16_t hi2 = vqtbl1q_u8(lut_hi, hi_nibbles2); + uint8x16_t hi3 = vqtbl1q_u8(lut_hi, hi_nibbles3); + + uint8_t checks = + vmaxvq_u8(vorrq_u8(vorrq_u8(vandq_u8(lo0, hi0), vandq_u8(lo1, hi1)), + vorrq_u8(vandq_u8(lo2, hi2), vandq_u8(lo3, hi3)))); +#ifdef SIMDUTF_REGULAR_VISUAL_STUDIO + const uint8x16_t bit_mask = + simdutf_make_uint8x16_t(0x01, 0x02, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80); +#else + const uint8x16_t bit_mask = {0x01, 0x02, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, + 0x01, 0x02, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80}; +#endif + uint64_t badcharmask = 0; + *error = checks > 0x3; + if (checks) { + // Add each of the elements next to each other, successively, to stuff each + // 8 byte mask into one. 
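+    // NEON lacks a direct movemask instruction, so the 64 per-byte flags are
+    // packed into a single 64-bit mask: each flagged byte is weighted with a
+    // distinct bit from bit_mask and the four 16-byte vectors are folded with
+    // three rounds of pairwise additions (vpaddq_u8); lane 0 of the final
+    // vector then carries one bit per input byte, in order.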
+ uint8x16_t test0 = vtstq_u8(lo0, hi0); + uint8x16_t test1 = vtstq_u8(lo1, hi1); + uint8x16_t test2 = vtstq_u8(lo2, hi2); + uint8x16_t test3 = vtstq_u8(lo3, hi3); + uint8x16_t sum0 = + vpaddq_u8(vandq_u8(test0, bit_mask), vandq_u8(test1, bit_mask)); + uint8x16_t sum1 = + vpaddq_u8(vandq_u8(test2, bit_mask), vandq_u8(test3, bit_mask)); + sum0 = vpaddq_u8(sum0, sum1); + sum0 = vpaddq_u8(sum0, sum0); + badcharmask = vgetq_lane_u64(vreinterpretq_u64_u8(sum0), 0); + } + // This is the transformation step that can be done while we are waiting for + // sum0 +#ifdef SIMDUTF_REGULAR_VISUAL_STUDIO + const uint8x16_t roll_lut = + simdutf_make_uint8x16_t(0x0, 0x10, 0x13, 0x4, 0xbf, 0xbf, 0xb9, 0xb9, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0); +#else + const uint8x16_t roll_lut = {0x0, 0x10, 0x13, 0x4, 0xbf, 0xbf, 0xb9, 0xb9, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; +#endif + uint8x16_t v2f = vdupq_n_u8(0x2f); + uint8x16_t roll0 = + vqtbl1q_u8(roll_lut, vaddq_u8(vceqq_u8(b->chunks[0], v2f), hi_nibbles0)); + uint8x16_t roll1 = + vqtbl1q_u8(roll_lut, vaddq_u8(vceqq_u8(b->chunks[1], v2f), hi_nibbles1)); + uint8x16_t roll2 = + vqtbl1q_u8(roll_lut, vaddq_u8(vceqq_u8(b->chunks[2], v2f), hi_nibbles2)); + uint8x16_t roll3 = + vqtbl1q_u8(roll_lut, vaddq_u8(vceqq_u8(b->chunks[3], v2f), hi_nibbles3)); + b->chunks[0] = vaddq_u8(b->chunks[0], roll0); + b->chunks[1] = vaddq_u8(b->chunks[1], roll1); + b->chunks[2] = vaddq_u8(b->chunks[2], roll2); + b->chunks[3] = vaddq_u8(b->chunks[3], roll3); + return badcharmask; +} + +void copy_block(block64 *b, char *output) { + vst1q_u8((uint8_t *)output, b->chunks[0]); + vst1q_u8((uint8_t *)output + 16, b->chunks[1]); + vst1q_u8((uint8_t *)output + 32, b->chunks[2]); + vst1q_u8((uint8_t *)output + 48, b->chunks[3]); +} + +uint64_t compress_block(block64 *b, uint64_t mask, char *output) { + uint64_t popcounts = + vget_lane_u64(vreinterpret_u64_u8(vcnt_u8(vcreate_u8(~mask))), 0); + uint64_t offsets = popcounts * 0x0101010101010101; + compress(b->chunks[0], uint16_t(mask), output); + compress(b->chunks[1], uint16_t(mask >> 16), &output[(offsets >> 8) & 0xFF]); + compress(b->chunks[2], uint16_t(mask >> 32), &output[(offsets >> 24) & 0xFF]); + compress(b->chunks[3], uint16_t(mask >> 48), &output[(offsets >> 40) & 0xFF]); + return offsets >> 56; +} + +void load_block(block64 *b, const char *src) { + b->chunks[0] = vld1q_u8(reinterpret_cast(src)); + b->chunks[1] = vld1q_u8(reinterpret_cast(src) + 16); + b->chunks[2] = vld1q_u8(reinterpret_cast(src) + 32); + b->chunks[3] = vld1q_u8(reinterpret_cast(src) + 48); +} + +// decode 64 bytes and output 48 bytes +void base64_decode_block(char *out, const char *src) { + uint8x16x4_t str = vld4q_u8((uint8_t *)src); + uint8x16x3_t outvec; + outvec.val[0] = + vorrq_u8(vshlq_n_u8(str.val[0], 2), vshrq_n_u8(str.val[1], 4)); + outvec.val[1] = + vorrq_u8(vshlq_n_u8(str.val[1], 4), vshrq_n_u8(str.val[2], 2)); + outvec.val[2] = vorrq_u8(vshlq_n_u8(str.val[2], 6), str.val[3]); + vst3q_u8((uint8_t *)out, outvec); +} + +result compress_decode_base64(char *dst, const char *src, size_t srclen) { + size_t equalsigns = 0; + if (srclen > 0 && src[srclen - 1] == '=') { + srclen--; + equalsigns = 1; + if (srclen > 0 && src[srclen - 1] == '=') { + srclen--; + equalsigns = 2; + } + } + const char *const srcinit = src; + const char *const dstinit = dst; + const char *const srcend = src + srclen; + + constexpr size_t block_size = 10; + char buffer[block_size * 64]; + char *bufferptr = buffer; + if (srclen >= 64) { + const char *const srcend64 = src + srclen - 64; + 
while (src <= srcend64) { + block64 b; + load_block(&b, src); + src += 64; + bool error = false; + uint64_t badcharmask = to_base64_mask(&b, &error); + if (error) { + src -= 64; + + while (src < srcend && + tables::base64::to_base64_value[uint8_t(*src)] <= 64) { + src++; + } + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + + if (badcharmask != 0) { + // optimization opportunity: check for simple masks like those made of + // continuous 1s followed by continuous 0s. And masks containing a + // single bad character. + + bufferptr += compress_block(&b, badcharmask, bufferptr); + } else { + // optimization opportunity: if bufferptr == buffer and mask == 0, we + // can avoid the call to compress_block and decode directly. + copy_block(&b, bufferptr); + bufferptr += 64; + // base64_decode_block(dst, &b); + // dst += 48; + } + if (bufferptr >= (block_size - 1) * 64 + buffer) { + for (size_t i = 0; i < (block_size - 1); i++) { + base64_decode_block(dst, buffer + i * 64); + dst += 48; + } + std::memcpy(buffer, buffer + (block_size - 1) * 64, + 64); // 64 might be too much + bufferptr -= (block_size - 1) * 64; + } + } + } + char *buffer_start = buffer; + // Optimization note: if this is almost full, then it is worth our + // time, otherwise, we should just decode directly. + int last_block = (int)((bufferptr - buffer_start) % 64); + if (last_block != 0 && srcend - src + last_block >= 64) { + while ((bufferptr - buffer_start) % 64 != 0 && src < srcend) { + uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + *bufferptr = char(val); + if (val > 64) { + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + bufferptr += (val <= 63); + src++; + } + } + + for (; buffer_start + 64 <= bufferptr; buffer_start += 64) { + base64_decode_block(dst, buffer_start); + dst += 48; + } + if ((bufferptr - buffer_start) % 64 != 0) { + while (buffer_start + 4 < bufferptr) { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + (uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 4); + + dst += 3; + buffer_start += 4; + } + if (buffer_start + 4 <= bufferptr) { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + (uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 3); + + dst += 3; + buffer_start += 4; + } + // we may have 1, 2 or 3 bytes left and we need to decode them so let us + // bring in src content + int leftover = int(bufferptr - buffer_start); + if (leftover > 0) { + while (leftover < 4 && src < srcend) { + uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + if (val > 64) { + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + buffer_start[leftover] = char(val); + leftover += (val <= 63); + src++; + } + + if (leftover == 1) { + return {BASE64_INPUT_REMAINDER, size_t(dst - dstinit)}; + } + if (leftover == 2) { + uint32_t triple = (uint32_t(buffer_start[0]) << 3 * 6) + + (uint32_t(buffer_start[1]) << 2 * 6); + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 1); + dst += 1; + } else if (leftover == 3) { + uint32_t triple = (uint32_t(buffer_start[0]) << 3 * 6) + + (uint32_t(buffer_start[1]) << 2 * 6) + 
+ (uint32_t(buffer_start[2]) << 1 * 6); + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + + std::memcpy(dst, &triple, 2); + dst += 2; + } else { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + (uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 3); + dst += 3; + } + } + } + if (src < srcend + equalsigns) { + result r = scalar::base64::base64_tail_decode(dst, src, srcend - src); + if (r.error == error_code::INVALID_BASE64_CHARACTER) { + r.count += size_t(src - srcinit); + return r; + } else { + r.count += size_t(dst - dstinit); + } + return r; + } + return {SUCCESS, size_t(dst - dstinit)}; +} +/* end file src/arm64/arm_base64.cpp */ + } // unnamed namespace } // namespace arm64 } // namespace simdutf @@ -17418,6 +18853,23 @@ simdutf_warn_unused size_t implementation::utf32_length_from_utf8(const char * i return utf8::count_code_points(input, length); } +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { + return compress_decode_base64(output, input, length); +} + +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { + return scalar::base64::base64_length_from_binary(length); +} + +size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { + return encode_base64(output, input, length); +} + + } // namespace arm64 } // namespace simdutf @@ -17756,6 +19208,21 @@ simdutf_warn_unused size_t implementation::utf32_length_from_utf8(const char * i return scalar::utf8::count_code_points(input, length); } +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { + return scalar::base64::base64_to_binary(input, length, output); +} + +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { + return scalar::base64::base64_length_from_binary(length); +} + +size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { + return scalar::base64::binary_to_base64(input, length, output); +} } // namespace fallback } // namespace simdutf @@ -20486,17 +21953,17 @@ const char32_t* validate_utf32(const char32_t* buf, size_t len) { static inline size_t latin1_to_utf8_avx512_vec(__m512i input, size_t input_len, char *utf8_output, int mask_output) { __mmask64 nonascii = _mm512_movepi8_mask(input); size_t output_size = input_len + (size_t)count_ones(nonascii); - + // Mask to denote whether the byte is a leading byte that is not ascii __mmask64 sixth = _mm512_cmpge_epu8_mask(input, _mm512_set1_epi8(-64)); //binary representation of -64: 1100 0000 - + const uint64_t alternate_bits = UINT64_C(0x5555555555555555); uint64_t ascii = ~nonascii; // the bits in ascii are inverted and zeros are interspersed in between them uint64_t maskA = ~_pdep_u64(ascii, alternate_bits); uint64_t 
maskB = ~_pdep_u64(ascii>>32, alternate_bits); - + // interleave bytes from top and bottom halves (abcd...ABCD -> aAbBcCdD) __m512i input_interleaved = _mm512_permutexvar_epi8(_mm512_set_epi32( 0x3f1f3e1e, 0x3d1d3c1c, 0x3b1b3a1a, 0x39193818, @@ -20504,35 +21971,35 @@ static inline size_t latin1_to_utf8_avx512_vec(__m512i input, size_t input_len, 0x2f0f2e0e, 0x2d0d2c0c, 0x2b0b2a0a, 0x29092808, 0x27072606, 0x25052404, 0x23032202, 0x21012000 ), input); - + // double size of each byte, and insert the leading byte 1100 0010 -/* +/* upscale the bytes to 16-bit value, adding the 0b11000000 leading byte in the process. We adjust for the bytes that have their two most significant bits. This takes care of the first 32 bytes, assuming we interleaved the bytes. */ - __m512i outputA = _mm512_shldi_epi16(input_interleaved, _mm512_set1_epi8(-62), 8); + __m512i outputA = _mm512_shldi_epi16(input_interleaved, _mm512_set1_epi8(-62), 8); outputA = _mm512_mask_add_epi16( - outputA, - (__mmask32)sixth, - outputA, + outputA, + (__mmask32)sixth, + outputA, _mm512_set1_epi16(1 - 0x4000)); // 1- 0x4000 = 1100 0000 0000 0001???? - + // in the second 32-bit half, set first or second option based on whether original input is leading byte (second case) or not (first case) __m512i leadingB = _mm512_mask_blend_epi16( - (__mmask32)(sixth>>32), + (__mmask32)(sixth>>32), _mm512_set1_epi16(0x00c2), // 0000 0000 1101 0010 _mm512_set1_epi16(0x40c3));// 0100 0000 1100 0011 __m512i outputB = _mm512_ternarylogic_epi32( - input_interleaved, - leadingB, - _mm512_set1_epi16((short)0xff00), + input_interleaved, + leadingB, + _mm512_set1_epi16((short)0xff00), (240 & 170) ^ 204); // (input_interleaved & 0xff00) ^ leadingB - + // prune redundant bytes outputA = _mm512_maskz_compress_epi8(maskA, outputA); outputB = _mm512_maskz_compress_epi8(maskB, outputB); - - + + size_t output_sizeA = (size_t)count_ones((uint32_t)nonascii) + 32; if(mask_output) { @@ -20553,7 +22020,7 @@ We adjust for the bytes that have their two most significant bits. 
This takes ca } return output_size; } - + static inline size_t latin1_to_utf8_avx512_branch(__m512i input, char *utf8_output) { __mmask64 nonascii = _mm512_movepi8_mask(input); if(nonascii) { @@ -20563,7 +22030,7 @@ static inline size_t latin1_to_utf8_avx512_branch(__m512i input, char *utf8_outp return 64; } } - + size_t latin1_to_utf8_avx512_start(const char *buf, size_t len, char *utf8_output) { char *start = utf8_output; size_t pos = 0; @@ -20628,14 +22095,14 @@ size_t icelake_convert_latin1_to_utf16(const char *latin1_input, size_t len, /* begin file src/icelake/icelake_convert_latin1_to_utf32.inl.cpp */ std::pair avx512_convert_latin1_to_utf32(const char* buf, size_t len, char32_t* utf32_output) { size_t rounded_len = len & ~0xF; // Round down to nearest multiple of 16 - - for (size_t i = 0; i < rounded_len; i += 16) { + + for (size_t i = 0; i < rounded_len; i += 16) { // Load 16 Latin1 characters into a 128-bit register __m128i in = _mm_loadu_si128((__m128i*)&buf[i]); - + // Zero extend each set of 8 Latin1 characters to 16 32-bit integers using vpmovzxbd __m512i out = _mm512_cvtepu8_epi32(in); - + // Store the results back to memory _mm512_storeu_si512((__m512i*)&utf32_output[i], out); } @@ -20644,6 +22111,299 @@ std::pair avx512_convert_latin1_to_utf32(const char* buf return std::make_pair(buf + rounded_len, utf32_output + rounded_len); } /* end file src/icelake/icelake_convert_latin1_to_utf32.inl.cpp */ +/* begin file src/icelake/icelake_base64.inl.cpp */ +// file included directly +/** + * References and further reading: + * + * Wojciech Muła, Daniel Lemire, Base64 encoding and decoding at almost the + * speed of a memory copy, Software: Practice and Experience 50 (2), 2020. + * https://arxiv.org/abs/1910.05109 + * + * Wojciech Muła, Daniel Lemire, Faster Base64 Encoding and Decoding using AVX2 + * Instructions, ACM Transactions on the Web 12 (3), 2018. + * https://arxiv.org/abs/1704.00605 + * + * Simon Josefsson. 2006. The Base16, Base32, and Base64 Data Encodings. + * https://tools.ietf.org/html/rfc4648. (2006). Internet Engineering Task Force, + * Request for Comments: 4648. + * + * Alfred Klomp. 2014a. Fast Base64 encoding/decoding with SSE vectorization. + * http://www.alfredklomp.com/programming/sse-base64/. (2014). + * + * Alfred Klomp. 2014b. Fast Base64 stream encoder/decoder in C99, with SIMD + * acceleration. https://github.com/aklomp/base64. (2014). + * + * Hanson Char. 2014. A Fast and Correct Base 64 Codec. (2014). + * https://aws.amazon.com/blogs/developer/a-fast-and-correct-base-64-codec/ + * + * Nick Kopp. 2013. Base64 Encoding on a GPU. + * https://www.codeproject.com/Articles/276993/Base-Encoding-on-a-GPU. (2013). 
+ */ + +struct block64 { + __m512i chunks[1]; +}; + +size_t encode_base64(char *dst, const char *src, size_t srclen) { + // credit: Wojciech Muła + + const uint8_t *input = (const uint8_t *)src; + + uint8_t *out = (uint8_t *)dst; + static const char *lookup_tbl = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + + const __m512i shuffle_input = _mm512_setr_epi32( + 0x01020001, 0x04050304, 0x07080607, 0x0a0b090a, 0x0d0e0c0d, 0x10110f10, + 0x13141213, 0x16171516, 0x191a1819, 0x1c1d1b1c, 0x1f201e1f, 0x22232122, + 0x25262425, 0x28292728, 0x2b2c2a2b, 0x2e2f2d2e); + const __m512i lookup = + _mm512_loadu_si512(reinterpret_cast(lookup_tbl)); + const __m512i multi_shifts = _mm512_set1_epi64(UINT64_C(0x3036242a1016040a)); + size_t i = 0; + for (; i + 64 <= srclen; i += 48) { + const __m512i v = + _mm512_loadu_si512(reinterpret_cast(input + i)); + const __m512i in = _mm512_permutexvar_epi8(shuffle_input, v); + const __m512i indices = _mm512_multishift_epi64_epi8(multi_shifts, in); + const __m512i result = _mm512_permutexvar_epi8(indices, lookup); + _mm512_storeu_si512(reinterpret_cast<__m512i *>(out), result); + out += 64; + } + return i / 3 * 4 + + scalar::base64::tail_encode_base64((char *)out, src + i, srclen - i); +} + +static inline uint64_t to_base64_mask(block64 *b, bool *error) { + __m512i input = b->chunks[0]; + const __m512i ascii_space_tbl = _mm512_set_epi8( + 0, 0, 13, 0, 0, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 13, 0, 0, 10, 9, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 13, 0, 0, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, + 32, 0, 0, 13, 0, 0, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, 32); + __m512i lookup0 = _mm512_set_epi8( + -128, -128, -128, -128, -128, -128, 61, 60, 59, 58, 57, 56, 55, 54, 53, + 52, 63, -128, -128, -128, 62, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -64, -128, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -64, -128, + -128, -64, -64, -128, -128, -128, -128, -128, -128, -128, -128, -64); + __m512i lookup1 = _mm512_set_epi8( + -128, -128, -128, -128, -128, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, + 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, -128, -128, + -128, -128, -128, -128, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, + 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -128); + const __m512i translated = _mm512_permutex2var_epi8(lookup0, input, lookup1); + const __m512i combined = _mm512_or_si512(translated, input); + const __mmask64 mask = _mm512_movepi8_mask(combined); + if (mask) { + const __mmask64 spaces = _mm512_cmpeq_epi8_mask( + _mm512_shuffle_epi8(ascii_space_tbl, input), input); + *error |= (mask != spaces); + } + b->chunks[0] = translated; + + return mask; +} + +static inline void copy_block(block64 *b, char *output) { + _mm512_storeu_si512(reinterpret_cast<__m512i *>(output), b->chunks[0]); +} + +static inline uint64_t compress_block(block64 *b, uint64_t mask, char *output) { + uint64_t nmask = ~mask; + __m512i c = _mm512_maskz_compress_epi8(nmask, b->chunks[0]); + _mm512_storeu_si512(reinterpret_cast<__m512i *>(output), c); + return _mm_popcnt_u64(nmask); +} + +static inline void load_block(block64 *b, const char *src) { + b->chunks[0] = _mm512_loadu_si512(reinterpret_cast(src)); +} + +static inline void base64_decode(char *out, __m512i str) { + const __m512i merge_ab_and_bc = + _mm512_maddubs_epi16(str, _mm512_set1_epi32(0x01400140)); + const __m512i merged = + _mm512_madd_epi16(merge_ab_and_bc, _mm512_set1_epi32(0x00011000)); + const __m512i pack = 
_mm512_set_epi8( + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 60, 61, 62, 56, 57, 58, + 52, 53, 54, 48, 49, 50, 44, 45, 46, 40, 41, 42, 36, 37, 38, 32, 33, 34, + 28, 29, 30, 24, 25, 26, 20, 21, 22, 16, 17, 18, 12, 13, 14, 8, 9, 10, 4, + 5, 6, 0, 1, 2); + const __m512i shuffled = _mm512_permutexvar_epi8(pack, merged); + _mm512_mask_storeu_epi8( + (__m512i *)out, 0xffffffffffff, + shuffled); // mask would be 0xffffffffffff since we write 48 bytes. +} +// decode 64 bytes and output 48 bytes +static inline void base64_decode_block(char *out, const char *src) { + base64_decode(out, + _mm512_loadu_si512(reinterpret_cast(src))); +} +static inline void base64_decode_block(char *out, block64 *b) { + base64_decode(out, b->chunks[0]); +} + +result compress_decode_base64(char *dst, const char *src, size_t srclen) { + size_t equalsigns = 0; + if (srclen > 0 && src[srclen - 1] == '=') { + srclen--; + equalsigns = 1; + if (srclen > 0 && src[srclen - 1] == '=') { + srclen--; + equalsigns = 2; + } + } + const char *const srcinit = src; + const char *const dstinit = dst; + const char *const srcend = src + srclen; + + // figure out why block_size == 2 is sometimes best??? + constexpr size_t block_size = 6; + char buffer[block_size * 64]; + char *bufferptr = buffer; + if (srclen >= 64) { + const char *const srcend64 = src + srclen - 64; + while (src <= srcend64) { + block64 b; + load_block(&b, src); + src += 64; + bool error = false; + uint64_t badcharmask = to_base64_mask(&b, &error); + if (error) { + src -= 64; + while (src < srcend && + tables::base64::to_base64_value[uint8_t(*src)] <= 64) { + src++; + } + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + if (badcharmask != 0) { + // optimization opportunity: check for simple masks like those made of + // continuous 1s followed by continuous 0s. And masks containing a + // single bad character. + bufferptr += compress_block(&b, badcharmask, bufferptr); + } else if (bufferptr != buffer) { + copy_block(&b, bufferptr); + bufferptr += 64; + } else { + base64_decode_block(dst, &b); + dst += 48; + } + if (bufferptr >= (block_size - 1) * 64 + buffer) { + for (size_t i = 0; i < (block_size - 1); i++) { + base64_decode_block(dst, buffer + i * 64); + dst += 48; + } + std::memcpy(buffer, buffer + (block_size - 1) * 64, + 64); // 64 might be too much + bufferptr -= (block_size - 1) * 64; + } + } + } + + char *buffer_start = buffer; + // Optimization note: if this is almost full, then it is worth our + // time, otherwise, we should just decode directly. 
+ int last_block = (int)((bufferptr - buffer_start) % 64); + if (last_block != 0 && srcend - src + last_block >= 64) { + + while ((bufferptr - buffer_start) % 64 != 0 && src < srcend) { + uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + *bufferptr = char(val); + if (val > 64) { + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + bufferptr += (val <= 63); + src++; + } + } + + for (; buffer_start + 64 <= bufferptr; buffer_start += 64) { + base64_decode_block(dst, buffer_start); + dst += 48; + } + if ((bufferptr - buffer_start) % 64 != 0) { + while (buffer_start + 4 < bufferptr) { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + (uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 4); + dst += 3; + buffer_start += 4; + } + if (buffer_start + 4 <= bufferptr) { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + (uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 3); + dst += 3; + buffer_start += 4; + } + // we may have 1, 2 or 3 bytes left and we need to decode them so let us + // bring in src content + int leftover = int(bufferptr - buffer_start); + if (leftover > 0) { + while (leftover < 4 && src < srcend) { + uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + if (val > 64) { + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + buffer_start[leftover] = char(val); + leftover += (val <= 63); + src++; + } + + if (leftover == 1) { + return {BASE64_INPUT_REMAINDER, size_t(dst - dstinit)}; + } + if (leftover == 2) { + uint32_t triple = (uint32_t(buffer_start[0]) << 3 * 6) + + (uint32_t(buffer_start[1]) << 2 * 6); + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 1); + dst += 1; + } else if (leftover == 3) { + uint32_t triple = (uint32_t(buffer_start[0]) << 3 * 6) + + (uint32_t(buffer_start[1]) << 2 * 6) + + (uint32_t(buffer_start[2]) << 1 * 6); + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + + std::memcpy(dst, &triple, 2); + dst += 2; + } else { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + (uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 3); + dst += 3; + } + } + } + if (src < srcend + equalsigns) { + result r = scalar::base64::base64_tail_decode(dst, src, srcend - src); + if (r.error == error_code::INVALID_BASE64_CHARACTER) { + r.count += size_t(src - srcinit); + return r; + } else { + r.count += size_t(dst - dstinit); + } + return r; + } + return {SUCCESS, size_t(dst - dstinit)}; +} +/* end file src/icelake/icelake_base64.inl.cpp */ #include @@ -21129,15 +22889,15 @@ simdutf_warn_unused result implementation::convert_utf8_to_latin1_with_errors(co // First, try to convert as much as possible using the SIMD implementation. inlen = icelake::utf8_to_latin1_avx512(buf, len, latin1_output); - + // If we have completely converted the string if(inlen == len) { return {simdutf::SUCCESS, len}; } - + // Else if there are remaining bytes, use the scalar function to process them. 
- // Note: This is assuming scalar::utf8_to_latin1::convert_with_errors is a function that takes - // the input buffer, length, and output buffer, and returns a result object with an error code + // Note: This is assuming scalar::utf8_to_latin1::convert_with_errors is a function that takes + // the input buffer, length, and output buffer, and returns a result object with an error code // and the number of characters processed. result res = scalar::utf8_to_latin1::convert_with_errors(buf + inlen, len - inlen, latin1_output + inlen); res.count += inlen; // Add the number of characters processed by the SIMD implementation @@ -21692,7 +23452,7 @@ simdutf_warn_unused size_t implementation::count_utf8(const char * input, size_t const uint8_t *str = reinterpret_cast(input); size_t answer = length / sizeof(__m512i) * sizeof(__m512i); // Number of 512-bit chunks that fits into the length. size_t i = 0; - __m512i unrolled_popcount{0}; + __m512i unrolled_popcount{0}; const __m512i continuation = _mm512_set1_epi8(char(0b10111111)); @@ -21972,6 +23732,22 @@ simdutf_warn_unused size_t implementation::utf32_length_from_utf8(const char * i return implementation::count_utf8(input, length); } +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { + return compress_decode_base64(output, input, length); +} + +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { + return scalar::base64::base64_length_from_binary(length); +} + +size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { + return encode_base64(output, input, length); +} + } // namespace icelake } // namespace simdutf @@ -22581,10 +24357,10 @@ std::pair avx2_convert_latin1_to_utf16(const char* latin // Zero extend each byte in xmm0 to word and put it in another xmm register __m128i xmm1 = _mm_cvtepu8_epi16(xmm0); - + // Shift xmm0 to the right by 8 bytes xmm0 = _mm_srli_si128(xmm0, 8); - + // Zero extend each byte in the shifted xmm0 to word in xmm0 xmm0 = _mm_cvtepu8_epi16(xmm0); @@ -22593,10 +24369,10 @@ std::pair avx2_convert_latin1_to_utf16(const char* latin xmm0 = _mm_shuffle_epi8(xmm0, swap); xmm1 = _mm_shuffle_epi8(xmm1, swap); } - + // Store the contents of xmm1 into the address pointed by (output + i) _mm_storeu_si128(reinterpret_cast<__m128i*>(utf16_output + i), xmm1); - + // Store the contents of xmm0 into the address pointed by (output + i + 8) _mm_storeu_si128(reinterpret_cast<__m128i*>(utf16_output + i + 8), xmm0); } @@ -22608,14 +24384,14 @@ std::pair avx2_convert_latin1_to_utf16(const char* latin /* begin file src/haswell/avx2_convert_latin1_to_utf32.cpp */ std::pair avx2_convert_latin1_to_utf32(const char* buf, size_t len, char32_t* utf32_output) { size_t rounded_len = ((len | 7) ^ 7); // Round down to nearest multiple of 8 - - for (size_t i = 0; i < rounded_len; i += 8) { + + for (size_t i = 0; i < rounded_len; i += 8) { // Load 8 Latin1 characters into a 64-bit register __m128i in = _mm_loadl_epi64((__m128i*)&buf[i]); - + // Zero extend each set of 8 Latin1 characters to 8 32-bit integers using vpmovzxbd __m256i out = _mm256_cvtepu8_epi32(in); - + // Store the results back to memory _mm256_storeu_si256((__m256i*)&utf32_output[i], out); } @@ -24520,6 
+26296,503 @@ size_t convert_masked_utf8_to_latin1(const char *input, } /* end file src/haswell/avx2_convert_utf8_to_latin1.cpp */ +/* begin file src/haswell/avx2_base64.cpp */ +/** + * References and further reading: + * + * Wojciech Muła, Daniel Lemire, Base64 encoding and decoding at almost the + * speed of a memory copy, Software: Practice and Experience 50 (2), 2020. + * https://arxiv.org/abs/1910.05109 + * + * Wojciech Muła, Daniel Lemire, Faster Base64 Encoding and Decoding using AVX2 + * Instructions, ACM Transactions on the Web 12 (3), 2018. + * https://arxiv.org/abs/1704.00605 + * + * Simon Josefsson. 2006. The Base16, Base32, and Base64 Data Encodings. + * https://tools.ietf.org/html/rfc4648. (2006). Internet Engineering Task Force, + * Request for Comments: 4648. + * + * Alfred Klomp. 2014a. Fast Base64 encoding/decoding with SSE vectorization. + * http://www.alfredklomp.com/programming/sse-base64/. (2014). + * + * Alfred Klomp. 2014b. Fast Base64 stream encoder/decoder in C99, with SIMD + * acceleration. https://github.com/aklomp/base64. (2014). + * + * Hanson Char. 2014. A Fast and Correct Base 64 Codec. (2014). + * https://aws.amazon.com/blogs/developer/a-fast-and-correct-base-64-codec/ + * + * Nick Kopp. 2013. Base64 Encoding on a GPU. + * https://www.codeproject.com/Articles/276993/Base-Encoding-on-a-GPU. (2013). + */ + +__m256i lookup_pshufb_improved(const __m256i input) { + // credit: Wojciech Muła + __m256i result = _mm256_subs_epu8(input, _mm256_set1_epi8(51)); + const __m256i less = _mm256_cmpgt_epi8(_mm256_set1_epi8(26), input); + result = + _mm256_or_si256(result, _mm256_and_si256(less, _mm256_set1_epi8(13))); + const __m256i shift_LUT = _mm256_setr_epi8( + 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '0' - 52, '0' - 52, '0' - 52, '+' - 62, '/' - 63, 'A', 0, 0, + + 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '0' - 52, '0' - 52, '0' - 52, '+' - 62, '/' - 63, 'A', 0, 0); + + result = _mm256_shuffle_epi8(shift_LUT, result); + return _mm256_add_epi8(result, input); +} + +size_t encode_base64(char *dst, const char *src, size_t srclen) { + // credit: Wojciech Muła + const uint8_t *input = (const uint8_t *)src; + + uint8_t *out = (uint8_t *)dst; + const __m256i shuf = + _mm256_set_epi8(10, 11, 9, 10, 7, 8, 6, 7, 4, 5, 3, 4, 1, 2, 0, 1, + + 10, 11, 9, 10, 7, 8, 6, 7, 4, 5, 3, 4, 1, 2, 0, 1); + size_t i = 0; + for (; i + 100 <= srclen; i += 96) { + const __m128i lo0 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 0)); + const __m128i hi0 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 1)); + const __m128i lo1 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 2)); + const __m128i hi1 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 3)); + const __m128i lo2 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 4)); + const __m128i hi2 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 5)); + const __m128i lo3 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 6)); + const __m128i hi3 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 7)); + + __m256i in0 = _mm256_shuffle_epi8(_mm256_set_m128i(hi0, lo0), shuf); + __m256i in1 = _mm256_shuffle_epi8(_mm256_set_m128i(hi1, lo1), shuf); + __m256i in2 = _mm256_shuffle_epi8(_mm256_set_m128i(hi2, lo2), shuf); + __m256i in3 = _mm256_shuffle_epi8(_mm256_set_m128i(hi3, lo3), shuf); + + const __m256i t0_0 = _mm256_and_si256(in0, _mm256_set1_epi32(0x0fc0fc00)); + const __m256i t0_1 = _mm256_and_si256(in1, 
_mm256_set1_epi32(0x0fc0fc00)); + const __m256i t0_2 = _mm256_and_si256(in2, _mm256_set1_epi32(0x0fc0fc00)); + const __m256i t0_3 = _mm256_and_si256(in3, _mm256_set1_epi32(0x0fc0fc00)); + + const __m256i t1_0 = + _mm256_mulhi_epu16(t0_0, _mm256_set1_epi32(0x04000040)); + const __m256i t1_1 = + _mm256_mulhi_epu16(t0_1, _mm256_set1_epi32(0x04000040)); + const __m256i t1_2 = + _mm256_mulhi_epu16(t0_2, _mm256_set1_epi32(0x04000040)); + const __m256i t1_3 = + _mm256_mulhi_epu16(t0_3, _mm256_set1_epi32(0x04000040)); + + const __m256i t2_0 = _mm256_and_si256(in0, _mm256_set1_epi32(0x003f03f0)); + const __m256i t2_1 = _mm256_and_si256(in1, _mm256_set1_epi32(0x003f03f0)); + const __m256i t2_2 = _mm256_and_si256(in2, _mm256_set1_epi32(0x003f03f0)); + const __m256i t2_3 = _mm256_and_si256(in3, _mm256_set1_epi32(0x003f03f0)); + + const __m256i t3_0 = + _mm256_mullo_epi16(t2_0, _mm256_set1_epi32(0x01000010)); + const __m256i t3_1 = + _mm256_mullo_epi16(t2_1, _mm256_set1_epi32(0x01000010)); + const __m256i t3_2 = + _mm256_mullo_epi16(t2_2, _mm256_set1_epi32(0x01000010)); + const __m256i t3_3 = + _mm256_mullo_epi16(t2_3, _mm256_set1_epi32(0x01000010)); + + const __m256i input0 = _mm256_or_si256(t1_0, t3_0); + const __m256i input1 = _mm256_or_si256(t1_1, t3_1); + const __m256i input2 = _mm256_or_si256(t1_2, t3_2); + const __m256i input3 = _mm256_or_si256(t1_3, t3_3); + + _mm256_storeu_si256(reinterpret_cast<__m256i *>(out), + lookup_pshufb_improved(input0)); + out += 32; + + _mm256_storeu_si256(reinterpret_cast<__m256i *>(out), + lookup_pshufb_improved(input1)); + out += 32; + + _mm256_storeu_si256(reinterpret_cast<__m256i *>(out), + lookup_pshufb_improved(input2)); + out += 32; + _mm256_storeu_si256(reinterpret_cast<__m256i *>(out), + lookup_pshufb_improved(input3)); + out += 32; + } + for (; i + 28 <= srclen; i += 24) { + // lo = [xxxx|DDDC|CCBB|BAAA] + // hi = [xxxx|HHHG|GGFF|FEEE] + const __m128i lo = + _mm_loadu_si128(reinterpret_cast(input + i)); + const __m128i hi = + _mm_loadu_si128(reinterpret_cast(input + i + 4 * 3)); + + // bytes from groups A, B and C are needed in separate 32-bit lanes + // in = [0HHH|0GGG|0FFF|0EEE[0DDD|0CCC|0BBB|0AAA] + __m256i in = _mm256_shuffle_epi8(_mm256_set_m128i(hi, lo), shuf); + + // this part is well commented in encode.sse.cpp + + const __m256i t0 = _mm256_and_si256(in, _mm256_set1_epi32(0x0fc0fc00)); + const __m256i t1 = _mm256_mulhi_epu16(t0, _mm256_set1_epi32(0x04000040)); + const __m256i t2 = _mm256_and_si256(in, _mm256_set1_epi32(0x003f03f0)); + const __m256i t3 = _mm256_mullo_epi16(t2, _mm256_set1_epi32(0x01000010)); + const __m256i indices = _mm256_or_si256(t1, t3); + + _mm256_storeu_si256(reinterpret_cast<__m256i *>(out), + lookup_pshufb_improved(indices)); + out += 32; + } + return i / 3 * 4 + + scalar::base64::tail_encode_base64((char *)out, src + i, srclen - i); +} + +static inline void compress(__m128i data, uint16_t mask, char *output) { + if (mask == 0) { + _mm_storeu_si128(reinterpret_cast<__m128i *>(output), data); + return; + } + // this particular implementation was inspired by work done by @animetosho + // we do it in two steps, first 8 bytes and then second 8 bytes + uint8_t mask1 = uint8_t(mask); // least significant 8 bits + uint8_t mask2 = uint8_t(mask >> 8); // most significant 8 bits + // next line just loads the 64-bit values thintable_epi8[mask1] and + // thintable_epi8[mask2] into a 128-bit register, using only + // two instructions on most compilers. 
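+  // thintable_epi8 is assumed here to map each 8-bit mask to the indices of
+  // the bytes whose mask bit is clear (the bytes we keep), packed into the
+  // low bytes of a 64-bit word. For example, with mask1 = 0b00000100 the kept
+  // indices begin 0,1,3,4,5,6,7, so byte 2 is squeezed out by the pshufb
+  // below.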
+ + __m128i shufmask = _mm_set_epi64x(tables::base64::thintable_epi8[mask2], + tables::base64::thintable_epi8[mask1]); + // we increment by 0x08 the second half of the mask + shufmask = + _mm_add_epi8(shufmask, _mm_set_epi32(0x08080808, 0x08080808, 0, 0)); + // this is the version "nearly pruned" + __m128i pruned = _mm_shuffle_epi8(data, shufmask); + // we still need to put the two halves together. + // we compute the popcount of the first half: + int pop1 = tables::base64::BitsSetTable256mul2[mask1]; + // then load the corresponding mask, what it does is to write + // only the first pop1 bytes from the first 8 bytes, and then + // it fills in with the bytes from the second 8 bytes + some filling + // at the end. + __m128i compactmask = _mm_loadu_si128(reinterpret_cast( + tables::base64::pshufb_combine_table + pop1 * 8)); + __m128i answer = _mm_shuffle_epi8(pruned, compactmask); + + _mm_storeu_si128(reinterpret_cast<__m128i *>(output), answer); +} + +static inline void compress(__m256i data, uint32_t mask, char *output) { + if (mask == 0) { + _mm256_storeu_si256(reinterpret_cast<__m256i *>(output), data); + return; + } + compress(_mm256_castsi256_si128(data), uint16_t(mask), output); + compress(_mm256_extracti128_si256(data, 1), uint16_t(mask >> 16), + output + _mm_popcnt_u32(~mask & 0xFFFF)); +} + +struct block64 { + __m256i chunks[2]; +}; + +static inline uint32_t to_base64_mask(__m256i *src, bool *error) { + const __m256i ascii_space_tbl = + _mm256_setr_epi8(0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9, 0xa, + 0x0, 0x0, 0xd, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x9, 0xa, 0x0, 0x0, 0xd, 0x0, 0x0); + // credit: aqrit + const __m256i delta_asso = _mm256_setr_epi8( + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x0F, 0x00, 0x0F, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x0F); + const __m256i delta_values = _mm256_setr_epi8( + int8_t(0x00), int8_t(0x00), int8_t(0x00), int8_t(0x13), int8_t(0x04), + int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), int8_t(0xB9), int8_t(0x00), + int8_t(0x10), int8_t(0xC3), int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), + int8_t(0xB9), int8_t(0x00), int8_t(0x00), int8_t(0x00), int8_t(0x13), + int8_t(0x04), int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), int8_t(0xB9), + int8_t(0x00), int8_t(0x10), int8_t(0xC3), int8_t(0xBF), int8_t(0xBF), + int8_t(0xB9), int8_t(0xB9)); + const __m256i check_asso = _mm256_setr_epi8( + 0x0D, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x03, 0x07, + 0x0B, 0x0B, 0x0B, 0x0F, 0x0D, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x03, 0x07, 0x0B, 0x0B, 0x0B, 0x0F); + const __m256i check_values = _mm256_setr_epi8( + int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0xCF), + int8_t(0xBF), int8_t(0xD5), int8_t(0xA6), int8_t(0xB5), int8_t(0x86), + int8_t(0xD1), int8_t(0x80), int8_t(0xB1), int8_t(0x80), int8_t(0x91), + int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), + int8_t(0xCF), int8_t(0xBF), int8_t(0xD5), int8_t(0xA6), int8_t(0xB5), + int8_t(0x86), int8_t(0xD1), int8_t(0x80), int8_t(0xB1), int8_t(0x80), + int8_t(0x91), int8_t(0x80)); + const __m256i shifted = _mm256_srli_epi32(*src, 3); + + const __m256i delta_hash = + _mm256_avg_epu8(_mm256_shuffle_epi8(delta_asso, *src), shifted); + const __m256i check_hash = + _mm256_avg_epu8(_mm256_shuffle_epi8(check_asso, *src), shifted); + + const __m256i out = + _mm256_adds_epi8(_mm256_shuffle_epi8(delta_values, delta_hash), *src); + const __m256i chk = 
+ _mm256_adds_epi8(_mm256_shuffle_epi8(check_values, check_hash), *src); + const int mask = _mm256_movemask_epi8(chk); + if (mask) { + __m256i ascii_space = + _mm256_cmpeq_epi8(_mm256_shuffle_epi8(ascii_space_tbl, *src), *src); + *error |= (mask != _mm256_movemask_epi8(ascii_space)); + } + *src = out; + return (uint32_t)mask; +} +static inline uint64_t to_base64_mask(block64 *b, bool *error) { + *error = 0; + uint64_t m0 = to_base64_mask(&b->chunks[0], error); + uint64_t m1 = to_base64_mask(&b->chunks[1], error); + return m0 | (m1 << 32); +} + +static inline void copy_block(block64 *b, char *output) { + _mm256_storeu_si256(reinterpret_cast<__m256i *>(output), b->chunks[0]); + _mm256_storeu_si256(reinterpret_cast<__m256i *>(output + 32), b->chunks[1]); +} + +static inline uint64_t compress_block(block64 *b, uint64_t mask, char *output) { + uint64_t nmask = ~mask; + compress(b->chunks[0], uint32_t(mask), output); + compress(b->chunks[1], uint32_t(mask >> 32), + output + _mm_popcnt_u64(nmask & 0xFFFFFFFF)); + return _mm_popcnt_u64(nmask); +} + +static inline void load_block(block64 *b, const char *src) { + b->chunks[0] = _mm256_loadu_si256(reinterpret_cast(src)); + b->chunks[1] = + _mm256_loadu_si256(reinterpret_cast(src + 32)); +} + +static inline void base64_decode(char *out, __m256i str) { + // credit: aqrit + const __m256i pack_shuffle = + _mm256_setr_epi8(2, 1, 0, 6, 5, 4, 10, 9, 8, 14, 13, 12, -1, -1, -1, -1, + 2, 1, 0, 6, 5, 4, 10, 9, 8, 14, 13, 12, -1, -1, -1, -1); + const __m256i t0 = _mm256_maddubs_epi16(str, _mm256_set1_epi32(0x01400140)); + const __m256i t1 = _mm256_madd_epi16(t0, _mm256_set1_epi32(0x00011000)); + const __m256i t2 = _mm256_shuffle_epi8(t1, pack_shuffle); + + // Store the output: + _mm_storeu_si128((__m128i *)out, _mm256_castsi256_si128(t2)); + _mm_storeu_si128((__m128i *)(out + 12), _mm256_extracti128_si256(t2, 1)); +} +// decode 64 bytes and output 48 bytes +static inline void base64_decode_block(char *out, const char *src) { + base64_decode(out, + _mm256_loadu_si256(reinterpret_cast(src))); + base64_decode(out + 24, _mm256_loadu_si256( + reinterpret_cast(src + 32))); +} +static inline void base64_decode_block_safe(char *out, const char *src) { + base64_decode(out, + _mm256_loadu_si256(reinterpret_cast(src))); + char buffer[32]; // We enforce safety with a buffer. + base64_decode( + buffer, _mm256_loadu_si256(reinterpret_cast(src + 32))); + std::memcpy(out + 24, buffer, 24); +} +static inline void base64_decode_block(char *out, block64 *b) { + base64_decode(out, b->chunks[0]); + base64_decode(out + 24, b->chunks[1]); +} +static inline void base64_decode_block_safe(char *out, block64 *b) { + base64_decode(out, b->chunks[0]); + char buffer[32]; // We enforce safety with a buffer. + base64_decode(buffer, b->chunks[1]); + std::memcpy(out + 24, buffer, 24); +} + +result compress_decode_base64(char *dst, const char *src, size_t srclen) { + size_t equalsigns = 0; + if (srclen > 0 && src[srclen - 1] == '=') { + srclen--; + equalsigns = 1; + if (srclen > 0 && src[srclen - 1] == '=') { + srclen--; + equalsigns = 2; + } + } + char *end_of_safe_64byte_zone = + (srclen + 3) / 4 * 3 >= 63 ? 
dst + (srclen + 3) / 4 * 3 - 63 : dst; + + const char *const srcinit = src; + const char *const dstinit = dst; + const char *const srcend = src + srclen; + + constexpr size_t block_size = 6; + static_assert(block_size >= 2, "block_size must be at least two"); + char buffer[block_size * 64]; + char *bufferptr = buffer; + if (srclen >= 64) { + const char *const srcend64 = src + srclen - 64; + while (src <= srcend64) { + block64 b; + load_block(&b, src); + src += 64; + bool error = false; + uint64_t badcharmask = to_base64_mask(&b, &error); + if (error) { + src -= 64; + while (src < srcend && + tables::base64::to_base64_value[uint8_t(*src)] <= 64) { + src++; + } + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + if (badcharmask != 0) { + // optimization opportunity: check for simple masks like those made of + // continuous 1s followed by continuous 0s. And masks containing a + // single bad character. + bufferptr += compress_block(&b, badcharmask, bufferptr); + } else if (bufferptr != buffer) { + copy_block(&b, bufferptr); + bufferptr += 64; + } else { + if (dst >= end_of_safe_64byte_zone) { + base64_decode_block_safe(dst, &b); + } else { + base64_decode_block(dst, &b); + } + dst += 48; + } + if (bufferptr >= (block_size - 1) * 64 + buffer) { + for (size_t i = 0; i < (block_size - 2); i++) { + base64_decode_block(dst, buffer + i * 64); + dst += 48; + } + if (dst >= end_of_safe_64byte_zone) { + base64_decode_block_safe(dst, buffer + (block_size - 2) * 64); + } else { + base64_decode_block(dst, buffer + (block_size - 2) * 64); + } + dst += 48; + std::memcpy(buffer, buffer + (block_size - 1) * 64, + 64); // 64 might be too much + bufferptr -= (block_size - 1) * 64; + } + } + } + + char *buffer_start = buffer; + // Optimization note: if this is almost full, then it is worth our + // time, otherwise, we should just decode directly. 
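+  // As a rough sketch of the buffering arithmetic above: with block_size = 6
+  // the buffer holds up to 6 * 64 = 384 base64 characters, and every full
+  // 64-character block decodes to 48 bytes. The check below only tops a
+  // partial trailing 64-character block up when enough input remains to fill
+  // it; otherwise the leftover characters are decoded directly.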
+ int last_block = (int)((bufferptr - buffer_start) % 64); + if (last_block != 0 && srcend - src + last_block >= 64) { + + while ((bufferptr - buffer_start) % 64 != 0 && src < srcend) { + uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + *bufferptr = char(val); + if (val > 64) { + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + bufferptr += (val <= 63); + src++; + } + } + + for (; buffer_start + 64 <= bufferptr; buffer_start += 64) { + if (dst >= end_of_safe_64byte_zone) { + base64_decode_block_safe(dst, buffer_start); + } else { + base64_decode_block(dst, buffer_start); + } + dst += 48; + } + if ((bufferptr - buffer_start) % 64 != 0) { + while (buffer_start + 4 < bufferptr) { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + (uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 4); + + dst += 3; + buffer_start += 4; + } + if (buffer_start + 4 <= bufferptr) { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + (uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 3); + + dst += 3; + buffer_start += 4; + } + // we may have 1, 2 or 3 bytes left and we need to decode them so let us + // bring in src content + int leftover = int(bufferptr - buffer_start); + if (leftover > 0) { + while (leftover < 4 && src < srcend) { + uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + if (val > 64) { + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + buffer_start[leftover] = char(val); + leftover += (val <= 63); + src++; + } + + if (leftover == 1) { + return {BASE64_INPUT_REMAINDER, size_t(dst - dstinit)}; + } + if (leftover == 2) { + uint32_t triple = (uint32_t(buffer_start[0]) << 3 * 6) + + (uint32_t(buffer_start[1]) << 2 * 6); + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 1); + dst += 1; + } else if (leftover == 3) { + uint32_t triple = (uint32_t(buffer_start[0]) << 3 * 6) + + (uint32_t(buffer_start[1]) << 2 * 6) + + (uint32_t(buffer_start[2]) << 1 * 6); + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 2); + dst += 2; + } else { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + (uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 3); + dst += 3; + } + } + } + if (src < srcend + equalsigns) { + result r = scalar::base64::base64_tail_decode(dst, src, srcend - src); + if (r.error == error_code::INVALID_BASE64_CHARACTER) { + r.count += size_t(src - srcinit); + return r; + } else { + r.count += size_t(dst - dstinit); + } + return r; + } + return {SUCCESS, size_t(dst - dstinit)}; +} +/* end file src/haswell/avx2_base64.cpp */ + } // unnamed namespace } // namespace haswell } // namespace simdutf @@ -26835,6 +29108,21 @@ simdutf_warn_unused size_t implementation::utf32_length_from_utf8(const char * i return utf8::count_code_points(input, length); } +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char * input, size_t length) const noexcept 
{ + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { + return compress_decode_base64(output, input, length); +} + +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { + return scalar::base64::base64_length_from_binary(length); +} + +size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { + return encode_base64(output, input, length); +} } // namespace haswell } // namespace simdutf @@ -28366,6 +30654,22 @@ simdutf_warn_unused size_t implementation::utf32_length_from_utf8(const char * i return scalar::utf8::count_code_points(input, length); } + +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { + return scalar::base64::base64_to_binary(input, length, output); +} + +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { + return scalar::base64::base64_length_from_binary(length); +} + +size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { + return scalar::base64::binary_to_base64(input, length, output); +} } // namespace ppc64 } // namespace simdutf @@ -28373,6 +30677,1334 @@ simdutf_warn_unused size_t implementation::utf32_length_from_utf8(const char * i /* end file src/simdutf/ppc64/end.h */ /* end file src/ppc64/implementation.cpp */ #endif +#if SIMDUTF_IMPLEMENTATION_RVV +/* begin file src/rvv/implementation.cpp */ + + + + + +/* begin file src/simdutf/rvv/begin.h */ +// redefining SIMDUTF_IMPLEMENTATION to "rvv" +// #define SIMDUTF_IMPLEMENTATION rvv + +#if SIMDUTF_CAN_ALWAYS_RUN_RVV +// nothing needed. 
+#else +SIMDUTF_TARGET_RVV +#endif +/* end file src/simdutf/rvv/begin.h */ +namespace simdutf { +namespace rvv { +namespace { +#ifndef SIMDUTF_RVV_H +#error "rvv.h must be included" +#endif + +} // unnamed namespace +} // namespace rvv +} // namespace simdutf + +// +// Implementation-specific overrides +// +namespace simdutf { +namespace rvv { + +/* begin file src/rvv/rvv_length_from.inl.cpp */ + +simdutf_warn_unused size_t implementation::count_utf16le(const char16_t *src, size_t len) const noexcept { + return utf32_length_from_utf16le(src, len); +} + +simdutf_warn_unused size_t implementation::count_utf16be(const char16_t *src, size_t len) const noexcept { + return utf32_length_from_utf16be(src, len); +} + +simdutf_warn_unused size_t implementation::count_utf8(const char *src, size_t len) const noexcept { + return utf32_length_from_utf8(src, len); +} + +simdutf_warn_unused size_t implementation::latin1_length_from_utf8(const char *src, size_t len) const noexcept { + return utf32_length_from_utf8(src, len); +} + +simdutf_warn_unused size_t implementation::latin1_length_from_utf16(size_t len) const noexcept { + return len; +} + +simdutf_warn_unused size_t implementation::latin1_length_from_utf32(size_t len) const noexcept { + return len; +} + +simdutf_warn_unused size_t implementation::utf16_length_from_latin1(size_t len) const noexcept { + return len; +} + +simdutf_warn_unused size_t implementation::utf32_length_from_latin1(size_t len) const noexcept { + return len; +} + +simdutf_warn_unused size_t implementation::utf32_length_from_utf8(const char *src, size_t len) const noexcept { + size_t count = 0; + for (size_t vl; len > 0; len -= vl, src += vl) { + vl = __riscv_vsetvl_e8m8(len); + vint8m8_t v = __riscv_vle8_v_i8m8((int8_t*)src, vl); + vbool1_t mask = __riscv_vmsgt_vx_i8m8_b1(v, -65, vl); + count += __riscv_vcpop_m_b1(mask, vl); + } + return count; +} + +template +simdutf_really_inline static size_t rvv_utf32_length_from_utf16(const char16_t *src, size_t len) { + size_t count = 0; + for (size_t vl; len > 0; len -= vl, src += vl) { + vl = __riscv_vsetvl_e16m8(len); + vuint16m8_t v = __riscv_vle16_v_u16m8((uint16_t*)src, vl); + v = simdutf_byteflip(v, vl); + vbool2_t notHigh = __riscv_vmor_mm_b2( + __riscv_vmsgtu_vx_u16m8_b2(v, 0xDFFF, vl), + __riscv_vmsltu_vx_u16m8_b2(v, 0xDC00, vl), vl); + count += __riscv_vcpop_m_b2(notHigh, vl); + } + return count; +} + +simdutf_warn_unused size_t implementation::utf32_length_from_utf16le(const char16_t *src, size_t len) const noexcept { + return rvv_utf32_length_from_utf16(src, len); +} + +simdutf_warn_unused size_t implementation::utf32_length_from_utf16be(const char16_t *src, size_t len) const noexcept { + if (supports_zvbb()) + return rvv_utf32_length_from_utf16(src, len); + else + return rvv_utf32_length_from_utf16(src, len); +} + +simdutf_warn_unused size_t implementation::utf8_length_from_latin1(const char *src, size_t len) const noexcept { + size_t count = len; + for (size_t vl; len > 0; len -= vl, src += vl) { + vl = __riscv_vsetvl_e8m8(len); + vint8m8_t v = __riscv_vle8_v_i8m8((int8_t*)src, vl); + count += __riscv_vcpop_m_b1(__riscv_vmslt_vx_i8m8_b1(v, 0, vl), vl); + } + return count; +} + +template +simdutf_really_inline static size_t rvv_utf8_length_from_utf16(const char16_t *src, size_t len) { + size_t count = 0; + for (size_t vl; len > 0; len -= vl, src += vl) { + vl = __riscv_vsetvl_e16m8(len); + vuint16m8_t v = __riscv_vle16_v_u16m8((uint16_t*)src, vl); + v = simdutf_byteflip(v, vl); + vbool2_t m234 = __riscv_vmsgtu_vx_u16m8_b2(v, 
0x7F, vl); + vbool2_t m34 = __riscv_vmsgtu_vx_u16m8_b2(v, 0x7FF, vl); + vbool2_t notSur = __riscv_vmor_mm_b2( + __riscv_vmsltu_vx_u16m8_b2(v, 0xD800, vl), + __riscv_vmsgtu_vx_u16m8_b2(v, 0xDFFF, vl), vl); + vbool2_t m3 = __riscv_vmand_mm_b2(m34, notSur, vl); + count += vl + __riscv_vcpop_m_b2(m234, vl) + __riscv_vcpop_m_b2(m3, vl); + } + return count; +} + +simdutf_warn_unused size_t implementation::utf8_length_from_utf16le(const char16_t *src, size_t len) const noexcept { + return rvv_utf8_length_from_utf16(src, len); +} + +simdutf_warn_unused size_t implementation::utf8_length_from_utf16be(const char16_t *src, size_t len) const noexcept { + if (supports_zvbb()) + return rvv_utf8_length_from_utf16(src, len); + else + return rvv_utf8_length_from_utf16(src, len); +} + +simdutf_warn_unused size_t implementation::utf8_length_from_utf32(const char32_t *src, size_t len) const noexcept { + size_t count = 0; + for (size_t vl; len > 0; len -= vl, src += vl) { + vl = __riscv_vsetvl_e32m8(len); + vuint32m8_t v = __riscv_vle32_v_u32m8((uint32_t*)src, vl); + vbool4_t m234 = __riscv_vmsgtu_vx_u32m8_b4(v, 0x7F, vl); + vbool4_t m34 = __riscv_vmsgtu_vx_u32m8_b4(v, 0x7FF, vl); + vbool4_t m4 = __riscv_vmsgtu_vx_u32m8_b4(v, 0xFFFF, vl); + count += vl + __riscv_vcpop_m_b4(m234, vl) + __riscv_vcpop_m_b4(m34, vl) + __riscv_vcpop_m_b4(m4, vl); + } + return count; +} + +simdutf_warn_unused size_t implementation::utf16_length_from_utf8(const char *src, size_t len) const noexcept { + size_t count = 0; + for (size_t vl; len > 0; len -= vl, src += vl) { + vl = __riscv_vsetvl_e8m8(len); + vint8m8_t v = __riscv_vle8_v_i8m8((int8_t*)src, vl); + vbool1_t m1234 = __riscv_vmsgt_vx_i8m8_b1(v, -65, vl); + vbool1_t m4 = __riscv_vmsgtu_vx_u8m8_b1( + __riscv_vreinterpret_u8m8(v), (uint8_t)0b11101111, vl); + count += __riscv_vcpop_m_b1(m1234, vl) + __riscv_vcpop_m_b1(m4, vl); + } + return count; +} + +simdutf_warn_unused size_t implementation::utf16_length_from_utf32(const char32_t *src, size_t len) const noexcept { + size_t count = 0; + for (size_t vl; len > 0; len -= vl, src += vl) { + vl = __riscv_vsetvl_e32m8(len); + vuint32m8_t v = __riscv_vle32_v_u32m8((uint32_t*)src, vl); + vbool4_t m4 = __riscv_vmsgtu_vx_u32m8_b4(v, 0xFFFF, vl); + count += vl + __riscv_vcpop_m_b4(m4, vl); + } + return count; +} + +/* end file src/rvv/rvv_length_from.inl.cpp */ +/* begin file src/rvv/rvv_validate.inl.cpp */ + + +simdutf_warn_unused bool implementation::validate_ascii(const char *src, size_t len) const noexcept { + size_t vlmax = __riscv_vsetvlmax_e8m8(); + vint8m8_t mask = __riscv_vmv_v_x_i8m8(0, vlmax); + for (size_t vl; len > 0; len -= vl, src += vl) { + vl = __riscv_vsetvl_e8m8(len); + vint8m8_t v = __riscv_vle8_v_i8m8((int8_t*)src, vl); + mask = __riscv_vor_vv_i8m8_tu(mask, mask, v, vl); + } + return __riscv_vfirst_m_b1(__riscv_vmslt_vx_i8m8_b1(mask, 0, vlmax), vlmax) < 0; +} + +simdutf_warn_unused result implementation::validate_ascii_with_errors(const char *src, size_t len) const noexcept { + const char *beg = src; + for (size_t vl; len > 0; len -= vl, src += vl) { + vl = __riscv_vsetvl_e8m8(len); + vint8m8_t v = __riscv_vle8_v_i8m8((int8_t*)src, vl); + long idx = __riscv_vfirst_m_b1(__riscv_vmslt_vx_i8m8_b1(v, 0, vl), vl); + if (idx >= 0) return result(error_code::TOO_LARGE, src - beg + idx); + } + return result(error_code::SUCCESS, src - beg); +} + +/* Returns a close estimation of the number of valid UTF-8 bytes up to the + * first invalid one, but never overestimating. 
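+ *
+ * In other words, the returned count is a safe resumption point: everything
+ * before it is known to be valid, so the scalar validator only has to
+ * re-examine the remainder, which for valid input is just the final chunk
+ * plus the trailing bytes.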
*/ +simdutf_really_inline static size_t rvv_count_valid_utf8(const char *src, size_t len) { + const char *beg = src; + size_t tail = 32; // minimum of 3 + if (len < tail) return 0; + + /* validate first three bytes */ + { + size_t idx = tail; + while (idx < len && (src[idx] >> 6) == 0b10) + ++idx; + if (idx > tail + 3 || !scalar::utf8::validate(src, idx)) + return 0; + } + + static const uint64_t err1m[] = { 0x0202020202020202, 0x4915012180808080 }; + static const uint64_t err2m[] = { 0xCBCBCB8B8383A3E7, 0xCBCBDBCBCBCBCBCB }; + static const uint64_t err3m[] = { 0x0101010101010101, 0X01010101BABAAEE6 }; + + const vuint8m1_t err1tbl = __riscv_vreinterpret_v_u64m1_u8m1(__riscv_vle64_v_u64m1(err1m, 2)); + const vuint8m1_t err2tbl = __riscv_vreinterpret_v_u64m1_u8m1(__riscv_vle64_v_u64m1(err2m, 2)); + const vuint8m1_t err3tbl = __riscv_vreinterpret_v_u64m1_u8m1(__riscv_vle64_v_u64m1(err3m, 2)); + + size_t n = len - tail; + + for (size_t vl; n > 0; n -= vl, src += vl) { + vl = __riscv_vsetvl_e8m4(n); + vuint8m4_t v0 = __riscv_vle8_v_u8m4((uint8_t const*)src, vl); + + /* fast path: ASCII */ + if (__riscv_vfirst(__riscv_vmsgtu(v0, 0b01111111, vl), vl) < 0) + continue; + + /* see "Validating UTF-8 In Less Than One Instruction Per Byte" + * https://arxiv.org/abs/2010.03090 */ + vuint8m4_t v1 = __riscv_vslide1down_vx_u8m4(v0, src[vl+0], vl); + vuint8m4_t v2 = __riscv_vslide1down_vx_u8m4(v1, src[vl+1], vl); + vuint8m4_t v3 = __riscv_vslide1down_vx_u8m4(v2, src[vl+2], vl); + + vuint8m4_t s1 = __riscv_vreinterpret_v_u16m4_u8m4(__riscv_vsrl_vx_u16m4(__riscv_vreinterpret_v_u8m4_u16m4(v2), 4, __riscv_vsetvlmax_e16m4())); + vuint8m4_t s3 = __riscv_vreinterpret_v_u16m4_u8m4(__riscv_vsrl_vx_u16m4(__riscv_vreinterpret_v_u8m4_u16m4(v3), 4, __riscv_vsetvlmax_e16m4())); + + vuint8m4_t idx2 = __riscv_vand_vx_u8m4(v2, 0xF, vl); + vuint8m4_t idx1 = __riscv_vand_vx_u8m4(s1, 0xF, vl); + vuint8m4_t idx3 = __riscv_vand_vx_u8m4(s3, 0xF, vl); + + vuint8m4_t err1 = simdutf_vrgather_u8m1x4(err1tbl, idx1); + vuint8m4_t err2 = simdutf_vrgather_u8m1x4(err2tbl, idx2); + vuint8m4_t err3 = simdutf_vrgather_u8m1x4(err3tbl, idx3); + vint8m4_t errs = __riscv_vreinterpret_v_u8m4_i8m4(__riscv_vand_vv_u8m4(__riscv_vand_vv_u8m4(err1, err2, vl), err3, vl)); + + vbool2_t is_3 = __riscv_vmsgtu_vx_u8m4_b2(v1, 0b11100000-1, vl); + vbool2_t is_4 = __riscv_vmsgtu_vx_u8m4_b2(v0, 0b11110000-1, vl); + vbool2_t is_34 = __riscv_vmor_mm_b2(is_3, is_4, vl); + vbool2_t err34 = __riscv_vmxor_mm_b2(is_34, __riscv_vmslt_vx_i8m4_b2(errs, 0, vl), vl); + vbool2_t errm = __riscv_vmor_mm_b2(__riscv_vmsgt_vx_i8m4_b2(errs, 0, vl), err34, vl); + if (__riscv_vfirst_m_b2(errm , vl) >= 0) + break; + } + + /* we need to validate the last character */ + while (tail < len && (src[0] >> 6) == 0b10) --src, ++tail; + return src - beg; +} + +simdutf_warn_unused bool implementation::validate_utf8(const char *src, size_t len) const noexcept { + size_t count = rvv_count_valid_utf8(src, len); + return scalar::utf8::validate(src + count, len - count); +} + +simdutf_warn_unused result implementation::validate_utf8_with_errors(const char *src, size_t len) const noexcept { + size_t count = rvv_count_valid_utf8(src, len); + result res = scalar::utf8::validate_with_errors(src + count, len - count); + return result(res.error, count + res.count); +} + +simdutf_warn_unused bool implementation::validate_utf16le(const char16_t *src, size_t len) const noexcept { + return validate_utf16le_with_errors(src, len).error == error_code::SUCCESS; +} + +simdutf_warn_unused bool 
implementation::validate_utf16be(const char16_t *src, size_t len) const noexcept { + return validate_utf16be_with_errors(src, len).error == error_code::SUCCESS; +} + +template +simdutf_really_inline static result rvv_validate_utf16_with_errors(const char16_t *src, size_t len) { + const char16_t *beg = src; + uint16_t last = 0; + for (size_t vl; len > 0; len -= vl, src += vl, last = simdutf_byteflip(src[-1])) { + vl = __riscv_vsetvl_e16m8(len); + vuint16m8_t v1 = __riscv_vle16_v_u16m8((const uint16_t*)src, vl); + v1 = simdutf_byteflip(v1, vl); + vuint16m8_t v0 = __riscv_vslide1up_vx_u16m8(v1, last, vl); + + vbool2_t surhi = __riscv_vmseq_vx_u16m8_b2(__riscv_vand_vx_u16m8(v0, 0xFC00, vl), 0xD800, vl); + vbool2_t surlo = __riscv_vmseq_vx_u16m8_b2(__riscv_vand_vx_u16m8(v1, 0xFC00, vl), 0xDC00, vl); + + long idx = __riscv_vfirst_m_b2(__riscv_vmxor_mm_b2(surhi, surlo, vl), vl); + if (idx >= 0) { + last = idx > 0 ? simdutf_byteflip(src[idx-1]) : last; + return result(error_code::SURROGATE, src - beg + idx - (last - 0xD800u < 0x400u)); + break; + } + } + if (last - 0xD800u < 0x400u) + return result(error_code::SURROGATE, src - beg - 1); /* end on high surrogate */ + else + return result(error_code::SUCCESS, src - beg); +} + +simdutf_warn_unused result implementation::validate_utf16le_with_errors(const char16_t *src, size_t len) const noexcept { + return rvv_validate_utf16_with_errors(src, len); +} + +simdutf_warn_unused result implementation::validate_utf16be_with_errors(const char16_t *src, size_t len) const noexcept { + if (supports_zvbb()) + return rvv_validate_utf16_with_errors(src, len); + else + return rvv_validate_utf16_with_errors(src, len); +} + +simdutf_warn_unused bool implementation::validate_utf32(const char32_t *src, size_t len) const noexcept { + size_t vlmax = __riscv_vsetvlmax_e32m8(); + vuint32m8_t max = __riscv_vmv_v_x_u32m8(0x10FFFF, vlmax); + vuint32m8_t maxOff = __riscv_vmv_v_x_u32m8(0xFFFFF7FF, vlmax); + for (size_t vl; len > 0; len -= vl, src += vl) { + vl = __riscv_vsetvl_e32m8(len); + vuint32m8_t v = __riscv_vle32_v_u32m8((uint32_t*)src, vl); + vuint32m8_t off = __riscv_vadd_vx_u32m8(v, 0xFFFF2000, vl); + max = __riscv_vmaxu_vv_u32m8_tu(max, max, v, vl); + maxOff = __riscv_vmaxu_vv_u32m8_tu(maxOff, maxOff, off, vl); + } + return __riscv_vfirst_m_b4(__riscv_vmor_mm_b4( + __riscv_vmsne_vx_u32m8_b4(max, 0x10FFFF, vlmax), + __riscv_vmsne_vx_u32m8_b4(maxOff, 0xFFFFF7FF, vlmax), vlmax), vlmax) < 0; +} + +simdutf_warn_unused result implementation::validate_utf32_with_errors(const char32_t *src, size_t len) const noexcept { + const char32_t *beg = src; + for (size_t vl; len > 0; len -= vl, src += vl) { + vl = __riscv_vsetvl_e32m8(len); + vuint32m8_t v = __riscv_vle32_v_u32m8((uint32_t*)src, vl); + vuint32m8_t off = __riscv_vadd_vx_u32m8(v, 0xFFFF2000, vl); + long idx; + idx = __riscv_vfirst_m_b4(__riscv_vmsgtu_vx_u32m8_b4(v, 0x10FFFF, vl), vl); + if (idx >= 0) return result(error_code::TOO_LARGE, src - beg + idx); + idx = __riscv_vfirst_m_b4(__riscv_vmsgtu_vx_u32m8_b4(off, 0xFFFFF7FF, vl), vl); + if (idx >= 0) return result(error_code::SURROGATE, src - beg + idx); + } + return result(error_code::SUCCESS, src - beg); +} + +/* end file src/rvv/rvv_validate.inl.cpp */ + +/* begin file src/rvv/rvv_latin1_to.inl.cpp */ + +simdutf_warn_unused size_t implementation::convert_latin1_to_utf8(const char *src, size_t len, char *dst) const noexcept { + char *beg = dst; + for (size_t vl, vlOut; len > 0; len -= vl, src += vl, dst += vlOut) { + vl = __riscv_vsetvl_e8m2(len); + vuint8m2_t v1 = 
__riscv_vle8_v_u8m2((uint8_t*)src, vl); + vbool4_t nascii = __riscv_vmslt_vx_i8m2_b4(__riscv_vreinterpret_v_u8m2_i8m2(v1), 0, vl); + size_t cnt = __riscv_vcpop_m_b4(nascii, vl); + vlOut = vl + cnt; + if (cnt == 0) { + __riscv_vse8_v_u8m2((uint8_t*)dst, v1, vlOut); + continue; + } + + vuint8m2_t v0 = __riscv_vor_vx_u8m2(__riscv_vsrl_vx_u8m2(v1, 6, vl), 0b11000000, vl); + v1 = __riscv_vand_vx_u8m2_mu(nascii, v1, v1, 0b10111111, vl); + + vuint8m4_t wide = __riscv_vreinterpret_v_u16m4_u8m4(__riscv_vwmaccu_vx_u16m4(__riscv_vwaddu_vv_u16m4(v0, v1, vl), 0xFF, v1, vl)); + vbool2_t mask = __riscv_vmsgtu_vx_u8m4_b2(__riscv_vsub_vx_u8m4(wide, 0b11000000, vl*2), 1, vl*2); + vuint8m4_t comp = __riscv_vcompress_vm_u8m4(wide, mask, vl*2); + + __riscv_vse8_v_u8m4((uint8_t*)dst, comp, vlOut); + } + return dst - beg; +} + +simdutf_warn_unused size_t implementation::convert_latin1_to_utf16le(const char *src, size_t len, char16_t *dst) const noexcept { + char16_t *beg = dst; + for (size_t vl; len > 0; len -= vl, src += vl, dst += vl) { + vl = __riscv_vsetvl_e8m4(len); + vuint8m4_t v = __riscv_vle8_v_u8m4((uint8_t*)src, vl); + __riscv_vse16_v_u16m8((uint16_t*)dst, __riscv_vzext_vf2_u16m8(v, vl), vl); + } + return dst - beg; +} + +simdutf_warn_unused size_t implementation::convert_latin1_to_utf16be(const char *src, size_t len, char16_t *dst) const noexcept { + char16_t *beg = dst; + for (size_t vl; len > 0; len -= vl, src += vl, dst += vl) { + vl = __riscv_vsetvl_e8m4(len); + vuint8m4_t v = __riscv_vle8_v_u8m4((uint8_t*)src, vl); + __riscv_vse16_v_u16m8((uint16_t*)dst, __riscv_vsll_vx_u16m8(__riscv_vzext_vf2_u16m8(v, vl), 8, vl), vl); + } + return dst - beg; +} + +simdutf_warn_unused size_t implementation::convert_latin1_to_utf32(const char *src, size_t len, char32_t *dst) const noexcept { + char32_t *beg = dst; + for (size_t vl; len > 0; len -= vl, src += vl, dst += vl) { + vl = __riscv_vsetvl_e8m2(len); + vuint8m2_t v = __riscv_vle8_v_u8m2((uint8_t*)src, vl); + __riscv_vse32_v_u32m8((uint32_t*)dst, __riscv_vzext_vf4_u32m8(v, vl), vl); + } + return dst - beg; +} + +/* end file src/rvv/rvv_latin1_to.inl.cpp */ +/* begin file src/rvv/rvv_utf8_to.inl.cpp */ +template +simdutf_really_inline static size_t rvv_utf32_store_utf16_m4(uint16_t *dst, vuint32m4_t utf32, size_t vl, vbool4_t m4even) { + /* convert [000000000000aaaa|aaaaaabbbbbbbbbb] + * to [110111bbbbbbbbbb|110110aaaaaaaaaa] */ + vuint32m4_t sur = __riscv_vsub_vx_u32m4(utf32, 0x10000, vl); + sur = __riscv_vor_vv_u32m4(__riscv_vsll_vx_u32m4(sur, 16, vl), + __riscv_vsrl_vx_u32m4(sur, 10, vl), vl); + sur = __riscv_vand_vx_u32m4(sur, 0x3FF03FF, vl); + sur = __riscv_vor_vx_u32m4(sur, 0xDC00D800, vl); + /* merge 1 byte utf32 and 2 byte sur */ + vbool8_t m4 = __riscv_vmsgtu_vx_u32m4_b8(utf32, 0xFFFF, vl); + vuint16m4_t utf32_16 = __riscv_vreinterpret_v_u32m4_u16m4(__riscv_vmerge_vvm_u32m4(utf32, sur, m4, vl)); + /* compress and store */ + vbool4_t mOut = __riscv_vmor_mm_b4(__riscv_vmsne_vx_u16m4_b4(utf32_16, 0, vl*2), m4even, vl*2); + vuint16m4_t vout = __riscv_vcompress_vm_u16m4(utf32_16, mOut, vl*2); + vl = __riscv_vcpop_m_b4(mOut, vl*2); + __riscv_vse16_v_u16m4(dst, simdutf_byteflip(vout, vl), vl); + return vl; +}; + +template +simdutf_really_inline static size_t rvv_utf8_to_common(char const *src, size_t len, Tdst *dst) { + static_assert(std::is_same() || std::is_same(), "invalid type"); + constexpr bool is16 = std::is_same(); + constexpr endianness endian = bflip == simdutf_ByteFlip::NONE ? 
endianness::LITTLE : endianness::BIG; + const auto scalar = [](char const *in, size_t count, Tdst *out) { + return is16 ? scalar::utf8_to_utf16::convert(in, count, (char16_t*)out) + : scalar::utf8_to_utf32::convert(in, count, (char32_t*)out); + }; + + size_t tail = 32; // the minimum value is 3 + if (len < tail) return scalar(src, len, dst); + + /* validate first three bytes */ + if (validate) { + size_t idx = tail; + while (idx < len && (src[idx] >> 6) == 0b10) + ++idx; + if (idx > tail + 3 || !scalar::utf8::validate(src, idx)) + return 0; + } + + size_t n = len - tail; + Tdst *beg = dst; + + static const uint64_t err1m[] = { 0x0202020202020202, 0x4915012180808080 }; + static const uint64_t err2m[] = { 0xCBCBCB8B8383A3E7, 0xCBCBDBCBCBCBCBCB }; + static const uint64_t err3m[] = { 0x0101010101010101, 0X01010101BABAAEE6 }; + + const vuint8m1_t err1tbl = __riscv_vreinterpret_v_u64m1_u8m1(__riscv_vle64_v_u64m1(err1m, 2)); + const vuint8m1_t err2tbl = __riscv_vreinterpret_v_u64m1_u8m1(__riscv_vle64_v_u64m1(err2m, 2)); + const vuint8m1_t err3tbl = __riscv_vreinterpret_v_u64m1_u8m1(__riscv_vle64_v_u64m1(err3m, 2)); + + size_t vl8m2 = __riscv_vsetvlmax_e8m2(); + vbool4_t m4even = __riscv_vmseq_vx_u8m2_b4(__riscv_vand_vx_u8m2(__riscv_vid_v_u8m2(vl8m2), 1, vl8m2), 0, vl8m2); + + for (size_t vl, vlOut; n > 0; n -= vl, src += vl, dst += vlOut) { + vl = __riscv_vsetvl_e8m2(n); + + vuint8m2_t v0 = __riscv_vle8_v_u8m2((uint8_t const*)src, vl); + uint64_t max = __riscv_vmv_x_s_u8m1_u8(__riscv_vredmaxu_vs_u8m2_u8m1(v0, __riscv_vmv_s_x_u8m1(0, vl), vl)); + + /* fast path: ASCII */ + if (max < 0b10000000) { + vlOut = vl; + if (is16) __riscv_vse16_v_u16m4((uint16_t*)dst, simdutf_byteflip(__riscv_vzext_vf2_u16m4(v0, vlOut), vlOut), vlOut); + else __riscv_vse32_v_u32m8((uint32_t*)dst, __riscv_vzext_vf4_u32m8(v0, vlOut), vlOut); + continue; + } + + /* see "Validating UTF-8 In Less Than One Instruction Per Byte" + * https://arxiv.org/abs/2010.03090 */ + vuint8m2_t v1 = __riscv_vslide1down_vx_u8m2(v0, src[vl+0], vl); + vuint8m2_t v2 = __riscv_vslide1down_vx_u8m2(v1, src[vl+1], vl); + vuint8m2_t v3 = __riscv_vslide1down_vx_u8m2(v2, src[vl+2], vl); + + if (validate) { + vuint8m2_t s1 = __riscv_vreinterpret_v_u16m2_u8m2(__riscv_vsrl_vx_u16m2(__riscv_vreinterpret_v_u8m2_u16m2(v2), 4, __riscv_vsetvlmax_e16m2())); + vuint8m2_t s3 = __riscv_vreinterpret_v_u16m2_u8m2(__riscv_vsrl_vx_u16m2(__riscv_vreinterpret_v_u8m2_u16m2(v3), 4, __riscv_vsetvlmax_e16m2())); + + vuint8m2_t idx2 = __riscv_vand_vx_u8m2(v2, 0xF, vl); + vuint8m2_t idx1 = __riscv_vand_vx_u8m2(s1, 0xF, vl); + vuint8m2_t idx3 = __riscv_vand_vx_u8m2(s3, 0xF, vl); + + vuint8m2_t err1 = simdutf_vrgather_u8m1x2(err1tbl, idx1); + vuint8m2_t err2 = simdutf_vrgather_u8m1x2(err2tbl, idx2); + vuint8m2_t err3 = simdutf_vrgather_u8m1x2(err3tbl, idx3); + vint8m2_t errs = __riscv_vreinterpret_v_u8m2_i8m2(__riscv_vand_vv_u8m2(__riscv_vand_vv_u8m2(err1, err2, vl), err3, vl)); + + vbool4_t is_3 = __riscv_vmsgtu_vx_u8m2_b4(v1, 0b11100000-1, vl); + vbool4_t is_4 = __riscv_vmsgtu_vx_u8m2_b4(v0, 0b11110000-1, vl); + vbool4_t is_34 = __riscv_vmor_mm_b4(is_3, is_4, vl); + vbool4_t err34 = __riscv_vmxor_mm_b4(is_34, __riscv_vmslt_vx_i8m2_b4(errs, 0, vl), vl); + vbool4_t errm = __riscv_vmor_mm_b4(__riscv_vmsgt_vx_i8m2_b4(errs, 0, vl), err34, vl); + if (__riscv_vfirst_m_b4(errm , vl) >= 0) + return 0; + } + + /* decoding */ + + /* mask of non continuation bytes */ + vbool4_t m = __riscv_vmsgt_vx_i8m2_b4(__riscv_vreinterpret_v_u8m2_i8m2(v0), -65, vl); + vlOut = __riscv_vcpop_m_b4(m, vl); 
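+    /* Why the comparison against -65 works: continuation bytes have the form
+     * 0b10xxxxxx, i.e. 0x80..0xBF, which reinterpreted as signed int8 is
+     * -128..-65. Every ASCII or lead byte therefore compares strictly greater
+     * than -65, so the popcount of this mask is the number of code points
+     * started in the chunk, which becomes the output length vlOut. */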
+ + /* extract first and second bytes */ + vuint8m2_t b1 = __riscv_vcompress_vm_u8m2(v0, m, vl); + vuint8m2_t b2 = __riscv_vcompress_vm_u8m2(v1, m, vl); + + /* fast path: one and two byte */ + if (max < 0b11100000) { + b2 = __riscv_vand_vx_u8m2(b2, 0b00111111, vlOut); + + vbool4_t m1 = __riscv_vmsgtu_vx_u8m2_b4(b1, 0b10111111, vlOut); + b1 = __riscv_vand_vx_u8m2_mu(m1, b1, b1, 63, vlOut); + + vuint16m4_t b12 = __riscv_vwmulu_vv_u16m4(b1, __riscv_vmerge_vxm_u8m2(__riscv_vmv_v_x_u8m2(1, vlOut), 1<<6, m1, vlOut), vlOut); + b12 = __riscv_vwaddu_wv_u16m4_mu(m1, b12, b12, b2, vlOut); + if (is16) __riscv_vse16_v_u16m4((uint16_t*)dst, simdutf_byteflip(b12, vlOut), vlOut); + else __riscv_vse32_v_u32m8((uint32_t*)dst, __riscv_vzext_vf2_u32m8(b12, vlOut), vlOut); + continue; + } + + /* fast path: one, two and three byte */ + if (max < 0b11110000) { + vuint8m2_t b3 = __riscv_vcompress_vm_u8m2(v2, m, vl); + + b2 = __riscv_vand_vx_u8m2(b2, 0b00111111, vlOut); + b3 = __riscv_vand_vx_u8m2(b3, 0b00111111, vlOut); + + vbool4_t m1 = __riscv_vmsgtu_vx_u8m2_b4(b1, 0b10111111, vlOut); + vbool4_t m3 = __riscv_vmsgtu_vx_u8m2_b4(b1, 0b11011111, vlOut); + + vuint8m2_t t1 = __riscv_vand_vx_u8m2_mu(m1, b1, b1, 63, vlOut); + b1 = __riscv_vand_vx_u8m2_mu(m3, t1, b1, 15, vlOut); + + vuint16m4_t b12 = __riscv_vwmulu_vv_u16m4(b1, __riscv_vmerge_vxm_u8m2(__riscv_vmv_v_x_u8m2(1, vlOut), 1<<6, m1, vlOut), vlOut); + b12 = __riscv_vwaddu_wv_u16m4_mu(m1, b12, b12, b2, vlOut); + vuint16m4_t b123 = __riscv_vwaddu_wv_u16m4_mu(m3, b12, __riscv_vsll_vx_u16m4_mu(m3, b12, b12, 6, vlOut), b3, vlOut); + if (is16) __riscv_vse16_v_u16m4((uint16_t*)dst, simdutf_byteflip(b123, vlOut), vlOut); + else __riscv_vse32_v_u32m8((uint32_t*)dst, __riscv_vzext_vf2_u32m8(b123, vlOut), vlOut); + continue; + } + + /* extract third and fourth bytes */ + vuint8m2_t b3 = __riscv_vcompress_vm_u8m2(v2, m, vl); + vuint8m2_t b4 = __riscv_vcompress_vm_u8m2(v3, m, vl); + + #define SIMDUTF_RVV_UTF8_TO_COMMON_M1(idx) \ + vuint8m1_t c1 = __riscv_vget_v_u8m2_u8m1(b1, idx); \ + vuint8m1_t c2 = __riscv_vget_v_u8m2_u8m1(b2, idx); \ + vuint8m1_t c3 = __riscv_vget_v_u8m2_u8m1(b3, idx); \ + vuint8m1_t c4 = __riscv_vget_v_u8m2_u8m1(b4, idx); \ + /* remove prefix from trailing bytes */ \ + c2 = __riscv_vand_vx_u8m1(c2, 0b00111111, vlOut); \ + c3 = __riscv_vand_vx_u8m1(c3, 0b00111111, vlOut); \ + c4 = __riscv_vand_vx_u8m1(c4, 0b00111111, vlOut); \ + /* remove prefix from leading bytes + * + * We could also use vrgather here, but it increases register pressure, + * and its performance varies widely on current platforms. It might be + * worth reconsidering, though, once there is more hardware available. + * Same goes for the __riscv_vsrl_vv_u32m4 correction step. 
+ * + * We shift left and then right by the number of bytes in the prefix, + * which can be calculated as follows: + * x max(x-10, 0) + * 0xxx -> 0000-0111 -> sift by 0 or 1 -> 0 + * 10xx -> 1000-1011 -> don't care + * 110x -> 1100,1101 -> sift by 3 -> 2,3 + * 1110 -> 1110 -> sift by 4 -> 4 + * 1111 -> 1111 -> sift by 5 -> 5 + * + * vssubu.vx v, 10, (max(x-10, 0)) almost gives us what we want, we + * just need to manually detect and handle the one special case: + */ \ + vuint8m1_t shift = __riscv_vsrl_vx_u8m1(c1, 4, vlOut); \ + shift = __riscv_vmerge_vxm_u8m1(__riscv_vssubu_vx_u8m1(shift, 10, vlOut), 3, __riscv_vmseq_vx_u8m1_b8(shift, 12, vlOut), vlOut); \ + c1 = __riscv_vsll_vv_u8m1(c1, shift, vlOut); \ + c1 = __riscv_vsrl_vv_u8m1(c1, shift, vlOut); \ + /* unconditionally widen and combine to c1234 */ \ + vuint16m2_t c34 = __riscv_vwaddu_wv_u16m2(__riscv_vwmulu_vx_u16m2(c3, 1<<6, vlOut), c4, vlOut); \ + vuint16m2_t c12 = __riscv_vwaddu_wv_u16m2(__riscv_vwmulu_vx_u16m2(c1, 1<<6, vlOut), c2, vlOut); \ + vuint32m4_t c1234 = __riscv_vwaddu_wv_u32m4(__riscv_vwmulu_vx_u32m4(c12, 1 << 12, vlOut), c34, vlOut); \ + /* derive required right-shift amount from `shift` to reduce + * c1234 to the required number of bytes */ \ + c1234 = __riscv_vsrl_vv_u32m4(c1234, __riscv_vzext_vf4_u32m4(__riscv_vmul_vx_u8m1( \ + __riscv_vrsub_vx_u8m1( __riscv_vssubu_vx_u8m1(shift, 2, vlOut), 3, vlOut), 6, vlOut), vlOut), vlOut); \ + /* store result in desired format */ \ + if (is16) vlDst = rvv_utf32_store_utf16_m4((uint16_t*)dst, c1234, vlOut, m4even); \ + else vlDst = vlOut, __riscv_vse32_v_u32m4((uint32_t*)dst, c1234, vlOut); + + /* Unrolling this manually reduces register pressure and allows + * us to terminate early. */ + { + size_t vlOutm2 = vlOut, vlDst; + vlOut = __riscv_vsetvl_e8m1(vlOut); + SIMDUTF_RVV_UTF8_TO_COMMON_M1(0) + if (vlOutm2 == vlOut) { + vlOut = vlDst; + continue; + } + + dst += vlDst; + vlOut = vlOutm2 - vlOut; + } + { + size_t vlDst; + SIMDUTF_RVV_UTF8_TO_COMMON_M1(1) + vlOut = vlDst; + } + +#undef SIMDUTF_RVV_UTF8_TO_COMMON_M1 + } + + /* validate the last character and reparse it + tail */ + if (len > tail) { + if ((src[0] >> 6) == 0b10) + --dst; + while ((src[0] >> 6) == 0b10 && tail < len) + --src, ++tail; + if (is16) { + /* go back one more, when on high surrogate */ + if (simdutf_byteflip((uint16_t)dst[-1]) >= 0xD800 && simdutf_byteflip((uint16_t)dst[-1]) <= 0xDBFF) + --dst; + } + } + size_t ret = scalar(src, tail, dst); + if (ret == 0) return 0; + return (size_t)(dst - beg) + ret; +} + + +simdutf_warn_unused size_t implementation::convert_utf8_to_latin1(const char *src, size_t len, char *dst) const noexcept { + const char *beg = dst; + uint8_t last = 0b10000000; + for (size_t vl, vlOut; len > 0; len -= vl, src += vl, dst += vlOut, last = src[-1]) { + vl = __riscv_vsetvl_e8m2(len); + vuint8m2_t v1 = __riscv_vle8_v_u8m2((uint8_t*)src, vl); + vbool4_t m = __riscv_vmsltu_vx_u8m2_b4(v1, 0b11000000, vl); + vlOut = __riscv_vcpop_m_b4(m, vl); + if (vlOut != vl || last > 0b01111111) { + vuint8m2_t v0 = __riscv_vslide1up_vx_u8m2(v1, last, vl); + + vbool4_t leading0 = __riscv_vmsgtu_vx_u8m2_b4(v0, 0b10111111, vl); + vbool4_t trailing1 = __riscv_vmslt_vx_i8m2_b4(__riscv_vreinterpret_v_u8m2_i8m2(v1), (uint8_t)0b11000000, vl); + vbool4_t tobig = __riscv_vmand_mm_b4(leading0, __riscv_vmsgtu_vx_u8m2_b4(__riscv_vxor_vx_u8m2(v0, (uint8_t)-62, vl), 1, vl), vl); + if (__riscv_vfirst_m_b4(__riscv_vmor_mm_b4(tobig, __riscv_vmxor_mm_b4(leading0, trailing1, vl), vl), vl) >= 0) + return 0; + + v1 = 
__riscv_vor_vx_u8m2_mu(__riscv_vmseq_vx_u8m2_b4(v0, 0b11000011, vl), v1, v1, 0b01000000, vl); + v1 = __riscv_vcompress_vm_u8m2(v1, m, vl); + } + __riscv_vse8_v_u8m2((uint8_t*)dst, v1, vlOut); + } + if (last > 0b10111111) + return 0; + return dst - beg; +} + +simdutf_warn_unused result implementation::convert_utf8_to_latin1_with_errors(const char *src, size_t len, char *dst) const noexcept { + size_t res = convert_utf8_to_latin1(src, len, dst); + if (res) return result(error_code::SUCCESS, res); + return scalar::utf8_to_latin1::convert_with_errors(src, len, dst); +} + +simdutf_warn_unused size_t implementation::convert_valid_utf8_to_latin1(const char *src, size_t len, char *dst) const noexcept { + const char *beg = dst; + uint8_t last = 0b11000000; + for (size_t vl, vlOut; len > 0; len -= vl, src += vl, dst += vlOut, last = src[-1]) { + vl = __riscv_vsetvl_e8m2(len); + vuint8m2_t v1 = __riscv_vle8_v_u8m2((uint8_t*)src, vl); + vbool4_t m = __riscv_vmsltu_vx_u8m2_b4(v1, 0b11000000, vl); + vlOut = __riscv_vcpop_m_b4(m, vl); + if (vlOut != vl || last > 0b01111111) { + vuint8m2_t v0 = __riscv_vslide1up_vx_u8m2(v1, last, vl); + v1 = __riscv_vor_vx_u8m2_mu(__riscv_vmseq_vx_u8m2_b4(v0, 0b11000011, vl), v1, v1, 0b01000000, vl); + v1 = __riscv_vcompress_vm_u8m2(v1, m, vl); + } + __riscv_vse8_v_u8m2((uint8_t*)dst, v1, vlOut); + } + return dst - beg; +} + +simdutf_warn_unused size_t implementation::convert_utf8_to_utf16le(const char *src, size_t len, char16_t *dst) const noexcept { + return rvv_utf8_to_common(src, len, (uint16_t*)dst); +} + +simdutf_warn_unused size_t implementation::convert_utf8_to_utf16be(const char *src, size_t len, char16_t *dst) const noexcept { + if (supports_zvbb()) + return rvv_utf8_to_common(src, len, (uint16_t*)dst); + else + return rvv_utf8_to_common(src, len, (uint16_t*)dst); +} + +simdutf_warn_unused result implementation::convert_utf8_to_utf16le_with_errors(const char *src, size_t len, char16_t *dst) const noexcept { + size_t res = convert_utf8_to_utf16le(src, len, dst); + if (res) return result(error_code::SUCCESS, res); + return scalar::utf8_to_utf16::convert_with_errors(src, len, dst); +} + +simdutf_warn_unused result implementation::convert_utf8_to_utf16be_with_errors(const char *src, size_t len, char16_t *dst) const noexcept { + size_t res = convert_utf8_to_utf16be(src, len, dst); + if (res) return result(error_code::SUCCESS, res); + return scalar::utf8_to_utf16::convert_with_errors(src, len, dst); +} + +simdutf_warn_unused size_t implementation::convert_valid_utf8_to_utf16le(const char *src, size_t len, char16_t *dst) const noexcept { + return rvv_utf8_to_common(src, len, (uint16_t*)dst); +} + +simdutf_warn_unused size_t implementation::convert_valid_utf8_to_utf16be(const char *src, size_t len, char16_t *dst) const noexcept { + if (supports_zvbb()) + return rvv_utf8_to_common(src, len, (uint16_t*)dst); + else + return rvv_utf8_to_common(src, len, (uint16_t*)dst); +} + +simdutf_warn_unused size_t implementation::convert_utf8_to_utf32(const char *src, size_t len, char32_t *dst) const noexcept { + return rvv_utf8_to_common(src, len, (uint32_t*)dst); +} + +simdutf_warn_unused result implementation::convert_utf8_to_utf32_with_errors(const char *src, size_t len, char32_t *dst) const noexcept { + size_t res = convert_utf8_to_utf32(src, len, dst); + if (res) return result(error_code::SUCCESS, res); + return scalar::utf8_to_utf32::convert_with_errors(src, len, dst); +} + +simdutf_warn_unused size_t implementation::convert_valid_utf8_to_utf32(const char *src, size_t len, 
char32_t *dst) const noexcept { + return rvv_utf8_to_common(src, len, (uint32_t*)dst); +} + +/* end file src/rvv/rvv_utf8_to.inl.cpp */ +/* begin file src/rvv/rvv_utf16_to.inl.cpp */ +template +simdutf_really_inline static result rvv_utf16_to_latin1_with_errors(const char16_t *src, size_t len, char *dst) { + const char16_t *const beg = src; + for (size_t vl; len > 0; len -= vl, src += vl, dst += vl) { + vl = __riscv_vsetvl_e16m8(len); + vuint16m8_t v = __riscv_vle16_v_u16m8((uint16_t*)src, vl); + v = simdutf_byteflip(v, vl); + long idx = __riscv_vfirst_m_b2(__riscv_vmsgtu_vx_u16m8_b2(v, 255, vl), vl); + if (idx >= 0) + return result(error_code::TOO_LARGE, beg - src + idx); + __riscv_vse8_v_u8m4((uint8_t*)dst, __riscv_vncvt_x_x_w_u8m4(v, vl), vl); + } + return result(error_code::SUCCESS, src - beg); +} + +simdutf_warn_unused size_t implementation::convert_utf16le_to_latin1(const char16_t *src, size_t len, char *dst) const noexcept { + result res = convert_utf16le_to_latin1_with_errors(src, len, dst); + return res.error == error_code::SUCCESS ? res.count : 0; +} + +simdutf_warn_unused size_t implementation::convert_utf16be_to_latin1(const char16_t *src, size_t len, char *dst) const noexcept { + result res = convert_utf16be_to_latin1_with_errors(src, len, dst); + return res.error == error_code::SUCCESS ? res.count : 0; +} + +simdutf_warn_unused result implementation::convert_utf16le_to_latin1_with_errors(const char16_t *src, size_t len, char *dst) const noexcept { + return rvv_utf16_to_latin1_with_errors(src, len, dst); +} + +simdutf_warn_unused result implementation::convert_utf16be_to_latin1_with_errors(const char16_t *src, size_t len, char *dst) const noexcept { + if (supports_zvbb()) + return rvv_utf16_to_latin1_with_errors(src, len, dst); + else + return rvv_utf16_to_latin1_with_errors(src, len, dst); +} + +simdutf_warn_unused size_t implementation::convert_valid_utf16le_to_latin1(const char16_t *src, size_t len, char *dst) const noexcept { + const char16_t *const beg = src; + for (size_t vl; len > 0; len -= vl, src += vl, dst += vl) { + vl = __riscv_vsetvl_e16m8(len); + vuint16m8_t v = __riscv_vle16_v_u16m8((uint16_t*)src, vl); + __riscv_vse8_v_u8m4((uint8_t*)dst, __riscv_vncvt_x_x_w_u8m4(v, vl), vl); + } + return src - beg; +} + +simdutf_warn_unused size_t implementation::convert_valid_utf16be_to_latin1(const char16_t *src, size_t len, char *dst) const noexcept { + const char16_t *const beg = src; + for (size_t vl; len > 0; len -= vl, src += vl, dst += vl) { + vl = __riscv_vsetvl_e16m8(len); + vuint16m8_t v = __riscv_vle16_v_u16m8((uint16_t*)src, vl); + __riscv_vse8_v_u8m4((uint8_t*)dst, __riscv_vnsrl_wx_u8m4(v, 8, vl), vl); + } + return src - beg; +} + +template +simdutf_really_inline static result rvv_utf16_to_utf8_with_errors(const char16_t *src, size_t len, char *dst) { + size_t n = len; + const char16_t *srcBeg = src; + const char *dstBeg = dst; + size_t vl8m4 = __riscv_vsetvlmax_e8m4(); + vbool2_t m4mulp2 = __riscv_vmseq_vx_u8m4_b2(__riscv_vand_vx_u8m4(__riscv_vid_v_u8m4(vl8m4), 3, vl8m4), 2, vl8m4); + + for (size_t vl, vlOut; n > 0; ) { + vl = __riscv_vsetvl_e16m2(n); + + vuint16m2_t v = __riscv_vle16_v_u16m2((uint16_t const*)src, vl); + v = simdutf_byteflip(v, vl); + vbool8_t m234 = __riscv_vmsgtu_vx_u16m2_b8(v, 0x80-1, vl); + + if (__riscv_vfirst_m_b8(m234,vl) < 0) { /* 1 byte utf8 */ + vlOut = vl; + __riscv_vse8_v_u8m1((uint8_t*)dst, __riscv_vncvt_x_x_w_u8m1(v, vlOut), vlOut); + n -= vl, src += vl, dst += vlOut; + continue; + } + + vbool8_t m34 = __riscv_vmsgtu_vx_u16m2_b8(v, 
0x800-1, vl); + + if (__riscv_vfirst_m_b8(m34,vl) < 0) { /* 1/2 byte utf8 */ + /* 0: [ aaa|aabbbbbb] + * 1: [aabbbbbb| ] vsll 8 + * 2: [ | aaaaa] vsrl 6 + * 3: [00111111|00011111] + * 4: [ bbbbbb|000aaaaa] (1|2)&3 + * 5: [11000000|11000000] + * 6: [10bbbbbb|110aaaaa] 4|5 */ + vuint16m2_t twoByte = + __riscv_vand_vx_u16m2(__riscv_vor_vv_u16m2( + __riscv_vsll_vx_u16m2(v, 8, vl), + __riscv_vsrl_vx_u16m2(v, 6, vl), + vl), 0b0011111100011111, vl); + vuint16m2_t vout16 = __riscv_vor_vx_u16m2_mu(m234, v, twoByte, 0b1000000011000000, vl); + vuint8m2_t vout = __riscv_vreinterpret_v_u16m2_u8m2(vout16); + + /* Every high byte that is zero should be compressed + * low bytes should never be compressed, so we set them + * to all ones, and then create a non-zero bytes mask */ + vbool4_t mcomp = __riscv_vmsne_vx_u8m2_b4(__riscv_vreinterpret_v_u16m2_u8m2(__riscv_vor_vx_u16m2(vout16, 0xFF, vl)), 0, vl*2); + vlOut = __riscv_vcpop_m_b4(mcomp, vl*2); + + vout = __riscv_vcompress_vm_u8m2(vout, mcomp, vl*2); + __riscv_vse8_v_u8m2((uint8_t*)dst, vout, vlOut); + + n -= vl, src += vl, dst += vlOut; + continue; + } + + vbool8_t sur = __riscv_vmseq_vx_u16m2_b8(__riscv_vand_vx_u16m2(v, 0xF800, vl), 0xD800, vl); + long first = __riscv_vfirst_m_b8(sur, vl); + size_t tail = vl - first; + vl = first < 0 ? vl : first; + + if (vl > 0) { /* 1/2/3 byte utf8 */ + /* in: [aaaabbbb|bbcccccc] + * v1: [0bcccccc| ] vsll 8 + * v1: [10cccccc| ] vsll 8 & 0b00111111 | 0b10000000 + * v2: [ |110bbbbb] vsrl 6 & 0b00111111 | 0b11000000 + * v2: [ |10bbbbbb] vsrl 6 & 0b00111111 | 0b10000000 + * v3: [ |1110aaaa] vsrl 12 | 0b11100000 + * 1: [00000000|0bcccccc|00000000|00000000] => [0bcccccc] + * 2: [00000000|10cccccc|110bbbbb|00000000] => [110bbbbb] [10cccccc] + * 3: [00000000|10cccccc|10bbbbbb|1110aaaa] => [1110aaaa] [10bbbbbb] [10cccccc] + */ + vuint16m2_t v1, v2, v3, v12; + v1 = __riscv_vor_vx_u16m2_mu(m234, v, __riscv_vand_vx_u16m2(v, 0b00111111, vl), 0b10000000, vl); + v1 = __riscv_vsll_vx_u16m2(v1, 8, vl); + + v2 = __riscv_vor_vx_u16m2(__riscv_vand_vx_u16m2(__riscv_vsrl_vx_u16m2(v, 6, vl), 0b00111111, vl), 0b10000000, vl); + v2 = __riscv_vor_vx_u16m2_mu(__riscv_vmnot_m_b8(m34,vl), v2, v2, 0b01000000, vl); + v3 = __riscv_vor_vx_u16m2(__riscv_vsrl_vx_u16m2(v, 12, vl), 0b11100000, vl); + v12 = __riscv_vor_vv_u16m2_mu(m234, v1, v1, v2, vl); + + vuint32m4_t w12 = __riscv_vwmulu_vx_u32m4(v12, 1<<8, vl); + vuint32m4_t w123 = __riscv_vwaddu_wv_u32m4_mu(m34, w12, w12, v3, vl); + vuint8m4_t vout = __riscv_vreinterpret_v_u32m4_u8m4(w123); + + vbool2_t mcomp = __riscv_vmor_mm_b2(m4mulp2, __riscv_vmsne_vx_u8m4_b2(vout, 0, vl*4), vl*4); + vlOut = __riscv_vcpop_m_b2(mcomp, vl*4); + + vout = __riscv_vcompress_vm_u8m4(vout, mcomp, vl*4); + __riscv_vse8_v_u8m4((uint8_t*)dst, vout, vlOut); + + n -= vl, src += vl, dst += vlOut; + } + + if (tail) while (n) { + uint16_t word = simdutf_byteflip(src[0]); + if((word & 0xFF80)==0) { + break; + } else if((word & 0xF800)==0) { + break; + } else if ((word & 0xF800) != 0xD800) { + break; + } else { + // must be a surrogate pair + if (n <= 1) return result(error_code::SURROGATE, src - srcBeg); + uint16_t diff = word - 0xD800; + if (diff > 0x3FF) return result(error_code::SURROGATE, src - srcBeg); + uint16_t diff2 = simdutf_byteflip(src[1]) - 0xDC00; + if (diff2 > 0x3FF) return result(error_code::SURROGATE, src - srcBeg); + + uint32_t value = ((diff + 0x40) << 10) + diff2 ; + + // will generate four UTF-8 bytes + // we have 0b11110XXX 0b10XXXXXX 0b10XXXXXX 0b10XXXXXX + *dst++ = (char)( (value>>18) | 0b11110000); + 
*dst++ = (char)(((value>>12) & 0b111111) | 0b10000000); + *dst++ = (char)(((value>> 6) & 0b111111) | 0b10000000); + *dst++ = (char)(( value & 0b111111) | 0b10000000); + src += 2; + n -= 2; + } + } + } + + return result(error_code::SUCCESS, dst - dstBeg); +} + +simdutf_warn_unused size_t implementation::convert_utf16le_to_utf8(const char16_t *src, size_t len, char *dst) const noexcept { + result res = convert_utf16le_to_utf8_with_errors(src, len, dst); + return res.error == error_code::SUCCESS ? res.count : 0; +} + +simdutf_warn_unused size_t implementation::convert_utf16be_to_utf8(const char16_t *src, size_t len, char *dst) const noexcept { + result res = convert_utf16be_to_utf8_with_errors(src, len, dst); + return res.error == error_code::SUCCESS ? res.count : 0; +} + +simdutf_warn_unused result implementation::convert_utf16le_to_utf8_with_errors(const char16_t *src, size_t len, char *dst) const noexcept { + return rvv_utf16_to_utf8_with_errors(src, len, dst); +} + +simdutf_warn_unused result implementation::convert_utf16be_to_utf8_with_errors(const char16_t *src, size_t len, char *dst) const noexcept { + if (supports_zvbb()) + return rvv_utf16_to_utf8_with_errors(src, len, dst); + else + return rvv_utf16_to_utf8_with_errors(src, len, dst); +} + +simdutf_warn_unused size_t implementation::convert_valid_utf16le_to_utf8(const char16_t *src, size_t len, char *dst) const noexcept { + return convert_utf16le_to_utf8(src, len, dst); +} + +simdutf_warn_unused size_t implementation::convert_valid_utf16be_to_utf8(const char16_t *src, size_t len, char *dst) const noexcept { + return convert_utf16be_to_utf8(src, len, dst); +} + +template +simdutf_really_inline static result rvv_utf16_to_utf32_with_errors(const char16_t *src, size_t len, char32_t *dst) { + const char16_t *const srcBeg = src; + char32_t *const dstBeg = dst; + + uint16_t last = 0; + for (size_t vl, vlOut; len > 0; len -= vl, src += vl, dst += vlOut, last = simdutf_byteflip(src[-1])) { + vl = __riscv_vsetvl_e16m2(len); + vuint16m2_t v1 = __riscv_vle16_v_u16m2((uint16_t const*)src, vl); + v1 = simdutf_byteflip(v1, vl); + vuint16m2_t v0 = __riscv_vslide1up_vx_u16m2(v1, last, vl); + + vbool8_t surhi0 = __riscv_vmseq_vx_u16m2_b8(__riscv_vand_vx_u16m2(v0, 0xFC00, vl), 0xD800, vl); + vbool8_t surlo1 = __riscv_vmseq_vx_u16m2_b8(__riscv_vand_vx_u16m2(v1, 0xFC00, vl), 0xDC00, vl); + + /* no surrogates */ + if (__riscv_vfirst_m_b8(__riscv_vmor_mm_b8(surhi0, surlo1, vl), vl) < 0) { + vlOut = vl; + __riscv_vse32_v_u32m4((uint32_t*)dst, __riscv_vzext_vf2_u32m4(v1, vl), vl); + continue; + } + + long idx = __riscv_vfirst_m_b8(__riscv_vmxor_mm_b8(surhi0, surlo1, vl), vl); + if (idx >= 0) { + last = idx > 0 ? simdutf_byteflip(src[idx-1]) : last; + return result(error_code::SURROGATE, src - srcBeg + idx - (last - 0xD800u < 0x400u)); + } + + vbool8_t surhi1 = __riscv_vmseq_vx_u16m2_b8(__riscv_vand_vx_u16m2(v1, 0xFC00, vl), 0xD800, vl); + uint16_t next = vl < len ? 
simdutf_byteflip(src[vl]) : 0; + + vuint32m4_t wide = __riscv_vzext_vf2_u32m4(v1, vl); + vuint32m4_t slided = __riscv_vslide1down_vx_u32m4(wide, next, vl); + vuint32m4_t aligned = __riscv_vsll_vx_u32m4_mu(surhi1, wide, wide, 10, vl); + vuint32m4_t added = __riscv_vadd_vv_u32m4_mu(surhi1, aligned, aligned, slided, vl); + vuint32m4_t utf32 = __riscv_vadd_vx_u32m4_mu(surhi1, added, added, 0xFCA02400, vl); + vbool8_t m = __riscv_vmnot_m_b8(surlo1, vl); + vlOut = __riscv_vcpop_m_b8(m, vl); + vuint32m4_t comp = __riscv_vcompress_vm_u32m4(utf32, m, vl); + __riscv_vse32_v_u32m4((uint32_t*)dst, comp, vlOut); + } + + if (last - 0xD800u < 0x400u) + return result(error_code::SURROGATE, src - srcBeg - 1); /* end on high surrogate */ + else + return result(error_code::SUCCESS, dst - dstBeg); +} + +simdutf_warn_unused size_t implementation::convert_utf16le_to_utf32(const char16_t *src, size_t len, char32_t *dst) const noexcept { + result res = convert_utf16le_to_utf32_with_errors(src, len, dst); + return res.error == error_code::SUCCESS ? res.count : 0; +} + +simdutf_warn_unused size_t implementation::convert_utf16be_to_utf32(const char16_t *src, size_t len, char32_t *dst) const noexcept { + result res = convert_utf16be_to_utf32_with_errors(src, len, dst); + return res.error == error_code::SUCCESS ? res.count : 0; +} + +simdutf_warn_unused result implementation::convert_utf16le_to_utf32_with_errors(const char16_t *src, size_t len, char32_t *dst) const noexcept { + return rvv_utf16_to_utf32_with_errors(src, len, dst); +} + +simdutf_warn_unused result implementation::convert_utf16be_to_utf32_with_errors(const char16_t *src, size_t len, char32_t *dst) const noexcept { + if (supports_zvbb()) + return rvv_utf16_to_utf32_with_errors(src, len, dst); + else + return rvv_utf16_to_utf32_with_errors(src, len, dst); +} + +simdutf_warn_unused size_t implementation::convert_valid_utf16le_to_utf32(const char16_t *src, size_t len, char32_t *dst) const noexcept { + return convert_utf16le_to_utf32(src, len, dst); +} + +simdutf_warn_unused size_t implementation::convert_valid_utf16be_to_utf32(const char16_t *src, size_t len, char32_t *dst) const noexcept { + return convert_utf16be_to_utf32(src, len, dst); +} +/* end file src/rvv/rvv_utf16_to.inl.cpp */ +/* begin file src/rvv/rvv_utf32_to.inl.cpp */ + +simdutf_warn_unused size_t implementation::convert_utf32_to_latin1(const char32_t *src, size_t len, char *dst) const noexcept { + result res = convert_utf32_to_latin1_with_errors(src, len, dst); + return res.error == error_code::SUCCESS ? res.count : 0; +} + +simdutf_warn_unused result implementation::convert_utf32_to_latin1_with_errors(const char32_t *src, size_t len, char *dst) const noexcept { + const char32_t *const beg = src; + for (size_t vl; len > 0; len -= vl, src += vl, dst += vl) { + vl = __riscv_vsetvl_e32m8(len); + vuint32m8_t v = __riscv_vle32_v_u32m8((uint32_t*)src, vl); + long idx = __riscv_vfirst_m_b4(__riscv_vmsgtu_vx_u32m8_b4(v, 255, vl), vl); + if (idx >= 0) + return result(error_code::TOO_LARGE, src - beg + idx); + /* We don't use vcompress here, because its performance varies widely on current platforms. + * This might be worth reconsidering once there is more hardware available. 
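+   * Instead, the store below narrows each element twice (32 -> 16 -> 8 bits)
+   * with chained vncvt converts; the truncation is safe because every element
+   * has just been checked to be at most 255.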
*/ + __riscv_vse8_v_u8m2((uint8_t*)dst, __riscv_vncvt_x_x_w_u8m2(__riscv_vncvt_x_x_w_u16m4(v, vl), vl), vl); + } + return result(error_code::SUCCESS, src - beg); +} + +simdutf_warn_unused size_t implementation::convert_valid_utf32_to_latin1(const char32_t *src, size_t len, char *dst) const noexcept { + return convert_utf32_to_latin1(src, len, dst); +} + +simdutf_warn_unused result implementation::convert_utf32_to_utf8_with_errors(const char32_t *src, size_t len, char *dst) const noexcept { + size_t n = len; + const char32_t *srcBeg = src; + const char *dstBeg = dst; + size_t vl8m4 = __riscv_vsetvlmax_e8m4(); + vbool2_t m4mulp2 = __riscv_vmseq_vx_u8m4_b2(__riscv_vand_vx_u8m4(__riscv_vid_v_u8m4(vl8m4), 3, vl8m4), 2, vl8m4); + + for (size_t vl, vlOut; n > 0; ) { + vl = __riscv_vsetvl_e32m4(n); + + vuint32m4_t v = __riscv_vle32_v_u32m4((uint32_t const*)src, vl); + vbool8_t m234 = __riscv_vmsgtu_vx_u32m4_b8(v, 0x80-1, vl); + vuint16m2_t vn = __riscv_vncvt_x_x_w_u16m2(v, vl); + + if (__riscv_vfirst_m_b8(m234, vl) < 0) { /* 1 byte utf8 */ + vlOut = vl; + __riscv_vse8_v_u8m1((uint8_t*)dst, __riscv_vncvt_x_x_w_u8m1(vn, vlOut), vlOut); + n -= vl, src += vl, dst += vlOut; + continue; + } + + vbool8_t m34 = __riscv_vmsgtu_vx_u32m4_b8(v, 0x800-1, vl); + + if (__riscv_vfirst_m_b8(m34,vl) < 0) { /* 1/2 byte utf8 */ + /* 0: [ aaa|aabbbbbb] + * 1: [aabbbbbb| ] vsll 8 + * 2: [ | aaaaa] vsrl 6 + * 3: [00111111|00111111] + * 4: [ bbbbbb|000aaaaa] (1|2)&3 + * 5: [10000000|11000000] + * 6: [10bbbbbb|110aaaaa] 4|5 */ + vuint16m2_t twoByte = + __riscv_vand_vx_u16m2(__riscv_vor_vv_u16m2( + __riscv_vsll_vx_u16m2(vn, 8, vl), + __riscv_vsrl_vx_u16m2(vn, 6, vl), + vl), 0b0011111100111111, vl); + vuint16m2_t vout16 = __riscv_vor_vx_u16m2_mu(m234, vn, twoByte, 0b1000000011000000, vl); + vuint8m2_t vout = __riscv_vreinterpret_v_u16m2_u8m2(vout16); + + /* Every high byte that is zero should be compressed + * low bytes should never be compressed, so we set them + * to all ones, and then create a non-zero bytes mask */ + vbool4_t mcomp = __riscv_vmsne_vx_u8m2_b4(__riscv_vreinterpret_v_u16m2_u8m2(__riscv_vor_vx_u16m2(vout16, 0xFF, vl)), 0, vl*2); + vlOut = __riscv_vcpop_m_b4(mcomp, vl*2); + + vout = __riscv_vcompress_vm_u8m2(vout, mcomp, vl*2); + __riscv_vse8_v_u8m2((uint8_t*)dst, vout, vlOut); + + n -= vl, src += vl, dst += vlOut; + continue; + } + + vbool8_t sur = __riscv_vmseq_vx_u32m4_b8(__riscv_vand_vx_u32m4(v, 0xFFFFF800, vl), 0xD800, vl); + long idx = __riscv_vfirst_m_b8(sur, vl); + if (idx >= 0) return result(error_code::SURROGATE, src - srcBeg + idx); + + vbool8_t m4 = __riscv_vmsgtu_vx_u32m4_b8(v, 0x10000-1, vl); + long first = __riscv_vfirst_m_b8(m4, vl); + size_t tail = vl - first; + vl = first < 0 ? 
vl : first; + + if (vl > 0) { /* 1/2/3 byte utf8 */ + /* vn: [aaaabbbb|bbcccccc] + * v1: [0bcccccc| ] vsll 8 + * v1: [10cccccc| ] vsll 8 & 0b00111111 | 0b10000000 + * v2: [ |110bbbbb] vsrl 6 & 0b00111111 | 0b11000000 + * v2: [ |10bbbbbb] vsrl 6 & 0b00111111 | 0b10000000 + * v3: [ |1110aaaa] vsrl 12 | 0b11100000 + * 1: [00000000|0bcccccc|00000000|00000000] => [0bcccccc] + * 2: [00000000|10cccccc|110bbbbb|00000000] => [110bbbbb] [10cccccc] + * 3: [00000000|10cccccc|10bbbbbb|1110aaaa] => [1110aaaa] [10bbbbbb] [10cccccc] + */ + vuint16m2_t v1, v2, v3, v12; + v1 = __riscv_vor_vx_u16m2_mu(m234, vn, __riscv_vand_vx_u16m2(vn, 0b00111111, vl), 0b10000000, vl); + v1 = __riscv_vsll_vx_u16m2(v1, 8, vl); + + v2 = __riscv_vor_vx_u16m2(__riscv_vand_vx_u16m2(__riscv_vsrl_vx_u16m2(vn, 6, vl), 0b00111111, vl), 0b10000000, vl); + v2 = __riscv_vor_vx_u16m2_mu(__riscv_vmnot_m_b8(m34,vl), v2, v2, 0b01000000, vl); + v3 = __riscv_vor_vx_u16m2(__riscv_vsrl_vx_u16m2(vn, 12, vl), 0b11100000, vl); + v12 = __riscv_vor_vv_u16m2_mu(m234, v1, v1, v2, vl); + + vuint32m4_t w12 = __riscv_vwmulu_vx_u32m4(v12, 1<<8, vl); + vuint32m4_t w123 = __riscv_vwaddu_wv_u32m4_mu(m34, w12, w12, v3, vl); + vuint8m4_t vout = __riscv_vreinterpret_v_u32m4_u8m4(w123); + + vbool2_t mcomp = __riscv_vmor_mm_b2(m4mulp2, __riscv_vmsne_vx_u8m4_b2(vout, 0, vl*4), vl*4); + vlOut = __riscv_vcpop_m_b2(mcomp, vl*4); + + vout = __riscv_vcompress_vm_u8m4(vout, mcomp, vl*4); + __riscv_vse8_v_u8m4((uint8_t*)dst, vout, vlOut); + + n -= vl, src += vl, dst += vlOut; + } + + if (tail) while (n) { + uint32_t word = src[0]; + if (word < 0x10000) break; + if (word > 0x10FFFF) return result(error_code::TOO_LARGE, src - srcBeg); + *dst++ = (uint8_t)(( word>>18) | 0b11110000); + *dst++ = (uint8_t)(((word>>12) & 0b111111) | 0b10000000); + *dst++ = (uint8_t)(((word>> 6) & 0b111111) | 0b10000000); + *dst++ = (uint8_t)(( word & 0b111111) | 0b10000000); + ++src; + --n; + } + } + + return result(error_code::SUCCESS, dst - dstBeg); +} + +simdutf_warn_unused size_t implementation::convert_utf32_to_utf8(const char32_t *src, size_t len, char *dst) const noexcept { + result res = convert_utf32_to_utf8_with_errors(src, len, dst); + return res.error == error_code::SUCCESS ? 
res.count : 0; +} + +simdutf_warn_unused size_t implementation::convert_valid_utf32_to_utf8(const char32_t *src, size_t len, char *dst) const noexcept { + return convert_utf32_to_utf8(src, len, dst); +} + +template +simdutf_really_inline static result rvv_convert_utf32_to_utf16_with_errors(const char32_t *src, size_t len, char16_t *dst) { + size_t vl8m2 = __riscv_vsetvlmax_e8m2(); + vbool4_t m4even = __riscv_vmseq_vx_u8m2_b4(__riscv_vand_vx_u8m2(__riscv_vid_v_u8m2(vl8m2), 1, vl8m2), 0, vl8m2); + const char16_t *dstBeg = dst; + const char32_t *srcBeg = src; + for (size_t vl, vlOut; len > 0; len -= vl, src += vl, dst += vlOut) { + vl = __riscv_vsetvl_e32m4(len); + vuint32m4_t v = __riscv_vle32_v_u32m4((uint32_t*)src, vl); + vuint32m4_t off = __riscv_vadd_vx_u32m4(v, 0xFFFF2000, vl); + long idx; + idx = __riscv_vfirst_m_b8(__riscv_vmsgtu_vx_u32m4_b8(off, 0xFFFFF7FF, vl), vl); + if (idx >= 0) return result(error_code::SURROGATE, src - srcBeg + idx); + idx = __riscv_vfirst_m_b8(__riscv_vmsgtu_vx_u32m4_b8(v, 0xFFFF, vl), vl); + if (idx < 0) { + vlOut = vl; + vuint16m2_t n = simdutf_byteflip(__riscv_vncvt_x_x_w_u16m2(v, vlOut), vlOut); + __riscv_vse16_v_u16m2((uint16_t*)dst, n, vlOut); + continue; + } + idx = __riscv_vfirst_m_b8(__riscv_vmsgtu_vx_u32m4_b8(v, 0x10FFFF, vl), vl); + if (idx >= 0) return result(error_code::TOO_LARGE, src - srcBeg + idx); + vlOut = rvv_utf32_store_utf16_m4((uint16_t*)dst, v, vl, m4even); + } + return result(error_code::SUCCESS, dst - dstBeg); +} + +simdutf_warn_unused size_t implementation::convert_utf32_to_utf16le(const char32_t *src, size_t len, char16_t *dst) const noexcept { + result res = convert_utf32_to_utf16le_with_errors(src, len, dst); + return res.error == error_code::SUCCESS ? res.count : 0; +} + +simdutf_warn_unused size_t implementation::convert_utf32_to_utf16be(const char32_t *src, size_t len, char16_t *dst) const noexcept { + result res = convert_utf32_to_utf16be_with_errors(src, len, dst); + return res.error == error_code::SUCCESS ? 
res.count : 0; +} + +simdutf_warn_unused result implementation::convert_utf32_to_utf16le_with_errors(const char32_t *src, size_t len, char16_t *dst) const noexcept { + return rvv_convert_utf32_to_utf16_with_errors(src, len, dst); +} + +simdutf_warn_unused result implementation::convert_utf32_to_utf16be_with_errors(const char32_t *src, size_t len, char16_t *dst) const noexcept { + if (supports_zvbb()) + return rvv_convert_utf32_to_utf16_with_errors(src, len, dst); + else + return rvv_convert_utf32_to_utf16_with_errors(src, len, dst); +} + +template +simdutf_really_inline static size_t rvv_convert_valid_utf32_to_utf16(const char32_t *src, size_t len, char16_t *dst) { + size_t vl8m2 = __riscv_vsetvlmax_e8m2(); + vbool4_t m4even = __riscv_vmseq_vx_u8m2_b4(__riscv_vand_vx_u8m2(__riscv_vid_v_u8m2(vl8m2), 1, vl8m2), 0, vl8m2); + char16_t *dstBeg = dst; + for (size_t vl, vlOut; len > 0; len -= vl, src += vl, dst += vlOut) { + vl = __riscv_vsetvl_e32m4(len); + vuint32m4_t v = __riscv_vle32_v_u32m4((uint32_t*)src, vl); + if (__riscv_vfirst_m_b8(__riscv_vmsgtu_vx_u32m4_b8(v, 0xFFFF, vl), vl) < 0) { + vlOut = vl; + vuint16m2_t n = simdutf_byteflip(__riscv_vncvt_x_x_w_u16m2(v, vlOut), vlOut); + __riscv_vse16_v_u16m2((uint16_t*)dst, n, vlOut); + continue; + } + vlOut = rvv_utf32_store_utf16_m4((uint16_t*)dst, v, vl, m4even); + } + return dst - dstBeg; +} + +simdutf_warn_unused size_t implementation::convert_valid_utf32_to_utf16le(const char32_t *src, size_t len, char16_t *dst) const noexcept { + return rvv_convert_valid_utf32_to_utf16(src, len, dst); +} + +simdutf_warn_unused size_t implementation::convert_valid_utf32_to_utf16be(const char32_t *src, size_t len, char16_t *dst) const noexcept { + if (supports_zvbb()) + return rvv_convert_valid_utf32_to_utf16(src, len, dst); + else + return rvv_convert_valid_utf32_to_utf16(src, len, dst); +} +/* end file src/rvv/rvv_utf32_to.inl.cpp */ + +simdutf_warn_unused int implementation::detect_encodings(const char *input, size_t length) const noexcept { + // If there is a BOM, then we trust it. 
+ auto bom_encoding = simdutf::BOM::check_bom(input, length); + if (bom_encoding != encoding_type::unspecified) + return bom_encoding; + int out = 0; + if (validate_utf8(input, length)) + out |= encoding_type::UTF8; + if (length % 2 == 0) { + if (validate_utf16(reinterpret_cast(input), length/2)) + out |= encoding_type::UTF16_LE; + } + if (length % 4 == 0) { + if (validate_utf32(reinterpret_cast(input), length/4)) + out |= encoding_type::UTF32_LE; + } + + return out; +} + +template +simdutf_really_inline static void rvv_change_endianness_utf16(const char16_t *src, size_t len, char16_t *dst) { + for (size_t vl; len > 0; len -= vl, src += vl, dst += vl) { + vl = __riscv_vsetvl_e16m8(len); + vuint16m8_t v = __riscv_vle16_v_u16m8((uint16_t*)src, vl); + __riscv_vse16_v_u16m8((uint16_t *)dst, simdutf_byteflip(v, vl), vl); + } +} + +void implementation::change_endianness_utf16(const char16_t *src, size_t len, char16_t *dst) const noexcept { + if (supports_zvbb()) + return rvv_change_endianness_utf16(src, len, dst); + else + return rvv_change_endianness_utf16(src, len, dst); +} + +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { + return scalar::base64::base64_to_binary(input, length, output); +} + +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { + return scalar::base64::base64_length_from_binary(length); +} + +size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { + return scalar::base64::binary_to_base64(input, length, output); +} +} // namespace rvv +} // namespace simdutf + +/* begin file src/simdutf/rvv/end.h */ +#if SIMDUTF_CAN_ALWAYS_RUN_RVV +// nothing needed. 
+#else +SIMDUTF_UNTARGET_REGION +#endif + +/* end file src/simdutf/rvv/end.h */ +/* end file src/rvv/implementation.cpp */ +#endif #if SIMDUTF_IMPLEMENTATION_WESTMERE /* begin file src/westmere/implementation.cpp */ /* begin file src/simdutf/westmere/begin.h */ @@ -28999,7 +32631,7 @@ std::pair sse_convert_latin1_to_utf8( // each latin1 takes 1-2 utf8 bytes // slow path writes useful 8-15 bytes twice (eagerly writes 16 bytes and then adjust the pointer) - // so the last write can exceed the utf8_output size by 8-1 bytes + // so the last write can exceed the utf8_output size by 8-1 bytes // by reserving 8 extra input bytes, we expect the output to have 8-16 bytes free while (latin_input + 16 + 8 <= end) { // Load 16 Latin1 characters (16 bytes) into a 128-bit register @@ -29012,7 +32644,7 @@ std::pair sse_convert_latin1_to_utf8( utf8_output += 16; continue; } - + // assuming a/b are bytes and A/B are uint16 of the same value // aaaa_aaaa_bbbb_bbbb -> AAAA_AAAA @@ -29079,7 +32711,7 @@ std::pair sse_convert_latin1_to_utf32(const char* buf, s __m128i in_shifted2 = _mm_srli_si128(in, 8); __m128i in_shifted3 = _mm_srli_si128(in, 12); - // expand 8-bit to 32-bit unit + // expand 8-bit to 32-bit unit __m128i out1 = _mm_cvtepu8_epi32(in); __m128i out2 = _mm_cvtepu8_epi32(in_shifted1); __m128i out3 = _mm_cvtepu8_epi32(in_shifted2); @@ -30260,7 +33892,7 @@ std::pair sse_convert_utf32_to_utf8(const char32_t* buf, const __m128i v_c080 = _mm_set1_epi16((uint16_t)0xc080); //1100 0000 1000 0000 const __m128i v_ff80 = _mm_set1_epi16((uint16_t)0xff80); //1111 1111 1000 0000 const __m128i v_ffff0000 = _mm_set1_epi32((uint32_t)0xffff0000); //1111 1111 1111 1111 0000 0000 0000 0000 - const __m128i v_7fffffff = _mm_set1_epi32((uint32_t)0x7fffffff); //0111 1111 1111 1111 1111 1111 1111 1111 + const __m128i v_7fffffff = _mm_set1_epi32((uint32_t)0x7fffffff); //0111 1111 1111 1111 1111 1111 1111 1111 __m128i running_max = _mm_setzero_si128(); __m128i forbidden_bytemask = _mm_setzero_si128(); const size_t safety_margin = 12; // to avoid overruns, see issue https://github.com/simdutf/simdutf/issues/92 @@ -30275,15 +33907,15 @@ std::pair sse_convert_utf32_to_utf8(const char32_t* buf, // Pack 32-bit UTF-32 code units to 16-bit UTF-16 code units with unsigned saturation __m128i in_16 = _mm_packus_epi32( - _mm_and_si128(in, v_7fffffff), + _mm_and_si128(in, v_7fffffff), _mm_and_si128(nextin, v_7fffffff) - );//in this context pack the two __m128 into a single + );//in this context pack the two __m128 into a single //By ensuring the highest bit is set to 0(&v_7fffffff), we're making sure all values are interpreted as non-negative, or specifically, the values are within the range of valid Unicode code points. - //remember : having leading byte 0 means a positive number by the two complements system. Unicode is well beneath the range where you'll start getting issues so that's OK. + //remember : having leading byte 0 means a positive number by the two complements system. Unicode is well beneath the range where you'll start getting issues so that's OK. // Try to apply UTF-16 => UTF-8 from ./sse_convert_utf16_to_utf8.cpp - // Check for ASCII fast path + // Check for ASCII fast path // ASCII fast path!!!! // We eagerly load another 32 bytes, hoping that they will be ASCII too. 
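The hunk above only reflows whitespace, but its comments carry the main idea: masking each lane with 0x7fffffff before _mm_packus_epi32 makes the pack saturate instead of wrap, so BMP code points survive the 32-bit to 16-bit narrowing unchanged while anything above the BMP collapses to 0xFFFF in the packed vector. A minimal standalone sketch of that behaviour, with arbitrarily chosen sample values:

#include <smmintrin.h> // SSE4.1 (_mm_packus_epi32); Westmere has it
#include <cstdint>
#include <cstdio>

int main() {
  const __m128i v_7fffffff = _mm_set1_epi32(0x7fffffff);
  // Sample UTF-32 code points, chosen only for illustration.
  __m128i in     = _mm_setr_epi32(0x41, 0x20AC, 0x1F600, 0x10FFFF);
  __m128i nextin = _mm_setr_epi32(0x7F, 0x0800, 0xFFFF, 0x10000);
  // Clear bit 31 so every lane is non-negative, then pack with unsigned
  // saturation: values <= 0xFFFF pass through, larger ones become 0xFFFF.
  __m128i in_16 = _mm_packus_epi32(_mm_and_si128(in, v_7fffffff),
                                   _mm_and_si128(nextin, v_7fffffff));
  uint16_t out[8];
  _mm_storeu_si128(reinterpret_cast<__m128i *>(out), in_16);
  for (int i = 0; i < 8; i++) printf("0x%x ", unsigned(out[i]));
  printf("\n"); // prints: 0x41 0x20ac 0xffff 0xffff 0x7f 0x800 0xffff 0xffff
}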
@@ -30322,7 +33954,7 @@ std::pair sse_convert_utf32_to_utf8(const char32_t* buf, } // no bits set above 7th bit -- find out all the ASCII characters - const __m128i one_byte_bytemask = _mm_cmpeq_epi16( // this takes four bytes at a time and compares: + const __m128i one_byte_bytemask = _mm_cmpeq_epi16( // this takes four bytes at a time and compares: _mm_and_si128(in_16, v_ff80), // the vector that get only the first 9 bits of each 16-bit/2-byte units v_0000 // ); // they should be all zero if they are ASCII. E.g. ASCII in UTF32 is of format 0000 0000 0000 0XXX XXXX @@ -30346,11 +33978,11 @@ std::pair sse_convert_utf32_to_utf8(const char32_t* buf, // t1 = [000a|aaaa|0000|0000] const __m128i t1 = _mm_and_si128(t0, v_1f00); // potentital first utf8 byte // t2 = [0000|0000|00bb|bbbb] - const __m128i t2 = _mm_and_si128(in_16, v_003f);// potential second utf8 byte + const __m128i t2 = _mm_and_si128(in_16, v_003f);// potential second utf8 byte // t3 = [000a|aaaa|00bb|bbbb] - const __m128i t3 = _mm_or_si128(t1, t2); // first and second potential utf8 byte together + const __m128i t3 = _mm_or_si128(t1, t2); // first and second potential utf8 byte together // t4 = [110a|aaaa|10bb|bbbb] - const __m128i t4 = _mm_or_si128(t3, v_c080); // t3 | 1100 0000 1000 0000 = full potential 2-byte utf8 unit + const __m128i t4 = _mm_or_si128(t3, v_c080); // t3 | 1100 0000 1000 0000 = full potential 2-byte utf8 unit // 2. merge ASCII and 2-byte codewords const __m128i utf8_unpacked = _mm_blendv_epi8(t4, in_16, one_byte_bytemask); @@ -30897,6 +34529,511 @@ std::pair sse_convert_utf32_to_utf16_with_errors(const char32 return std::make_pair(result(error_code::SUCCESS, buf - start), utf16_output); } /* end file src/westmere/sse_convert_utf32_to_utf16.cpp */ +/* begin file src/westmere/sse_base64.cpp */ +/** + * References and further reading: + * + * Wojciech Muła, Daniel Lemire, Base64 encoding and decoding at almost the + * speed of a memory copy, Software: Practice and Experience 50 (2), 2020. + * https://arxiv.org/abs/1910.05109 + * + * Wojciech Muła, Daniel Lemire, Faster Base64 Encoding and Decoding using AVX2 + * Instructions, ACM Transactions on the Web 12 (3), 2018. + * https://arxiv.org/abs/1704.00605 + * + * Simon Josefsson. 2006. The Base16, Base32, and Base64 Data Encodings. + * https://tools.ietf.org/html/rfc4648. (2006). Internet Engineering Task Force, + * Request for Comments: 4648. + * + * Alfred Klomp. 2014a. Fast Base64 encoding/decoding with SSE vectorization. + * http://www.alfredklomp.com/programming/sse-base64/. (2014). + * + * Alfred Klomp. 2014b. Fast Base64 stream encoder/decoder in C99, with SIMD + * acceleration. https://github.com/aklomp/base64. (2014). + * + * Hanson Char. 2014. A Fast and Correct Base 64 Codec. (2014). + * https://aws.amazon.com/blogs/developer/a-fast-and-correct-base-64-codec/ + * + * Nick Kopp. 2013. Base64 Encoding on a GPU. + * https://www.codeproject.com/Articles/276993/Base-Encoding-on-a-GPU. (2013). + */ + +__m128i lookup_pshufb_improved(const __m128i input) { + // credit: Wojciech Muła + // reduce 0..51 -> 0 + // 52..61 -> 1 .. 10 + // 62 -> 11 + // 63 -> 12 + __m128i result = _mm_subs_epu8(input, _mm_set1_epi8(51)); + + // distinguish between ranges 0..25 and 26..51: + // 0 .. 25 -> remains 0 + // 26 .. 
51 -> becomes 13 + const __m128i less = _mm_cmpgt_epi8(_mm_set1_epi8(26), input); + result = _mm_or_si128(result, _mm_and_si128(less, _mm_set1_epi8(13))); + + const __m128i shift_LUT = _mm_setr_epi8( + 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '0' - 52, '0' - 52, '0' - 52, '+' - 62, '/' - 63, 'A', 0, 0); + + // read shift + result = _mm_shuffle_epi8(shift_LUT, result); + + return _mm_add_epi8(result, input); +} + +size_t encode_base64(char *dst, const char *src, size_t srclen) { + // credit: Wojciech Muła + // SSE (lookup: pshufb improved unrolled) + const uint8_t *input = (const uint8_t *)src; + + uint8_t *out = (uint8_t *)dst; + const __m128i shuf = + _mm_set_epi8(10, 11, 9, 10, 7, 8, 6, 7, 4, 5, 3, 4, 1, 2, 0, 1); + + size_t i = 0; + for (; i + 52 <= srclen; i += 48) { + __m128i in0 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 0)); + __m128i in1 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 1)); + __m128i in2 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 2)); + __m128i in3 = _mm_loadu_si128( + reinterpret_cast(input + i + 4 * 3 * 3)); + + in0 = _mm_shuffle_epi8(in0, shuf); + in1 = _mm_shuffle_epi8(in1, shuf); + in2 = _mm_shuffle_epi8(in2, shuf); + in3 = _mm_shuffle_epi8(in3, shuf); + + const __m128i t0_0 = _mm_and_si128(in0, _mm_set1_epi32(0x0fc0fc00)); + const __m128i t0_1 = _mm_and_si128(in1, _mm_set1_epi32(0x0fc0fc00)); + const __m128i t0_2 = _mm_and_si128(in2, _mm_set1_epi32(0x0fc0fc00)); + const __m128i t0_3 = _mm_and_si128(in3, _mm_set1_epi32(0x0fc0fc00)); + + const __m128i t1_0 = _mm_mulhi_epu16(t0_0, _mm_set1_epi32(0x04000040)); + const __m128i t1_1 = _mm_mulhi_epu16(t0_1, _mm_set1_epi32(0x04000040)); + const __m128i t1_2 = _mm_mulhi_epu16(t0_2, _mm_set1_epi32(0x04000040)); + const __m128i t1_3 = _mm_mulhi_epu16(t0_3, _mm_set1_epi32(0x04000040)); + + const __m128i t2_0 = _mm_and_si128(in0, _mm_set1_epi32(0x003f03f0)); + const __m128i t2_1 = _mm_and_si128(in1, _mm_set1_epi32(0x003f03f0)); + const __m128i t2_2 = _mm_and_si128(in2, _mm_set1_epi32(0x003f03f0)); + const __m128i t2_3 = _mm_and_si128(in3, _mm_set1_epi32(0x003f03f0)); + + const __m128i t3_0 = _mm_mullo_epi16(t2_0, _mm_set1_epi32(0x01000010)); + const __m128i t3_1 = _mm_mullo_epi16(t2_1, _mm_set1_epi32(0x01000010)); + const __m128i t3_2 = _mm_mullo_epi16(t2_2, _mm_set1_epi32(0x01000010)); + const __m128i t3_3 = _mm_mullo_epi16(t2_3, _mm_set1_epi32(0x01000010)); + + const __m128i input0 = _mm_or_si128(t1_0, t3_0); + const __m128i input1 = _mm_or_si128(t1_1, t3_1); + const __m128i input2 = _mm_or_si128(t1_2, t3_2); + const __m128i input3 = _mm_or_si128(t1_3, t3_3); + + _mm_storeu_si128(reinterpret_cast<__m128i *>(out), + lookup_pshufb_improved(input0)); + out += 16; + + _mm_storeu_si128(reinterpret_cast<__m128i *>(out), + lookup_pshufb_improved(input1)); + out += 16; + + _mm_storeu_si128(reinterpret_cast<__m128i *>(out), + lookup_pshufb_improved(input2)); + out += 16; + + _mm_storeu_si128(reinterpret_cast<__m128i *>(out), + lookup_pshufb_improved(input3)); + out += 16; + } + for (; i + 16 <= srclen; i += 12) { + + __m128i in = _mm_loadu_si128(reinterpret_cast(input + i)); + + // bytes from groups A, B and C are needed in separate 32-bit lanes + // in = [DDDD|CCCC|BBBB|AAAA] + // + // an input triplet has layout + // [????????|ccdddddd|bbbbcccc|aaaaaabb] + // byte 3 byte 2 byte 1 byte 0 -- byte 3 comes from the next + // triplet + // + // shuffling changes the order of bytes: 1, 0, 2, 1 + // [bbbbcccc|ccdddddd|aaaaaabb|bbbbcccc] + // ^^^^ ^^^^^^^^ 
^^^^^^^^ ^^^^ + // processed bits + in = _mm_shuffle_epi8(in, shuf); + + // unpacking + + // t0 = [0000cccc|cc000000|aaaaaa00|00000000] + const __m128i t0 = _mm_and_si128(in, _mm_set1_epi32(0x0fc0fc00)); + // t1 = [00000000|00cccccc|00000000|00aaaaaa] + // (c * (1 << 10), a * (1 << 6)) >> 16 (note: an unsigned + // multiplication) + const __m128i t1 = _mm_mulhi_epu16(t0, _mm_set1_epi32(0x04000040)); + + // t2 = [00000000|00dddddd|000000bb|bbbb0000] + const __m128i t2 = _mm_and_si128(in, _mm_set1_epi32(0x003f03f0)); + // t3 = [00dddddd|00000000|00bbbbbb|00000000]( + // (d * (1 << 8), b * (1 << 4)) + const __m128i t3 = _mm_mullo_epi16(t2, _mm_set1_epi32(0x01000010)); + + // res = [00dddddd|00cccccc|00bbbbbb|00aaaaaa] = t1 | t3 + const __m128i indices = _mm_or_si128(t1, t3); + + _mm_storeu_si128(reinterpret_cast<__m128i *>(out), + lookup_pshufb_improved(indices)); + out += 16; + } + + return i / 3 * 4 + + scalar::base64::tail_encode_base64((char *)out, src + i, srclen - i); +} +static inline void compress(__m128i data, uint16_t mask, char *output) { + if (mask == 0) { + _mm_storeu_si128(reinterpret_cast<__m128i *>(output), data); + return; + } + + // this particular implementation was inspired by work done by @animetosho + // we do it in two steps, first 8 bytes and then second 8 bytes + uint8_t mask1 = uint8_t(mask); // least significant 8 bits + uint8_t mask2 = uint8_t(mask >> 8); // most significant 8 bits + // next line just loads the 64-bit values thintable_epi8[mask1] and + // thintable_epi8[mask2] into a 128-bit register, using only + // two instructions on most compilers. + + __m128i shufmask = _mm_set_epi64x(tables::base64::thintable_epi8[mask2], + tables::base64::thintable_epi8[mask1]); + // we increment by 0x08 the second half of the mask + shufmask = + _mm_add_epi8(shufmask, _mm_set_epi32(0x08080808, 0x08080808, 0, 0)); + // this is the version "nearly pruned" + __m128i pruned = _mm_shuffle_epi8(data, shufmask); + // we still need to put the two halves together. + // we compute the popcount of the first half: + int pop1 = tables::base64::BitsSetTable256mul2[mask1]; + // then load the corresponding mask, what it does is to write + // only the first pop1 bytes from the first 8 bytes, and then + // it fills in with the bytes from the second 8 bytes + some filling + // at the end. 
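+  // Illustration with an assumed mask value: if mask1 == 0b00000100, only byte 2
+  // of the low half is flagged for removal, so the thintable shuffle packs bytes
+  // 0,1,3,4,5,6,7 to the front of that half, pop1 == 2, and the combine shuffle
+  // below glues the compacted high half right after those 7 surviving bytes.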
+ __m128i compactmask = _mm_loadu_si128(reinterpret_cast( + tables::base64::pshufb_combine_table + pop1 * 8)); + __m128i answer = _mm_shuffle_epi8(pruned, compactmask); + _mm_storeu_si128(reinterpret_cast<__m128i *>(output), answer); +} + +struct block64 { + __m128i chunks[4]; +}; + +static inline uint16_t to_base64_mask(__m128i *src, bool *error) { + const __m128i ascii_space_tbl = + _mm_setr_epi8(0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9, 0xa, 0x0, + 0x0, 0xd, 0x0, 0x0); + // credit: aqrit + const __m128i delta_asso = + _mm_setr_epi8(0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0F, 0x00, 0x0F); + const __m128i delta_values = + _mm_setr_epi8(int8_t(0x00), int8_t(0x00), int8_t(0x00), int8_t(0x13), + int8_t(0x04), int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), + int8_t(0xB9), int8_t(0x00), int8_t(0x10), int8_t(0xC3), + int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), int8_t(0xB9)); + const __m128i check_asso = + _mm_setr_epi8(0x0D, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x03, 0x07, 0x0B, 0x0B, 0x0B, 0x0F); + const __m128i check_values = + _mm_setr_epi8(int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), + int8_t(0xCF), int8_t(0xBF), int8_t(0xD5), int8_t(0xA6), + int8_t(0xB5), int8_t(0x86), int8_t(0xD1), int8_t(0x80), + int8_t(0xB1), int8_t(0x80), int8_t(0x91), int8_t(0x80)); + const __m128i shifted = _mm_srli_epi32(*src, 3); + + const __m128i delta_hash = + _mm_avg_epu8(_mm_shuffle_epi8(delta_asso, *src), shifted); + const __m128i check_hash = + _mm_avg_epu8(_mm_shuffle_epi8(check_asso, *src), shifted); + + const __m128i out = + _mm_adds_epi8(_mm_shuffle_epi8(delta_values, delta_hash), *src); + const __m128i chk = + _mm_adds_epi8(_mm_shuffle_epi8(check_values, check_hash), *src); + const int mask = _mm_movemask_epi8(chk); + if (mask) { + __m128i ascii_space = + _mm_cmpeq_epi8(_mm_shuffle_epi8(ascii_space_tbl, *src), *src); + *error |= (mask != _mm_movemask_epi8(ascii_space)); + } + *src = out; + return (uint16_t)mask; +} +static inline uint64_t to_base64_mask(block64 *b, bool *error) { + *error = 0; + uint64_t m0 = to_base64_mask(&b->chunks[0], error); + uint64_t m1 = to_base64_mask(&b->chunks[1], error); + uint64_t m2 = to_base64_mask(&b->chunks[2], error); + uint64_t m3 = to_base64_mask(&b->chunks[3], error); + return m0 | (m1 << 16) | (m2 << 32) | (m3 << 48); +} + +static inline void copy_block(block64 *b, char *output) { + _mm_storeu_si128(reinterpret_cast<__m128i *>(output), b->chunks[0]); + _mm_storeu_si128(reinterpret_cast<__m128i *>(output + 16), b->chunks[1]); + _mm_storeu_si128(reinterpret_cast<__m128i *>(output + 32), b->chunks[2]); + _mm_storeu_si128(reinterpret_cast<__m128i *>(output + 48), b->chunks[3]); +} + +static inline uint64_t compress_block(block64 *b, uint64_t mask, char *output) { + uint64_t nmask = ~mask; + compress(b->chunks[0], uint16_t(mask), output); + compress(b->chunks[1], uint16_t(mask >> 16), + output + _mm_popcnt_u64(nmask & 0xFFFF)); + compress(b->chunks[2], uint16_t(mask >> 32), + output + _mm_popcnt_u64(nmask & 0xFFFFFFFF)); + compress(b->chunks[3], uint16_t(mask >> 48), + output + _mm_popcnt_u64(nmask & 0xFFFFFFFFFFFFULL)); + return _mm_popcnt_u64(nmask); +} + +static inline void load_block(block64 *b, const char *src) { + b->chunks[0] = _mm_loadu_si128(reinterpret_cast(src)); + b->chunks[1] = _mm_loadu_si128(reinterpret_cast(src + 16)); + b->chunks[2] = _mm_loadu_si128(reinterpret_cast(src + 32)); + b->chunks[3] = _mm_loadu_si128(reinterpret_cast(src + 48)); +} + +static inline void 
base64_decode(char *out, __m128i str) { + // credit: aqrit + + const __m128i pack_shuffle = + _mm_setr_epi8(2, 1, 0, 6, 5, 4, 10, 9, 8, 14, 13, 12, -1, -1, -1, -1); + + const __m128i t0 = _mm_maddubs_epi16(str, _mm_set1_epi32(0x01400140)); + const __m128i t1 = _mm_madd_epi16(t0, _mm_set1_epi32(0x00011000)); + const __m128i t2 = _mm_shuffle_epi8(t1, pack_shuffle); + // Store the output: + // this writes 16 bytes, but we only need 12. + _mm_storeu_si128((__m128i *)out, t2); +} +// decode 64 bytes and output 48 bytes +static inline void base64_decode_block(char *out, const char *src) { + base64_decode(out, _mm_loadu_si128(reinterpret_cast(src))); + base64_decode(out + 12, + _mm_loadu_si128(reinterpret_cast(src + 16))); + base64_decode(out + 24, + _mm_loadu_si128(reinterpret_cast(src + 32))); + base64_decode(out + 36, + _mm_loadu_si128(reinterpret_cast(src + 48))); +} +static inline void base64_decode_block_safe(char *out, const char *src) { + base64_decode(out, _mm_loadu_si128(reinterpret_cast(src))); + base64_decode(out + 12, + _mm_loadu_si128(reinterpret_cast(src + 16))); + base64_decode(out + 24, + _mm_loadu_si128(reinterpret_cast(src + 32))); + char buffer[16]; + base64_decode(buffer, + _mm_loadu_si128(reinterpret_cast(src + 48))); + std::memcpy(out + 36, buffer, 12); +} +static inline void base64_decode_block(char *out, block64 *b) { + base64_decode(out, b->chunks[0]); + base64_decode(out + 12, b->chunks[1]); + base64_decode(out + 24, b->chunks[2]); + base64_decode(out + 36, b->chunks[3]); +} +static inline void base64_decode_block_safe(char *out, block64 *b) { + base64_decode(out, b->chunks[0]); + base64_decode(out + 12, b->chunks[1]); + base64_decode(out + 24, b->chunks[2]); + char buffer[16]; + base64_decode(buffer, b->chunks[3]); + std::memcpy(out + 36, buffer, 12); +} + +result compress_decode_base64(char *dst, const char *src, size_t srclen) { + size_t equalsigns = 0; + if (srclen > 0 && src[srclen - 1] == '=') { + srclen--; + equalsigns = 1; + if (srclen > 0 && src[srclen - 1] == '=') { + srclen--; + equalsigns = 2; + } + } + char *end_of_safe_64byte_zone = + (srclen + 3) / 4 * 3 >= 63 ? dst + (srclen + 3) / 4 * 3 - 63 : dst; + + const char *const srcinit = src; + const char *const dstinit = dst; + const char *const srcend = src + srclen; + + constexpr size_t block_size = 6; + static_assert(block_size >= 2, "block should of size 2 or more"); + char buffer[block_size * 64]; + char *bufferptr = buffer; + if (srclen >= 64) { + const char *const srcend64 = src + srclen - 64; + while (src <= srcend64) { + block64 b; + load_block(&b, src); + src += 64; + bool error = false; + uint64_t badcharmask = to_base64_mask(&b, &error); + if (error) { + src -= 64; + while (src < srcend && + tables::base64::to_base64_value[uint8_t(*src)] <= 64) { + src++; + } + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + if (badcharmask != 0) { + // optimization opportunity: check for simple masks like those made of + // continuous 1s followed by continuous 0s. And masks containing a + // single bad character. 
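+          // By this point any flagged byte can only be ASCII whitespace (space,
+          // tab, CR or LF); every other invalid character already returned
+          // INVALID_BASE64_CHARACTER above. We squeeze the whitespace out and
+          // accumulate the cleaned base64 bytes in the temporary buffer.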
+ bufferptr += compress_block(&b, badcharmask, bufferptr); + } else if (bufferptr != buffer) { + copy_block(&b, bufferptr); + bufferptr += 64; + } else { + if (dst >= end_of_safe_64byte_zone) { + base64_decode_block_safe(dst, &b); + } else { + base64_decode_block(dst, &b); + } + dst += 48; + } + if (bufferptr >= (block_size - 1) * 64 + buffer) { + for (size_t i = 0; i < (block_size - 2); i++) { + base64_decode_block(dst, buffer + i * 64); + dst += 48; + } + if (dst >= end_of_safe_64byte_zone) { + base64_decode_block_safe(dst, buffer + (block_size - 2) * 64); + } else { + base64_decode_block(dst, buffer + (block_size - 2) * 64); + } + dst += 48; + std::memcpy(buffer, buffer + (block_size - 1) * 64, + 64); // 64 might be too much + bufferptr -= (block_size - 1) * 64; + } + } + } + + char *buffer_start = buffer; + // Optimization note: if this is almost full, then it is worth our + // time, otherwise, we should just decode directly. + int last_block = (int)((bufferptr - buffer_start) % 64); + if (last_block != 0 && srcend - src + last_block >= 64) { + while ((bufferptr - buffer_start) % 64 != 0 && src < srcend) { + uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + *bufferptr = char(val); + if (val > 64) { + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + bufferptr += (val <= 63); + src++; + } + } + + for (; buffer_start + 64 <= bufferptr; buffer_start += 64) { + if (dst >= end_of_safe_64byte_zone) { + base64_decode_block_safe(dst, buffer_start); + } else { + base64_decode_block(dst, buffer_start); + } + dst += 48; + } + if ((bufferptr - buffer_start) % 64 != 0) { + while (buffer_start + 4 < bufferptr) { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + (uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 4); + + dst += 3; + buffer_start += 4; + } + if (buffer_start + 4 <= bufferptr) { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + (uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 3); + + dst += 3; + buffer_start += 4; + } + // we may have 1, 2 or 3 bytes left and we need to decode them so let us + // bring in src content + int leftover = int(bufferptr - buffer_start); + if (leftover > 0) { + while (leftover < 4 && src < srcend) { + uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + if (val > 64) { + return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } + buffer_start[leftover] = char(val); + leftover += (val <= 63); + src++; + } + + if (leftover == 1) { + return {BASE64_INPUT_REMAINDER, size_t(dst - dstinit)}; + } + if (leftover == 2) { + uint32_t triple = (uint32_t(buffer_start[0]) << 3 * 6) + + (uint32_t(buffer_start[1]) << 2 * 6); + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 1); + dst += 1; + } else if (leftover == 3) { + uint32_t triple = (uint32_t(buffer_start[0]) << 3 * 6) + + (uint32_t(buffer_start[1]) << 2 * 6) + + (uint32_t(buffer_start[2]) << 1 * 6); + triple = scalar::utf32::swap_bytes(triple); + + triple >>= 8; + + std::memcpy(dst, &triple, 2); + dst += 2; + } else { + uint32_t triple = ((uint32_t(uint8_t(buffer_start[0])) << 3 * 6) + + 
(uint32_t(uint8_t(buffer_start[1])) << 2 * 6) + + (uint32_t(uint8_t(buffer_start[2])) << 1 * 6) + + (uint32_t(uint8_t(buffer_start[3])) << 0 * 6)) + << 8; + triple = scalar::utf32::swap_bytes(triple); + std::memcpy(dst, &triple, 3); + dst += 3; + } + } + } + if (src < srcend + equalsigns) { + result r = scalar::base64::base64_tail_decode(dst, src, srcend - src); + if (r.error == error_code::INVALID_BASE64_CHARACTER) { + r.count += size_t(src - srcinit); + return r; + } else { + r.count += size_t(dst - dstinit); + } + return r; + } + return {SUCCESS, size_t(dst - dstinit)}; +} +/* end file src/westmere/sse_base64.cpp */ } // unnamed namespace } // namespace westmere @@ -33142,7 +37279,7 @@ simdutf_warn_unused size_t implementation::utf8_length_from_latin1(const char * __m128i input4 = _mm_loadu_si128((const __m128i *)(str + i + 3*sizeof(__m128i))); __m128i input12 = _mm_add_epi8( _mm_cmpgt_epi8( - _mm_setzero_si128(), + _mm_setzero_si128(), input1), _mm_cmpgt_epi8( _mm_setzero_si128(), @@ -33227,6 +37364,21 @@ simdutf_warn_unused size_t implementation::utf32_length_from_utf8(const char * i return utf8::count_code_points(input, length); } +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { + return compress_decode_base64(output, input, length); +} + +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { + return scalar::base64::base64_length_from_binary(length); +} + +size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { + return encode_base64(output, input, length); +} } // namespace westmere } // namespace simdutf diff --git a/deps/simdutf/simdutf.h b/deps/simdutf/simdutf.h index b0466f52d9d742..539b1ebfc28eb1 100644 --- a/deps/simdutf/simdutf.h +++ b/deps/simdutf/simdutf.h @@ -1,4 +1,4 @@ -/* auto-generated on 2024-01-29 10:40:15 -0500. Do not edit! */ +/* auto-generated on 2024-03-18 10:58:28 -0400. Do not edit! */ /* begin file include/simdutf.h */ #ifndef SIMDUTF_H #define SIMDUTF_H @@ -142,6 +142,30 @@ // s390 IBM system. Big endian. 
#elif (defined(__riscv) || defined(__riscv__)) && __riscv_xlen == 64 // RISC-V 64-bit +#define SIMDUTF_IS_RISCV64 1 + +#if __clang_major__ >= 19 +// Does the compiler support target regions for RISC-V +#define SIMDUTF_HAS_RVV_TARGET_REGION 1 +#endif + +#if __riscv_v_intrinsic >= 11000 && !(__GNUC__ == 13 && __GNUC_MINOR__ == 2 && __GNUC_PATCHLEVEL__ == 0) +#define SIMDUTF_HAS_RVV_INTRINSICS 1 +#endif + +#define SIMDUTF_HAS_ZVBB_INTRINSICS 0 // there is currently no way to detect this + +#if SIMDUTF_HAS_RVV_INTRINSICS && __riscv_vector && __riscv_v_min_vlen >= 128 && __riscv_v_elen >= 64 +// RISC-V V extension +#define SIMDUTF_IS_RVV 1 +#if SIMDUTF_HAS_ZVBB_INTRINSICS && __riscv_zvbb >= 1000000 +// RISC-V Vector Basic Bit-manipulation +#define SIMDUTF_IS_ZVBB 1 +#endif +#endif + +#elif defined(__loongarch_lp64) +// LoongArch 64-bit #else // The simdutf library is designed // for 64-bit processors and it seems that you are not @@ -540,6 +564,8 @@ enum error_code { SURROGATE, // The decoded character must be not be in U+D800...DFFF (UTF-8 or UTF-32) OR // a high surrogate must be followed by a low surrogate and a low surrogate must be preceded by a high surrogate (UTF-16) OR // there must be no surrogate at all (Latin1) + INVALID_BASE64_CHARACTER, // Found a character that cannot be part of a valid base64 string. + BASE64_INPUT_REMAINDER, // The base64 input terminates with a single character, excluding padding (=). OTHER // Not related to validation/transcoding. }; @@ -567,14 +593,14 @@ SIMDUTF_DISABLE_UNDESIRED_WARNINGS #define SIMDUTF_SIMDUTF_VERSION_H /** The version of simdutf being used (major.minor.revision) */ -#define SIMDUTF_VERSION "4.0.9" +#define SIMDUTF_VERSION "5.0.0" namespace simdutf { enum { /** * The major version (MAJOR.minor.revision) of simdutf being used. */ - SIMDUTF_VERSION_MAJOR = 4, + SIMDUTF_VERSION_MAJOR = 5, /** * The minor version (major.MINOR.revision) of simdutf being used. */ @@ -582,7 +608,7 @@ enum { /** * The revision (major.minor.REVISION) of simdutf being used. */ - SIMDUTF_VERSION_REVISION = 9 + SIMDUTF_VERSION_REVISION = 0 }; } // namespace simdutf @@ -654,6 +680,7 @@ POSSIBILITY OF SUCH DAMAGE. #include #endif + namespace simdutf { namespace internal { @@ -675,7 +702,9 @@ enum instruction_set { AVX512BW = 0x4000, AVX512VL = 0x8000, AVX512VBMI2 = 0x10000, - AVX512VPOPCNTDQ = 0x2000 + AVX512VPOPCNTDQ = 0x2000, + RVV = 0x4000, + ZVBB = 0x8000, }; #if defined(__PPC64__) @@ -684,6 +713,40 @@ static inline uint32_t detect_supported_architectures() { return instruction_set::ALTIVEC; } +#elif SIMDUTF_IS_RISCV64 + +#if defined(__linux__) +#include +// We define these our selfs, for backwards compatibility +struct simdutf_riscv_hwprobe { int64_t key; uint64_t value; }; +#define simdutf_riscv_hwprobe(...) 
syscall(258, __VA_ARGS__) +#define SIMDUTF_RISCV_HWPROBE_KEY_IMA_EXT_0 4 +#define SIMDUTF_RISCV_HWPROBE_IMA_V (1 << 2) +#define SIMDUTF_RISCV_HWPROBE_EXT_ZVBB (1 << 17) +#endif + +static inline uint32_t detect_supported_architectures() { + uint32_t host_isa = instruction_set::DEFAULT; +#if SIMDUTF_IS_RVV + host_isa |= instruction_set::RVV; +#endif +#if SIMDUTF_IS_ZVBB + host_isa |= instruction_set::ZVBB; +#endif +#if defined(__linux__) + simdutf_riscv_hwprobe probes[] = { { SIMDUTF_RISCV_HWPROBE_KEY_IMA_EXT_0, 0 } }; + long ret = simdutf_riscv_hwprobe(&probes, sizeof probes/sizeof *probes, 0, nullptr, 0); + if (ret == 0) { + uint64_t extensions = probes[0].value; + if (extensions & SIMDUTF_RISCV_HWPROBE_IMA_V) + host_isa |= instruction_set::RVV; + if (extensions & SIMDUTF_RISCV_HWPROBE_EXT_ZVBB) + host_isa |= instruction_set::ZVBB; + } +#endif + return host_isa; +} + #elif defined(__aarch64__) || defined(_M_ARM64) static inline uint32_t detect_supported_architectures() { @@ -2222,6 +2285,63 @@ simdutf_warn_unused size_t trim_partial_utf16le(const char16_t* input, size_t le */ simdutf_warn_unused size_t trim_partial_utf16(const char16_t* input, size_t length); + +/** + * Provide the maximal binary length in bytes given the base64 input. + * In general, if the input contains ASCII spaces, the result will be less than + * the maximum length. + * + * @param input the base64 input to process + * @param length the length of the base64 input in bytes + * @return number of base64 bytes + */ +simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) noexcept; + +/** + * Convert a base64 input to a binary ouput. + * + * This function follows the WHATWG forgiving-base64 format, which means that it will + * ignore any ASCII spaces in the input. You may provide a padded input (with one or two + * equal signs at the end) or an unpadded input (without any equal signs at the end). + * + * See https://infra.spec.whatwg.org/#forgiving-base64-decode + * + * This function will fail in case of invalid input. There are two possible reasons for + * failure: the input is contains a number of base64 characters that when divided by 4, leaves + * a singler remainder character (BASE64_INPUT_REMAINDER), or the input contains a character + * that is not a valid base64 character (INVALID_BASE64_CHARACTER). + * + * You should call this function with a buffer that is at least maximal_binary_length_from_base64(input, length) bytes long. + * If you fail to provide that much space, the function may cause a buffer overflow. + * + * @param input the base64 string to process + * @param length the length of the string in bytes + * @param output the pointer to buffer that can hold the conversion result (should be at least maximal_binary_length_from_base64(input, length) bytes long). + * @return a result pair struct (of type simdutf::error containing the two fields error and count) with an error code and either position of the error (in the input in bytes) if any, or the number of bytes written if successful. + */ +simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) noexcept; + +/** + * Provide the base64 length in bytes given the length of a binary input. + * + * @param length the length of the input in bytes + * @return number of base64 bytes + */ +simdutf_warn_unused size_t base64_length_from_binary(size_t length) noexcept; + +/** + * Convert a binary input to a base64 ouput. 
The output is always padded with equal signs so that it is + * a multiple of 4 bytes long. + * + * This function always succeeds. + * + * @param input the binary to process + * @param length the length of the input in bytes + * @param output the pointer to buffer that can hold the conversion result (should be at least base64_length_from_binary(length) bytes long) + * @return number of written bytes, will be equal to base64_length_from_binary(length) + */ +size_t binary_to_base64(const char * input, size_t length, char* output) noexcept; + /** * An implementation of simdutf for a particular CPU architecture. * @@ -3282,6 +3402,61 @@ class implementation { */ simdutf_warn_unused virtual size_t count_utf8(const char * input, size_t length) const noexcept = 0; + /** + * Provide the maximal binary length in bytes given the base64 input. + * In general, if the input contains ASCII spaces, the result will be less than + * the maximum length. + * + * @param input the base64 input to process + * @param length the length of the base64 input in bytes + * @return number of base64 bytes + */ + simdutf_warn_unused virtual size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept = 0; + + /** + * Convert a base64 input to a binary ouput. + * + * This function follows the WHATWG forgiving-base64 format, which means that it will + * ignore any ASCII spaces in the input. You may provide a padded input (with one or two + * equal signs at the end) or an unpadded input (without any equal signs at the end). + * + * See https://infra.spec.whatwg.org/#forgiving-base64-decode + * + * This function will fail in case of invalid input. There are two possible reasons for + * failure: the input is contains a number of base64 characters that when divided by 4, leaves + * a singler remainder character (BASE64_INPUT_REMAINDER), or the input contains a character + * that is not a valid base64 character (INVALID_BASE64_CHARACTER). + * + * You should call this function with a buffer that is at least maximal_binary_length_from_base64(input, length) bytes long. + * If you fail to provide that much space, the function may cause a buffer overflow. + * + * @param input the base64 string to process + * @param length the length of the string in bytes + * @param output the pointer to buffer that can hold the conversion result (should be at least maximal_binary_length_from_base64(input, length) bytes long). + * @return a result pair struct (of type simdutf::error containing the two fields error and count) with an error code and either position of the error (in the input in bytes) if any, or the number of bytes written if successful. + */ + simdutf_warn_unused virtual result base64_to_binary(const char * input, size_t length, char* output) const noexcept = 0; + + /** + * Provide the base64 length in bytes given the length of a binary input. + * + * @param length the length of the input in bytes + * @return number of base64 bytes + */ + simdutf_warn_unused virtual size_t base64_length_from_binary(size_t length) const noexcept = 0; + + /** + * Convert a binary input to a base64 ouput. The output is always padded with equal signs so that it is + * a multiple of 4 bytes long. + * + * This function always succeeds. 
+ * + * @param input the binary to process + * @param length the length of the input in bytes + * @param output the pointer to buffer that can hold the conversion result (should be at least base64_length_from_binary(length) bytes long) + * @return number of written bytes, will be equal to base64_length_from_binary(length) + */ + virtual size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept = 0; protected: From 5114cbe18ac539f4ff167b6355aaea1bc9b9d581 Mon Sep 17 00:00:00 2001 From: Yagiz Nizipli Date: Thu, 4 Apr 2024 17:53:41 -0400 Subject: [PATCH 13/41] deps: update simdutf to 5.2.3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/52381 Refs: https://github.com/nodejs/node/pull/51670 Reviewed-By: Daniel Lemire Reviewed-By: Vinícius Lourenço Claro Cardoso Reviewed-By: Matteo Collina Reviewed-By: Robert Nagy Reviewed-By: Benjamin Gruenbaum Reviewed-By: Filip Skokan --- deps/simdutf/simdutf.cpp | 3000 ++++++++++++++++++++++++++------------ deps/simdutf/simdutf.h | 163 ++- 2 files changed, 2204 insertions(+), 959 deletions(-) diff --git a/deps/simdutf/simdutf.cpp b/deps/simdutf/simdutf.cpp index 8452ff3896c4da..c4d4ed3f7ae481 100644 --- a/deps/simdutf/simdutf.cpp +++ b/deps/simdutf/simdutf.cpp @@ -1,9 +1,685 @@ -/* auto-generated on 2024-03-18 10:58:28 -0400. Do not edit! */ +/* auto-generated on 2024-04-05 16:29:02 -0400. Do not edit! */ /* begin file src/simdutf.cpp */ #include "simdutf.h" +// We include base64_tables once. +/* begin file src/tables/base64_tables.h */ +#ifndef SIMDUTF_BASE64_TABLES_H +#define SIMDUTF_BASE64_TABLES_H +#include +#include + +namespace simdutf { +namespace { +namespace tables { +namespace base64 { +namespace base64_default { + +const char e0[256] = { + 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'C', 'C', 'C', 'C', 'D', 'D', 'D', + 'D', 'E', 'E', 'E', 'E', 'F', 'F', 'F', 'F', 'G', 'G', 'G', 'G', 'H', 'H', + 'H', 'H', 'I', 'I', 'I', 'I', 'J', 'J', 'J', 'J', 'K', 'K', 'K', 'K', 'L', + 'L', 'L', 'L', 'M', 'M', 'M', 'M', 'N', 'N', 'N', 'N', 'O', 'O', 'O', 'O', + 'P', 'P', 'P', 'P', 'Q', 'Q', 'Q', 'Q', 'R', 'R', 'R', 'R', 'S', 'S', 'S', + 'S', 'T', 'T', 'T', 'T', 'U', 'U', 'U', 'U', 'V', 'V', 'V', 'V', 'W', 'W', + 'W', 'W', 'X', 'X', 'X', 'X', 'Y', 'Y', 'Y', 'Y', 'Z', 'Z', 'Z', 'Z', 'a', + 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'd', 'd', 'd', 'd', + 'e', 'e', 'e', 'e', 'f', 'f', 'f', 'f', 'g', 'g', 'g', 'g', 'h', 'h', 'h', + 'h', 'i', 'i', 'i', 'i', 'j', 'j', 'j', 'j', 'k', 'k', 'k', 'k', 'l', 'l', + 'l', 'l', 'm', 'm', 'm', 'm', 'n', 'n', 'n', 'n', 'o', 'o', 'o', 'o', 'p', + 'p', 'p', 'p', 'q', 'q', 'q', 'q', 'r', 'r', 'r', 'r', 's', 's', 's', 's', + 't', 't', 't', 't', 'u', 'u', 'u', 'u', 'v', 'v', 'v', 'v', 'w', 'w', 'w', + 'w', 'x', 'x', 'x', 'x', 'y', 'y', 'y', 'y', 'z', 'z', 'z', 'z', '0', '0', + '0', '0', '1', '1', '1', '1', '2', '2', '2', '2', '3', '3', '3', '3', '4', + '4', '4', '4', '5', '5', '5', '5', '6', '6', '6', '6', '7', '7', '7', '7', + '8', '8', '8', '8', '9', '9', '9', '9', '+', '+', '+', '+', '/', '/', '/', + '/'}; + +const char e1[256] = { + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', + 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', + 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', + 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 
+ 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', + 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', + '4', '5', '6', '7', '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', + 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', + 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', + 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', + '/'}; + +const char e2[256] = { + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', + 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', + 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', + 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', + 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', + 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', + '4', '5', '6', '7', '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', + 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', + 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', + 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', + '/'}; + +const uint32_t d0[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x000000f8, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x000000fc, + 0x000000d0, 0x000000d4, 0x000000d8, 0x000000dc, 0x000000e0, 0x000000e4, + 0x000000e8, 0x000000ec, 0x000000f0, 0x000000f4, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00000004, 0x00000008, 0x0000000c, 0x00000010, 0x00000014, 0x00000018, + 0x0000001c, 0x00000020, 0x00000024, 0x00000028, 0x0000002c, 0x00000030, + 0x00000034, 0x00000038, 0x0000003c, 0x00000040, 0x00000044, 0x00000048, + 0x0000004c, 0x00000050, 0x00000054, 0x00000058, 0x0000005c, 0x00000060, + 0x00000064, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x00000068, 0x0000006c, 0x00000070, 0x00000074, 
0x00000078, + 0x0000007c, 0x00000080, 0x00000084, 0x00000088, 0x0000008c, 0x00000090, + 0x00000094, 0x00000098, 0x0000009c, 0x000000a0, 0x000000a4, 0x000000a8, + 0x000000ac, 0x000000b0, 0x000000b4, 0x000000b8, 0x000000bc, 0x000000c0, + 0x000000c4, 0x000000c8, 0x000000cc, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; + +const uint32_t d1[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x0000e003, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x0000f003, + 0x00004003, 0x00005003, 0x00006003, 0x00007003, 0x00008003, 0x00009003, + 0x0000a003, 0x0000b003, 0x0000c003, 0x0000d003, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00001000, 0x00002000, 0x00003000, 0x00004000, 0x00005000, 0x00006000, + 0x00007000, 0x00008000, 0x00009000, 0x0000a000, 0x0000b000, 0x0000c000, + 0x0000d000, 0x0000e000, 0x0000f000, 0x00000001, 0x00001001, 0x00002001, + 0x00003001, 0x00004001, 0x00005001, 0x00006001, 0x00007001, 0x00008001, + 0x00009001, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x0000a001, 0x0000b001, 0x0000c001, 0x0000d001, 0x0000e001, + 0x0000f001, 0x00000002, 0x00001002, 0x00002002, 0x00003002, 0x00004002, + 0x00005002, 0x00006002, 0x00007002, 0x00008002, 0x00009002, 0x0000a002, + 0x0000b002, 0x0000c002, 0x0000d002, 0x0000e002, 0x0000f002, 0x00000003, + 0x00001003, 0x00002003, 0x00003003, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 
0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; + +const uint32_t d2[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x00800f00, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00c00f00, + 0x00000d00, 0x00400d00, 0x00800d00, 0x00c00d00, 0x00000e00, 0x00400e00, + 0x00800e00, 0x00c00e00, 0x00000f00, 0x00400f00, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00400000, 0x00800000, 0x00c00000, 0x00000100, 0x00400100, 0x00800100, + 0x00c00100, 0x00000200, 0x00400200, 0x00800200, 0x00c00200, 0x00000300, + 0x00400300, 0x00800300, 0x00c00300, 0x00000400, 0x00400400, 0x00800400, + 0x00c00400, 0x00000500, 0x00400500, 0x00800500, 0x00c00500, 0x00000600, + 0x00400600, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x00800600, 0x00c00600, 0x00000700, 0x00400700, 0x00800700, + 0x00c00700, 0x00000800, 0x00400800, 0x00800800, 0x00c00800, 0x00000900, + 0x00400900, 0x00800900, 0x00c00900, 0x00000a00, 0x00400a00, 0x00800a00, + 0x00c00a00, 0x00000b00, 0x00400b00, 0x00800b00, 0x00c00b00, 0x00000c00, + 0x00400c00, 0x00800c00, 0x00c00c00, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 
0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; + +const uint32_t d3[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x003e0000, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x003f0000, + 0x00340000, 0x00350000, 0x00360000, 0x00370000, 0x00380000, 0x00390000, + 0x003a0000, 0x003b0000, 0x003c0000, 0x003d0000, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00010000, 0x00020000, 0x00030000, 0x00040000, 0x00050000, 0x00060000, + 0x00070000, 0x00080000, 0x00090000, 0x000a0000, 0x000b0000, 0x000c0000, + 0x000d0000, 0x000e0000, 0x000f0000, 0x00100000, 0x00110000, 0x00120000, + 0x00130000, 0x00140000, 0x00150000, 0x00160000, 0x00170000, 0x00180000, + 0x00190000, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x001a0000, 0x001b0000, 0x001c0000, 0x001d0000, 0x001e0000, + 0x001f0000, 0x00200000, 0x00210000, 0x00220000, 0x00230000, 0x00240000, + 0x00250000, 0x00260000, 0x00270000, 0x00280000, 0x00290000, 0x002a0000, + 0x002b0000, 0x002c0000, 0x002d0000, 0x002e0000, 0x002f0000, 0x00300000, + 0x00310000, 0x00320000, 0x00330000, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 
0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; +} // namespace base64_default + +namespace base64_url { + +const char e0[256] = { + 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'C', 'C', 'C', 'C', 'D', 'D', 'D', + 'D', 'E', 'E', 'E', 'E', 'F', 'F', 'F', 'F', 'G', 'G', 'G', 'G', 'H', 'H', + 'H', 'H', 'I', 'I', 'I', 'I', 'J', 'J', 'J', 'J', 'K', 'K', 'K', 'K', 'L', + 'L', 'L', 'L', 'M', 'M', 'M', 'M', 'N', 'N', 'N', 'N', 'O', 'O', 'O', 'O', + 'P', 'P', 'P', 'P', 'Q', 'Q', 'Q', 'Q', 'R', 'R', 'R', 'R', 'S', 'S', 'S', + 'S', 'T', 'T', 'T', 'T', 'U', 'U', 'U', 'U', 'V', 'V', 'V', 'V', 'W', 'W', + 'W', 'W', 'X', 'X', 'X', 'X', 'Y', 'Y', 'Y', 'Y', 'Z', 'Z', 'Z', 'Z', 'a', + 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'd', 'd', 'd', 'd', + 'e', 'e', 'e', 'e', 'f', 'f', 'f', 'f', 'g', 'g', 'g', 'g', 'h', 'h', 'h', + 'h', 'i', 'i', 'i', 'i', 'j', 'j', 'j', 'j', 'k', 'k', 'k', 'k', 'l', 'l', + 'l', 'l', 'm', 'm', 'm', 'm', 'n', 'n', 'n', 'n', 'o', 'o', 'o', 'o', 'p', + 'p', 'p', 'p', 'q', 'q', 'q', 'q', 'r', 'r', 'r', 'r', 's', 's', 's', 's', + 't', 't', 't', 't', 'u', 'u', 'u', 'u', 'v', 'v', 'v', 'v', 'w', 'w', 'w', + 'w', 'x', 'x', 'x', 'x', 'y', 'y', 'y', 'y', 'z', 'z', 'z', 'z', '0', '0', + '0', '0', '1', '1', '1', '1', '2', '2', '2', '2', '3', '3', '3', '3', '4', + '4', '4', '4', '5', '5', '5', '5', '6', '6', '6', '6', '7', '7', '7', '7', + '8', '8', '8', '8', '9', '9', '9', '9', '-', '-', '-', '-', '_', '_', '_', + '_'}; + +const char e1[256] = { + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', + 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', + 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', + 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', '-', '_', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', + 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', + 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', + '4', '5', '6', '7', '8', '9', '-', '_', 'A', 'B', 'C', 'D', 'E', 'F', 'G', + 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', + 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', + 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '_', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', 
'7', '8', '9', '-', + '_'}; + +const char e2[256] = { + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', + 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', + 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', + 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', '-', '_', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', + 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', + 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', + 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', + '4', '5', '6', '7', '8', '9', '-', '_', 'A', 'B', 'C', 'D', 'E', 'F', 'G', + 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', + 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', + 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '_', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', + '_'}; + +const uint32_t d0[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x000000f8, 0x01ffffff, 0x01ffffff, + 0x000000d0, 0x000000d4, 0x000000d8, 0x000000dc, 0x000000e0, 0x000000e4, + 0x000000e8, 0x000000ec, 0x000000f0, 0x000000f4, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00000004, 0x00000008, 0x0000000c, 0x00000010, 0x00000014, 0x00000018, + 0x0000001c, 0x00000020, 0x00000024, 0x00000028, 0x0000002c, 0x00000030, + 0x00000034, 0x00000038, 0x0000003c, 0x00000040, 0x00000044, 0x00000048, + 0x0000004c, 0x00000050, 0x00000054, 0x00000058, 0x0000005c, 0x00000060, + 0x00000064, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x000000fc, + 0x01ffffff, 0x00000068, 0x0000006c, 0x00000070, 0x00000074, 0x00000078, + 0x0000007c, 0x00000080, 0x00000084, 0x00000088, 0x0000008c, 0x00000090, + 0x00000094, 0x00000098, 0x0000009c, 0x000000a0, 0x000000a4, 0x000000a8, + 0x000000ac, 0x000000b0, 0x000000b4, 0x000000b8, 0x000000bc, 0x000000c0, + 0x000000c4, 0x000000c8, 0x000000cc, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 
0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; +const uint32_t d1[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x0000e003, 0x01ffffff, 0x01ffffff, + 0x00004003, 0x00005003, 0x00006003, 0x00007003, 0x00008003, 0x00009003, + 0x0000a003, 0x0000b003, 0x0000c003, 0x0000d003, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00001000, 0x00002000, 0x00003000, 0x00004000, 0x00005000, 0x00006000, + 0x00007000, 0x00008000, 0x00009000, 0x0000a000, 0x0000b000, 0x0000c000, + 0x0000d000, 0x0000e000, 0x0000f000, 0x00000001, 0x00001001, 0x00002001, + 0x00003001, 0x00004001, 0x00005001, 0x00006001, 0x00007001, 0x00008001, + 0x00009001, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x0000f003, + 0x01ffffff, 0x0000a001, 0x0000b001, 0x0000c001, 0x0000d001, 0x0000e001, + 0x0000f001, 0x00000002, 0x00001002, 0x00002002, 0x00003002, 0x00004002, + 0x00005002, 0x00006002, 0x00007002, 0x00008002, 0x00009002, 0x0000a002, + 0x0000b002, 0x0000c002, 0x0000d002, 0x0000e002, 0x0000f002, 0x00000003, + 0x00001003, 0x00002003, 0x00003003, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 
+ 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; +const uint32_t d2[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00800f00, 0x01ffffff, 0x01ffffff, + 0x00000d00, 0x00400d00, 0x00800d00, 0x00c00d00, 0x00000e00, 0x00400e00, + 0x00800e00, 0x00c00e00, 0x00000f00, 0x00400f00, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00400000, 0x00800000, 0x00c00000, 0x00000100, 0x00400100, 0x00800100, + 0x00c00100, 0x00000200, 0x00400200, 0x00800200, 0x00c00200, 0x00000300, + 0x00400300, 0x00800300, 0x00c00300, 0x00000400, 0x00400400, 0x00800400, + 0x00c00400, 0x00000500, 0x00400500, 0x00800500, 0x00c00500, 0x00000600, + 0x00400600, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00c00f00, + 0x01ffffff, 0x00800600, 0x00c00600, 0x00000700, 0x00400700, 0x00800700, + 0x00c00700, 0x00000800, 0x00400800, 0x00800800, 0x00c00800, 0x00000900, + 0x00400900, 0x00800900, 0x00c00900, 0x00000a00, 0x00400a00, 0x00800a00, + 0x00c00a00, 0x00000b00, 0x00400b00, 0x00800b00, 0x00c00b00, 0x00000c00, + 0x00400c00, 0x00800c00, 0x00c00c00, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 
0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; +const uint32_t d3[256] = { + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x003e0000, 0x01ffffff, 0x01ffffff, + 0x00340000, 0x00350000, 0x00360000, 0x00370000, 0x00380000, 0x00390000, + 0x003a0000, 0x003b0000, 0x003c0000, 0x003d0000, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, + 0x00010000, 0x00020000, 0x00030000, 0x00040000, 0x00050000, 0x00060000, + 0x00070000, 0x00080000, 0x00090000, 0x000a0000, 0x000b0000, 0x000c0000, + 0x000d0000, 0x000e0000, 0x000f0000, 0x00100000, 0x00110000, 0x00120000, + 0x00130000, 0x00140000, 0x00150000, 0x00160000, 0x00170000, 0x00180000, + 0x00190000, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x003f0000, + 0x01ffffff, 0x001a0000, 0x001b0000, 0x001c0000, 0x001d0000, 0x001e0000, + 0x001f0000, 0x00200000, 0x00210000, 0x00220000, 0x00230000, 0x00240000, + 0x00250000, 0x00260000, 0x00270000, 0x00280000, 0x00290000, 0x002a0000, + 0x002b0000, 0x002c0000, 0x002d0000, 0x002e0000, 0x002f0000, 0x00300000, + 0x00310000, 0x00320000, 0x00330000, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, + 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; +} // namespace base64_url +const uint64_t thintable_epi8[256] = { + 
0x0706050403020100, 0x0007060504030201, 0x0007060504030200, + 0x0000070605040302, 0x0007060504030100, 0x0000070605040301, + 0x0000070605040300, 0x0000000706050403, 0x0007060504020100, + 0x0000070605040201, 0x0000070605040200, 0x0000000706050402, + 0x0000070605040100, 0x0000000706050401, 0x0000000706050400, + 0x0000000007060504, 0x0007060503020100, 0x0000070605030201, + 0x0000070605030200, 0x0000000706050302, 0x0000070605030100, + 0x0000000706050301, 0x0000000706050300, 0x0000000007060503, + 0x0000070605020100, 0x0000000706050201, 0x0000000706050200, + 0x0000000007060502, 0x0000000706050100, 0x0000000007060501, + 0x0000000007060500, 0x0000000000070605, 0x0007060403020100, + 0x0000070604030201, 0x0000070604030200, 0x0000000706040302, + 0x0000070604030100, 0x0000000706040301, 0x0000000706040300, + 0x0000000007060403, 0x0000070604020100, 0x0000000706040201, + 0x0000000706040200, 0x0000000007060402, 0x0000000706040100, + 0x0000000007060401, 0x0000000007060400, 0x0000000000070604, + 0x0000070603020100, 0x0000000706030201, 0x0000000706030200, + 0x0000000007060302, 0x0000000706030100, 0x0000000007060301, + 0x0000000007060300, 0x0000000000070603, 0x0000000706020100, + 0x0000000007060201, 0x0000000007060200, 0x0000000000070602, + 0x0000000007060100, 0x0000000000070601, 0x0000000000070600, + 0x0000000000000706, 0x0007050403020100, 0x0000070504030201, + 0x0000070504030200, 0x0000000705040302, 0x0000070504030100, + 0x0000000705040301, 0x0000000705040300, 0x0000000007050403, + 0x0000070504020100, 0x0000000705040201, 0x0000000705040200, + 0x0000000007050402, 0x0000000705040100, 0x0000000007050401, + 0x0000000007050400, 0x0000000000070504, 0x0000070503020100, + 0x0000000705030201, 0x0000000705030200, 0x0000000007050302, + 0x0000000705030100, 0x0000000007050301, 0x0000000007050300, + 0x0000000000070503, 0x0000000705020100, 0x0000000007050201, + 0x0000000007050200, 0x0000000000070502, 0x0000000007050100, + 0x0000000000070501, 0x0000000000070500, 0x0000000000000705, + 0x0000070403020100, 0x0000000704030201, 0x0000000704030200, + 0x0000000007040302, 0x0000000704030100, 0x0000000007040301, + 0x0000000007040300, 0x0000000000070403, 0x0000000704020100, + 0x0000000007040201, 0x0000000007040200, 0x0000000000070402, + 0x0000000007040100, 0x0000000000070401, 0x0000000000070400, + 0x0000000000000704, 0x0000000703020100, 0x0000000007030201, + 0x0000000007030200, 0x0000000000070302, 0x0000000007030100, + 0x0000000000070301, 0x0000000000070300, 0x0000000000000703, + 0x0000000007020100, 0x0000000000070201, 0x0000000000070200, + 0x0000000000000702, 0x0000000000070100, 0x0000000000000701, + 0x0000000000000700, 0x0000000000000007, 0x0006050403020100, + 0x0000060504030201, 0x0000060504030200, 0x0000000605040302, + 0x0000060504030100, 0x0000000605040301, 0x0000000605040300, + 0x0000000006050403, 0x0000060504020100, 0x0000000605040201, + 0x0000000605040200, 0x0000000006050402, 0x0000000605040100, + 0x0000000006050401, 0x0000000006050400, 0x0000000000060504, + 0x0000060503020100, 0x0000000605030201, 0x0000000605030200, + 0x0000000006050302, 0x0000000605030100, 0x0000000006050301, + 0x0000000006050300, 0x0000000000060503, 0x0000000605020100, + 0x0000000006050201, 0x0000000006050200, 0x0000000000060502, + 0x0000000006050100, 0x0000000000060501, 0x0000000000060500, + 0x0000000000000605, 0x0000060403020100, 0x0000000604030201, + 0x0000000604030200, 0x0000000006040302, 0x0000000604030100, + 0x0000000006040301, 0x0000000006040300, 0x0000000000060403, + 0x0000000604020100, 0x0000000006040201, 0x0000000006040200, + 0x0000000000060402, 
0x0000000006040100, 0x0000000000060401, + 0x0000000000060400, 0x0000000000000604, 0x0000000603020100, + 0x0000000006030201, 0x0000000006030200, 0x0000000000060302, + 0x0000000006030100, 0x0000000000060301, 0x0000000000060300, + 0x0000000000000603, 0x0000000006020100, 0x0000000000060201, + 0x0000000000060200, 0x0000000000000602, 0x0000000000060100, + 0x0000000000000601, 0x0000000000000600, 0x0000000000000006, + 0x0000050403020100, 0x0000000504030201, 0x0000000504030200, + 0x0000000005040302, 0x0000000504030100, 0x0000000005040301, + 0x0000000005040300, 0x0000000000050403, 0x0000000504020100, + 0x0000000005040201, 0x0000000005040200, 0x0000000000050402, + 0x0000000005040100, 0x0000000000050401, 0x0000000000050400, + 0x0000000000000504, 0x0000000503020100, 0x0000000005030201, + 0x0000000005030200, 0x0000000000050302, 0x0000000005030100, + 0x0000000000050301, 0x0000000000050300, 0x0000000000000503, + 0x0000000005020100, 0x0000000000050201, 0x0000000000050200, + 0x0000000000000502, 0x0000000000050100, 0x0000000000000501, + 0x0000000000000500, 0x0000000000000005, 0x0000000403020100, + 0x0000000004030201, 0x0000000004030200, 0x0000000000040302, + 0x0000000004030100, 0x0000000000040301, 0x0000000000040300, + 0x0000000000000403, 0x0000000004020100, 0x0000000000040201, + 0x0000000000040200, 0x0000000000000402, 0x0000000000040100, + 0x0000000000000401, 0x0000000000000400, 0x0000000000000004, + 0x0000000003020100, 0x0000000000030201, 0x0000000000030200, + 0x0000000000000302, 0x0000000000030100, 0x0000000000000301, + 0x0000000000000300, 0x0000000000000003, 0x0000000000020100, + 0x0000000000000201, 0x0000000000000200, 0x0000000000000002, + 0x0000000000000100, 0x0000000000000001, 0x0000000000000000, + 0x0000000000000000, +}; + +const uint8_t pshufb_combine_table[272] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0xff, 0xff, 0xff, 0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, 0xff, 0xff, 0x00, 0x01, 0x02, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x01, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x00, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; + +const unsigned char BitsSetTable256mul2[256] = { + 0, 2, 2, 4, 2, 4, 4, 6, 2, 4, 4, 6, 4, 6, 6, 8, 2, 4, 4, + 6, 4, 6, 6, 8, 4, 6, 6, 8, 6, 8, 8, 10, 2, 4, 4, 6, 4, 6, + 6, 8, 4, 6, 6, 8, 6, 8, 8, 10, 4, 6, 6, 8, 6, 8, 8, 10, 6, + 8, 8, 10, 8, 10, 10, 12, 2, 4, 4, 6, 4, 6, 6, 8, 4, 6, 6, 8, + 6, 8, 8, 10, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, 10, 8, 10, 10, + 12, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, 10, 8, 10, 10, 12, 6, 8, + 8, 10, 8, 10, 10, 12, 8, 10, 10, 12, 10, 12, 12, 14, 2, 4, 4, 6, 4, + 6, 6, 8, 4, 6, 6, 8, 6, 8, 8, 10, 4, 6, 6, 8, 6, 8, 8, 10, + 6, 8, 8, 10, 8, 10, 10, 12, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, + 10, 8, 10, 10, 12, 6, 8, 8, 10, 8, 10, 10, 12, 8, 10, 10, 12, 10, 12, + 12, 14, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, 10, 8, 10, 10, 12, 6, + 8, 8, 10, 8, 10, 10, 12, 8, 10, 10, 12, 10, 12, 12, 14, 6, 8, 8, 10, + 8, 10, 10, 12, 8, 10, 10, 12, 10, 12, 12, 14, 
8, 10, 10, 12, 10, 12, 12, + 14, 10, 12, 12, 14, 12, 14, 14, 16}; + +constexpr uint8_t to_base64_value[] = { + 255, 255, 255, 255, 255, 255, 255, 255, 255, 64, 64, 255, 64, 64, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 64, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 62, 255, + 255, 255, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255, + 255, 255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 255, 255, 255, 255, 255, 255, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255}; + +constexpr uint8_t to_base64_url_value[] = { + 255, 255, 255, 255, 255, 255, 255, 255, 255, 64, 64, 255, 64, 64, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 64, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 62, 255, 255, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255, + 255, 255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 255, 255, 255, 255, 63, 255, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255}; +static_assert(sizeof(to_base64_value) == 256, "to_base64_value must have 256 elements"); +static_assert(sizeof(to_base64_url_value) == 256, "to_base64_url_value must have 256 elements"); +static_assert(to_base64_value[uint8_t(' ')] == 64, "space must be == 64 in to_base64_value"); +static_assert(to_base64_url_value[uint8_t(' ')] == 64, "space must be == 64 in to_base64_url_value"); +static_assert(to_base64_value[uint8_t('\t')] == 64, "tab must be == 64 in to_base64_value"); +static_assert(to_base64_url_value[uint8_t('\t')] == 64, "tab must be == 64 in to_base64_url_value"); +static_assert(to_base64_value[uint8_t('\r')] == 64, "cr must be == 64 in to_base64_value"); +static_assert(to_base64_url_value[uint8_t('\r')] == 64, "cr must be == 64 in to_base64_url_value"); +static_assert(to_base64_value[uint8_t('\n')] == 64, "lf must be == 64 in to_base64_value"); +static_assert(to_base64_url_value[uint8_t('\n')] == 
64, "lf must be == 64 in to_base64_url_value"); +static_assert(to_base64_value[uint8_t('\f')] == 64, "ff must be == 64 in to_base64_value"); +static_assert(to_base64_url_value[uint8_t('\f')] == 64, "ff must be == 64 in to_base64_url_value"); +static_assert(to_base64_value[uint8_t('+')] == 62, "+ must be == 62 in to_base64_value"); +static_assert(to_base64_url_value[uint8_t('-')] == 62, "- must be == 62 in to_base64_url_value"); +static_assert(to_base64_value[uint8_t('/')] == 63, "/ must be == 62 in to_base64_value"); +static_assert(to_base64_url_value[uint8_t('_')] == 63, "_ must be == 62 in to_base64_url_value"); +} // namespace base64 +} // namespace tables +} // unnamed namespace +} // namespace simdutf + +#endif // SIMDUTF_BASE64_TABLES_H +/* end file src/tables/base64_tables.h */ /* begin file src/implementation.cpp */ #include #include +#include // Useful for debugging purposes namespace simdutf { @@ -36,7 +712,11 @@ std::string toBinaryString(T b) { #ifndef SIMDUTF_IMPLEMENTATION_ARM64 #define SIMDUTF_IMPLEMENTATION_ARM64 (SIMDUTF_IS_ARM64) #endif -#define SIMDUTF_CAN_ALWAYS_RUN_ARM64 SIMDUTF_IMPLEMENTATION_ARM64 && SIMDUTF_IS_ARM64 +#if SIMDUTF_IMPLEMENTATION_ARM64 && SIMDUTF_IS_ARM64 +#define SIMDUTF_CAN_ALWAYS_RUN_ARM64 1 +#else +#define SIMDUTF_CAN_ALWAYS_RUN_ARM64 0 +#endif @@ -141,9 +821,11 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf16_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf8_length_from_latin1(const char * input, size_t length) const noexcept; simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; - simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; - simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; - size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept; + simdutf_warn_unused result base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_options options) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept; }; } // namespace arm64 @@ -944,7 +1626,7 @@ struct simd16: base16_numeric { simdutf_really_inline simd16 operator&(const simd16 other) const { return vandq_u16(*this, other); } simdutf_really_inline simd16 operator^(const simd16 other) const { return veorq_u16(*this, other); } - // Pack with the unsigned saturation two uint16_t code units into single uint8_t vector + // Pack with the unsigned saturation of two uint16_t code units into single uint8_t vector static simdutf_really_inline simd8 pack(const simd16& v0, const simd16& v1) { return vqmovn_high_u16(vqmovn_u16(v0), v1); } @@ -1131,10 +1813,14 @@ simdutf_really_inline simd16::operator simd16() const { retur // To see why (__BMI__) && (__LZCNT__) are not part of this next line, see // https://github.com/simdutf/simdutf/issues/1247 -#define SIMDUTF_CAN_ALWAYS_RUN_ICELAKE ((SIMDUTF_IMPLEMENTATION_ICELAKE) && (SIMDUTF_IS_X86_64) && (__AVX2__) && (SIMDUTF_HAS_AVX512F && \ +#if 
((SIMDUTF_IMPLEMENTATION_ICELAKE) && (SIMDUTF_IS_X86_64) && (__AVX2__) && (SIMDUTF_HAS_AVX512F && \ SIMDUTF_HAS_AVX512DQ && \ SIMDUTF_HAS_AVX512VL && \ SIMDUTF_HAS_AVX512VBMI2) && (!SIMDUTF_IS_32BITS)) +#define SIMDUTF_CAN_ALWAYS_RUN_ICELAKE 1 +#else +#define SIMDUTF_CAN_ALWAYS_RUN_ICELAKE 0 +#endif #if SIMDUTF_IMPLEMENTATION_ICELAKE #if SIMDUTF_CAN_ALWAYS_RUN_ICELAKE @@ -1358,9 +2044,11 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf16_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf8_length_from_latin1(const char * input, size_t length) const noexcept; simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; - simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; - simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; - size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept; + simdutf_warn_unused result base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_options options) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept; }; } // namespace icelake @@ -1468,7 +2156,11 @@ SIMDUTF_POP_DISABLE_WARNINGS #endif // To see why (__BMI__) && (__LZCNT__) are not part of this next line, see // https://github.com/simdutf/simdutf/issues/1247 -#define SIMDUTF_CAN_ALWAYS_RUN_HASWELL ((SIMDUTF_IMPLEMENTATION_HASWELL) && (SIMDUTF_IS_X86_64) && (__AVX2__)) +#if ((SIMDUTF_IMPLEMENTATION_HASWELL) && (SIMDUTF_IS_X86_64) && (__AVX2__)) +#define SIMDUTF_CAN_ALWAYS_RUN_HASWELL 1 +#else +#define SIMDUTF_CAN_ALWAYS_RUN_HASWELL 0 +#endif #if SIMDUTF_IMPLEMENTATION_HASWELL @@ -1579,9 +2271,11 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf16_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf8_length_from_latin1(const char * input, size_t length) const noexcept; simdutf_warn_unused virtual size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; - simdutf_warn_unused virtual result base64_to_binary(const char * input, size_t length, char* output) const noexcept; - simdutf_warn_unused virtual size_t base64_length_from_binary(size_t length) const noexcept; - size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused virtual result base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused virtual size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept; + simdutf_warn_unused virtual result base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused virtual size_t base64_length_from_binary(size_t length, base64_options options) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept; }; } // namespace 
haswell @@ -2243,7 +2937,7 @@ struct simd16: base16_numeric { return _mm256_shuffle_epi8(*this, swap); } - // Pack with the unsigned saturation two uint16_t code units into single uint8_t vector + // Pack with the unsigned saturation of two uint16_t code units into single uint8_t vector static simdutf_really_inline simd8 pack(const simd16& v0, const simd16& v1) { // Note: the AVX2 variant of pack operates on 128-bit lanes, thus // we have to shuffle lanes in order to produce bytes in the @@ -2415,7 +3109,11 @@ SIMDUTF_POP_DISABLE_WARNINGS #endif -#define SIMDUTF_CAN_ALWAYS_RUN_WESTMERE (SIMDUTF_IMPLEMENTATION_WESTMERE && SIMDUTF_IS_X86_64 && __SSE4_2__) +#if (SIMDUTF_IMPLEMENTATION_WESTMERE && SIMDUTF_IS_X86_64 && __SSE4_2__) +#define SIMDUTF_CAN_ALWAYS_RUN_WESTMERE 1 +#else +#define SIMDUTF_CAN_ALWAYS_RUN_WESTMERE 0 +#endif #if SIMDUTF_IMPLEMENTATION_WESTMERE @@ -2524,9 +3222,11 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf16_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf8_length_from_latin1(const char * input, size_t length) const noexcept; simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; - simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; - simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; - size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept; + simdutf_warn_unused result base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_options options) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept; }; } // namespace westmere @@ -3223,7 +3923,7 @@ struct simd16: base16_numeric { return _mm_shuffle_epi8(*this, swap); } - // Pack with the unsigned saturation two uint16_t code units into single uint8_t vector + // Pack with the unsigned saturation of two uint16_t code units into single uint8_t vector static simdutf_really_inline simd8 pack(const simd16& v0, const simd16& v1) { return _mm_packus_epi16(v0, v1); } @@ -3459,9 +4159,11 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf16_length_from_utf32(const char32_t * input, size_t length) const noexcept; simdutf_warn_unused size_t utf32_length_from_utf8(const char * input, size_t length) const noexcept; simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; - simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; - simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; - size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept; + simdutf_warn_unused result 
base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_options options) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept; }; } // namespace ppc64 @@ -4145,9 +4847,11 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf16_length_from_latin1(size_t len) const noexcept; simdutf_warn_unused size_t utf8_length_from_latin1(const char *buf, size_t len) const noexcept; simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; - simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; - simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; - size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept; + simdutf_warn_unused result base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_options options) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept; private: const bool _supports_zvbb; @@ -4414,9 +5118,11 @@ class implementation final : public simdutf::implementation { simdutf_warn_unused size_t utf16_length_from_latin1(size_t length) const noexcept; simdutf_warn_unused size_t utf8_length_from_latin1(const char * input, size_t length) const noexcept; simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept; - simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept; - simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept; - size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept; + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept; + simdutf_warn_unused result base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept; + simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_options options) const noexcept; + size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept; }; } // namespace fallback } // namespace simdutf @@ -4794,6 +5500,388 @@ simdutf_warn_unused inline size_t trim_partial_utf16(const char16_t* input, size #endif /* end file src/scalar/utf16.h */ +/* begin file src/scalar/utf32.h */ +#ifndef SIMDUTF_UTF32_H +#define SIMDUTF_UTF32_H + +namespace simdutf { +namespace scalar { +namespace { +namespace utf32 { + +inline simdutf_warn_unused bool validate(const char32_t *buf, size_t len) noexcept { + const uint32_t *data = reinterpret_cast(buf); + uint64_t pos = 0; + for(;pos < len; pos++) { + uint32_t word = data[pos]; + if(word > 0x10FFFF || 
(word >= 0xD800 && word <= 0xDFFF)) { + return false; + } + } + return true; +} + +inline simdutf_warn_unused result validate_with_errors(const char32_t *buf, size_t len) noexcept { + const uint32_t *data = reinterpret_cast<const uint32_t *>(buf); + size_t pos = 0; + for(;pos < len; pos++) { + uint32_t word = data[pos]; + if(word > 0x10FFFF) { + return result(error_code::TOO_LARGE, pos); + } + if(word >= 0xD800 && word <= 0xDFFF) { + return result(error_code::SURROGATE, pos); + } + } + return result(error_code::SUCCESS, pos); +} + +inline size_t utf8_length_from_utf32(const char32_t* buf, size_t len) { + // We are not BOM aware. + const uint32_t * p = reinterpret_cast<const uint32_t *>(buf); + size_t counter{0}; + for(size_t i = 0; i < len; i++) { + // credit: @ttsugriy for the vectorizable approach + counter++; // ASCII + counter += static_cast<size_t>(p[i] > 0x7F); // two-byte + counter += static_cast<size_t>(p[i] > 0x7FF); // three-byte + counter += static_cast<size_t>(p[i] > 0xFFFF); // four-bytes + } + return counter; +} + +inline size_t utf16_length_from_utf32(const char32_t* buf, size_t len) { + // We are not BOM aware. + const uint32_t * p = reinterpret_cast<const uint32_t *>(buf); + size_t counter{0}; + for(size_t i = 0; i < len; i++) { + counter++; // non-surrogate word + counter += static_cast<size_t>(p[i] > 0xFFFF); // surrogate pair + } + return counter; +} + +inline size_t latin1_length_from_utf32(size_t len) { + // We are not BOM aware. + return len; // a utf32 codepoint will always represent 1 latin1 character +} + +inline simdutf_warn_unused uint32_t swap_bytes(const uint32_t word) { + return ((word >> 24) & 0xff) | // move byte 3 to byte 0 + ((word << 8) & 0xff0000) | // move byte 1 to byte 2 + ((word >> 8) & 0xff00) | // move byte 2 to byte 1 + ((word << 24) & 0xff000000); // byte 0 to byte 3 +} + +} // utf32 namespace +} // unnamed namespace +} // namespace scalar +} // namespace simdutf + +#endif +/* end file src/scalar/utf32.h */ +/* begin file src/scalar/base64.h */ +#ifndef SIMDUTF_BASE64_H +#define SIMDUTF_BASE64_H + +#include +#include +#include +namespace simdutf { +namespace scalar { +namespace { +namespace base64 { + +// This function is not expected to be fast. Do not use in long loops. +template <class char_type> +bool is_ascii_white_space(char_type c) { + return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f'; +} + +// Returns true upon success. The destination buffer must be large enough. +// This function assumes that the padding (=) has been removed. +template <class char_type> +result base64_tail_decode(char *dst, const char_type *src, size_t length, base64_options options) { + // This looks like 5 branches, but we expect the compiler to resolve this to a single branch: + const uint8_t *to_base64 = (options & base64_url) ? tables::base64::to_base64_url_value : tables::base64::to_base64_value; + const uint32_t *d0 = (options & base64_url) ? tables::base64::base64_url::d0 : tables::base64::base64_default::d0; + const uint32_t *d1 = (options & base64_url) ? tables::base64::base64_url::d1 : tables::base64::base64_default::d1; + const uint32_t *d2 = (options & base64_url) ? tables::base64::base64_url::d2 : tables::base64::base64_default::d2; + const uint32_t *d3 = (options & base64_url) ?
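// A brief illustrative aside (the sample values below are worked out by the editor from the
// tables in src/tables/base64_tables.h above; they are not comments from the simdutf sources):
// to_base64 classifies an input byte as a 6-bit digit (0..63), ignorable ASCII white space (64)
// or an invalid character (255). The d0..d3 tables hold the same 6-bit digits pre-shifted into
// the position they occupy within the decoded 24-bit group, with 0x01ffffff marking any byte
// that is not a base64 digit. Decoding "TWFu" with the default alphabet:
//   d0['T'] | d1['W'] | d2['F'] | d3['u'] = 0x0000004c | 0x00006001 | 0x00400100 | 0x002e0000
//                                         = 0x006e614d,
// which is below 0x01ffffff, so the fast loop accepts it, and whose low three bytes
// (0x4d, 0x61, 0x6e, that is "Man") are what the memcpy below writes to the output,
// after a byte swap on big-endian systems.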
tables::base64::base64_url::d3 : tables::base64::base64_default::d3; + + const char_type *srcend = src + length; + const char_type *srcinit = src; + const char *dstinit = dst; + + uint32_t x; + size_t idx; + uint8_t buffer[4]; + while (true) { + while (src + 4 <= srcend && + (x = d0[uint8_t(src[0])] | d1[uint8_t(src[1])] | + d2[uint8_t(src[2])] | d3[uint8_t(src[3])]) < 0x01FFFFFF) { + if(match_system(endianness::BIG)) { + x = scalar::utf32::swap_bytes(x); + } + std::memcpy(dst, &x, 3); // optimization opportunity: copy 4 bytes + dst += 3; + src += 4; + } + idx = 0; + // we need at least four characters. + while (idx < 4 && src < srcend) { + char_type c = *src; + uint8_t code = to_base64[uint8_t(c)]; + buffer[idx] = uint8_t(code); + if (code <= 63) { + idx++; + } else if (code > 64) { + return {INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } else { + // We have a space or a newline. We ignore it. + } + src++; + } + if (idx != 4) { + if (idx == 2) { + uint32_t triple = + (uint32_t(buffer[0]) << 3 * 6) + (uint32_t(buffer[1]) << 2 * 6); + if(match_system(endianness::BIG)) { + triple <<= 8; + std::memcpy(dst, &triple, 1); + } else { + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 1); + } + dst += 1; + + } else if (idx == 3) { + uint32_t triple = (uint32_t(buffer[0]) << 3 * 6) + + (uint32_t(buffer[1]) << 2 * 6) + + (uint32_t(buffer[2]) << 1 * 6); + if(match_system(endianness::BIG)) { + triple <<= 8; + std::memcpy(dst, &triple, 2); + } else { + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 2); + } + dst += 2; + } else if (idx == 1) { + return {BASE64_INPUT_REMAINDER, size_t(dst - dstinit)}; + } + return {SUCCESS, size_t(dst - dstinit)}; + } + + uint32_t triple = + (uint32_t(buffer[0]) << 3 * 6) + (uint32_t(buffer[1]) << 2 * 6) + + (uint32_t(buffer[2]) << 1 * 6) + (uint32_t(buffer[3]) << 0 * 6); + if(match_system(endianness::BIG)) { + triple <<= 8; + std::memcpy(dst, &triple, 3); + } else { + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 3); + } + dst += 3; + } +} + +// like base64_tail_decode, but it will not write past the end of the output buffer. +// outlen is modified to reflect the number of bytes written. +// This function assumes that the padding (=) has been removed. +template <class char_type> +result base64_tail_decode_safe(char *dst, size_t& outlen, const char_type *src, size_t length, base64_options options) { + // This looks like 5 branches, but we expect the compiler to resolve this to a single branch: + const uint8_t *to_base64 = (options & base64_url) ? tables::base64::to_base64_url_value : tables::base64::to_base64_value; + const uint32_t *d0 = (options & base64_url) ? tables::base64::base64_url::d0 : tables::base64::base64_default::d0; + const uint32_t *d1 = (options & base64_url) ? tables::base64::base64_url::d1 : tables::base64::base64_default::d1; + const uint32_t *d2 = (options & base64_url) ? tables::base64::base64_url::d2 : tables::base64::base64_default::d2; + const uint32_t *d3 = (options & base64_url) ?
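// Illustrative note for readability (not a comment from the simdutf sources): this safe variant
// follows the same structure as base64_tail_decode above, but every store is first checked
// against dst + outlen. When the output buffer is exhausted it stores the number of bytes already
// written back into outlen and returns OUTPUT_BUFFER_TOO_SMALL, with the result count giving the
// input position at which decoding stopped. A hypothetical caller (buffer names assumed, not
// taken from the patch) could retry along these lines:
//   size_t outlen = buf_capacity;                 // capacity on input, bytes written on output
//   result r = base64_tail_decode_safe(buf, outlen, src, srclen, base64_default);
//   if (r.error == OUTPUT_BUFFER_TOO_SMALL) { /* grow buf and resume from src + r.count */ }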
tables::base64::base64_url::d3 : tables::base64::base64_default::d3; + + const char_type *srcend = src + length; + const char_type *srcinit = src; + const char *dstinit = dst; + const char *dstend = dst + outlen; + + uint32_t x; + size_t idx; + uint8_t buffer[4]; + while (true) { + while (src + 4 <= srcend && + (x = d0[uint8_t(src[0])] | d1[uint8_t(src[1])] | + d2[uint8_t(src[2])] | d3[uint8_t(src[3])]) < 0x01FFFFFF) { + if(match_system(endianness::BIG)) { + x = scalar::utf32::swap_bytes(x); + } + if(dst + 3 > dstend) { + outlen = size_t(dst - dstinit); + return {OUTPUT_BUFFER_TOO_SMALL, size_t(src - srcinit)}; + } + std::memcpy(dst, &x, 3); // optimization opportunity: copy 4 bytes + dst += 3; + src += 4; + } + idx = 0; + const char_type *srccur = src; + + // we need at least four characters. + while (idx < 4 && src < srcend) { + char_type c = *src; + uint8_t code = to_base64[uint8_t(c)]; + buffer[idx] = uint8_t(code); + if (code <= 63) { + idx++; + } else if (code > 64) { + outlen = size_t(dst - dstinit); + return {INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; + } else { + // We have a space or a newline. We ignore it. + } + src++; + } + if (idx != 4) { + if (idx == 2) { + if(dst == dstend) { + outlen = size_t(dst - dstinit); + return {OUTPUT_BUFFER_TOO_SMALL, size_t(srccur - srcinit)}; + } + uint32_t triple = + (uint32_t(buffer[0]) << 3 * 6) + (uint32_t(buffer[1]) << 2 * 6); + if(match_system(endianness::BIG)) { + triple <<= 8; + std::memcpy(dst, &triple, 1); + } else { + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 1); + } + dst += 1; + + } else if (idx == 3) { + if(dst + 2 >= dstend) { + outlen = size_t(dst - dstinit); + return {OUTPUT_BUFFER_TOO_SMALL, size_t(srccur - srcinit)}; + } + uint32_t triple = (uint32_t(buffer[0]) << 3 * 6) + + (uint32_t(buffer[1]) << 2 * 6) + + (uint32_t(buffer[2]) << 1 * 6); + if(match_system(endianness::BIG)) { + triple <<= 8; + std::memcpy(dst, &triple, 2); + } else { + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 2); + } + dst += 2; + } else if (idx == 1) { + outlen = size_t(dst - dstinit); + return {BASE64_INPUT_REMAINDER, size_t(dst - dstinit)}; + } + outlen = size_t(dst - dstinit); + return {SUCCESS, size_t(dst - dstinit)}; + } + if(dst + 3 >= dstend) { + outlen = size_t(dst - dstinit); + return {OUTPUT_BUFFER_TOO_SMALL, size_t(srccur - srcinit)}; + } + uint32_t triple = + (uint32_t(buffer[0]) << 3 * 6) + (uint32_t(buffer[1]) << 2 * 6) + + (uint32_t(buffer[2]) << 1 * 6) + (uint32_t(buffer[3]) << 0 * 6); + if(match_system(endianness::BIG)) { + triple <<= 8; + std::memcpy(dst, &triple, 3); + } else { + triple = scalar::utf32::swap_bytes(triple); + triple >>= 8; + std::memcpy(dst, &triple, 3); + } + dst += 3; + } +} + +// Returns the number of bytes written. The destination buffer must be large +// enough. It will add padding (=) if needed. +size_t tail_encode_base64(char *dst, const char *src, size_t srclen, base64_options options) { + // This looks like 3 branches, but we expect the compiler to resolve this to a single branch: + const char *e0 = (options & base64_url) ? tables::base64::base64_url::e0 : tables::base64::base64_default::e0; + const char *e1 = (options & base64_url) ? tables::base64::base64_url::e1 : tables::base64::base64_default::e1; + const char *e2 = (options & base64_url) ? 
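// A short illustrative aside (worked out by the editor from the e0/e1/e2 tables above, not part
// of the simdutf sources): e0[b] is the alphabet letter for the top 6 bits of a byte (each letter
// is repeated four times in e0, so e0[b] == alphabet[b >> 2]), while e1 and e2 repeat the
// 64-letter alphabet four times, so e1[i] == alphabet[i] for i in 0..63 and e2[b] == alphabet[b & 0x3f].
// Encoding the three bytes "Man" (0x4d 0x61 0x6e) with the loop below therefore emits:
//   e0[0x4d] = 'T', e1[((0x4d & 0x03) << 4) | (0x61 >> 4)] = e1[22] = 'W',
//   e1[((0x61 & 0x0f) << 2) | (0x6e >> 6)] = e1[5] = 'F', and e2[0x6e] = e2[index 46] = 'u',
// that is, "TWFu".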
tables::base64::base64_url::e2 : tables::base64::base64_default::e2; + char *out = dst; + size_t i = 0; + uint8_t t1, t2, t3; + for (; i + 2 < srclen; i += 3) { + t1 = uint8_t(src[i]); + t2 = uint8_t(src[i + 1]); + t3 = uint8_t(src[i + 2]); + *out++ = e0[t1]; + *out++ = e1[((t1 & 0x03) << 4) | ((t2 >> 4) & 0x0F)]; + *out++ = e1[((t2 & 0x0F) << 2) | ((t3 >> 6) & 0x03)]; + *out++ = e2[t3]; + } + switch (srclen - i) { + case 0: + break; + case 1: + t1 = uint8_t(src[i]); + *out++ = e0[t1]; + *out++ = e1[(t1 & 0x03) << 4]; + if((options & base64_url) == 0) { + *out++ = '='; + *out++ = '='; + } + break; + default: /* case 2 */ + t1 = uint8_t(src[i]); + t2 = uint8_t(src[i + 1]); + *out++ = e0[t1]; + *out++ = e1[((t1 & 0x03) << 4) | ((t2 >> 4) & 0x0F)]; + *out++ = e2[(t2 & 0x0F) << 2]; + if((options & base64_url) == 0) { + *out++ = '='; + } + } + return (size_t)(out - dst); +} + +template +simdutf_warn_unused size_t maximal_binary_length_from_base64(const char_type * input, size_t length) noexcept { + // We follow https://infra.spec.whatwg.org/#forgiving-base64-decode + size_t padding = 0; + if(length > 0) { + if(input[length - 1] == '=') { + padding++; + if(length > 1 && input[length - 2] == '=') { + padding++; + } + } + } + size_t actual_length = length - padding; + if(actual_length % 4 <= 1) { + return actual_length / 4 * 3; + } + // if we have a valid input, then the remainder must be 2 or 3 adding one or two extra bytes. + return actual_length / 4 * 3 + (actual_length %4) - 1; +} + +simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_options options) noexcept { + if(options & base64_url) { + return length/3 * 4 + ((length % 3) ? (length % 3) + 1 : 0); + } + return (length + 2)/3 * 4; // We use padding to make the length a multiple of 4. 
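+  // For example (illustration only): 5 input bytes need (5 + 2)/3 * 4 = 8
+  // characters with padding, but only 5/3 * 4 + (5 % 3) + 1 = 7 characters
+  // under base64_url, which emits no '=' padding.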
+} + +} // namespace base64 +} // unnamed namespace +} // namespace scalar +} // namespace simdutf + +#endif +/* end file src/scalar/base64.h */ namespace simdutf { bool implementation::supported_by_runtime_system() const { @@ -5219,16 +6307,24 @@ class detect_best_supported_implementation_on_first_use final : public implement return set_best()->maximal_binary_length_from_base64(input, length); } - simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) const noexcept override { - return set_best()->base64_to_binary(input, length, output); + simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept override { + return set_best()->base64_to_binary(input, length, output, options); } - simdutf_warn_unused size_t base64_length_from_binary(size_t length) const noexcept override { - return set_best()->base64_length_from_binary(length); + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept override { + return set_best()->maximal_binary_length_from_base64(input, length); } - size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept override { - return set_best()->binary_to_base64(input, length, output); + simdutf_warn_unused result base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept override { + return set_best()->base64_to_binary(input, length, output, options); + } + + simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_options options) const noexcept override { + return set_best()->base64_length_from_binary(length, options); + } + + size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept override { + return set_best()->binary_to_base64(input, length, output, options); } simdutf_really_inline detect_best_supported_implementation_on_first_use() noexcept : implementation("best_supported_detector", "Detects the best supported implementation and sets it", 0) {} @@ -5575,15 +6671,24 @@ class unsupported_implementation final : public implementation { return 0; } - simdutf_warn_unused result base64_to_binary(const char *, size_t, char*) const noexcept override { + simdutf_warn_unused result base64_to_binary(const char *, size_t, char*, base64_options) const noexcept override { + return result(error_code::OTHER, 0); + } + + simdutf_warn_unused size_t maximal_binary_length_from_base64(const char16_t *, size_t) const noexcept override { + return 0; + } + + simdutf_warn_unused result base64_to_binary(const char16_t *, size_t, char*, base64_options) const noexcept override { return result(error_code::OTHER, 0); } - simdutf_warn_unused size_t base64_length_from_binary(size_t) const noexcept override { + + simdutf_warn_unused size_t base64_length_from_binary(size_t, base64_options) const noexcept override { return 0; } - size_t binary_to_base64(const char *, size_t, char*) const noexcept override { + size_t binary_to_base64(const char *, size_t, char*, base64_options) const noexcept override { return 0; } @@ -6033,16 +7138,96 @@ simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, return get_default_implementation()->maximal_binary_length_from_base64(input, length); } -simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) noexcept { - return get_default_implementation()->base64_to_binary(input, length, 
output); +simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output, base64_options options) noexcept { + return get_default_implementation()->base64_to_binary(input, length, output, options); +} + +simdutf_warn_unused size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) noexcept { + return get_default_implementation()->maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) noexcept { + return get_default_implementation()->base64_to_binary(input, length, output, options); +} + +template +simdutf_warn_unused result base64_to_binary_safe_impl(const chartype * input, size_t length, char* output, size_t& outlen, base64_options options) noexcept { + static_assert(std::is_same::value || std::is_same::value, "Only char and char16_t are supported."); + // The implementation could be nicer, but we expect that most times, the user + // will provide us with a buffer that is large enough. + size_t max_length = maximal_binary_length_from_base64(input, length); + if(outlen >= max_length) { + // fast path + result r = base64_to_binary(input, length, output, options); + if(r.error != error_code::INVALID_BASE64_CHARACTER) { outlen = r.count; r.count = length; } + return r; + } + // The output buffer is maybe too small. We will decode a truncated version of the input. + size_t outlen3 = outlen / 3 * 3; // round down to multiple of 3 + size_t safe_input = base64_length_from_binary(outlen3, options); + result r = base64_to_binary(input, safe_input, output, options); + if(r.error == error_code::INVALID_BASE64_CHARACTER) { return r; } + size_t offset = (r.error == error_code::BASE64_INPUT_REMAINDER) ? 1 : + ((r.count % 3) == 0 ? 0 : (r.count % 3) + 1); + size_t output_index = r.count - (r.count % 3); + size_t input_index = safe_input; + // offset is a value that is no larger than 3. We backtrack + // by up to offset characters + an undetermined number of + // white space characters. It is expected that the next loop + // runs at most 3 times + the number of white space characters + // in between them, so we are not worried about performance. 
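+  // Worked example of the arithmetic above: if the truncated decode reported
+  // r.count == 7, then 7 % 3 == 1, so offset == 2 and output_index == 6. The
+  // loop below steps input_index back over two non-whitespace base64
+  // characters (skipping any whitespace in between) so that the tail decoder
+  // re-reads the pair that produced the seventh byte together with the rest
+  // of the input.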
+ while(offset > 0 && input_index > 0) { + chartype c = input[--input_index]; + if(scalar::base64::is_ascii_white_space(c)){ + // skipping + } else { + offset--; + } + } + size_t remaining_out = outlen - output_index; + const chartype * tail_input = input + input_index; + size_t tail_length = length - input_index; + while(tail_length > 0 && scalar::base64::is_ascii_white_space(tail_input[tail_length - 1])) { + tail_length--; + } + size_t padding_characts = 0; + if(tail_length > 0 && tail_input[tail_length - 1] == '=') { + tail_length--; + padding_characts++; + while(tail_length > 0 && scalar::base64::is_ascii_white_space(tail_input[tail_length - 1])) { + tail_length--; + } + if(tail_length > 0 && tail_input[tail_length - 1] == '=') { + tail_length--; + padding_characts++; + } + } + r = scalar::base64::base64_tail_decode_safe(output + output_index, remaining_out, tail_input, tail_length, options); + outlen = output_index + remaining_out; + if(r.error == error_code::SUCCESS && padding_characts > 0) { + // additional checks + if((outlen % 3 == 0) || ((outlen % 3) + 1 + padding_characts != 4)) { + r.error = error_code::INVALID_BASE64_CHARACTER; + } + } + r.count += input_index; + return r; +} + + +simdutf_warn_unused result base64_to_binary_safe(const char * input, size_t length, char* output, size_t& outlen, base64_options options) noexcept { + return base64_to_binary_safe_impl(input, length, output, outlen, options); +} +simdutf_warn_unused result base64_to_binary_safe(const char16_t * input, size_t length, char* output, size_t& outlen, base64_options options) noexcept { + return base64_to_binary_safe_impl(input, length, output, outlen, options); } -simdutf_warn_unused size_t base64_length_from_binary(size_t length) noexcept { - return get_default_implementation()->base64_length_from_binary(length); +simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_options options) noexcept { + return get_default_implementation()->base64_length_from_binary(length, options); } -size_t binary_to_base64(const char * input, size_t length, char* output) noexcept { - return get_default_implementation()->binary_to_base64(input, length, output); +size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options) noexcept { + return get_default_implementation()->binary_to_base64(input, length, output, options); } simdutf_warn_unused simdutf::encoding_type autodetect_encoding(const char * buf, size_t length) noexcept { @@ -6142,429 +7327,11 @@ encoding_type check_bom(const char* byte, size_t length) { /* end file src/encoding_types.cpp */ /* begin file src/error.cpp */ namespace simdutf { - - simdutf_really_inline result::result() : error{error_code::SUCCESS}, count{0} {} - - simdutf_really_inline result::result(error_code _err, size_t _pos) : error{_err}, count{_pos} {} - +// deliberately empty } /* end file src/error.cpp */ // The large tables should be included once and they // should not depend on a kernel. 
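For reference, a minimal caller-side sketch of the options-aware base64 API introduced above. It is illustrative only and not part of the patch: it assumes the amalgamated simdutf.h header and the simdutf::base64_url enumerator from the public API, and the helper function names are invented for the example.

  #include <string>
  #include <vector>
  #include "simdutf.h"

  // Encode a binary buffer as base64url (no '=' padding in url mode).
  std::string encode_url(const std::vector<char>& data) {
    std::string b64(simdutf::base64_length_from_binary(data.size(), simdutf::base64_url), '\0');
    size_t written = simdutf::binary_to_base64(data.data(), data.size(),
                                               &b64[0], simdutf::base64_url);
    b64.resize(written);  // binary_to_base64 returns the number of characters written
    return b64;
  }

  // Decode, letting base64_to_binary_safe clamp writes to the buffer we provide.
  bool decode_url(const std::string& b64, std::vector<char>& out) {
    out.resize(simdutf::maximal_binary_length_from_base64(b64.data(), b64.size()));
    size_t outlen = out.size();
    simdutf::result r = simdutf::base64_to_binary_safe(b64.data(), b64.size(),
                                                       out.data(), outlen,
                                                       simdutf::base64_url);
    if (r.error != simdutf::error_code::SUCCESS) { return false; }
    out.resize(outlen);  // outlen now holds the number of bytes actually written
    return true;
  }

The safe variant never writes past the outlen it is handed, which is what makes it suitable when the destination buffer has a fixed size that may be smaller than maximal_binary_length_from_base64 suggests.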
-/* begin file src/tables/base64_tables.h */ -#ifndef SIMDUTF_BASE64_TABLES_H -#define SIMDUTF_BASE64_TABLES_H -#include -#include - -namespace simdutf { -namespace { -namespace tables { -namespace base64 { - -const char e0[256] = { - 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'C', 'C', 'C', 'C', 'D', 'D', 'D', - 'D', 'E', 'E', 'E', 'E', 'F', 'F', 'F', 'F', 'G', 'G', 'G', 'G', 'H', 'H', - 'H', 'H', 'I', 'I', 'I', 'I', 'J', 'J', 'J', 'J', 'K', 'K', 'K', 'K', 'L', - 'L', 'L', 'L', 'M', 'M', 'M', 'M', 'N', 'N', 'N', 'N', 'O', 'O', 'O', 'O', - 'P', 'P', 'P', 'P', 'Q', 'Q', 'Q', 'Q', 'R', 'R', 'R', 'R', 'S', 'S', 'S', - 'S', 'T', 'T', 'T', 'T', 'U', 'U', 'U', 'U', 'V', 'V', 'V', 'V', 'W', 'W', - 'W', 'W', 'X', 'X', 'X', 'X', 'Y', 'Y', 'Y', 'Y', 'Z', 'Z', 'Z', 'Z', 'a', - 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'c', 'c', 'c', 'c', 'd', 'd', 'd', 'd', - 'e', 'e', 'e', 'e', 'f', 'f', 'f', 'f', 'g', 'g', 'g', 'g', 'h', 'h', 'h', - 'h', 'i', 'i', 'i', 'i', 'j', 'j', 'j', 'j', 'k', 'k', 'k', 'k', 'l', 'l', - 'l', 'l', 'm', 'm', 'm', 'm', 'n', 'n', 'n', 'n', 'o', 'o', 'o', 'o', 'p', - 'p', 'p', 'p', 'q', 'q', 'q', 'q', 'r', 'r', 'r', 'r', 's', 's', 's', 's', - 't', 't', 't', 't', 'u', 'u', 'u', 'u', 'v', 'v', 'v', 'v', 'w', 'w', 'w', - 'w', 'x', 'x', 'x', 'x', 'y', 'y', 'y', 'y', 'z', 'z', 'z', 'z', '0', '0', - '0', '0', '1', '1', '1', '1', '2', '2', '2', '2', '3', '3', '3', '3', '4', - '4', '4', '4', '5', '5', '5', '5', '6', '6', '6', '6', '7', '7', '7', '7', - '8', '8', '8', '8', '9', '9', '9', '9', '+', '+', '+', '+', '/', '/', '/', - '/'}; - -const char e1[256] = { - 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', - 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', - 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', - 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', - 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', - 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', - '4', '5', '6', '7', '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', - 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', - 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', - 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/', 'A', 'B', 'C', - 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', - 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', - 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', - 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', - '/'}; - -const char e2[256] = { - 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', - 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', - 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', - 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', - 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', - 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', - '4', '5', '6', 
'7', '8', '9', '+', '/', 'A', 'B', 'C', 'D', 'E', 'F', 'G', - 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', - 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', - 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/', 'A', 'B', 'C', - 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', - 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', - 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', - 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', - '/'}; - -const int8_t decoding_table[256] = { - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -1, -1, -2, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, 62, -1, 62, -1, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, - 61, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, - 63, -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, - 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1}; - -/* SPECIAL DECODE TABLES FOR LITTLE ENDIAN CPUS */ - -const uint32_t d0[256] = { - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x000000f8, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x000000fc, - 0x000000d0, 0x000000d4, 0x000000d8, 0x000000dc, 0x000000e0, 0x000000e4, - 0x000000e8, 0x000000ec, 0x000000f0, 0x000000f4, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, - 0x00000004, 0x00000008, 0x0000000c, 0x00000010, 0x00000014, 0x00000018, - 0x0000001c, 0x00000020, 0x00000024, 0x00000028, 0x0000002c, 0x00000030, - 0x00000034, 0x00000038, 0x0000003c, 0x00000040, 0x00000044, 0x00000048, - 0x0000004c, 0x00000050, 0x00000054, 0x00000058, 0x0000005c, 0x00000060, - 0x00000064, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x00000068, 0x0000006c, 0x00000070, 0x00000074, 0x00000078, - 0x0000007c, 0x00000080, 0x00000084, 0x00000088, 0x0000008c, 0x00000090, - 0x00000094, 0x00000098, 0x0000009c, 0x000000a0, 0x000000a4, 0x000000a8, - 0x000000ac, 0x000000b0, 0x000000b4, 0x000000b8, 0x000000bc, 0x000000c0, - 0x000000c4, 0x000000c8, 0x000000cc, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 
0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; - -const uint32_t d1[256] = { - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x0000e003, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x0000f003, - 0x00004003, 0x00005003, 0x00006003, 0x00007003, 0x00008003, 0x00009003, - 0x0000a003, 0x0000b003, 0x0000c003, 0x0000d003, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, - 0x00001000, 0x00002000, 0x00003000, 0x00004000, 0x00005000, 0x00006000, - 0x00007000, 0x00008000, 0x00009000, 0x0000a000, 0x0000b000, 0x0000c000, - 0x0000d000, 0x0000e000, 0x0000f000, 0x00000001, 0x00001001, 0x00002001, - 0x00003001, 0x00004001, 0x00005001, 0x00006001, 0x00007001, 0x00008001, - 0x00009001, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x0000a001, 0x0000b001, 0x0000c001, 0x0000d001, 0x0000e001, - 0x0000f001, 0x00000002, 0x00001002, 0x00002002, 0x00003002, 0x00004002, - 0x00005002, 0x00006002, 0x00007002, 0x00008002, 0x00009002, 0x0000a002, - 0x0000b002, 0x0000c002, 0x0000d002, 0x0000e002, 0x0000f002, 0x00000003, - 0x00001003, 0x00002003, 0x00003003, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 
0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; - -const uint32_t d2[256] = { - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x00800f00, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00c00f00, - 0x00000d00, 0x00400d00, 0x00800d00, 0x00c00d00, 0x00000e00, 0x00400e00, - 0x00800e00, 0x00c00e00, 0x00000f00, 0x00400f00, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, - 0x00400000, 0x00800000, 0x00c00000, 0x00000100, 0x00400100, 0x00800100, - 0x00c00100, 0x00000200, 0x00400200, 0x00800200, 0x00c00200, 0x00000300, - 0x00400300, 0x00800300, 0x00c00300, 0x00000400, 0x00400400, 0x00800400, - 0x00c00400, 0x00000500, 0x00400500, 0x00800500, 0x00c00500, 0x00000600, - 0x00400600, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x00800600, 0x00c00600, 0x00000700, 0x00400700, 0x00800700, - 0x00c00700, 0x00000800, 0x00400800, 0x00800800, 0x00c00800, 0x00000900, - 0x00400900, 0x00800900, 0x00c00900, 0x00000a00, 0x00400a00, 0x00800a00, - 0x00c00a00, 0x00000b00, 0x00400b00, 0x00800b00, 0x00c00b00, 0x00000c00, - 0x00400c00, 0x00800c00, 0x00c00c00, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 
0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; - -const uint32_t d3[256] = { - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x003e0000, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x003f0000, - 0x00340000, 0x00350000, 0x00360000, 0x00370000, 0x00380000, 0x00390000, - 0x003a0000, 0x003b0000, 0x003c0000, 0x003d0000, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x00000000, - 0x00010000, 0x00020000, 0x00030000, 0x00040000, 0x00050000, 0x00060000, - 0x00070000, 0x00080000, 0x00090000, 0x000a0000, 0x000b0000, 0x000c0000, - 0x000d0000, 0x000e0000, 0x000f0000, 0x00100000, 0x00110000, 0x00120000, - 0x00130000, 0x00140000, 0x00150000, 0x00160000, 0x00170000, 0x00180000, - 0x00190000, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x001a0000, 0x001b0000, 0x001c0000, 0x001d0000, 0x001e0000, - 0x001f0000, 0x00200000, 0x00210000, 0x00220000, 0x00230000, 0x00240000, - 0x00250000, 0x00260000, 0x00270000, 0x00280000, 0x00290000, 0x002a0000, - 0x002b0000, 0x002c0000, 0x002d0000, 0x002e0000, 0x002f0000, 0x00300000, - 0x00310000, 0x00320000, 0x00330000, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 
0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff, - 0x01ffffff, 0x01ffffff, 0x01ffffff, 0x01ffffff}; -const uint64_t thintable_epi8[256] = { - 0x0706050403020100, 0x0007060504030201, 0x0007060504030200, - 0x0000070605040302, 0x0007060504030100, 0x0000070605040301, - 0x0000070605040300, 0x0000000706050403, 0x0007060504020100, - 0x0000070605040201, 0x0000070605040200, 0x0000000706050402, - 0x0000070605040100, 0x0000000706050401, 0x0000000706050400, - 0x0000000007060504, 0x0007060503020100, 0x0000070605030201, - 0x0000070605030200, 0x0000000706050302, 0x0000070605030100, - 0x0000000706050301, 0x0000000706050300, 0x0000000007060503, - 0x0000070605020100, 0x0000000706050201, 0x0000000706050200, - 0x0000000007060502, 0x0000000706050100, 0x0000000007060501, - 0x0000000007060500, 0x0000000000070605, 0x0007060403020100, - 0x0000070604030201, 0x0000070604030200, 0x0000000706040302, - 0x0000070604030100, 0x0000000706040301, 0x0000000706040300, - 0x0000000007060403, 0x0000070604020100, 0x0000000706040201, - 0x0000000706040200, 0x0000000007060402, 0x0000000706040100, - 0x0000000007060401, 0x0000000007060400, 0x0000000000070604, - 0x0000070603020100, 0x0000000706030201, 0x0000000706030200, - 0x0000000007060302, 0x0000000706030100, 0x0000000007060301, - 0x0000000007060300, 0x0000000000070603, 0x0000000706020100, - 0x0000000007060201, 0x0000000007060200, 0x0000000000070602, - 0x0000000007060100, 0x0000000000070601, 0x0000000000070600, - 0x0000000000000706, 0x0007050403020100, 0x0000070504030201, - 0x0000070504030200, 0x0000000705040302, 0x0000070504030100, - 0x0000000705040301, 0x0000000705040300, 0x0000000007050403, - 0x0000070504020100, 0x0000000705040201, 0x0000000705040200, - 0x0000000007050402, 0x0000000705040100, 0x0000000007050401, - 0x0000000007050400, 0x0000000000070504, 0x0000070503020100, - 0x0000000705030201, 0x0000000705030200, 0x0000000007050302, - 0x0000000705030100, 0x0000000007050301, 0x0000000007050300, - 0x0000000000070503, 0x0000000705020100, 0x0000000007050201, - 0x0000000007050200, 0x0000000000070502, 0x0000000007050100, - 0x0000000000070501, 0x0000000000070500, 0x0000000000000705, - 0x0000070403020100, 0x0000000704030201, 0x0000000704030200, - 0x0000000007040302, 0x0000000704030100, 0x0000000007040301, - 0x0000000007040300, 0x0000000000070403, 0x0000000704020100, - 0x0000000007040201, 0x0000000007040200, 0x0000000000070402, - 0x0000000007040100, 0x0000000000070401, 0x0000000000070400, - 0x0000000000000704, 0x0000000703020100, 0x0000000007030201, - 0x0000000007030200, 0x0000000000070302, 0x0000000007030100, - 0x0000000000070301, 0x0000000000070300, 0x0000000000000703, - 0x0000000007020100, 0x0000000000070201, 0x0000000000070200, - 0x0000000000000702, 0x0000000000070100, 0x0000000000000701, - 0x0000000000000700, 0x0000000000000007, 0x0006050403020100, - 0x0000060504030201, 0x0000060504030200, 0x0000000605040302, - 0x0000060504030100, 0x0000000605040301, 0x0000000605040300, - 0x0000000006050403, 0x0000060504020100, 0x0000000605040201, - 0x0000000605040200, 0x0000000006050402, 0x0000000605040100, - 0x0000000006050401, 0x0000000006050400, 0x0000000000060504, - 0x0000060503020100, 0x0000000605030201, 0x0000000605030200, - 0x0000000006050302, 0x0000000605030100, 0x0000000006050301, - 0x0000000006050300, 
0x0000000000060503, 0x0000000605020100, - 0x0000000006050201, 0x0000000006050200, 0x0000000000060502, - 0x0000000006050100, 0x0000000000060501, 0x0000000000060500, - 0x0000000000000605, 0x0000060403020100, 0x0000000604030201, - 0x0000000604030200, 0x0000000006040302, 0x0000000604030100, - 0x0000000006040301, 0x0000000006040300, 0x0000000000060403, - 0x0000000604020100, 0x0000000006040201, 0x0000000006040200, - 0x0000000000060402, 0x0000000006040100, 0x0000000000060401, - 0x0000000000060400, 0x0000000000000604, 0x0000000603020100, - 0x0000000006030201, 0x0000000006030200, 0x0000000000060302, - 0x0000000006030100, 0x0000000000060301, 0x0000000000060300, - 0x0000000000000603, 0x0000000006020100, 0x0000000000060201, - 0x0000000000060200, 0x0000000000000602, 0x0000000000060100, - 0x0000000000000601, 0x0000000000000600, 0x0000000000000006, - 0x0000050403020100, 0x0000000504030201, 0x0000000504030200, - 0x0000000005040302, 0x0000000504030100, 0x0000000005040301, - 0x0000000005040300, 0x0000000000050403, 0x0000000504020100, - 0x0000000005040201, 0x0000000005040200, 0x0000000000050402, - 0x0000000005040100, 0x0000000000050401, 0x0000000000050400, - 0x0000000000000504, 0x0000000503020100, 0x0000000005030201, - 0x0000000005030200, 0x0000000000050302, 0x0000000005030100, - 0x0000000000050301, 0x0000000000050300, 0x0000000000000503, - 0x0000000005020100, 0x0000000000050201, 0x0000000000050200, - 0x0000000000000502, 0x0000000000050100, 0x0000000000000501, - 0x0000000000000500, 0x0000000000000005, 0x0000000403020100, - 0x0000000004030201, 0x0000000004030200, 0x0000000000040302, - 0x0000000004030100, 0x0000000000040301, 0x0000000000040300, - 0x0000000000000403, 0x0000000004020100, 0x0000000000040201, - 0x0000000000040200, 0x0000000000000402, 0x0000000000040100, - 0x0000000000000401, 0x0000000000000400, 0x0000000000000004, - 0x0000000003020100, 0x0000000000030201, 0x0000000000030200, - 0x0000000000000302, 0x0000000000030100, 0x0000000000000301, - 0x0000000000000300, 0x0000000000000003, 0x0000000000020100, - 0x0000000000000201, 0x0000000000000200, 0x0000000000000002, - 0x0000000000000100, 0x0000000000000001, 0x0000000000000000, - 0x0000000000000000, -}; - -const uint8_t pshufb_combine_table[272] = { - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, - 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08, - 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0x00, 0x01, 0x02, 0x03, - 0x04, 0x05, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, - 0x0f, 0xff, 0xff, 0xff, 0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b, - 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, 0xff, 0xff, 0x00, 0x01, 0x02, 0x08, - 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x00, 0x01, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x00, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, - 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x08, 0x09, 0x0a, 0x0b, - 0x0c, 0x0d, 0x0e, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -}; - -const unsigned char BitsSetTable256mul2[256] = { - 0, 2, 2, 4, 2, 4, 4, 6, 2, 4, 4, 6, 4, 6, 6, 8, 2, 4, 4, - 6, 4, 6, 6, 8, 4, 6, 6, 8, 6, 8, 8, 10, 2, 4, 4, 6, 4, 6, - 6, 8, 4, 6, 6, 8, 6, 8, 8, 10, 4, 6, 6, 8, 6, 8, 8, 10, 6, - 8, 8, 10, 8, 10, 10, 12, 2, 4, 4, 6, 4, 6, 6, 8, 4, 6, 6, 8, - 6, 8, 8, 10, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, 10, 8, 10, 10, - 12, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, 10, 8, 10, 10, 12, 6, 8, - 8, 10, 8, 10, 
10, 12, 8, 10, 10, 12, 10, 12, 12, 14, 2, 4, 4, 6, 4, - 6, 6, 8, 4, 6, 6, 8, 6, 8, 8, 10, 4, 6, 6, 8, 6, 8, 8, 10, - 6, 8, 8, 10, 8, 10, 10, 12, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, - 10, 8, 10, 10, 12, 6, 8, 8, 10, 8, 10, 10, 12, 8, 10, 10, 12, 10, 12, - 12, 14, 4, 6, 6, 8, 6, 8, 8, 10, 6, 8, 8, 10, 8, 10, 10, 12, 6, - 8, 8, 10, 8, 10, 10, 12, 8, 10, 10, 12, 10, 12, 12, 14, 6, 8, 8, 10, - 8, 10, 10, 12, 8, 10, 10, 12, 10, 12, 12, 14, 8, 10, 10, 12, 10, 12, 12, - 14, 10, 12, 12, 14, 12, 14, 14, 16}; - -const uint8_t to_base64_value[] = { - 255, 255, 255, 255, 255, 255, 255, 255, 255, 64, 64, 255, 255, 64, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 64, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 62, 255, - 255, 255, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255, - 255, 255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, - 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 255, 255, 255, 255, 255, 255, 26, 27, 28, 29, 30, 31, 32, 33, - 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, - 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255}; -} // namespace base64 -} // namespace tables -} // unnamed namespace -} // namespace simdutf - -#endif // SIMDUTF_BASE64_TABLES_H -/* end file src/tables/base64_tables.h */ /* begin file src/tables/utf8_to_utf16_tables.h */ #ifndef SIMDUTF_UTF8_TO_UTF16_TABLES_H #define SIMDUTF_UTF8_TO_UTF16_TABLES_H @@ -11503,86 +12270,6 @@ inline simdutf_warn_unused result validate_with_errors(const char *buf, size_t l #endif /* end file src/scalar/ascii.h */ -/* begin file src/scalar/utf32.h */ -#ifndef SIMDUTF_UTF32_H -#define SIMDUTF_UTF32_H - -namespace simdutf { -namespace scalar { -namespace { -namespace utf32 { - -inline simdutf_warn_unused bool validate(const char32_t *buf, size_t len) noexcept { - const uint32_t *data = reinterpret_cast(buf); - uint64_t pos = 0; - for(;pos < len; pos++) { - uint32_t word = data[pos]; - if(word > 0x10FFFF || (word >= 0xD800 && word <= 0xDFFF)) { - return false; - } - } - return true; -} - -inline simdutf_warn_unused result validate_with_errors(const char32_t *buf, size_t len) noexcept { - const uint32_t *data = reinterpret_cast(buf); - size_t pos = 0; - for(;pos < len; pos++) { - uint32_t word = data[pos]; - if(word > 0x10FFFF) { - return result(error_code::TOO_LARGE, pos); - } - if(word >= 0xD800 && word <= 0xDFFF) { - return result(error_code::SURROGATE, pos); - } - } - return result(error_code::SUCCESS, pos); -} - -inline size_t utf8_length_from_utf32(const char32_t* buf, size_t len) { - // We are not BOM aware. 
- const uint32_t * p = reinterpret_cast(buf); - size_t counter{0}; - for(size_t i = 0; i < len; i++) { - // credit: @ttsugriy for the vectorizable approach - counter++; // ASCII - counter += static_cast(p[i] > 0x7F); // two-byte - counter += static_cast(p[i] > 0x7FF); // three-byte - counter += static_cast(p[i] > 0xFFFF); // four-bytes - } - return counter; -} - -inline size_t utf16_length_from_utf32(const char32_t* buf, size_t len) { - // We are not BOM aware. - const uint32_t * p = reinterpret_cast(buf); - size_t counter{0}; - for(size_t i = 0; i < len; i++) { - counter++; // non-surrogate word - counter += static_cast(p[i] > 0xFFFF); // surrogate pair - } - return counter; -} - -inline size_t latin1_length_from_utf32(size_t len) { - // We are not BOM aware. - return len; // a utf32 codepoint will always represent 1 latin1 character -} - -inline simdutf_warn_unused uint32_t swap_bytes(const uint32_t word) { - return ((word >> 24) & 0xff) | // move byte 3 to byte 0 - ((word << 8) & 0xff0000) | // move byte 1 to byte 2 - ((word >> 8) & 0xff00) | // move byte 2 to byte 1 - ((word << 24) & 0xff000000); // byte 0 to byte 3 -} - -} // utf32 namespace -} // unnamed namespace -} // namespace scalar -} // namespace simdutf - -#endif -/* end file src/scalar/utf32.h */ /* begin file src/scalar/latin1.h */ #ifndef SIMDUTF_LATIN1_H #define SIMDUTF_LATIN1_H @@ -11617,183 +12304,6 @@ inline size_t utf16_length_from_latin1(size_t len) { #endif /* end file src/scalar/latin1.h */ -/* begin file src/scalar/base64.h */ -#ifndef SIMDUTF_BASE64_H -#define SIMDUTF_BASE64_H - -#include -#include -#include -namespace simdutf { -namespace scalar { -namespace { -namespace base64 { - -// Returns true upon success. The destination buffer must be large enough and is -// incremented by the number of bytes written and src is incremented by the number of bytes read. -// This functions assumes that the padding (=) has been removed. -result base64_tail_decode(char *dst, const char *src, size_t length) { - const char *srcend = src + length; - const char *srcinit = src; - const char *dstinit = dst; - - uint32_t x; - size_t idx; - uint8_t buffer[4]; - while (true) { - while (src + 4 <= srcend && - (x = tables::base64::d0[uint8_t(src[0])] | tables::base64::d1[uint8_t(src[1])] | - tables::base64::d2[uint8_t(src[2])] | tables::base64::d3[uint8_t(src[3])]) < 0x01FFFFFF) { - if(match_system(endianness::BIG)) { - x = scalar::utf32::swap_bytes(x); - } - std::memcpy(dst, &x, 3); // optimization opportunity: copy 4 bytes - dst += 3; - src += 4; - } - idx = 0; - // we need at least four characters. 
- while (idx < 4 && src < srcend) { - char c = *src; - uint8_t code = tables::base64::to_base64_value[uint8_t(c)]; - buffer[idx] = uint8_t(code); - if (code <= 63) { - idx++; - } else if (code > 64) { - return {INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; - } - src++; - } - if (idx != 4) { - if (idx == 2) { - uint32_t triple = - (uint32_t(buffer[0]) << 3 * 6) + (uint32_t(buffer[1]) << 2 * 6); - if(match_system(endianness::BIG)) { - triple <<= 8; - std::memcpy(dst, &triple, 1); - } else { - triple = scalar::utf32::swap_bytes(triple); - triple >>= 8; - std::memcpy(dst, &triple, 1); - } - dst += 1; - - } else if (idx == 3) { - uint32_t triple = (uint32_t(buffer[0]) << 3 * 6) + - (uint32_t(buffer[1]) << 2 * 6) + - (uint32_t(buffer[2]) << 1 * 6); - if(match_system(endianness::BIG)) { - triple <<= 8; - std::memcpy(dst, &triple, 2); - } else { - triple = scalar::utf32::swap_bytes(triple); - triple >>= 8; - std::memcpy(dst, &triple, 2); - } - dst += 2; - } else if (idx == 1) { - return {BASE64_INPUT_REMAINDER, size_t(dst - dstinit)}; - } - return {SUCCESS, size_t(dst - dstinit)}; - } - - uint32_t triple = - (uint32_t(buffer[0]) << 3 * 6) + (uint32_t(buffer[1]) << 2 * 6) + - (uint32_t(buffer[2]) << 1 * 6) + (uint32_t(buffer[3]) << 0 * 6); - if(match_system(endianness::BIG)) { - triple <<= 8; - std::memcpy(dst, &triple, 3); - } else { - triple = scalar::utf32::swap_bytes(triple); - triple >>= 8; - std::memcpy(dst, &triple, 3); - } - dst += 3; - } -} - -// Returns the number of bytes written. The destination buffer must be large -// enough. It will add padding (=) if needed. -size_t tail_encode_base64(char *dst, const char *src, size_t srclen) { - char *out = dst; - size_t i = 0; - uint8_t t1, t2, t3; - for (; i + 2 < srclen; i += 3) { - t1 = (uint8_t)src[i]; - t2 = (uint8_t)src[i + 1]; - t3 = (uint8_t)src[i + 2]; - *out++ = tables::base64::e0[t1]; - *out++ = tables::base64::e1[((t1 & 0x03) << 4) | ((t2 >> 4) & 0x0F)]; - *out++ = tables::base64::e1[((t2 & 0x0F) << 2) | ((t3 >> 6) & 0x03)]; - *out++ = tables::base64::e2[t3]; - } - switch (srclen - i) { - case 0: - break; - case 1: - t1 = (uint8_t)src[i]; - *out++ = tables::base64::e0[t1]; - *out++ = tables::base64::e1[(t1 & 0x03) << 4]; - *out++ = '='; - *out++ = '='; - break; - default: /* case 2 */ - t1 = (uint8_t)src[i]; - t2 = (uint8_t)src[i + 1]; - *out++ = tables::base64::e0[t1]; - *out++ = tables::base64::e1[((t1 & 0x03) << 4) | ((t2 >> 4) & 0x0F)]; - *out++ = tables::base64::e2[(t2 & 0x0F) << 2]; - *out++ = '='; - } - return (size_t)(out - dst); -} - -simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) noexcept { - // We follow https://infra.spec.whatwg.org/#forgiving-base64-decode - size_t padding = 0; - if(length > 0) { - if(input[length - 1] == '=') { - padding++; - if(length > 1 && input[length - 2] == '=') { - padding++; - } - } - } - size_t actual_length = length - padding; - if(actual_length % 4 == 0) { - return actual_length / 4 * 3; - } - // if we have a valid input, then the remainder must be 2 or 3 adding one or two extra bytes. 
- return actual_length / 4 * 3 + (actual_length %4) - 1; -} - -simdutf_warn_unused simdutf_really_inline result base64_to_binary(const char * input, size_t length, char* output) noexcept { - if(length > 0 && input[length - 1] == '=') { - length -= 1; - if(length > 0 && input[length - 1] == '=') { - length -= 1; - } - } - if(length == 0) { - return {SUCCESS, 0}; - } - return base64_tail_decode(output, input, length); -} - -simdutf_warn_unused size_t base64_length_from_binary(size_t length) noexcept { - return (length + 2)/3 * 4; // We use padding to make the length a multiple of 4. -} - -simdutf_really_inline size_t binary_to_base64(const char * input, size_t length, char* output) noexcept { - return tail_encode_base64(output, input, length); -} -} // namespace base64 -} // unnamed namespace -} // namespace scalar -} // namespace simdutf - -#endif -/* end file src/scalar/base64.h */ /* begin file src/scalar/utf32_to_utf8/valid_utf32_to_utf8.h */ #ifndef SIMDUTF_VALID_UTF32_TO_UTF8_H @@ -16186,7 +16696,8 @@ std::pair arm_convert_utf32_to_utf16_with_errors(const char32 * https://www.codeproject.com/Articles/276993/Base-Encoding-on-a-GPU. (2013). */ -size_t encode_base64(char *dst, const char *src, size_t srclen) { +size_t encode_base64(char *dst, const char *src, size_t srclen, + base64_options options) { // credit: Wojciech Muła uint8_t *out = (uint8_t *)dst; constexpr static uint8_t source_table[64] = { @@ -16196,8 +16707,16 @@ size_t encode_base64(char *dst, const char *src, size_t srclen) { '5', 'K', 'a', 'q', '6', 'L', 'b', 'r', '7', 'M', 'c', 's', '8', 'N', 'd', 't', '9', 'O', 'e', 'u', '+', 'P', 'f', 'v', '/', }; + constexpr static uint8_t source_table_url[64] = { + 'A', 'Q', 'g', 'w', 'B', 'R', 'h', 'x', 'C', 'S', 'i', 'y', 'D', + 'T', 'j', 'z', 'E', 'U', 'k', '0', 'F', 'V', 'l', '1', 'G', 'W', + 'm', '2', 'H', 'X', 'n', '3', 'I', 'Y', 'o', '4', 'J', 'Z', 'p', + '5', 'K', 'a', 'q', '6', 'L', 'b', 'r', '7', 'M', 'c', 's', '8', + 'N', 'd', 't', '9', 'O', 'e', 'u', '-', 'P', 'f', 'v', '_', + }; const uint8x16_t v3f = vdupq_n_u8(0x3f); - const uint8x16x4_t table = vld4q_u8(source_table); + const uint8x16x4_t table = + vld4q_u8((options & base64_url) ? 
source_table_url : source_table); size_t i = 0; for (; i + 16 * 3 <= srclen; i += 16 * 3) { const uint8x16x3_t in = vld3q_u8((const uint8_t *)src + i); @@ -16215,7 +16734,8 @@ size_t encode_base64(char *dst, const char *src, size_t srclen) { vst4q_u8(out, result); out += 64; } - out += scalar::base64::tail_encode_base64((char *)out, src + i, srclen - i); + out += scalar::base64::tail_encode_base64((char *)out, src + i, srclen - i, + options); return size_t((char *)out - dst); } @@ -16254,9 +16774,22 @@ struct block64 { uint8x16_t chunks[4]; }; static_assert(sizeof(block64) == 64, "block64 is not 64 bytes"); -uint64_t to_base64_mask(block64 *b, bool *error) { +template uint64_t to_base64_mask(block64 *b, bool *error) { uint8x16_t v0f = vdupq_n_u8(0xf); + uint8x16_t underscore0, underscore1, underscore2, underscore3; + if (base64_url) { + underscore0 = vceqq_u8(b->chunks[0], vdupq_n_u8(0x5f)); + underscore1 = vceqq_u8(b->chunks[1], vdupq_n_u8(0x5f)); + underscore2 = vceqq_u8(b->chunks[2], vdupq_n_u8(0x5f)); + underscore3 = vceqq_u8(b->chunks[3], vdupq_n_u8(0x5f)); + } else { + (void)underscore0; + (void)underscore1; + (void)underscore2; + (void)underscore3; + } + uint8x16_t lo_nibbles0 = vandq_u8(b->chunks[0], v0f); uint8x16_t lo_nibbles1 = vandq_u8(b->chunks[1], v0f); uint8x16_t lo_nibbles2 = vandq_u8(b->chunks[2], v0f); @@ -16266,31 +16799,62 @@ uint64_t to_base64_mask(block64 *b, bool *error) { uint8x16_t hi_nibbles1 = vshrq_n_u8(b->chunks[1], 4); uint8x16_t hi_nibbles2 = vshrq_n_u8(b->chunks[2], 4); uint8x16_t hi_nibbles3 = vshrq_n_u8(b->chunks[3], 4); + uint8x16_t lut_lo; #ifdef SIMDUTF_REGULAR_VISUAL_STUDIO - const uint8x16_t lut_lo = - simdutf_make_uint8x16_t(0x3a, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, - 0x70, 0x61, 0xe1, 0xb4, 0xf4, 0xe5, 0xf4, 0xb4); + if (base64_url) { + lut_lo = + simdutf_make_uint8x16_t(0x3a, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, + 0x70, 0x61, 0xe1, 0xf4, 0xf5, 0xa5, 0xf4, 0xf4); + } else { + lut_lo = + simdutf_make_uint8x16_t(0x3a, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, + 0x70, 0x61, 0xe1, 0xb4, 0xf5, 0xe5, 0xf4, 0xb4); + } #else - const uint8x16_t lut_lo = {0x3a, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, - 0x70, 0x61, 0xe1, 0xb4, 0xf4, 0xe5, 0xf4, 0xb4}; + if (base64_url) { + lut_lo = uint8x16_t{0x3a, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, + 0x70, 0x61, 0xe1, 0xf4, 0xf5, 0xa5, 0xf4, 0xf4}; + } else { + lut_lo = uint8x16_t{0x3a, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, + 0x70, 0x61, 0xe1, 0xb4, 0xf5, 0xe5, 0xf4, 0xb4}; + } #endif uint8x16_t lo0 = vqtbl1q_u8(lut_lo, lo_nibbles0); uint8x16_t lo1 = vqtbl1q_u8(lut_lo, lo_nibbles1); uint8x16_t lo2 = vqtbl1q_u8(lut_lo, lo_nibbles2); uint8x16_t lo3 = vqtbl1q_u8(lut_lo, lo_nibbles3); + uint8x16_t lut_hi; #ifdef SIMDUTF_REGULAR_VISUAL_STUDIO - const uint8x16_t lut_hi = - simdutf_make_uint8x16_t(0x11, 0x20, 0x42, 0x80, 0x8, 0x4, 0x8, 0x4, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20); + if (base64_url) { + lut_hi = + simdutf_make_uint8x16_t(0x11, 0x20, 0x42, 0x80, 0x8, 0x4, 0x8, 0x4, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20); + } else { + lut_hi = + simdutf_make_uint8x16_t(0x11, 0x20, 0x42, 0x80, 0x8, 0x4, 0x8, 0x4, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20); + } #else - const uint8x16_t lut_hi = {0x11, 0x20, 0x42, 0x80, 0x8, 0x4, 0x8, 0x4, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}; + if (base64_url) { + lut_hi = uint8x16_t{0x11, 0x20, 0x42, 0x80, 0x8, 0x4, 0x8, 0x4, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}; + } else { + lut_hi = uint8x16_t{0x11, 0x20, 0x42, 0x80, 0x8, 
0x4, 0x8, 0x4, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20}; + } #endif uint8x16_t hi0 = vqtbl1q_u8(lut_hi, hi_nibbles0); uint8x16_t hi1 = vqtbl1q_u8(lut_hi, hi_nibbles1); uint8x16_t hi2 = vqtbl1q_u8(lut_hi, hi_nibbles2); uint8x16_t hi3 = vqtbl1q_u8(lut_hi, hi_nibbles3); + if (base64_url) { + hi0 = vbicq_u8(hi0, underscore0); + hi1 = vbicq_u8(hi1, underscore1); + hi2 = vbicq_u8(hi2, underscore2); + hi3 = vbicq_u8(hi3, underscore3); + } + uint8_t checks = vmaxvq_u8(vorrq_u8(vorrq_u8(vandq_u8(lo0, hi0), vandq_u8(lo1, hi1)), vorrq_u8(vandq_u8(lo2, hi2), vandq_u8(lo3, hi3)))); @@ -16321,23 +16885,41 @@ uint64_t to_base64_mask(block64 *b, bool *error) { } // This is the transformation step that can be done while we are waiting for // sum0 + uint8x16_t roll_lut; #ifdef SIMDUTF_REGULAR_VISUAL_STUDIO - const uint8x16_t roll_lut = - simdutf_make_uint8x16_t(0x0, 0x10, 0x13, 0x4, 0xbf, 0xbf, 0xb9, 0xb9, 0x0, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0); + if (base64_url) { + roll_lut = + simdutf_make_uint8x16_t(0xe0, 0x11, 0x13, 0x4, 0xbf, 0xbf, 0xb9, 0xb9, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0); + } else { + roll_lut = + simdutf_make_uint8x16_t(0x0, 0x10, 0x13, 0x4, 0xbf, 0xbf, 0xb9, 0xb9, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0); + } #else - const uint8x16_t roll_lut = {0x0, 0x10, 0x13, 0x4, 0xbf, 0xbf, 0xb9, 0xb9, - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; + if (base64_url) { + roll_lut = uint8x16_t{0xe0, 0x11, 0x13, 0x4, 0xbf, 0xbf, 0xb9, 0xb9, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; + } else { + roll_lut = uint8x16_t{0x0, 0x10, 0x13, 0x4, 0xbf, 0xbf, 0xb9, 0xb9, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; + } #endif - uint8x16_t v2f = vdupq_n_u8(0x2f); - uint8x16_t roll0 = - vqtbl1q_u8(roll_lut, vaddq_u8(vceqq_u8(b->chunks[0], v2f), hi_nibbles0)); - uint8x16_t roll1 = - vqtbl1q_u8(roll_lut, vaddq_u8(vceqq_u8(b->chunks[1], v2f), hi_nibbles1)); - uint8x16_t roll2 = - vqtbl1q_u8(roll_lut, vaddq_u8(vceqq_u8(b->chunks[2], v2f), hi_nibbles2)); - uint8x16_t roll3 = - vqtbl1q_u8(roll_lut, vaddq_u8(vceqq_u8(b->chunks[3], v2f), hi_nibbles3)); + uint8x16_t vsecond_last = base64_url ? vdupq_n_u8(0x2d) : vdupq_n_u8(0x2f); + if (base64_url) { + hi_nibbles0 = vbicq_u8(hi_nibbles0, underscore0); + hi_nibbles1 = vbicq_u8(hi_nibbles1, underscore1); + hi_nibbles2 = vbicq_u8(hi_nibbles2, underscore2); + hi_nibbles3 = vbicq_u8(hi_nibbles3, underscore3); + } + uint8x16_t roll0 = vqtbl1q_u8( + roll_lut, vaddq_u8(vceqq_u8(b->chunks[0], vsecond_last), hi_nibbles0)); + uint8x16_t roll1 = vqtbl1q_u8( + roll_lut, vaddq_u8(vceqq_u8(b->chunks[1], vsecond_last), hi_nibbles1)); + uint8x16_t roll2 = vqtbl1q_u8( + roll_lut, vaddq_u8(vceqq_u8(b->chunks[2], vsecond_last), hi_nibbles2)); + uint8x16_t roll3 = vqtbl1q_u8( + roll_lut, vaddq_u8(vceqq_u8(b->chunks[3], vsecond_last), hi_nibbles3)); b->chunks[0] = vaddq_u8(b->chunks[0], roll0); b->chunks[1] = vaddq_u8(b->chunks[1], roll1); b->chunks[2] = vaddq_u8(b->chunks[2], roll2); @@ -16363,6 +16945,8 @@ uint64_t compress_block(block64 *b, uint64_t mask, char *output) { return offsets >> 56; } +// The caller of this function is responsible to ensure that there are 64 bytes available +// from reading at src. The data is read into a block64 structure. 
void load_block(block64 *b, const char *src) { b->chunks[0] = vld1q_u8(reinterpret_cast(src)); b->chunks[1] = vld1q_u8(reinterpret_cast(src) + 16); @@ -16370,6 +16954,23 @@ void load_block(block64 *b, const char *src) { b->chunks[3] = vld1q_u8(reinterpret_cast(src) + 48); } +// The caller of this function is responsible to ensure that there are 32 bytes available +// from reading at data. It returns a 16-byte value, narrowing with saturation the 16-bit words. +inline uint8x16_t load_satured(const uint16_t *data) { + uint16x8_t in1 = vld1q_u16(data); + uint16x8_t in2 = vld1q_u16(data + 8); + return vqmovn_high_u16(vqmovn_u16(in1), in2); +} + +// The caller of this function is responsible to ensure that there are 128 bytes available +// from reading at src. The data is read into a block64 structure. +void load_block(block64 *b, const char16_t *src) { + b->chunks[0] = load_satured(reinterpret_cast(src)); + b->chunks[1] = load_satured(reinterpret_cast(src) + 16); + b->chunks[2] = load_satured(reinterpret_cast(src) + 32); + b->chunks[3] = load_satured(reinterpret_cast(src) + 48); +} + // decode 64 bytes and output 48 bytes void base64_decode_block(char *out, const char *src) { uint8x16x4_t str = vld4q_u8((uint8_t *)src); @@ -16382,36 +16983,48 @@ void base64_decode_block(char *out, const char *src) { vst3q_u8((uint8_t *)out, outvec); } -result compress_decode_base64(char *dst, const char *src, size_t srclen) { +template +result compress_decode_base64(char *dst, const char_type *src, size_t srclen, + base64_options options) { + const uint8_t *to_base64 = base64_url ? tables::base64::to_base64_url_value + : tables::base64::to_base64_value; + // skip trailing spaces + while (srclen > 0 && to_base64[uint8_t(src[srclen - 1])] == 64) { + srclen--; + } size_t equalsigns = 0; if (srclen > 0 && src[srclen - 1] == '=') { srclen--; equalsigns = 1; + // skip trailing spaces + while (srclen > 0 && to_base64[uint8_t(src[srclen - 1])] == 64) { + srclen--; + } if (srclen > 0 && src[srclen - 1] == '=') { srclen--; equalsigns = 2; } } - const char *const srcinit = src; + const char_type *const srcinit = src; const char *const dstinit = dst; - const char *const srcend = src + srclen; + const char_type *const srcend = src + srclen; constexpr size_t block_size = 10; char buffer[block_size * 64]; char *bufferptr = buffer; if (srclen >= 64) { - const char *const srcend64 = src + srclen - 64; + const char_type *const srcend64 = src + srclen - 64; while (src <= srcend64) { block64 b; load_block(&b, src); src += 64; bool error = false; - uint64_t badcharmask = to_base64_mask(&b, &error); + uint64_t badcharmask = to_base64_mask(&b, &error); + if(badcharmask) if (error) { src -= 64; - while (src < srcend && - tables::base64::to_base64_value[uint8_t(*src)] <= 64) { + while (src < srcend && to_base64[uint8_t(*src)] <= 64) { src++; } return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; @@ -16448,7 +17061,7 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { int last_block = (int)((bufferptr - buffer_start) % 64); if (last_block != 0 && srcend - src + last_block >= 64) { while ((bufferptr - buffer_start) % 64 != 0 && src < srcend) { - uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + uint8_t val = to_base64[uint8_t(*src)]; *bufferptr = char(val); if (val > 64) { return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; @@ -16492,7 +17105,7 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { int leftover = int(bufferptr - 
buffer_start); if (leftover > 0) { while (leftover < 4 && src < srcend) { - uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + uint8_t val = to_base64[uint8_t(*src)]; if (val > 64) { return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; } @@ -16533,15 +17146,27 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { } } if (src < srcend + equalsigns) { - result r = scalar::base64::base64_tail_decode(dst, src, srcend - src); + result r = + scalar::base64::base64_tail_decode(dst, src, srcend - src, options); if (r.error == error_code::INVALID_BASE64_CHARACTER) { r.count += size_t(src - srcinit); return r; } else { r.count += size_t(dst - dstinit); } + if(r.error == error_code::SUCCESS && equalsigns > 0) { + // additional checks + if((r.count % 3 == 0) || ((r.count % 3) + 1 + equalsigns != 4)) { + r.error = error_code::INVALID_BASE64_CHARACTER; + } + } return r; } + if(equalsigns > 0) { + if((size_t(dst - dstinit) % 3 == 0) || ((size_t(dst - dstinit) % 3) + 1 + equalsigns != 4)) { + return {INVALID_BASE64_CHARACTER, size_t(dst - dstinit)}; + } + } return {SUCCESS, size_t(dst - dstinit)}; } /* end file src/arm64/arm_base64.cpp */ @@ -18857,16 +19482,24 @@ simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(con return scalar::base64::maximal_binary_length_from_base64(input, length); } -simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { - return compress_decode_base64(output, input, length); +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept { + return (options & base64_url) ? compress_decode_base64(output, input, length, options) : compress_decode_base64(output, input, length, options); } -simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { - return scalar::base64::base64_length_from_binary(length); +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept { + return (options & base64_url) ? 
compress_decode_base64(output, input, length, options) : compress_decode_base64(output, input, length, options); } -size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { - return encode_base64(output, input, length); +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length, base64_options options) const noexcept { + return scalar::base64::base64_length_from_binary(length, options); +} + +size_t implementation::binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept { + return encode_base64(output, input, length, options); } @@ -19212,16 +19845,82 @@ simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(con return scalar::base64::maximal_binary_length_from_base64(input, length); } -simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { - return scalar::base64::base64_to_binary(input, length, output); +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept { + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + size_t equallocation = length; // location of the first padding character if any + size_t equalsigns = 0; + if(length > 0 && input[length - 1] == '=') { + length -= 1; + equalsigns++; + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + if(length > 0 && input[length - 1] == '=') { + equalsigns++; + length -= 1; + } + } + if(length == 0) { + if(equalsigns > 0) { + return {INVALID_BASE64_CHARACTER, equallocation}; + } + return {SUCCESS, 0}; + } + result r = scalar::base64::base64_tail_decode(output, input, length, options); + if(r.error == error_code::SUCCESS && equalsigns > 0) { + // additional checks + if((r.count % 3 == 0) || ((r.count % 3) + 1 + equalsigns != 4)) { + return {INVALID_BASE64_CHARACTER, equallocation}; + } + } + return r; } -simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { - return scalar::base64::base64_length_from_binary(length); +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); } -size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { - return scalar::base64::binary_to_base64(input, length, output); +simdutf_warn_unused result implementation::base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept { + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + size_t equallocation = length; // location of the first padding character if any + size_t equalsigns = 0; + if(length > 0 && input[length - 1] == '=') { + length -= 1; + equalsigns++; + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + if(length > 0 && input[length - 1] == '=') { + equalsigns++; + length -= 1; + } + } + if(length == 0) { + if(equalsigns > 0) { + return {INVALID_BASE64_CHARACTER, equallocation}; + } + return {SUCCESS, 0}; + } + result r = scalar::base64::base64_tail_decode(output, input, length, options); + if(r.error == error_code::SUCCESS && equalsigns > 0) { + // additional checks + if((r.count % 3 == 
0) || ((r.count % 3) + 1 + equalsigns != 4)) { + return {INVALID_BASE64_CHARACTER, equallocation}; + } + } + return r; +} + +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length, base64_options options) const noexcept { + return scalar::base64::base64_length_from_binary(length, options); +} + +size_t implementation::binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept { + return scalar::base64::tail_encode_base64(output, input, length, options); } } // namespace fallback } // namespace simdutf @@ -22145,14 +22844,17 @@ struct block64 { __m512i chunks[1]; }; -size_t encode_base64(char *dst, const char *src, size_t srclen) { +template +size_t encode_base64(char *dst, const char *src, size_t srclen, + base64_options options) { // credit: Wojciech Muła - const uint8_t *input = (const uint8_t *)src; uint8_t *out = (uint8_t *)dst; static const char *lookup_tbl = - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + base64_url + ? "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" + : "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; const __m512i shuffle_input = _mm512_setr_epi32( 0x01020001, 0x04050304, 0x07080607, 0x0a0b090a, 0x0d0e0c0d, 0x10110f10, @@ -22171,27 +22873,48 @@ size_t encode_base64(char *dst, const char *src, size_t srclen) { _mm512_storeu_si512(reinterpret_cast<__m512i *>(out), result); out += 64; } - return i / 3 * 4 + - scalar::base64::tail_encode_base64((char *)out, src + i, srclen - i); + return i / 3 * 4 + scalar::base64::tail_encode_base64((char *)out, src + i, + srclen - i, options); } +template static inline uint64_t to_base64_mask(block64 *b, bool *error) { __m512i input = b->chunks[0]; const __m512i ascii_space_tbl = _mm512_set_epi8( - 0, 0, 13, 0, 0, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 13, 0, 0, 10, 9, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 13, 0, 0, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, - 32, 0, 0, 13, 0, 0, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, 32); - __m512i lookup0 = _mm512_set_epi8( - -128, -128, -128, -128, -128, -128, 61, 60, 59, 58, 57, 56, 55, 54, 53, - 52, 63, -128, -128, -128, 62, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -64, -128, -128, -128, -128, -128, -128, -128, -128, - -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -64, -128, - -128, -64, -64, -128, -128, -128, -128, -128, -128, -128, -128, -64); - __m512i lookup1 = _mm512_set_epi8( - -128, -128, -128, -128, -128, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, - 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, -128, -128, - -128, -128, -128, -128, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, - 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -128); + 0, 0, 13, 12, 0, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 13, 12, 0, 10, 9, + 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 13, 12, 0, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, + 32, 0, 0, 13, 12, 0, 10, 9, 0, 0, 0, 0, 0, 0, 0, 0, 32); + __m512i lookup0; + if (base64_url) { + lookup0 = _mm512_set_epi8( + -128, -128, -128, -128, -128, -128, 61, 60, 59, 58, 57, 56, 55, 54, 53, + 52, -128, -128, 62, -128, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -1, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -1, + -128, -128, -1, -1, -128, -128, -128, -128, -128, -128, -128, -128, -1); + } else { + lookup0 = _mm512_set_epi8( + -128, -128, -128, -128, -128, -128, 61, 60, 59, 58, 57, 56, 55, 54, 53, + 52, 63, -128, -128, -128, 62, -128, -128, 
-128, -128, -128, -128, -128, + -128, -128, -128, -1, -128, -128, -128, -128, -128, -128, -128, -128, + -128, -128, -128, -128, -128, -128, -128, -128, -128, -128, -1, -128, + -128, -1, -1, -128, -128, -128, -128, -128, -128, -128, -128, -128); + } + __m512i lookup1; + if (base64_url) { + lookup1 = _mm512_set_epi8( + -128, -128, -128, -128, -128, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, + 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, -128, + 63, -128, -128, -128, -128, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, + 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -128); + } else { + lookup1 = _mm512_set_epi8( + -128, -128, -128, -128, -128, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, + 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, -128, + -128, -128, -128, -128, -128, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -128); + } + const __m512i translated = _mm512_permutex2var_epi8(lookup0, input, lookup1); const __m512i combined = _mm512_or_si512(translated, input); const __mmask64 mask = _mm512_movepi8_mask(combined); @@ -22216,10 +22939,22 @@ static inline uint64_t compress_block(block64 *b, uint64_t mask, char *output) { return _mm_popcnt_u64(nmask); } +// The caller of this function is responsible to ensure that there are 64 bytes available +// from reading at src. The data is read into a block64 structure. static inline void load_block(block64 *b, const char *src) { b->chunks[0] = _mm512_loadu_si512(reinterpret_cast(src)); } +// The caller of this function is responsible to ensure that there are 128 bytes available +// from reading at src. The data is read into a block64 structure. +static inline void load_block(block64 *b, const char16_t *src) { + __m512i m1 = _mm512_loadu_si512(reinterpret_cast(src)); + __m512i m2 = _mm512_loadu_si512(reinterpret_cast(src + 32)); + __m512i p = _mm512_packus_epi16(m1, m2); + b->chunks[0] = + _mm512_permutexvar_epi64(_mm512_setr_epi64(0, 2, 4, 6, 1, 3, 5, 7), p); +} + static inline void base64_decode(char *out, __m512i str) { const __m512i merge_ab_and_bc = _mm512_maddubs_epi16(str, _mm512_set1_epi32(0x01400140)); @@ -22244,36 +22979,47 @@ static inline void base64_decode_block(char *out, block64 *b) { base64_decode(out, b->chunks[0]); } -result compress_decode_base64(char *dst, const char *src, size_t srclen) { +template +result compress_decode_base64(char *dst, const chartype *src, size_t srclen, + base64_options options) { + const uint8_t *to_base64 = base64_url ? tables::base64::to_base64_url_value + : tables::base64::to_base64_value; size_t equalsigns = 0; + // skip trailing spaces + while (srclen > 0 && to_base64[uint8_t(src[srclen - 1])] == 64) { + srclen--; + } if (srclen > 0 && src[srclen - 1] == '=') { srclen--; equalsigns = 1; + // skip trailing spaces + while (srclen > 0 && to_base64[uint8_t(src[srclen - 1])] == 64) { + srclen--; + } if (srclen > 0 && src[srclen - 1] == '=') { srclen--; equalsigns = 2; } } - const char *const srcinit = src; + const chartype *const srcinit = src; const char *const dstinit = dst; - const char *const srcend = src + srclen; + const chartype *const srcend = src + srclen; // figure out why block_size == 2 is sometimes best??? 
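  // `buffer` stages one byte per base64 digit (the values produced by the
  // to_base64 table) with whitespace already squeezed out; it is flushed and
  // decoded in 64-byte groups, so block_size mainly bounds how much can be
  // staged on the stack between flushes.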
constexpr size_t block_size = 6; char buffer[block_size * 64]; char *bufferptr = buffer; if (srclen >= 64) { - const char *const srcend64 = src + srclen - 64; + const chartype *const srcend64 = src + srclen - 64; while (src <= srcend64) { block64 b; load_block(&b, src); src += 64; bool error = false; - uint64_t badcharmask = to_base64_mask(&b, &error); + uint64_t badcharmask = to_base64_mask(&b, &error); if (error) { src -= 64; - while (src < srcend && - tables::base64::to_base64_value[uint8_t(*src)] <= 64) { + while (src < srcend && to_base64[uint8_t(*src)] <= 64) { src++; } return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; @@ -22309,7 +23055,7 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { if (last_block != 0 && srcend - src + last_block >= 64) { while ((bufferptr - buffer_start) % 64 != 0 && src < srcend) { - uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + uint8_t val = to_base64[uint8_t(*src)]; *bufferptr = char(val); if (val > 64) { return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; @@ -22351,7 +23097,7 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { int leftover = int(bufferptr - buffer_start); if (leftover > 0) { while (leftover < 4 && src < srcend) { - uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + uint8_t val = to_base64[uint8_t(*src)]; if (val > 64) { return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; } @@ -22392,15 +23138,27 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { } } if (src < srcend + equalsigns) { - result r = scalar::base64::base64_tail_decode(dst, src, srcend - src); + result r = + scalar::base64::base64_tail_decode(dst, src, srcend - src, options); if (r.error == error_code::INVALID_BASE64_CHARACTER) { r.count += size_t(src - srcinit); return r; } else { r.count += size_t(dst - dstinit); } + if(r.error == error_code::SUCCESS && equalsigns > 0) { + // additional checks + if((r.count % 3 == 0) || ((r.count % 3) + 1 + equalsigns != 4)) { + r.error = error_code::INVALID_BASE64_CHARACTER; + } + } return r; } + if(equalsigns > 0) { + if((size_t(dst - dstinit) % 3 == 0) || ((size_t(dst - dstinit) % 3) + 1 + equalsigns != 4)) { + return {INVALID_BASE64_CHARACTER, size_t(dst - dstinit)}; + } + } return {SUCCESS, size_t(dst - dstinit)}; } /* end file src/icelake/icelake_base64.inl.cpp */ @@ -23736,16 +24494,29 @@ simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(con return scalar::base64::maximal_binary_length_from_base64(input, length); } -simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { - return compress_decode_base64(output, input, length); +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept { + return (options & base64_url) ? compress_decode_base64(output, input, length, options) : compress_decode_base64(output, input, length, options); +} + +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept { + return (options & base64_url) ? 
compress_decode_base64(output, input, length, options) : compress_decode_base64(output, input, length, options); } -simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { - return scalar::base64::base64_length_from_binary(length); + +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length, base64_options options) const noexcept { + return scalar::base64::base64_length_from_binary(length, options); } -size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { - return encode_base64(output, input, length); +size_t implementation::binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept { + if(options & base64_url) { + return encode_base64(output, input, length, options); + } else { + return encode_base64(output, input, length, options); + } } } // namespace icelake @@ -26325,23 +27096,35 @@ size_t convert_masked_utf8_to_latin1(const char *input, * https://www.codeproject.com/Articles/276993/Base-Encoding-on-a-GPU. (2013). */ -__m256i lookup_pshufb_improved(const __m256i input) { +template +simdutf_really_inline __m256i lookup_pshufb_improved(const __m256i input) { // credit: Wojciech Muła __m256i result = _mm256_subs_epu8(input, _mm256_set1_epi8(51)); const __m256i less = _mm256_cmpgt_epi8(_mm256_set1_epi8(26), input); result = _mm256_or_si256(result, _mm256_and_si256(less, _mm256_set1_epi8(13))); - const __m256i shift_LUT = _mm256_setr_epi8( - 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, - '0' - 52, '0' - 52, '0' - 52, '0' - 52, '+' - 62, '/' - 63, 'A', 0, 0, + __m256i shift_LUT; + if (base64_url) { + shift_LUT = _mm256_setr_epi8( + 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '0' - 52, '0' - 52, '0' - 52, '-' - 62, '_' - 63, 'A', 0, 0, + + 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '0' - 52, '0' - 52, '0' - 52, '-' - 62, '_' - 63, 'A', 0, 0); + } else { + shift_LUT = _mm256_setr_epi8( + 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '0' - 52, '0' - 52, '0' - 52, '+' - 62, '/' - 63, 'A', 0, 0, - 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, - '0' - 52, '0' - 52, '0' - 52, '0' - 52, '+' - 62, '/' - 63, 'A', 0, 0); + 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '0' - 52, '0' - 52, '0' - 52, '+' - 62, '/' - 63, 'A', 0, 0); + } result = _mm256_shuffle_epi8(shift_LUT, result); return _mm256_add_epi8(result, input); } +template size_t encode_base64(char *dst, const char *src, size_t srclen) { // credit: Wojciech Muła const uint8_t *input = (const uint8_t *)src; @@ -26409,18 +27192,18 @@ size_t encode_base64(char *dst, const char *src, size_t srclen) { const __m256i input3 = _mm256_or_si256(t1_3, t3_3); _mm256_storeu_si256(reinterpret_cast<__m256i *>(out), - lookup_pshufb_improved(input0)); + lookup_pshufb_improved(input0)); out += 32; _mm256_storeu_si256(reinterpret_cast<__m256i *>(out), - lookup_pshufb_improved(input1)); + lookup_pshufb_improved(input1)); out += 32; _mm256_storeu_si256(reinterpret_cast<__m256i *>(out), - lookup_pshufb_improved(input2)); + lookup_pshufb_improved(input2)); out += 32; _mm256_storeu_si256(reinterpret_cast<__m256i *>(out), - lookup_pshufb_improved(input3)); + lookup_pshufb_improved(input3)); out += 32; } for (; i + 28 <= srclen; i += 24) { @@ -26444,11 +27227,11 @@ size_t encode_base64(char *dst, const char 
*src, size_t srclen) { const __m256i indices = _mm256_or_si256(t1, t3); _mm256_storeu_si256(reinterpret_cast<__m256i *>(out), - lookup_pshufb_improved(indices)); + lookup_pshufb_improved(indices)); out += 32; } - return i / 3 * 4 + - scalar::base64::tail_encode_base64((char *)out, src + i, srclen - i); + return i / 3 * 4 + scalar::base64::tail_encode_base64((char *)out, src + i, + srclen - i, options); } static inline void compress(__m128i data, uint16_t mask, char *output) { @@ -26499,43 +27282,83 @@ struct block64 { __m256i chunks[2]; }; +template static inline uint32_t to_base64_mask(__m256i *src, bool *error) { const __m256i ascii_space_tbl = _mm256_setr_epi8(0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9, 0xa, - 0x0, 0x0, 0xd, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, - 0x0, 0x0, 0x0, 0x9, 0xa, 0x0, 0x0, 0xd, 0x0, 0x0); + 0x0, 0xc, 0xd, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x9, 0xa, 0x0, 0xc, 0xd, 0x0, 0x0); // credit: aqrit - const __m256i delta_asso = _mm256_setr_epi8( - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x0F, 0x00, 0x0F, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x0F); - const __m256i delta_values = _mm256_setr_epi8( - int8_t(0x00), int8_t(0x00), int8_t(0x00), int8_t(0x13), int8_t(0x04), - int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), int8_t(0xB9), int8_t(0x00), - int8_t(0x10), int8_t(0xC3), int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), - int8_t(0xB9), int8_t(0x00), int8_t(0x00), int8_t(0x00), int8_t(0x13), - int8_t(0x04), int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), int8_t(0xB9), - int8_t(0x00), int8_t(0x10), int8_t(0xC3), int8_t(0xBF), int8_t(0xBF), - int8_t(0xB9), int8_t(0xB9)); - const __m256i check_asso = _mm256_setr_epi8( - 0x0D, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x03, 0x07, - 0x0B, 0x0B, 0x0B, 0x0F, 0x0D, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x03, 0x07, 0x0B, 0x0B, 0x0B, 0x0F); - const __m256i check_values = _mm256_setr_epi8( - int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0xCF), - int8_t(0xBF), int8_t(0xD5), int8_t(0xA6), int8_t(0xB5), int8_t(0x86), - int8_t(0xD1), int8_t(0x80), int8_t(0xB1), int8_t(0x80), int8_t(0x91), - int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), - int8_t(0xCF), int8_t(0xBF), int8_t(0xD5), int8_t(0xA6), int8_t(0xB5), - int8_t(0x86), int8_t(0xD1), int8_t(0x80), int8_t(0xB1), int8_t(0x80), - int8_t(0x91), int8_t(0x80)); - const __m256i shifted = _mm256_srli_epi32(*src, 3); + __m256i delta_asso; + if (base64_url) { + delta_asso = + _mm256_setr_epi8(0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, + 0x0, 0x0, 0xF, 0x0, 0xF, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, + 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0xF, 0x0, 0xF); + } else { + delta_asso = _mm256_setr_epi8( + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x0F, 0x00, 0x0F, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x0F); + } + + __m256i delta_values; + if (base64_url) { + delta_values = _mm256_setr_epi8( + 0x0, 0x0, 0x0, 0x13, 0x4, uint8_t(0xBF), uint8_t(0xBF), uint8_t(0xB9), + uint8_t(0xB9), 0x0, 0x11, uint8_t(0xC3), uint8_t(0xBF), uint8_t(0xE0), + uint8_t(0xB9), uint8_t(0xB9), 0x0, 0x0, 0x0, 0x13, 0x4, uint8_t(0xBF), + uint8_t(0xBF), uint8_t(0xB9), uint8_t(0xB9), 0x0, 0x11, uint8_t(0xC3), + uint8_t(0xBF), uint8_t(0xE0), uint8_t(0xB9), uint8_t(0xB9)); + } else { + delta_values = _mm256_setr_epi8( + int8_t(0x00), int8_t(0x00), int8_t(0x00), 
int8_t(0x13), int8_t(0x04), + int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), int8_t(0xB9), int8_t(0x00), + int8_t(0x10), int8_t(0xC3), int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), + int8_t(0xB9), int8_t(0x00), int8_t(0x00), int8_t(0x00), int8_t(0x13), + int8_t(0x04), int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), int8_t(0xB9), + int8_t(0x00), int8_t(0x10), int8_t(0xC3), int8_t(0xBF), int8_t(0xBF), + int8_t(0xB9), int8_t(0xB9)); + } + __m256i check_asso; + + if (base64_url) { + check_asso = + _mm256_setr_epi8(0xD, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x3, + 0x7, 0xB, 0x6, 0xB, 0x12, 0xD, 0x1, 0x1, 0x1, 0x1, 0x1, + 0x1, 0x1, 0x1, 0x1, 0x3, 0x7, 0xB, 0x6, 0xB, 0x12); + } else { + check_asso = _mm256_setr_epi8( + 0x0D, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x03, 0x07, + 0x0B, 0x0B, 0x0B, 0x0F, 0x0D, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x03, 0x07, 0x0B, 0x0B, 0x0B, 0x0F); + } + __m256i check_values; + if (base64_url) { + check_values = _mm256_setr_epi8( + 0x0, uint8_t(0x80), uint8_t(0x80), uint8_t(0x80), uint8_t(0xCF), + uint8_t(0xBF), uint8_t(0xD3), uint8_t(0xA6), uint8_t(0xB5), + uint8_t(0x86), uint8_t(0xD0), uint8_t(0x80), uint8_t(0xB0), + uint8_t(0x80), 0x0, 0x0, 0x0, uint8_t(0x80), uint8_t(0x80), + uint8_t(0x80), uint8_t(0xCF), uint8_t(0xBF), uint8_t(0xD3), + uint8_t(0xA6), uint8_t(0xB5), uint8_t(0x86), uint8_t(0xD0), + uint8_t(0x80), uint8_t(0xB0), uint8_t(0x80), 0x0, 0x0); + } else { + check_values = _mm256_setr_epi8( + int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0xCF), + int8_t(0xBF), int8_t(0xD5), int8_t(0xA6), int8_t(0xB5), int8_t(0x86), + int8_t(0xD1), int8_t(0x80), int8_t(0xB1), int8_t(0x80), int8_t(0x91), + int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), + int8_t(0xCF), int8_t(0xBF), int8_t(0xD5), int8_t(0xA6), int8_t(0xB5), + int8_t(0x86), int8_t(0xD1), int8_t(0x80), int8_t(0xB1), int8_t(0x80), + int8_t(0x91), int8_t(0x80)); + } + const __m256i shifted = _mm256_srli_epi32(*src, 3); const __m256i delta_hash = _mm256_avg_epu8(_mm256_shuffle_epi8(delta_asso, *src), shifted); const __m256i check_hash = _mm256_avg_epu8(_mm256_shuffle_epi8(check_asso, *src), shifted); - const __m256i out = _mm256_adds_epi8(_mm256_shuffle_epi8(delta_values, delta_hash), *src); const __m256i chk = @@ -26549,10 +27372,12 @@ static inline uint32_t to_base64_mask(__m256i *src, bool *error) { *src = out; return (uint32_t)mask; } + +template static inline uint64_t to_base64_mask(block64 *b, bool *error) { *error = 0; - uint64_t m0 = to_base64_mask(&b->chunks[0], error); - uint64_t m1 = to_base64_mask(&b->chunks[1], error); + uint64_t m0 = to_base64_mask(&b->chunks[0], error); + uint64_t m1 = to_base64_mask(&b->chunks[1], error); return m0 | (m1 << 32); } @@ -26569,12 +27394,29 @@ static inline uint64_t compress_block(block64 *b, uint64_t mask, char *output) { return _mm_popcnt_u64(nmask); } +// The caller of this function is responsible to ensure that there are 64 bytes available +// from reading at src. The data is read into a block64 structure. static inline void load_block(block64 *b, const char *src) { b->chunks[0] = _mm256_loadu_si256(reinterpret_cast(src)); b->chunks[1] = _mm256_loadu_si256(reinterpret_cast(src + 32)); } +// The caller of this function is responsible to ensure that there are 128 bytes available +// from reading at src. The data is read into a block64 structure. 
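+// Each UTF-16 unit is narrowed with unsigned saturation (_mm256_packus_epi16),
+// so any unit above 0xFF becomes 0xFF and is later rejected as an invalid
+// base64 character; the 128-bit-lane permutes keep the packed bytes in source order.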
+static inline void load_block(block64 *b, const char16_t *src) { + __m256i m1 = _mm256_loadu_si256(reinterpret_cast(src)); + __m256i m2 = _mm256_loadu_si256(reinterpret_cast(src + 16)); + __m256i m3 = _mm256_loadu_si256(reinterpret_cast(src + 32)); + __m256i m4 = _mm256_loadu_si256(reinterpret_cast(src + 48)); + __m256i m1p = _mm256_permute2x128_si256(m1, m2, 0x20); + __m256i m2p = _mm256_permute2x128_si256(m1, m2, 0x31); + __m256i m3p = _mm256_permute2x128_si256(m3, m4, 0x20); + __m256i m4p = _mm256_permute2x128_si256(m3, m4, 0x31); + b->chunks[0] = _mm256_packus_epi16(m1p, m2p); + b->chunks[1] = _mm256_packus_epi16(m3p, m4p); +} + static inline void base64_decode(char *out, __m256i str) { // credit: aqrit const __m256i pack_shuffle = @@ -26614,11 +27456,26 @@ static inline void base64_decode_block_safe(char *out, block64 *b) { std::memcpy(out + 24, buffer, 24); } -result compress_decode_base64(char *dst, const char *src, size_t srclen) { +template +result compress_decode_base64(char *dst, const chartype *src, size_t srclen, + base64_options options) { + const uint8_t *to_base64 = base64_url ? tables::base64::to_base64_url_value + : tables::base64::to_base64_value; + // skip trailing spaces + while (srclen > 0 && to_base64[uint8_t(src[srclen - 1])] == 64) { + srclen--; + } size_t equalsigns = 0; if (srclen > 0 && src[srclen - 1] == '=') { srclen--; equalsigns = 1; + // skip trailing spaces + while (srclen > 0 && to_base64[uint8_t(src[srclen - 1])] == 64) { + srclen--; + } + while (srclen > 0 && to_base64[uint8_t(src[srclen - 1])] == 64) { + srclen--; + } if (srclen > 0 && src[srclen - 1] == '=') { srclen--; equalsigns = 2; @@ -26627,26 +27484,25 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { char *end_of_safe_64byte_zone = (srclen + 3) / 4 * 3 >= 63 ? 
dst + (srclen + 3) / 4 * 3 - 63 : dst; - const char *const srcinit = src; + const chartype *const srcinit = src; const char *const dstinit = dst; - const char *const srcend = src + srclen; + const chartype *const srcend = src + srclen; constexpr size_t block_size = 6; static_assert(block_size >= 2, "block_size must be at least two"); char buffer[block_size * 64]; char *bufferptr = buffer; if (srclen >= 64) { - const char *const srcend64 = src + srclen - 64; + const chartype *const srcend64 = src + srclen - 64; while (src <= srcend64) { block64 b; load_block(&b, src); src += 64; bool error = false; - uint64_t badcharmask = to_base64_mask(&b, &error); + uint64_t badcharmask = to_base64_mask(&b, &error); if (error) { src -= 64; - while (src < srcend && - tables::base64::to_base64_value[uint8_t(*src)] <= 64) { + while (src < srcend && to_base64[uint8_t(*src)] <= 64) { src++; } return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; @@ -26692,7 +27548,7 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { if (last_block != 0 && srcend - src + last_block >= 64) { while ((bufferptr - buffer_start) % 64 != 0 && src < srcend) { - uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + uint8_t val = to_base64[uint8_t(*src)]; *bufferptr = char(val); if (val > 64) { return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; @@ -26740,7 +27596,7 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { int leftover = int(bufferptr - buffer_start); if (leftover > 0) { while (leftover < 4 && src < srcend) { - uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + uint8_t val = to_base64[uint8_t(*src)]; if (val > 64) { return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; } @@ -26780,15 +27636,27 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { } } if (src < srcend + equalsigns) { - result r = scalar::base64::base64_tail_decode(dst, src, srcend - src); + result r = + scalar::base64::base64_tail_decode(dst, src, srcend - src, options); if (r.error == error_code::INVALID_BASE64_CHARACTER) { r.count += size_t(src - srcinit); return r; } else { r.count += size_t(dst - dstinit); } + if(r.error == error_code::SUCCESS && equalsigns > 0) { + // additional checks + if((r.count % 3 == 0) || ((r.count % 3) + 1 + equalsigns != 4)) { + r.error = error_code::INVALID_BASE64_CHARACTER; + } + } return r; } + if(equalsigns > 0) { + if((size_t(dst - dstinit) % 3 == 0) || ((size_t(dst - dstinit) % 3) + 1 + equalsigns != 4)) { + return {INVALID_BASE64_CHARACTER, size_t(dst - dstinit)}; + } + } return {SUCCESS, size_t(dst - dstinit)}; } /* end file src/haswell/avx2_base64.cpp */ @@ -29112,16 +29980,28 @@ simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(con return scalar::base64::maximal_binary_length_from_base64(input, length); } -simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { - return compress_decode_base64(output, input, length); +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept { + return (options & base64_url) ? 
compress_decode_base64(output, input, length, options) : compress_decode_base64(output, input, length, options); +} + +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept { + return (options & base64_url) ? compress_decode_base64(output, input, length, options) : compress_decode_base64(output, input, length, options); } -simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { - return scalar::base64::base64_length_from_binary(length); +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length, base64_options options) const noexcept { + return scalar::base64::base64_length_from_binary(length, options); } -size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { - return encode_base64(output, input, length); +size_t implementation::binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept { + if(options & base64_url) { + return encode_base64(output, input, length); + } else { + return encode_base64(output, input, length); + } } } // namespace haswell } // namespace simdutf @@ -30659,16 +31539,84 @@ simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(con return scalar::base64::maximal_binary_length_from_base64(input, length); } -simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { - return scalar::base64::base64_to_binary(input, length, output); +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept { + // skip trailing spaces + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + size_t equallocation = length; // location of the first padding character if any + size_t equalsigns = 0; + if(length > 0 && input[length - 1] == '=') { + length -= 1; + equalsigns++; + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + if(length > 0 && input[length - 1] == '=') { + equalsigns++; + length -= 1; + } + } + if(length == 0) { + if(equalsigns > 0) { + return {INVALID_BASE64_CHARACTER, equallocation};; + } + return {SUCCESS, 0}; + } + result r = scalar::base64::base64_tail_decode(output, input, length, options); + if(r.error == error_code::SUCCESS && equalsigns > 0) { + // additional checks + if((r.count % 3 == 0) || ((r.count % 3) + 1 + equalsigns != 4)) { + return {INVALID_BASE64_CHARACTER, equallocation}; + } + } + return r; +} + +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept { + // skip trailing spaces + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + size_t equallocation = length; // location of the first padding character if any + size_t equalsigns = 0; + if(length > 0 && input[length - 1] 
== '=') { + length -= 1; + equalsigns++; + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + if(length > 0 && input[length - 1] == '=') { + equalsigns++; + length -= 1; + } + } + if(length == 0) { + if(equalsigns > 0) { + return {INVALID_BASE64_CHARACTER, equallocation}; + } + return {SUCCESS, 0}; + } + result r = scalar::base64::base64_tail_decode(output, input, length, options); + if(r.error == error_code::SUCCESS && equalsigns > 0) { + // additional checks + if((r.count % 3 == 0) || ((r.count % 3) + 1 + equalsigns != 4)) { + return {INVALID_BASE64_CHARACTER, equallocation}; + } + } + return r; } -simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { - return scalar::base64::base64_length_from_binary(length); +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length, base64_options options) const noexcept { + return scalar::base64::base64_length_from_binary(length, options); } -size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { - return scalar::base64::binary_to_base64(input, length, output); +size_t implementation::binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept { + return scalar::base64::binary_to_base64(input, length, output, options); } } // namespace ppc64 } // namespace simdutf @@ -30888,15 +31836,14 @@ simdutf_warn_unused result implementation::validate_ascii_with_errors(const char * first invalid one, but never overestimating. */ simdutf_really_inline static size_t rvv_count_valid_utf8(const char *src, size_t len) { const char *beg = src; - size_t tail = 32; // minimum of 3 - if (len < tail) return 0; + if (len < 32) return 0; /* validate first three bytes */ { - size_t idx = tail; + size_t idx = 3; while (idx < len && (src[idx] >> 6) == 0b10) ++idx; - if (idx > tail + 3 || !scalar::utf8::validate(src, idx)) + if (idx > 3+3 || !scalar::utf8::validate(src, idx)) return 0; } @@ -30908,21 +31855,26 @@ simdutf_really_inline static size_t rvv_count_valid_utf8(const char *src, size_t const vuint8m1_t err2tbl = __riscv_vreinterpret_v_u64m1_u8m1(__riscv_vle64_v_u64m1(err2m, 2)); const vuint8m1_t err3tbl = __riscv_vreinterpret_v_u64m1_u8m1(__riscv_vle64_v_u64m1(err3m, 2)); + size_t tail = 3; size_t n = len - tail; for (size_t vl; n > 0; n -= vl, src += vl) { vl = __riscv_vsetvl_e8m4(n); vuint8m4_t v0 = __riscv_vle8_v_u8m4((uint8_t const*)src, vl); + uint8_t next0 = src[vl+0]; + uint8_t next1 = src[vl+1]; + uint8_t next2 = src[vl+2]; + /* fast path: ASCII */ - if (__riscv_vfirst(__riscv_vmsgtu(v0, 0b01111111, vl), vl) < 0) + if (__riscv_vfirst_m_b2(__riscv_vmsgtu_vx_u8m4_b2(v0, 0b01111111, vl), vl) < 0 && (next0|next1|next2) < 0b10000000) continue; /* see "Validating UTF-8 In Less Than One Instruction Per Byte" * https://arxiv.org/abs/2010.03090 */ - vuint8m4_t v1 = __riscv_vslide1down_vx_u8m4(v0, src[vl+0], vl); - vuint8m4_t v2 = __riscv_vslide1down_vx_u8m4(v1, src[vl+1], vl); - vuint8m4_t v3 = __riscv_vslide1down_vx_u8m4(v2, src[vl+2], vl); + vuint8m4_t v1 = __riscv_vslide1down_vx_u8m4(v0, next0, vl); + vuint8m4_t v2 = __riscv_vslide1down_vx_u8m4(v1, next1, vl); + vuint8m4_t v3 = __riscv_vslide1down_vx_u8m4(v2, next2, vl); vuint8m4_t s1 = __riscv_vreinterpret_v_u16m4_u8m4(__riscv_vsrl_vx_u16m4(__riscv_vreinterpret_v_u8m4_u16m4(v2), 4, __riscv_vsetvlmax_e16m4())); vuint8m4_t s3 = 
__riscv_vreinterpret_v_u16m4_u8m4(__riscv_vsrl_vx_u16m4(__riscv_vreinterpret_v_u8m4_u16m4(v3), 4, __riscv_vsetvlmax_e16m4())); @@ -31128,18 +32080,18 @@ simdutf_really_inline static size_t rvv_utf8_to_common(char const *src, size_t l : scalar::utf8_to_utf32::convert(in, count, (char32_t*)out); }; - size_t tail = 32; // the minimum value is 3 - if (len < tail) return scalar(src, len, dst); + if (len < 32) return scalar(src, len, dst); /* validate first three bytes */ if (validate) { - size_t idx = tail; + size_t idx = 3; while (idx < len && (src[idx] >> 6) == 0b10) ++idx; - if (idx > tail + 3 || !scalar::utf8::validate(src, idx)) + if (idx > 3+3 || !scalar::utf8::validate(src, idx)) return 0; } + size_t tail = 3; size_t n = len - tail; Tdst *beg = dst; @@ -31160,8 +32112,12 @@ simdutf_really_inline static size_t rvv_utf8_to_common(char const *src, size_t l vuint8m2_t v0 = __riscv_vle8_v_u8m2((uint8_t const*)src, vl); uint64_t max = __riscv_vmv_x_s_u8m1_u8(__riscv_vredmaxu_vs_u8m2_u8m1(v0, __riscv_vmv_s_x_u8m1(0, vl), vl)); + uint8_t next0 = src[vl+0]; + uint8_t next1 = src[vl+1]; + uint8_t next2 = src[vl+2]; + /* fast path: ASCII */ - if (max < 0b10000000) { + if ((max|next0|next1|next2) < 0b10000000) { vlOut = vl; if (is16) __riscv_vse16_v_u16m4((uint16_t*)dst, simdutf_byteflip(__riscv_vzext_vf2_u16m4(v0, vlOut), vlOut), vlOut); else __riscv_vse32_v_u32m8((uint32_t*)dst, __riscv_vzext_vf4_u32m8(v0, vlOut), vlOut); @@ -31170,9 +32126,9 @@ simdutf_really_inline static size_t rvv_utf8_to_common(char const *src, size_t l /* see "Validating UTF-8 In Less Than One Instruction Per Byte" * https://arxiv.org/abs/2010.03090 */ - vuint8m2_t v1 = __riscv_vslide1down_vx_u8m2(v0, src[vl+0], vl); - vuint8m2_t v2 = __riscv_vslide1down_vx_u8m2(v1, src[vl+1], vl); - vuint8m2_t v3 = __riscv_vslide1down_vx_u8m2(v2, src[vl+2], vl); + vuint8m2_t v1 = __riscv_vslide1down_vx_u8m2(v0, next0, vl); + vuint8m2_t v2 = __riscv_vslide1down_vx_u8m2(v1, next1, vl); + vuint8m2_t v3 = __riscv_vslide1down_vx_u8m2(v2, next2, vl); if (validate) { vuint8m2_t s1 = __riscv_vreinterpret_v_u16m2_u8m2(__riscv_vsrl_vx_u16m2(__riscv_vreinterpret_v_u8m2_u16m2(v2), 4, __riscv_vsetvlmax_e16m2())); @@ -31981,16 +32937,83 @@ simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(con return scalar::base64::maximal_binary_length_from_base64(input, length); } -simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { - return scalar::base64::base64_to_binary(input, length, output); +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept { + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + size_t equallocation = length; // location of the first padding character if any + size_t equalsigns = 0; + if(length > 0 && input[length - 1] == '=') { + length -= 1; + equalsigns++; + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + if(length > 0 && input[length - 1] == '=') { + equalsigns++; + length -= 1; + } + } + if(length == 0) { + if(equalsigns > 0) { + return {INVALID_BASE64_CHARACTER, equallocation}; + } + return {SUCCESS, 0}; + } + result r = scalar::base64::base64_tail_decode(output, input, length, options); + if(r.error == error_code::SUCCESS && equalsigns > 0) { + // additional checks + if((r.count % 3 == 0) || ((r.count % 3) + 1 + equalsigns != 4)) { + return 
{INVALID_BASE64_CHARACTER, equallocation}; + } + } + return r; +} + + +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); +} + +simdutf_warn_unused result implementation::base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept { + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + size_t equallocation = length; // location of the first padding character if any + auto equalsigns = 0; + if(length > 0 && input[length - 1] == '=') { + length -= 1; + equalsigns++; + while(length > 0 && scalar::base64::is_ascii_white_space(input[length - 1])) { + length--; + } + if(length > 0 && input[length - 1] == '=') { + equalsigns++; + length -= 1; + } + } + if(length == 0) { + if(equalsigns > 0) { + return {INVALID_BASE64_CHARACTER, equallocation}; + } + return {SUCCESS, 0}; + } + result r = scalar::base64::base64_tail_decode(output, input, length, options); + if(r.error == error_code::SUCCESS && equalsigns > 0) { + // additional checks + if((r.count % 3 == 0) || ((r.count % 3) + 1 + equalsigns != 4)) { + return {INVALID_BASE64_CHARACTER, equallocation}; + } + } + return r; } -simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { - return scalar::base64::base64_length_from_binary(length); +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length, base64_options options) const noexcept { + return scalar::base64::base64_length_from_binary(length, options); } -size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { - return scalar::base64::binary_to_base64(input, length, output); +size_t implementation::binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept { + return scalar::base64::tail_encode_base64(output, input, length, options); } } // namespace rvv } // namespace simdutf @@ -34557,8 +35580,7 @@ std::pair sse_convert_utf32_to_utf16_with_errors(const char32 * Nick Kopp. 2013. Base64 Encoding on a GPU. * https://www.codeproject.com/Articles/276993/Base-Encoding-on-a-GPU. (2013). */ - -__m128i lookup_pshufb_improved(const __m128i input) { +template __m128i lookup_pshufb_improved(const __m128i input) { // credit: Wojciech Muła // reduce 0..51 -> 0 // 52..61 -> 1 .. 
10 @@ -34572,9 +35594,16 @@ __m128i lookup_pshufb_improved(const __m128i input) { const __m128i less = _mm_cmpgt_epi8(_mm_set1_epi8(26), input); result = _mm_or_si128(result, _mm_and_si128(less, _mm_set1_epi8(13))); - const __m128i shift_LUT = _mm_setr_epi8( - 'a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, - '0' - 52, '0' - 52, '0' - 52, '0' - 52, '+' - 62, '/' - 63, 'A', 0, 0); + __m128i shift_LUT; + if (base64_url) { + shift_LUT = _mm_setr_epi8('a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '-' - 62, '_' - 63, 'A', 0, 0); + } else { + shift_LUT = _mm_setr_epi8('a' - 26, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '0' - 52, '0' - 52, '0' - 52, '0' - 52, + '0' - 52, '+' - 62, '/' - 63, 'A', 0, 0); + } // read shift result = _mm_shuffle_epi8(shift_LUT, result); @@ -34582,6 +35611,7 @@ __m128i lookup_pshufb_improved(const __m128i input) { return _mm_add_epi8(result, input); } +template size_t encode_base64(char *dst, const char *src, size_t srclen) { // credit: Wojciech Muła // SSE (lookup: pshufb improved unrolled) @@ -34633,19 +35663,19 @@ size_t encode_base64(char *dst, const char *src, size_t srclen) { const __m128i input3 = _mm_or_si128(t1_3, t3_3); _mm_storeu_si128(reinterpret_cast<__m128i *>(out), - lookup_pshufb_improved(input0)); + lookup_pshufb_improved(input0)); out += 16; _mm_storeu_si128(reinterpret_cast<__m128i *>(out), - lookup_pshufb_improved(input1)); + lookup_pshufb_improved(input1)); out += 16; _mm_storeu_si128(reinterpret_cast<__m128i *>(out), - lookup_pshufb_improved(input2)); + lookup_pshufb_improved(input2)); out += 16; _mm_storeu_si128(reinterpret_cast<__m128i *>(out), - lookup_pshufb_improved(input3)); + lookup_pshufb_improved(input3)); out += 16; } for (; i + 16 <= srclen; i += 12) { @@ -34685,12 +35715,12 @@ size_t encode_base64(char *dst, const char *src, size_t srclen) { const __m128i indices = _mm_or_si128(t1, t3); _mm_storeu_si128(reinterpret_cast<__m128i *>(out), - lookup_pshufb_improved(indices)); + lookup_pshufb_improved(indices)); out += 16; } - return i / 3 * 4 + - scalar::base64::tail_encode_base64((char *)out, src + i, srclen - i); + return i / 3 * 4 + scalar::base64::tail_encode_base64((char *)out, src + i, + srclen - i, options); } static inline void compress(__m128i data, uint16_t mask, char *output) { if (mask == 0) { @@ -34730,27 +35760,59 @@ struct block64 { __m128i chunks[4]; }; +template static inline uint16_t to_base64_mask(__m128i *src, bool *error) { const __m128i ascii_space_tbl = _mm_setr_epi8(0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9, 0xa, 0x0, - 0x0, 0xd, 0x0, 0x0); + 0xc, 0xd, 0x0, 0x0); // credit: aqrit - const __m128i delta_asso = - _mm_setr_epi8(0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x0F, 0x00, 0x0F); - const __m128i delta_values = - _mm_setr_epi8(int8_t(0x00), int8_t(0x00), int8_t(0x00), int8_t(0x13), - int8_t(0x04), int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), - int8_t(0xB9), int8_t(0x00), int8_t(0x10), int8_t(0xC3), - int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), int8_t(0xB9)); - const __m128i check_asso = - _mm_setr_epi8(0x0D, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x03, 0x07, 0x0B, 0x0B, 0x0B, 0x0F); - const __m128i check_values = - _mm_setr_epi8(int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), - int8_t(0xCF), int8_t(0xBF), int8_t(0xD5), int8_t(0xA6), - int8_t(0xB5), int8_t(0x86), int8_t(0xD1), int8_t(0x80), - int8_t(0xB1), int8_t(0x80), int8_t(0x91), int8_t(0x80)); + 
__m128i delta_asso; + if (base64_url) { + delta_asso = _mm_setr_epi8(0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x0, 0x0, + 0x0, 0x0, 0x0, 0xF, 0x0, 0xF); + } else { + + delta_asso = _mm_setr_epi8(0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x00, 0x0F); + } + __m128i delta_values; + if (base64_url) { + delta_values = _mm_setr_epi8(0x0, 0x0, 0x0, 0x13, 0x4, uint8_t(0xBF), + uint8_t(0xBF), uint8_t(0xB9), uint8_t(0xB9), + 0x0, 0x11, uint8_t(0xC3), uint8_t(0xBF), + uint8_t(0xE0), uint8_t(0xB9), uint8_t(0xB9)); + } else { + + delta_values = + _mm_setr_epi8(int8_t(0x00), int8_t(0x00), int8_t(0x00), int8_t(0x13), + int8_t(0x04), int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), + int8_t(0xB9), int8_t(0x00), int8_t(0x10), int8_t(0xC3), + int8_t(0xBF), int8_t(0xBF), int8_t(0xB9), int8_t(0xB9)); + } + __m128i check_asso; + if (base64_url) { + check_asso = _mm_setr_epi8(0xD, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, + 0x3, 0x7, 0xB, 0x6, 0xB, 0x12); + } else { + + check_asso = _mm_setr_epi8(0x0D, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x03, 0x07, 0x0B, 0x0B, 0x0B, 0x0F); + } + __m128i check_values; + if (base64_url) { + check_values = _mm_setr_epi8(0x0, uint8_t(0x80), uint8_t(0x80), + uint8_t(0x80), uint8_t(0xCF), uint8_t(0xBF), + uint8_t(0xD3), uint8_t(0xA6), uint8_t(0xB5), + uint8_t(0x86), uint8_t(0xD0), uint8_t(0x80), + uint8_t(0xB0), uint8_t(0x80), 0x0, 0x0); + } else { + + check_values = + _mm_setr_epi8(int8_t(0x80), int8_t(0x80), int8_t(0x80), int8_t(0x80), + int8_t(0xCF), int8_t(0xBF), int8_t(0xD5), int8_t(0xA6), + int8_t(0xB5), int8_t(0x86), int8_t(0xD1), int8_t(0x80), + int8_t(0xB1), int8_t(0x80), int8_t(0x91), int8_t(0x80)); + } const __m128i shifted = _mm_srli_epi32(*src, 3); const __m128i delta_hash = @@ -34771,12 +35833,14 @@ static inline uint16_t to_base64_mask(__m128i *src, bool *error) { *src = out; return (uint16_t)mask; } + +template static inline uint64_t to_base64_mask(block64 *b, bool *error) { *error = 0; - uint64_t m0 = to_base64_mask(&b->chunks[0], error); - uint64_t m1 = to_base64_mask(&b->chunks[1], error); - uint64_t m2 = to_base64_mask(&b->chunks[2], error); - uint64_t m3 = to_base64_mask(&b->chunks[3], error); + uint64_t m0 = to_base64_mask(&b->chunks[0], error); + uint64_t m1 = to_base64_mask(&b->chunks[1], error); + uint64_t m2 = to_base64_mask(&b->chunks[2], error); + uint64_t m3 = to_base64_mask(&b->chunks[3], error); return m0 | (m1 << 16) | (m2 << 32) | (m3 << 48); } @@ -34799,6 +35863,8 @@ static inline uint64_t compress_block(block64 *b, uint64_t mask, char *output) { return _mm_popcnt_u64(nmask); } +// The caller of this function is responsible to ensure that there are 64 bytes available +// from reading at src. The data is read into a block64 structure. static inline void load_block(block64 *b, const char *src) { b->chunks[0] = _mm_loadu_si128(reinterpret_cast(src)); b->chunks[1] = _mm_loadu_si128(reinterpret_cast(src + 16)); @@ -34806,6 +35872,23 @@ static inline void load_block(block64 *b, const char *src) { b->chunks[3] = _mm_loadu_si128(reinterpret_cast(src + 48)); } +// The caller of this function is responsible to ensure that there are 128 bytes available +// from reading at src. The data is read into a block64 structure. 
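+// Eight 16-byte loads cover the 64 UTF-16 units; _mm_packus_epi16 narrows each
+// pair with unsigned saturation, so an out-of-range unit cannot alias a valid
+// base64 character and is flagged by to_base64_mask.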
+static inline void load_block(block64 *b, const char16_t *src) { + __m128i m1 = _mm_loadu_si128(reinterpret_cast(src)); + __m128i m2 = _mm_loadu_si128(reinterpret_cast(src + 8)); + __m128i m3 = _mm_loadu_si128(reinterpret_cast(src + 16)); + __m128i m4 = _mm_loadu_si128(reinterpret_cast(src + 24)); + __m128i m5 = _mm_loadu_si128(reinterpret_cast(src + 32)); + __m128i m6 = _mm_loadu_si128(reinterpret_cast(src + 40)); + __m128i m7 = _mm_loadu_si128(reinterpret_cast(src + 48)); + __m128i m8 = _mm_loadu_si128(reinterpret_cast(src + 56)); + b->chunks[0] = _mm_packus_epi16(m1, m2); + b->chunks[1] = _mm_packus_epi16(m3, m4); + b->chunks[2] = _mm_packus_epi16(m5, m6); + b->chunks[3] = _mm_packus_epi16(m7, m8); +} + static inline void base64_decode(char *out, __m128i str) { // credit: aqrit @@ -34855,11 +35938,23 @@ static inline void base64_decode_block_safe(char *out, block64 *b) { std::memcpy(out + 36, buffer, 12); } -result compress_decode_base64(char *dst, const char *src, size_t srclen) { +template +result compress_decode_base64(char *dst, const chartype *src, size_t srclen, + base64_options options) { + const uint8_t *to_base64 = base64_url ? tables::base64::to_base64_url_value + : tables::base64::to_base64_value; + // skip trailing spaces + while (srclen > 0 && to_base64[uint8_t(src[srclen - 1])] == 64) { + srclen--; + } size_t equalsigns = 0; if (srclen > 0 && src[srclen - 1] == '=') { srclen--; equalsigns = 1; + // skip trailing spaces + while (srclen > 0 && to_base64[uint8_t(src[srclen - 1])] == 64) { + srclen--; + } if (srclen > 0 && src[srclen - 1] == '=') { srclen--; equalsigns = 2; @@ -34868,26 +35963,25 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { char *end_of_safe_64byte_zone = (srclen + 3) / 4 * 3 >= 63 ? dst + (srclen + 3) / 4 * 3 - 63 : dst; - const char *const srcinit = src; + const chartype *const srcinit = src; const char *const dstinit = dst; - const char *const srcend = src + srclen; + const chartype *const srcend = src + srclen; constexpr size_t block_size = 6; static_assert(block_size >= 2, "block should of size 2 or more"); char buffer[block_size * 64]; char *bufferptr = buffer; if (srclen >= 64) { - const char *const srcend64 = src + srclen - 64; + const chartype *const srcend64 = src + srclen - 64; while (src <= srcend64) { block64 b; load_block(&b, src); src += 64; bool error = false; - uint64_t badcharmask = to_base64_mask(&b, &error); + uint64_t badcharmask = to_base64_mask(&b, &error); if (error) { src -= 64; - while (src < srcend && - tables::base64::to_base64_value[uint8_t(*src)] <= 64) { + while (src < srcend && to_base64[uint8_t(*src)] <= 64) { src++; } return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; @@ -34932,7 +36026,7 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { int last_block = (int)((bufferptr - buffer_start) % 64); if (last_block != 0 && srcend - src + last_block >= 64) { while ((bufferptr - buffer_start) % 64 != 0 && src < srcend) { - uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + uint8_t val = to_base64[uint8_t(*src)]; *bufferptr = char(val); if (val > 64) { return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; @@ -34980,7 +36074,7 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { int leftover = int(bufferptr - buffer_start); if (leftover > 0) { while (leftover < 4 && src < srcend) { - uint8_t val = tables::base64::to_base64_value[uint8_t(*src)]; + uint8_t val = to_base64[uint8_t(*src)]; if (val > 64) { 
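        // Values 0..63 are base64 digits and 64 is ASCII whitespace, so anything
        // above 64 is a character outside the (url-)alphabet and must be reported.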
return {error_code::INVALID_BASE64_CHARACTER, size_t(src - srcinit)}; } @@ -35022,15 +36116,27 @@ result compress_decode_base64(char *dst, const char *src, size_t srclen) { } } if (src < srcend + equalsigns) { - result r = scalar::base64::base64_tail_decode(dst, src, srcend - src); + result r = + scalar::base64::base64_tail_decode(dst, src, srcend - src, options); if (r.error == error_code::INVALID_BASE64_CHARACTER) { r.count += size_t(src - srcinit); return r; } else { r.count += size_t(dst - dstinit); } + if(r.error == error_code::SUCCESS && equalsigns > 0) { + // additional checks + if((r.count % 3 == 0) || ((r.count % 3) + 1 + equalsigns != 4)) { + r.error = error_code::INVALID_BASE64_CHARACTER; + } + } return r; } + if(equalsigns > 0) { + if((size_t(dst - dstinit) % 3 == 0) || ((size_t(dst - dstinit) % 3) + 1 + equalsigns != 4)) { + return {INVALID_BASE64_CHARACTER, size_t(dst - dstinit)}; + } + } return {SUCCESS, size_t(dst - dstinit)}; } /* end file src/westmere/sse_base64.cpp */ @@ -37368,16 +38474,28 @@ simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(con return scalar::base64::maximal_binary_length_from_base64(input, length); } -simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output) const noexcept { - return compress_decode_base64(output, input, length); +simdutf_warn_unused result implementation::base64_to_binary(const char * input, size_t length, char* output, base64_options options) const noexcept { + return (options & base64_url) ? compress_decode_base64(output, input, length, options) : compress_decode_base64(output, input, length, options); } -simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length) const noexcept { - return scalar::base64::base64_length_from_binary(length); +simdutf_warn_unused size_t implementation::maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept { + return scalar::base64::maximal_binary_length_from_base64(input, length); } -size_t implementation::binary_to_base64(const char * input, size_t length, char* output) const noexcept { - return encode_base64(output, input, length); +simdutf_warn_unused result implementation::base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options) const noexcept { + return (options & base64_url) ? compress_decode_base64(output, input, length, options) : compress_decode_base64(output, input, length, options); +} + +simdutf_warn_unused size_t implementation::base64_length_from_binary(size_t length, base64_options options) const noexcept { + return scalar::base64::base64_length_from_binary(length, options); +} + +size_t implementation::binary_to_base64(const char * input, size_t length, char* output, base64_options options) const noexcept { + if(options == base64_url) { + return encode_base64(output, input, length); + } else { + return encode_base64(output, input, length); + } } } // namespace westmere } // namespace simdutf diff --git a/deps/simdutf/simdutf.h b/deps/simdutf/simdutf.h index 539b1ebfc28eb1..8bc2061a4bca8e 100644 --- a/deps/simdutf/simdutf.h +++ b/deps/simdutf/simdutf.h @@ -1,4 +1,4 @@ -/* auto-generated on 2024-03-18 10:58:28 -0400. Do not edit! */ +/* auto-generated on 2024-04-05 16:29:02 -0400. Do not edit! 
*/ /* begin file include/simdutf.h */ #ifndef SIMDUTF_H #define SIMDUTF_H @@ -566,6 +566,7 @@ enum error_code { // there must be no surrogate at all (Latin1) INVALID_BASE64_CHARACTER, // Found a character that cannot be part of a valid base64 string. BASE64_INPUT_REMAINDER, // The base64 input terminates with a single character, excluding padding (=). + OUTPUT_BUFFER_TOO_SMALL, // The provided buffer is too small. OTHER // Not related to validation/transcoding. }; @@ -573,9 +574,9 @@ struct result { error_code error; size_t count; // In case of error, indicates the position of the error. In case of success, indicates the number of code units validated/written. - simdutf_really_inline result(); + simdutf_really_inline result() : error{error_code::SUCCESS}, count{0} {} - simdutf_really_inline result(error_code, size_t); + simdutf_really_inline result(error_code _err, size_t _pos) : error{_err}, count{_pos} {} }; } @@ -593,7 +594,7 @@ SIMDUTF_DISABLE_UNDESIRED_WARNINGS #define SIMDUTF_SIMDUTF_VERSION_H /** The version of simdutf being used (major.minor.revision) */ -#define SIMDUTF_VERSION "5.0.0" +#define SIMDUTF_VERSION "5.2.3" namespace simdutf { enum { @@ -604,11 +605,11 @@ enum { /** * The minor version (major.MINOR.revision) of simdutf being used. */ - SIMDUTF_VERSION_MINOR = 0, + SIMDUTF_VERSION_MINOR = 2, /** * The revision (major.minor.REVISION) of simdutf being used. */ - SIMDUTF_VERSION_REVISION = 0 + SIMDUTF_VERSION_REVISION = 3 }; } // namespace simdutf @@ -2285,6 +2286,12 @@ simdutf_warn_unused size_t trim_partial_utf16le(const char16_t* input, size_t le */ simdutf_warn_unused size_t trim_partial_utf16(const char16_t* input, size_t length); +// base64_options are used to specify the base64 encoding options. +using base64_options = uint64_t; +enum : base64_options { + base64_default = 0, /* standard base64 format */ + base64_url = 1 /* base64url format*/ +}; /** * Provide the maximal binary length in bytes given the base64 input. @@ -2293,10 +2300,21 @@ simdutf_warn_unused size_t trim_partial_utf16(const char16_t* input, size_t leng * * @param input the base64 input to process * @param length the length of the base64 input in bytes - * @return number of base64 bytes + * @return maximum number of binary bytes */ simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, size_t length) noexcept; +/** + * Provide the maximal binary length in bytes given the base64 input. + * In general, if the input contains ASCII spaces, the result will be less than + * the maximum length. + * + * @param input the base64 input to process, in ASCII stored as 16-bit units + * @param length the length of the base64 input in 16-bit units + * @return maximal number of binary bytes + */ +simdutf_warn_unused size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) noexcept; + /** * Convert a base64 input to a binary ouput. * @@ -2307,19 +2325,24 @@ simdutf_warn_unused size_t maximal_binary_length_from_base64(const char * input, * See https://infra.spec.whatwg.org/#forgiving-base64-decode * * This function will fail in case of invalid input. 
There are two possible reasons for - * failure: the input is contains a number of base64 characters that when divided by 4, leaves - * a singler remainder character (BASE64_INPUT_REMAINDER), or the input contains a character + * failure: the input contains a number of base64 characters that when divided by 4, leaves + * a single remainder character (BASE64_INPUT_REMAINDER), or the input contains a character * that is not a valid base64 character (INVALID_BASE64_CHARACTER). * + * When the error is INVALID_BASE64_CHARACTER, r.count contains the index in the input + * where the invalid character was found. When the error is BASE64_INPUT_REMAINDER, then + * r.count contains the number of bytes decoded. + * * You should call this function with a buffer that is at least maximal_binary_length_from_base64(input, length) bytes long. * If you fail to provide that much space, the function may cause a buffer overflow. * * @param input the base64 string to process * @param length the length of the string in bytes * @param output the pointer to buffer that can hold the conversion result (should be at least maximal_binary_length_from_base64(input, length) bytes long). + * @param options the base64 options to use, can be base64_default or base64_url, is base64_default by default. * @return a result pair struct (of type simdutf::error containing the two fields error and count) with an error code and either position of the error (in the input in bytes) if any, or the number of bytes written if successful. */ -simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output) noexcept; +simdutf_warn_unused result base64_to_binary(const char * input, size_t length, char* output, base64_options options = base64_default) noexcept; /** * Provide the base64 length in bytes given the length of a binary input. @@ -2327,7 +2350,7 @@ simdutf_warn_unused result base64_to_binary(const char * input, size_t length, c * @param length the length of the input in bytes * @return number of base64 bytes */ -simdutf_warn_unused size_t base64_length_from_binary(size_t length) noexcept; +simdutf_warn_unused size_t base64_length_from_binary(size_t length, base64_options options = base64_default) noexcept; /** * Convert a binary input to a base64 ouput. The output is always padded with equal signs so that it is @@ -2338,9 +2361,74 @@ simdutf_warn_unused size_t base64_length_from_binary(size_t length) noexcept; * @param input the binary to process * @param length the length of the input in bytes * @param output the pointer to buffer that can hold the conversion result (should be at least base64_length_from_binary(length) bytes long) + * @param options the base64 options to use, can be base64_default or base64_url, is base64_default by default. * @return number of written bytes, will be equal to base64_length_from_binary(length) */ -size_t binary_to_base64(const char * input, size_t length, char* output) noexcept; +size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options = base64_default) noexcept; + +/** + * Convert a base64 input to a binary ouput. + * + * This function follows the WHATWG forgiving-base64 format, which means that it will + * ignore any ASCII spaces in the input. You may provide a padded input (with one or two + * equal signs at the end) or an unpadded input (without any equal signs at the end). + * + * See https://infra.spec.whatwg.org/#forgiving-base64-decode + * + * This function will fail in case of invalid input. 
There are two possible reasons for + * failure: the input contains a number of base64 characters that when divided by 4, leaves + * a single remainder character (BASE64_INPUT_REMAINDER), or the input contains a character + * that is not a valid base64 character (INVALID_BASE64_CHARACTER). + * + * When the error is INVALID_BASE64_CHARACTER, r.count contains the index in the input + * where the invalid character was found. When the error is BASE64_INPUT_REMAINDER, then + * r.count contains the number of bytes decoded. + * + * You should call this function with a buffer that is at least maximal_binary_length_from_utf6_base64(input, length) bytes long. + * If you fail to provide that much space, the function may cause a buffer overflow. + * + * @param input the base64 string to process, in ASCII stored as 16-bit units + * @param length the length of the string in 16-bit units + * @param output the pointer to buffer that can hold the conversion result (should be at least maximal_binary_length_from_base64(input, length) bytes long). + * @param options the base64 options to use, can be base64_default or base64_url, is base64_default by default. + * @return a result pair struct (of type simdutf::error containing the two fields error and count) with an error code and position of the INVALID_BASE64_CHARACTER error (in the input in units) if any, or the number of bytes written if successful. + */ +simdutf_warn_unused result base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options = base64_default) noexcept; + +/** + * Convert a base64 input to a binary ouput. + * + * This function follows the WHATWG forgiving-base64 format, which means that it will + * ignore any ASCII spaces in the input. You may provide a padded input (with one or two + * equal signs at the end) or an unpadded input (without any equal signs at the end). + * + * See https://infra.spec.whatwg.org/#forgiving-base64-decode + * + * This function will fail in case of invalid input. There are three possible reasons for + * failure: the input contains a number of base64 characters that when divided by 4, leaves + * a single remainder character (BASE64_INPUT_REMAINDER), the input contains a character + * that is not a valid base64 character (INVALID_BASE64_CHARACTER), or the output buffer + * is too small (OUTPUT_BUFFER_TOO_SMALL). + * + * When OUTPUT_BUFFER_TOO_SMALL, we return both the number of bytes written + * and the number of units processed, see description of the parameters and returned value. + * + * When the error is INVALID_BASE64_CHARACTER, r.count contains the index in the input + * where the invalid character was found. When the error is BASE64_INPUT_REMAINDER, then + * r.count contains the number of bytes decoded. + * + * The INVALID_BASE64_CHARACTER cases are considered fatal and you are expected to discard + * the output. + * + * @param input the base64 string to process, in ASCII stored as 8-bit or 16-bit units + * @param length the length of the string in 8-bit or 16-bit units. + * @param output the pointer to buffer that can hold the conversion result. + * @param outlen the number of bytes that can be written in the output buffer. Upon return, it is modified to reflect how many bytes were written. + * @param options the base64 options to use, can be base64_default or base64_url, is base64_default by default. 
+ * @return a result pair struct (of type simdutf::error containing the two fields error and count) with an error code and position of the INVALID_BASE64_CHARACTER error (in the input in units) if any, or the number of units processed if successful. + */ +simdutf_warn_unused result base64_to_binary_safe(const char * input, size_t length, char* output, size_t& outlen, base64_options options = base64_default) noexcept; +simdutf_warn_unused result base64_to_binary_safe(const char16_t * input, size_t length, char* output, size_t& outlen, base64_options options = base64_default) noexcept; /** * An implementation of simdutf for a particular CPU architecture. @@ -3409,10 +3497,21 @@ class implementation { * * @param input the base64 input to process * @param length the length of the base64 input in bytes - * @return number of base64 bytes + * @return maximal number of binary bytes */ simdutf_warn_unused virtual size_t maximal_binary_length_from_base64(const char * input, size_t length) const noexcept = 0; + /** + * Provide the maximal binary length in bytes given the base64 input. + * In general, if the input contains ASCII spaces, the result will be less than + * the maximum length. + * + * @param input the base64 input to process, in ASCII stored as 16-bit units + * @param length the length of the base64 input in 16-bit units + * @return maximal number of binary bytes + */ + simdutf_warn_unused virtual size_t maximal_binary_length_from_base64(const char16_t * input, size_t length) const noexcept = 0; + /** * Convert a base64 input to a binary ouput. * @@ -3423,8 +3522,8 @@ class implementation { * See https://infra.spec.whatwg.org/#forgiving-base64-decode * * This function will fail in case of invalid input. There are two possible reasons for - * failure: the input is contains a number of base64 characters that when divided by 4, leaves - * a singler remainder character (BASE64_INPUT_REMAINDER), or the input contains a character + * failure: the input contains a number of base64 characters that when divided by 4, leaves + * a single remainder character (BASE64_INPUT_REMAINDER), or the input contains a character * that is not a valid base64 character (INVALID_BASE64_CHARACTER). * * You should call this function with a buffer that is at least maximal_binary_length_from_base64(input, length) bytes long. @@ -3433,17 +3532,44 @@ class implementation { * @param input the base64 string to process * @param length the length of the string in bytes * @param output the pointer to buffer that can hold the conversion result (should be at least maximal_binary_length_from_base64(input, length) bytes long). + * @param options the base64 options to use, can be base64_default or base64_url, is base64_default by default. * @return a result pair struct (of type simdutf::error containing the two fields error and count) with an error code and either position of the error (in the input in bytes) if any, or the number of bytes written if successful. */ - simdutf_warn_unused virtual result base64_to_binary(const char * input, size_t length, char* output) const noexcept = 0; + simdutf_warn_unused virtual result base64_to_binary(const char * input, size_t length, char* output, base64_options options = base64_default) const noexcept = 0; + + /** + * Convert a base64 input to a binary ouput. + * + * This function follows the WHATWG forgiving-base64 format, which means that it will + * ignore any ASCII spaces in the input. 
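(For illustration only, a minimal sketch of calling the base64_to_binary_safe overloads declared above; the input string, buffer size, and printing are invented example values, not part of simdutf or of this patch.)

```cpp
#include <cstdio>
#include <string>
#include <vector>

#include "simdutf.h"

int main() {
  // Hypothetical unpadded base64url input ("hello world"); not from the patch.
  const std::string b64 = "aGVsbG8gd29ybGQ";
  // Deliberately undersized output buffer so that OUTPUT_BUFFER_TOO_SMALL can occur.
  std::vector<char> out(4);
  size_t outlen = out.size();  // in: capacity of `out`; out: bytes actually written
  simdutf::result r = simdutf::base64_to_binary_safe(
      b64.data(), b64.size(), out.data(), outlen, simdutf::base64_url);
  if (r.error == simdutf::error_code::SUCCESS) {
    std::printf("decoded %zu bytes\n", outlen);
  } else if (r.error == simdutf::error_code::OUTPUT_BUFFER_TOO_SMALL) {
    // Partial result: outlen bytes were written and r.count input units were
    // consumed, so decoding could be resumed with a larger buffer.
    std::printf("buffer too small: wrote %zu bytes, consumed %zu units\n",
                outlen, r.count);
  } else {
    // INVALID_BASE64_CHARACTER (fatal, discard the output) or BASE64_INPUT_REMAINDER;
    // r.count is an input position or a byte count, respectively.
    std::printf("decode error %d (count=%zu)\n", static_cast<int>(r.error), r.count);
  }
  return 0;
}
```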
You may provide a padded input (with one or two + * equal signs at the end) or an unpadded input (without any equal signs at the end). + * + * See https://infra.spec.whatwg.org/#forgiving-base64-decode + * + * This function will fail in case of invalid input. There are two possible reasons for + * failure: the input contains a number of base64 characters that when divided by 4, leaves + * a single remainder character (BASE64_INPUT_REMAINDER), or the input contains a character + * that is not a valid base64 character (INVALID_BASE64_CHARACTER). + * + * You should call this function with a buffer that is at least maximal_binary_length_from_utf6_base64(input, length) bytes long. + * If you fail to provide that much space, the function may cause a buffer overflow. + * + * @param input the base64 string to process, in ASCII stored as 16-bit units + * @param length the length of the string in 16-bit units + * @param output the pointer to buffer that can hold the conversion result (should be at least maximal_binary_length_from_base64(input, length) bytes long). + * @param options the base64 options to use, can be base64_default or base64_url, is base64_default by default. + * @return a result pair struct (of type simdutf::error containing the two fields error and count) with an error code and position of the INVALID_BASE64_CHARACTER error (in the input in units) if any, or the number of bytes written if successful. + */ + simdutf_warn_unused virtual result base64_to_binary(const char16_t * input, size_t length, char* output, base64_options options = base64_default) const noexcept = 0; /** * Provide the base64 length in bytes given the length of a binary input. * * @param length the length of the input in bytes + * @parem options the base64 options to use, can be base64_default or base64_url, is base64_default by default. * @return number of base64 bytes */ - simdutf_warn_unused virtual size_t base64_length_from_binary(size_t length) const noexcept = 0; + simdutf_warn_unused virtual size_t base64_length_from_binary(size_t length, base64_options options = base64_default) const noexcept = 0; /** * Convert a binary input to a base64 ouput. The output is always padded with equal signs so that it is @@ -3454,9 +3580,10 @@ class implementation { * @param input the binary to process * @param length the length of the input in bytes * @param output the pointer to buffer that can hold the conversion result (should be at least base64_length_from_binary(length) bytes long) + * @param options the base64 options to use, can be base64_default or base64_url, is base64_default by default. 
* @return number of written bytes, will be equal to base64_length_from_binary(length) */ - virtual size_t binary_to_base64(const char * input, size_t length, char* output) const noexcept = 0; + virtual size_t binary_to_base64(const char * input, size_t length, char* output, base64_options options = base64_default) const noexcept = 0; protected: From 209823d3af10b84923faa0cfa5dee24cd5cf714d Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Sat, 13 Apr 2024 18:13:45 +0300 Subject: [PATCH 14/41] deps: update simdutf to 5.2.4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/52473 Reviewed-By: Richard Lau Reviewed-By: Michaël Zasso Reviewed-By: Marco Ippolito --- deps/simdutf/simdutf.cpp | 12 +++++++++-- deps/simdutf/simdutf.h | 45 ++++++++++++++++++++++++++-------------- 2 files changed, 39 insertions(+), 18 deletions(-) diff --git a/deps/simdutf/simdutf.cpp b/deps/simdutf/simdutf.cpp index c4d4ed3f7ae481..13c408263e7ac4 100644 --- a/deps/simdutf/simdutf.cpp +++ b/deps/simdutf/simdutf.cpp @@ -1,4 +1,4 @@ -/* auto-generated on 2024-04-05 16:29:02 -0400. Do not edit! */ +/* auto-generated on 2024-04-11 09:56:55 -0400. Do not edit! */ /* begin file src/simdutf.cpp */ #include "simdutf.h" // We include base64_tables once. @@ -13665,7 +13665,6 @@ inline size_t convert(const char *buf, size_t len, char32_t *utf32_output) { /* begin file src/scalar/utf8_to_latin1/utf8_to_latin1.h */ #ifndef SIMDUTF_UTF8_TO_LATIN1_H #define SIMDUTF_UTF8_TO_LATIN1_H -#include namespace simdutf { namespace scalar { @@ -16715,8 +16714,17 @@ size_t encode_base64(char *dst, const char *src, size_t srclen, 'N', 'd', 't', '9', 'O', 'e', 'u', '-', 'P', 'f', 'v', '_', }; const uint8x16_t v3f = vdupq_n_u8(0x3f); +#ifdef SIMDUTF_REGULAR_VISUAL_STUDIO + // When trying to load a uint8_t array, Visual Studio might + // error with: error C2664: '__n128x4 neon_ld4m_q8(const char *)': + // cannot convert argument 1 from 'const uint8_t [64]' to 'const char * + const uint8x16x4_t table = + vld4q_u8((reinterpret_cast( + options & base64_url) ? source_table_url : source_table)); +#else const uint8x16x4_t table = vld4q_u8((options & base64_url) ? source_table_url : source_table); +#endif size_t i = 0; for (; i + 16 * 3 <= srclen; i += 16 * 3) { const uint8x16x3_t in = vld3q_u8((const uint8_t *)src + i); diff --git a/deps/simdutf/simdutf.h b/deps/simdutf/simdutf.h index 8bc2061a4bca8e..7a4a3a2cf45033 100644 --- a/deps/simdutf/simdutf.h +++ b/deps/simdutf/simdutf.h @@ -1,4 +1,4 @@ -/* auto-generated on 2024-04-05 16:29:02 -0400. Do not edit! */ +/* auto-generated on 2024-04-11 09:56:55 -0400. Do not edit! */ /* begin file include/simdutf.h */ #ifndef SIMDUTF_H #define SIMDUTF_H @@ -130,9 +130,9 @@ #include #endif -#if defined(__x86_64__) || defined(_M_AMD64) +#if (defined(__x86_64__) || defined(_M_AMD64)) && !defined(_M_ARM64EC) #define SIMDUTF_IS_X86_64 1 -#elif defined(__aarch64__) || defined(_M_ARM64) +#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC) #define SIMDUTF_IS_ARM64 1 #elif defined(__PPC64__) || defined(_M_PPC64) //#define SIMDUTF_IS_PPC64 1 @@ -594,7 +594,7 @@ SIMDUTF_DISABLE_UNDESIRED_WARNINGS #define SIMDUTF_SIMDUTF_VERSION_H /** The version of simdutf being used (major.minor.revision) */ -#define SIMDUTF_VERSION "5.2.3" +#define SIMDUTF_VERSION "5.2.4" namespace simdutf { enum { @@ -609,7 +609,7 @@ enum { /** * The revision (major.minor.REVISION) of simdutf being used. 
*/ - SIMDUTF_VERSION_REVISION = 3 + SIMDUTF_VERSION_REVISION = 4 }; } // namespace simdutf @@ -748,7 +748,7 @@ static inline uint32_t detect_supported_architectures() { return host_isa; } -#elif defined(__aarch64__) || defined(_M_ARM64) +#elif defined(__aarch64__) || defined(_M_ARM64) || defined(_M_ARM64EC) static inline uint32_t detect_supported_architectures() { return instruction_set::NEON; @@ -1170,7 +1170,7 @@ simdutf_warn_unused result validate_utf32_with_errors(const char32_t *buf, size_ * @param input the UTF-8 string to convert * @param length the length of the string in bytes * @param latin1_output the pointer to buffer that can hold conversion result - * @return the number of written char; 0 if the input was not valid UTF-8 string + * @return the number of written char; 0 if the input was not valid UTF-8 string or if it cannot be represented as Latin1 */ simdutf_warn_unused size_t convert_utf8_to_latin1(const char * input, size_t length, char* latin1_output) noexcept; @@ -1227,6 +1227,8 @@ simdutf_warn_unused size_t convert_utf8_to_utf16be(const char * input, size_t le /** * Convert possibly broken UTF-8 string into latin1 string with errors. + * If the string cannot be represented as Latin1, an error + * code is returned. * * During the conversion also validation of the input string is done. * This function is suitable to work with inputs from untrusted sources. @@ -1446,12 +1448,14 @@ simdutf_warn_unused size_t convert_utf16_to_utf8(const char16_t * input, size_t * @param input the UTF-16 string to convert * @param length the length of the string in 2-byte code units (char16_t) * @param latin1_buffer the pointer to buffer that can hold conversion result - * @return number of written code units; 0 if input is not a valid UTF-16LE string + * @return number of written code units; 0 if input is not a valid UTF-16 string or if it cannot be represented as Latin1 */ simdutf_warn_unused size_t convert_utf16_to_latin1(const char16_t * input, size_t length, char* latin1_buffer) noexcept; /** * Convert possibly broken UTF-16LE string into Latin1 string. + * If the string cannot be represented as Latin1, an error + * is returned. * * During the conversion also validation of the input string is done. * This function is suitable to work with inputs from untrusted sources. @@ -1461,7 +1465,7 @@ simdutf_warn_unused size_t convert_utf16_to_latin1(const char16_t * input, size_ * @param input the UTF-16LE string to convert * @param length the length of the string in 2-byte code units (char16_t) * @param latin1_buffer the pointer to buffer that can hold conversion result - * @return number of written code units; 0 if input is not a valid UTF-16LE string + * @return number of written code units; 0 if input is not a valid UTF-16LE string or if it cannot be represented as Latin1 */ simdutf_warn_unused size_t convert_utf16le_to_latin1(const char16_t * input, size_t length, char* latin1_buffer) noexcept; @@ -1476,7 +1480,7 @@ simdutf_warn_unused size_t convert_utf16le_to_latin1(const char16_t * input, siz * @param input the UTF-16BE string to convert * @param length the length of the string in 2-byte code units (char16_t) * @param latin1_buffer the pointer to buffer that can hold conversion result - * @return a result pair struct (of type simdutf::error containing the two fields error and count) with an error code and either position of the error (in the input in code units) if any, or the number of char written if successful. 
+ * @return number of written code units; 0 if input is not a valid UTF-16BE string or if it cannot be represented as Latin1 */ simdutf_warn_unused size_t convert_utf16be_to_latin1(const char16_t * input, size_t length, char* latin1_buffer) noexcept; @@ -1541,6 +1545,8 @@ simdutf_warn_unused result convert_utf16le_to_latin1_with_errors(const char16_t /** * Convert possibly broken UTF-16BE string into Latin1 string. + * If the string cannot be represented as Latin1, an error + * is returned. * * During the conversion also validation of the input string is done. * This function is suitable to work with inputs from untrusted sources. @@ -1951,13 +1957,14 @@ simdutf_warn_unused size_t convert_utf32_to_utf16le(const char32_t * input, size * @param input the UTF-32 string to convert * @param length the length of the string in 4-byte code units (char32_t) * @param latin1_buffer the pointer to buffer that can hold conversion result - * @return number of written code units; 0 if input is not a valid UTF-32 string + * @return number of written code units; 0 if input is not a valid UTF-32 string or if it cannot be represented as Latin1 */ simdutf_warn_unused size_t convert_utf32_to_latin1(const char32_t * input, size_t length, char* latin1_buffer) noexcept; /** * Convert possibly broken UTF-32 string into Latin1 string and stop on error. + * If the string cannot be represented as Latin1, an error is returned. * * During the conversion also validation of the input string is done. * This function is suitable to work with inputs from untrusted sources. @@ -2681,12 +2688,14 @@ class implementation { * @param input the UTF-8 string to convert * @param length the length of the string in bytes * @param latin1_output the pointer to buffer that can hold conversion result - * @return the number of written char; 0 if the input was not valid UTF-8 string + * @return the number of written char; 0 if the input was not valid UTF-8 string or if it cannot be represented as Latin1 */ simdutf_warn_unused virtual size_t convert_utf8_to_latin1(const char * input, size_t length, char* latin1_output) const noexcept = 0; /** - * Convert possibly broken UTF-8 string into latin1 string with errors + * Convert possibly broken UTF-8 string into latin1 string with errors. + * If the string cannot be represented as Latin1, an error + * code is returned. * * During the conversion also validation of the input string is done. * This function is suitable to work with inputs from untrusted sources. 
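(For illustration only, a hedged sketch of how a caller might rely on the clarified Latin1 return contracts documented above; the input literal and buffer sizing are assumptions made for this example, not taken from the patch.)

```cpp
#include <cstdio>
#include <vector>

#include "simdutf.h"

int main() {
  // Hypothetical input "héllo"; U+00E9 is representable in Latin1. Assumes a
  // little-endian host so that a u"" literal is laid out as UTF-16LE.
  const char16_t input[] = u"h\u00e9llo";
  const size_t len = 5;  // code units, excluding the terminating zero
  std::vector<char> latin1(len);  // Latin1 needs at most one byte per code unit
  simdutf::result r = simdutf::convert_utf16le_to_latin1_with_errors(
      input, len, latin1.data());
  if (r.error == simdutf::error_code::SUCCESS) {
    std::printf("wrote %zu Latin1 bytes\n", r.count);
  } else {
    // The plain convert_utf16le_to_latin1() would return 0 for the same failure,
    // with no indication of where or why the conversion stopped.
    std::printf("error %d near position %zu\n", static_cast<int>(r.error), r.count);
  }
  return 0;
}
```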
@@ -2862,7 +2871,7 @@ class implementation { * @param input the UTF-16LE string to convert * @param length the length of the string in 2-byte code units (char16_t) * @param latin1_buffer the pointer to buffer that can hold conversion result - * @return number of written code units; 0 if input is not a valid UTF-16LE string + * @return number of written code units; 0 if input is not a valid UTF-16LE string or if it cannot be represented as Latin1 */ simdutf_warn_unused virtual size_t convert_utf16le_to_latin1(const char16_t * input, size_t length, char* latin1_buffer) const noexcept = 0; @@ -2877,12 +2886,14 @@ class implementation { * @param input the UTF-16BE string to convert * @param length the length of the string in 2-byte code units (char16_t) * @param latin1_buffer the pointer to buffer that can hold conversion result - * @return a result pair struct (of type simdutf::error containing the two fields error and count) with an error code and either position of the error (in the input in code units) if any, or the number of char written if successful. + * @return number of written code units; 0 if input is not a valid UTF-16BE string or if it cannot be represented as Latin1 */ simdutf_warn_unused virtual size_t convert_utf16be_to_latin1(const char16_t * input, size_t length, char* latin1_buffer) const noexcept = 0; /** * Convert possibly broken UTF-16LE string into Latin1 string. + * If the string cannot be represented as Latin1, an error + * is returned. * * During the conversion also validation of the input string is done. * This function is suitable to work with inputs from untrusted sources. @@ -2897,6 +2908,8 @@ class implementation { /** * Convert possibly broken UTF-16BE string into Latin1 string. + * If the string cannot be represented as Latin1, an error + * is returned. * * During the conversion also validation of the input string is done. * This function is suitable to work with inputs from untrusted sources. @@ -3157,6 +3170,7 @@ class implementation { /** * Convert possibly broken UTF-32 string into Latin1 string and stop on error. + * If the string cannot be represented as Latin1, an error is returned. * * During the conversion also validation of the input string is done. * This function is suitable to work with inputs from untrusted sources. @@ -3168,7 +3182,6 @@ class implementation { * @param latin1_buffer the pointer to buffer that can hold conversion result * @return a result pair struct (of type simdutf::error containing the two fields error and count) with an error code and either position of the error (in the input in code units) if any, or the number of char written if successful. 
*/ - simdutf_warn_unused virtual result convert_utf32_to_latin1_with_errors(const char32_t * input, size_t length, char* latin1_buffer) const noexcept = 0; /** From 052b0ba0c63901474ef47973141e265a31d484dc Mon Sep 17 00:00:00 2001 From: npm CLI robot Date: Sun, 7 Apr 2024 14:36:14 -0700 Subject: [PATCH 15/41] deps: upgrade npm to 10.5.1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/52351 Reviewed-By: Luke Karrys Reviewed-By: Michael Dawson Reviewed-By: Mohammed Keyvanzadeh Reviewed-By: Marco Ippolito Reviewed-By: Ulises Gascón Reviewed-By: Luigi Pinca --- deps/npm/bin/npm | 3 +- deps/npm/bin/npm-prefix.js | 30 ++ deps/npm/bin/npm.cmd | 3 +- deps/npm/bin/npm.ps1 | 4 +- deps/npm/bin/npx | 4 +- deps/npm/bin/npx.cmd | 4 +- deps/npm/bin/npx.ps1 | 4 +- deps/npm/docs/content/commands/npm-audit.md | 9 +- deps/npm/docs/content/commands/npm-ls.md | 2 +- deps/npm/docs/content/commands/npm-search.md | 10 + deps/npm/docs/content/commands/npm.md | 2 +- .../content/configuring-npm/package-json.md | 20 +- deps/npm/docs/output/commands/npm-audit.html | 8 +- deps/npm/docs/output/commands/npm-ls.html | 2 +- deps/npm/docs/output/commands/npm-search.html | 9 +- deps/npm/docs/output/commands/npm.html | 2 +- .../output/configuring-npm/package-json.html | 20 +- deps/npm/lib/commands/adduser.js | 2 +- deps/npm/lib/commands/ci.js | 20 +- deps/npm/lib/commands/login.js | 2 +- deps/npm/lib/commands/publish.js | 2 +- deps/npm/lib/commands/query.js | 6 +- deps/npm/lib/commands/search.js | 1 + deps/npm/lib/npm.js | 2 +- deps/npm/lib/utils/audit-error.js | 2 +- deps/npm/lib/utils/error-message.js | 2 +- deps/npm/lib/utils/exit-handler.js | 2 +- deps/npm/lib/utils/read-user-info.js | 2 +- deps/npm/lib/utils/replace-info.js | 31 -- deps/npm/man/man1/npm-access.1 | 2 +- deps/npm/man/man1/npm-adduser.1 | 2 +- deps/npm/man/man1/npm-audit.1 | 6 +- deps/npm/man/man1/npm-bugs.1 | 2 +- deps/npm/man/man1/npm-cache.1 | 2 +- deps/npm/man/man1/npm-ci.1 | 2 +- deps/npm/man/man1/npm-completion.1 | 2 +- deps/npm/man/man1/npm-config.1 | 2 +- deps/npm/man/man1/npm-dedupe.1 | 2 +- deps/npm/man/man1/npm-deprecate.1 | 2 +- deps/npm/man/man1/npm-diff.1 | 2 +- deps/npm/man/man1/npm-dist-tag.1 | 2 +- deps/npm/man/man1/npm-docs.1 | 2 +- deps/npm/man/man1/npm-doctor.1 | 2 +- deps/npm/man/man1/npm-edit.1 | 2 +- deps/npm/man/man1/npm-exec.1 | 2 +- deps/npm/man/man1/npm-explain.1 | 2 +- deps/npm/man/man1/npm-explore.1 | 2 +- deps/npm/man/man1/npm-find-dupes.1 | 2 +- deps/npm/man/man1/npm-fund.1 | 2 +- deps/npm/man/man1/npm-help-search.1 | 2 +- deps/npm/man/man1/npm-help.1 | 2 +- deps/npm/man/man1/npm-hook.1 | 2 +- deps/npm/man/man1/npm-init.1 | 2 +- deps/npm/man/man1/npm-install-ci-test.1 | 2 +- deps/npm/man/man1/npm-install-test.1 | 2 +- deps/npm/man/man1/npm-install.1 | 2 +- deps/npm/man/man1/npm-link.1 | 2 +- deps/npm/man/man1/npm-login.1 | 2 +- deps/npm/man/man1/npm-logout.1 | 2 +- deps/npm/man/man1/npm-ls.1 | 4 +- deps/npm/man/man1/npm-org.1 | 2 +- deps/npm/man/man1/npm-outdated.1 | 2 +- deps/npm/man/man1/npm-owner.1 | 2 +- deps/npm/man/man1/npm-pack.1 | 2 +- deps/npm/man/man1/npm-ping.1 | 2 +- deps/npm/man/man1/npm-pkg.1 | 2 +- deps/npm/man/man1/npm-prefix.1 | 2 +- deps/npm/man/man1/npm-profile.1 | 2 +- deps/npm/man/man1/npm-prune.1 | 2 +- deps/npm/man/man1/npm-publish.1 | 2 +- deps/npm/man/man1/npm-query.1 | 2 +- deps/npm/man/man1/npm-rebuild.1 | 2 +- deps/npm/man/man1/npm-repo.1 | 2 +- deps/npm/man/man1/npm-restart.1 | 2 +- deps/npm/man/man1/npm-root.1 | 
2 +- deps/npm/man/man1/npm-run-script.1 | 2 +- deps/npm/man/man1/npm-sbom.1 | 2 +- deps/npm/man/man1/npm-search.1 | 12 +- deps/npm/man/man1/npm-shrinkwrap.1 | 2 +- deps/npm/man/man1/npm-star.1 | 2 +- deps/npm/man/man1/npm-stars.1 | 2 +- deps/npm/man/man1/npm-start.1 | 2 +- deps/npm/man/man1/npm-stop.1 | 2 +- deps/npm/man/man1/npm-team.1 | 2 +- deps/npm/man/man1/npm-test.1 | 2 +- deps/npm/man/man1/npm-token.1 | 2 +- deps/npm/man/man1/npm-uninstall.1 | 2 +- deps/npm/man/man1/npm-unpublish.1 | 2 +- deps/npm/man/man1/npm-unstar.1 | 2 +- deps/npm/man/man1/npm-update.1 | 2 +- deps/npm/man/man1/npm-version.1 | 2 +- deps/npm/man/man1/npm-view.1 | 2 +- deps/npm/man/man1/npm-whoami.1 | 2 +- deps/npm/man/man1/npm.1 | 4 +- deps/npm/man/man1/npx.1 | 2 +- deps/npm/man/man5/folders.5 | 2 +- deps/npm/man/man5/install.5 | 2 +- deps/npm/man/man5/npm-global.5 | 2 +- deps/npm/man/man5/npm-json.5 | 20 +- deps/npm/man/man5/npm-shrinkwrap-json.5 | 2 +- deps/npm/man/man5/npmrc.5 | 2 +- deps/npm/man/man5/package-json.5 | 20 +- deps/npm/man/man5/package-lock-json.5 | 2 +- deps/npm/man/man7/config.7 | 2 +- deps/npm/man/man7/dependency-selectors.7 | 2 +- deps/npm/man/man7/developers.7 | 2 +- deps/npm/man/man7/logging.7 | 2 +- deps/npm/man/man7/orgs.7 | 2 +- deps/npm/man/man7/package-spec.7 | 2 +- deps/npm/man/man7/registry.7 | 2 +- deps/npm/man/man7/removal.7 | 2 +- deps/npm/man/man7/scope.7 | 2 +- deps/npm/man/man7/scripts.7 | 2 +- deps/npm/man/man7/workspaces.7 | 2 +- .../arborist/lib/query-selector-all.js | 7 +- .../@npmcli/arborist/package.json | 6 +- .../node_modules/@npmcli/config/package.json | 4 +- deps/npm/node_modules/@npmcli/redact/LICENSE | 21 + .../node_modules/@npmcli/redact/lib/index.js | 59 +++ .../node_modules/@npmcli/redact/package.json | 45 ++ .../node_modules/@sigstore/tuf/package.json | 2 +- .../npm/node_modules/@sigstore/tuf/seeds.json | 2 +- deps/npm/node_modules/agent-base/LICENSE | 22 + .../npm/node_modules/agent-base/dist/index.js | 65 ++- deps/npm/node_modules/agent-base/package.json | 2 +- .../binary-extensions/binary-extensions.json | 3 + .../node_modules/binary-extensions/license | 3 +- .../binary-extensions/package.json | 6 +- deps/npm/node_modules/cli-table3/package.json | 2 +- deps/npm/node_modules/glob/README.md | 8 +- .../glob/dist/commonjs/ignore.d.ts.map | 2 +- .../node_modules/glob/dist/commonjs/ignore.js | 6 + .../glob/dist/commonjs/ignore.js.map | 2 +- .../glob/dist/commonjs/package.json | 4 +- .../glob/dist/commonjs/walker.d.ts.map | 2 +- .../node_modules/glob/dist/commonjs/walker.js | 24 +- .../glob/dist/commonjs/walker.js.map | 2 +- .../glob/dist/esm/ignore.d.ts.map | 2 +- deps/npm/node_modules/glob/dist/esm/ignore.js | 6 + .../node_modules/glob/dist/esm/ignore.js.map | 2 +- .../node_modules/glob/dist/esm/package.json | 4 +- .../glob/dist/esm/walker.d.ts.map | 2 +- deps/npm/node_modules/glob/dist/esm/walker.js | 24 +- .../node_modules/glob/dist/esm/walker.js.map | 2 +- deps/npm/node_modules/glob/package.json | 17 +- deps/npm/node_modules/ini/lib/ini.js | 2 +- deps/npm/node_modules/ini/package.json | 8 +- .../lib/init-package-json.js | 115 +++-- .../init-package-json/package.json | 26 +- .../node_modules/libnpmaccess/package.json | 4 +- deps/npm/node_modules/libnpmdiff/package.json | 8 +- deps/npm/node_modules/libnpmexec/lib/index.js | 2 +- deps/npm/node_modules/libnpmexec/package.json | 4 +- deps/npm/node_modules/libnpmfund/package.json | 2 +- deps/npm/node_modules/libnpmhook/package.json | 4 +- deps/npm/node_modules/libnpmorg/package.json | 4 +- 
deps/npm/node_modules/libnpmpack/package.json | 2 +- .../node_modules/libnpmpublish/package.json | 4 +- .../node_modules/libnpmsearch/package.json | 4 +- deps/npm/node_modules/libnpmteam/package.json | 4 +- .../{cjs => commonjs}/assert-valid-pattern.js | 0 .../minimatch/dist/{cjs => commonjs}/ast.js | 3 + .../{cjs => commonjs}/brace-expressions.js | 0 .../dist/{cjs => commonjs}/escape.js | 0 .../minimatch/dist/{cjs => commonjs}/index.js | 7 +- .../dist/{cjs => commonjs}/package.json | 0 .../dist/{cjs => commonjs}/unescape.js | 0 .../dist/{mjs => esm}/assert-valid-pattern.js | 0 .../minimatch/dist/{mjs => esm}/ast.js | 3 + .../dist/{mjs => esm}/brace-expressions.js | 0 .../minimatch/dist/{mjs => esm}/escape.js | 0 .../minimatch/dist/{mjs => esm}/index.js | 7 +- .../minimatch/dist/{mjs => esm}/package.json | 0 .../minimatch/dist/{mjs => esm}/unescape.js | 0 deps/npm/node_modules/minimatch/package.json | 46 +- .../node-gyp/.release-please-manifest.json | 3 + deps/npm/node_modules/node-gyp/CHANGELOG.md | 38 +- deps/npm/node_modules/node-gyp/README.md | 12 +- deps/npm/node_modules/node-gyp/gyp/README.md | 30 -- .../gyp/pylib/gyp/generator/android.py | 6 +- .../node-gyp/gyp/pylib/gyp/generator/gypsh.py | 7 +- .../node-gyp/gyp/pylib/gyp/generator/msvs.py | 20 +- .../node-gyp/gyp/pylib/gyp/input.py | 10 +- .../node-gyp/gyp/pylib/gyp/msvs_emulation.py | 9 +- .../node_modules/node-gyp/gyp/pyproject.toml | 8 +- .../node_modules/node-gyp/lib/configure.js | 28 +- .../node_modules/node-gyp/lib/find-python.js | 2 +- .../node-gyp/lib/find-visualstudio.js | 127 ++++- deps/npm/node_modules/node-gyp/package.json | 2 +- .../node-gyp/release-please-config.json | 40 ++ .../npm-registry-fetch/lib/check-response.js | 2 +- .../npm-registry-fetch/lib/clean-url.js | 27 -- .../npm-registry-fetch/lib/index.js | 5 +- .../npm-registry-fetch/package.json | 9 +- .../dist/{cjs => commonjs}/index.js | 22 +- .../dist/{cjs => commonjs}/package.json | 0 .../path-scurry/dist/{mjs => esm}/index.js | 22 +- .../dist/{mjs => esm}/package.json | 0 .../npm/node_modules/path-scurry/package.json | 54 +-- deps/npm/node_modules/promzard/lib/index.js | 2 +- deps/npm/node_modules/promzard/package.json | 11 +- .../read/dist/commonjs/package.json | 3 + .../node_modules/read/dist/commonjs/read.js | 95 ++++ .../node_modules/read/dist/esm/package.json | 3 + deps/npm/node_modules/read/dist/esm/read.js | 88 ++++ deps/npm/node_modules/read/lib/read.js | 82 ---- deps/npm/node_modules/read/package.json | 77 ++- deps/npm/node_modules/tar/lib/unpack.js | 27 +- deps/npm/node_modules/tar/package.json | 2 +- deps/npm/package.json | 24 +- .../test/lib/commands/query.js.test.cjs | 25 + .../tap-snapshots/test/lib/docs.js.test.cjs | 6 +- deps/npm/test/bin/windows-shims.js | 15 +- deps/npm/test/lib/arborist-cmd.js | 78 +++ deps/npm/test/lib/commands/ci.js | 20 + deps/npm/test/lib/commands/install.js | 447 ++++++------------ deps/npm/test/lib/commands/query.js | 22 + deps/npm/test/lib/utils/read-user-info.js | 4 +- deps/npm/test/lib/utils/replace-info.js | 116 ----- 219 files changed, 1600 insertions(+), 1081 deletions(-) create mode 100755 deps/npm/bin/npm-prefix.js delete mode 100644 deps/npm/lib/utils/replace-info.js create mode 100644 deps/npm/node_modules/@npmcli/redact/LICENSE create mode 100644 deps/npm/node_modules/@npmcli/redact/lib/index.js create mode 100644 deps/npm/node_modules/@npmcli/redact/package.json create mode 100644 deps/npm/node_modules/agent-base/LICENSE rename deps/npm/node_modules/minimatch/dist/{cjs => 
commonjs}/assert-valid-pattern.js (100%) rename deps/npm/node_modules/minimatch/dist/{cjs => commonjs}/ast.js (99%) rename deps/npm/node_modules/minimatch/dist/{cjs => commonjs}/brace-expressions.js (100%) rename deps/npm/node_modules/minimatch/dist/{cjs => commonjs}/escape.js (100%) rename deps/npm/node_modules/minimatch/dist/{cjs => commonjs}/index.js (99%) rename deps/npm/node_modules/minimatch/dist/{cjs => commonjs}/package.json (100%) rename deps/npm/node_modules/minimatch/dist/{cjs => commonjs}/unescape.js (100%) rename deps/npm/node_modules/minimatch/dist/{mjs => esm}/assert-valid-pattern.js (100%) rename deps/npm/node_modules/minimatch/dist/{mjs => esm}/ast.js (99%) rename deps/npm/node_modules/minimatch/dist/{mjs => esm}/brace-expressions.js (100%) rename deps/npm/node_modules/minimatch/dist/{mjs => esm}/escape.js (100%) rename deps/npm/node_modules/minimatch/dist/{mjs => esm}/index.js (99%) rename deps/npm/node_modules/minimatch/dist/{mjs => esm}/package.json (100%) rename deps/npm/node_modules/minimatch/dist/{mjs => esm}/unescape.js (100%) create mode 100644 deps/npm/node_modules/node-gyp/.release-please-manifest.json delete mode 100644 deps/npm/node_modules/node-gyp/gyp/README.md create mode 100644 deps/npm/node_modules/node-gyp/release-please-config.json delete mode 100644 deps/npm/node_modules/npm-registry-fetch/lib/clean-url.js rename deps/npm/node_modules/path-scurry/dist/{cjs => commonjs}/index.js (99%) rename deps/npm/node_modules/path-scurry/dist/{cjs => commonjs}/package.json (100%) rename deps/npm/node_modules/path-scurry/dist/{mjs => esm}/index.js (99%) rename deps/npm/node_modules/path-scurry/dist/{mjs => esm}/package.json (100%) create mode 100644 deps/npm/node_modules/read/dist/commonjs/package.json create mode 100644 deps/npm/node_modules/read/dist/commonjs/read.js create mode 100644 deps/npm/node_modules/read/dist/esm/package.json create mode 100644 deps/npm/node_modules/read/dist/esm/read.js delete mode 100644 deps/npm/node_modules/read/lib/read.js delete mode 100644 deps/npm/test/lib/utils/replace-info.js diff --git a/deps/npm/bin/npm b/deps/npm/bin/npm index 7f210b936e1fad..027dc9d128d22c 100755 --- a/deps/npm/bin/npm +++ b/deps/npm/bin/npm @@ -41,8 +41,9 @@ if [ $? -ne 0 ]; then fi no_node_dir fi +NPM_PREFIX_JS="$CLI_BASEDIR/node_modules/npm/bin/npm-prefix.js" NPM_CLI_JS="$CLI_BASEDIR/node_modules/npm/bin/npm-cli.js" -NPM_PREFIX=`"$NODE_EXE" "$NPM_CLI_JS" prefix -g` +NPM_PREFIX=`"$NODE_EXE" "$NPM_PREFIX_JS"` if [ $? 
-ne 0 ]; then no_node_dir fi diff --git a/deps/npm/bin/npm-prefix.js b/deps/npm/bin/npm-prefix.js new file mode 100755 index 00000000000000..3ff43de0dd84de --- /dev/null +++ b/deps/npm/bin/npm-prefix.js @@ -0,0 +1,30 @@ +#!/usr/bin/env node +// This is a single-use bin to help windows discover the proper prefix for npm +// without having to load all of npm first +// It does not accept argv params + +const path = require('path') +const Config = require('@npmcli/config') +const { definitions, flatten, shorthands } = require('@npmcli/config/lib/definitions') +const config = new Config({ + npmPath: path.dirname(__dirname), + // argv is explicitly not looked at since prefix is not something that can be changed via argv + argv: [], + definitions, + flatten, + shorthands, + excludeNpmCwd: false, +}) + +async function main () { + try { + await config.load() + // eslint-disable-next-line no-console + console.log(config.globalPrefix) + } catch (err) { + // eslint-disable-next-line no-console + console.error(err) + process.exit(1) + } +} +main() diff --git a/deps/npm/bin/npm.cmd b/deps/npm/bin/npm.cmd index f111c59d1efb6e..1a02e83ac365a9 100755 --- a/deps/npm/bin/npm.cmd +++ b/deps/npm/bin/npm.cmd @@ -8,8 +8,9 @@ IF NOT EXIST "%NODE_EXE%" ( SET "NODE_EXE=node" ) +SET "NPM_PREFIX_JS=%~dp0\node_modules\npm\bin\npm-prefix.js" SET "NPM_CLI_JS=%~dp0\node_modules\npm\bin\npm-cli.js" -FOR /F "delims=" %%F IN ('CALL "%NODE_EXE%" "%NPM_CLI_JS%" prefix -g') DO ( +FOR /F "delims=" %%F IN ('CALL "%NODE_EXE%" "%NPM_PREFIX_JS%"') DO ( SET "NPM_PREFIX_NPM_CLI_JS=%%F\node_modules\npm\bin\npm-cli.js" ) IF EXIST "%NPM_PREFIX_NPM_CLI_JS%" ( diff --git a/deps/npm/bin/npm.ps1 b/deps/npm/bin/npm.ps1 index f2f236adc23db2..399e33360e853c 100644 --- a/deps/npm/bin/npm.ps1 +++ b/deps/npm/bin/npm.ps1 @@ -17,8 +17,8 @@ if ($nodebin -eq $null) { } $nodedir = $(New-Object -ComObject Scripting.FileSystemObject).GetFile("$nodebin").ParentFolder.Path -$npmclijs="$nodedir/node_modules/npm/bin/npm-cli.js" -$npmprefix=(& $nodeexe $npmclijs prefix -g) +$npmprefixjs="$nodedir/node_modules/npm/bin/npm-prefix.js" +$npmprefix=(& $nodeexe $npmprefixjs) if ($LASTEXITCODE -ne 0) { Write-Host "Could not determine Node.js install directory" exit 1 diff --git a/deps/npm/bin/npx b/deps/npm/bin/npx index 719ff8ecdc19b9..b8619ee9c5e37a 100755 --- a/deps/npm/bin/npx +++ b/deps/npm/bin/npx @@ -41,9 +41,9 @@ if [ $? -ne 0 ]; then fi no_node_dir fi -NPM_CLI_JS="$CLI_BASEDIR/node_modules/npm/bin/npm-cli.js" +NPM_PREFIX_JS="$CLI_BASEDIR/node_modules/npm/bin/npm-prefix.js" NPX_CLI_JS="$CLI_BASEDIR/node_modules/npm/bin/npx-cli.js" -NPM_PREFIX=`"$NODE_EXE" "$NPM_CLI_JS" prefix -g` +NPM_PREFIX=`"$NODE_EXE" "$NPM_PREFIX_JS"` if [ $? 
-ne 0 ]; then no_node_dir fi diff --git a/deps/npm/bin/npx.cmd b/deps/npm/bin/npx.cmd index b79518ec505409..0f02d2dc498544 100755 --- a/deps/npm/bin/npx.cmd +++ b/deps/npm/bin/npx.cmd @@ -8,9 +8,9 @@ IF NOT EXIST "%NODE_EXE%" ( SET "NODE_EXE=node" ) -SET "NPM_CLI_JS=%~dp0\node_modules\npm\bin\npm-cli.js" +SET "NPM_PREFIX_JS=%~dp0\node_modules\npm\bin\npm-prefix.js" SET "NPX_CLI_JS=%~dp0\node_modules\npm\bin\npx-cli.js" -FOR /F "delims=" %%F IN ('CALL "%NODE_EXE%" "%NPM_CLI_JS%" prefix -g') DO ( +FOR /F "delims=" %%F IN ('CALL "%NODE_EXE%" "%NPM_PREFIX_JS%"') DO ( SET "NPM_PREFIX_NPX_CLI_JS=%%F\node_modules\npm\bin\npx-cli.js" ) IF EXIST "%NPM_PREFIX_NPX_CLI_JS%" ( diff --git a/deps/npm/bin/npx.ps1 b/deps/npm/bin/npx.ps1 index 437e2a7b74c3af..1d59fc52083d70 100644 --- a/deps/npm/bin/npx.ps1 +++ b/deps/npm/bin/npx.ps1 @@ -17,8 +17,8 @@ if ($nodebin -eq $null) { } $nodedir = $(New-Object -ComObject Scripting.FileSystemObject).GetFile("$nodebin").ParentFolder.Path -$npmclijs="$nodedir/node_modules/npm/bin/npm-cli.js" -$npmprefix=(& $nodeexe $npmclijs prefix -g) +$npmprefixjs="$nodedir/node_modules/npm/bin/npm-prefix.js" +$npmprefix=(& $nodeexe $npmprefixjs) if ($LASTEXITCODE -ne 0) { Write-Host "Could not determine Node.js install directory" exit 1 diff --git a/deps/npm/docs/content/commands/npm-audit.md b/deps/npm/docs/content/commands/npm-audit.md index 7d83fd582f3fd3..7ab0c182d2db96 100644 --- a/deps/npm/docs/content/commands/npm-audit.md +++ b/deps/npm/docs/content/commands/npm-audit.md @@ -49,6 +49,13 @@ Registry signatures can be verified using the following `audit` command: $ npm audit signatures ``` +The `audit signatures` command will also verify the provenance attestations of +downloaded packages. Because provenance attestations are such a new feature, +security features may be added to (or changed in) the attestation format over +time. To ensure that you're always able to verify attestation signatures check +that you're running the latest version of the npm CLI. Please note this often +means updating npm beyond the version that ships with Node.js. + The npm CLI supports registry signatures and signing keys provided by any registry if the following conventions are followed: 1. Signatures are provided in the package's `packument` in each published version within the `dist` object: @@ -89,7 +96,7 @@ Keys response: - `scheme`: only `ecdsa-sha2-nistp256` is currently supported by the npm CLI - `key`: base64 encoded public key -See this [example key's response from the public npm registry](https://registry.npmjs.org/-/npm/v1/keys"). +See this [example key's response from the public npm registry](https://registry.npmjs.org/-/npm/v1/keys). ### Audit Endpoints diff --git a/deps/npm/docs/content/commands/npm-ls.md b/deps/npm/docs/content/commands/npm-ls.md index d21af25d6c783b..0c6bfe1bf8ac26 100644 --- a/deps/npm/docs/content/commands/npm-ls.md +++ b/deps/npm/docs/content/commands/npm-ls.md @@ -27,7 +27,7 @@ packages will *also* show the paths to the specified packages. 
For example, running `npm ls promzard` in npm's source tree will show: ```bash -npm@10.5.0 /path/to/npm +npm@10.5.1 /path/to/npm └─┬ init-package-json@0.0.4 └── promzard@0.1.5 ``` diff --git a/deps/npm/docs/content/commands/npm-search.md b/deps/npm/docs/content/commands/npm-search.md index df5cc51b417ea2..047102af61766e 100644 --- a/deps/npm/docs/content/commands/npm-search.md +++ b/deps/npm/docs/content/commands/npm-search.md @@ -91,6 +91,16 @@ Show the description in `npm search` +#### `searchlimit` + +* Default: 20 +* Type: Number + +Number of items to limit search results to. Will not apply at all to legacy +searches. + + + #### `searchopts` * Default: "" diff --git a/deps/npm/docs/content/commands/npm.md b/deps/npm/docs/content/commands/npm.md index 3b14832d96da56..d69d753e245b27 100644 --- a/deps/npm/docs/content/commands/npm.md +++ b/deps/npm/docs/content/commands/npm.md @@ -14,7 +14,7 @@ Note: This command is unaware of workspaces. ### Version -10.5.0 +10.5.1 ### Description diff --git a/deps/npm/docs/content/configuring-npm/package-json.md b/deps/npm/docs/content/configuring-npm/package-json.md index c857fc8cb8ce5c..ec5cfcab1bb49e 100644 --- a/deps/npm/docs/content/configuring-npm/package-json.md +++ b/deps/npm/docs/content/configuring-npm/package-json.md @@ -40,7 +40,7 @@ Some tips: * Don't use the same name as a core Node module. * Don't put "js" or "node" in the name. It's assumed that it's js, since you're writing a package.json file, and you can specify the engine using - the "engines" field. (See below.) + the "[engines](#engines)" field. (See below.) * The name will probably be passed as an argument to require(), so it should be something short, but also reasonably descriptive. * You may want to check the npm registry to see if there's something by @@ -75,7 +75,7 @@ your package as it's listed in `npm search`. ### homepage -The url to the project homepage. +The URL to the project homepage. Example: @@ -85,7 +85,7 @@ Example: ### bugs -The url to your project's issue tracker and / or the email address to which +The URL to your project's issue tracker and / or the email address to which issues should be reported. These are helpful for people who encounter issues with your package. @@ -101,10 +101,10 @@ It should look like this: ``` You can specify either one or both values. If you want to provide only a -url, you can specify the value for "bugs" as a simple string instead of an +URL, you can specify the value for "bugs" as a simple string instead of an object. -If a url is provided, it will be used by the `npm bugs` command. +If a URL is provided, it will be used by the `npm bugs` command. ### license @@ -511,9 +511,9 @@ Do it like this: } ``` -The URL should be a publicly available (perhaps read-only) url that can be +The URL should be a publicly available (perhaps read-only) URL that can be handed directly to a VCS program without any modification. It should not -be a url to an html project page that you put in your browser. It's for +be a URL to an html project page that you put in your browser. It's for computers. For GitHub, GitHub gist, Bitbucket, or GitLab repositories you can use the @@ -636,7 +636,7 @@ install time. #### Git URLs as Dependencies -Git urls are of the form: +Git URLs are of the form: ```bash ://[[:]@][:][:][/][# | #semver:] @@ -683,7 +683,7 @@ will be rebuilt for every installation. #### GitHub URLs -As of version 1.1.65, you can refer to GitHub urls as just "foo": +As of version 1.1.65, you can refer to GitHub URLs as just "foo": "user/foo-project". 
Just as with git URLs, a `commit-ish` suffix can be included. For example: @@ -889,7 +889,7 @@ none. If a dependency can be used, but you would like npm to proceed if it cannot be found or fails to install, then you may put it in the `optionalDependencies` object. This is a map of package name to version or -url, just like the `dependencies` object. The difference is that build +URL, just like the `dependencies` object. The difference is that build failures do not cause installation to fail. Running `npm install --omit=optional` will prevent these dependencies from being installed. diff --git a/deps/npm/docs/output/commands/npm-audit.html b/deps/npm/docs/output/commands/npm-audit.html index 7ba3101ded64a8..ab20ea72c65208 100644 --- a/deps/npm/docs/output/commands/npm-audit.html +++ b/deps/npm/docs/output/commands/npm-audit.html @@ -174,6 +174,12 @@

[Generated HTML documentation under deps/npm/docs/output/ mirrors the Markdown changes above: commands/npm-audit.html gains the paragraph on verifying provenance attestations and the corrected registry keys link (https://registry.npmjs.org/-/npm/v1/keys, without the stray trailing quote); commands/npm-ls.html and commands/npm.html update the displayed version from 10.5.0 to 10.5.1; commands/npm-search.html adds the new `searchlimit` option (Default: 20, Type: Number) to its option list and table of contents; and configuring-npm/package-json.html picks up the "url" to "URL" wording fixes, the link to the "engines" field, and the matching Git URLs / GitHub URLs / optionalDependencies rewordings. The Git URL grammar shown in context there is <protocol>://[<user>[:<password>]@]<hostname>[:<port>][:][/]<path>[#<commit-ish> | #semver:<semver>].]
    diff --git a/deps/npm/lib/commands/adduser.js b/deps/npm/lib/commands/adduser.js index cd4cba60511cb6..a69ef366fbf32c 100644 --- a/deps/npm/lib/commands/adduser.js +++ b/deps/npm/lib/commands/adduser.js @@ -1,5 +1,5 @@ const log = require('../utils/log-shim.js') -const replaceInfo = require('../utils/replace-info.js') +const { redactLog: replaceInfo } = require('@npmcli/redact') const auth = require('../utils/auth.js') const BaseCommand = require('../base-command.js') diff --git a/deps/npm/lib/commands/ci.js b/deps/npm/lib/commands/ci.js index 706b77ac361cf2..428c43e6c30edc 100644 --- a/deps/npm/lib/commands/ci.js +++ b/deps/npm/lib/commands/ci.js @@ -75,14 +75,18 @@ class CI extends ArboristWorkspaceCmd { ) } - // Only remove node_modules after we've successfully loaded the virtual - // tree and validated the lockfile - await this.npm.time('npm-ci:rm', async () => { - const path = `${where}/node_modules` - // get the list of entries so we can skip the glob for performance - const entries = await fs.readdir(path, null).catch(er => []) - return Promise.all(entries.map(f => fs.rm(`${path}/${f}`, { force: true, recursive: true }))) - }) + const dryRun = this.npm.config.get('dry-run') + if (!dryRun) { + // Only remove node_modules after we've successfully loaded the virtual + // tree and validated the lockfile + await this.npm.time('npm-ci:rm', async () => { + const path = `${where}/node_modules` + // get the list of entries so we can skip the glob for performance + const entries = await fs.readdir(path, null).catch(er => []) + return Promise.all(entries.map(f => fs.rm(`${path}/${f}`, + { force: true, recursive: true }))) + }) + } await arb.reify(opts) diff --git a/deps/npm/lib/commands/login.js b/deps/npm/lib/commands/login.js index dc4ed8a67acd97..b498a3bf2ecd8b 100644 --- a/deps/npm/lib/commands/login.js +++ b/deps/npm/lib/commands/login.js @@ -1,5 +1,5 @@ const log = require('../utils/log-shim.js') -const replaceInfo = require('../utils/replace-info.js') +const { redactLog: replaceInfo } = require('@npmcli/redact') const auth = require('../utils/auth.js') const BaseCommand = require('../base-command.js') diff --git a/deps/npm/lib/commands/publish.js b/deps/npm/lib/commands/publish.js index 63abc50b4745f4..0456fd7e8320e6 100644 --- a/deps/npm/lib/commands/publish.js +++ b/deps/npm/lib/commands/publish.js @@ -6,7 +6,7 @@ const runScript = require('@npmcli/run-script') const pacote = require('pacote') const npa = require('npm-package-arg') const npmFetch = require('npm-registry-fetch') -const replaceInfo = require('../utils/replace-info.js') +const { redactLog: replaceInfo } = require('@npmcli/redact') const otplease = require('../utils/otplease.js') const { getContents, logTar } = require('../utils/tar.js') diff --git a/deps/npm/lib/commands/query.js b/deps/npm/lib/commands/query.js index 17a55a446b0869..dfa1356ebf436d 100644 --- a/deps/npm/lib/commands/query.js +++ b/deps/npm/lib/commands/query.js @@ -113,10 +113,12 @@ class Query extends BaseCommand { // builds a normalized inventory buildResponse (items) { for (const node of items) { - if (!this.#seen.has(node.target.location)) { + if (!node.target.location || !this.#seen.has(node.target.location)) { const item = new QuerySelectorItem(node) this.#response.push(item) - this.#seen.add(item.location) + if (node.target.location) { + this.#seen.add(item.location) + } } } } diff --git a/deps/npm/lib/commands/search.js b/deps/npm/lib/commands/search.js index f4a4ce32491424..bb94d6da20f1c1 100644 --- a/deps/npm/lib/commands/search.js +++ 
b/deps/npm/lib/commands/search.js @@ -42,6 +42,7 @@ class Search extends BaseCommand { 'color', 'parseable', 'description', + 'searchlimit', 'searchopts', 'searchexclude', 'registry', diff --git a/deps/npm/lib/npm.js b/deps/npm/lib/npm.js index 0a023f4ac8a302..d05b74ac74b833 100644 --- a/deps/npm/lib/npm.js +++ b/deps/npm/lib/npm.js @@ -12,7 +12,7 @@ const LogFile = require('./utils/log-file.js') const Timers = require('./utils/timers.js') const Display = require('./utils/display.js') const log = require('./utils/log-shim') -const replaceInfo = require('./utils/replace-info.js') +const { redactLog: replaceInfo } = require('@npmcli/redact') const updateNotifier = require('./utils/update-notifier.js') const pkg = require('../package.json') const { deref } = require('./utils/cmd-list.js') diff --git a/deps/npm/lib/utils/audit-error.js b/deps/npm/lib/utils/audit-error.js index aaf35566fc0304..f9850d718b198e 100644 --- a/deps/npm/lib/utils/audit-error.js +++ b/deps/npm/lib/utils/audit-error.js @@ -1,5 +1,5 @@ const log = require('./log-shim') -const replaceInfo = require('./replace-info.js') +const { redactLog: replaceInfo } = require('@npmcli/redact') // print an error or just nothing if the audit report has an error // this is called by the audit command, and by the reify-output util diff --git a/deps/npm/lib/utils/error-message.js b/deps/npm/lib/utils/error-message.js index e3d6c3526936f8..fc7be8301662e1 100644 --- a/deps/npm/lib/utils/error-message.js +++ b/deps/npm/lib/utils/error-message.js @@ -1,7 +1,7 @@ const { format } = require('util') const { resolve } = require('path') const nameValidator = require('validate-npm-package-name') -const replaceInfo = require('./replace-info.js') +const { redactLog: replaceInfo } = require('@npmcli/redact') const { report } = require('./explain-eresolve.js') const log = require('./log-shim') diff --git a/deps/npm/lib/utils/exit-handler.js b/deps/npm/lib/utils/exit-handler.js index 25cecd170a584c..8b4ab45c4d4745 100644 --- a/deps/npm/lib/utils/exit-handler.js +++ b/deps/npm/lib/utils/exit-handler.js @@ -3,7 +3,7 @@ const fs = require('fs') const log = require('./log-shim.js') const errorMessage = require('./error-message.js') -const replaceInfo = require('./replace-info.js') +const { redactLog: replaceInfo } = require('@npmcli/redact') let npm = null // set by the cli let exitHandlerCalled = false diff --git a/deps/npm/lib/utils/read-user-info.js b/deps/npm/lib/utils/read-user-info.js index 1cac8ee6d2668b..fa1cea158e8974 100644 --- a/deps/npm/lib/utils/read-user-info.js +++ b/deps/npm/lib/utils/read-user-info.js @@ -1,4 +1,4 @@ -const read = require('read') +const { read } = require('read') const userValidate = require('npm-user-validate') const log = require('./log-shim.js') diff --git a/deps/npm/lib/utils/replace-info.js b/deps/npm/lib/utils/replace-info.js deleted file mode 100644 index b9ce61935ffb76..00000000000000 --- a/deps/npm/lib/utils/replace-info.js +++ /dev/null @@ -1,31 +0,0 @@ -const { cleanUrl } = require('npm-registry-fetch') -const isString = (v) => typeof v === 'string' - -// split on \s|= similar to how nopt parses options -const splitAndReplace = (str) => { - // stateful regex, don't move out of this scope - const splitChars = /[\s=]/g - - let match = null - let result = '' - let index = 0 - while (match = splitChars.exec(str)) { - result += cleanUrl(str.slice(index, match.index)) + match[0] - index = splitChars.lastIndex - } - - return result + cleanUrl(str.slice(index)) -} - -// replaces auth info in an array of arguments or in a 
strings -function replaceInfo (arg) { - if (isString(arg)) { - return splitAndReplace(arg) - } else if (Array.isArray(arg)) { - return arg.map((a) => isString(a) ? splitAndReplace(a) : a) - } - - return arg -} - -module.exports = replaceInfo diff --git a/deps/npm/man/man1/npm-access.1 b/deps/npm/man/man1/npm-access.1 index 5231dbac1d504b..f11a6a5ba98230 100644 --- a/deps/npm/man/man1/npm-access.1 +++ b/deps/npm/man/man1/npm-access.1 @@ -1,4 +1,4 @@ -.TH "NPM-ACCESS" "1" "February 2024" "" "" +.TH "NPM-ACCESS" "1" "April 2024" "" "" .SH "NAME" \fBnpm-access\fR - Set access level on published packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-adduser.1 b/deps/npm/man/man1/npm-adduser.1 index c7cbb2cfcec253..e867396ccfc4bf 100644 --- a/deps/npm/man/man1/npm-adduser.1 +++ b/deps/npm/man/man1/npm-adduser.1 @@ -1,4 +1,4 @@ -.TH "NPM-ADDUSER" "1" "February 2024" "" "" +.TH "NPM-ADDUSER" "1" "April 2024" "" "" .SH "NAME" \fBnpm-adduser\fR - Add a registry user account .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-audit.1 b/deps/npm/man/man1/npm-audit.1 index e6b1982cb4c171..e8d71147400c45 100644 --- a/deps/npm/man/man1/npm-audit.1 +++ b/deps/npm/man/man1/npm-audit.1 @@ -1,4 +1,4 @@ -.TH "NPM-AUDIT" "1" "February 2024" "" "" +.TH "NPM-AUDIT" "1" "April 2024" "" "" .SH "NAME" \fBnpm-audit\fR - Run a security audit .SS "Synopsis" @@ -33,6 +33,8 @@ $ npm audit signatures .fi .RE .P +The \fBaudit signatures\fR command will also verify the provenance attestations of downloaded packages. Because provenance attestations are such a new feature, security features may be added to (or changed in) the attestation format over time. To ensure that you're always able to verify attestation signatures check that you're running the latest version of the npm CLI. Please note this often means updating npm beyond the version that ships with Node.js. +.P The npm CLI supports registry signatures and signing keys provided by any registry if the following conventions are followed: .RS 0 .IP 1. 4 @@ -90,7 +92,7 @@ Keys response: .RE 0 .P -See this \fBexample key's response from the public npm registry\fR \fI\(lahttps://registry.npmjs.org/-/npm/v1/keys"\(ra\fR. +See this \fBexample key's response from the public npm registry\fR \fI\(lahttps://registry.npmjs.org/-/npm/v1/keys\(ra\fR. .SS "Audit Endpoints" .P There are two audit endpoints that npm may use to fetch vulnerability information: the \fBBulk Advisory\fR endpoint and the \fBQuick Audit\fR endpoint. 
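The signing-keys endpoint referenced above can be inspected directly. A minimal sketch, assuming Node.js 18 or later so the global `fetch` is available (save as an `.mjs` file so top-level `await` works):

    // Fetch the public npm registry's signing keys document referenced above.
    const res = await fetch('https://registry.npmjs.org/-/npm/v1/keys')
    if (!res.ok) {
      throw new Error(`unexpected HTTP status ${res.status}`)
    }
    // Print the raw response; its shape should follow the "Keys response"
    // format documented in the npm-audit man page above.
    console.log(JSON.stringify(await res.json(), null, 2))

Run against the public registry, this should print the same document linked in the text.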
diff --git a/deps/npm/man/man1/npm-bugs.1 b/deps/npm/man/man1/npm-bugs.1 index 7db5b6cdb87f71..ea6371ffbe4fc1 100644 --- a/deps/npm/man/man1/npm-bugs.1 +++ b/deps/npm/man/man1/npm-bugs.1 @@ -1,4 +1,4 @@ -.TH "NPM-BUGS" "1" "February 2024" "" "" +.TH "NPM-BUGS" "1" "April 2024" "" "" .SH "NAME" \fBnpm-bugs\fR - Report bugs for a package in a web browser .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-cache.1 b/deps/npm/man/man1/npm-cache.1 index 2ec9e67be0634d..c741829317d2e1 100644 --- a/deps/npm/man/man1/npm-cache.1 +++ b/deps/npm/man/man1/npm-cache.1 @@ -1,4 +1,4 @@ -.TH "NPM-CACHE" "1" "February 2024" "" "" +.TH "NPM-CACHE" "1" "April 2024" "" "" .SH "NAME" \fBnpm-cache\fR - Manipulates packages cache .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-ci.1 b/deps/npm/man/man1/npm-ci.1 index 16ed2f6d9b5bb8..793f02006ab229 100644 --- a/deps/npm/man/man1/npm-ci.1 +++ b/deps/npm/man/man1/npm-ci.1 @@ -1,4 +1,4 @@ -.TH "NPM-CI" "1" "February 2024" "" "" +.TH "NPM-CI" "1" "April 2024" "" "" .SH "NAME" \fBnpm-ci\fR - Clean install a project .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-completion.1 b/deps/npm/man/man1/npm-completion.1 index 647d5e11ac202d..28bfb2906d1922 100644 --- a/deps/npm/man/man1/npm-completion.1 +++ b/deps/npm/man/man1/npm-completion.1 @@ -1,4 +1,4 @@ -.TH "NPM-COMPLETION" "1" "February 2024" "" "" +.TH "NPM-COMPLETION" "1" "April 2024" "" "" .SH "NAME" \fBnpm-completion\fR - Tab Completion for npm .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-config.1 b/deps/npm/man/man1/npm-config.1 index f3be24e542465d..bd572dea7c4ab8 100644 --- a/deps/npm/man/man1/npm-config.1 +++ b/deps/npm/man/man1/npm-config.1 @@ -1,4 +1,4 @@ -.TH "NPM-CONFIG" "1" "February 2024" "" "" +.TH "NPM-CONFIG" "1" "April 2024" "" "" .SH "NAME" \fBnpm-config\fR - Manage the npm configuration files .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-dedupe.1 b/deps/npm/man/man1/npm-dedupe.1 index 5480af746dcae5..529e14b0efb722 100644 --- a/deps/npm/man/man1/npm-dedupe.1 +++ b/deps/npm/man/man1/npm-dedupe.1 @@ -1,4 +1,4 @@ -.TH "NPM-DEDUPE" "1" "February 2024" "" "" +.TH "NPM-DEDUPE" "1" "April 2024" "" "" .SH "NAME" \fBnpm-dedupe\fR - Reduce duplication in the package tree .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-deprecate.1 b/deps/npm/man/man1/npm-deprecate.1 index 5e8d47a8bc77fc..2a3891596da0c6 100644 --- a/deps/npm/man/man1/npm-deprecate.1 +++ b/deps/npm/man/man1/npm-deprecate.1 @@ -1,4 +1,4 @@ -.TH "NPM-DEPRECATE" "1" "February 2024" "" "" +.TH "NPM-DEPRECATE" "1" "April 2024" "" "" .SH "NAME" \fBnpm-deprecate\fR - Deprecate a version of a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-diff.1 b/deps/npm/man/man1/npm-diff.1 index bea8bb3ef1d674..f8f0b5e7f40aac 100644 --- a/deps/npm/man/man1/npm-diff.1 +++ b/deps/npm/man/man1/npm-diff.1 @@ -1,4 +1,4 @@ -.TH "NPM-DIFF" "1" "February 2024" "" "" +.TH "NPM-DIFF" "1" "April 2024" "" "" .SH "NAME" \fBnpm-diff\fR - The registry diff command .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-dist-tag.1 b/deps/npm/man/man1/npm-dist-tag.1 index 6c4acc26b43b18..c0b6e8d70df6dc 100644 --- a/deps/npm/man/man1/npm-dist-tag.1 +++ b/deps/npm/man/man1/npm-dist-tag.1 @@ -1,4 +1,4 @@ -.TH "NPM-DIST-TAG" "1" "February 2024" "" "" +.TH "NPM-DIST-TAG" "1" "April 2024" "" "" .SH "NAME" \fBnpm-dist-tag\fR - Modify package distribution tags .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-docs.1 b/deps/npm/man/man1/npm-docs.1 index b9cc1da9abe783..be22f95c00df95 100644 --- a/deps/npm/man/man1/npm-docs.1 +++ b/deps/npm/man/man1/npm-docs.1 @@ -1,4 +1,4 @@ 
-.TH "NPM-DOCS" "1" "February 2024" "" "" +.TH "NPM-DOCS" "1" "April 2024" "" "" .SH "NAME" \fBnpm-docs\fR - Open documentation for a package in a web browser .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-doctor.1 b/deps/npm/man/man1/npm-doctor.1 index 42187f46922d5e..1aa4af1e485b08 100644 --- a/deps/npm/man/man1/npm-doctor.1 +++ b/deps/npm/man/man1/npm-doctor.1 @@ -1,4 +1,4 @@ -.TH "NPM-DOCTOR" "1" "February 2024" "" "" +.TH "NPM-DOCTOR" "1" "April 2024" "" "" .SH "NAME" \fBnpm-doctor\fR - Check the health of your npm environment .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-edit.1 b/deps/npm/man/man1/npm-edit.1 index 82cd85851f0480..0f91c98e4b6cd9 100644 --- a/deps/npm/man/man1/npm-edit.1 +++ b/deps/npm/man/man1/npm-edit.1 @@ -1,4 +1,4 @@ -.TH "NPM-EDIT" "1" "February 2024" "" "" +.TH "NPM-EDIT" "1" "April 2024" "" "" .SH "NAME" \fBnpm-edit\fR - Edit an installed package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-exec.1 b/deps/npm/man/man1/npm-exec.1 index 246498125c14ca..0040f141315cd3 100644 --- a/deps/npm/man/man1/npm-exec.1 +++ b/deps/npm/man/man1/npm-exec.1 @@ -1,4 +1,4 @@ -.TH "NPM-EXEC" "1" "February 2024" "" "" +.TH "NPM-EXEC" "1" "April 2024" "" "" .SH "NAME" \fBnpm-exec\fR - Run a command from a local or remote npm package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-explain.1 b/deps/npm/man/man1/npm-explain.1 index e1bb6de84f3348..ea53b43add250b 100644 --- a/deps/npm/man/man1/npm-explain.1 +++ b/deps/npm/man/man1/npm-explain.1 @@ -1,4 +1,4 @@ -.TH "NPM-EXPLAIN" "1" "February 2024" "" "" +.TH "NPM-EXPLAIN" "1" "April 2024" "" "" .SH "NAME" \fBnpm-explain\fR - Explain installed packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-explore.1 b/deps/npm/man/man1/npm-explore.1 index b4800ccfa6b299..7127aff53ee73f 100644 --- a/deps/npm/man/man1/npm-explore.1 +++ b/deps/npm/man/man1/npm-explore.1 @@ -1,4 +1,4 @@ -.TH "NPM-EXPLORE" "1" "February 2024" "" "" +.TH "NPM-EXPLORE" "1" "April 2024" "" "" .SH "NAME" \fBnpm-explore\fR - Browse an installed package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-find-dupes.1 b/deps/npm/man/man1/npm-find-dupes.1 index 1f80df08c59e20..fc625fb8ebd10b 100644 --- a/deps/npm/man/man1/npm-find-dupes.1 +++ b/deps/npm/man/man1/npm-find-dupes.1 @@ -1,4 +1,4 @@ -.TH "NPM-FIND-DUPES" "1" "February 2024" "" "" +.TH "NPM-FIND-DUPES" "1" "April 2024" "" "" .SH "NAME" \fBnpm-find-dupes\fR - Find duplication in the package tree .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-fund.1 b/deps/npm/man/man1/npm-fund.1 index 22d61e036d929a..c8fb9354beb570 100644 --- a/deps/npm/man/man1/npm-fund.1 +++ b/deps/npm/man/man1/npm-fund.1 @@ -1,4 +1,4 @@ -.TH "NPM-FUND" "1" "February 2024" "" "" +.TH "NPM-FUND" "1" "April 2024" "" "" .SH "NAME" \fBnpm-fund\fR - Retrieve funding information .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-help-search.1 b/deps/npm/man/man1/npm-help-search.1 index 4a9dd82c921cde..892777ce4e6b4a 100644 --- a/deps/npm/man/man1/npm-help-search.1 +++ b/deps/npm/man/man1/npm-help-search.1 @@ -1,4 +1,4 @@ -.TH "NPM-HELP-SEARCH" "1" "February 2024" "" "" +.TH "NPM-HELP-SEARCH" "1" "April 2024" "" "" .SH "NAME" \fBnpm-help-search\fR - Search npm help documentation .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-help.1 b/deps/npm/man/man1/npm-help.1 index c2880c831f6490..bb08ed91cccb79 100644 --- a/deps/npm/man/man1/npm-help.1 +++ b/deps/npm/man/man1/npm-help.1 @@ -1,4 +1,4 @@ -.TH "NPM-HELP" "1" "February 2024" "" "" +.TH "NPM-HELP" "1" "April 2024" "" "" .SH "NAME" \fBnpm-help\fR - Get help on npm .SS "Synopsis" diff 
--git a/deps/npm/man/man1/npm-hook.1 b/deps/npm/man/man1/npm-hook.1 index 2fc1b0dd667b88..ece059dfbb5257 100644 --- a/deps/npm/man/man1/npm-hook.1 +++ b/deps/npm/man/man1/npm-hook.1 @@ -1,4 +1,4 @@ -.TH "NPM-HOOK" "1" "February 2024" "" "" +.TH "NPM-HOOK" "1" "April 2024" "" "" .SH "NAME" \fBnpm-hook\fR - Manage registry hooks .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-init.1 b/deps/npm/man/man1/npm-init.1 index bfc7e48dc532a3..cc2c543925e464 100644 --- a/deps/npm/man/man1/npm-init.1 +++ b/deps/npm/man/man1/npm-init.1 @@ -1,4 +1,4 @@ -.TH "NPM-INIT" "1" "February 2024" "" "" +.TH "NPM-INIT" "1" "April 2024" "" "" .SH "NAME" \fBnpm-init\fR - Create a package.json file .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-install-ci-test.1 b/deps/npm/man/man1/npm-install-ci-test.1 index adc70da13cdf63..bd7f5b84a13016 100644 --- a/deps/npm/man/man1/npm-install-ci-test.1 +++ b/deps/npm/man/man1/npm-install-ci-test.1 @@ -1,4 +1,4 @@ -.TH "NPM-INSTALL-CI-TEST" "1" "February 2024" "" "" +.TH "NPM-INSTALL-CI-TEST" "1" "April 2024" "" "" .SH "NAME" \fBnpm-install-ci-test\fR - Install a project with a clean slate and run tests .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-install-test.1 b/deps/npm/man/man1/npm-install-test.1 index 8a0fcc72025b28..331fe854774f58 100644 --- a/deps/npm/man/man1/npm-install-test.1 +++ b/deps/npm/man/man1/npm-install-test.1 @@ -1,4 +1,4 @@ -.TH "NPM-INSTALL-TEST" "1" "February 2024" "" "" +.TH "NPM-INSTALL-TEST" "1" "April 2024" "" "" .SH "NAME" \fBnpm-install-test\fR - Install package(s) and run tests .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-install.1 b/deps/npm/man/man1/npm-install.1 index d6b5990abffd56..6d98e0e40923c0 100644 --- a/deps/npm/man/man1/npm-install.1 +++ b/deps/npm/man/man1/npm-install.1 @@ -1,4 +1,4 @@ -.TH "NPM-INSTALL" "1" "February 2024" "" "" +.TH "NPM-INSTALL" "1" "April 2024" "" "" .SH "NAME" \fBnpm-install\fR - Install a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-link.1 b/deps/npm/man/man1/npm-link.1 index a2ee4fe19a0995..174f9d1bd1baf8 100644 --- a/deps/npm/man/man1/npm-link.1 +++ b/deps/npm/man/man1/npm-link.1 @@ -1,4 +1,4 @@ -.TH "NPM-LINK" "1" "February 2024" "" "" +.TH "NPM-LINK" "1" "April 2024" "" "" .SH "NAME" \fBnpm-link\fR - Symlink a package folder .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-login.1 b/deps/npm/man/man1/npm-login.1 index 858a80ba3e19c1..1b7aea8819d6ec 100644 --- a/deps/npm/man/man1/npm-login.1 +++ b/deps/npm/man/man1/npm-login.1 @@ -1,4 +1,4 @@ -.TH "NPM-LOGIN" "1" "February 2024" "" "" +.TH "NPM-LOGIN" "1" "April 2024" "" "" .SH "NAME" \fBnpm-login\fR - Login to a registry user account .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-logout.1 b/deps/npm/man/man1/npm-logout.1 index 9151725cd4f5e2..113d6c5fab0995 100644 --- a/deps/npm/man/man1/npm-logout.1 +++ b/deps/npm/man/man1/npm-logout.1 @@ -1,4 +1,4 @@ -.TH "NPM-LOGOUT" "1" "February 2024" "" "" +.TH "NPM-LOGOUT" "1" "April 2024" "" "" .SH "NAME" \fBnpm-logout\fR - Log out of the registry .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-ls.1 b/deps/npm/man/man1/npm-ls.1 index 0acb36abe2ae8a..a9f7e4af68b87b 100644 --- a/deps/npm/man/man1/npm-ls.1 +++ b/deps/npm/man/man1/npm-ls.1 @@ -1,4 +1,4 @@ -.TH "NPM-LS" "1" "February 2024" "" "" +.TH "NPM-LS" "1" "April 2024" "" "" .SH "NAME" \fBnpm-ls\fR - List installed packages .SS "Synopsis" @@ -20,7 +20,7 @@ Positional arguments are \fBname@version-range\fR identifiers, which will limit .P .RS 2 .nf -npm@10.5.0 /path/to/npm +npm@10.5.1 /path/to/npm └─┬ init-package-json@0.0.4 └── 
promzard@0.1.5 .fi diff --git a/deps/npm/man/man1/npm-org.1 b/deps/npm/man/man1/npm-org.1 index 56c41d3cf2991c..002259ab5578a8 100644 --- a/deps/npm/man/man1/npm-org.1 +++ b/deps/npm/man/man1/npm-org.1 @@ -1,4 +1,4 @@ -.TH "NPM-ORG" "1" "February 2024" "" "" +.TH "NPM-ORG" "1" "April 2024" "" "" .SH "NAME" \fBnpm-org\fR - Manage orgs .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-outdated.1 b/deps/npm/man/man1/npm-outdated.1 index 8357aa2472a59c..528bc0d0e24d54 100644 --- a/deps/npm/man/man1/npm-outdated.1 +++ b/deps/npm/man/man1/npm-outdated.1 @@ -1,4 +1,4 @@ -.TH "NPM-OUTDATED" "1" "February 2024" "" "" +.TH "NPM-OUTDATED" "1" "April 2024" "" "" .SH "NAME" \fBnpm-outdated\fR - Check for outdated packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-owner.1 b/deps/npm/man/man1/npm-owner.1 index d11d2bf54290e5..114cd78939c6a8 100644 --- a/deps/npm/man/man1/npm-owner.1 +++ b/deps/npm/man/man1/npm-owner.1 @@ -1,4 +1,4 @@ -.TH "NPM-OWNER" "1" "February 2024" "" "" +.TH "NPM-OWNER" "1" "April 2024" "" "" .SH "NAME" \fBnpm-owner\fR - Manage package owners .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-pack.1 b/deps/npm/man/man1/npm-pack.1 index a25b769cc74edb..6c0046bce58252 100644 --- a/deps/npm/man/man1/npm-pack.1 +++ b/deps/npm/man/man1/npm-pack.1 @@ -1,4 +1,4 @@ -.TH "NPM-PACK" "1" "February 2024" "" "" +.TH "NPM-PACK" "1" "April 2024" "" "" .SH "NAME" \fBnpm-pack\fR - Create a tarball from a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-ping.1 b/deps/npm/man/man1/npm-ping.1 index 20bc98116b5e75..c6d509c56a4be4 100644 --- a/deps/npm/man/man1/npm-ping.1 +++ b/deps/npm/man/man1/npm-ping.1 @@ -1,4 +1,4 @@ -.TH "NPM-PING" "1" "February 2024" "" "" +.TH "NPM-PING" "1" "April 2024" "" "" .SH "NAME" \fBnpm-ping\fR - Ping npm registry .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-pkg.1 b/deps/npm/man/man1/npm-pkg.1 index 1c83741aff95e6..170639b8d9af70 100644 --- a/deps/npm/man/man1/npm-pkg.1 +++ b/deps/npm/man/man1/npm-pkg.1 @@ -1,4 +1,4 @@ -.TH "NPM-PKG" "1" "February 2024" "" "" +.TH "NPM-PKG" "1" "April 2024" "" "" .SH "NAME" \fBnpm-pkg\fR - Manages your package.json .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-prefix.1 b/deps/npm/man/man1/npm-prefix.1 index a9fc70239b5de3..0af03494691d7f 100644 --- a/deps/npm/man/man1/npm-prefix.1 +++ b/deps/npm/man/man1/npm-prefix.1 @@ -1,4 +1,4 @@ -.TH "NPM-PREFIX" "1" "February 2024" "" "" +.TH "NPM-PREFIX" "1" "April 2024" "" "" .SH "NAME" \fBnpm-prefix\fR - Display prefix .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-profile.1 b/deps/npm/man/man1/npm-profile.1 index 82f6af354da89e..401c26f05b2d5c 100644 --- a/deps/npm/man/man1/npm-profile.1 +++ b/deps/npm/man/man1/npm-profile.1 @@ -1,4 +1,4 @@ -.TH "NPM-PROFILE" "1" "February 2024" "" "" +.TH "NPM-PROFILE" "1" "April 2024" "" "" .SH "NAME" \fBnpm-profile\fR - Change settings on your registry profile .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-prune.1 b/deps/npm/man/man1/npm-prune.1 index c54bb96caee5c8..a7162e053616dd 100644 --- a/deps/npm/man/man1/npm-prune.1 +++ b/deps/npm/man/man1/npm-prune.1 @@ -1,4 +1,4 @@ -.TH "NPM-PRUNE" "1" "February 2024" "" "" +.TH "NPM-PRUNE" "1" "April 2024" "" "" .SH "NAME" \fBnpm-prune\fR - Remove extraneous packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-publish.1 b/deps/npm/man/man1/npm-publish.1 index 55a90c1380b487..9ebfbaa761bb12 100644 --- a/deps/npm/man/man1/npm-publish.1 +++ b/deps/npm/man/man1/npm-publish.1 @@ -1,4 +1,4 @@ -.TH "NPM-PUBLISH" "1" "February 2024" "" "" +.TH "NPM-PUBLISH" "1" "April 2024" "" "" .SH 
"NAME" \fBnpm-publish\fR - Publish a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-query.1 b/deps/npm/man/man1/npm-query.1 index 3940d6e80e15e6..d2271722e13711 100644 --- a/deps/npm/man/man1/npm-query.1 +++ b/deps/npm/man/man1/npm-query.1 @@ -1,4 +1,4 @@ -.TH "NPM-QUERY" "1" "February 2024" "" "" +.TH "NPM-QUERY" "1" "April 2024" "" "" .SH "NAME" \fBnpm-query\fR - Dependency selector query .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-rebuild.1 b/deps/npm/man/man1/npm-rebuild.1 index ec29b9e036471d..9fe38d79998a7c 100644 --- a/deps/npm/man/man1/npm-rebuild.1 +++ b/deps/npm/man/man1/npm-rebuild.1 @@ -1,4 +1,4 @@ -.TH "NPM-REBUILD" "1" "February 2024" "" "" +.TH "NPM-REBUILD" "1" "April 2024" "" "" .SH "NAME" \fBnpm-rebuild\fR - Rebuild a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-repo.1 b/deps/npm/man/man1/npm-repo.1 index a909f41a882fd2..197d333c10a919 100644 --- a/deps/npm/man/man1/npm-repo.1 +++ b/deps/npm/man/man1/npm-repo.1 @@ -1,4 +1,4 @@ -.TH "NPM-REPO" "1" "February 2024" "" "" +.TH "NPM-REPO" "1" "April 2024" "" "" .SH "NAME" \fBnpm-repo\fR - Open package repository page in the browser .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-restart.1 b/deps/npm/man/man1/npm-restart.1 index f83b41709ff3e1..59acbf2c09f56c 100644 --- a/deps/npm/man/man1/npm-restart.1 +++ b/deps/npm/man/man1/npm-restart.1 @@ -1,4 +1,4 @@ -.TH "NPM-RESTART" "1" "February 2024" "" "" +.TH "NPM-RESTART" "1" "April 2024" "" "" .SH "NAME" \fBnpm-restart\fR - Restart a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-root.1 b/deps/npm/man/man1/npm-root.1 index 9819a9d9e5e9e1..e56b2f3d3afb13 100644 --- a/deps/npm/man/man1/npm-root.1 +++ b/deps/npm/man/man1/npm-root.1 @@ -1,4 +1,4 @@ -.TH "NPM-ROOT" "1" "February 2024" "" "" +.TH "NPM-ROOT" "1" "April 2024" "" "" .SH "NAME" \fBnpm-root\fR - Display npm root .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-run-script.1 b/deps/npm/man/man1/npm-run-script.1 index 513eb7de904399..d6e98845b4c411 100644 --- a/deps/npm/man/man1/npm-run-script.1 +++ b/deps/npm/man/man1/npm-run-script.1 @@ -1,4 +1,4 @@ -.TH "NPM-RUN-SCRIPT" "1" "February 2024" "" "" +.TH "NPM-RUN-SCRIPT" "1" "April 2024" "" "" .SH "NAME" \fBnpm-run-script\fR - Run arbitrary package scripts .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-sbom.1 b/deps/npm/man/man1/npm-sbom.1 index 5c3016bff248e0..b984044896eaed 100644 --- a/deps/npm/man/man1/npm-sbom.1 +++ b/deps/npm/man/man1/npm-sbom.1 @@ -1,4 +1,4 @@ -.TH "NPM-SBOM" "1" "February 2024" "" "" +.TH "NPM-SBOM" "1" "April 2024" "" "" .SH "NAME" \fBnpm-sbom\fR - Generate a Software Bill of Materials (SBOM) .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-search.1 b/deps/npm/man/man1/npm-search.1 index 3467f3256b8cb0..7927e562bf9e59 100644 --- a/deps/npm/man/man1/npm-search.1 +++ b/deps/npm/man/man1/npm-search.1 @@ -1,4 +1,4 @@ -.TH "NPM-SEARCH" "1" "February 2024" "" "" +.TH "NPM-SEARCH" "1" "April 2024" "" "" .SH "NAME" \fBnpm-search\fR - Search for packages .SS "Synopsis" @@ -79,6 +79,16 @@ Type: Boolean .P Show the description in \fBnpm search\fR +.SS "\fBsearchlimit\fR" +.RS 0 +.IP \(bu 4 +Default: 20 +.IP \(bu 4 +Type: Number +.RE 0 + +.P +Number of items to limit search results to. Will not apply at all to legacy searches. 
.SS "\fBsearchopts\fR" .RS 0 .IP \(bu 4 diff --git a/deps/npm/man/man1/npm-shrinkwrap.1 b/deps/npm/man/man1/npm-shrinkwrap.1 index 37320600e7d922..cb74dfd382f0a1 100644 --- a/deps/npm/man/man1/npm-shrinkwrap.1 +++ b/deps/npm/man/man1/npm-shrinkwrap.1 @@ -1,4 +1,4 @@ -.TH "NPM-SHRINKWRAP" "1" "February 2024" "" "" +.TH "NPM-SHRINKWRAP" "1" "April 2024" "" "" .SH "NAME" \fBnpm-shrinkwrap\fR - Lock down dependency versions for publication .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-star.1 b/deps/npm/man/man1/npm-star.1 index 22cbdb0d3be628..951ab5bdeb7eb3 100644 --- a/deps/npm/man/man1/npm-star.1 +++ b/deps/npm/man/man1/npm-star.1 @@ -1,4 +1,4 @@ -.TH "NPM-STAR" "1" "February 2024" "" "" +.TH "NPM-STAR" "1" "April 2024" "" "" .SH "NAME" \fBnpm-star\fR - Mark your favorite packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-stars.1 b/deps/npm/man/man1/npm-stars.1 index 1f61a034e89c89..7848df08500154 100644 --- a/deps/npm/man/man1/npm-stars.1 +++ b/deps/npm/man/man1/npm-stars.1 @@ -1,4 +1,4 @@ -.TH "NPM-STARS" "1" "February 2024" "" "" +.TH "NPM-STARS" "1" "April 2024" "" "" .SH "NAME" \fBnpm-stars\fR - View packages marked as favorites .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-start.1 b/deps/npm/man/man1/npm-start.1 index 3aebd2cb6cb71c..c46b8dabed2e89 100644 --- a/deps/npm/man/man1/npm-start.1 +++ b/deps/npm/man/man1/npm-start.1 @@ -1,4 +1,4 @@ -.TH "NPM-START" "1" "February 2024" "" "" +.TH "NPM-START" "1" "April 2024" "" "" .SH "NAME" \fBnpm-start\fR - Start a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-stop.1 b/deps/npm/man/man1/npm-stop.1 index 0129b04701b769..48ef025074040e 100644 --- a/deps/npm/man/man1/npm-stop.1 +++ b/deps/npm/man/man1/npm-stop.1 @@ -1,4 +1,4 @@ -.TH "NPM-STOP" "1" "February 2024" "" "" +.TH "NPM-STOP" "1" "April 2024" "" "" .SH "NAME" \fBnpm-stop\fR - Stop a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-team.1 b/deps/npm/man/man1/npm-team.1 index 61994725fea1ec..4bbed65d580c39 100644 --- a/deps/npm/man/man1/npm-team.1 +++ b/deps/npm/man/man1/npm-team.1 @@ -1,4 +1,4 @@ -.TH "NPM-TEAM" "1" "February 2024" "" "" +.TH "NPM-TEAM" "1" "April 2024" "" "" .SH "NAME" \fBnpm-team\fR - Manage organization teams and team memberships .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-test.1 b/deps/npm/man/man1/npm-test.1 index d30343b3032130..233c6db054a77c 100644 --- a/deps/npm/man/man1/npm-test.1 +++ b/deps/npm/man/man1/npm-test.1 @@ -1,4 +1,4 @@ -.TH "NPM-TEST" "1" "February 2024" "" "" +.TH "NPM-TEST" "1" "April 2024" "" "" .SH "NAME" \fBnpm-test\fR - Test a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-token.1 b/deps/npm/man/man1/npm-token.1 index 3eeb0763970b65..431302c44a7757 100644 --- a/deps/npm/man/man1/npm-token.1 +++ b/deps/npm/man/man1/npm-token.1 @@ -1,4 +1,4 @@ -.TH "NPM-TOKEN" "1" "February 2024" "" "" +.TH "NPM-TOKEN" "1" "April 2024" "" "" .SH "NAME" \fBnpm-token\fR - Manage your authentication tokens .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-uninstall.1 b/deps/npm/man/man1/npm-uninstall.1 index 321b58fc391d6e..700037a7e1cbf8 100644 --- a/deps/npm/man/man1/npm-uninstall.1 +++ b/deps/npm/man/man1/npm-uninstall.1 @@ -1,4 +1,4 @@ -.TH "NPM-UNINSTALL" "1" "February 2024" "" "" +.TH "NPM-UNINSTALL" "1" "April 2024" "" "" .SH "NAME" \fBnpm-uninstall\fR - Remove a package .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-unpublish.1 b/deps/npm/man/man1/npm-unpublish.1 index 2cf081e01f7007..01697803cdfaf6 100644 --- a/deps/npm/man/man1/npm-unpublish.1 +++ b/deps/npm/man/man1/npm-unpublish.1 @@ -1,4 +1,4 
@@ -.TH "NPM-UNPUBLISH" "1" "February 2024" "" "" +.TH "NPM-UNPUBLISH" "1" "April 2024" "" "" .SH "NAME" \fBnpm-unpublish\fR - Remove a package from the registry .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-unstar.1 b/deps/npm/man/man1/npm-unstar.1 index 700a7b3aa186b7..98a6980bd021d5 100644 --- a/deps/npm/man/man1/npm-unstar.1 +++ b/deps/npm/man/man1/npm-unstar.1 @@ -1,4 +1,4 @@ -.TH "NPM-UNSTAR" "1" "February 2024" "" "" +.TH "NPM-UNSTAR" "1" "April 2024" "" "" .SH "NAME" \fBnpm-unstar\fR - Remove an item from your favorite packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-update.1 b/deps/npm/man/man1/npm-update.1 index 833d3446c47899..af80d1b0e398b7 100644 --- a/deps/npm/man/man1/npm-update.1 +++ b/deps/npm/man/man1/npm-update.1 @@ -1,4 +1,4 @@ -.TH "NPM-UPDATE" "1" "February 2024" "" "" +.TH "NPM-UPDATE" "1" "April 2024" "" "" .SH "NAME" \fBnpm-update\fR - Update packages .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-version.1 b/deps/npm/man/man1/npm-version.1 index be406e39b084fe..3ec27ac0569159 100644 --- a/deps/npm/man/man1/npm-version.1 +++ b/deps/npm/man/man1/npm-version.1 @@ -1,4 +1,4 @@ -.TH "NPM-VERSION" "1" "February 2024" "" "" +.TH "NPM-VERSION" "1" "April 2024" "" "" .SH "NAME" \fBnpm-version\fR - Bump a package version .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-view.1 b/deps/npm/man/man1/npm-view.1 index 8258a85f30be75..39bf6ae8c9cc2e 100644 --- a/deps/npm/man/man1/npm-view.1 +++ b/deps/npm/man/man1/npm-view.1 @@ -1,4 +1,4 @@ -.TH "NPM-VIEW" "1" "February 2024" "" "" +.TH "NPM-VIEW" "1" "April 2024" "" "" .SH "NAME" \fBnpm-view\fR - View registry info .SS "Synopsis" diff --git a/deps/npm/man/man1/npm-whoami.1 b/deps/npm/man/man1/npm-whoami.1 index 6ac250c269fd34..7e38f165585c9a 100644 --- a/deps/npm/man/man1/npm-whoami.1 +++ b/deps/npm/man/man1/npm-whoami.1 @@ -1,4 +1,4 @@ -.TH "NPM-WHOAMI" "1" "February 2024" "" "" +.TH "NPM-WHOAMI" "1" "April 2024" "" "" .SH "NAME" \fBnpm-whoami\fR - Display npm username .SS "Synopsis" diff --git a/deps/npm/man/man1/npm.1 b/deps/npm/man/man1/npm.1 index aee40ff5bac8ff..6ac1a88baf121d 100644 --- a/deps/npm/man/man1/npm.1 +++ b/deps/npm/man/man1/npm.1 @@ -1,4 +1,4 @@ -.TH "NPM" "1" "February 2024" "" "" +.TH "NPM" "1" "April 2024" "" "" .SH "NAME" \fBnpm\fR - javascript package manager .SS "Synopsis" @@ -12,7 +12,7 @@ npm Note: This command is unaware of workspaces. .SS "Version" .P -10.5.0 +10.5.1 .SS "Description" .P npm is the package manager for the Node JavaScript platform. It puts modules in place so that node can find them, and manages dependency conflicts intelligently. 
diff --git a/deps/npm/man/man1/npx.1 b/deps/npm/man/man1/npx.1 index 155451463a0729..0a4ff19f4bc901 100644 --- a/deps/npm/man/man1/npx.1 +++ b/deps/npm/man/man1/npx.1 @@ -1,4 +1,4 @@ -.TH "NPX" "1" "February 2024" "" "" +.TH "NPX" "1" "April 2024" "" "" .SH "NAME" \fBnpx\fR - Run a command from a local or remote npm package .SS "Synopsis" diff --git a/deps/npm/man/man5/folders.5 b/deps/npm/man/man5/folders.5 index de010820184ef4..12bda4e2881711 100644 --- a/deps/npm/man/man5/folders.5 +++ b/deps/npm/man/man5/folders.5 @@ -1,4 +1,4 @@ -.TH "FOLDERS" "5" "February 2024" "" "" +.TH "FOLDERS" "5" "April 2024" "" "" .SH "NAME" \fBfolders\fR - Folder Structures Used by npm .SS "Description" diff --git a/deps/npm/man/man5/install.5 b/deps/npm/man/man5/install.5 index 85a83de6062b8f..42e18760236ea4 100644 --- a/deps/npm/man/man5/install.5 +++ b/deps/npm/man/man5/install.5 @@ -1,4 +1,4 @@ -.TH "INSTALL" "5" "February 2024" "" "" +.TH "INSTALL" "5" "April 2024" "" "" .SH "NAME" \fBinstall\fR - Download and install node and npm .SS "Description" diff --git a/deps/npm/man/man5/npm-global.5 b/deps/npm/man/man5/npm-global.5 index de010820184ef4..12bda4e2881711 100644 --- a/deps/npm/man/man5/npm-global.5 +++ b/deps/npm/man/man5/npm-global.5 @@ -1,4 +1,4 @@ -.TH "FOLDERS" "5" "February 2024" "" "" +.TH "FOLDERS" "5" "April 2024" "" "" .SH "NAME" \fBfolders\fR - Folder Structures Used by npm .SS "Description" diff --git a/deps/npm/man/man5/npm-json.5 b/deps/npm/man/man5/npm-json.5 index 61405e54c35d70..69dcb58aa1b1ef 100644 --- a/deps/npm/man/man5/npm-json.5 +++ b/deps/npm/man/man5/npm-json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE.JSON" "5" "February 2024" "" "" +.TH "PACKAGE.JSON" "5" "April 2024" "" "" .SH "NAME" \fBpackage.json\fR - Specifics of npm's package.json handling .SS "Description" @@ -30,7 +30,7 @@ Some tips: .IP \(bu 4 Don't use the same name as a core Node module. .IP \(bu 4 -Don't put "js" or "node" in the name. It's assumed that it's js, since you're writing a package.json file, and you can specify the engine using the "engines" field. (See below.) +Don't put "js" or "node" in the name. It's assumed that it's js, since you're writing a package.json file, and you can specify the engine using the "\fBengines\fR \fI(engines)\fR" field. (See below.) .IP \(bu 4 The name will probably be passed as an argument to require(), so it should be something short, but also reasonably descriptive. .IP \(bu 4 @@ -52,7 +52,7 @@ Put a description in it. It's a string. This helps people discover your package, Put keywords in it. It's an array of strings. This helps people discover your package as it's listed in \fBnpm search\fR. .SS "homepage" .P -The url to the project homepage. +The URL to the project homepage. .P Example: .P @@ -63,7 +63,7 @@ Example: .RE .SS "bugs" .P -The url to your project's issue tracker and / or the email address to which issues should be reported. These are helpful for people who encounter issues with your package. +The URL to your project's issue tracker and / or the email address to which issues should be reported. These are helpful for people who encounter issues with your package. .P It should look like this: .P @@ -78,9 +78,9 @@ It should look like this: .fi .RE .P -You can specify either one or both values. If you want to provide only a url, you can specify the value for "bugs" as a simple string instead of an object. +You can specify either one or both values. If you want to provide only a URL, you can specify the value for "bugs" as a simple string instead of an object. 
.P -If a url is provided, it will be used by the \fBnpm bugs\fR command. +If a URL is provided, it will be used by the \fBnpm bugs\fR command. .SS "license" .P You should specify a license for your package so that people know how they are permitted to use it, and any restrictions you're placing on it. @@ -471,7 +471,7 @@ Do it like this: .fi .RE .P -The URL should be a publicly available (perhaps read-only) url that can be handed directly to a VCS program without any modification. It should not be a url to an html project page that you put in your browser. It's for computers. +The URL should be a publicly available (perhaps read-only) URL that can be handed directly to a VCS program without any modification. It should not be a URL to an html project page that you put in your browser. It's for computers. .P For GitHub, GitHub gist, Bitbucket, or GitLab repositories you can use the same shortcut syntax you use for \fBnpm install\fR: .P @@ -599,7 +599,7 @@ You may specify a tarball URL in place of a version range. This tarball will be downloaded and installed locally to your package at install time. .SS "Git URLs as Dependencies" .P -Git urls are of the form: +Git URLs are of the form: .P .RS 2 .nf @@ -644,7 +644,7 @@ This flow will occur if your git dependency uses \fBworkspaces\fR, or if any of If your git repository includes pre-built artifacts, you will likely want to make sure that none of the above scripts are defined, or your dependency will be rebuilt for every installation. .SS "GitHub URLs" .P -As of version 1.1.65, you can refer to GitHub urls as just "foo": "user/foo-project". Just as with git URLs, a \fBcommit-ish\fR suffix can be included. For example: +As of version 1.1.65, you can refer to GitHub URLs as just "foo": "user/foo-project". Just as with git URLs, a \fBcommit-ish\fR suffix can be included. For example: .P .RS 2 .nf @@ -808,7 +808,7 @@ If this is spelled \fB"bundledDependencies"\fR, then that is also honored. Alternatively, \fB"bundleDependencies"\fR can be defined as a boolean value. A value of \fBtrue\fR will bundle all dependencies, a value of \fBfalse\fR will bundle none. .SS "optionalDependencies" .P -If a dependency can be used, but you would like npm to proceed if it cannot be found or fails to install, then you may put it in the \fBoptionalDependencies\fR object. This is a map of package name to version or url, just like the \fBdependencies\fR object. The difference is that build failures do not cause installation to fail. Running \fBnpm install +If a dependency can be used, but you would like npm to proceed if it cannot be found or fails to install, then you may put it in the \fBoptionalDependencies\fR object. This is a map of package name to version or URL, just like the \fBdependencies\fR object. The difference is that build failures do not cause installation to fail. Running \fBnpm install --omit=optional\fR will prevent these dependencies from being installed. .P It is still your program's responsibility to handle the lack of the dependency. 
For example, something like this: diff --git a/deps/npm/man/man5/npm-shrinkwrap-json.5 b/deps/npm/man/man5/npm-shrinkwrap-json.5 index b3abb0cc092c2a..36a4073b92ac02 100644 --- a/deps/npm/man/man5/npm-shrinkwrap-json.5 +++ b/deps/npm/man/man5/npm-shrinkwrap-json.5 @@ -1,4 +1,4 @@ -.TH "NPM-SHRINKWRAP.JSON" "5" "February 2024" "" "" +.TH "NPM-SHRINKWRAP.JSON" "5" "April 2024" "" "" .SH "NAME" \fBnpm-shrinkwrap.json\fR - A publishable lockfile .SS "Description" diff --git a/deps/npm/man/man5/npmrc.5 b/deps/npm/man/man5/npmrc.5 index c86c21b155dbd7..16543b3bc98520 100644 --- a/deps/npm/man/man5/npmrc.5 +++ b/deps/npm/man/man5/npmrc.5 @@ -1,4 +1,4 @@ -.TH "NPMRC" "5" "February 2024" "" "" +.TH "NPMRC" "5" "April 2024" "" "" .SH "NAME" \fBnpmrc\fR - The npm config files .SS "Description" diff --git a/deps/npm/man/man5/package-json.5 b/deps/npm/man/man5/package-json.5 index 61405e54c35d70..69dcb58aa1b1ef 100644 --- a/deps/npm/man/man5/package-json.5 +++ b/deps/npm/man/man5/package-json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE.JSON" "5" "February 2024" "" "" +.TH "PACKAGE.JSON" "5" "April 2024" "" "" .SH "NAME" \fBpackage.json\fR - Specifics of npm's package.json handling .SS "Description" @@ -30,7 +30,7 @@ Some tips: .IP \(bu 4 Don't use the same name as a core Node module. .IP \(bu 4 -Don't put "js" or "node" in the name. It's assumed that it's js, since you're writing a package.json file, and you can specify the engine using the "engines" field. (See below.) +Don't put "js" or "node" in the name. It's assumed that it's js, since you're writing a package.json file, and you can specify the engine using the "\fBengines\fR \fI(engines)\fR" field. (See below.) .IP \(bu 4 The name will probably be passed as an argument to require(), so it should be something short, but also reasonably descriptive. .IP \(bu 4 @@ -52,7 +52,7 @@ Put a description in it. It's a string. This helps people discover your package, Put keywords in it. It's an array of strings. This helps people discover your package as it's listed in \fBnpm search\fR. .SS "homepage" .P -The url to the project homepage. +The URL to the project homepage. .P Example: .P @@ -63,7 +63,7 @@ Example: .RE .SS "bugs" .P -The url to your project's issue tracker and / or the email address to which issues should be reported. These are helpful for people who encounter issues with your package. +The URL to your project's issue tracker and / or the email address to which issues should be reported. These are helpful for people who encounter issues with your package. .P It should look like this: .P @@ -78,9 +78,9 @@ It should look like this: .fi .RE .P -You can specify either one or both values. If you want to provide only a url, you can specify the value for "bugs" as a simple string instead of an object. +You can specify either one or both values. If you want to provide only a URL, you can specify the value for "bugs" as a simple string instead of an object. .P -If a url is provided, it will be used by the \fBnpm bugs\fR command. +If a URL is provided, it will be used by the \fBnpm bugs\fR command. .SS "license" .P You should specify a license for your package so that people know how they are permitted to use it, and any restrictions you're placing on it. @@ -471,7 +471,7 @@ Do it like this: .fi .RE .P -The URL should be a publicly available (perhaps read-only) url that can be handed directly to a VCS program without any modification. It should not be a url to an html project page that you put in your browser. It's for computers. 
+The URL should be a publicly available (perhaps read-only) URL that can be handed directly to a VCS program without any modification. It should not be a URL to an html project page that you put in your browser. It's for computers. .P For GitHub, GitHub gist, Bitbucket, or GitLab repositories you can use the same shortcut syntax you use for \fBnpm install\fR: .P @@ -599,7 +599,7 @@ You may specify a tarball URL in place of a version range. This tarball will be downloaded and installed locally to your package at install time. .SS "Git URLs as Dependencies" .P -Git urls are of the form: +Git URLs are of the form: .P .RS 2 .nf @@ -644,7 +644,7 @@ This flow will occur if your git dependency uses \fBworkspaces\fR, or if any of If your git repository includes pre-built artifacts, you will likely want to make sure that none of the above scripts are defined, or your dependency will be rebuilt for every installation. .SS "GitHub URLs" .P -As of version 1.1.65, you can refer to GitHub urls as just "foo": "user/foo-project". Just as with git URLs, a \fBcommit-ish\fR suffix can be included. For example: +As of version 1.1.65, you can refer to GitHub URLs as just "foo": "user/foo-project". Just as with git URLs, a \fBcommit-ish\fR suffix can be included. For example: .P .RS 2 .nf @@ -808,7 +808,7 @@ If this is spelled \fB"bundledDependencies"\fR, then that is also honored. Alternatively, \fB"bundleDependencies"\fR can be defined as a boolean value. A value of \fBtrue\fR will bundle all dependencies, a value of \fBfalse\fR will bundle none. .SS "optionalDependencies" .P -If a dependency can be used, but you would like npm to proceed if it cannot be found or fails to install, then you may put it in the \fBoptionalDependencies\fR object. This is a map of package name to version or url, just like the \fBdependencies\fR object. The difference is that build failures do not cause installation to fail. Running \fBnpm install +If a dependency can be used, but you would like npm to proceed if it cannot be found or fails to install, then you may put it in the \fBoptionalDependencies\fR object. This is a map of package name to version or URL, just like the \fBdependencies\fR object. The difference is that build failures do not cause installation to fail. Running \fBnpm install --omit=optional\fR will prevent these dependencies from being installed. .P It is still your program's responsibility to handle the lack of the dependency. 
For example, something like this: diff --git a/deps/npm/man/man5/package-lock-json.5 b/deps/npm/man/man5/package-lock-json.5 index ceea2b43da89e2..426901d95437d9 100644 --- a/deps/npm/man/man5/package-lock-json.5 +++ b/deps/npm/man/man5/package-lock-json.5 @@ -1,4 +1,4 @@ -.TH "PACKAGE-LOCK.JSON" "5" "February 2024" "" "" +.TH "PACKAGE-LOCK.JSON" "5" "April 2024" "" "" .SH "NAME" \fBpackage-lock.json\fR - A manifestation of the manifest .SS "Description" diff --git a/deps/npm/man/man7/config.7 b/deps/npm/man/man7/config.7 index 181a7d6a469a9e..ffbf3e50537e37 100644 --- a/deps/npm/man/man7/config.7 +++ b/deps/npm/man/man7/config.7 @@ -1,4 +1,4 @@ -.TH "CONFIG" "7" "February 2024" "" "" +.TH "CONFIG" "7" "April 2024" "" "" .SH "NAME" \fBconfig\fR - More than you probably want to know about npm configuration .SS "Description" diff --git a/deps/npm/man/man7/dependency-selectors.7 b/deps/npm/man/man7/dependency-selectors.7 index 78f451177e3172..588989321c6ad1 100644 --- a/deps/npm/man/man7/dependency-selectors.7 +++ b/deps/npm/man/man7/dependency-selectors.7 @@ -1,4 +1,4 @@ -.TH "QUERYING" "7" "February 2024" "" "" +.TH "QUERYING" "7" "April 2024" "" "" .SH "NAME" \fBQuerying\fR - Dependency Selector Syntax & Querying .SS "Description" diff --git a/deps/npm/man/man7/developers.7 b/deps/npm/man/man7/developers.7 index 8dd07548d9b873..a69fcc052a6da0 100644 --- a/deps/npm/man/man7/developers.7 +++ b/deps/npm/man/man7/developers.7 @@ -1,4 +1,4 @@ -.TH "DEVELOPERS" "7" "February 2024" "" "" +.TH "DEVELOPERS" "7" "April 2024" "" "" .SH "NAME" \fBdevelopers\fR - Developer Guide .SS "Description" diff --git a/deps/npm/man/man7/logging.7 b/deps/npm/man/man7/logging.7 index b85fb4c47cfcdc..dfbed85ffbb07f 100644 --- a/deps/npm/man/man7/logging.7 +++ b/deps/npm/man/man7/logging.7 @@ -1,4 +1,4 @@ -.TH "LOGGING" "7" "February 2024" "" "" +.TH "LOGGING" "7" "April 2024" "" "" .SH "NAME" \fBLogging\fR - Why, What & How We Log .SS "Description" diff --git a/deps/npm/man/man7/orgs.7 b/deps/npm/man/man7/orgs.7 index 36850dd961897d..181693d04032d7 100644 --- a/deps/npm/man/man7/orgs.7 +++ b/deps/npm/man/man7/orgs.7 @@ -1,4 +1,4 @@ -.TH "ORGS" "7" "February 2024" "" "" +.TH "ORGS" "7" "April 2024" "" "" .SH "NAME" \fBorgs\fR - Working with Teams & Orgs .SS "Description" diff --git a/deps/npm/man/man7/package-spec.7 b/deps/npm/man/man7/package-spec.7 index c9d309ccbbdbe0..167a6915218b94 100644 --- a/deps/npm/man/man7/package-spec.7 +++ b/deps/npm/man/man7/package-spec.7 @@ -1,4 +1,4 @@ -.TH "PACKAGE-SPEC" "7" "February 2024" "" "" +.TH "PACKAGE-SPEC" "7" "April 2024" "" "" .SH "NAME" \fBpackage-spec\fR - Package name specifier .SS "Description" diff --git a/deps/npm/man/man7/registry.7 b/deps/npm/man/man7/registry.7 index c0e4474c09010e..73f17929c427aa 100644 --- a/deps/npm/man/man7/registry.7 +++ b/deps/npm/man/man7/registry.7 @@ -1,4 +1,4 @@ -.TH "REGISTRY" "7" "February 2024" "" "" +.TH "REGISTRY" "7" "April 2024" "" "" .SH "NAME" \fBregistry\fR - The JavaScript Package Registry .SS "Description" diff --git a/deps/npm/man/man7/removal.7 b/deps/npm/man/man7/removal.7 index a846bcca25ffd3..9b091566aaf453 100644 --- a/deps/npm/man/man7/removal.7 +++ b/deps/npm/man/man7/removal.7 @@ -1,4 +1,4 @@ -.TH "REMOVAL" "7" "February 2024" "" "" +.TH "REMOVAL" "7" "April 2024" "" "" .SH "NAME" \fBremoval\fR - Cleaning the Slate .SS "Synopsis" diff --git a/deps/npm/man/man7/scope.7 b/deps/npm/man/man7/scope.7 index eb6fe9b6d822e9..437a4df173394e 100644 --- a/deps/npm/man/man7/scope.7 +++ b/deps/npm/man/man7/scope.7 @@ -1,4 +1,4 
@@ -.TH "SCOPE" "7" "February 2024" "" "" +.TH "SCOPE" "7" "April 2024" "" "" .SH "NAME" \fBscope\fR - Scoped packages .SS "Description" diff --git a/deps/npm/man/man7/scripts.7 b/deps/npm/man/man7/scripts.7 index e88a9c4e69519f..b051216d26213e 100644 --- a/deps/npm/man/man7/scripts.7 +++ b/deps/npm/man/man7/scripts.7 @@ -1,4 +1,4 @@ -.TH "SCRIPTS" "7" "February 2024" "" "" +.TH "SCRIPTS" "7" "April 2024" "" "" .SH "NAME" \fBscripts\fR - How npm handles the "scripts" field .SS "Description" diff --git a/deps/npm/man/man7/workspaces.7 b/deps/npm/man/man7/workspaces.7 index 9db114e4ded21b..78242b33b18c7b 100644 --- a/deps/npm/man/man7/workspaces.7 +++ b/deps/npm/man/man7/workspaces.7 @@ -1,4 +1,4 @@ -.TH "WORKSPACES" "7" "February 2024" "" "" +.TH "WORKSPACES" "7" "April 2024" "" "" .SH "NAME" \fBworkspaces\fR - Working with workspaces .SS "Description" diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/query-selector-all.js b/deps/npm/node_modules/@npmcli/arborist/lib/query-selector-all.js index ce49201ce624c6..c8ec866f0f9691 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/query-selector-all.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/query-selector-all.js @@ -257,7 +257,12 @@ class Results { for (const edge of node.edgesOut.values()) { if (edge.missing) { const pkg = { name: edge.name, version: edge.spec } - res.push(new this.#targetNode.constructor({ pkg })) + const item = new this.#targetNode.constructor({ pkg }) + item.queryContext = { + missing: true, + } + item.edgesIn = new Set([edge]) + res.push(item) } } return res diff --git a/deps/npm/node_modules/@npmcli/arborist/package.json b/deps/npm/node_modules/@npmcli/arborist/package.json index c761bc10d6cec2..d7c393d99dfa57 100644 --- a/deps/npm/node_modules/@npmcli/arborist/package.json +++ b/deps/npm/node_modules/@npmcli/arborist/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/arborist", - "version": "7.4.0", + "version": "7.4.1", "description": "Manage node_modules trees", "dependencies": { "@isaacs/string-locale-compare": "^1.1.0", @@ -19,12 +19,12 @@ "hosted-git-info": "^7.0.1", "json-parse-even-better-errors": "^3.0.0", "json-stringify-nice": "^1.1.4", - "minimatch": "^9.0.0", + "minimatch": "^9.0.4", "nopt": "^7.0.0", "npm-install-checks": "^6.2.0", "npm-package-arg": "^11.0.1", "npm-pick-manifest": "^9.0.0", - "npm-registry-fetch": "^16.0.0", + "npm-registry-fetch": "^16.2.0", "npmlog": "^7.0.1", "pacote": "^17.0.4", "parse-conflict-json": "^3.0.0", diff --git a/deps/npm/node_modules/@npmcli/config/package.json b/deps/npm/node_modules/@npmcli/config/package.json index 28102bea2781aa..b5c73e1b13a9b1 100644 --- a/deps/npm/node_modules/@npmcli/config/package.json +++ b/deps/npm/node_modules/@npmcli/config/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/config", - "version": "8.2.0", + "version": "8.2.1", "files": [ "bin/", "lib/" @@ -38,7 +38,7 @@ "dependencies": { "@npmcli/map-workspaces": "^3.0.2", "ci-info": "^4.0.0", - "ini": "^4.1.0", + "ini": "^4.1.2", "nopt": "^7.0.0", "proc-log": "^3.0.0", "read-package-json-fast": "^3.0.2", diff --git a/deps/npm/node_modules/@npmcli/redact/LICENSE b/deps/npm/node_modules/@npmcli/redact/LICENSE new file mode 100644 index 00000000000000..c21644115c85d0 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/redact/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 npm + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, 
including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/deps/npm/node_modules/@npmcli/redact/lib/index.js b/deps/npm/node_modules/@npmcli/redact/lib/index.js new file mode 100644 index 00000000000000..e5b5e74157c2a3 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/redact/lib/index.js @@ -0,0 +1,59 @@ +const { URL } = require('url') + +const REPLACE = '***' +const TOKEN_REGEX = /\bnpm_[a-zA-Z0-9]{36}\b/g +const GUID_REGEX = /\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b/g + +const redact = (value) => { + if (typeof value !== 'string' || !value) { + return value + } + + let urlValue + try { + urlValue = new URL(value) + } catch { + // If it's not a URL then we can ignore all errors + } + + if (urlValue?.password) { + urlValue.password = REPLACE + value = urlValue.toString() + } + + return value + .replace(TOKEN_REGEX, `npm_${REPLACE}`) + .replace(GUID_REGEX, REPLACE) +} + +// split on \s|= similar to how nopt parses options +const splitAndRedact = (str) => { + // stateful regex, don't move out of this scope + const splitChars = /[\s=]/g + + let match = null + let result = '' + let index = 0 + while (match = splitChars.exec(str)) { + result += redact(str.slice(index, match.index)) + match[0] + index = splitChars.lastIndex + } + + return result + redact(str.slice(index)) +} + +// replaces auth info in an array of arguments or in a strings +const redactLog = (arg) => { + if (typeof arg === 'string') { + return splitAndRedact(arg) + } else if (Array.isArray(arg)) { + return arg.map((a) => typeof a === 'string' ? splitAndRedact(a) : a) + } + + return arg +} + +module.exports = { + redact, + redactLog, +} diff --git a/deps/npm/node_modules/@npmcli/redact/package.json b/deps/npm/node_modules/@npmcli/redact/package.json new file mode 100644 index 00000000000000..1fc64a4c02f28e --- /dev/null +++ b/deps/npm/node_modules/@npmcli/redact/package.json @@ -0,0 +1,45 @@ +{ + "name": "@npmcli/redact", + "version": "1.1.0", + "description": "Redact sensitive npm information from output", + "main": "lib/index.js", + "scripts": { + "test": "tap", + "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", + "postlint": "template-oss-check", + "template-oss-apply": "template-oss-apply --force", + "lintfix": "npm run lint -- --fix", + "snap": "tap", + "posttest": "npm run lint" + }, + "keywords": [], + "author": "GitHub Inc.", + "license": "ISC", + "files": [ + "bin/", + "lib/" + ], + "repository": { + "type": "git", + "url": "https://github.com/npm/redact.git" + }, + "templateOSS": { + "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", + "version": "4.21.3", + "publish": true + }, + "tap": { + "nyc-arg": [ + "--exclude", + "tap-snapshots/**" + ] + }, + "devDependencies": { + "@npmcli/eslint-config": "^4.0.2", + "@npmcli/template-oss": "4.21.3", + "tap": "^16.3.10" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } +} diff --git a/deps/npm/node_modules/@sigstore/tuf/package.json b/deps/npm/node_modules/@sigstore/tuf/package.json index 0e5fab2a2762d8..fc842df1814700 100644 --- a/deps/npm/node_modules/@sigstore/tuf/package.json +++ b/deps/npm/node_modules/@sigstore/tuf/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/tuf", - "version": "2.3.1", + "version": "2.3.2", "description": "Client for the Sigstore TUF repository", "main": "dist/index.js", "types": "dist/index.d.ts", diff --git a/deps/npm/node_modules/@sigstore/tuf/seeds.json b/deps/npm/node_modules/@sigstore/tuf/seeds.json index a0051cea67b7b5..e8d97d5fa7a672 100644 --- a/deps/npm/node_modules/@sigstore/tuf/seeds.json +++ b/deps/npm/node_modules/@sigstore/tuf/seeds.json @@ -1 +1 @@ -{"https://tuf-repo-cdn.sigstore.dev":{"root.json":"ewoJInNpZ25lZCI6IHsKCQkiX3R5cGUiOiAicm9vdCIsCgkJInNwZWNfdmVyc2lvbiI6ICIxLjAiLAoJCSJ2ZXJzaW9uIjogOCwKCQkiZXhwaXJlcyI6ICIyMDI0LTAzLTI2VDA0OjM4OjU1WiIsCgkJImtleXMiOiB7CgkJCSIyNWEwZWI0NTBmZDNlZTJiZDc5MjE4Yzk2M2RjZTNmMWNjNjExOGJhZGYyNTFiZjE0OWYwYmQwN2Q1Y2FiZTk5IjogewoJCQkJImtleXR5cGUiOiAiZWNkc2Etc2hhMi1uaXN0cDI1NiIsCgkJCQkic2NoZW1lIjogImVjZHNhLXNoYTItbmlzdHAyNTYiLAoJCQkJImtleWlkX2hhc2hfYWxnb3JpdGhtcyI6IFsKCQkJCQkic2hhMjU2IiwKCQkJCQkic2hhNTEyIgoJCQkJXSwKCQkJCSJrZXl2YWwiOiB7CgkJCQkJInB1YmxpYyI6ICItLS0tLUJFR0lOIFBVQkxJQyBLRVktLS0tLVxuTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFRVhzejNTWlhGYjhqTVY0Mmo2cEpseWpialI4S1xuTjNCd29jZXhxNkxNSWI1cXNXS09RdkxOMTZOVWVmTGM0SHN3T291bVJzVlZhYWpTcFFTNmZvYmtSdz09XG4tLS0tLUVORCBQVUJMSUMgS0VZLS0tLS1cbiIKCQkJCX0KCQkJfSwKCQkJIjJlNjFjZDBjYmY0YThmNDU4MDliZGE5ZjdmNzhjMGQzM2FkMTE4NDJmZjk0YWUzNDA4NzNlMjY2NGRjODQzZGUiOiB7CgkJCQkia2V5dHlwZSI6ICJlY2RzYS1zaGEyLW5pc3RwMjU2IiwKCQkJCSJzY2hlbWUiOiAiZWNkc2Etc2hhMi1uaXN0cDI1NiIsCgkJCQkia2V5aWRfaGFzaF9hbGdvcml0aG1zIjogWwoJCQkJCSJzaGEyNTYiLAoJCQkJCSJzaGE1MTIiCgkJCQldLAoJCQkJImtleXZhbCI6IHsKCQkJCQkicHVibGljIjogIi0tLS0tQkVHSU4gUFVCTElDIEtFWS0tLS0tXG5NRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUUwZ2hyaDkyTHcxWXIzaWRHVjVXcUN0TURCOEN4XG4rRDhoZEM0dzJaTE5JcGxWUm9WR0xza1lhM2doZU15T2ppSjhrUGkxNWFRMi8vN1Arb2o3VXZKUEd3PT1cbi0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLVxuIgoJCQkJfQoJCQl9LAoJCQkiNDViMjgzODI1ZWIxODRjYWJkNTgyZWIxN2I3NGZjOGVkNDA0ZjY4Y2Y0NTJhY2FiZGFkMmVkNmY5MGNlMjE2YiI6IHsKCQkJCSJrZXl0eXBlIjogImVjZHNhLXNoYTItbmlzdHAyNTYiLAoJCQkJInNjaGVtZSI6ICJlY2RzYS1zaGEyLW5pc3RwMjU2IiwKCQkJCSJrZXlpZF9oYXNoX2FsZ29yaXRobXMiOiBbCgkJCQkJInNoYTI1NiIsCgkJCQkJInNoYTUxMiIKCQkJCV0sCgkJCQkia2V5dmFsIjogewoJCQkJCSJwdWJsaWMiOiAiLS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS1cbk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRUxyV3ZOdDk0djRSMDg1RUxlZUNNeEhwN1BsZEZcbjAvVDFHeHVrVWgyT0R1Z2dMR0pFMHBjMWU4Q1NCZjZDUzkxRndvOUZVT3VSc2pCVWxkK1ZxU3lDZFE9PVxuLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tXG4iCgkJCQl9CgkJCX0sCgkJCSI3Zjc1MTNiMjU0MjlhNjQ0NzNlMTBjZTNhZDJmM2RhMzcyYmJkZDE0YjY1ZDA3YmJhZjU0N2U3YzhiYmJlNjJiIjogewoJCQkJImtleXR5cGUiOiAiZWNkc2Etc2hhMi1uaXN0cDI1NiIsCgkJCQkic2NoZW1lIjogImVjZHNhLXNoYTItbmlzdHAyNTYiLAoJCQkJImtleWlkX2hhc2hfYWxnb3JpdGhtcyI6IFsKCQkJCQkic2hhMjU2IiwKCQkJCQkic2hhNTEyIgoJCQkJXSwKCQkJCSJrZXl2YWwiOiB7CgkJCQkJInB1YmxpYyI6ICItLS0tLUJFR0lOIFBVQkxJQyBLRVktLS0tLVxuTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFaW5pa1NzQVFtWWtOZUg1ZVlxL0NuSXpMYWFjT1xueGxTYWF3UURPd3FLeS90Q3F4cTV4eFBTSmMyMUs0V0loczlHeU9rS2Z
6dWVZM0dJTHpjTUpaNGNXdz09XG4tLS0tLUVORCBQVUJMSUMgS0VZLS0tLS1cbiIKCQkJCX0KCQkJfSwKCQkJImUxODYzYmEwMjA3MDMyMmViYzYyNmRjZWNmOWQ4ODFhM2EzOGMzNWMzYjQxYTgzNzY1YjZhZDZjMzdlYWVjMmEiOiB7CgkJCQkia2V5dHlwZSI6ICJlY2RzYS1zaGEyLW5pc3RwMjU2IiwKCQkJCSJzY2hlbWUiOiAiZWNkc2Etc2hhMi1uaXN0cDI1NiIsCgkJCQkia2V5aWRfaGFzaF9hbGdvcml0aG1zIjogWwoJCQkJCSJzaGEyNTYiLAoJCQkJCSJzaGE1MTIiCgkJCQldLAoJCQkJImtleXZhbCI6IHsKCQkJCQkicHVibGljIjogIi0tLS0tQkVHSU4gUFVCTElDIEtFWS0tLS0tXG5NRmt3RXdZSEtvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVXUmlHcjUraiszSjVTc0grWnRyNW5FMkgyd083XG5CVituTzNzOTNnTGNhMThxVE96SFkxb1d5QUdEeWtNU3NHVFVCU3Q5RCtBbjBLZktzRDJtZlNNNDJRPT1cbi0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLVxuIgoJCQkJfQoJCQl9LAoJCQkiZjUzMTJmNTQyYzIxMjczZDk0ODVhNDkzOTQzODZjNDU3NTgwNDc3MDY2N2YyZGRiNTliM2JmMDY2OWZkZGQyZiI6IHsKCQkJCSJrZXl0eXBlIjogImVjZHNhLXNoYTItbmlzdHAyNTYiLAoJCQkJInNjaGVtZSI6ICJlY2RzYS1zaGEyLW5pc3RwMjU2IiwKCQkJCSJrZXlpZF9oYXNoX2FsZ29yaXRobXMiOiBbCgkJCQkJInNoYTI1NiIsCgkJCQkJInNoYTUxMiIKCQkJCV0sCgkJCQkia2V5dmFsIjogewoJCQkJCSJwdWJsaWMiOiAiLS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS1cbk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRXpCelZPbUhDUG9qTVZMU0kzNjRXaWlWOE5QckRcbjZJZ1J4Vmxpc2t6L3YreTNKRVI1bWNWR2NPTmxpRGNXTUM1SjJsZkhtalBOUGhiNEg3eG04THpmU0E9PVxuLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tXG4iCgkJCQl9CgkJCX0sCgkJCSJmZjUxZTE3ZmNmMjUzMTE5YjcwMzNmNmY1NzUxMjYzMWRhNGEwOTY5NDQyYWZjZjlmYzhiMTQxYzdmMmJlOTljIjogewoJCQkJImtleXR5cGUiOiAiZWNkc2Etc2hhMi1uaXN0cDI1NiIsCgkJCQkic2NoZW1lIjogImVjZHNhLXNoYTItbmlzdHAyNTYiLAoJCQkJImtleWlkX2hhc2hfYWxnb3JpdGhtcyI6IFsKCQkJCQkic2hhMjU2IiwKCQkJCQkic2hhNTEyIgoJCQkJXSwKCQkJCSJrZXl2YWwiOiB7CgkJCQkJInB1YmxpYyI6ICItLS0tLUJFR0lOIFBVQkxJQyBLRVktLS0tLVxuTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFeThYS3NtaEJZREk4SmMwR3d6QnhlS2F4MGNtNVxuU1RLRVU2NUhQRnVuVW40MXNUOHBpMEZqTTRJa0h6L1lVbXdtTFVPMFd0N2x4aGo2QmtMSUs0cVlBdz09XG4tLS0tLUVORCBQVUJMSUMgS0VZLS0tLS1cbiIKCQkJCX0KCQkJfQoJCX0sCgkJInJvbGVzIjogewoJCQkicm9vdCI6IHsKCQkJCSJrZXlpZHMiOiBbCgkJCQkJImZmNTFlMTdmY2YyNTMxMTliNzAzM2Y2ZjU3NTEyNjMxZGE0YTA5Njk0NDJhZmNmOWZjOGIxNDFjN2YyYmU5OWMiLAoJCQkJCSIyNWEwZWI0NTBmZDNlZTJiZDc5MjE4Yzk2M2RjZTNmMWNjNjExOGJhZGYyNTFiZjE0OWYwYmQwN2Q1Y2FiZTk5IiwKCQkJCQkiZjUzMTJmNTQyYzIxMjczZDk0ODVhNDkzOTQzODZjNDU3NTgwNDc3MDY2N2YyZGRiNTliM2JmMDY2OWZkZGQyZiIsCgkJCQkJIjdmNzUxM2IyNTQyOWE2NDQ3M2UxMGNlM2FkMmYzZGEzNzJiYmRkMTRiNjVkMDdiYmFmNTQ3ZTdjOGJiYmU2MmIiLAoJCQkJCSIyZTYxY2QwY2JmNGE4ZjQ1ODA5YmRhOWY3Zjc4YzBkMzNhZDExODQyZmY5NGFlMzQwODczZTI2NjRkYzg0M2RlIgoJCQkJXSwKCQkJCSJ0aHJlc2hvbGQiOiAzCgkJCX0sCgkJCSJzbmFwc2hvdCI6IHsKCQkJCSJrZXlpZHMiOiBbCgkJCQkJIjQ1YjI4MzgyNWViMTg0Y2FiZDU4MmViMTdiNzRmYzhlZDQwNGY2OGNmNDUyYWNhYmRhZDJlZDZmOTBjZTIxNmIiCgkJCQldLAoJCQkJInRocmVzaG9sZCI6IDEKCQkJfSwKCQkJInRhcmdldHMiOiB7CgkJCQkia2V5aWRzIjogWwoJCQkJCSJmZjUxZTE3ZmNmMjUzMTE5YjcwMzNmNmY1NzUxMjYzMWRhNGEwOTY5NDQyYWZjZjlmYzhiMTQxYzdmMmJlOTljIiwKCQkJCQkiMjVhMGViNDUwZmQzZWUyYmQ3OTIxOGM5NjNkY2UzZjFjYzYxMThiYWRmMjUxYmYxNDlmMGJkMDdkNWNhYmU5OSIsCgkJCQkJImY1MzEyZjU0MmMyMTI3M2Q5NDg1YTQ5Mzk0Mzg2YzQ1NzU4MDQ3NzA2NjdmMmRkYjU5YjNiZjA2NjlmZGRkMmYiLAoJCQkJCSI3Zjc1MTNiMjU0MjlhNjQ0NzNlMTBjZTNhZDJmM2RhMzcyYmJkZDE0YjY1ZDA3YmJhZjU0N2U3YzhiYmJlNjJiIiwKCQkJCQkiMmU2MWNkMGNiZjRhOGY0NTgwOWJkYTlmN2Y3OGMwZDMzYWQxMTg0MmZmOTRhZTM0MDg3M2UyNjY0ZGM4NDNkZSIKCQkJCV0sCgkJCQkidGhyZXNob2xkIjogMwoJCQl9LAoJCQkidGltZXN0YW1wIjogewoJCQkJImtleWlkcyI6IFsKCQkJCQkiZTE4NjNiYTAyMDcwMzIyZWJjNjI2ZGNlY2Y5ZDg4MWEzYTM4YzM1YzNiNDFhODM3NjViNmFkNmMzN2VhZWMyYSIKCQkJCV0sCgkJCQkidGhyZXNob2xkIjogMQoJCQl9CgkJfSwKCQkiY29uc2lzdGVudF9zbmFwc2hvdCI6IHRydWUKCX0sCgkic2lnbmF0dXJlcyI6IFsKCQl7CgkJCSJrZXlpZCI6ICJmNTMxMmY1NDJjMjEyNzNkOTQ4NWE0OTM5NDM4NmM0NTc1ODA0NzcwNjY3ZjJkZGI1OWIzYmYwNjY5Zm
RkZDJmIiwKCQkJInNpZyI6ICIzMDQ0MDIyMDI0YjgwMzZiMzc0ZjcwNzE3MjNmM2YyY2IxOTc5YzQyZTVkYTE5MTBmMGIxNzg4MzVhZDU0NmUzYzM2MDgzNjMwMjIwNzE0MGNjZDQwOGFmY2Y4NzIwZGQ5YmVhN2YwMDMyNTc2OGMzYWE0N2MyMmQ1MzFjODQ5Yzk3NGZkNTBlNDVkZCIKCQl9LAoJCXsKCQkJImtleWlkIjogIjdmNzUxM2IyNTQyOWE2NDQ3M2UxMGNlM2FkMmYzZGEzNzJiYmRkMTRiNjVkMDdiYmFmNTQ3ZTdjOGJiYmU2MmIiLAoJCQkic2lnIjogIjMwNDYwMjIxMDBkY2IxYTk2ZWNiZmMwNTc2OGEzYzczNzI2YTkyZDY4MWRhNzhlYWVjMDY4YTlhMGNmZTEzYTEyZGI2NzJlNDRiMDIyMTAwYTBkYWU3YmMyZTZiOTUzZTIxNWY1N2NjNjE0ZWI3MTY2MGI5NDYxZDZkYzg2MjY0YjBiNzRhNGYyZTEzMDdlMSIKCQl9LAoJCXsKCQkJImtleWlkIjogIjJlNjFjZDBjYmY0YThmNDU4MDliZGE5ZjdmNzhjMGQzM2FkMTE4NDJmZjk0YWUzNDA4NzNlMjY2NGRjODQzZGUiLAoJCQkic2lnIjogIjMwNDYwMjIxMDBjNDcwOGQ5NDA3N2NiM2Q2ZGQ2MGViZDJkZDY2NTQ1ZTdhZmIwNDY0Y2UyNTkzYTVmMjNmNmUzNjA0YjlmMjFlMDIyMTAwOTkyZTk2OWNkNTA2OWVhYjE3NDM5YjJiYTYwNzQzZmU0MjI4NzdiYzFhMWM0NmU5MzVhNmQ1Y2I0N2IzY2ZjNiIKCQl9LAoJCXsKCQkJImtleWlkIjogIjI1YTBlYjQ1MGZkM2VlMmJkNzkyMThjOTYzZGNlM2YxY2M2MTE4YmFkZjI1MWJmMTQ5ZjBiZDA3ZDVjYWJlOTkiLAoJCQkic2lnIjogIjMwNDUwMjIwNTFmYWE2YjZmYzM3MzczMGI5N2MxYTRjZDkyZDAzZWZkOThiODNkNGM5YzkzYmY0ZjQwNGQxZjg4ZWEyZWIxODAyMjEwMGY3MWFjMWNkNzNkY2JhOTUwZjQyMTBiMTJmOWEwNWI4MTQwYjA0OTAyNDdjNTMzOTE5MWU4NDJiODY4MTU1YjQiCgkJfQoJXQp9","targets":{"trusted_root.json":"{
  "mediaType": "application/vnd.dev.sigstore.trustedroot+json;version=0.1",
  "tlogs": [
    {
      "baseUrl": "https://rekor.sigstore.dev",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwrkBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-01-12T11:53:27.000Z"
        }
      },
      "logId": {
        "keyId": "wNI9atQGlz+VWfO6LRygH4QUfY/8W4RFwiT5i5WRgB0="
      }
    }
  ],
  "certificateAuthorities": [
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB+DCCAX6gAwIBAgITNVkDZoCiofPDsy7dfm6geLbuhzAKBggqhkjOPQQDAzAqMRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxETAPBgNVBAMTCHNpZ3N0b3JlMB4XDTIxMDMwNzAzMjAyOVoXDTMxMDIyMzAzMjAyOVowKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLSyA7Ii5k+pNO8ZEWY0ylemWDowOkNa3kL+GZE5Z5GWehL9/A9bRNA3RbrsZ5i0JcastaRL7Sp5fp/jD5dxqc/UdTVnlvS16an+2Yfswe/QuLolRUCrcOE2+2iA5+tzd6NmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFMjFHQBBmiQpMlEk6w2uSu1KBtPsMB8GA1UdIwQYMBaAFMjFHQBBmiQpMlEk6w2uSu1KBtPsMAoGCCqGSM49BAMDA2gAMGUCMH8liWJfMui6vXXBhjDgY4MwslmN/TJxVe/83WrFomwmNf056y1X48F9c4m3a3ozXAIxAKjRay5/aj/jsKKGIkmQatjI8uupHr/+CxFvaJWmpYqNkLDGRU+9orzh5hI2RrcuaQ=="
          }
        ]
      },
      "validFor": {
        "start": "2021-03-07T03:20:29.000Z",
        "end": "2022-12-31T23:59:59.999Z"
      }
    },
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIICGjCCAaGgAwIBAgIUALnViVfnU0brJasmRkHrn/UnfaQwCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMjA0MTMyMDA2MTVaFw0zMTEwMDUxMzU2NThaMDcxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEeMBwGA1UEAxMVc2lnc3RvcmUtaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE8RVS/ysH+NOvuDZyPIZtilgUF9NlarYpAd9HP1vBBH1U5CV77LSS7s0ZiH4nE7Hv7ptS6LvvR/STk798LVgMzLlJ4HeIfF3tHSaexLcYpSASr1kS0N/RgBJz/9jWCiXno3sweTAOBgNVHQ8BAf8EBAMCAQYwEwYDVR0lBAwwCgYIKwYBBQUHAwMwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU39Ppz1YkEZb5qNjpKFWixi4YZD8wHwYDVR0jBBgwFoAUWMAeX5FFpWapesyQoZMi0CrFxfowCgYIKoZIzj0EAwMDZwAwZAIwPCsQK4DYiZYDPIaDi5HFKnfxXx6ASSVmERfsynYBiX2X6SJRnZU84/9DZdnFvvxmAjBOt6QpBlc4J/0DxvkTCqpclvziL6BCCPnjdlIB3Pu3BxsPmygUY7Ii2zbdCdliiow="
          },
          {
            "rawBytes": "MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxexX69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92jYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRYwB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQKsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCMWP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ"
          }
        ]
      },
      "validFor": {
        "start": "2022-04-13T20:06:15.000Z"
      }
    }
  ],
  "ctlogs": [
    {
      "baseUrl": "https://ctfe.sigstore.dev/test",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbfwR+RJudXscgRBRpKX1XFDy3PyudDxz/SfnRi1fT8ekpfBd2O1uoz7jr3Z8nKzxA69EUQ+eFCFI3zeubPWU7w==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-03-14T00:00:00.000Z",
          "end": "2022-10-31T23:59:59.999Z"
        }
      },
      "logId": {
        "keyId": "CGCS8ChS/2hF0dFrJ4ScRWcYrBY9wzjSbea8IgY2b3I="
      }
    },
    {
      "baseUrl": "https://ctfe.sigstore.dev/2022",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEiPSlFi0CmFTfEjCUqF9HuCEcYXNKAaYalIJmBZ8yyezPjTqhxrKBpMnaocVtLJBI1eM3uXnQzQGAJdJ4gs9Fyw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2022-10-20T00:00:00.000Z"
        }
      },
      "logId": {
        "keyId": "3T0wasbHETJjGR4cmWc3AqJKXrjePK3/h4pygC8p7o4="
      }
    }
  ],
  "timestampAuthorities": [
    {
      "subject": {
        "organization": "GitHub, Inc.",
        "commonName": "Internal Services Root"
      },
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB3DCCAWKgAwIBAgIUchkNsH36Xa04b1LqIc+qr9DVecMwCgYIKoZIzj0EAwMwMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMB4XDTIzMDQxNDAwMDAwMFoXDTI0MDQxMzAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgVGltZXN0YW1waW5nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUD5ZNbSqYMd6r8qpOOEX9ibGnZT9GsuXOhr/f8U9FJugBGExKYp40OULS0erjZW7xV9xV52NnJf5OeDq4e5ZKqNWMFQwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMIMAwGA1UdEwEB/wQCMAAwHwYDVR0jBBgwFoAUaW1RudOgVt0leqY0WKYbuPr47wAwCgYIKoZIzj0EAwMDaAAwZQIwbUH9HvD4ejCZJOWQnqAlkqURllvu9M8+VqLbiRK+zSfZCZwsiljRn8MQQRSkXEE5AjEAg+VxqtojfVfu8DhzzhCx9GKETbJHb19iV72mMKUbDAFmzZ6bQ8b54Zb8tidy5aWe"
          },
          {
            "rawBytes": "MIICEDCCAZWgAwIBAgIUX8ZO5QXP7vN4dMQ5e9sU3nub8OgwCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTI4MDQxMjAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEvMLY/dTVbvIJYANAuszEwJnQE1llftynyMKIMhh48HmqbVr5ygybzsLRLVKbBWOdZ21aeJz+gZiytZetqcyF9WlER5NEMf6JV7ZNojQpxHq4RHGoGSceQv/qvTiZxEDKo2YwZDAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUaW1RudOgVt0leqY0WKYbuPr47wAwHwYDVR0jBBgwFoAU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaQAwZgIxAK1B185ygCrIYFlIs3GjswjnwSMG6LY8woLVdakKDZxVa8f8cqMs1DhcxJ0+09w95QIxAO+tBzZk7vjUJ9iJgD4R6ZWTxQWKqNm74jO99o+o9sv4FI/SZTZTFyMn0IJEHdNmyA=="
          },
          {
            "rawBytes": "MIIB9DCCAXqgAwIBAgIUa/JAkdUjK4JUwsqtaiRJGWhqLSowCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTMzMDQxMTAwMDAwMFowODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEf9jFAXxz4kx68AHRMOkFBhflDcMTvzaXz4x/FCcXjJ/1qEKon/qPIGnaURskDtyNbNDOpeJTDDFqt48iMPrnzpx6IZwqemfUJN4xBEZfza+pYt/iyod+9tZr20RRWSv/o0UwQzAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBAjAdBgNVHQ4EFgQU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaAAwZQIxALZLZ8BgRXzKxLMMN9VIlO+e4hrBnNBgF7tz7Hnrowv2NetZErIACKFymBlvWDvtMAIwZO+ki6ssQ1bsZo98O8mEAf2NZ7iiCgDDU0Vwjeco6zyeh0zBTs9/7gV6AHNQ53xD"
          }
        ]
      },
      "validFor": {
        "start": "2023-04-14T00:00:00.000Z"
      }
    }
  ]
}
","registry.npmjs.org%2Fkeys.json":"ewogICAgImtleXMiOiBbCiAgICAgICAgewogICAgICAgICAgICAia2V5SWQiOiAiU0hBMjU2OmpsM2J3c3d1ODBQampva0NnaDBvMnc1YzJVNExoUUFFNTdnajljejFrekEiLAogICAgICAgICAgICAia2V5VXNhZ2UiOiAibnBtOnNpZ25hdHVyZXMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIxOTk5LTAxLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9LAogICAgICAgIHsKICAgICAgICAgICAgImtleUlkIjogIlNIQTI1NjpqbDNid3N3dTgwUGpqb2tDZ2gwbzJ3NWMyVTRMaFFBRTU3Z2o5Y3oxa3pBIiwKICAgICAgICAgICAgImtleVVzYWdlIjogIm5wbTphdHRlc3RhdGlvbnMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIyMDIyLTEyLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9CiAgICBdCn0K"}}} +{"https://tuf-repo-cdn.sigstore.dev":{"root.json":"{
	"signed": {
		"_type": "root",
		"spec_version": "1.0",
		"version": 9,
		"expires": "2024-09-12T06:53:10Z",
		"keys": {
			"1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n"
				}
			},
			"230e212616274a4195cdc28e9fce782c20e6c720f1a811b40f98228376bdd3ac": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAELrWvNt94v4R085ELeeCMxHp7PldF\n0/T1GxukUh2ODuggLGJE0pc1e8CSBf6CS91Fwo9FUOuRsjBUld+VqSyCdQ==\n-----END PUBLIC KEY-----\n"
				}
			},
			"3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n"
				}
			},
			"923bb39e60dd6fa2c31e6ea55473aa93b64dd4e53e16fbe42f6a207d3f97de2d": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n"
				}
			},
			"e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n"
				}
			},
			"ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n"
				}
			},
			"fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f": {
				"keytype": "ecdsa",
				"scheme": "ecdsa-sha2-nistp256",
				"keyid_hash_algorithms": [
					"sha256",
					"sha512"
				],
				"keyval": {
					"public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n"
				}
			}
		},
		"roles": {
			"root": {
				"keyids": [
					"3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e",
					"ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e",
					"1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849",
					"e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523",
					"fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f"
				],
				"threshold": 3
			},
			"snapshot": {
				"keyids": [
					"230e212616274a4195cdc28e9fce782c20e6c720f1a811b40f98228376bdd3ac"
				],
				"threshold": 1
			},
			"targets": {
				"keyids": [
					"3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e",
					"ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e",
					"1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849",
					"e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523",
					"fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f"
				],
				"threshold": 3
			},
			"timestamp": {
				"keyids": [
					"923bb39e60dd6fa2c31e6ea55473aa93b64dd4e53e16fbe42f6a207d3f97de2d"
				],
				"threshold": 1
			}
		},
		"consistent_snapshot": true
	},
	"signatures": [
		{
			"keyid": "ff51e17fcf253119b7033f6f57512631da4a0969442afcf9fc8b141c7f2be99c",
			"sig": "30450221008b78f894c3cfed3bd486379c4e0e0dfb3e7dd8cbc4d5598d2818eea1ba3c7550022029d3d06e89d04d37849985dc46c0e10dc5b1fc68dc70af1ec9910303a1f3ee2f"
		},
		{
			"keyid": "25a0eb450fd3ee2bd79218c963dce3f1cc6118badf251bf149f0bd07d5cabe99",
			"sig": "30450221009e6b90b935e09b837a90d4402eaa27d5ea26eb7891948ba0ed7090841248f436022003dc2251c4d4a7999b91e9ad0868765ae09ac7269279f2a7899bafef7a2d9260"
		},
		{
			"keyid": "f5312f542c21273d9485a49394386c4575804770667f2ddb59b3bf0669fddd2f",
			"sig": "30440220099e907dcf90b7b6e109fd1d6e442006fccbb48894aaaff47ab824b03fb35d0d02202aa0a06c21a4233f37900a48bc8777d3b47f59e3a38616ce631a04df57f96736"
		},
		{
			"keyid": "3c344aa068fd4cc4e87dc50b612c02431fbc771e95003993683a2b0bf260cf0e",
			"sig": "30450221008b78f894c3cfed3bd486379c4e0e0dfb3e7dd8cbc4d5598d2818eea1ba3c7550022029d3d06e89d04d37849985dc46c0e10dc5b1fc68dc70af1ec9910303a1f3ee2f"
		},
		{
			"keyid": "ec81669734e017996c5b85f3d02c3de1dd4637a152019fe1af125d2f9368b95e",
			"sig": "30450221009e6b90b935e09b837a90d4402eaa27d5ea26eb7891948ba0ed7090841248f436022003dc2251c4d4a7999b91e9ad0868765ae09ac7269279f2a7899bafef7a2d9260"
		},
		{
			"keyid": "e2f59acb9488519407e18cbfc9329510be03c04aca9929d2f0301343fec85523",
			"sig": "304502200e5613b901e0f3e08eceabddc73f98b50ddf892e998d0b369c6e3d451ac48875022100940cf92d1f43ee2e5cdbb22572bb52925ed3863a688f7ffdd4bd2e2e56f028b3"
		},
		{
			"keyid": "2e61cd0cbf4a8f45809bda9f7f78c0d33ad11842ff94ae340873e2664dc843de",
			"sig": "304502202cff44f2215d7a47b28b8f5f580c2cfbbd1bfcfcbbe78de323045b2c0badc5e9022100c743949eb3f4ea5a4b9ae27ac6eddea1f0ff9bfd004f8a9a9d18c6e4142b6e75"
		},
		{
			"keyid": "1e1d65ce98b10addad4764febf7dda2d0436b3d3a3893579c0dddaea20e54849",
			"sig": "30440220099e907dcf90b7b6e109fd1d6e442006fccbb48894aaaff47ab824b03fb35d0d02202aa0a06c21a4233f37900a48bc8777d3b47f59e3a38616ce631a04df57f96736"
		},
		{
			"keyid": "fdfa83a07b5a83589b87ded41f77f39d232ad91f7cce52868dacd06ba089849f",
			"sig": "304502202cff44f2215d7a47b28b8f5f580c2cfbbd1bfcfcbbe78de323045b2c0badc5e9022100c743949eb3f4ea5a4b9ae27ac6eddea1f0ff9bfd004f8a9a9d18c6e4142b6e75"
		},
		{
			"keyid": "7f7513b25429a64473e10ce3ad2f3da372bbdd14b65d07bbaf547e7c8bbbe62b",
			"sig": "304502200e5613b901e0f3e08eceabddc73f98b50ddf892e998d0b369c6e3d451ac48875022100940cf92d1f43ee2e5cdbb22572bb52925ed3863a688f7ffdd4bd2e2e56f028b3"
		}
	]
}","targets":{"trusted_root.json":"{
  "mediaType": "application/vnd.dev.sigstore.trustedroot+json;version=0.1",
  "tlogs": [
    {
      "baseUrl": "https://rekor.sigstore.dev",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwrkBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-01-12T11:53:27.000Z"
        }
      },
      "logId": {
        "keyId": "wNI9atQGlz+VWfO6LRygH4QUfY/8W4RFwiT5i5WRgB0="
      }
    }
  ],
  "certificateAuthorities": [
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB+DCCAX6gAwIBAgITNVkDZoCiofPDsy7dfm6geLbuhzAKBggqhkjOPQQDAzAqMRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxETAPBgNVBAMTCHNpZ3N0b3JlMB4XDTIxMDMwNzAzMjAyOVoXDTMxMDIyMzAzMjAyOVowKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLSyA7Ii5k+pNO8ZEWY0ylemWDowOkNa3kL+GZE5Z5GWehL9/A9bRNA3RbrsZ5i0JcastaRL7Sp5fp/jD5dxqc/UdTVnlvS16an+2Yfswe/QuLolRUCrcOE2+2iA5+tzd6NmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFMjFHQBBmiQpMlEk6w2uSu1KBtPsMB8GA1UdIwQYMBaAFMjFHQBBmiQpMlEk6w2uSu1KBtPsMAoGCCqGSM49BAMDA2gAMGUCMH8liWJfMui6vXXBhjDgY4MwslmN/TJxVe/83WrFomwmNf056y1X48F9c4m3a3ozXAIxAKjRay5/aj/jsKKGIkmQatjI8uupHr/+CxFvaJWmpYqNkLDGRU+9orzh5hI2RrcuaQ=="
          }
        ]
      },
      "validFor": {
        "start": "2021-03-07T03:20:29.000Z",
        "end": "2022-12-31T23:59:59.999Z"
      }
    },
    {
      "subject": {
        "organization": "sigstore.dev",
        "commonName": "sigstore"
      },
      "uri": "https://fulcio.sigstore.dev",
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIICGjCCAaGgAwIBAgIUALnViVfnU0brJasmRkHrn/UnfaQwCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMjA0MTMyMDA2MTVaFw0zMTEwMDUxMzU2NThaMDcxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEeMBwGA1UEAxMVc2lnc3RvcmUtaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE8RVS/ysH+NOvuDZyPIZtilgUF9NlarYpAd9HP1vBBH1U5CV77LSS7s0ZiH4nE7Hv7ptS6LvvR/STk798LVgMzLlJ4HeIfF3tHSaexLcYpSASr1kS0N/RgBJz/9jWCiXno3sweTAOBgNVHQ8BAf8EBAMCAQYwEwYDVR0lBAwwCgYIKwYBBQUHAwMwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU39Ppz1YkEZb5qNjpKFWixi4YZD8wHwYDVR0jBBgwFoAUWMAeX5FFpWapesyQoZMi0CrFxfowCgYIKoZIzj0EAwMDZwAwZAIwPCsQK4DYiZYDPIaDi5HFKnfxXx6ASSVmERfsynYBiX2X6SJRnZU84/9DZdnFvvxmAjBOt6QpBlc4J/0DxvkTCqpclvziL6BCCPnjdlIB3Pu3BxsPmygUY7Ii2zbdCdliiow="
          },
          {
            "rawBytes": "MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxexX69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92jYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRYwB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQKsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCMWP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ"
          }
        ]
      },
      "validFor": {
        "start": "2022-04-13T20:06:15.000Z"
      }
    }
  ],
  "ctlogs": [
    {
      "baseUrl": "https://ctfe.sigstore.dev/test",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbfwR+RJudXscgRBRpKX1XFDy3PyudDxz/SfnRi1fT8ekpfBd2O1uoz7jr3Z8nKzxA69EUQ+eFCFI3zeubPWU7w==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2021-03-14T00:00:00.000Z",
          "end": "2022-10-31T23:59:59.999Z"
        }
      },
      "logId": {
        "keyId": "CGCS8ChS/2hF0dFrJ4ScRWcYrBY9wzjSbea8IgY2b3I="
      }
    },
    {
      "baseUrl": "https://ctfe.sigstore.dev/2022",
      "hashAlgorithm": "SHA2_256",
      "publicKey": {
        "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEiPSlFi0CmFTfEjCUqF9HuCEcYXNKAaYalIJmBZ8yyezPjTqhxrKBpMnaocVtLJBI1eM3uXnQzQGAJdJ4gs9Fyw==",
        "keyDetails": "PKIX_ECDSA_P256_SHA_256",
        "validFor": {
          "start": "2022-10-20T00:00:00.000Z"
        }
      },
      "logId": {
        "keyId": "3T0wasbHETJjGR4cmWc3AqJKXrjePK3/h4pygC8p7o4="
      }
    }
  ],
  "timestampAuthorities": [
    {
      "subject": {
        "organization": "GitHub, Inc.",
        "commonName": "Internal Services Root"
      },
      "certChain": {
        "certificates": [
          {
            "rawBytes": "MIIB3DCCAWKgAwIBAgIUchkNsH36Xa04b1LqIc+qr9DVecMwCgYIKoZIzj0EAwMwMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMB4XDTIzMDQxNDAwMDAwMFoXDTI0MDQxMzAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgVGltZXN0YW1waW5nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEUD5ZNbSqYMd6r8qpOOEX9ibGnZT9GsuXOhr/f8U9FJugBGExKYp40OULS0erjZW7xV9xV52NnJf5OeDq4e5ZKqNWMFQwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMIMAwGA1UdEwEB/wQCMAAwHwYDVR0jBBgwFoAUaW1RudOgVt0leqY0WKYbuPr47wAwCgYIKoZIzj0EAwMDaAAwZQIwbUH9HvD4ejCZJOWQnqAlkqURllvu9M8+VqLbiRK+zSfZCZwsiljRn8MQQRSkXEE5AjEAg+VxqtojfVfu8DhzzhCx9GKETbJHb19iV72mMKUbDAFmzZ6bQ8b54Zb8tidy5aWe"
          },
          {
            "rawBytes": "MIICEDCCAZWgAwIBAgIUX8ZO5QXP7vN4dMQ5e9sU3nub8OgwCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTI4MDQxMjAwMDAwMFowMjEVMBMGA1UEChMMR2l0SHViLCBJbmMuMRkwFwYDVQQDExBUU0EgaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEvMLY/dTVbvIJYANAuszEwJnQE1llftynyMKIMhh48HmqbVr5ygybzsLRLVKbBWOdZ21aeJz+gZiytZetqcyF9WlER5NEMf6JV7ZNojQpxHq4RHGoGSceQv/qvTiZxEDKo2YwZDAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUaW1RudOgVt0leqY0WKYbuPr47wAwHwYDVR0jBBgwFoAU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaQAwZgIxAK1B185ygCrIYFlIs3GjswjnwSMG6LY8woLVdakKDZxVa8f8cqMs1DhcxJ0+09w95QIxAO+tBzZk7vjUJ9iJgD4R6ZWTxQWKqNm74jO99o+o9sv4FI/SZTZTFyMn0IJEHdNmyA=="
          },
          {
            "rawBytes": "MIIB9DCCAXqgAwIBAgIUa/JAkdUjK4JUwsqtaiRJGWhqLSowCgYIKoZIzj0EAwMwODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MB4XDTIzMDQxNDAwMDAwMFoXDTMzMDQxMTAwMDAwMFowODEVMBMGA1UEChMMR2l0SHViLCBJbmMuMR8wHQYDVQQDExZJbnRlcm5hbCBTZXJ2aWNlcyBSb290MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEf9jFAXxz4kx68AHRMOkFBhflDcMTvzaXz4x/FCcXjJ/1qEKon/qPIGnaURskDtyNbNDOpeJTDDFqt48iMPrnzpx6IZwqemfUJN4xBEZfza+pYt/iyod+9tZr20RRWSv/o0UwQzAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBAjAdBgNVHQ4EFgQU9NYYlobnAG4c0/qjxyH/lq/wz+QwCgYIKoZIzj0EAwMDaAAwZQIxALZLZ8BgRXzKxLMMN9VIlO+e4hrBnNBgF7tz7Hnrowv2NetZErIACKFymBlvWDvtMAIwZO+ki6ssQ1bsZo98O8mEAf2NZ7iiCgDDU0Vwjeco6zyeh0zBTs9/7gV6AHNQ53xD"
          }
        ]
      },
      "validFor": {
        "start": "2023-04-14T00:00:00.000Z"
      }
    }
  ]
}
","registry.npmjs.org%2Fkeys.json":"ewogICAgImtleXMiOiBbCiAgICAgICAgewogICAgICAgICAgICAia2V5SWQiOiAiU0hBMjU2OmpsM2J3c3d1ODBQampva0NnaDBvMnc1YzJVNExoUUFFNTdnajljejFrekEiLAogICAgICAgICAgICAia2V5VXNhZ2UiOiAibnBtOnNpZ25hdHVyZXMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIxOTk5LTAxLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9LAogICAgICAgIHsKICAgICAgICAgICAgImtleUlkIjogIlNIQTI1NjpqbDNid3N3dTgwUGpqb2tDZ2gwbzJ3NWMyVTRMaFFBRTU3Z2o5Y3oxa3pBIiwKICAgICAgICAgICAgImtleVVzYWdlIjogIm5wbTphdHRlc3RhdGlvbnMiLAogICAgICAgICAgICAicHVibGljS2V5IjogewogICAgICAgICAgICAgICAgInJhd0J5dGVzIjogIk1Ga3dFd1lIS29aSXpqMENBUVlJS29aSXpqMERBUWNEUWdBRTFPbGIzek1BRkZ4WEtIaUlrUU81Y0ozWWhsNWk2VVBwK0lodXRlQkpidUhjQTVVb2dLbzBFV3RsV3dXNktTYUtvVE5FWUw3SmxDUWlWbmtoQmt0VWdnPT0iLAogICAgICAgICAgICAgICAgImtleURldGFpbHMiOiAiUEtJWF9FQ0RTQV9QMjU2X1NIQV8yNTYiLAogICAgICAgICAgICAgICAgInZhbGlkRm9yIjogewogICAgICAgICAgICAgICAgICAgICJzdGFydCI6ICIyMDIyLTEyLTAxVDAwOjAwOjAwLjAwMFoiCiAgICAgICAgICAgICAgICB9CiAgICAgICAgICAgIH0KICAgICAgICB9CiAgICBdCn0K"}}} diff --git a/deps/npm/node_modules/agent-base/LICENSE b/deps/npm/node_modules/agent-base/LICENSE new file mode 100644 index 00000000000000..008728cb51847d --- /dev/null +++ b/deps/npm/node_modules/agent-base/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2013 Nathan Rajlich + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/deps/npm/node_modules/agent-base/dist/index.js b/deps/npm/node_modules/agent-base/dist/index.js index 7bafc8c68604f3..69396356e74db7 100644 --- a/deps/npm/node_modules/agent-base/dist/index.js +++ b/deps/npm/node_modules/agent-base/dist/index.js @@ -27,7 +27,9 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) { }; Object.defineProperty(exports, "__esModule", { value: true }); exports.Agent = void 0; +const net = __importStar(require("net")); const http = __importStar(require("http")); +const https_1 = require("https"); __exportStar(require("./helpers"), exports); const INTERNAL = Symbol('AgentBaseInternalState'); class Agent extends http.Agent { @@ -64,14 +66,72 @@ class Agent extends http.Agent { .some((l) => l.indexOf('(https.js:') !== -1 || l.indexOf('node:https:') !== -1); } + // In order to support async signatures in `connect()` and Node's native + // connection pooling in `http.Agent`, the array of sockets for each origin + // has to be updated synchronously. This is so the length of the array is + // accurate when `addRequest()` is next called. We achieve this by creating a + // fake socket and adding it to `sockets[origin]` and incrementing + // `totalSocketCount`. + incrementSockets(name) { + // If `maxSockets` and `maxTotalSockets` are both Infinity then there is no + // need to create a fake socket because Node.js native connection pooling + // will never be invoked. + if (this.maxSockets === Infinity && this.maxTotalSockets === Infinity) { + return null; + } + // All instances of `sockets` are expected TypeScript errors. The + // alternative is to add it as a private property of this class but that + // will break TypeScript subclassing. + if (!this.sockets[name]) { + // @ts-expect-error `sockets` is readonly in `@types/node` + this.sockets[name] = []; + } + const fakeSocket = new net.Socket({ writable: false }); + this.sockets[name].push(fakeSocket); + // @ts-expect-error `totalSocketCount` isn't defined in `@types/node` + this.totalSocketCount++; + return fakeSocket; + } + decrementSockets(name, socket) { + if (!this.sockets[name] || socket === null) { + return; + } + const sockets = this.sockets[name]; + const index = sockets.indexOf(socket); + if (index !== -1) { + sockets.splice(index, 1); + // @ts-expect-error `totalSocketCount` isn't defined in `@types/node` + this.totalSocketCount--; + if (sockets.length === 0) { + // @ts-expect-error `sockets` is readonly in `@types/node` + delete this.sockets[name]; + } + } + } + // In order to properly update the socket pool, we need to call `getName()` on + // the core `https.Agent` if it is a secureEndpoint. + getName(options) { + const secureEndpoint = typeof options.secureEndpoint === 'boolean' + ? 
options.secureEndpoint + : this.isSecureEndpoint(options); + if (secureEndpoint) { + // @ts-expect-error `getName()` isn't defined in `@types/node` + return https_1.Agent.prototype.getName.call(this, options); + } + // @ts-expect-error `getName()` isn't defined in `@types/node` + return super.getName(options); + } createSocket(req, options, cb) { const connectOpts = { ...options, secureEndpoint: this.isSecureEndpoint(options), }; + const name = this.getName(connectOpts); + const fakeSocket = this.incrementSockets(name); Promise.resolve() .then(() => this.connect(req, connectOpts)) .then((socket) => { + this.decrementSockets(name, fakeSocket); if (socket instanceof http.Agent) { // @ts-expect-error `addRequest()` isn't defined in `@types/node` return socket.addRequest(req, connectOpts); @@ -79,7 +139,10 @@ class Agent extends http.Agent { this[INTERNAL].currentSocket = socket; // @ts-expect-error `createSocket()` isn't defined in `@types/node` super.createSocket(req, options, cb); - }, cb); + }, (err) => { + this.decrementSockets(name, fakeSocket); + cb(err); + }); } createConnection() { const socket = this[INTERNAL].currentSocket; diff --git a/deps/npm/node_modules/agent-base/package.json b/deps/npm/node_modules/agent-base/package.json index 7178f4983f4fb9..8e95171707fef1 100644 --- a/deps/npm/node_modules/agent-base/package.json +++ b/deps/npm/node_modules/agent-base/package.json @@ -1,6 +1,6 @@ { "name": "agent-base", - "version": "7.1.0", + "version": "7.1.1", "description": "Turn a function into an `http.Agent` instance", "main": "./dist/index.js", "types": "./dist/index.d.ts", diff --git a/deps/npm/node_modules/binary-extensions/binary-extensions.json b/deps/npm/node_modules/binary-extensions/binary-extensions.json index 4aab3837893a2c..ac08048e40e2df 100644 --- a/deps/npm/node_modules/binary-extensions/binary-extensions.json +++ b/deps/npm/node_modules/binary-extensions/binary-extensions.json @@ -7,6 +7,9 @@ "a", "aac", "adp", + "afdesign", + "afphoto", + "afpub", "ai", "aif", "aiff", diff --git a/deps/npm/node_modules/binary-extensions/license b/deps/npm/node_modules/binary-extensions/license index 401b1c731bcd3e..5493a1a6e3f9a5 100644 --- a/deps/npm/node_modules/binary-extensions/license +++ b/deps/npm/node_modules/binary-extensions/license @@ -1,6 +1,7 @@ MIT License -Copyright (c) 2019 Sindre Sorhus (https://sindresorhus.com), Paul Miller (https://paulmillr.com) +Copyright (c) Sindre Sorhus (https://sindresorhus.com) +Copyright (c) Paul Miller (https://paulmillr.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/deps/npm/node_modules/binary-extensions/package.json b/deps/npm/node_modules/binary-extensions/package.json index c4d3641735b91b..4710c339aeb2d5 100644 --- a/deps/npm/node_modules/binary-extensions/package.json +++ b/deps/npm/node_modules/binary-extensions/package.json @@ -1,14 +1,16 @@ { "name": "binary-extensions", - "version": "2.2.0", + "version": "2.3.0", "description": "List of binary file extensions", "license": "MIT", "repository": "sindresorhus/binary-extensions", + "funding": "https://github.com/sponsors/sindresorhus", "author": { "name": "Sindre Sorhus", "email": 
"sindresorhus@gmail.com", - "url": "sindresorhus.com" + "url": "https://sindresorhus.com" }, + "sideEffects": false, "engines": { "node": ">=8" }, diff --git a/deps/npm/node_modules/cli-table3/package.json b/deps/npm/node_modules/cli-table3/package.json index 6e84bf66675054..0bd5d31d102463 100644 --- a/deps/npm/node_modules/cli-table3/package.json +++ b/deps/npm/node_modules/cli-table3/package.json @@ -1,6 +1,6 @@ { "name": "cli-table3", - "version": "0.6.3", + "version": "0.6.4", "description": "Pretty unicode tables for the command line. Based on the original cli-table.", "main": "index.js", "types": "index.d.ts", diff --git a/deps/npm/node_modules/glob/README.md b/deps/npm/node_modules/glob/README.md index 9f6e80fe665672..92c202afa1df8d 100644 --- a/deps/npm/node_modules/glob/README.md +++ b/deps/npm/node_modules/glob/README.md @@ -422,7 +422,7 @@ share the previously loaded cache. `process.cwd()`. See also: "Windows, CWDs, Drive Letters, and UNC Paths", below. - This option may be eiher a string path or a `file://` URL + This option may be either a string path or a `file://` URL object or string. - `root` A string path resolved against the `cwd` option, which @@ -509,6 +509,9 @@ share the previously loaded cache. - `nodir` Do not match directories, only files. (Note: to match _only_ directories, put a `/` at the end of the pattern.) + Note: when `follow` and `nodir` are both set, then symbolic + links to directories are also omitted. + - `stat` Call `lstat()` on all entries, whether required or not to determine whether it's a valid match. When used with `withFileTypes`, this means that matches will include data such @@ -539,6 +542,9 @@ share the previously loaded cache. it is not the first item in the pattern, or none if it is the first item in the pattern, following the same behavior as Bash. + Note: when `follow` and `nodir` are both set, then symbolic + links to directories are also omitted. + - `realpath` Set to true to call `fs.realpath` on all of the results. In the case of an entry that cannot be resolved, the entry is omitted. 
This incurs a slight performance penalty, of diff --git a/deps/npm/node_modules/glob/dist/commonjs/ignore.d.ts.map b/deps/npm/node_modules/glob/dist/commonjs/ignore.d.ts.map index be7831769d33ed..21ab57b94d1a76 100644 --- a/deps/npm/node_modules/glob/dist/commonjs/ignore.d.ts.map +++ b/deps/npm/node_modules/glob/dist/commonjs/ignore.d.ts.map @@ -1 +1 @@ -{"version":3,"file":"ignore.d.ts","sourceRoot":"","sources":["../../src/ignore.ts"],"names":[],"mappings":"AAKA,OAAO,EAAE,SAAS,EAAE,MAAM,WAAW,CAAA;AACrC,OAAO,EAAE,IAAI,EAAE,MAAM,aAAa,CAAA;AAElC,OAAO,EAAE,cAAc,EAAE,MAAM,aAAa,CAAA;AAE5C,MAAM,WAAW,UAAU;IACzB,OAAO,CAAC,EAAE,CAAC,CAAC,EAAE,IAAI,KAAK,OAAO,CAAA;IAC9B,eAAe,CAAC,EAAE,CAAC,CAAC,EAAE,IAAI,KAAK,OAAO,CAAA;CACvC;AASD;;GAEG;AACH,qBAAa,MAAO,YAAW,UAAU;IACvC,QAAQ,EAAE,SAAS,EAAE,CAAA;IACrB,gBAAgB,EAAE,SAAS,EAAE,CAAA;IAC7B,QAAQ,EAAE,SAAS,EAAE,CAAA;IACrB,gBAAgB,EAAE,SAAS,EAAE,CAAA;gBAG3B,OAAO,EAAE,MAAM,EAAE,EACjB,EACE,OAAO,EACP,MAAM,EACN,KAAK,EACL,UAAU,EACV,QAA0B,GAC3B,EAAE,cAAc;IAsDnB,OAAO,CAAC,CAAC,EAAE,IAAI,GAAG,OAAO;IAczB,eAAe,CAAC,CAAC,EAAE,IAAI,GAAG,OAAO;CAWlC"} \ No newline at end of file +{"version":3,"file":"ignore.d.ts","sourceRoot":"","sources":["../../src/ignore.ts"],"names":[],"mappings":"AAKA,OAAO,EAAE,SAAS,EAAE,MAAM,WAAW,CAAA;AACrC,OAAO,EAAE,IAAI,EAAE,MAAM,aAAa,CAAA;AAElC,OAAO,EAAE,cAAc,EAAE,MAAM,aAAa,CAAA;AAE5C,MAAM,WAAW,UAAU;IACzB,OAAO,CAAC,EAAE,CAAC,CAAC,EAAE,IAAI,KAAK,OAAO,CAAA;IAC9B,eAAe,CAAC,EAAE,CAAC,CAAC,EAAE,IAAI,KAAK,OAAO,CAAA;CACvC;AASD;;GAEG;AACH,qBAAa,MAAO,YAAW,UAAU;IACvC,QAAQ,EAAE,SAAS,EAAE,CAAA;IACrB,gBAAgB,EAAE,SAAS,EAAE,CAAA;IAC7B,QAAQ,EAAE,SAAS,EAAE,CAAA;IACrB,gBAAgB,EAAE,SAAS,EAAE,CAAA;gBAG3B,OAAO,EAAE,MAAM,EAAE,EACjB,EACE,OAAO,EACP,MAAM,EACN,KAAK,EACL,UAAU,EACV,QAA0B,GAC3B,EAAE,cAAc;IA4DnB,OAAO,CAAC,CAAC,EAAE,IAAI,GAAG,OAAO;IAczB,eAAe,CAAC,CAAC,EAAE,IAAI,GAAG,OAAO;CAWlC"} \ No newline at end of file diff --git a/deps/npm/node_modules/glob/dist/commonjs/ignore.js b/deps/npm/node_modules/glob/dist/commonjs/ignore.js index 6cffb49f8ed27c..3c0daeff86ff87 100644 --- a/deps/npm/node_modules/glob/dist/commonjs/ignore.js +++ b/deps/npm/node_modules/glob/dist/commonjs/ignore.js @@ -57,6 +57,12 @@ class Ignore { if (!parsed || !globParts) { throw new Error('invalid pattern object'); } + // strip off leading ./ portions + // https://github.com/isaacs/node-glob/issues/570 + while (parsed[0] === '.' 
&& globParts[0] === '.') { + parsed.shift(); + globParts.shift(); + } /* c8 ignore stop */ const p = new pattern_js_1.Pattern(parsed, globParts, 0, platform); const m = new minimatch_1.Minimatch(p.globString(), mmopts); diff --git a/deps/npm/node_modules/glob/dist/commonjs/ignore.js.map b/deps/npm/node_modules/glob/dist/commonjs/ignore.js.map index 3533cfc8811e90..30a4f731ee6048 100644 --- a/deps/npm/node_modules/glob/dist/commonjs/ignore.js.map +++ b/deps/npm/node_modules/glob/dist/commonjs/ignore.js.map @@ -1 +1 @@ -{"version":3,"file":"ignore.js","sourceRoot":"","sources":["../../src/ignore.ts"],"names":[],"mappings":";AAAA,sDAAsD;AACtD,kCAAkC;AAClC,kEAAkE;AAClE,6CAA6C;;;AAE7C,yCAAqC;AAErC,6CAAsC;AAQtC,MAAM,eAAe,GACnB,OAAO,OAAO,KAAK,QAAQ;IAC3B,OAAO;IACP,OAAO,OAAO,CAAC,QAAQ,KAAK,QAAQ;IAClC,CAAC,CAAC,OAAO,CAAC,QAAQ;IAClB,CAAC,CAAC,OAAO,CAAA;AAEb;;GAEG;AACH,MAAa,MAAM;IACjB,QAAQ,CAAa;IACrB,gBAAgB,CAAa;IAC7B,QAAQ,CAAa;IACrB,gBAAgB,CAAa;IAE7B,YACE,OAAiB,EACjB,EACE,OAAO,EACP,MAAM,EACN,KAAK,EACL,UAAU,EACV,QAAQ,GAAG,eAAe,GACX;QAEjB,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAA;QAClB,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAA;QAClB,IAAI,CAAC,gBAAgB,GAAG,EAAE,CAAA;QAC1B,IAAI,CAAC,gBAAgB,GAAG,EAAE,CAAA;QAC1B,MAAM,MAAM,GAAG;YACb,GAAG,EAAE,IAAI;YACT,OAAO;YACP,MAAM;YACN,KAAK;YACL,UAAU;YACV,iBAAiB,EAAE,CAAC;YACpB,QAAQ;YACR,SAAS,EAAE,IAAI;YACf,QAAQ,EAAE,IAAI;SACf,CAAA;QAED,mEAAmE;QACnE,gEAAgE;QAChE,mEAAmE;QACnE,uCAAuC;QACvC,mEAAmE;QACnE,qEAAqE;QACrE,uBAAuB;QACvB,uEAAuE;QACvE,oEAAoE;QACpE,qBAAqB;QACrB,sEAAsE;QACtE,wCAAwC;QACxC,KAAK,MAAM,GAAG,IAAI,OAAO,EAAE;YACzB,MAAM,EAAE,GAAG,IAAI,qBAAS,CAAC,GAAG,EAAE,MAAM,CAAC,CAAA;YACrC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;gBACtC,MAAM,MAAM,GAAG,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;gBACxB,MAAM,SAAS,GAAG,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,CAAA;gBACjC,qBAAqB;gBACrB,IAAI,CAAC,MAAM,IAAI,CAAC,SAAS,EAAE;oBACzB,MAAM,IAAI,KAAK,CAAC,wBAAwB,CAAC,CAAA;iBAC1C;gBACD,oBAAoB;gBACpB,MAAM,CAAC,GAAG,IAAI,oBAAO,CAAC,MAAM,EAAE,SAAS,EAAE,CAAC,EAAE,QAAQ,CAAC,CAAA;gBACrD,MAAM,CAAC,GAAG,IAAI,qBAAS,CAAC,CAAC,CAAC,UAAU,EAAE,EAAE,MAAM,CAAC,CAAA;gBAC/C,MAAM,QAAQ,GAAG,SAAS,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,CAAC,KAAK,IAAI,CAAA;gBACzD,MAAM,QAAQ,GAAG,CAAC,CAAC,UAAU,EAAE,CAAA;gBAC/B,IAAI,QAAQ;oBAAE,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;;oBAC9B,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;gBAC1B,IAAI,QAAQ,EAAE;oBACZ,IAAI,QAAQ;wBAAE,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;;wBACtC,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;iBACnC;aACF;SACF;IACH,CAAC;IAED,OAAO,CAAC,CAAO;QACb,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,CAAA;QAC7B,MAAM,SAAS,GAAG,GAAG,QAAQ,GAAG,CAAA;QAChC,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,IAAI,GAAG,CAAA;QACpC,MAAM,SAAS,GAAG,GAAG,QAAQ,GAAG,CAAA;QAChC,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,QAAQ,EAAE;YAC7B,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC;gBAAE,OAAO,IAAI,CAAA;SACzD;QACD,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,QAAQ,EAAE;YAC7B,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC;gBAAE,OAAO,IAAI,CAAA;SACzD;QACD,OAAO,KAAK,CAAA;IACd,CAAC;IAED,eAAe,CAAC,CAAO;QACrB,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,GAAG,GAAG,CAAA;QACnC,MAAM,QAAQ,GAAG,CAAC,CAAC,CAAC,QAAQ,EAAE,IAAI,GAAG,CAAC,GAAG,GAAG,CAAA;QAC5C,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,gBAAgB,EAAE;YACrC,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC;gBAAE,OAAO,IAAI,CAAA;SACnC;QACD,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,gBAAgB,EAAE;YACrC,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC;gBAAE,OAAO,IAAI,CAAA;SACnC;QACD,OAAO,KAAK,CAAA;IACd,CAAC;CACF;AA7FD,wBA6FC","sourcesContent":["// give it a pattern, and it'll be able to tell you if\n// a given 
path should be ignored.\n// Ignoring a path ignores its children if the pattern ends in /**\n// Ignores are always parsed in dot:true mode\n\nimport { Minimatch } from 'minimatch'\nimport { Path } from 'path-scurry'\nimport { Pattern } from './pattern.js'\nimport { GlobWalkerOpts } from './walker.js'\n\nexport interface IgnoreLike {\n ignored?: (p: Path) => boolean\n childrenIgnored?: (p: Path) => boolean\n}\n\nconst defaultPlatform: NodeJS.Platform =\n typeof process === 'object' &&\n process &&\n typeof process.platform === 'string'\n ? process.platform\n : 'linux'\n\n/**\n * Class used to process ignored patterns\n */\nexport class Ignore implements IgnoreLike {\n relative: Minimatch[]\n relativeChildren: Minimatch[]\n absolute: Minimatch[]\n absoluteChildren: Minimatch[]\n\n constructor(\n ignored: string[],\n {\n nobrace,\n nocase,\n noext,\n noglobstar,\n platform = defaultPlatform,\n }: GlobWalkerOpts\n ) {\n this.relative = []\n this.absolute = []\n this.relativeChildren = []\n this.absoluteChildren = []\n const mmopts = {\n dot: true,\n nobrace,\n nocase,\n noext,\n noglobstar,\n optimizationLevel: 2,\n platform,\n nocomment: true,\n nonegate: true,\n }\n\n // this is a little weird, but it gives us a clean set of optimized\n // minimatch matchers, without getting tripped up if one of them\n // ends in /** inside a brace section, and it's only inefficient at\n // the start of the walk, not along it.\n // It'd be nice if the Pattern class just had a .test() method, but\n // handling globstars is a bit of a pita, and that code already lives\n // in minimatch anyway.\n // Another way would be if maybe Minimatch could take its set/globParts\n // as an option, and then we could at least just use Pattern to test\n // for absolute-ness.\n // Yet another way, Minimatch could take an array of glob strings, and\n // a cwd option, and do the right thing.\n for (const ign of ignored) {\n const mm = new Minimatch(ign, mmopts)\n for (let i = 0; i < mm.set.length; i++) {\n const parsed = mm.set[i]\n const globParts = mm.globParts[i]\n /* c8 ignore start */\n if (!parsed || !globParts) {\n throw new Error('invalid pattern object')\n }\n /* c8 ignore stop */\n const p = new Pattern(parsed, globParts, 0, platform)\n const m = new Minimatch(p.globString(), mmopts)\n const children = globParts[globParts.length - 1] === '**'\n const absolute = p.isAbsolute()\n if (absolute) this.absolute.push(m)\n else this.relative.push(m)\n if (children) {\n if (absolute) this.absoluteChildren.push(m)\n else this.relativeChildren.push(m)\n }\n }\n }\n }\n\n ignored(p: Path): boolean {\n const fullpath = p.fullpath()\n const fullpaths = `${fullpath}/`\n const relative = p.relative() || '.'\n const relatives = `${relative}/`\n for (const m of this.relative) {\n if (m.match(relative) || m.match(relatives)) return true\n }\n for (const m of this.absolute) {\n if (m.match(fullpath) || m.match(fullpaths)) return true\n }\n return false\n }\n\n childrenIgnored(p: Path): boolean {\n const fullpath = p.fullpath() + '/'\n const relative = (p.relative() || '.') + '/'\n for (const m of this.relativeChildren) {\n if (m.match(relative)) return true\n }\n for (const m of this.absoluteChildren) {\n if (m.match(fullpath)) return true\n }\n return false\n }\n}\n"]} \ No newline at end of file 
+{"version":3,"file":"ignore.js","sourceRoot":"","sources":["../../src/ignore.ts"],"names":[],"mappings":";AAAA,sDAAsD;AACtD,kCAAkC;AAClC,kEAAkE;AAClE,6CAA6C;;;AAE7C,yCAAqC;AAErC,6CAAsC;AAQtC,MAAM,eAAe,GACnB,OAAO,OAAO,KAAK,QAAQ;IAC3B,OAAO;IACP,OAAO,OAAO,CAAC,QAAQ,KAAK,QAAQ;IAClC,CAAC,CAAC,OAAO,CAAC,QAAQ;IAClB,CAAC,CAAC,OAAO,CAAA;AAEb;;GAEG;AACH,MAAa,MAAM;IACjB,QAAQ,CAAa;IACrB,gBAAgB,CAAa;IAC7B,QAAQ,CAAa;IACrB,gBAAgB,CAAa;IAE7B,YACE,OAAiB,EACjB,EACE,OAAO,EACP,MAAM,EACN,KAAK,EACL,UAAU,EACV,QAAQ,GAAG,eAAe,GACX;QAEjB,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAA;QAClB,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAA;QAClB,IAAI,CAAC,gBAAgB,GAAG,EAAE,CAAA;QAC1B,IAAI,CAAC,gBAAgB,GAAG,EAAE,CAAA;QAC1B,MAAM,MAAM,GAAG;YACb,GAAG,EAAE,IAAI;YACT,OAAO;YACP,MAAM;YACN,KAAK;YACL,UAAU;YACV,iBAAiB,EAAE,CAAC;YACpB,QAAQ;YACR,SAAS,EAAE,IAAI;YACf,QAAQ,EAAE,IAAI;SACf,CAAA;QAED,mEAAmE;QACnE,gEAAgE;QAChE,mEAAmE;QACnE,uCAAuC;QACvC,mEAAmE;QACnE,qEAAqE;QACrE,uBAAuB;QACvB,uEAAuE;QACvE,oEAAoE;QACpE,qBAAqB;QACrB,sEAAsE;QACtE,wCAAwC;QACxC,KAAK,MAAM,GAAG,IAAI,OAAO,EAAE;YACzB,MAAM,EAAE,GAAG,IAAI,qBAAS,CAAC,GAAG,EAAE,MAAM,CAAC,CAAA;YACrC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;gBACtC,MAAM,MAAM,GAAG,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;gBACxB,MAAM,SAAS,GAAG,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,CAAA;gBACjC,qBAAqB;gBACrB,IAAI,CAAC,MAAM,IAAI,CAAC,SAAS,EAAE;oBACzB,MAAM,IAAI,KAAK,CAAC,wBAAwB,CAAC,CAAA;iBAC1C;gBACD,gCAAgC;gBAChC,iDAAiD;gBACjD,OAAO,MAAM,CAAC,CAAC,CAAC,KAAK,GAAG,IAAI,SAAS,CAAC,CAAC,CAAC,KAAK,GAAG,EAAE;oBAChD,MAAM,CAAC,KAAK,EAAE,CAAA;oBACd,SAAS,CAAC,KAAK,EAAE,CAAA;iBAClB;gBACD,oBAAoB;gBACpB,MAAM,CAAC,GAAG,IAAI,oBAAO,CAAC,MAAM,EAAE,SAAS,EAAE,CAAC,EAAE,QAAQ,CAAC,CAAA;gBACrD,MAAM,CAAC,GAAG,IAAI,qBAAS,CAAC,CAAC,CAAC,UAAU,EAAE,EAAE,MAAM,CAAC,CAAA;gBAC/C,MAAM,QAAQ,GAAG,SAAS,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,CAAC,KAAK,IAAI,CAAA;gBACzD,MAAM,QAAQ,GAAG,CAAC,CAAC,UAAU,EAAE,CAAA;gBAC/B,IAAI,QAAQ;oBAAE,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;;oBAC9B,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;gBAC1B,IAAI,QAAQ,EAAE;oBACZ,IAAI,QAAQ;wBAAE,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;;wBACtC,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;iBACnC;aACF;SACF;IACH,CAAC;IAED,OAAO,CAAC,CAAO;QACb,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,CAAA;QAC7B,MAAM,SAAS,GAAG,GAAG,QAAQ,GAAG,CAAA;QAChC,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,IAAI,GAAG,CAAA;QACpC,MAAM,SAAS,GAAG,GAAG,QAAQ,GAAG,CAAA;QAChC,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,QAAQ,EAAE;YAC7B,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC;gBAAE,OAAO,IAAI,CAAA;SACzD;QACD,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,QAAQ,EAAE;YAC7B,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC;gBAAE,OAAO,IAAI,CAAA;SACzD;QACD,OAAO,KAAK,CAAA;IACd,CAAC;IAED,eAAe,CAAC,CAAO;QACrB,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,GAAG,GAAG,CAAA;QACnC,MAAM,QAAQ,GAAG,CAAC,CAAC,CAAC,QAAQ,EAAE,IAAI,GAAG,CAAC,GAAG,GAAG,CAAA;QAC5C,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,gBAAgB,EAAE;YACrC,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC;gBAAE,OAAO,IAAI,CAAA;SACnC;QACD,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,gBAAgB,EAAE;YACrC,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC;gBAAE,OAAO,IAAI,CAAA;SACnC;QACD,OAAO,KAAK,CAAA;IACd,CAAC;CACF;AAnGD,wBAmGC","sourcesContent":["// give it a pattern, and it'll be able to tell you if\n// a given path should be ignored.\n// Ignoring a path ignores its children if the pattern ends in /**\n// Ignores are always parsed in dot:true mode\n\nimport { Minimatch } from 'minimatch'\nimport { Path } from 'path-scurry'\nimport { Pattern } from './pattern.js'\nimport { GlobWalkerOpts } from './walker.js'\n\nexport interface IgnoreLike {\n 
ignored?: (p: Path) => boolean\n childrenIgnored?: (p: Path) => boolean\n}\n\nconst defaultPlatform: NodeJS.Platform =\n typeof process === 'object' &&\n process &&\n typeof process.platform === 'string'\n ? process.platform\n : 'linux'\n\n/**\n * Class used to process ignored patterns\n */\nexport class Ignore implements IgnoreLike {\n relative: Minimatch[]\n relativeChildren: Minimatch[]\n absolute: Minimatch[]\n absoluteChildren: Minimatch[]\n\n constructor(\n ignored: string[],\n {\n nobrace,\n nocase,\n noext,\n noglobstar,\n platform = defaultPlatform,\n }: GlobWalkerOpts\n ) {\n this.relative = []\n this.absolute = []\n this.relativeChildren = []\n this.absoluteChildren = []\n const mmopts = {\n dot: true,\n nobrace,\n nocase,\n noext,\n noglobstar,\n optimizationLevel: 2,\n platform,\n nocomment: true,\n nonegate: true,\n }\n\n // this is a little weird, but it gives us a clean set of optimized\n // minimatch matchers, without getting tripped up if one of them\n // ends in /** inside a brace section, and it's only inefficient at\n // the start of the walk, not along it.\n // It'd be nice if the Pattern class just had a .test() method, but\n // handling globstars is a bit of a pita, and that code already lives\n // in minimatch anyway.\n // Another way would be if maybe Minimatch could take its set/globParts\n // as an option, and then we could at least just use Pattern to test\n // for absolute-ness.\n // Yet another way, Minimatch could take an array of glob strings, and\n // a cwd option, and do the right thing.\n for (const ign of ignored) {\n const mm = new Minimatch(ign, mmopts)\n for (let i = 0; i < mm.set.length; i++) {\n const parsed = mm.set[i]\n const globParts = mm.globParts[i]\n /* c8 ignore start */\n if (!parsed || !globParts) {\n throw new Error('invalid pattern object')\n }\n // strip off leading ./ portions\n // https://github.com/isaacs/node-glob/issues/570\n while (parsed[0] === '.' 
&& globParts[0] === '.') {\n parsed.shift()\n globParts.shift()\n }\n /* c8 ignore stop */\n const p = new Pattern(parsed, globParts, 0, platform)\n const m = new Minimatch(p.globString(), mmopts)\n const children = globParts[globParts.length - 1] === '**'\n const absolute = p.isAbsolute()\n if (absolute) this.absolute.push(m)\n else this.relative.push(m)\n if (children) {\n if (absolute) this.absoluteChildren.push(m)\n else this.relativeChildren.push(m)\n }\n }\n }\n }\n\n ignored(p: Path): boolean {\n const fullpath = p.fullpath()\n const fullpaths = `${fullpath}/`\n const relative = p.relative() || '.'\n const relatives = `${relative}/`\n for (const m of this.relative) {\n if (m.match(relative) || m.match(relatives)) return true\n }\n for (const m of this.absolute) {\n if (m.match(fullpath) || m.match(fullpaths)) return true\n }\n return false\n }\n\n childrenIgnored(p: Path): boolean {\n const fullpath = p.fullpath() + '/'\n const relative = (p.relative() || '.') + '/'\n for (const m of this.relativeChildren) {\n if (m.match(relative)) return true\n }\n for (const m of this.absoluteChildren) {\n if (m.match(fullpath)) return true\n }\n return false\n }\n}\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/glob/dist/commonjs/package.json b/deps/npm/node_modules/glob/dist/commonjs/package.json index 0292b9956f2e40..5bbefffbabee39 100644 --- a/deps/npm/node_modules/glob/dist/commonjs/package.json +++ b/deps/npm/node_modules/glob/dist/commonjs/package.json @@ -1 +1,3 @@ -{"type":"commonjs"} \ No newline at end of file +{ + "type": "commonjs" +} diff --git a/deps/npm/node_modules/glob/dist/commonjs/walker.d.ts.map b/deps/npm/node_modules/glob/dist/commonjs/walker.d.ts.map index 7c8df20b2f323c..2cae287c2e1f42 100644 --- a/deps/npm/node_modules/glob/dist/commonjs/walker.d.ts.map +++ b/deps/npm/node_modules/glob/dist/commonjs/walker.d.ts.map @@ -1 +1 @@ 
-{"version":3,"file":"walker.d.ts","sourceRoot":"","sources":["../../src/walker.ts"],"names":[],"mappings":";AAAA;;;;;GAKG;AACH,OAAO,EAAE,QAAQ,EAAE,MAAM,UAAU,CAAA;AACnC,OAAO,EAAE,IAAI,EAAE,MAAM,aAAa,CAAA;AAClC,OAAO,EAAU,UAAU,EAAE,MAAM,aAAa,CAAA;AAOhD,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAA;AACtC,OAAO,EAAE,SAAS,EAAE,MAAM,gBAAgB,CAAA;AAE1C,MAAM,WAAW,cAAc;IAC7B,QAAQ,CAAC,EAAE,OAAO,CAAA;IAClB,kBAAkB,CAAC,EAAE,OAAO,CAAA;IAC5B,GAAG,CAAC,EAAE,MAAM,GAAG,GAAG,CAAA;IAClB,GAAG,CAAC,EAAE,OAAO,CAAA;IACb,WAAW,CAAC,EAAE,OAAO,CAAA;IACrB,MAAM,CAAC,EAAE,OAAO,CAAA;IAChB,MAAM,CAAC,EAAE,MAAM,GAAG,MAAM,EAAE,GAAG,UAAU,CAAA;IACvC,IAAI,CAAC,EAAE,OAAO,CAAA;IACd,SAAS,CAAC,EAAE,OAAO,CAAA;IAGnB,QAAQ,CAAC,EAAE,MAAM,CAAA;IACjB,OAAO,CAAC,EAAE,OAAO,CAAA;IACjB,MAAM,CAAC,EAAE,OAAO,CAAA;IAChB,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,UAAU,CAAC,EAAE,OAAO,CAAA;IACpB,QAAQ,CAAC,EAAE,MAAM,CAAC,QAAQ,CAAA;IAC1B,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,QAAQ,CAAC,EAAE,OAAO,CAAA;IAClB,IAAI,CAAC,EAAE,MAAM,CAAA;IACb,IAAI,CAAC,EAAE,OAAO,CAAA;IACd,MAAM,CAAC,EAAE,WAAW,CAAA;IACpB,oBAAoB,CAAC,EAAE,OAAO,CAAA;IAC9B,aAAa,CAAC,EAAE,OAAO,CAAA;CACxB;AAED,MAAM,MAAM,gBAAgB,GAAG,cAAc,GAAG;IAC9C,aAAa,EAAE,IAAI,CAAA;CACpB,CAAA;AACD,MAAM,MAAM,iBAAiB,GAAG,cAAc,GAAG;IAC/C,aAAa,EAAE,KAAK,CAAA;CACrB,CAAA;AACD,MAAM,MAAM,iBAAiB,GAAG,cAAc,GAAG;IAC/C,aAAa,CAAC,EAAE,SAAS,CAAA;CAC1B,CAAA;AAED,MAAM,MAAM,MAAM,CAAC,CAAC,SAAS,cAAc,IAAI,CAAC,SAAS,gBAAgB,GACrE,IAAI,GACJ,CAAC,SAAS,iBAAiB,GAC3B,MAAM,GACN,CAAC,SAAS,iBAAiB,GAC3B,MAAM,GACN,IAAI,GAAG,MAAM,CAAA;AAEjB,MAAM,MAAM,OAAO,CAAC,CAAC,SAAS,cAAc,IAAI,CAAC,SAAS,gBAAgB,GACtE,GAAG,CAAC,IAAI,CAAC,GACT,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,GAAG,CAAC,IAAI,GAAG,MAAM,CAAC,CAAA;AAEtB,MAAM,MAAM,WAAW,CAAC,CAAC,SAAS,cAAc,IAC9C,CAAC,SAAS,gBAAgB,GACtB,QAAQ,CAAC,IAAI,EAAE,IAAI,CAAC,GACpB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,QAAQ,CAAC,IAAI,GAAG,MAAM,EAAE,IAAI,GAAG,MAAM,CAAC,CAAA;AAY5C;;GAEG;AACH,8BAAsB,QAAQ,CAAC,CAAC,SAAS,cAAc,GAAG,cAAc;;IACtE,IAAI,EAAE,IAAI,CAAA;IACV,QAAQ,EAAE,OAAO,EAAE,CAAA;IACnB,IAAI,EAAE,CAAC,CAAA;IACP,IAAI,EAAE,GAAG,CAAC,IAAI,CAAC,CAAkB;IACjC,MAAM,EAAE,OAAO,CAAQ;IACvB,OAAO,EAAE,OAAO,CAAQ;IAIxB,MAAM,CAAC,EAAE,WAAW,CAAA;IACpB,QAAQ,EAAE,MAAM,CAAA;gBAEJ,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IA8BpD,KAAK;IAGL,MAAM;IAUN,QAAQ,CAAC,EAAE,EAAE,MAAM,GAAG;IAahB,UAAU,CAAC,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,GAAG,SAAS,CAAC;IAYpE,cAAc,CAAC,CAAC,EAAE,IAAI,GAAG,SAAS,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI,GAAG,SAAS;IAUrE,cAAc,CAAC,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI,GAAG,SAAS;IAYzD,QAAQ,CAAC,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IACtC,QAAQ,CAAC,SAAS,CAAC,CAAC,EAAE,MAAM,GAAG,IAAI,GAAG,IAAI;IAE1C,WAAW,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO;IAsBhC,KAAK,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;IAKtE,SAAS,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI;IAK3D,MAAM,CAAC,MAAM,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,EAAE,EAAE,EAAE,MAAM,GAAG;IAOvD,OAAO,CACL,MAAM,EAAE,IAAI,EACZ,QAAQ,EAAE,OAAO,EAAE,EACnB,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IA2Cf,OAAO,CACL,MAAM,EAAE,IAAI,EACZ,OAAO,EAAE,IAAI,EAAE,EACf,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IAsBf,UAAU,CAAC,MAAM,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,EAAE,EAAE,EAAE,MAAM,GAAG;IAO3D,WAAW,CACT,MAAM,EAAE,IAAI,EACZ,QAAQ,EAAE,OAAO,EAAE,EACnB,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IAqCf,WAAW,CACT,MAAM,EAAE,IAAI,EACZ,OAAO,EAAE,IAAI,EAAE,EACf,SAAS,EAAE
,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;CAoBhB;AAED,qBAAa,UAAU,CACrB,CAAC,SAAS,cAAc,GAAG,cAAc,CACzC,SAAQ,QAAQ,CAAC,CAAC,CAAC;IACnB,OAAO,EAAE,CAAC,SAAS,gBAAgB,GAC/B,GAAG,CAAC,IAAI,CAAC,GACT,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,GAAG,CAAC,IAAI,GAAG,MAAM,CAAC,CAAA;gBAEV,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IAKpD,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IAKvB,IAAI,IAAI,OAAO,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;IAiBjC,QAAQ,IAAI,OAAO,CAAC,CAAC,CAAC;CAWvB;AAED,qBAAa,UAAU,CACrB,CAAC,SAAS,cAAc,GAAG,cAAc,CACzC,SAAQ,QAAQ,CAAC,CAAC,CAAC;IACnB,OAAO,EAAE,CAAC,SAAS,gBAAgB,GAC/B,QAAQ,CAAC,IAAI,EAAE,IAAI,CAAC,GACpB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,QAAQ,CAAC,IAAI,GAAG,MAAM,EAAE,IAAI,GAAG,MAAM,CAAC,CAAA;gBAE9B,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IAUpD,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IAM7B,MAAM,IAAI,WAAW,CAAC,CAAC,CAAC;IAYxB,UAAU,IAAI,WAAW,CAAC,CAAC,CAAC;CAO7B"} \ No newline at end of file +{"version":3,"file":"walker.d.ts","sourceRoot":"","sources":["../../src/walker.ts"],"names":[],"mappings":";AAAA;;;;;GAKG;AACH,OAAO,EAAE,QAAQ,EAAE,MAAM,UAAU,CAAA;AACnC,OAAO,EAAE,IAAI,EAAE,MAAM,aAAa,CAAA;AAClC,OAAO,EAAU,UAAU,EAAE,MAAM,aAAa,CAAA;AAOhD,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAA;AACtC,OAAO,EAAE,SAAS,EAAE,MAAM,gBAAgB,CAAA;AAE1C,MAAM,WAAW,cAAc;IAC7B,QAAQ,CAAC,EAAE,OAAO,CAAA;IAClB,kBAAkB,CAAC,EAAE,OAAO,CAAA;IAC5B,GAAG,CAAC,EAAE,MAAM,GAAG,GAAG,CAAA;IAClB,GAAG,CAAC,EAAE,OAAO,CAAA;IACb,WAAW,CAAC,EAAE,OAAO,CAAA;IACrB,MAAM,CAAC,EAAE,OAAO,CAAA;IAChB,MAAM,CAAC,EAAE,MAAM,GAAG,MAAM,EAAE,GAAG,UAAU,CAAA;IACvC,IAAI,CAAC,EAAE,OAAO,CAAA;IACd,SAAS,CAAC,EAAE,OAAO,CAAA;IAGnB,QAAQ,CAAC,EAAE,MAAM,CAAA;IACjB,OAAO,CAAC,EAAE,OAAO,CAAA;IACjB,MAAM,CAAC,EAAE,OAAO,CAAA;IAChB,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,UAAU,CAAC,EAAE,OAAO,CAAA;IACpB,QAAQ,CAAC,EAAE,MAAM,CAAC,QAAQ,CAAA;IAC1B,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,QAAQ,CAAC,EAAE,OAAO,CAAA;IAClB,IAAI,CAAC,EAAE,MAAM,CAAA;IACb,IAAI,CAAC,EAAE,OAAO,CAAA;IACd,MAAM,CAAC,EAAE,WAAW,CAAA;IACpB,oBAAoB,CAAC,EAAE,OAAO,CAAA;IAC9B,aAAa,CAAC,EAAE,OAAO,CAAA;CACxB;AAED,MAAM,MAAM,gBAAgB,GAAG,cAAc,GAAG;IAC9C,aAAa,EAAE,IAAI,CAAA;CACpB,CAAA;AACD,MAAM,MAAM,iBAAiB,GAAG,cAAc,GAAG;IAC/C,aAAa,EAAE,KAAK,CAAA;CACrB,CAAA;AACD,MAAM,MAAM,iBAAiB,GAAG,cAAc,GAAG;IAC/C,aAAa,CAAC,EAAE,SAAS,CAAA;CAC1B,CAAA;AAED,MAAM,MAAM,MAAM,CAAC,CAAC,SAAS,cAAc,IAAI,CAAC,SAAS,gBAAgB,GACrE,IAAI,GACJ,CAAC,SAAS,iBAAiB,GAC3B,MAAM,GACN,CAAC,SAAS,iBAAiB,GAC3B,MAAM,GACN,IAAI,GAAG,MAAM,CAAA;AAEjB,MAAM,MAAM,OAAO,CAAC,CAAC,SAAS,cAAc,IAAI,CAAC,SAAS,gBAAgB,GACtE,GAAG,CAAC,IAAI,CAAC,GACT,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,GAAG,CAAC,IAAI,GAAG,MAAM,CAAC,CAAA;AAEtB,MAAM,MAAM,WAAW,CAAC,CAAC,SAAS,cAAc,IAC9C,CAAC,SAAS,gBAAgB,GACtB,QAAQ,CAAC,IAAI,EAAE,IAAI,CAAC,GACpB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,QAAQ,CAAC,IAAI,GAAG,MAAM,EAAE,IAAI,GAAG,MAAM,CAAC,CAAA;AAY5C;;GAEG;AACH,8BAAsB,QAAQ,CAAC,CAAC,SAAS,cAAc,GAAG,cAAc;;IACtE,IAAI,EAAE,IAAI,CAAA;IACV,QAAQ,EAAE,OAAO,EAAE,CAAA;IACnB,IAAI,EAAE,CAAC,CAAA;IACP,IAAI,EAAE,GAAG,CAAC,IAAI,CAAC,CAAkB;IACjC,MAAM,EAAE,OAAO,CAAQ;IACvB,OAAO,EAAE,OAAO,CAAQ;IAIxB,MAAM,CAAC,EAAE,WAAW,CAAA;IACpB,QAAQ,EAAE,MAAM,CAAA;gBAEJ,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IA8BpD,KAAK;IAGL,MAAM;IAUN,QAAQ,CAAC,EAAE,EAAE,MAAM,GAAG;IAahB,UAAU,CAAC,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,OAA
O,GAAG,OAAO,CAAC,IAAI,GAAG,SAAS,CAAC;IAqBpE,cAAc,CAAC,CAAC,EAAE,IAAI,GAAG,SAAS,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI,GAAG,SAAS;IAcrE,cAAc,CAAC,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI,GAAG,SAAS;IAmBzD,QAAQ,CAAC,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IACtC,QAAQ,CAAC,SAAS,CAAC,CAAC,EAAE,MAAM,GAAG,IAAI,GAAG,IAAI;IAE1C,WAAW,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO;IAsBhC,KAAK,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;IAKtE,SAAS,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI;IAK3D,MAAM,CAAC,MAAM,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,EAAE,EAAE,EAAE,MAAM,GAAG;IAOvD,OAAO,CACL,MAAM,EAAE,IAAI,EACZ,QAAQ,EAAE,OAAO,EAAE,EACnB,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IA2Cf,OAAO,CACL,MAAM,EAAE,IAAI,EACZ,OAAO,EAAE,IAAI,EAAE,EACf,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IAsBf,UAAU,CAAC,MAAM,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,EAAE,EAAE,EAAE,MAAM,GAAG;IAO3D,WAAW,CACT,MAAM,EAAE,IAAI,EACZ,QAAQ,EAAE,OAAO,EAAE,EACnB,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IAqCf,WAAW,CACT,MAAM,EAAE,IAAI,EACZ,OAAO,EAAE,IAAI,EAAE,EACf,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;CAoBhB;AAED,qBAAa,UAAU,CACrB,CAAC,SAAS,cAAc,GAAG,cAAc,CACzC,SAAQ,QAAQ,CAAC,CAAC,CAAC;IACnB,OAAO,EAAE,CAAC,SAAS,gBAAgB,GAC/B,GAAG,CAAC,IAAI,CAAC,GACT,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,GAAG,CAAC,IAAI,GAAG,MAAM,CAAC,CAAA;gBAEV,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IAKpD,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IAKvB,IAAI,IAAI,OAAO,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;IAiBjC,QAAQ,IAAI,OAAO,CAAC,CAAC,CAAC;CAWvB;AAED,qBAAa,UAAU,CACrB,CAAC,SAAS,cAAc,GAAG,cAAc,CACzC,SAAQ,QAAQ,CAAC,CAAC,CAAC;IACnB,OAAO,EAAE,CAAC,SAAS,gBAAgB,GAC/B,QAAQ,CAAC,IAAI,EAAE,IAAI,CAAC,GACpB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,QAAQ,CAAC,IAAI,GAAG,MAAM,EAAE,IAAI,GAAG,MAAM,CAAC,CAAA;gBAE9B,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IAUpD,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IAM7B,MAAM,IAAI,WAAW,CAAC,CAAC,CAAC;IAYxB,UAAU,IAAI,WAAW,CAAC,CAAC,CAAC;CAO7B"} \ No newline at end of file diff --git a/deps/npm/node_modules/glob/dist/commonjs/walker.js b/deps/npm/node_modules/glob/dist/commonjs/walker.js index 9651ce1164016c..20cec70fdf95f5 100644 --- a/deps/npm/node_modules/glob/dist/commonjs/walker.js +++ b/deps/npm/node_modules/glob/dist/commonjs/walker.js @@ -96,13 +96,26 @@ class GlobUtil { e = rpc; } const needStat = e.isUnknown() || this.opts.stat; - return this.matchCheckTest(needStat ? await e.lstat() : e, ifDir); + const s = needStat ? await e.lstat() : e; + if (this.opts.follow && this.opts.nodir && s?.isSymbolicLink()) { + const target = await s.realpath(); + /* c8 ignore start */ + if (target && (target.isUnknown() || this.opts.stat)) { + await target.lstat(); + } + /* c8 ignore stop */ + } + return this.matchCheckTest(s, ifDir); } matchCheckTest(e, ifDir) { return e && (this.maxDepth === Infinity || e.depth() <= this.maxDepth) && (!ifDir || e.canReaddir()) && (!this.opts.nodir || !e.isDirectory()) && + (!this.opts.nodir || + !this.opts.follow || + !e.isSymbolicLink() || + !e.realpathCached()?.isDirectory()) && !this.#ignored(e) ? e : undefined; @@ -118,7 +131,14 @@ class GlobUtil { e = rpc; } const needStat = e.isUnknown() || this.opts.stat; - return this.matchCheckTest(needStat ? e.lstatSync() : e, ifDir); + const s = needStat ? 
e.lstatSync() : e; + if (this.opts.follow && this.opts.nodir && s?.isSymbolicLink()) { + const target = s.realpathSync(); + if (target && (target?.isUnknown() || this.opts.stat)) { + target.lstatSync(); + } + } + return this.matchCheckTest(s, ifDir); } matchFinish(e, absolute) { if (this.#ignored(e)) diff --git a/deps/npm/node_modules/glob/dist/commonjs/walker.js.map b/deps/npm/node_modules/glob/dist/commonjs/walker.js.map index ad1cd5f8379893..5c2a58531858b0 100644 --- a/deps/npm/node_modules/glob/dist/commonjs/walker.js.map +++ b/deps/npm/node_modules/glob/dist/commonjs/walker.js.map @@ -1 +1 @@ -{"version":3,"file":"walker.js","sourceRoot":"","sources":["../../src/walker.ts"],"names":[],"mappings":";;;AAAA;;;;;GAKG;AACH,uCAAmC;AAEnC,2CAAgD;AAQhD,iDAA0C;AAiE1C,MAAM,UAAU,GAAG,CACjB,MAAsC,EACtC,IAAoB,EACR,EAAE,CACd,OAAO,MAAM,KAAK,QAAQ;IACxB,CAAC,CAAC,IAAI,kBAAM,CAAC,CAAC,MAAM,CAAC,EAAE,IAAI,CAAC;IAC5B,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC;QACvB,CAAC,CAAC,IAAI,kBAAM,CAAC,MAAM,EAAE,IAAI,CAAC;QAC1B,CAAC,CAAC,MAAM,CAAA;AAEZ;;GAEG;AACH,MAAsB,QAAQ;IAC5B,IAAI,CAAM;IACV,QAAQ,CAAW;IACnB,IAAI,CAAG;IACP,IAAI,GAAc,IAAI,GAAG,EAAQ,CAAA;IACjC,MAAM,GAAY,KAAK,CAAA;IACvB,OAAO,GAAY,KAAK,CAAA;IACxB,SAAS,GAAkB,EAAE,CAAA;IAC7B,OAAO,CAAa;IACpB,IAAI,CAAY;IAChB,MAAM,CAAc;IACpB,QAAQ,CAAQ;IAGhB,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAA;QACxB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAA;QAChB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAA;QAChB,IAAI,CAAC,IAAI,GAAG,CAAC,IAAI,CAAC,KAAK,IAAI,IAAI,CAAC,QAAQ,KAAK,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAA;QACjE,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,OAAO,GAAG,UAAU,CAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,CAAA;SAC7C;QACD,6DAA6D;QAC7D,mBAAmB;QACnB,qBAAqB;QACrB,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC,QAAQ,IAAI,QAAQ,CAAA;QACzC,oBAAoB;QACpB,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAA;YACzB,IAAI,CAAC,MAAM,CAAC,gBAAgB,CAAC,OAAO,EAAE,GAAG,EAAE;gBACzC,IAAI,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,CAAA;YAC3B,CAAC,CAAC,CAAA;SACH;IACH,CAAC;IAED,QAAQ,CAAC,IAAU;QACjB,OAAO,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,OAAO,EAAE,OAAO,EAAE,CAAC,IAAI,CAAC,CAAA;IAC/D,CAAC;IACD,gBAAgB,CAAC,IAAU;QACzB,OAAO,CAAC,CAAC,IAAI,CAAC,OAAO,EAAE,eAAe,EAAE,CAAC,IAAI,CAAC,CAAA;IAChD,CAAC;IAED,yBAAyB;IACzB,KAAK;QACH,IAAI,CAAC,MAAM,GAAG,IAAI,CAAA;IACpB,CAAC;IACD,MAAM;QACJ,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,OAAM;QAChC,oBAAoB;QACpB,IAAI,CAAC,MAAM,GAAG,KAAK,CAAA;QACnB,IAAI,EAAE,GAA4B,SAAS,CAAA;QAC3C,OAAO,CAAC,IAAI,CAAC,MAAM,IAAI,CAAC,EAAE,GAAG,IAAI,CAAC,SAAS,CAAC,KAAK,EAAE,CAAC,EAAE;YACpD,EAAE,EAAE,CAAA;SACL;IACH,CAAC;IACD,QAAQ,CAAC,EAAa;QACpB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,OAAM;QAChC,qBAAqB;QACrB,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE;YAChB,EAAE,EAAE,CAAA;SACL;aAAM;YACL,oBAAoB;YACpB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EAAE,CAAC,CAAA;SACxB;IACH,CAAC;IAED,+DAA+D;IAC/D,wCAAwC;IACxC,KAAK,CAAC,UAAU,CAAC,CAAO,EAAE,KAAc;QACtC,IAAI,KAAK,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK;YAAE,OAAO,SAAS,CAAA;QAC9C,IAAI,GAAqB,CAAA;QACzB,IAAI,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;YACtB,GAAG,GAAG,CAAC,CAAC,cAAc,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAA;YAChD,IAAI,CAAC,GAAG;gBAAE,OAAO,SAAS,CAAA;YAC1B,CAAC,GAAG,GAAG,CAAA;SACR;QACD,MAAM,QAAQ,GAAG,CAAC,CAAC,SAAS,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAA;QAChD,OAAO,IAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;IACnE,CAAC;IAED,cAAc,CAAC,CAAmB,EAAE,KAAc;QAChD,OAAO,CAAC;YACN,CAAC,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,CAAC;YAC1D,CAAC,CAAC,KAAK,IAAI,CAAC,CAAC,UAAU,EAAE,CAAC;YAC1B,CAAC,CAAC,IA
AI,CAAC,IAAI,CAAC,KAAK,IAAI,CAAC,CAAC,CAAC,WAAW,EAAE,CAAC;YACtC,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;YACjB,CAAC,CAAC,CAAC;YACH,CAAC,CAAC,SAAS,CAAA;IACf,CAAC;IAED,cAAc,CAAC,CAAO,EAAE,KAAc;QACpC,IAAI,KAAK,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK;YAAE,OAAO,SAAS,CAAA;QAC9C,IAAI,GAAqB,CAAA;QACzB,IAAI,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;YACtB,GAAG,GAAG,CAAC,CAAC,cAAc,EAAE,IAAI,CAAC,CAAC,YAAY,EAAE,CAAA;YAC5C,IAAI,CAAC,GAAG;gBAAE,OAAO,SAAS,CAAA;YAC1B,CAAC,GAAG,GAAG,CAAA;SACR;QACD,MAAM,QAAQ,GAAG,CAAC,CAAC,SAAS,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAA;QAChD,OAAO,IAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;IACjE,CAAC;IAKD,WAAW,CAAC,CAAO,EAAE,QAAiB;QACpC,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;YAAE,OAAM;QAC5B,MAAM,GAAG,GACP,IAAI,CAAC,IAAI,CAAC,QAAQ,KAAK,SAAS,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,CAAA;QAClE,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;QAChB,MAAM,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC,IAAI,IAAI,CAAC,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAA;QAC/D,4BAA4B;QAC5B,IAAI,IAAI,CAAC,IAAI,CAAC,aAAa,EAAE;YAC3B,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,CAAA;SAClB;aAAM,IAAI,GAAG,EAAE;YACd,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,aAAa,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAA;YAC9D,IAAI,CAAC,SAAS,CAAC,GAAG,GAAG,IAAI,CAAC,CAAA;SAC3B;aAAM;YACL,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,aAAa,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAA;YAC9D,MAAM,GAAG,GACP,IAAI,CAAC,IAAI,CAAC,WAAW,IAAI,CAAC,GAAG,CAAC,UAAU,CAAC,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC;gBACxD,CAAC,CAAC,GAAG,GAAG,IAAI,CAAC,IAAI;gBACjB,CAAC,CAAC,EAAE,CAAA;YACR,IAAI,CAAC,SAAS,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,GAAG,IAAI,CAAC,CAAC,CAAC,GAAG,GAAG,GAAG,GAAG,IAAI,CAAC,CAAA;SACrD;IACH,CAAC;IAED,KAAK,CAAC,KAAK,CAAC,CAAO,EAAE,QAAiB,EAAE,KAAc;QACpD,MAAM,CAAC,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;QACzC,IAAI,CAAC;YAAE,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAA;IACtC,CAAC;IAED,SAAS,CAAC,CAAO,EAAE,QAAiB,EAAE,KAAc;QAClD,MAAM,CAAC,GAAG,IAAI,CAAC,cAAc,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;QACvC,IAAI,CAAC;YAAE,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAA;IACtC,CAAC;IAED,MAAM,CAAC,MAAY,EAAE,QAAmB,EAAE,EAAa;QACrD,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,oBAAoB;QACpB,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,IAAI,wBAAS,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,CAAA;IAC9D,CAAC;IAED,OAAO,CACL,MAAY,EACZ,QAAmB,EACnB,SAAoB,EACpB,EAAa;QAEb,IAAI,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAAC;YAAE,OAAO,EAAE,EAAE,CAAA;QAC9C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,EAAE,EAAE,CAAC,CAAC,CAAA;YAClE,OAAM;SACP;QACD,SAAS,CAAC,eAAe,CAAC,MAAM,EAAE,QAAQ,CAAC,CAAA;QAE3C,qEAAqE;QACrE,4DAA4D;QAC5D,yDAAyD;QACzD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,KAAK,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,IAAI,EAAE,CAAC,CAAA;SAClD;QAED,KAAK,MAAM,CAAC,IAAI,SAAS,CAAC,cAAc,EAAE,EAAE;YAC1C,IAAI,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,EAAE;gBAC5D,SAAQ;aACT;YACD,KAAK,EAAE,CAAA;YACP,MAAM,cAAc,GAAG,CAAC,CAAC,aAAa,EAAE,CAAA;YACxC,IAAI,CAAC,CAAC,aAAa,EAAE;gBACnB,IAAI,CAAC,OAAO,CAAC,CAAC,EAAE,cAAc,EAAE,SAAS,EAAE,IAAI,CAAC,CAAA;iBAC7C;gBACH,CAAC,CAAC,SAAS,CA
CT,CAAC,CAAC,EAAE,OAAO,EAAE,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,EAAE,OAAO,EAAE,SAAS,EAAE,IAAI,CAAC,EACzD,IAAI,CACL,CAAA;aACF;SACF;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,OAAO,CACL,MAAY,EACZ,OAAe,EACf,SAAoB,EACpB,EAAa;QAEb,SAAS,GAAG,SAAS,CAAC,aAAa,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;QAEpD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,KAAK,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,IAAI,EAAE,CAAC,CAAA;SAClD;QACD,KAAK,MAAM,CAAC,MAAM,EAAE,QAAQ,CAAC,IAAI,SAAS,CAAC,QAAQ,CAAC,OAAO,EAAE,EAAE;YAC7D,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,CAAC,KAAK,EAAE,EAAE,IAAI,CAAC,CAAA;SACxD;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,UAAU,CAAC,MAAY,EAAE,QAAmB,EAAE,EAAa;QACzD,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,oBAAoB;QACpB,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,IAAI,wBAAS,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,CAAA;IAClE,CAAC;IAED,WAAW,CACT,MAAY,EACZ,QAAmB,EACnB,SAAoB,EACpB,EAAa;QAEb,IAAI,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAAC;YAAE,OAAO,EAAE,EAAE,CAAA;QAC9C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,QAAQ,CAAC,GAAG,EAAE,CACjB,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,EAAE,EAAE,CAAC,CAClD,CAAA;YACD,OAAM;SACP;QACD,SAAS,CAAC,eAAe,CAAC,MAAM,EAAE,QAAQ,CAAC,CAAA;QAE3C,qEAAqE;QACrE,4DAA4D;QAC5D,yDAAyD;QACzD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,IAAI,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAA;SACnC;QAED,KAAK,MAAM,CAAC,IAAI,SAAS,CAAC,cAAc,EAAE,EAAE;YAC1C,IAAI,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,EAAE;gBAC5D,SAAQ;aACT;YACD,KAAK,EAAE,CAAA;YACP,MAAM,QAAQ,GAAG,CAAC,CAAC,WAAW,EAAE,CAAA;YAChC,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,CAAC,CAAA;SAC/C;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,WAAW,CACT,MAAY,EACZ,OAAe,EACf,SAAoB,EACpB,EAAa;QAEb,SAAS,GAAG,SAAS,CAAC,aAAa,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;QAEpD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,IAAI,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAA;SACnC;QACD,KAAK,MAAM,CAAC,MAAM,EAAE,QAAQ,CAAC,IAAI,SAAS,CAAC,QAAQ,CAAC,OAAO,EAAE,EAAE;YAC7D,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,CAAC,KAAK,EAAE,EAAE,IAAI,CAAC,CAAA;SAC5D;QAED,IAAI,EAAE,CAAA;IACR,CAAC;CACF;AAlSD,4BAkSC;AAED,MAAa,UAEX,SAAQ,QAAW;IACnB,OAAO,CAMe;IAEtB,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,KAAK,CAAC,QAAQ,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;QAC3B,IAAI,CAAC,OAAO,GAAG,IAAI,GAAG,EAAgB,CAAA;IACxC,CAAC;IAGD,SAAS,CAAC,CAAgB;QACxB,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;IACrB,CAAC;IAED,KAAK,CAAC,IAAI;QACR,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QAClD,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,MAAM,IAAI,CAAC,IAAI,CAAC,KAAK,EAAE,CAAA;SACxB;QACD,MAAM,IAAI,OAAO,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE;YAC7B,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE;gBACzC,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE;oBACxB,GAA
G,CAAC,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,CAAA;iBACxB;qBAAM;oBACL,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,CAAA;iBAClB;YACH,CAAC,CAAC,CAAA;QACJ,CAAC,CAAC,CAAA;QACF,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;IAED,QAAQ;QACN,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QAClD,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,CAAA;SACtB;QACD,4DAA4D;QAC5D,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE;YAC7C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;gBAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QACpD,CAAC,CAAC,CAAA;QACF,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;CACF;AAjDD,gCAiDC;AAED,MAAa,UAEX,SAAQ,QAAW;IACnB,OAAO,CAMmC;IAE1C,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,KAAK,CAAC,QAAQ,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;QAC3B,IAAI,CAAC,OAAO,GAAG,IAAI,mBAAQ,CAAC;YAC1B,MAAM,EAAE,IAAI,CAAC,MAAM;YACnB,UAAU,EAAE,IAAI;SACjB,CAAmB,CAAA;QACpB,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,OAAO,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAA;QAC7C,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAA;IAChD,CAAC;IAGD,SAAS,CAAC,CAAgB;QACxB,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,CAAA;QACrB,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,OAAO;YAAE,IAAI,CAAC,KAAK,EAAE,CAAA;IACzC,CAAC;IAED,MAAM;QACJ,MAAM,MAAM,GAAG,IAAI,CAAC,IAAI,CAAA;QACxB,IAAI,MAAM,CAAC,SAAS,EAAE,EAAE;YACtB,MAAM,CAAC,KAAK,EAAE,CAAC,IAAI,CAAC,GAAG,EAAE;gBACvB,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;YAC9D,CAAC,CAAC,CAAA;SACH;aAAM;YACL,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;SAC7D;QACD,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;IAED,UAAU;QACR,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,CAAA;SACtB;QACD,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;QACnE,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;CACF;AA9CD,gCA8CC","sourcesContent":["/**\n * Single-use utility classes to provide functionality to the {@link Glob}\n * methods.\n *\n * @module\n */\nimport { Minipass } from 'minipass'\nimport { Path } from 'path-scurry'\nimport { Ignore, IgnoreLike } from './ignore.js'\n\n// XXX can we somehow make it so that it NEVER processes a given path more than\n// once, enough that the match set tracking is no longer needed? that'd speed\n// things up a lot. Or maybe bring back nounique, and skip it in that case?\n\n// a single minimatch set entry with 1 or more parts\nimport { Pattern } from './pattern.js'\nimport { Processor } from './processor.js'\n\nexport interface GlobWalkerOpts {\n absolute?: boolean\n allowWindowsEscape?: boolean\n cwd?: string | URL\n dot?: boolean\n dotRelative?: boolean\n follow?: boolean\n ignore?: string | string[] | IgnoreLike\n mark?: boolean\n matchBase?: boolean\n // Note: maxDepth here means \"maximum actual Path.depth()\",\n // not \"maximum depth beyond cwd\"\n maxDepth?: number\n nobrace?: boolean\n nocase?: boolean\n nodir?: boolean\n noext?: boolean\n noglobstar?: boolean\n platform?: NodeJS.Platform\n posix?: boolean\n realpath?: boolean\n root?: string\n stat?: boolean\n signal?: AbortSignal\n windowsPathsNoEscape?: boolean\n withFileTypes?: boolean\n}\n\nexport type GWOFileTypesTrue = GlobWalkerOpts & {\n withFileTypes: true\n}\nexport type GWOFileTypesFalse = GlobWalkerOpts & {\n withFileTypes: false\n}\nexport type GWOFileTypesUnset = GlobWalkerOpts & {\n withFileTypes?: undefined\n}\n\nexport type Result = O extends GWOFileTypesTrue\n ? Path\n : O extends GWOFileTypesFalse\n ? 
string\n : O extends GWOFileTypesUnset\n ? string\n : Path | string\n\nexport type Matches = O extends GWOFileTypesTrue\n ? Set\n : O extends GWOFileTypesFalse\n ? Set\n : O extends GWOFileTypesUnset\n ? Set\n : Set\n\nexport type MatchStream =\n O extends GWOFileTypesTrue\n ? Minipass\n : O extends GWOFileTypesFalse\n ? Minipass\n : O extends GWOFileTypesUnset\n ? Minipass\n : Minipass\n\nconst makeIgnore = (\n ignore: string | string[] | IgnoreLike,\n opts: GlobWalkerOpts\n): IgnoreLike =>\n typeof ignore === 'string'\n ? new Ignore([ignore], opts)\n : Array.isArray(ignore)\n ? new Ignore(ignore, opts)\n : ignore\n\n/**\n * basic walking utilities that all the glob walker types use\n */\nexport abstract class GlobUtil {\n path: Path\n patterns: Pattern[]\n opts: O\n seen: Set = new Set()\n paused: boolean = false\n aborted: boolean = false\n #onResume: (() => any)[] = []\n #ignore?: IgnoreLike\n #sep: '\\\\' | '/'\n signal?: AbortSignal\n maxDepth: number\n\n constructor(patterns: Pattern[], path: Path, opts: O)\n constructor(patterns: Pattern[], path: Path, opts: O) {\n this.patterns = patterns\n this.path = path\n this.opts = opts\n this.#sep = !opts.posix && opts.platform === 'win32' ? '\\\\' : '/'\n if (opts.ignore) {\n this.#ignore = makeIgnore(opts.ignore, opts)\n }\n // ignore, always set with maxDepth, but it's optional on the\n // GlobOptions type\n /* c8 ignore start */\n this.maxDepth = opts.maxDepth || Infinity\n /* c8 ignore stop */\n if (opts.signal) {\n this.signal = opts.signal\n this.signal.addEventListener('abort', () => {\n this.#onResume.length = 0\n })\n }\n }\n\n #ignored(path: Path): boolean {\n return this.seen.has(path) || !!this.#ignore?.ignored?.(path)\n }\n #childrenIgnored(path: Path): boolean {\n return !!this.#ignore?.childrenIgnored?.(path)\n }\n\n // backpressure mechanism\n pause() {\n this.paused = true\n }\n resume() {\n /* c8 ignore start */\n if (this.signal?.aborted) return\n /* c8 ignore stop */\n this.paused = false\n let fn: (() => any) | undefined = undefined\n while (!this.paused && (fn = this.#onResume.shift())) {\n fn()\n }\n }\n onResume(fn: () => any) {\n if (this.signal?.aborted) return\n /* c8 ignore start */\n if (!this.paused) {\n fn()\n } else {\n /* c8 ignore stop */\n this.#onResume.push(fn)\n }\n }\n\n // do the requisite realpath/stat checking, and return the path\n // to add or undefined to filter it out.\n async matchCheck(e: Path, ifDir: boolean): Promise {\n if (ifDir && this.opts.nodir) return undefined\n let rpc: Path | undefined\n if (this.opts.realpath) {\n rpc = e.realpathCached() || (await e.realpath())\n if (!rpc) return undefined\n e = rpc\n }\n const needStat = e.isUnknown() || this.opts.stat\n return this.matchCheckTest(needStat ? await e.lstat() : e, ifDir)\n }\n\n matchCheckTest(e: Path | undefined, ifDir: boolean): Path | undefined {\n return e &&\n (this.maxDepth === Infinity || e.depth() <= this.maxDepth) &&\n (!ifDir || e.canReaddir()) &&\n (!this.opts.nodir || !e.isDirectory()) &&\n !this.#ignored(e)\n ? e\n : undefined\n }\n\n matchCheckSync(e: Path, ifDir: boolean): Path | undefined {\n if (ifDir && this.opts.nodir) return undefined\n let rpc: Path | undefined\n if (this.opts.realpath) {\n rpc = e.realpathCached() || e.realpathSync()\n if (!rpc) return undefined\n e = rpc\n }\n const needStat = e.isUnknown() || this.opts.stat\n return this.matchCheckTest(needStat ? 
e.lstatSync() : e, ifDir)\n }\n\n abstract matchEmit(p: Result): void\n abstract matchEmit(p: string | Path): void\n\n matchFinish(e: Path, absolute: boolean) {\n if (this.#ignored(e)) return\n const abs =\n this.opts.absolute === undefined ? absolute : this.opts.absolute\n this.seen.add(e)\n const mark = this.opts.mark && e.isDirectory() ? this.#sep : ''\n // ok, we have what we need!\n if (this.opts.withFileTypes) {\n this.matchEmit(e)\n } else if (abs) {\n const abs = this.opts.posix ? e.fullpathPosix() : e.fullpath()\n this.matchEmit(abs + mark)\n } else {\n const rel = this.opts.posix ? e.relativePosix() : e.relative()\n const pre =\n this.opts.dotRelative && !rel.startsWith('..' + this.#sep)\n ? '.' + this.#sep\n : ''\n this.matchEmit(!rel ? '.' + mark : pre + rel + mark)\n }\n }\n\n async match(e: Path, absolute: boolean, ifDir: boolean): Promise {\n const p = await this.matchCheck(e, ifDir)\n if (p) this.matchFinish(p, absolute)\n }\n\n matchSync(e: Path, absolute: boolean, ifDir: boolean): void {\n const p = this.matchCheckSync(e, ifDir)\n if (p) this.matchFinish(p, absolute)\n }\n\n walkCB(target: Path, patterns: Pattern[], cb: () => any) {\n /* c8 ignore start */\n if (this.signal?.aborted) cb()\n /* c8 ignore stop */\n this.walkCB2(target, patterns, new Processor(this.opts), cb)\n }\n\n walkCB2(\n target: Path,\n patterns: Pattern[],\n processor: Processor,\n cb: () => any\n ) {\n if (this.#childrenIgnored(target)) return cb()\n if (this.signal?.aborted) cb()\n if (this.paused) {\n this.onResume(() => this.walkCB2(target, patterns, processor, cb))\n return\n }\n processor.processPatterns(target, patterns)\n\n // done processing. all of the above is sync, can be abstracted out.\n // subwalks is a map of paths to the entry filters they need\n // matches is a map of paths to [absolute, ifDir] tuples.\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n tasks++\n this.match(m, absolute, ifDir).then(() => next())\n }\n\n for (const t of processor.subwalkTargets()) {\n if (this.maxDepth !== Infinity && t.depth() >= this.maxDepth) {\n continue\n }\n tasks++\n const childrenCached = t.readdirCached()\n if (t.calledReaddir())\n this.walkCB3(t, childrenCached, processor, next)\n else {\n t.readdirCB(\n (_, entries) => this.walkCB3(t, entries, processor, next),\n true\n )\n }\n }\n\n next()\n }\n\n walkCB3(\n target: Path,\n entries: Path[],\n processor: Processor,\n cb: () => any\n ) {\n processor = processor.filterEntries(target, entries)\n\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n tasks++\n this.match(m, absolute, ifDir).then(() => next())\n }\n for (const [target, patterns] of processor.subwalks.entries()) {\n tasks++\n this.walkCB2(target, patterns, processor.child(), next)\n }\n\n next()\n }\n\n walkCBSync(target: Path, patterns: Pattern[], cb: () => any) {\n /* c8 ignore start */\n if (this.signal?.aborted) cb()\n /* c8 ignore stop */\n this.walkCB2Sync(target, patterns, new Processor(this.opts), cb)\n }\n\n walkCB2Sync(\n target: Path,\n patterns: Pattern[],\n processor: Processor,\n cb: () => any\n ) {\n if (this.#childrenIgnored(target)) return cb()\n if (this.signal?.aborted) cb()\n if (this.paused) {\n this.onResume(() =>\n this.walkCB2Sync(target, patterns, processor, cb)\n )\n return\n }\n processor.processPatterns(target, 
patterns)\n\n // done processing. all of the above is sync, can be abstracted out.\n // subwalks is a map of paths to the entry filters they need\n // matches is a map of paths to [absolute, ifDir] tuples.\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n this.matchSync(m, absolute, ifDir)\n }\n\n for (const t of processor.subwalkTargets()) {\n if (this.maxDepth !== Infinity && t.depth() >= this.maxDepth) {\n continue\n }\n tasks++\n const children = t.readdirSync()\n this.walkCB3Sync(t, children, processor, next)\n }\n\n next()\n }\n\n walkCB3Sync(\n target: Path,\n entries: Path[],\n processor: Processor,\n cb: () => any\n ) {\n processor = processor.filterEntries(target, entries)\n\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n this.matchSync(m, absolute, ifDir)\n }\n for (const [target, patterns] of processor.subwalks.entries()) {\n tasks++\n this.walkCB2Sync(target, patterns, processor.child(), next)\n }\n\n next()\n }\n}\n\nexport class GlobWalker<\n O extends GlobWalkerOpts = GlobWalkerOpts\n> extends GlobUtil {\n matches: O extends GWOFileTypesTrue\n ? Set\n : O extends GWOFileTypesFalse\n ? Set\n : O extends GWOFileTypesUnset\n ? Set\n : Set\n\n constructor(patterns: Pattern[], path: Path, opts: O) {\n super(patterns, path, opts)\n this.matches = new Set() as Matches\n }\n\n matchEmit(e: Result): void\n matchEmit(e: Path | string): void {\n this.matches.add(e)\n }\n\n async walk(): Promise> {\n if (this.signal?.aborted) throw this.signal.reason\n if (this.path.isUnknown()) {\n await this.path.lstat()\n }\n await new Promise((res, rej) => {\n this.walkCB(this.path, this.patterns, () => {\n if (this.signal?.aborted) {\n rej(this.signal.reason)\n } else {\n res(this.matches)\n }\n })\n })\n return this.matches\n }\n\n walkSync(): Matches {\n if (this.signal?.aborted) throw this.signal.reason\n if (this.path.isUnknown()) {\n this.path.lstatSync()\n }\n // nothing for the callback to do, because this never pauses\n this.walkCBSync(this.path, this.patterns, () => {\n if (this.signal?.aborted) throw this.signal.reason\n })\n return this.matches\n }\n}\n\nexport class GlobStream<\n O extends GlobWalkerOpts = GlobWalkerOpts\n> extends GlobUtil {\n results: O extends GWOFileTypesTrue\n ? Minipass\n : O extends GWOFileTypesFalse\n ? Minipass\n : O extends GWOFileTypesUnset\n ? 
Minipass\n : Minipass\n\n constructor(patterns: Pattern[], path: Path, opts: O) {\n super(patterns, path, opts)\n this.results = new Minipass({\n signal: this.signal,\n objectMode: true,\n }) as MatchStream\n this.results.on('drain', () => this.resume())\n this.results.on('resume', () => this.resume())\n }\n\n matchEmit(e: Result): void\n matchEmit(e: Path | string): void {\n this.results.write(e)\n if (!this.results.flowing) this.pause()\n }\n\n stream(): MatchStream {\n const target = this.path\n if (target.isUnknown()) {\n target.lstat().then(() => {\n this.walkCB(target, this.patterns, () => this.results.end())\n })\n } else {\n this.walkCB(target, this.patterns, () => this.results.end())\n }\n return this.results\n }\n\n streamSync(): MatchStream {\n if (this.path.isUnknown()) {\n this.path.lstatSync()\n }\n this.walkCBSync(this.path, this.patterns, () => this.results.end())\n return this.results\n }\n}\n"]} \ No newline at end of file +{"version":3,"file":"walker.js","sourceRoot":"","sources":["../../src/walker.ts"],"names":[],"mappings":";;;AAAA;;;;;GAKG;AACH,uCAAmC;AAEnC,2CAAgD;AAQhD,iDAA0C;AAiE1C,MAAM,UAAU,GAAG,CACjB,MAAsC,EACtC,IAAoB,EACR,EAAE,CACd,OAAO,MAAM,KAAK,QAAQ;IACxB,CAAC,CAAC,IAAI,kBAAM,CAAC,CAAC,MAAM,CAAC,EAAE,IAAI,CAAC;IAC5B,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC;QACvB,CAAC,CAAC,IAAI,kBAAM,CAAC,MAAM,EAAE,IAAI,CAAC;QAC1B,CAAC,CAAC,MAAM,CAAA;AAEZ;;GAEG;AACH,MAAsB,QAAQ;IAC5B,IAAI,CAAM;IACV,QAAQ,CAAW;IACnB,IAAI,CAAG;IACP,IAAI,GAAc,IAAI,GAAG,EAAQ,CAAA;IACjC,MAAM,GAAY,KAAK,CAAA;IACvB,OAAO,GAAY,KAAK,CAAA;IACxB,SAAS,GAAkB,EAAE,CAAA;IAC7B,OAAO,CAAa;IACpB,IAAI,CAAY;IAChB,MAAM,CAAc;IACpB,QAAQ,CAAQ;IAGhB,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAA;QACxB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAA;QAChB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAA;QAChB,IAAI,CAAC,IAAI,GAAG,CAAC,IAAI,CAAC,KAAK,IAAI,IAAI,CAAC,QAAQ,KAAK,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAA;QACjE,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,OAAO,GAAG,UAAU,CAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,CAAA;SAC7C;QACD,6DAA6D;QAC7D,mBAAmB;QACnB,qBAAqB;QACrB,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC,QAAQ,IAAI,QAAQ,CAAA;QACzC,oBAAoB;QACpB,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAA;YACzB,IAAI,CAAC,MAAM,CAAC,gBAAgB,CAAC,OAAO,EAAE,GAAG,EAAE;gBACzC,IAAI,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,CAAA;YAC3B,CAAC,CAAC,CAAA;SACH;IACH,CAAC;IAED,QAAQ,CAAC,IAAU;QACjB,OAAO,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,OAAO,EAAE,OAAO,EAAE,CAAC,IAAI,CAAC,CAAA;IAC/D,CAAC;IACD,gBAAgB,CAAC,IAAU;QACzB,OAAO,CAAC,CAAC,IAAI,CAAC,OAAO,EAAE,eAAe,EAAE,CAAC,IAAI,CAAC,CAAA;IAChD,CAAC;IAED,yBAAyB;IACzB,KAAK;QACH,IAAI,CAAC,MAAM,GAAG,IAAI,CAAA;IACpB,CAAC;IACD,MAAM;QACJ,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,OAAM;QAChC,oBAAoB;QACpB,IAAI,CAAC,MAAM,GAAG,KAAK,CAAA;QACnB,IAAI,EAAE,GAA4B,SAAS,CAAA;QAC3C,OAAO,CAAC,IAAI,CAAC,MAAM,IAAI,CAAC,EAAE,GAAG,IAAI,CAAC,SAAS,CAAC,KAAK,EAAE,CAAC,EAAE;YACpD,EAAE,EAAE,CAAA;SACL;IACH,CAAC;IACD,QAAQ,CAAC,EAAa;QACpB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,OAAM;QAChC,qBAAqB;QACrB,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE;YAChB,EAAE,EAAE,CAAA;SACL;aAAM;YACL,oBAAoB;YACpB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EAAE,CAAC,CAAA;SACxB;IACH,CAAC;IAED,+DAA+D;IAC/D,wCAAwC;IACxC,KAAK,CAAC,UAAU,CAAC,CAAO,EAAE,KAAc;QACtC,IAAI,KAAK,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK;YAAE,OAAO,SAAS,CAAA;QAC9C,IAAI,GAAqB,CAAA;QACzB,IAAI,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;YACtB,GAAG,GAAG,CAAC,CAAC,cAAc,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAA;YAChD,IAAI,CAAC,GAAG;gBAAE,OAAO,SAAS,CAAA;YAC1B,CAAC,GAAG,GAAG,CAAA;SACR;QACD,MAAM,QAAQ,GAAG,CAAC,CAAC,SAAS,EAAE,IAAI,IAAI,CAA
C,IAAI,CAAC,IAAI,CAAA;QAChD,MAAM,CAAC,GAAG,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,CAAA;QACxC,IAAI,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK,IAAI,CAAC,EAAE,cAAc,EAAE,EAAE;YAC9D,MAAM,MAAM,GAAG,MAAM,CAAC,CAAC,QAAQ,EAAE,CAAA;YACjC,qBAAqB;YACrB,IAAI,MAAM,IAAI,CAAC,MAAM,CAAC,SAAS,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE;gBACpD,MAAM,MAAM,CAAC,KAAK,EAAE,CAAA;aACrB;YACD,oBAAoB;SACrB;QACD,OAAO,IAAI,CAAC,cAAc,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;IACtC,CAAC;IAED,cAAc,CAAC,CAAmB,EAAE,KAAc;QAChD,OAAO,CAAC;YACN,CAAC,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,CAAC;YAC1D,CAAC,CAAC,KAAK,IAAI,CAAC,CAAC,UAAU,EAAE,CAAC;YAC1B,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK,IAAI,CAAC,CAAC,CAAC,WAAW,EAAE,CAAC;YACtC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK;gBACf,CAAC,IAAI,CAAC,IAAI,CAAC,MAAM;gBACjB,CAAC,CAAC,CAAC,cAAc,EAAE;gBACnB,CAAC,CAAC,CAAC,cAAc,EAAE,EAAE,WAAW,EAAE,CAAC;YACrC,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;YACjB,CAAC,CAAC,CAAC;YACH,CAAC,CAAC,SAAS,CAAA;IACf,CAAC;IAED,cAAc,CAAC,CAAO,EAAE,KAAc;QACpC,IAAI,KAAK,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK;YAAE,OAAO,SAAS,CAAA;QAC9C,IAAI,GAAqB,CAAA;QACzB,IAAI,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;YACtB,GAAG,GAAG,CAAC,CAAC,cAAc,EAAE,IAAI,CAAC,CAAC,YAAY,EAAE,CAAA;YAC5C,IAAI,CAAC,GAAG;gBAAE,OAAO,SAAS,CAAA;YAC1B,CAAC,GAAG,GAAG,CAAA;SACR;QACD,MAAM,QAAQ,GAAG,CAAC,CAAC,SAAS,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAA;QAChD,MAAM,CAAC,GAAG,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAA;QACtC,IAAI,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK,IAAI,CAAC,EAAE,cAAc,EAAE,EAAE;YAC9D,MAAM,MAAM,GAAG,CAAC,CAAC,YAAY,EAAE,CAAA;YAC/B,IAAI,MAAM,IAAI,CAAC,MAAM,EAAE,SAAS,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE;gBACrD,MAAM,CAAC,SAAS,EAAE,CAAA;aACnB;SACF;QACD,OAAO,IAAI,CAAC,cAAc,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;IACtC,CAAC;IAKD,WAAW,CAAC,CAAO,EAAE,QAAiB;QACpC,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;YAAE,OAAM;QAC5B,MAAM,GAAG,GACP,IAAI,CAAC,IAAI,CAAC,QAAQ,KAAK,SAAS,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,CAAA;QAClE,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;QAChB,MAAM,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC,IAAI,IAAI,CAAC,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAA;QAC/D,4BAA4B;QAC5B,IAAI,IAAI,CAAC,IAAI,CAAC,aAAa,EAAE;YAC3B,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,CAAA;SAClB;aAAM,IAAI,GAAG,EAAE;YACd,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,aAAa,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAA;YAC9D,IAAI,CAAC,SAAS,CAAC,GAAG,GAAG,IAAI,CAAC,CAAA;SAC3B;aAAM;YACL,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,aAAa,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAA;YAC9D,MAAM,GAAG,GACP,IAAI,CAAC,IAAI,CAAC,WAAW,IAAI,CAAC,GAAG,CAAC,UAAU,CAAC,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC;gBACxD,CAAC,CAAC,GAAG,GAAG,IAAI,CAAC,IAAI;gBACjB,CAAC,CAAC,EAAE,CAAA;YACR,IAAI,CAAC,SAAS,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,GAAG,IAAI,CAAC,CAAC,CAAC,GAAG,GAAG,GAAG,GAAG,IAAI,CAAC,CAAA;SACrD;IACH,CAAC;IAED,KAAK,CAAC,KAAK,CAAC,CAAO,EAAE,QAAiB,EAAE,KAAc;QACpD,MAAM,CAAC,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;QACzC,IAAI,CAAC;YAAE,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAA;IACtC,CAAC;IAED,SAAS,CAAC,CAAO,EAAE,QAAiB,EAAE,KAAc;QAClD,MAAM,CAAC,GAAG,IAAI,CAAC,cAAc,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;QACvC,IAAI,CAAC;YAAE,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAA;IACtC,CAAC;IAED,MAAM,CAAC,MAAY,EAAE,QAAmB,EAAE,EAAa;QACrD,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,oBAAoB;QACpB,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,IAAI,wBAAS,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,CAAA;IAC9D,CAAC;IAED,OAAO,CACL,MAAY,EACZ,QAA
mB,EACnB,SAAoB,EACpB,EAAa;QAEb,IAAI,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAAC;YAAE,OAAO,EAAE,EAAE,CAAA;QAC9C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,EAAE,EAAE,CAAC,CAAC,CAAA;YAClE,OAAM;SACP;QACD,SAAS,CAAC,eAAe,CAAC,MAAM,EAAE,QAAQ,CAAC,CAAA;QAE3C,qEAAqE;QACrE,4DAA4D;QAC5D,yDAAyD;QACzD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,KAAK,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,IAAI,EAAE,CAAC,CAAA;SAClD;QAED,KAAK,MAAM,CAAC,IAAI,SAAS,CAAC,cAAc,EAAE,EAAE;YAC1C,IAAI,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,EAAE;gBAC5D,SAAQ;aACT;YACD,KAAK,EAAE,CAAA;YACP,MAAM,cAAc,GAAG,CAAC,CAAC,aAAa,EAAE,CAAA;YACxC,IAAI,CAAC,CAAC,aAAa,EAAE;gBACnB,IAAI,CAAC,OAAO,CAAC,CAAC,EAAE,cAAc,EAAE,SAAS,EAAE,IAAI,CAAC,CAAA;iBAC7C;gBACH,CAAC,CAAC,SAAS,CACT,CAAC,CAAC,EAAE,OAAO,EAAE,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,EAAE,OAAO,EAAE,SAAS,EAAE,IAAI,CAAC,EACzD,IAAI,CACL,CAAA;aACF;SACF;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,OAAO,CACL,MAAY,EACZ,OAAe,EACf,SAAoB,EACpB,EAAa;QAEb,SAAS,GAAG,SAAS,CAAC,aAAa,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;QAEpD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,KAAK,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,IAAI,EAAE,CAAC,CAAA;SAClD;QACD,KAAK,MAAM,CAAC,MAAM,EAAE,QAAQ,CAAC,IAAI,SAAS,CAAC,QAAQ,CAAC,OAAO,EAAE,EAAE;YAC7D,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,CAAC,KAAK,EAAE,EAAE,IAAI,CAAC,CAAA;SACxD;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,UAAU,CAAC,MAAY,EAAE,QAAmB,EAAE,EAAa;QACzD,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,oBAAoB;QACpB,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,IAAI,wBAAS,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,CAAA;IAClE,CAAC;IAED,WAAW,CACT,MAAY,EACZ,QAAmB,EACnB,SAAoB,EACpB,EAAa;QAEb,IAAI,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAAC;YAAE,OAAO,EAAE,EAAE,CAAA;QAC9C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,QAAQ,CAAC,GAAG,EAAE,CACjB,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,EAAE,EAAE,CAAC,CAClD,CAAA;YACD,OAAM;SACP;QACD,SAAS,CAAC,eAAe,CAAC,MAAM,EAAE,QAAQ,CAAC,CAAA;QAE3C,qEAAqE;QACrE,4DAA4D;QAC5D,yDAAyD;QACzD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,IAAI,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAA;SACnC;QAED,KAAK,MAAM,CAAC,IAAI,SAAS,CAAC,cAAc,EAAE,EAAE;YAC1C,IAAI,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,EAAE;gBAC5D,SAAQ;aACT;YACD,KAAK,EAAE,CAAA;YACP,MAAM,QAAQ,GAAG,CAAC,CAAC,WAAW,EAAE,CAAA;YAChC,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,CAAC,CAAA;SAC/C;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,WAAW,CACT,MAAY,EACZ,OAAe,EACf,SAAoB,EACpB,EAAa;QAEb,SAAS,GAAG,SAAS,CAAC,aAAa,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;QAEpD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAA
E,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,IAAI,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAA;SACnC;QACD,KAAK,MAAM,CAAC,MAAM,EAAE,QAAQ,CAAC,IAAI,SAAS,CAAC,QAAQ,CAAC,OAAO,EAAE,EAAE;YAC7D,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,CAAC,KAAK,EAAE,EAAE,IAAI,CAAC,CAAA;SAC5D;QAED,IAAI,EAAE,CAAA;IACR,CAAC;CACF;AAtTD,4BAsTC;AAED,MAAa,UAEX,SAAQ,QAAW;IACnB,OAAO,CAMe;IAEtB,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,KAAK,CAAC,QAAQ,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;QAC3B,IAAI,CAAC,OAAO,GAAG,IAAI,GAAG,EAAgB,CAAA;IACxC,CAAC;IAGD,SAAS,CAAC,CAAgB;QACxB,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;IACrB,CAAC;IAED,KAAK,CAAC,IAAI;QACR,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QAClD,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,MAAM,IAAI,CAAC,IAAI,CAAC,KAAK,EAAE,CAAA;SACxB;QACD,MAAM,IAAI,OAAO,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE;YAC7B,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE;gBACzC,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE;oBACxB,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,CAAA;iBACxB;qBAAM;oBACL,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,CAAA;iBAClB;YACH,CAAC,CAAC,CAAA;QACJ,CAAC,CAAC,CAAA;QACF,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;IAED,QAAQ;QACN,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QAClD,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,CAAA;SACtB;QACD,4DAA4D;QAC5D,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE;YAC7C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;gBAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QACpD,CAAC,CAAC,CAAA;QACF,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;CACF;AAjDD,gCAiDC;AAED,MAAa,UAEX,SAAQ,QAAW;IACnB,OAAO,CAMmC;IAE1C,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,KAAK,CAAC,QAAQ,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;QAC3B,IAAI,CAAC,OAAO,GAAG,IAAI,mBAAQ,CAAC;YAC1B,MAAM,EAAE,IAAI,CAAC,MAAM;YACnB,UAAU,EAAE,IAAI;SACjB,CAAmB,CAAA;QACpB,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,OAAO,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAA;QAC7C,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAA;IAChD,CAAC;IAGD,SAAS,CAAC,CAAgB;QACxB,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,CAAA;QACrB,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,OAAO;YAAE,IAAI,CAAC,KAAK,EAAE,CAAA;IACzC,CAAC;IAED,MAAM;QACJ,MAAM,MAAM,GAAG,IAAI,CAAC,IAAI,CAAA;QACxB,IAAI,MAAM,CAAC,SAAS,EAAE,EAAE;YACtB,MAAM,CAAC,KAAK,EAAE,CAAC,IAAI,CAAC,GAAG,EAAE;gBACvB,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;YAC9D,CAAC,CAAC,CAAA;SACH;aAAM;YACL,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;SAC7D;QACD,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;IAED,UAAU;QACR,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,CAAA;SACtB;QACD,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;QACnE,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;CACF;AA9CD,gCA8CC","sourcesContent":["/**\n * Single-use utility classes to provide functionality to the {@link Glob}\n * methods.\n *\n * @module\n */\nimport { Minipass } from 'minipass'\nimport { Path } from 'path-scurry'\nimport { Ignore, IgnoreLike } from './ignore.js'\n\n// XXX can we somehow make it so that it NEVER processes a given path more than\n// once, enough that the match set tracking is no longer needed? that'd speed\n// things up a lot. 
Or maybe bring back nounique, and skip it in that case?\n\n// a single minimatch set entry with 1 or more parts\nimport { Pattern } from './pattern.js'\nimport { Processor } from './processor.js'\n\nexport interface GlobWalkerOpts {\n absolute?: boolean\n allowWindowsEscape?: boolean\n cwd?: string | URL\n dot?: boolean\n dotRelative?: boolean\n follow?: boolean\n ignore?: string | string[] | IgnoreLike\n mark?: boolean\n matchBase?: boolean\n // Note: maxDepth here means \"maximum actual Path.depth()\",\n // not \"maximum depth beyond cwd\"\n maxDepth?: number\n nobrace?: boolean\n nocase?: boolean\n nodir?: boolean\n noext?: boolean\n noglobstar?: boolean\n platform?: NodeJS.Platform\n posix?: boolean\n realpath?: boolean\n root?: string\n stat?: boolean\n signal?: AbortSignal\n windowsPathsNoEscape?: boolean\n withFileTypes?: boolean\n}\n\nexport type GWOFileTypesTrue = GlobWalkerOpts & {\n withFileTypes: true\n}\nexport type GWOFileTypesFalse = GlobWalkerOpts & {\n withFileTypes: false\n}\nexport type GWOFileTypesUnset = GlobWalkerOpts & {\n withFileTypes?: undefined\n}\n\nexport type Result = O extends GWOFileTypesTrue\n ? Path\n : O extends GWOFileTypesFalse\n ? string\n : O extends GWOFileTypesUnset\n ? string\n : Path | string\n\nexport type Matches = O extends GWOFileTypesTrue\n ? Set\n : O extends GWOFileTypesFalse\n ? Set\n : O extends GWOFileTypesUnset\n ? Set\n : Set\n\nexport type MatchStream =\n O extends GWOFileTypesTrue\n ? Minipass\n : O extends GWOFileTypesFalse\n ? Minipass\n : O extends GWOFileTypesUnset\n ? Minipass\n : Minipass\n\nconst makeIgnore = (\n ignore: string | string[] | IgnoreLike,\n opts: GlobWalkerOpts\n): IgnoreLike =>\n typeof ignore === 'string'\n ? new Ignore([ignore], opts)\n : Array.isArray(ignore)\n ? new Ignore(ignore, opts)\n : ignore\n\n/**\n * basic walking utilities that all the glob walker types use\n */\nexport abstract class GlobUtil {\n path: Path\n patterns: Pattern[]\n opts: O\n seen: Set = new Set()\n paused: boolean = false\n aborted: boolean = false\n #onResume: (() => any)[] = []\n #ignore?: IgnoreLike\n #sep: '\\\\' | '/'\n signal?: AbortSignal\n maxDepth: number\n\n constructor(patterns: Pattern[], path: Path, opts: O)\n constructor(patterns: Pattern[], path: Path, opts: O) {\n this.patterns = patterns\n this.path = path\n this.opts = opts\n this.#sep = !opts.posix && opts.platform === 'win32' ? 
'\\\\' : '/'\n if (opts.ignore) {\n this.#ignore = makeIgnore(opts.ignore, opts)\n }\n // ignore, always set with maxDepth, but it's optional on the\n // GlobOptions type\n /* c8 ignore start */\n this.maxDepth = opts.maxDepth || Infinity\n /* c8 ignore stop */\n if (opts.signal) {\n this.signal = opts.signal\n this.signal.addEventListener('abort', () => {\n this.#onResume.length = 0\n })\n }\n }\n\n #ignored(path: Path): boolean {\n return this.seen.has(path) || !!this.#ignore?.ignored?.(path)\n }\n #childrenIgnored(path: Path): boolean {\n return !!this.#ignore?.childrenIgnored?.(path)\n }\n\n // backpressure mechanism\n pause() {\n this.paused = true\n }\n resume() {\n /* c8 ignore start */\n if (this.signal?.aborted) return\n /* c8 ignore stop */\n this.paused = false\n let fn: (() => any) | undefined = undefined\n while (!this.paused && (fn = this.#onResume.shift())) {\n fn()\n }\n }\n onResume(fn: () => any) {\n if (this.signal?.aborted) return\n /* c8 ignore start */\n if (!this.paused) {\n fn()\n } else {\n /* c8 ignore stop */\n this.#onResume.push(fn)\n }\n }\n\n // do the requisite realpath/stat checking, and return the path\n // to add or undefined to filter it out.\n async matchCheck(e: Path, ifDir: boolean): Promise {\n if (ifDir && this.opts.nodir) return undefined\n let rpc: Path | undefined\n if (this.opts.realpath) {\n rpc = e.realpathCached() || (await e.realpath())\n if (!rpc) return undefined\n e = rpc\n }\n const needStat = e.isUnknown() || this.opts.stat\n const s = needStat ? await e.lstat() : e\n if (this.opts.follow && this.opts.nodir && s?.isSymbolicLink()) {\n const target = await s.realpath()\n /* c8 ignore start */\n if (target && (target.isUnknown() || this.opts.stat)) {\n await target.lstat()\n }\n /* c8 ignore stop */\n }\n return this.matchCheckTest(s, ifDir)\n }\n\n matchCheckTest(e: Path | undefined, ifDir: boolean): Path | undefined {\n return e &&\n (this.maxDepth === Infinity || e.depth() <= this.maxDepth) &&\n (!ifDir || e.canReaddir()) &&\n (!this.opts.nodir || !e.isDirectory()) &&\n (!this.opts.nodir ||\n !this.opts.follow ||\n !e.isSymbolicLink() ||\n !e.realpathCached()?.isDirectory()) &&\n !this.#ignored(e)\n ? e\n : undefined\n }\n\n matchCheckSync(e: Path, ifDir: boolean): Path | undefined {\n if (ifDir && this.opts.nodir) return undefined\n let rpc: Path | undefined\n if (this.opts.realpath) {\n rpc = e.realpathCached() || e.realpathSync()\n if (!rpc) return undefined\n e = rpc\n }\n const needStat = e.isUnknown() || this.opts.stat\n const s = needStat ? e.lstatSync() : e\n if (this.opts.follow && this.opts.nodir && s?.isSymbolicLink()) {\n const target = s.realpathSync()\n if (target && (target?.isUnknown() || this.opts.stat)) {\n target.lstatSync()\n }\n }\n return this.matchCheckTest(s, ifDir)\n }\n\n abstract matchEmit(p: Result): void\n abstract matchEmit(p: string | Path): void\n\n matchFinish(e: Path, absolute: boolean) {\n if (this.#ignored(e)) return\n const abs =\n this.opts.absolute === undefined ? absolute : this.opts.absolute\n this.seen.add(e)\n const mark = this.opts.mark && e.isDirectory() ? this.#sep : ''\n // ok, we have what we need!\n if (this.opts.withFileTypes) {\n this.matchEmit(e)\n } else if (abs) {\n const abs = this.opts.posix ? e.fullpathPosix() : e.fullpath()\n this.matchEmit(abs + mark)\n } else {\n const rel = this.opts.posix ? e.relativePosix() : e.relative()\n const pre =\n this.opts.dotRelative && !rel.startsWith('..' + this.#sep)\n ? '.' + this.#sep\n : ''\n this.matchEmit(!rel ? '.' 
+ mark : pre + rel + mark)\n }\n }\n\n async match(e: Path, absolute: boolean, ifDir: boolean): Promise {\n const p = await this.matchCheck(e, ifDir)\n if (p) this.matchFinish(p, absolute)\n }\n\n matchSync(e: Path, absolute: boolean, ifDir: boolean): void {\n const p = this.matchCheckSync(e, ifDir)\n if (p) this.matchFinish(p, absolute)\n }\n\n walkCB(target: Path, patterns: Pattern[], cb: () => any) {\n /* c8 ignore start */\n if (this.signal?.aborted) cb()\n /* c8 ignore stop */\n this.walkCB2(target, patterns, new Processor(this.opts), cb)\n }\n\n walkCB2(\n target: Path,\n patterns: Pattern[],\n processor: Processor,\n cb: () => any\n ) {\n if (this.#childrenIgnored(target)) return cb()\n if (this.signal?.aborted) cb()\n if (this.paused) {\n this.onResume(() => this.walkCB2(target, patterns, processor, cb))\n return\n }\n processor.processPatterns(target, patterns)\n\n // done processing. all of the above is sync, can be abstracted out.\n // subwalks is a map of paths to the entry filters they need\n // matches is a map of paths to [absolute, ifDir] tuples.\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n tasks++\n this.match(m, absolute, ifDir).then(() => next())\n }\n\n for (const t of processor.subwalkTargets()) {\n if (this.maxDepth !== Infinity && t.depth() >= this.maxDepth) {\n continue\n }\n tasks++\n const childrenCached = t.readdirCached()\n if (t.calledReaddir())\n this.walkCB3(t, childrenCached, processor, next)\n else {\n t.readdirCB(\n (_, entries) => this.walkCB3(t, entries, processor, next),\n true\n )\n }\n }\n\n next()\n }\n\n walkCB3(\n target: Path,\n entries: Path[],\n processor: Processor,\n cb: () => any\n ) {\n processor = processor.filterEntries(target, entries)\n\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n tasks++\n this.match(m, absolute, ifDir).then(() => next())\n }\n for (const [target, patterns] of processor.subwalks.entries()) {\n tasks++\n this.walkCB2(target, patterns, processor.child(), next)\n }\n\n next()\n }\n\n walkCBSync(target: Path, patterns: Pattern[], cb: () => any) {\n /* c8 ignore start */\n if (this.signal?.aborted) cb()\n /* c8 ignore stop */\n this.walkCB2Sync(target, patterns, new Processor(this.opts), cb)\n }\n\n walkCB2Sync(\n target: Path,\n patterns: Pattern[],\n processor: Processor,\n cb: () => any\n ) {\n if (this.#childrenIgnored(target)) return cb()\n if (this.signal?.aborted) cb()\n if (this.paused) {\n this.onResume(() =>\n this.walkCB2Sync(target, patterns, processor, cb)\n )\n return\n }\n processor.processPatterns(target, patterns)\n\n // done processing. 
all of the above is sync, can be abstracted out.\n // subwalks is a map of paths to the entry filters they need\n // matches is a map of paths to [absolute, ifDir] tuples.\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n this.matchSync(m, absolute, ifDir)\n }\n\n for (const t of processor.subwalkTargets()) {\n if (this.maxDepth !== Infinity && t.depth() >= this.maxDepth) {\n continue\n }\n tasks++\n const children = t.readdirSync()\n this.walkCB3Sync(t, children, processor, next)\n }\n\n next()\n }\n\n walkCB3Sync(\n target: Path,\n entries: Path[],\n processor: Processor,\n cb: () => any\n ) {\n processor = processor.filterEntries(target, entries)\n\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n this.matchSync(m, absolute, ifDir)\n }\n for (const [target, patterns] of processor.subwalks.entries()) {\n tasks++\n this.walkCB2Sync(target, patterns, processor.child(), next)\n }\n\n next()\n }\n}\n\nexport class GlobWalker<\n O extends GlobWalkerOpts = GlobWalkerOpts\n> extends GlobUtil {\n matches: O extends GWOFileTypesTrue\n ? Set\n : O extends GWOFileTypesFalse\n ? Set\n : O extends GWOFileTypesUnset\n ? Set\n : Set\n\n constructor(patterns: Pattern[], path: Path, opts: O) {\n super(patterns, path, opts)\n this.matches = new Set() as Matches\n }\n\n matchEmit(e: Result): void\n matchEmit(e: Path | string): void {\n this.matches.add(e)\n }\n\n async walk(): Promise> {\n if (this.signal?.aborted) throw this.signal.reason\n if (this.path.isUnknown()) {\n await this.path.lstat()\n }\n await new Promise((res, rej) => {\n this.walkCB(this.path, this.patterns, () => {\n if (this.signal?.aborted) {\n rej(this.signal.reason)\n } else {\n res(this.matches)\n }\n })\n })\n return this.matches\n }\n\n walkSync(): Matches {\n if (this.signal?.aborted) throw this.signal.reason\n if (this.path.isUnknown()) {\n this.path.lstatSync()\n }\n // nothing for the callback to do, because this never pauses\n this.walkCBSync(this.path, this.patterns, () => {\n if (this.signal?.aborted) throw this.signal.reason\n })\n return this.matches\n }\n}\n\nexport class GlobStream<\n O extends GlobWalkerOpts = GlobWalkerOpts\n> extends GlobUtil {\n results: O extends GWOFileTypesTrue\n ? Minipass\n : O extends GWOFileTypesFalse\n ? Minipass\n : O extends GWOFileTypesUnset\n ? 
Minipass\n : Minipass\n\n constructor(patterns: Pattern[], path: Path, opts: O) {\n super(patterns, path, opts)\n this.results = new Minipass({\n signal: this.signal,\n objectMode: true,\n }) as MatchStream\n this.results.on('drain', () => this.resume())\n this.results.on('resume', () => this.resume())\n }\n\n matchEmit(e: Result): void\n matchEmit(e: Path | string): void {\n this.results.write(e)\n if (!this.results.flowing) this.pause()\n }\n\n stream(): MatchStream {\n const target = this.path\n if (target.isUnknown()) {\n target.lstat().then(() => {\n this.walkCB(target, this.patterns, () => this.results.end())\n })\n } else {\n this.walkCB(target, this.patterns, () => this.results.end())\n }\n return this.results\n }\n\n streamSync(): MatchStream {\n if (this.path.isUnknown()) {\n this.path.lstatSync()\n }\n this.walkCBSync(this.path, this.patterns, () => this.results.end())\n return this.results\n }\n}\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/glob/dist/esm/ignore.d.ts.map b/deps/npm/node_modules/glob/dist/esm/ignore.d.ts.map index be7831769d33ed..21ab57b94d1a76 100644 --- a/deps/npm/node_modules/glob/dist/esm/ignore.d.ts.map +++ b/deps/npm/node_modules/glob/dist/esm/ignore.d.ts.map @@ -1 +1 @@ -{"version":3,"file":"ignore.d.ts","sourceRoot":"","sources":["../../src/ignore.ts"],"names":[],"mappings":"AAKA,OAAO,EAAE,SAAS,EAAE,MAAM,WAAW,CAAA;AACrC,OAAO,EAAE,IAAI,EAAE,MAAM,aAAa,CAAA;AAElC,OAAO,EAAE,cAAc,EAAE,MAAM,aAAa,CAAA;AAE5C,MAAM,WAAW,UAAU;IACzB,OAAO,CAAC,EAAE,CAAC,CAAC,EAAE,IAAI,KAAK,OAAO,CAAA;IAC9B,eAAe,CAAC,EAAE,CAAC,CAAC,EAAE,IAAI,KAAK,OAAO,CAAA;CACvC;AASD;;GAEG;AACH,qBAAa,MAAO,YAAW,UAAU;IACvC,QAAQ,EAAE,SAAS,EAAE,CAAA;IACrB,gBAAgB,EAAE,SAAS,EAAE,CAAA;IAC7B,QAAQ,EAAE,SAAS,EAAE,CAAA;IACrB,gBAAgB,EAAE,SAAS,EAAE,CAAA;gBAG3B,OAAO,EAAE,MAAM,EAAE,EACjB,EACE,OAAO,EACP,MAAM,EACN,KAAK,EACL,UAAU,EACV,QAA0B,GAC3B,EAAE,cAAc;IAsDnB,OAAO,CAAC,CAAC,EAAE,IAAI,GAAG,OAAO;IAczB,eAAe,CAAC,CAAC,EAAE,IAAI,GAAG,OAAO;CAWlC"} \ No newline at end of file +{"version":3,"file":"ignore.d.ts","sourceRoot":"","sources":["../../src/ignore.ts"],"names":[],"mappings":"AAKA,OAAO,EAAE,SAAS,EAAE,MAAM,WAAW,CAAA;AACrC,OAAO,EAAE,IAAI,EAAE,MAAM,aAAa,CAAA;AAElC,OAAO,EAAE,cAAc,EAAE,MAAM,aAAa,CAAA;AAE5C,MAAM,WAAW,UAAU;IACzB,OAAO,CAAC,EAAE,CAAC,CAAC,EAAE,IAAI,KAAK,OAAO,CAAA;IAC9B,eAAe,CAAC,EAAE,CAAC,CAAC,EAAE,IAAI,KAAK,OAAO,CAAA;CACvC;AASD;;GAEG;AACH,qBAAa,MAAO,YAAW,UAAU;IACvC,QAAQ,EAAE,SAAS,EAAE,CAAA;IACrB,gBAAgB,EAAE,SAAS,EAAE,CAAA;IAC7B,QAAQ,EAAE,SAAS,EAAE,CAAA;IACrB,gBAAgB,EAAE,SAAS,EAAE,CAAA;gBAG3B,OAAO,EAAE,MAAM,EAAE,EACjB,EACE,OAAO,EACP,MAAM,EACN,KAAK,EACL,UAAU,EACV,QAA0B,GAC3B,EAAE,cAAc;IA4DnB,OAAO,CAAC,CAAC,EAAE,IAAI,GAAG,OAAO;IAczB,eAAe,CAAC,CAAC,EAAE,IAAI,GAAG,OAAO;CAWlC"} \ No newline at end of file diff --git a/deps/npm/node_modules/glob/dist/esm/ignore.js b/deps/npm/node_modules/glob/dist/esm/ignore.js index e8cbaf96b3a809..2b2808d0dfe0f1 100644 --- a/deps/npm/node_modules/glob/dist/esm/ignore.js +++ b/deps/npm/node_modules/glob/dist/esm/ignore.js @@ -54,6 +54,12 @@ export class Ignore { if (!parsed || !globParts) { throw new Error('invalid pattern object'); } + // strip off leading ./ portions + // https://github.com/isaacs/node-glob/issues/570 + while (parsed[0] === '.' 
&& globParts[0] === '.') { + parsed.shift(); + globParts.shift(); + } /* c8 ignore stop */ const p = new Pattern(parsed, globParts, 0, platform); const m = new Minimatch(p.globString(), mmopts); diff --git a/deps/npm/node_modules/glob/dist/esm/ignore.js.map b/deps/npm/node_modules/glob/dist/esm/ignore.js.map index 736e4466180177..baa7eb2efa01df 100644 --- a/deps/npm/node_modules/glob/dist/esm/ignore.js.map +++ b/deps/npm/node_modules/glob/dist/esm/ignore.js.map @@ -1 +1 @@ -{"version":3,"file":"ignore.js","sourceRoot":"","sources":["../../src/ignore.ts"],"names":[],"mappings":"AAAA,sDAAsD;AACtD,kCAAkC;AAClC,kEAAkE;AAClE,6CAA6C;AAE7C,OAAO,EAAE,SAAS,EAAE,MAAM,WAAW,CAAA;AAErC,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAA;AAQtC,MAAM,eAAe,GACnB,OAAO,OAAO,KAAK,QAAQ;IAC3B,OAAO;IACP,OAAO,OAAO,CAAC,QAAQ,KAAK,QAAQ;IAClC,CAAC,CAAC,OAAO,CAAC,QAAQ;IAClB,CAAC,CAAC,OAAO,CAAA;AAEb;;GAEG;AACH,MAAM,OAAO,MAAM;IACjB,QAAQ,CAAa;IACrB,gBAAgB,CAAa;IAC7B,QAAQ,CAAa;IACrB,gBAAgB,CAAa;IAE7B,YACE,OAAiB,EACjB,EACE,OAAO,EACP,MAAM,EACN,KAAK,EACL,UAAU,EACV,QAAQ,GAAG,eAAe,GACX;QAEjB,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAA;QAClB,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAA;QAClB,IAAI,CAAC,gBAAgB,GAAG,EAAE,CAAA;QAC1B,IAAI,CAAC,gBAAgB,GAAG,EAAE,CAAA;QAC1B,MAAM,MAAM,GAAG;YACb,GAAG,EAAE,IAAI;YACT,OAAO;YACP,MAAM;YACN,KAAK;YACL,UAAU;YACV,iBAAiB,EAAE,CAAC;YACpB,QAAQ;YACR,SAAS,EAAE,IAAI;YACf,QAAQ,EAAE,IAAI;SACf,CAAA;QAED,mEAAmE;QACnE,gEAAgE;QAChE,mEAAmE;QACnE,uCAAuC;QACvC,mEAAmE;QACnE,qEAAqE;QACrE,uBAAuB;QACvB,uEAAuE;QACvE,oEAAoE;QACpE,qBAAqB;QACrB,sEAAsE;QACtE,wCAAwC;QACxC,KAAK,MAAM,GAAG,IAAI,OAAO,EAAE;YACzB,MAAM,EAAE,GAAG,IAAI,SAAS,CAAC,GAAG,EAAE,MAAM,CAAC,CAAA;YACrC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;gBACtC,MAAM,MAAM,GAAG,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;gBACxB,MAAM,SAAS,GAAG,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,CAAA;gBACjC,qBAAqB;gBACrB,IAAI,CAAC,MAAM,IAAI,CAAC,SAAS,EAAE;oBACzB,MAAM,IAAI,KAAK,CAAC,wBAAwB,CAAC,CAAA;iBAC1C;gBACD,oBAAoB;gBACpB,MAAM,CAAC,GAAG,IAAI,OAAO,CAAC,MAAM,EAAE,SAAS,EAAE,CAAC,EAAE,QAAQ,CAAC,CAAA;gBACrD,MAAM,CAAC,GAAG,IAAI,SAAS,CAAC,CAAC,CAAC,UAAU,EAAE,EAAE,MAAM,CAAC,CAAA;gBAC/C,MAAM,QAAQ,GAAG,SAAS,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,CAAC,KAAK,IAAI,CAAA;gBACzD,MAAM,QAAQ,GAAG,CAAC,CAAC,UAAU,EAAE,CAAA;gBAC/B,IAAI,QAAQ;oBAAE,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;;oBAC9B,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;gBAC1B,IAAI,QAAQ,EAAE;oBACZ,IAAI,QAAQ;wBAAE,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;;wBACtC,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;iBACnC;aACF;SACF;IACH,CAAC;IAED,OAAO,CAAC,CAAO;QACb,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,CAAA;QAC7B,MAAM,SAAS,GAAG,GAAG,QAAQ,GAAG,CAAA;QAChC,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,IAAI,GAAG,CAAA;QACpC,MAAM,SAAS,GAAG,GAAG,QAAQ,GAAG,CAAA;QAChC,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,QAAQ,EAAE;YAC7B,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC;gBAAE,OAAO,IAAI,CAAA;SACzD;QACD,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,QAAQ,EAAE;YAC7B,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC;gBAAE,OAAO,IAAI,CAAA;SACzD;QACD,OAAO,KAAK,CAAA;IACd,CAAC;IAED,eAAe,CAAC,CAAO;QACrB,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,GAAG,GAAG,CAAA;QACnC,MAAM,QAAQ,GAAG,CAAC,CAAC,CAAC,QAAQ,EAAE,IAAI,GAAG,CAAC,GAAG,GAAG,CAAA;QAC5C,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,gBAAgB,EAAE;YACrC,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC;gBAAE,OAAO,IAAI,CAAA;SACnC;QACD,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,gBAAgB,EAAE;YACrC,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC;gBAAE,OAAO,IAAI,CAAA;SACnC;QACD,OAAO,KAAK,CAAA;IACd,CAAC;CACF","sourcesContent":["// give it a pattern, and it'll be able to tell you if\n// a given path 
should be ignored.\n// Ignoring a path ignores its children if the pattern ends in /**\n// Ignores are always parsed in dot:true mode\n\nimport { Minimatch } from 'minimatch'\nimport { Path } from 'path-scurry'\nimport { Pattern } from './pattern.js'\nimport { GlobWalkerOpts } from './walker.js'\n\nexport interface IgnoreLike {\n ignored?: (p: Path) => boolean\n childrenIgnored?: (p: Path) => boolean\n}\n\nconst defaultPlatform: NodeJS.Platform =\n typeof process === 'object' &&\n process &&\n typeof process.platform === 'string'\n ? process.platform\n : 'linux'\n\n/**\n * Class used to process ignored patterns\n */\nexport class Ignore implements IgnoreLike {\n relative: Minimatch[]\n relativeChildren: Minimatch[]\n absolute: Minimatch[]\n absoluteChildren: Minimatch[]\n\n constructor(\n ignored: string[],\n {\n nobrace,\n nocase,\n noext,\n noglobstar,\n platform = defaultPlatform,\n }: GlobWalkerOpts\n ) {\n this.relative = []\n this.absolute = []\n this.relativeChildren = []\n this.absoluteChildren = []\n const mmopts = {\n dot: true,\n nobrace,\n nocase,\n noext,\n noglobstar,\n optimizationLevel: 2,\n platform,\n nocomment: true,\n nonegate: true,\n }\n\n // this is a little weird, but it gives us a clean set of optimized\n // minimatch matchers, without getting tripped up if one of them\n // ends in /** inside a brace section, and it's only inefficient at\n // the start of the walk, not along it.\n // It'd be nice if the Pattern class just had a .test() method, but\n // handling globstars is a bit of a pita, and that code already lives\n // in minimatch anyway.\n // Another way would be if maybe Minimatch could take its set/globParts\n // as an option, and then we could at least just use Pattern to test\n // for absolute-ness.\n // Yet another way, Minimatch could take an array of glob strings, and\n // a cwd option, and do the right thing.\n for (const ign of ignored) {\n const mm = new Minimatch(ign, mmopts)\n for (let i = 0; i < mm.set.length; i++) {\n const parsed = mm.set[i]\n const globParts = mm.globParts[i]\n /* c8 ignore start */\n if (!parsed || !globParts) {\n throw new Error('invalid pattern object')\n }\n /* c8 ignore stop */\n const p = new Pattern(parsed, globParts, 0, platform)\n const m = new Minimatch(p.globString(), mmopts)\n const children = globParts[globParts.length - 1] === '**'\n const absolute = p.isAbsolute()\n if (absolute) this.absolute.push(m)\n else this.relative.push(m)\n if (children) {\n if (absolute) this.absoluteChildren.push(m)\n else this.relativeChildren.push(m)\n }\n }\n }\n }\n\n ignored(p: Path): boolean {\n const fullpath = p.fullpath()\n const fullpaths = `${fullpath}/`\n const relative = p.relative() || '.'\n const relatives = `${relative}/`\n for (const m of this.relative) {\n if (m.match(relative) || m.match(relatives)) return true\n }\n for (const m of this.absolute) {\n if (m.match(fullpath) || m.match(fullpaths)) return true\n }\n return false\n }\n\n childrenIgnored(p: Path): boolean {\n const fullpath = p.fullpath() + '/'\n const relative = (p.relative() || '.') + '/'\n for (const m of this.relativeChildren) {\n if (m.match(relative)) return true\n }\n for (const m of this.absoluteChildren) {\n if (m.match(fullpath)) return true\n }\n return false\n }\n}\n"]} \ No newline at end of file 
+{"version":3,"file":"ignore.js","sourceRoot":"","sources":["../../src/ignore.ts"],"names":[],"mappings":"AAAA,sDAAsD;AACtD,kCAAkC;AAClC,kEAAkE;AAClE,6CAA6C;AAE7C,OAAO,EAAE,SAAS,EAAE,MAAM,WAAW,CAAA;AAErC,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAA;AAQtC,MAAM,eAAe,GACnB,OAAO,OAAO,KAAK,QAAQ;IAC3B,OAAO;IACP,OAAO,OAAO,CAAC,QAAQ,KAAK,QAAQ;IAClC,CAAC,CAAC,OAAO,CAAC,QAAQ;IAClB,CAAC,CAAC,OAAO,CAAA;AAEb;;GAEG;AACH,MAAM,OAAO,MAAM;IACjB,QAAQ,CAAa;IACrB,gBAAgB,CAAa;IAC7B,QAAQ,CAAa;IACrB,gBAAgB,CAAa;IAE7B,YACE,OAAiB,EACjB,EACE,OAAO,EACP,MAAM,EACN,KAAK,EACL,UAAU,EACV,QAAQ,GAAG,eAAe,GACX;QAEjB,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAA;QAClB,IAAI,CAAC,QAAQ,GAAG,EAAE,CAAA;QAClB,IAAI,CAAC,gBAAgB,GAAG,EAAE,CAAA;QAC1B,IAAI,CAAC,gBAAgB,GAAG,EAAE,CAAA;QAC1B,MAAM,MAAM,GAAG;YACb,GAAG,EAAE,IAAI;YACT,OAAO;YACP,MAAM;YACN,KAAK;YACL,UAAU;YACV,iBAAiB,EAAE,CAAC;YACpB,QAAQ;YACR,SAAS,EAAE,IAAI;YACf,QAAQ,EAAE,IAAI;SACf,CAAA;QAED,mEAAmE;QACnE,gEAAgE;QAChE,mEAAmE;QACnE,uCAAuC;QACvC,mEAAmE;QACnE,qEAAqE;QACrE,uBAAuB;QACvB,uEAAuE;QACvE,oEAAoE;QACpE,qBAAqB;QACrB,sEAAsE;QACtE,wCAAwC;QACxC,KAAK,MAAM,GAAG,IAAI,OAAO,EAAE;YACzB,MAAM,EAAE,GAAG,IAAI,SAAS,CAAC,GAAG,EAAE,MAAM,CAAC,CAAA;YACrC,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE;gBACtC,MAAM,MAAM,GAAG,EAAE,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;gBACxB,MAAM,SAAS,GAAG,EAAE,CAAC,SAAS,CAAC,CAAC,CAAC,CAAA;gBACjC,qBAAqB;gBACrB,IAAI,CAAC,MAAM,IAAI,CAAC,SAAS,EAAE;oBACzB,MAAM,IAAI,KAAK,CAAC,wBAAwB,CAAC,CAAA;iBAC1C;gBACD,gCAAgC;gBAChC,iDAAiD;gBACjD,OAAO,MAAM,CAAC,CAAC,CAAC,KAAK,GAAG,IAAI,SAAS,CAAC,CAAC,CAAC,KAAK,GAAG,EAAE;oBAChD,MAAM,CAAC,KAAK,EAAE,CAAA;oBACd,SAAS,CAAC,KAAK,EAAE,CAAA;iBAClB;gBACD,oBAAoB;gBACpB,MAAM,CAAC,GAAG,IAAI,OAAO,CAAC,MAAM,EAAE,SAAS,EAAE,CAAC,EAAE,QAAQ,CAAC,CAAA;gBACrD,MAAM,CAAC,GAAG,IAAI,SAAS,CAAC,CAAC,CAAC,UAAU,EAAE,EAAE,MAAM,CAAC,CAAA;gBAC/C,MAAM,QAAQ,GAAG,SAAS,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,CAAC,KAAK,IAAI,CAAA;gBACzD,MAAM,QAAQ,GAAG,CAAC,CAAC,UAAU,EAAE,CAAA;gBAC/B,IAAI,QAAQ;oBAAE,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;;oBAC9B,IAAI,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;gBAC1B,IAAI,QAAQ,EAAE;oBACZ,IAAI,QAAQ;wBAAE,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;;wBACtC,IAAI,CAAC,gBAAgB,CAAC,IAAI,CAAC,CAAC,CAAC,CAAA;iBACnC;aACF;SACF;IACH,CAAC;IAED,OAAO,CAAC,CAAO;QACb,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,CAAA;QAC7B,MAAM,SAAS,GAAG,GAAG,QAAQ,GAAG,CAAA;QAChC,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,IAAI,GAAG,CAAA;QACpC,MAAM,SAAS,GAAG,GAAG,QAAQ,GAAG,CAAA;QAChC,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,QAAQ,EAAE;YAC7B,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC;gBAAE,OAAO,IAAI,CAAA;SACzD;QACD,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,QAAQ,EAAE;YAC7B,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,CAAC,CAAC,KAAK,CAAC,SAAS,CAAC;gBAAE,OAAO,IAAI,CAAA;SACzD;QACD,OAAO,KAAK,CAAA;IACd,CAAC;IAED,eAAe,CAAC,CAAO;QACrB,MAAM,QAAQ,GAAG,CAAC,CAAC,QAAQ,EAAE,GAAG,GAAG,CAAA;QACnC,MAAM,QAAQ,GAAG,CAAC,CAAC,CAAC,QAAQ,EAAE,IAAI,GAAG,CAAC,GAAG,GAAG,CAAA;QAC5C,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,gBAAgB,EAAE;YACrC,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC;gBAAE,OAAO,IAAI,CAAA;SACnC;QACD,KAAK,MAAM,CAAC,IAAI,IAAI,CAAC,gBAAgB,EAAE;YACrC,IAAI,CAAC,CAAC,KAAK,CAAC,QAAQ,CAAC;gBAAE,OAAO,IAAI,CAAA;SACnC;QACD,OAAO,KAAK,CAAA;IACd,CAAC;CACF","sourcesContent":["// give it a pattern, and it'll be able to tell you if\n// a given path should be ignored.\n// Ignoring a path ignores its children if the pattern ends in /**\n// Ignores are always parsed in dot:true mode\n\nimport { Minimatch } from 'minimatch'\nimport { Path } from 'path-scurry'\nimport { Pattern } from './pattern.js'\nimport { GlobWalkerOpts } from 
'./walker.js'\n\nexport interface IgnoreLike {\n ignored?: (p: Path) => boolean\n childrenIgnored?: (p: Path) => boolean\n}\n\nconst defaultPlatform: NodeJS.Platform =\n typeof process === 'object' &&\n process &&\n typeof process.platform === 'string'\n ? process.platform\n : 'linux'\n\n/**\n * Class used to process ignored patterns\n */\nexport class Ignore implements IgnoreLike {\n relative: Minimatch[]\n relativeChildren: Minimatch[]\n absolute: Minimatch[]\n absoluteChildren: Minimatch[]\n\n constructor(\n ignored: string[],\n {\n nobrace,\n nocase,\n noext,\n noglobstar,\n platform = defaultPlatform,\n }: GlobWalkerOpts\n ) {\n this.relative = []\n this.absolute = []\n this.relativeChildren = []\n this.absoluteChildren = []\n const mmopts = {\n dot: true,\n nobrace,\n nocase,\n noext,\n noglobstar,\n optimizationLevel: 2,\n platform,\n nocomment: true,\n nonegate: true,\n }\n\n // this is a little weird, but it gives us a clean set of optimized\n // minimatch matchers, without getting tripped up if one of them\n // ends in /** inside a brace section, and it's only inefficient at\n // the start of the walk, not along it.\n // It'd be nice if the Pattern class just had a .test() method, but\n // handling globstars is a bit of a pita, and that code already lives\n // in minimatch anyway.\n // Another way would be if maybe Minimatch could take its set/globParts\n // as an option, and then we could at least just use Pattern to test\n // for absolute-ness.\n // Yet another way, Minimatch could take an array of glob strings, and\n // a cwd option, and do the right thing.\n for (const ign of ignored) {\n const mm = new Minimatch(ign, mmopts)\n for (let i = 0; i < mm.set.length; i++) {\n const parsed = mm.set[i]\n const globParts = mm.globParts[i]\n /* c8 ignore start */\n if (!parsed || !globParts) {\n throw new Error('invalid pattern object')\n }\n // strip off leading ./ portions\n // https://github.com/isaacs/node-glob/issues/570\n while (parsed[0] === '.' 
&& globParts[0] === '.') {\n parsed.shift()\n globParts.shift()\n }\n /* c8 ignore stop */\n const p = new Pattern(parsed, globParts, 0, platform)\n const m = new Minimatch(p.globString(), mmopts)\n const children = globParts[globParts.length - 1] === '**'\n const absolute = p.isAbsolute()\n if (absolute) this.absolute.push(m)\n else this.relative.push(m)\n if (children) {\n if (absolute) this.absoluteChildren.push(m)\n else this.relativeChildren.push(m)\n }\n }\n }\n }\n\n ignored(p: Path): boolean {\n const fullpath = p.fullpath()\n const fullpaths = `${fullpath}/`\n const relative = p.relative() || '.'\n const relatives = `${relative}/`\n for (const m of this.relative) {\n if (m.match(relative) || m.match(relatives)) return true\n }\n for (const m of this.absolute) {\n if (m.match(fullpath) || m.match(fullpaths)) return true\n }\n return false\n }\n\n childrenIgnored(p: Path): boolean {\n const fullpath = p.fullpath() + '/'\n const relative = (p.relative() || '.') + '/'\n for (const m of this.relativeChildren) {\n if (m.match(relative)) return true\n }\n for (const m of this.absoluteChildren) {\n if (m.match(fullpath)) return true\n }\n return false\n }\n}\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/glob/dist/esm/package.json b/deps/npm/node_modules/glob/dist/esm/package.json index 7c34deb5837d8d..3dbc1ca591c055 100644 --- a/deps/npm/node_modules/glob/dist/esm/package.json +++ b/deps/npm/node_modules/glob/dist/esm/package.json @@ -1 +1,3 @@ -{"type":"module"} \ No newline at end of file +{ + "type": "module" +} diff --git a/deps/npm/node_modules/glob/dist/esm/walker.d.ts.map b/deps/npm/node_modules/glob/dist/esm/walker.d.ts.map index 7c8df20b2f323c..2cae287c2e1f42 100644 --- a/deps/npm/node_modules/glob/dist/esm/walker.d.ts.map +++ b/deps/npm/node_modules/glob/dist/esm/walker.d.ts.map @@ -1 +1 @@ 
-{"version":3,"file":"walker.d.ts","sourceRoot":"","sources":["../../src/walker.ts"],"names":[],"mappings":";AAAA;;;;;GAKG;AACH,OAAO,EAAE,QAAQ,EAAE,MAAM,UAAU,CAAA;AACnC,OAAO,EAAE,IAAI,EAAE,MAAM,aAAa,CAAA;AAClC,OAAO,EAAU,UAAU,EAAE,MAAM,aAAa,CAAA;AAOhD,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAA;AACtC,OAAO,EAAE,SAAS,EAAE,MAAM,gBAAgB,CAAA;AAE1C,MAAM,WAAW,cAAc;IAC7B,QAAQ,CAAC,EAAE,OAAO,CAAA;IAClB,kBAAkB,CAAC,EAAE,OAAO,CAAA;IAC5B,GAAG,CAAC,EAAE,MAAM,GAAG,GAAG,CAAA;IAClB,GAAG,CAAC,EAAE,OAAO,CAAA;IACb,WAAW,CAAC,EAAE,OAAO,CAAA;IACrB,MAAM,CAAC,EAAE,OAAO,CAAA;IAChB,MAAM,CAAC,EAAE,MAAM,GAAG,MAAM,EAAE,GAAG,UAAU,CAAA;IACvC,IAAI,CAAC,EAAE,OAAO,CAAA;IACd,SAAS,CAAC,EAAE,OAAO,CAAA;IAGnB,QAAQ,CAAC,EAAE,MAAM,CAAA;IACjB,OAAO,CAAC,EAAE,OAAO,CAAA;IACjB,MAAM,CAAC,EAAE,OAAO,CAAA;IAChB,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,UAAU,CAAC,EAAE,OAAO,CAAA;IACpB,QAAQ,CAAC,EAAE,MAAM,CAAC,QAAQ,CAAA;IAC1B,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,QAAQ,CAAC,EAAE,OAAO,CAAA;IAClB,IAAI,CAAC,EAAE,MAAM,CAAA;IACb,IAAI,CAAC,EAAE,OAAO,CAAA;IACd,MAAM,CAAC,EAAE,WAAW,CAAA;IACpB,oBAAoB,CAAC,EAAE,OAAO,CAAA;IAC9B,aAAa,CAAC,EAAE,OAAO,CAAA;CACxB;AAED,MAAM,MAAM,gBAAgB,GAAG,cAAc,GAAG;IAC9C,aAAa,EAAE,IAAI,CAAA;CACpB,CAAA;AACD,MAAM,MAAM,iBAAiB,GAAG,cAAc,GAAG;IAC/C,aAAa,EAAE,KAAK,CAAA;CACrB,CAAA;AACD,MAAM,MAAM,iBAAiB,GAAG,cAAc,GAAG;IAC/C,aAAa,CAAC,EAAE,SAAS,CAAA;CAC1B,CAAA;AAED,MAAM,MAAM,MAAM,CAAC,CAAC,SAAS,cAAc,IAAI,CAAC,SAAS,gBAAgB,GACrE,IAAI,GACJ,CAAC,SAAS,iBAAiB,GAC3B,MAAM,GACN,CAAC,SAAS,iBAAiB,GAC3B,MAAM,GACN,IAAI,GAAG,MAAM,CAAA;AAEjB,MAAM,MAAM,OAAO,CAAC,CAAC,SAAS,cAAc,IAAI,CAAC,SAAS,gBAAgB,GACtE,GAAG,CAAC,IAAI,CAAC,GACT,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,GAAG,CAAC,IAAI,GAAG,MAAM,CAAC,CAAA;AAEtB,MAAM,MAAM,WAAW,CAAC,CAAC,SAAS,cAAc,IAC9C,CAAC,SAAS,gBAAgB,GACtB,QAAQ,CAAC,IAAI,EAAE,IAAI,CAAC,GACpB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,QAAQ,CAAC,IAAI,GAAG,MAAM,EAAE,IAAI,GAAG,MAAM,CAAC,CAAA;AAY5C;;GAEG;AACH,8BAAsB,QAAQ,CAAC,CAAC,SAAS,cAAc,GAAG,cAAc;;IACtE,IAAI,EAAE,IAAI,CAAA;IACV,QAAQ,EAAE,OAAO,EAAE,CAAA;IACnB,IAAI,EAAE,CAAC,CAAA;IACP,IAAI,EAAE,GAAG,CAAC,IAAI,CAAC,CAAkB;IACjC,MAAM,EAAE,OAAO,CAAQ;IACvB,OAAO,EAAE,OAAO,CAAQ;IAIxB,MAAM,CAAC,EAAE,WAAW,CAAA;IACpB,QAAQ,EAAE,MAAM,CAAA;gBAEJ,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IA8BpD,KAAK;IAGL,MAAM;IAUN,QAAQ,CAAC,EAAE,EAAE,MAAM,GAAG;IAahB,UAAU,CAAC,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,GAAG,SAAS,CAAC;IAYpE,cAAc,CAAC,CAAC,EAAE,IAAI,GAAG,SAAS,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI,GAAG,SAAS;IAUrE,cAAc,CAAC,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI,GAAG,SAAS;IAYzD,QAAQ,CAAC,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IACtC,QAAQ,CAAC,SAAS,CAAC,CAAC,EAAE,MAAM,GAAG,IAAI,GAAG,IAAI;IAE1C,WAAW,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO;IAsBhC,KAAK,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;IAKtE,SAAS,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI;IAK3D,MAAM,CAAC,MAAM,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,EAAE,EAAE,EAAE,MAAM,GAAG;IAOvD,OAAO,CACL,MAAM,EAAE,IAAI,EACZ,QAAQ,EAAE,OAAO,EAAE,EACnB,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IA2Cf,OAAO,CACL,MAAM,EAAE,IAAI,EACZ,OAAO,EAAE,IAAI,EAAE,EACf,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IAsBf,UAAU,CAAC,MAAM,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,EAAE,EAAE,EAAE,MAAM,GAAG;IAO3D,WAAW,CACT,MAAM,EAAE,IAAI,EACZ,QAAQ,EAAE,OAAO,EAAE,EACnB,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IAqCf,WAAW,CACT,MAAM,EAAE,IAAI,EACZ,OAAO,EAAE,IAAI,EAAE,EACf,SAAS,EAAE
,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;CAoBhB;AAED,qBAAa,UAAU,CACrB,CAAC,SAAS,cAAc,GAAG,cAAc,CACzC,SAAQ,QAAQ,CAAC,CAAC,CAAC;IACnB,OAAO,EAAE,CAAC,SAAS,gBAAgB,GAC/B,GAAG,CAAC,IAAI,CAAC,GACT,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,GAAG,CAAC,IAAI,GAAG,MAAM,CAAC,CAAA;gBAEV,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IAKpD,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IAKvB,IAAI,IAAI,OAAO,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;IAiBjC,QAAQ,IAAI,OAAO,CAAC,CAAC,CAAC;CAWvB;AAED,qBAAa,UAAU,CACrB,CAAC,SAAS,cAAc,GAAG,cAAc,CACzC,SAAQ,QAAQ,CAAC,CAAC,CAAC;IACnB,OAAO,EAAE,CAAC,SAAS,gBAAgB,GAC/B,QAAQ,CAAC,IAAI,EAAE,IAAI,CAAC,GACpB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,QAAQ,CAAC,IAAI,GAAG,MAAM,EAAE,IAAI,GAAG,MAAM,CAAC,CAAA;gBAE9B,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IAUpD,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IAM7B,MAAM,IAAI,WAAW,CAAC,CAAC,CAAC;IAYxB,UAAU,IAAI,WAAW,CAAC,CAAC,CAAC;CAO7B"} \ No newline at end of file +{"version":3,"file":"walker.d.ts","sourceRoot":"","sources":["../../src/walker.ts"],"names":[],"mappings":";AAAA;;;;;GAKG;AACH,OAAO,EAAE,QAAQ,EAAE,MAAM,UAAU,CAAA;AACnC,OAAO,EAAE,IAAI,EAAE,MAAM,aAAa,CAAA;AAClC,OAAO,EAAU,UAAU,EAAE,MAAM,aAAa,CAAA;AAOhD,OAAO,EAAE,OAAO,EAAE,MAAM,cAAc,CAAA;AACtC,OAAO,EAAE,SAAS,EAAE,MAAM,gBAAgB,CAAA;AAE1C,MAAM,WAAW,cAAc;IAC7B,QAAQ,CAAC,EAAE,OAAO,CAAA;IAClB,kBAAkB,CAAC,EAAE,OAAO,CAAA;IAC5B,GAAG,CAAC,EAAE,MAAM,GAAG,GAAG,CAAA;IAClB,GAAG,CAAC,EAAE,OAAO,CAAA;IACb,WAAW,CAAC,EAAE,OAAO,CAAA;IACrB,MAAM,CAAC,EAAE,OAAO,CAAA;IAChB,MAAM,CAAC,EAAE,MAAM,GAAG,MAAM,EAAE,GAAG,UAAU,CAAA;IACvC,IAAI,CAAC,EAAE,OAAO,CAAA;IACd,SAAS,CAAC,EAAE,OAAO,CAAA;IAGnB,QAAQ,CAAC,EAAE,MAAM,CAAA;IACjB,OAAO,CAAC,EAAE,OAAO,CAAA;IACjB,MAAM,CAAC,EAAE,OAAO,CAAA;IAChB,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,UAAU,CAAC,EAAE,OAAO,CAAA;IACpB,QAAQ,CAAC,EAAE,MAAM,CAAC,QAAQ,CAAA;IAC1B,KAAK,CAAC,EAAE,OAAO,CAAA;IACf,QAAQ,CAAC,EAAE,OAAO,CAAA;IAClB,IAAI,CAAC,EAAE,MAAM,CAAA;IACb,IAAI,CAAC,EAAE,OAAO,CAAA;IACd,MAAM,CAAC,EAAE,WAAW,CAAA;IACpB,oBAAoB,CAAC,EAAE,OAAO,CAAA;IAC9B,aAAa,CAAC,EAAE,OAAO,CAAA;CACxB;AAED,MAAM,MAAM,gBAAgB,GAAG,cAAc,GAAG;IAC9C,aAAa,EAAE,IAAI,CAAA;CACpB,CAAA;AACD,MAAM,MAAM,iBAAiB,GAAG,cAAc,GAAG;IAC/C,aAAa,EAAE,KAAK,CAAA;CACrB,CAAA;AACD,MAAM,MAAM,iBAAiB,GAAG,cAAc,GAAG;IAC/C,aAAa,CAAC,EAAE,SAAS,CAAA;CAC1B,CAAA;AAED,MAAM,MAAM,MAAM,CAAC,CAAC,SAAS,cAAc,IAAI,CAAC,SAAS,gBAAgB,GACrE,IAAI,GACJ,CAAC,SAAS,iBAAiB,GAC3B,MAAM,GACN,CAAC,SAAS,iBAAiB,GAC3B,MAAM,GACN,IAAI,GAAG,MAAM,CAAA;AAEjB,MAAM,MAAM,OAAO,CAAC,CAAC,SAAS,cAAc,IAAI,CAAC,SAAS,gBAAgB,GACtE,GAAG,CAAC,IAAI,CAAC,GACT,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,GAAG,CAAC,IAAI,GAAG,MAAM,CAAC,CAAA;AAEtB,MAAM,MAAM,WAAW,CAAC,CAAC,SAAS,cAAc,IAC9C,CAAC,SAAS,gBAAgB,GACtB,QAAQ,CAAC,IAAI,EAAE,IAAI,CAAC,GACpB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,QAAQ,CAAC,IAAI,GAAG,MAAM,EAAE,IAAI,GAAG,MAAM,CAAC,CAAA;AAY5C;;GAEG;AACH,8BAAsB,QAAQ,CAAC,CAAC,SAAS,cAAc,GAAG,cAAc;;IACtE,IAAI,EAAE,IAAI,CAAA;IACV,QAAQ,EAAE,OAAO,EAAE,CAAA;IACnB,IAAI,EAAE,CAAC,CAAA;IACP,IAAI,EAAE,GAAG,CAAC,IAAI,CAAC,CAAkB;IACjC,MAAM,EAAE,OAAO,CAAQ;IACvB,OAAO,EAAE,OAAO,CAAQ;IAIxB,MAAM,CAAC,EAAE,WAAW,CAAA;IACpB,QAAQ,EAAE,MAAM,CAAA;gBAEJ,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IA8BpD,KAAK;IAGL,MAAM;IAUN,QAAQ,CAAC,EAAE,EAAE,MAAM,GAAG;IAahB,UAAU,CAAC,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,OAA
O,GAAG,OAAO,CAAC,IAAI,GAAG,SAAS,CAAC;IAqBpE,cAAc,CAAC,CAAC,EAAE,IAAI,GAAG,SAAS,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI,GAAG,SAAS;IAcrE,cAAc,CAAC,CAAC,EAAE,IAAI,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI,GAAG,SAAS;IAmBzD,QAAQ,CAAC,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IACtC,QAAQ,CAAC,SAAS,CAAC,CAAC,EAAE,MAAM,GAAG,IAAI,GAAG,IAAI;IAE1C,WAAW,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO;IAsBhC,KAAK,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC;IAKtE,SAAS,CAAC,CAAC,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,KAAK,EAAE,OAAO,GAAG,IAAI;IAK3D,MAAM,CAAC,MAAM,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,EAAE,EAAE,EAAE,MAAM,GAAG;IAOvD,OAAO,CACL,MAAM,EAAE,IAAI,EACZ,QAAQ,EAAE,OAAO,EAAE,EACnB,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IA2Cf,OAAO,CACL,MAAM,EAAE,IAAI,EACZ,OAAO,EAAE,IAAI,EAAE,EACf,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IAsBf,UAAU,CAAC,MAAM,EAAE,IAAI,EAAE,QAAQ,EAAE,OAAO,EAAE,EAAE,EAAE,EAAE,MAAM,GAAG;IAO3D,WAAW,CACT,MAAM,EAAE,IAAI,EACZ,QAAQ,EAAE,OAAO,EAAE,EACnB,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;IAqCf,WAAW,CACT,MAAM,EAAE,IAAI,EACZ,OAAO,EAAE,IAAI,EAAE,EACf,SAAS,EAAE,SAAS,EACpB,EAAE,EAAE,MAAM,GAAG;CAoBhB;AAED,qBAAa,UAAU,CACrB,CAAC,SAAS,cAAc,GAAG,cAAc,CACzC,SAAQ,QAAQ,CAAC,CAAC,CAAC;IACnB,OAAO,EAAE,CAAC,SAAS,gBAAgB,GAC/B,GAAG,CAAC,IAAI,CAAC,GACT,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,CAAC,SAAS,iBAAiB,GAC3B,GAAG,CAAC,MAAM,CAAC,GACX,GAAG,CAAC,IAAI,GAAG,MAAM,CAAC,CAAA;gBAEV,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IAKpD,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IAKvB,IAAI,IAAI,OAAO,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;IAiBjC,QAAQ,IAAI,OAAO,CAAC,CAAC,CAAC;CAWvB;AAED,qBAAa,UAAU,CACrB,CAAC,SAAS,cAAc,GAAG,cAAc,CACzC,SAAQ,QAAQ,CAAC,CAAC,CAAC;IACnB,OAAO,EAAE,CAAC,SAAS,gBAAgB,GAC/B,QAAQ,CAAC,IAAI,EAAE,IAAI,CAAC,GACpB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,CAAC,SAAS,iBAAiB,GAC3B,QAAQ,CAAC,MAAM,EAAE,MAAM,CAAC,GACxB,QAAQ,CAAC,IAAI,GAAG,MAAM,EAAE,IAAI,GAAG,MAAM,CAAC,CAAA;gBAE9B,QAAQ,EAAE,OAAO,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,IAAI,EAAE,CAAC;IAUpD,SAAS,CAAC,CAAC,EAAE,MAAM,CAAC,CAAC,CAAC,GAAG,IAAI;IAM7B,MAAM,IAAI,WAAW,CAAC,CAAC,CAAC;IAYxB,UAAU,IAAI,WAAW,CAAC,CAAC,CAAC;CAO7B"} \ No newline at end of file diff --git a/deps/npm/node_modules/glob/dist/esm/walker.js b/deps/npm/node_modules/glob/dist/esm/walker.js index 6f3358b0c39a32..b58472e9de4fbc 100644 --- a/deps/npm/node_modules/glob/dist/esm/walker.js +++ b/deps/npm/node_modules/glob/dist/esm/walker.js @@ -93,13 +93,26 @@ export class GlobUtil { e = rpc; } const needStat = e.isUnknown() || this.opts.stat; - return this.matchCheckTest(needStat ? await e.lstat() : e, ifDir); + const s = needStat ? await e.lstat() : e; + if (this.opts.follow && this.opts.nodir && s?.isSymbolicLink()) { + const target = await s.realpath(); + /* c8 ignore start */ + if (target && (target.isUnknown() || this.opts.stat)) { + await target.lstat(); + } + /* c8 ignore stop */ + } + return this.matchCheckTest(s, ifDir); } matchCheckTest(e, ifDir) { return e && (this.maxDepth === Infinity || e.depth() <= this.maxDepth) && (!ifDir || e.canReaddir()) && (!this.opts.nodir || !e.isDirectory()) && + (!this.opts.nodir || + !this.opts.follow || + !e.isSymbolicLink() || + !e.realpathCached()?.isDirectory()) && !this.#ignored(e) ? e : undefined; @@ -115,7 +128,14 @@ export class GlobUtil { e = rpc; } const needStat = e.isUnknown() || this.opts.stat; - return this.matchCheckTest(needStat ? e.lstatSync() : e, ifDir); + const s = needStat ? 
e.lstatSync() : e; + if (this.opts.follow && this.opts.nodir && s?.isSymbolicLink()) { + const target = s.realpathSync(); + if (target && (target?.isUnknown() || this.opts.stat)) { + target.lstatSync(); + } + } + return this.matchCheckTest(s, ifDir); } matchFinish(e, absolute) { if (this.#ignored(e)) diff --git a/deps/npm/node_modules/glob/dist/esm/walker.js.map b/deps/npm/node_modules/glob/dist/esm/walker.js.map index 8756bfca294503..62f35d4a1a2955 100644 --- a/deps/npm/node_modules/glob/dist/esm/walker.js.map +++ b/deps/npm/node_modules/glob/dist/esm/walker.js.map @@ -1 +1 @@ -{"version":3,"file":"walker.js","sourceRoot":"","sources":["../../src/walker.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AACH,OAAO,EAAE,QAAQ,EAAE,MAAM,UAAU,CAAA;AAEnC,OAAO,EAAE,MAAM,EAAc,MAAM,aAAa,CAAA;AAQhD,OAAO,EAAE,SAAS,EAAE,MAAM,gBAAgB,CAAA;AAiE1C,MAAM,UAAU,GAAG,CACjB,MAAsC,EACtC,IAAoB,EACR,EAAE,CACd,OAAO,MAAM,KAAK,QAAQ;IACxB,CAAC,CAAC,IAAI,MAAM,CAAC,CAAC,MAAM,CAAC,EAAE,IAAI,CAAC;IAC5B,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC;QACvB,CAAC,CAAC,IAAI,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC;QAC1B,CAAC,CAAC,MAAM,CAAA;AAEZ;;GAEG;AACH,MAAM,OAAgB,QAAQ;IAC5B,IAAI,CAAM;IACV,QAAQ,CAAW;IACnB,IAAI,CAAG;IACP,IAAI,GAAc,IAAI,GAAG,EAAQ,CAAA;IACjC,MAAM,GAAY,KAAK,CAAA;IACvB,OAAO,GAAY,KAAK,CAAA;IACxB,SAAS,GAAkB,EAAE,CAAA;IAC7B,OAAO,CAAa;IACpB,IAAI,CAAY;IAChB,MAAM,CAAc;IACpB,QAAQ,CAAQ;IAGhB,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAA;QACxB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAA;QAChB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAA;QAChB,IAAI,CAAC,IAAI,GAAG,CAAC,IAAI,CAAC,KAAK,IAAI,IAAI,CAAC,QAAQ,KAAK,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAA;QACjE,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,OAAO,GAAG,UAAU,CAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,CAAA;SAC7C;QACD,6DAA6D;QAC7D,mBAAmB;QACnB,qBAAqB;QACrB,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC,QAAQ,IAAI,QAAQ,CAAA;QACzC,oBAAoB;QACpB,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAA;YACzB,IAAI,CAAC,MAAM,CAAC,gBAAgB,CAAC,OAAO,EAAE,GAAG,EAAE;gBACzC,IAAI,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,CAAA;YAC3B,CAAC,CAAC,CAAA;SACH;IACH,CAAC;IAED,QAAQ,CAAC,IAAU;QACjB,OAAO,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,OAAO,EAAE,OAAO,EAAE,CAAC,IAAI,CAAC,CAAA;IAC/D,CAAC;IACD,gBAAgB,CAAC,IAAU;QACzB,OAAO,CAAC,CAAC,IAAI,CAAC,OAAO,EAAE,eAAe,EAAE,CAAC,IAAI,CAAC,CAAA;IAChD,CAAC;IAED,yBAAyB;IACzB,KAAK;QACH,IAAI,CAAC,MAAM,GAAG,IAAI,CAAA;IACpB,CAAC;IACD,MAAM;QACJ,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,OAAM;QAChC,oBAAoB;QACpB,IAAI,CAAC,MAAM,GAAG,KAAK,CAAA;QACnB,IAAI,EAAE,GAA4B,SAAS,CAAA;QAC3C,OAAO,CAAC,IAAI,CAAC,MAAM,IAAI,CAAC,EAAE,GAAG,IAAI,CAAC,SAAS,CAAC,KAAK,EAAE,CAAC,EAAE;YACpD,EAAE,EAAE,CAAA;SACL;IACH,CAAC;IACD,QAAQ,CAAC,EAAa;QACpB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,OAAM;QAChC,qBAAqB;QACrB,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE;YAChB,EAAE,EAAE,CAAA;SACL;aAAM;YACL,oBAAoB;YACpB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EAAE,CAAC,CAAA;SACxB;IACH,CAAC;IAED,+DAA+D;IAC/D,wCAAwC;IACxC,KAAK,CAAC,UAAU,CAAC,CAAO,EAAE,KAAc;QACtC,IAAI,KAAK,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK;YAAE,OAAO,SAAS,CAAA;QAC9C,IAAI,GAAqB,CAAA;QACzB,IAAI,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;YACtB,GAAG,GAAG,CAAC,CAAC,cAAc,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAA;YAChD,IAAI,CAAC,GAAG;gBAAE,OAAO,SAAS,CAAA;YAC1B,CAAC,GAAG,GAAG,CAAA;SACR;QACD,MAAM,QAAQ,GAAG,CAAC,CAAC,SAAS,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAA;QAChD,OAAO,IAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;IACnE,CAAC;IAED,cAAc,CAAC,CAAmB,EAAE,KAAc;QAChD,OAAO,CAAC;YACN,CAAC,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,CAAC;YAC
1D,CAAC,CAAC,KAAK,IAAI,CAAC,CAAC,UAAU,EAAE,CAAC;YAC1B,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK,IAAI,CAAC,CAAC,CAAC,WAAW,EAAE,CAAC;YACtC,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;YACjB,CAAC,CAAC,CAAC;YACH,CAAC,CAAC,SAAS,CAAA;IACf,CAAC;IAED,cAAc,CAAC,CAAO,EAAE,KAAc;QACpC,IAAI,KAAK,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK;YAAE,OAAO,SAAS,CAAA;QAC9C,IAAI,GAAqB,CAAA;QACzB,IAAI,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;YACtB,GAAG,GAAG,CAAC,CAAC,cAAc,EAAE,IAAI,CAAC,CAAC,YAAY,EAAE,CAAA;YAC5C,IAAI,CAAC,GAAG;gBAAE,OAAO,SAAS,CAAA;YAC1B,CAAC,GAAG,GAAG,CAAA;SACR;QACD,MAAM,QAAQ,GAAG,CAAC,CAAC,SAAS,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAA;QAChD,OAAO,IAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;IACjE,CAAC;IAKD,WAAW,CAAC,CAAO,EAAE,QAAiB;QACpC,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;YAAE,OAAM;QAC5B,MAAM,GAAG,GACP,IAAI,CAAC,IAAI,CAAC,QAAQ,KAAK,SAAS,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,CAAA;QAClE,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;QAChB,MAAM,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC,IAAI,IAAI,CAAC,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAA;QAC/D,4BAA4B;QAC5B,IAAI,IAAI,CAAC,IAAI,CAAC,aAAa,EAAE;YAC3B,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,CAAA;SAClB;aAAM,IAAI,GAAG,EAAE;YACd,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,aAAa,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAA;YAC9D,IAAI,CAAC,SAAS,CAAC,GAAG,GAAG,IAAI,CAAC,CAAA;SAC3B;aAAM;YACL,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,aAAa,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAA;YAC9D,MAAM,GAAG,GACP,IAAI,CAAC,IAAI,CAAC,WAAW,IAAI,CAAC,GAAG,CAAC,UAAU,CAAC,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC;gBACxD,CAAC,CAAC,GAAG,GAAG,IAAI,CAAC,IAAI;gBACjB,CAAC,CAAC,EAAE,CAAA;YACR,IAAI,CAAC,SAAS,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,GAAG,IAAI,CAAC,CAAC,CAAC,GAAG,GAAG,GAAG,GAAG,IAAI,CAAC,CAAA;SACrD;IACH,CAAC;IAED,KAAK,CAAC,KAAK,CAAC,CAAO,EAAE,QAAiB,EAAE,KAAc;QACpD,MAAM,CAAC,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;QACzC,IAAI,CAAC;YAAE,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAA;IACtC,CAAC;IAED,SAAS,CAAC,CAAO,EAAE,QAAiB,EAAE,KAAc;QAClD,MAAM,CAAC,GAAG,IAAI,CAAC,cAAc,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;QACvC,IAAI,CAAC;YAAE,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAA;IACtC,CAAC;IAED,MAAM,CAAC,MAAY,EAAE,QAAmB,EAAE,EAAa;QACrD,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,oBAAoB;QACpB,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,IAAI,SAAS,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,CAAA;IAC9D,CAAC;IAED,OAAO,CACL,MAAY,EACZ,QAAmB,EACnB,SAAoB,EACpB,EAAa;QAEb,IAAI,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAAC;YAAE,OAAO,EAAE,EAAE,CAAA;QAC9C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,EAAE,EAAE,CAAC,CAAC,CAAA;YAClE,OAAM;SACP;QACD,SAAS,CAAC,eAAe,CAAC,MAAM,EAAE,QAAQ,CAAC,CAAA;QAE3C,qEAAqE;QACrE,4DAA4D;QAC5D,yDAAyD;QACzD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,KAAK,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,IAAI,EAAE,CAAC,CAAA;SAClD;QAED,KAAK,MAAM,CAAC,IAAI,SAAS,CAAC,cAAc,EAAE,EAAE;YAC1C,IAAI,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,EAAE;gBAC5D,SAAQ;aACT;YACD,KAAK,EAAE,CAAA;YACP,MAAM,cAAc,GAAG,CAAC,CAAC,aAAa,EAAE,CAAA;YACxC,IAAI,CAAC,CAAC,aAAa,EAAE;gBACnB,IAAI,CAAC,OAAO,CAAC,CAAC,EAAE,
cAAc,EAAE,SAAS,EAAE,IAAI,CAAC,CAAA;iBAC7C;gBACH,CAAC,CAAC,SAAS,CACT,CAAC,CAAC,EAAE,OAAO,EAAE,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,EAAE,OAAO,EAAE,SAAS,EAAE,IAAI,CAAC,EACzD,IAAI,CACL,CAAA;aACF;SACF;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,OAAO,CACL,MAAY,EACZ,OAAe,EACf,SAAoB,EACpB,EAAa;QAEb,SAAS,GAAG,SAAS,CAAC,aAAa,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;QAEpD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,KAAK,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,IAAI,EAAE,CAAC,CAAA;SAClD;QACD,KAAK,MAAM,CAAC,MAAM,EAAE,QAAQ,CAAC,IAAI,SAAS,CAAC,QAAQ,CAAC,OAAO,EAAE,EAAE;YAC7D,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,CAAC,KAAK,EAAE,EAAE,IAAI,CAAC,CAAA;SACxD;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,UAAU,CAAC,MAAY,EAAE,QAAmB,EAAE,EAAa;QACzD,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,oBAAoB;QACpB,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,IAAI,SAAS,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,CAAA;IAClE,CAAC;IAED,WAAW,CACT,MAAY,EACZ,QAAmB,EACnB,SAAoB,EACpB,EAAa;QAEb,IAAI,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAAC;YAAE,OAAO,EAAE,EAAE,CAAA;QAC9C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,QAAQ,CAAC,GAAG,EAAE,CACjB,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,EAAE,EAAE,CAAC,CAClD,CAAA;YACD,OAAM;SACP;QACD,SAAS,CAAC,eAAe,CAAC,MAAM,EAAE,QAAQ,CAAC,CAAA;QAE3C,qEAAqE;QACrE,4DAA4D;QAC5D,yDAAyD;QACzD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,IAAI,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAA;SACnC;QAED,KAAK,MAAM,CAAC,IAAI,SAAS,CAAC,cAAc,EAAE,EAAE;YAC1C,IAAI,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,EAAE;gBAC5D,SAAQ;aACT;YACD,KAAK,EAAE,CAAA;YACP,MAAM,QAAQ,GAAG,CAAC,CAAC,WAAW,EAAE,CAAA;YAChC,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,CAAC,CAAA;SAC/C;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,WAAW,CACT,MAAY,EACZ,OAAe,EACf,SAAoB,EACpB,EAAa;QAEb,SAAS,GAAG,SAAS,CAAC,aAAa,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;QAEpD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,IAAI,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAA;SACnC;QACD,KAAK,MAAM,CAAC,MAAM,EAAE,QAAQ,CAAC,IAAI,SAAS,CAAC,QAAQ,CAAC,OAAO,EAAE,EAAE;YAC7D,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,CAAC,KAAK,EAAE,EAAE,IAAI,CAAC,CAAA;SAC5D;QAED,IAAI,EAAE,CAAA;IACR,CAAC;CACF;AAED,MAAM,OAAO,UAEX,SAAQ,QAAW;IACnB,OAAO,CAMe;IAEtB,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,KAAK,CAAC,QAAQ,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;QAC3B,IAAI,CAAC,OAAO,GAAG,IAAI,GAAG,EAAgB,CAAA;IACxC,CAAC;IAGD,SAAS,CAAC,CAAgB;QACxB,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;IACrB,CAAC;IAED,KAAK,CAAC,IAAI;QACR,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QAClD,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,MAAM,IAAI,CAAC,IAAI,CAAC,KAAK,EAAE,CAAA;SACxB;QACD,MAAM,IAAI,OAAO,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE;YAC7B,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,E
AAE;gBACzC,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE;oBACxB,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,CAAA;iBACxB;qBAAM;oBACL,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,CAAA;iBAClB;YACH,CAAC,CAAC,CAAA;QACJ,CAAC,CAAC,CAAA;QACF,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;IAED,QAAQ;QACN,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QAClD,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,CAAA;SACtB;QACD,4DAA4D;QAC5D,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE;YAC7C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;gBAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QACpD,CAAC,CAAC,CAAA;QACF,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;CACF;AAED,MAAM,OAAO,UAEX,SAAQ,QAAW;IACnB,OAAO,CAMmC;IAE1C,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,KAAK,CAAC,QAAQ,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;QAC3B,IAAI,CAAC,OAAO,GAAG,IAAI,QAAQ,CAAC;YAC1B,MAAM,EAAE,IAAI,CAAC,MAAM;YACnB,UAAU,EAAE,IAAI;SACjB,CAAmB,CAAA;QACpB,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,OAAO,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAA;QAC7C,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAA;IAChD,CAAC;IAGD,SAAS,CAAC,CAAgB;QACxB,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,CAAA;QACrB,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,OAAO;YAAE,IAAI,CAAC,KAAK,EAAE,CAAA;IACzC,CAAC;IAED,MAAM;QACJ,MAAM,MAAM,GAAG,IAAI,CAAC,IAAI,CAAA;QACxB,IAAI,MAAM,CAAC,SAAS,EAAE,EAAE;YACtB,MAAM,CAAC,KAAK,EAAE,CAAC,IAAI,CAAC,GAAG,EAAE;gBACvB,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;YAC9D,CAAC,CAAC,CAAA;SACH;aAAM;YACL,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;SAC7D;QACD,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;IAED,UAAU;QACR,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,CAAA;SACtB;QACD,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;QACnE,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;CACF","sourcesContent":["/**\n * Single-use utility classes to provide functionality to the {@link Glob}\n * methods.\n *\n * @module\n */\nimport { Minipass } from 'minipass'\nimport { Path } from 'path-scurry'\nimport { Ignore, IgnoreLike } from './ignore.js'\n\n// XXX can we somehow make it so that it NEVER processes a given path more than\n// once, enough that the match set tracking is no longer needed? that'd speed\n// things up a lot. Or maybe bring back nounique, and skip it in that case?\n\n// a single minimatch set entry with 1 or more parts\nimport { Pattern } from './pattern.js'\nimport { Processor } from './processor.js'\n\nexport interface GlobWalkerOpts {\n absolute?: boolean\n allowWindowsEscape?: boolean\n cwd?: string | URL\n dot?: boolean\n dotRelative?: boolean\n follow?: boolean\n ignore?: string | string[] | IgnoreLike\n mark?: boolean\n matchBase?: boolean\n // Note: maxDepth here means \"maximum actual Path.depth()\",\n // not \"maximum depth beyond cwd\"\n maxDepth?: number\n nobrace?: boolean\n nocase?: boolean\n nodir?: boolean\n noext?: boolean\n noglobstar?: boolean\n platform?: NodeJS.Platform\n posix?: boolean\n realpath?: boolean\n root?: string\n stat?: boolean\n signal?: AbortSignal\n windowsPathsNoEscape?: boolean\n withFileTypes?: boolean\n}\n\nexport type GWOFileTypesTrue = GlobWalkerOpts & {\n withFileTypes: true\n}\nexport type GWOFileTypesFalse = GlobWalkerOpts & {\n withFileTypes: false\n}\nexport type GWOFileTypesUnset = GlobWalkerOpts & {\n withFileTypes?: undefined\n}\n\nexport type Result = O extends GWOFileTypesTrue\n ? 
Path\n : O extends GWOFileTypesFalse\n ? string\n : O extends GWOFileTypesUnset\n ? string\n : Path | string\n\nexport type Matches = O extends GWOFileTypesTrue\n ? Set\n : O extends GWOFileTypesFalse\n ? Set\n : O extends GWOFileTypesUnset\n ? Set\n : Set\n\nexport type MatchStream =\n O extends GWOFileTypesTrue\n ? Minipass\n : O extends GWOFileTypesFalse\n ? Minipass\n : O extends GWOFileTypesUnset\n ? Minipass\n : Minipass\n\nconst makeIgnore = (\n ignore: string | string[] | IgnoreLike,\n opts: GlobWalkerOpts\n): IgnoreLike =>\n typeof ignore === 'string'\n ? new Ignore([ignore], opts)\n : Array.isArray(ignore)\n ? new Ignore(ignore, opts)\n : ignore\n\n/**\n * basic walking utilities that all the glob walker types use\n */\nexport abstract class GlobUtil {\n path: Path\n patterns: Pattern[]\n opts: O\n seen: Set = new Set()\n paused: boolean = false\n aborted: boolean = false\n #onResume: (() => any)[] = []\n #ignore?: IgnoreLike\n #sep: '\\\\' | '/'\n signal?: AbortSignal\n maxDepth: number\n\n constructor(patterns: Pattern[], path: Path, opts: O)\n constructor(patterns: Pattern[], path: Path, opts: O) {\n this.patterns = patterns\n this.path = path\n this.opts = opts\n this.#sep = !opts.posix && opts.platform === 'win32' ? '\\\\' : '/'\n if (opts.ignore) {\n this.#ignore = makeIgnore(opts.ignore, opts)\n }\n // ignore, always set with maxDepth, but it's optional on the\n // GlobOptions type\n /* c8 ignore start */\n this.maxDepth = opts.maxDepth || Infinity\n /* c8 ignore stop */\n if (opts.signal) {\n this.signal = opts.signal\n this.signal.addEventListener('abort', () => {\n this.#onResume.length = 0\n })\n }\n }\n\n #ignored(path: Path): boolean {\n return this.seen.has(path) || !!this.#ignore?.ignored?.(path)\n }\n #childrenIgnored(path: Path): boolean {\n return !!this.#ignore?.childrenIgnored?.(path)\n }\n\n // backpressure mechanism\n pause() {\n this.paused = true\n }\n resume() {\n /* c8 ignore start */\n if (this.signal?.aborted) return\n /* c8 ignore stop */\n this.paused = false\n let fn: (() => any) | undefined = undefined\n while (!this.paused && (fn = this.#onResume.shift())) {\n fn()\n }\n }\n onResume(fn: () => any) {\n if (this.signal?.aborted) return\n /* c8 ignore start */\n if (!this.paused) {\n fn()\n } else {\n /* c8 ignore stop */\n this.#onResume.push(fn)\n }\n }\n\n // do the requisite realpath/stat checking, and return the path\n // to add or undefined to filter it out.\n async matchCheck(e: Path, ifDir: boolean): Promise {\n if (ifDir && this.opts.nodir) return undefined\n let rpc: Path | undefined\n if (this.opts.realpath) {\n rpc = e.realpathCached() || (await e.realpath())\n if (!rpc) return undefined\n e = rpc\n }\n const needStat = e.isUnknown() || this.opts.stat\n return this.matchCheckTest(needStat ? await e.lstat() : e, ifDir)\n }\n\n matchCheckTest(e: Path | undefined, ifDir: boolean): Path | undefined {\n return e &&\n (this.maxDepth === Infinity || e.depth() <= this.maxDepth) &&\n (!ifDir || e.canReaddir()) &&\n (!this.opts.nodir || !e.isDirectory()) &&\n !this.#ignored(e)\n ? e\n : undefined\n }\n\n matchCheckSync(e: Path, ifDir: boolean): Path | undefined {\n if (ifDir && this.opts.nodir) return undefined\n let rpc: Path | undefined\n if (this.opts.realpath) {\n rpc = e.realpathCached() || e.realpathSync()\n if (!rpc) return undefined\n e = rpc\n }\n const needStat = e.isUnknown() || this.opts.stat\n return this.matchCheckTest(needStat ? 
e.lstatSync() : e, ifDir)\n }\n\n abstract matchEmit(p: Result): void\n abstract matchEmit(p: string | Path): void\n\n matchFinish(e: Path, absolute: boolean) {\n if (this.#ignored(e)) return\n const abs =\n this.opts.absolute === undefined ? absolute : this.opts.absolute\n this.seen.add(e)\n const mark = this.opts.mark && e.isDirectory() ? this.#sep : ''\n // ok, we have what we need!\n if (this.opts.withFileTypes) {\n this.matchEmit(e)\n } else if (abs) {\n const abs = this.opts.posix ? e.fullpathPosix() : e.fullpath()\n this.matchEmit(abs + mark)\n } else {\n const rel = this.opts.posix ? e.relativePosix() : e.relative()\n const pre =\n this.opts.dotRelative && !rel.startsWith('..' + this.#sep)\n ? '.' + this.#sep\n : ''\n this.matchEmit(!rel ? '.' + mark : pre + rel + mark)\n }\n }\n\n async match(e: Path, absolute: boolean, ifDir: boolean): Promise {\n const p = await this.matchCheck(e, ifDir)\n if (p) this.matchFinish(p, absolute)\n }\n\n matchSync(e: Path, absolute: boolean, ifDir: boolean): void {\n const p = this.matchCheckSync(e, ifDir)\n if (p) this.matchFinish(p, absolute)\n }\n\n walkCB(target: Path, patterns: Pattern[], cb: () => any) {\n /* c8 ignore start */\n if (this.signal?.aborted) cb()\n /* c8 ignore stop */\n this.walkCB2(target, patterns, new Processor(this.opts), cb)\n }\n\n walkCB2(\n target: Path,\n patterns: Pattern[],\n processor: Processor,\n cb: () => any\n ) {\n if (this.#childrenIgnored(target)) return cb()\n if (this.signal?.aborted) cb()\n if (this.paused) {\n this.onResume(() => this.walkCB2(target, patterns, processor, cb))\n return\n }\n processor.processPatterns(target, patterns)\n\n // done processing. all of the above is sync, can be abstracted out.\n // subwalks is a map of paths to the entry filters they need\n // matches is a map of paths to [absolute, ifDir] tuples.\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n tasks++\n this.match(m, absolute, ifDir).then(() => next())\n }\n\n for (const t of processor.subwalkTargets()) {\n if (this.maxDepth !== Infinity && t.depth() >= this.maxDepth) {\n continue\n }\n tasks++\n const childrenCached = t.readdirCached()\n if (t.calledReaddir())\n this.walkCB3(t, childrenCached, processor, next)\n else {\n t.readdirCB(\n (_, entries) => this.walkCB3(t, entries, processor, next),\n true\n )\n }\n }\n\n next()\n }\n\n walkCB3(\n target: Path,\n entries: Path[],\n processor: Processor,\n cb: () => any\n ) {\n processor = processor.filterEntries(target, entries)\n\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n tasks++\n this.match(m, absolute, ifDir).then(() => next())\n }\n for (const [target, patterns] of processor.subwalks.entries()) {\n tasks++\n this.walkCB2(target, patterns, processor.child(), next)\n }\n\n next()\n }\n\n walkCBSync(target: Path, patterns: Pattern[], cb: () => any) {\n /* c8 ignore start */\n if (this.signal?.aborted) cb()\n /* c8 ignore stop */\n this.walkCB2Sync(target, patterns, new Processor(this.opts), cb)\n }\n\n walkCB2Sync(\n target: Path,\n patterns: Pattern[],\n processor: Processor,\n cb: () => any\n ) {\n if (this.#childrenIgnored(target)) return cb()\n if (this.signal?.aborted) cb()\n if (this.paused) {\n this.onResume(() =>\n this.walkCB2Sync(target, patterns, processor, cb)\n )\n return\n }\n processor.processPatterns(target, 
patterns)\n\n // done processing. all of the above is sync, can be abstracted out.\n // subwalks is a map of paths to the entry filters they need\n // matches is a map of paths to [absolute, ifDir] tuples.\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n this.matchSync(m, absolute, ifDir)\n }\n\n for (const t of processor.subwalkTargets()) {\n if (this.maxDepth !== Infinity && t.depth() >= this.maxDepth) {\n continue\n }\n tasks++\n const children = t.readdirSync()\n this.walkCB3Sync(t, children, processor, next)\n }\n\n next()\n }\n\n walkCB3Sync(\n target: Path,\n entries: Path[],\n processor: Processor,\n cb: () => any\n ) {\n processor = processor.filterEntries(target, entries)\n\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n this.matchSync(m, absolute, ifDir)\n }\n for (const [target, patterns] of processor.subwalks.entries()) {\n tasks++\n this.walkCB2Sync(target, patterns, processor.child(), next)\n }\n\n next()\n }\n}\n\nexport class GlobWalker<\n O extends GlobWalkerOpts = GlobWalkerOpts\n> extends GlobUtil {\n matches: O extends GWOFileTypesTrue\n ? Set\n : O extends GWOFileTypesFalse\n ? Set\n : O extends GWOFileTypesUnset\n ? Set\n : Set\n\n constructor(patterns: Pattern[], path: Path, opts: O) {\n super(patterns, path, opts)\n this.matches = new Set() as Matches\n }\n\n matchEmit(e: Result): void\n matchEmit(e: Path | string): void {\n this.matches.add(e)\n }\n\n async walk(): Promise> {\n if (this.signal?.aborted) throw this.signal.reason\n if (this.path.isUnknown()) {\n await this.path.lstat()\n }\n await new Promise((res, rej) => {\n this.walkCB(this.path, this.patterns, () => {\n if (this.signal?.aborted) {\n rej(this.signal.reason)\n } else {\n res(this.matches)\n }\n })\n })\n return this.matches\n }\n\n walkSync(): Matches {\n if (this.signal?.aborted) throw this.signal.reason\n if (this.path.isUnknown()) {\n this.path.lstatSync()\n }\n // nothing for the callback to do, because this never pauses\n this.walkCBSync(this.path, this.patterns, () => {\n if (this.signal?.aborted) throw this.signal.reason\n })\n return this.matches\n }\n}\n\nexport class GlobStream<\n O extends GlobWalkerOpts = GlobWalkerOpts\n> extends GlobUtil {\n results: O extends GWOFileTypesTrue\n ? Minipass\n : O extends GWOFileTypesFalse\n ? Minipass\n : O extends GWOFileTypesUnset\n ? 
Minipass\n : Minipass\n\n constructor(patterns: Pattern[], path: Path, opts: O) {\n super(patterns, path, opts)\n this.results = new Minipass({\n signal: this.signal,\n objectMode: true,\n }) as MatchStream\n this.results.on('drain', () => this.resume())\n this.results.on('resume', () => this.resume())\n }\n\n matchEmit(e: Result): void\n matchEmit(e: Path | string): void {\n this.results.write(e)\n if (!this.results.flowing) this.pause()\n }\n\n stream(): MatchStream {\n const target = this.path\n if (target.isUnknown()) {\n target.lstat().then(() => {\n this.walkCB(target, this.patterns, () => this.results.end())\n })\n } else {\n this.walkCB(target, this.patterns, () => this.results.end())\n }\n return this.results\n }\n\n streamSync(): MatchStream {\n if (this.path.isUnknown()) {\n this.path.lstatSync()\n }\n this.walkCBSync(this.path, this.patterns, () => this.results.end())\n return this.results\n }\n}\n"]} \ No newline at end of file +{"version":3,"file":"walker.js","sourceRoot":"","sources":["../../src/walker.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AACH,OAAO,EAAE,QAAQ,EAAE,MAAM,UAAU,CAAA;AAEnC,OAAO,EAAE,MAAM,EAAc,MAAM,aAAa,CAAA;AAQhD,OAAO,EAAE,SAAS,EAAE,MAAM,gBAAgB,CAAA;AAiE1C,MAAM,UAAU,GAAG,CACjB,MAAsC,EACtC,IAAoB,EACR,EAAE,CACd,OAAO,MAAM,KAAK,QAAQ;IACxB,CAAC,CAAC,IAAI,MAAM,CAAC,CAAC,MAAM,CAAC,EAAE,IAAI,CAAC;IAC5B,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC;QACvB,CAAC,CAAC,IAAI,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC;QAC1B,CAAC,CAAC,MAAM,CAAA;AAEZ;;GAEG;AACH,MAAM,OAAgB,QAAQ;IAC5B,IAAI,CAAM;IACV,QAAQ,CAAW;IACnB,IAAI,CAAG;IACP,IAAI,GAAc,IAAI,GAAG,EAAQ,CAAA;IACjC,MAAM,GAAY,KAAK,CAAA;IACvB,OAAO,GAAY,KAAK,CAAA;IACxB,SAAS,GAAkB,EAAE,CAAA;IAC7B,OAAO,CAAa;IACpB,IAAI,CAAY;IAChB,MAAM,CAAc;IACpB,QAAQ,CAAQ;IAGhB,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAA;QACxB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAA;QAChB,IAAI,CAAC,IAAI,GAAG,IAAI,CAAA;QAChB,IAAI,CAAC,IAAI,GAAG,CAAC,IAAI,CAAC,KAAK,IAAI,IAAI,CAAC,QAAQ,KAAK,OAAO,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,GAAG,CAAA;QACjE,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,OAAO,GAAG,UAAU,CAAC,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,CAAA;SAC7C;QACD,6DAA6D;QAC7D,mBAAmB;QACnB,qBAAqB;QACrB,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC,QAAQ,IAAI,QAAQ,CAAA;QACzC,oBAAoB;QACpB,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,MAAM,GAAG,IAAI,CAAC,MAAM,CAAA;YACzB,IAAI,CAAC,MAAM,CAAC,gBAAgB,CAAC,OAAO,EAAE,GAAG,EAAE;gBACzC,IAAI,CAAC,SAAS,CAAC,MAAM,GAAG,CAAC,CAAA;YAC3B,CAAC,CAAC,CAAA;SACH;IACH,CAAC;IAED,QAAQ,CAAC,IAAU;QACjB,OAAO,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,OAAO,EAAE,OAAO,EAAE,CAAC,IAAI,CAAC,CAAA;IAC/D,CAAC;IACD,gBAAgB,CAAC,IAAU;QACzB,OAAO,CAAC,CAAC,IAAI,CAAC,OAAO,EAAE,eAAe,EAAE,CAAC,IAAI,CAAC,CAAA;IAChD,CAAC;IAED,yBAAyB;IACzB,KAAK;QACH,IAAI,CAAC,MAAM,GAAG,IAAI,CAAA;IACpB,CAAC;IACD,MAAM;QACJ,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,OAAM;QAChC,oBAAoB;QACpB,IAAI,CAAC,MAAM,GAAG,KAAK,CAAA;QACnB,IAAI,EAAE,GAA4B,SAAS,CAAA;QAC3C,OAAO,CAAC,IAAI,CAAC,MAAM,IAAI,CAAC,EAAE,GAAG,IAAI,CAAC,SAAS,CAAC,KAAK,EAAE,CAAC,EAAE;YACpD,EAAE,EAAE,CAAA;SACL;IACH,CAAC;IACD,QAAQ,CAAC,EAAa;QACpB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,OAAM;QAChC,qBAAqB;QACrB,IAAI,CAAC,IAAI,CAAC,MAAM,EAAE;YAChB,EAAE,EAAE,CAAA;SACL;aAAM;YACL,oBAAoB;YACpB,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,EAAE,CAAC,CAAA;SACxB;IACH,CAAC;IAED,+DAA+D;IAC/D,wCAAwC;IACxC,KAAK,CAAC,UAAU,CAAC,CAAO,EAAE,KAAc;QACtC,IAAI,KAAK,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK;YAAE,OAAO,SAAS,CAAA;QAC9C,IAAI,GAAqB,CAAA;QACzB,IAAI,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;YACtB,GAAG,GAAG,CAAC,CAAC,cAAc,EAAE,IAAI,CAAC,MAAM,CAAC,CAAC,QAAQ,EAAE,CAAC,CAAA;YAChD,IAAI,CAAC,GAAG;gBAAE,OAAO,SAAS,CAA
A;YAC1B,CAAC,GAAG,GAAG,CAAA;SACR;QACD,MAAM,QAAQ,GAAG,CAAC,CAAC,SAAS,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAA;QAChD,MAAM,CAAC,GAAG,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC,CAAC,CAAC,CAAA;QACxC,IAAI,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK,IAAI,CAAC,EAAE,cAAc,EAAE,EAAE;YAC9D,MAAM,MAAM,GAAG,MAAM,CAAC,CAAC,QAAQ,EAAE,CAAA;YACjC,qBAAqB;YACrB,IAAI,MAAM,IAAI,CAAC,MAAM,CAAC,SAAS,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE;gBACpD,MAAM,MAAM,CAAC,KAAK,EAAE,CAAA;aACrB;YACD,oBAAoB;SACrB;QACD,OAAO,IAAI,CAAC,cAAc,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;IACtC,CAAC;IAED,cAAc,CAAC,CAAmB,EAAE,KAAc;QAChD,OAAO,CAAC;YACN,CAAC,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,CAAC;YAC1D,CAAC,CAAC,KAAK,IAAI,CAAC,CAAC,UAAU,EAAE,CAAC;YAC1B,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK,IAAI,CAAC,CAAC,CAAC,WAAW,EAAE,CAAC;YACtC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,KAAK;gBACf,CAAC,IAAI,CAAC,IAAI,CAAC,MAAM;gBACjB,CAAC,CAAC,CAAC,cAAc,EAAE;gBACnB,CAAC,CAAC,CAAC,cAAc,EAAE,EAAE,WAAW,EAAE,CAAC;YACrC,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;YACjB,CAAC,CAAC,CAAC;YACH,CAAC,CAAC,SAAS,CAAA;IACf,CAAC;IAED,cAAc,CAAC,CAAO,EAAE,KAAc;QACpC,IAAI,KAAK,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK;YAAE,OAAO,SAAS,CAAA;QAC9C,IAAI,GAAqB,CAAA;QACzB,IAAI,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE;YACtB,GAAG,GAAG,CAAC,CAAC,cAAc,EAAE,IAAI,CAAC,CAAC,YAAY,EAAE,CAAA;YAC5C,IAAI,CAAC,GAAG;gBAAE,OAAO,SAAS,CAAA;YAC1B,CAAC,GAAG,GAAG,CAAA;SACR;QACD,MAAM,QAAQ,GAAG,CAAC,CAAC,SAAS,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAA;QAChD,MAAM,CAAC,GAAG,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,SAAS,EAAE,CAAC,CAAC,CAAC,CAAC,CAAA;QACtC,IAAI,IAAI,CAAC,IAAI,CAAC,MAAM,IAAI,IAAI,CAAC,IAAI,CAAC,KAAK,IAAI,CAAC,EAAE,cAAc,EAAE,EAAE;YAC9D,MAAM,MAAM,GAAG,CAAC,CAAC,YAAY,EAAE,CAAA;YAC/B,IAAI,MAAM,IAAI,CAAC,MAAM,EAAE,SAAS,EAAE,IAAI,IAAI,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE;gBACrD,MAAM,CAAC,SAAS,EAAE,CAAA;aACnB;SACF;QACD,OAAO,IAAI,CAAC,cAAc,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;IACtC,CAAC;IAKD,WAAW,CAAC,CAAO,EAAE,QAAiB;QACpC,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;YAAE,OAAM;QAC5B,MAAM,GAAG,GACP,IAAI,CAAC,IAAI,CAAC,QAAQ,KAAK,SAAS,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,CAAA;QAClE,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;QAChB,MAAM,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC,IAAI,IAAI,CAAC,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAA;QAC/D,4BAA4B;QAC5B,IAAI,IAAI,CAAC,IAAI,CAAC,aAAa,EAAE;YAC3B,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,CAAA;SAClB;aAAM,IAAI,GAAG,EAAE;YACd,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,aAAa,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAA;YAC9D,IAAI,CAAC,SAAS,CAAC,GAAG,GAAG,IAAI,CAAC,CAAA;SAC3B;aAAM;YACL,MAAM,GAAG,GAAG,IAAI,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC,aAAa,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC,QAAQ,EAAE,CAAA;YAC9D,MAAM,GAAG,GACP,IAAI,CAAC,IAAI,CAAC,WAAW,IAAI,CAAC,GAAG,CAAC,UAAU,CAAC,IAAI,GAAG,IAAI,CAAC,IAAI,CAAC;gBACxD,CAAC,CAAC,GAAG,GAAG,IAAI,CAAC,IAAI;gBACjB,CAAC,CAAC,EAAE,CAAA;YACR,IAAI,CAAC,SAAS,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,GAAG,IAAI,CAAC,CAAC,CAAC,GAAG,GAAG,GAAG,GAAG,IAAI,CAAC,CAAA;SACrD;IACH,CAAC;IAED,KAAK,CAAC,KAAK,CAAC,CAAO,EAAE,QAAiB,EAAE,KAAc;QACpD,MAAM,CAAC,GAAG,MAAM,IAAI,CAAC,UAAU,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;QACzC,IAAI,CAAC;YAAE,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAA;IACtC,CAAC;IAED,SAAS,CAAC,CAAO,EAAE,QAAiB,EAAE,KAAc;QAClD,MAAM,CAAC,GAAG,IAAI,CAAC,cAAc,CAAC,CAAC,EAAE,KAAK,CAAC,CAAA;QACvC,IAAI,CAAC;YAAE,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,CAAC,CAAA;IACtC,CAAC;IAED,MAAM,CAAC,MAAY,EAAE,QAAmB,EAAE,EAAa;QACrD,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,oBAAoB;QACpB,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,IAAI,SAAS
,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,CAAA;IAC9D,CAAC;IAED,OAAO,CACL,MAAY,EACZ,QAAmB,EACnB,SAAoB,EACpB,EAAa;QAEb,IAAI,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAAC;YAAE,OAAO,EAAE,EAAE,CAAA;QAC9C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,QAAQ,CAAC,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,EAAE,EAAE,CAAC,CAAC,CAAA;YAClE,OAAM;SACP;QACD,SAAS,CAAC,eAAe,CAAC,MAAM,EAAE,QAAQ,CAAC,CAAA;QAE3C,qEAAqE;QACrE,4DAA4D;QAC5D,yDAAyD;QACzD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,KAAK,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,IAAI,EAAE,CAAC,CAAA;SAClD;QAED,KAAK,MAAM,CAAC,IAAI,SAAS,CAAC,cAAc,EAAE,EAAE;YAC1C,IAAI,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,EAAE;gBAC5D,SAAQ;aACT;YACD,KAAK,EAAE,CAAA;YACP,MAAM,cAAc,GAAG,CAAC,CAAC,aAAa,EAAE,CAAA;YACxC,IAAI,CAAC,CAAC,aAAa,EAAE;gBACnB,IAAI,CAAC,OAAO,CAAC,CAAC,EAAE,cAAc,EAAE,SAAS,EAAE,IAAI,CAAC,CAAA;iBAC7C;gBACH,CAAC,CAAC,SAAS,CACT,CAAC,CAAC,EAAE,OAAO,EAAE,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC,EAAE,OAAO,EAAE,SAAS,EAAE,IAAI,CAAC,EACzD,IAAI,CACL,CAAA;aACF;SACF;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,OAAO,CACL,MAAY,EACZ,OAAe,EACf,SAAoB,EACpB,EAAa;QAEb,SAAS,GAAG,SAAS,CAAC,aAAa,CAAC,MAAM,EAAE,OAAO,CAAC,CAAA;QAEpD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,KAAK,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,IAAI,EAAE,CAAC,CAAA;SAClD;QACD,KAAK,MAAM,CAAC,MAAM,EAAE,QAAQ,CAAC,IAAI,SAAS,CAAC,QAAQ,CAAC,OAAO,EAAE,EAAE;YAC7D,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,CAAC,KAAK,EAAE,EAAE,IAAI,CAAC,CAAA;SACxD;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,UAAU,CAAC,MAAY,EAAE,QAAmB,EAAE,EAAa;QACzD,qBAAqB;QACrB,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,oBAAoB;QACpB,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,IAAI,SAAS,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,CAAA;IAClE,CAAC;IAED,WAAW,CACT,MAAY,EACZ,QAAmB,EACnB,SAAoB,EACpB,EAAa;QAEb,IAAI,IAAI,CAAC,gBAAgB,CAAC,MAAM,CAAC;YAAE,OAAO,EAAE,EAAE,CAAA;QAC9C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,EAAE,EAAE,CAAA;QAC9B,IAAI,IAAI,CAAC,MAAM,EAAE;YACf,IAAI,CAAC,QAAQ,CAAC,GAAG,EAAE,CACjB,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,EAAE,EAAE,CAAC,CAClD,CAAA;YACD,OAAM;SACP;QACD,SAAS,CAAC,eAAe,CAAC,MAAM,EAAE,QAAQ,CAAC,CAAA;QAE3C,qEAAqE;QACrE,4DAA4D;QAC5D,yDAAyD;QACzD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,IAAI,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAA;SACnC;QAED,KAAK,MAAM,CAAC,IAAI,SAAS,CAAC,cAAc,EAAE,EAAE;YAC1C,IAAI,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,CAAC,CAAC,KAAK,EAAE,IAAI,IAAI,CAAC,QAAQ,EAAE;gBAC5D,SAAQ;aACT;YACD,KAAK,EAAE,CAAA;YACP,MAAM,QAAQ,GAAG,CAAC,CAAC,WAAW,EAAE,CAAA;YAChC,IAAI,CAAC,WAAW,CAAC,CAAC,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,CAAC,CAAA;SAC/C;QAED,IAAI,EAAE,CAAA;IACR,CAAC;IAED,WAAW,CACT,MAAY,EACZ,OAAe,EACf,SAAoB,EACpB,EAAa;QAEb,SAAS,GAAG,SAAS,CAAC,aAAa,CAAC,MAAM,EAAE,OAAO,C
AAC,CAAA;QAEpD,IAAI,KAAK,GAAG,CAAC,CAAA;QACb,MAAM,IAAI,GAAG,GAAG,EAAE;YAChB,IAAI,EAAE,KAAK,KAAK,CAAC;gBAAE,EAAE,EAAE,CAAA;QACzB,CAAC,CAAA;QAED,KAAK,MAAM,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,IAAI,SAAS,CAAC,OAAO,CAAC,OAAO,EAAE,EAAE;YAC9D,IAAI,IAAI,CAAC,QAAQ,CAAC,CAAC,CAAC;gBAAE,SAAQ;YAC9B,IAAI,CAAC,SAAS,CAAC,CAAC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAA;SACnC;QACD,KAAK,MAAM,CAAC,MAAM,EAAE,QAAQ,CAAC,IAAI,SAAS,CAAC,QAAQ,CAAC,OAAO,EAAE,EAAE;YAC7D,KAAK,EAAE,CAAA;YACP,IAAI,CAAC,WAAW,CAAC,MAAM,EAAE,QAAQ,EAAE,SAAS,CAAC,KAAK,EAAE,EAAE,IAAI,CAAC,CAAA;SAC5D;QAED,IAAI,EAAE,CAAA;IACR,CAAC;CACF;AAED,MAAM,OAAO,UAEX,SAAQ,QAAW;IACnB,OAAO,CAMe;IAEtB,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,KAAK,CAAC,QAAQ,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;QAC3B,IAAI,CAAC,OAAO,GAAG,IAAI,GAAG,EAAgB,CAAA;IACxC,CAAC;IAGD,SAAS,CAAC,CAAgB;QACxB,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,CAAC,CAAA;IACrB,CAAC;IAED,KAAK,CAAC,IAAI;QACR,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QAClD,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,MAAM,IAAI,CAAC,IAAI,CAAC,KAAK,EAAE,CAAA;SACxB;QACD,MAAM,IAAI,OAAO,CAAC,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE;YAC7B,IAAI,CAAC,MAAM,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE;gBACzC,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE;oBACxB,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,CAAA;iBACxB;qBAAM;oBACL,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,CAAA;iBAClB;YACH,CAAC,CAAC,CAAA;QACJ,CAAC,CAAC,CAAA;QACF,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;IAED,QAAQ;QACN,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;YAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QAClD,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,CAAA;SACtB;QACD,4DAA4D;QAC5D,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE;YAC7C,IAAI,IAAI,CAAC,MAAM,EAAE,OAAO;gBAAE,MAAM,IAAI,CAAC,MAAM,CAAC,MAAM,CAAA;QACpD,CAAC,CAAC,CAAA;QACF,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;CACF;AAED,MAAM,OAAO,UAEX,SAAQ,QAAW;IACnB,OAAO,CAMmC;IAE1C,YAAY,QAAmB,EAAE,IAAU,EAAE,IAAO;QAClD,KAAK,CAAC,QAAQ,EAAE,IAAI,EAAE,IAAI,CAAC,CAAA;QAC3B,IAAI,CAAC,OAAO,GAAG,IAAI,QAAQ,CAAC;YAC1B,MAAM,EAAE,IAAI,CAAC,MAAM;YACnB,UAAU,EAAE,IAAI;SACjB,CAAmB,CAAA;QACpB,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,OAAO,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAA;QAC7C,IAAI,CAAC,OAAO,CAAC,EAAE,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,CAAA;IAChD,CAAC;IAGD,SAAS,CAAC,CAAgB;QACxB,IAAI,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC,CAAC,CAAA;QACrB,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,OAAO;YAAE,IAAI,CAAC,KAAK,EAAE,CAAA;IACzC,CAAC;IAED,MAAM;QACJ,MAAM,MAAM,GAAG,IAAI,CAAC,IAAI,CAAA;QACxB,IAAI,MAAM,CAAC,SAAS,EAAE,EAAE;YACtB,MAAM,CAAC,KAAK,EAAE,CAAC,IAAI,CAAC,GAAG,EAAE;gBACvB,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;YAC9D,CAAC,CAAC,CAAA;SACH;aAAM;YACL,IAAI,CAAC,MAAM,CAAC,MAAM,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;SAC7D;QACD,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;IAED,UAAU;QACR,IAAI,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,EAAE;YACzB,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,CAAA;SACtB;QACD,IAAI,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,CAAC,QAAQ,EAAE,GAAG,EAAE,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC,CAAA;QACnE,OAAO,IAAI,CAAC,OAAO,CAAA;IACrB,CAAC;CACF","sourcesContent":["/**\n * Single-use utility classes to provide functionality to the {@link Glob}\n * methods.\n *\n * @module\n */\nimport { Minipass } from 'minipass'\nimport { Path } from 'path-scurry'\nimport { Ignore, IgnoreLike } from './ignore.js'\n\n// XXX can we somehow make it so that it NEVER processes a given path more than\n// once, enough that the match set tracking is no longer needed? 
that'd speed\n// things up a lot. Or maybe bring back nounique, and skip it in that case?\n\n// a single minimatch set entry with 1 or more parts\nimport { Pattern } from './pattern.js'\nimport { Processor } from './processor.js'\n\nexport interface GlobWalkerOpts {\n absolute?: boolean\n allowWindowsEscape?: boolean\n cwd?: string | URL\n dot?: boolean\n dotRelative?: boolean\n follow?: boolean\n ignore?: string | string[] | IgnoreLike\n mark?: boolean\n matchBase?: boolean\n // Note: maxDepth here means \"maximum actual Path.depth()\",\n // not \"maximum depth beyond cwd\"\n maxDepth?: number\n nobrace?: boolean\n nocase?: boolean\n nodir?: boolean\n noext?: boolean\n noglobstar?: boolean\n platform?: NodeJS.Platform\n posix?: boolean\n realpath?: boolean\n root?: string\n stat?: boolean\n signal?: AbortSignal\n windowsPathsNoEscape?: boolean\n withFileTypes?: boolean\n}\n\nexport type GWOFileTypesTrue = GlobWalkerOpts & {\n withFileTypes: true\n}\nexport type GWOFileTypesFalse = GlobWalkerOpts & {\n withFileTypes: false\n}\nexport type GWOFileTypesUnset = GlobWalkerOpts & {\n withFileTypes?: undefined\n}\n\nexport type Result = O extends GWOFileTypesTrue\n ? Path\n : O extends GWOFileTypesFalse\n ? string\n : O extends GWOFileTypesUnset\n ? string\n : Path | string\n\nexport type Matches = O extends GWOFileTypesTrue\n ? Set\n : O extends GWOFileTypesFalse\n ? Set\n : O extends GWOFileTypesUnset\n ? Set\n : Set\n\nexport type MatchStream =\n O extends GWOFileTypesTrue\n ? Minipass\n : O extends GWOFileTypesFalse\n ? Minipass\n : O extends GWOFileTypesUnset\n ? Minipass\n : Minipass\n\nconst makeIgnore = (\n ignore: string | string[] | IgnoreLike,\n opts: GlobWalkerOpts\n): IgnoreLike =>\n typeof ignore === 'string'\n ? new Ignore([ignore], opts)\n : Array.isArray(ignore)\n ? new Ignore(ignore, opts)\n : ignore\n\n/**\n * basic walking utilities that all the glob walker types use\n */\nexport abstract class GlobUtil {\n path: Path\n patterns: Pattern[]\n opts: O\n seen: Set = new Set()\n paused: boolean = false\n aborted: boolean = false\n #onResume: (() => any)[] = []\n #ignore?: IgnoreLike\n #sep: '\\\\' | '/'\n signal?: AbortSignal\n maxDepth: number\n\n constructor(patterns: Pattern[], path: Path, opts: O)\n constructor(patterns: Pattern[], path: Path, opts: O) {\n this.patterns = patterns\n this.path = path\n this.opts = opts\n this.#sep = !opts.posix && opts.platform === 'win32' ? 
'\\\\' : '/'\n if (opts.ignore) {\n this.#ignore = makeIgnore(opts.ignore, opts)\n }\n // ignore, always set with maxDepth, but it's optional on the\n // GlobOptions type\n /* c8 ignore start */\n this.maxDepth = opts.maxDepth || Infinity\n /* c8 ignore stop */\n if (opts.signal) {\n this.signal = opts.signal\n this.signal.addEventListener('abort', () => {\n this.#onResume.length = 0\n })\n }\n }\n\n #ignored(path: Path): boolean {\n return this.seen.has(path) || !!this.#ignore?.ignored?.(path)\n }\n #childrenIgnored(path: Path): boolean {\n return !!this.#ignore?.childrenIgnored?.(path)\n }\n\n // backpressure mechanism\n pause() {\n this.paused = true\n }\n resume() {\n /* c8 ignore start */\n if (this.signal?.aborted) return\n /* c8 ignore stop */\n this.paused = false\n let fn: (() => any) | undefined = undefined\n while (!this.paused && (fn = this.#onResume.shift())) {\n fn()\n }\n }\n onResume(fn: () => any) {\n if (this.signal?.aborted) return\n /* c8 ignore start */\n if (!this.paused) {\n fn()\n } else {\n /* c8 ignore stop */\n this.#onResume.push(fn)\n }\n }\n\n // do the requisite realpath/stat checking, and return the path\n // to add or undefined to filter it out.\n async matchCheck(e: Path, ifDir: boolean): Promise {\n if (ifDir && this.opts.nodir) return undefined\n let rpc: Path | undefined\n if (this.opts.realpath) {\n rpc = e.realpathCached() || (await e.realpath())\n if (!rpc) return undefined\n e = rpc\n }\n const needStat = e.isUnknown() || this.opts.stat\n const s = needStat ? await e.lstat() : e\n if (this.opts.follow && this.opts.nodir && s?.isSymbolicLink()) {\n const target = await s.realpath()\n /* c8 ignore start */\n if (target && (target.isUnknown() || this.opts.stat)) {\n await target.lstat()\n }\n /* c8 ignore stop */\n }\n return this.matchCheckTest(s, ifDir)\n }\n\n matchCheckTest(e: Path | undefined, ifDir: boolean): Path | undefined {\n return e &&\n (this.maxDepth === Infinity || e.depth() <= this.maxDepth) &&\n (!ifDir || e.canReaddir()) &&\n (!this.opts.nodir || !e.isDirectory()) &&\n (!this.opts.nodir ||\n !this.opts.follow ||\n !e.isSymbolicLink() ||\n !e.realpathCached()?.isDirectory()) &&\n !this.#ignored(e)\n ? e\n : undefined\n }\n\n matchCheckSync(e: Path, ifDir: boolean): Path | undefined {\n if (ifDir && this.opts.nodir) return undefined\n let rpc: Path | undefined\n if (this.opts.realpath) {\n rpc = e.realpathCached() || e.realpathSync()\n if (!rpc) return undefined\n e = rpc\n }\n const needStat = e.isUnknown() || this.opts.stat\n const s = needStat ? e.lstatSync() : e\n if (this.opts.follow && this.opts.nodir && s?.isSymbolicLink()) {\n const target = s.realpathSync()\n if (target && (target?.isUnknown() || this.opts.stat)) {\n target.lstatSync()\n }\n }\n return this.matchCheckTest(s, ifDir)\n }\n\n abstract matchEmit(p: Result): void\n abstract matchEmit(p: string | Path): void\n\n matchFinish(e: Path, absolute: boolean) {\n if (this.#ignored(e)) return\n const abs =\n this.opts.absolute === undefined ? absolute : this.opts.absolute\n this.seen.add(e)\n const mark = this.opts.mark && e.isDirectory() ? this.#sep : ''\n // ok, we have what we need!\n if (this.opts.withFileTypes) {\n this.matchEmit(e)\n } else if (abs) {\n const abs = this.opts.posix ? e.fullpathPosix() : e.fullpath()\n this.matchEmit(abs + mark)\n } else {\n const rel = this.opts.posix ? e.relativePosix() : e.relative()\n const pre =\n this.opts.dotRelative && !rel.startsWith('..' + this.#sep)\n ? '.' + this.#sep\n : ''\n this.matchEmit(!rel ? '.' 
+ mark : pre + rel + mark)\n }\n }\n\n async match(e: Path, absolute: boolean, ifDir: boolean): Promise {\n const p = await this.matchCheck(e, ifDir)\n if (p) this.matchFinish(p, absolute)\n }\n\n matchSync(e: Path, absolute: boolean, ifDir: boolean): void {\n const p = this.matchCheckSync(e, ifDir)\n if (p) this.matchFinish(p, absolute)\n }\n\n walkCB(target: Path, patterns: Pattern[], cb: () => any) {\n /* c8 ignore start */\n if (this.signal?.aborted) cb()\n /* c8 ignore stop */\n this.walkCB2(target, patterns, new Processor(this.opts), cb)\n }\n\n walkCB2(\n target: Path,\n patterns: Pattern[],\n processor: Processor,\n cb: () => any\n ) {\n if (this.#childrenIgnored(target)) return cb()\n if (this.signal?.aborted) cb()\n if (this.paused) {\n this.onResume(() => this.walkCB2(target, patterns, processor, cb))\n return\n }\n processor.processPatterns(target, patterns)\n\n // done processing. all of the above is sync, can be abstracted out.\n // subwalks is a map of paths to the entry filters they need\n // matches is a map of paths to [absolute, ifDir] tuples.\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n tasks++\n this.match(m, absolute, ifDir).then(() => next())\n }\n\n for (const t of processor.subwalkTargets()) {\n if (this.maxDepth !== Infinity && t.depth() >= this.maxDepth) {\n continue\n }\n tasks++\n const childrenCached = t.readdirCached()\n if (t.calledReaddir())\n this.walkCB3(t, childrenCached, processor, next)\n else {\n t.readdirCB(\n (_, entries) => this.walkCB3(t, entries, processor, next),\n true\n )\n }\n }\n\n next()\n }\n\n walkCB3(\n target: Path,\n entries: Path[],\n processor: Processor,\n cb: () => any\n ) {\n processor = processor.filterEntries(target, entries)\n\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n tasks++\n this.match(m, absolute, ifDir).then(() => next())\n }\n for (const [target, patterns] of processor.subwalks.entries()) {\n tasks++\n this.walkCB2(target, patterns, processor.child(), next)\n }\n\n next()\n }\n\n walkCBSync(target: Path, patterns: Pattern[], cb: () => any) {\n /* c8 ignore start */\n if (this.signal?.aborted) cb()\n /* c8 ignore stop */\n this.walkCB2Sync(target, patterns, new Processor(this.opts), cb)\n }\n\n walkCB2Sync(\n target: Path,\n patterns: Pattern[],\n processor: Processor,\n cb: () => any\n ) {\n if (this.#childrenIgnored(target)) return cb()\n if (this.signal?.aborted) cb()\n if (this.paused) {\n this.onResume(() =>\n this.walkCB2Sync(target, patterns, processor, cb)\n )\n return\n }\n processor.processPatterns(target, patterns)\n\n // done processing. 
all of the above is sync, can be abstracted out.\n // subwalks is a map of paths to the entry filters they need\n // matches is a map of paths to [absolute, ifDir] tuples.\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n this.matchSync(m, absolute, ifDir)\n }\n\n for (const t of processor.subwalkTargets()) {\n if (this.maxDepth !== Infinity && t.depth() >= this.maxDepth) {\n continue\n }\n tasks++\n const children = t.readdirSync()\n this.walkCB3Sync(t, children, processor, next)\n }\n\n next()\n }\n\n walkCB3Sync(\n target: Path,\n entries: Path[],\n processor: Processor,\n cb: () => any\n ) {\n processor = processor.filterEntries(target, entries)\n\n let tasks = 1\n const next = () => {\n if (--tasks === 0) cb()\n }\n\n for (const [m, absolute, ifDir] of processor.matches.entries()) {\n if (this.#ignored(m)) continue\n this.matchSync(m, absolute, ifDir)\n }\n for (const [target, patterns] of processor.subwalks.entries()) {\n tasks++\n this.walkCB2Sync(target, patterns, processor.child(), next)\n }\n\n next()\n }\n}\n\nexport class GlobWalker<\n O extends GlobWalkerOpts = GlobWalkerOpts\n> extends GlobUtil {\n matches: O extends GWOFileTypesTrue\n ? Set\n : O extends GWOFileTypesFalse\n ? Set\n : O extends GWOFileTypesUnset\n ? Set\n : Set\n\n constructor(patterns: Pattern[], path: Path, opts: O) {\n super(patterns, path, opts)\n this.matches = new Set() as Matches\n }\n\n matchEmit(e: Result): void\n matchEmit(e: Path | string): void {\n this.matches.add(e)\n }\n\n async walk(): Promise> {\n if (this.signal?.aborted) throw this.signal.reason\n if (this.path.isUnknown()) {\n await this.path.lstat()\n }\n await new Promise((res, rej) => {\n this.walkCB(this.path, this.patterns, () => {\n if (this.signal?.aborted) {\n rej(this.signal.reason)\n } else {\n res(this.matches)\n }\n })\n })\n return this.matches\n }\n\n walkSync(): Matches {\n if (this.signal?.aborted) throw this.signal.reason\n if (this.path.isUnknown()) {\n this.path.lstatSync()\n }\n // nothing for the callback to do, because this never pauses\n this.walkCBSync(this.path, this.patterns, () => {\n if (this.signal?.aborted) throw this.signal.reason\n })\n return this.matches\n }\n}\n\nexport class GlobStream<\n O extends GlobWalkerOpts = GlobWalkerOpts\n> extends GlobUtil {\n results: O extends GWOFileTypesTrue\n ? Minipass\n : O extends GWOFileTypesFalse\n ? Minipass\n : O extends GWOFileTypesUnset\n ? 
Minipass\n : Minipass\n\n constructor(patterns: Pattern[], path: Path, opts: O) {\n super(patterns, path, opts)\n this.results = new Minipass({\n signal: this.signal,\n objectMode: true,\n }) as MatchStream\n this.results.on('drain', () => this.resume())\n this.results.on('resume', () => this.resume())\n }\n\n matchEmit(e: Result): void\n matchEmit(e: Path | string): void {\n this.results.write(e)\n if (!this.results.flowing) this.pause()\n }\n\n stream(): MatchStream {\n const target = this.path\n if (target.isUnknown()) {\n target.lstat().then(() => {\n this.walkCB(target, this.patterns, () => this.results.end())\n })\n } else {\n this.walkCB(target, this.patterns, () => this.results.end())\n }\n return this.results\n }\n\n streamSync(): MatchStream {\n if (this.path.isUnknown()) {\n this.path.lstatSync()\n }\n this.walkCBSync(this.path, this.patterns, () => this.results.end())\n return this.results\n }\n}\n"]} \ No newline at end of file diff --git a/deps/npm/node_modules/glob/package.json b/deps/npm/node_modules/glob/package.json index ae0dfae17460bc..2caac096136c98 100644 --- a/deps/npm/node_modules/glob/package.json +++ b/deps/npm/node_modules/glob/package.json @@ -2,7 +2,7 @@ "author": "Isaac Z. Schlueter (https://blog.izs.me/)", "name": "glob", "description": "the most correct and second fastest glob implementation in JavaScript", - "version": "10.3.10", + "version": "10.3.12", "type": "module", "tshy": { "main": true, @@ -67,21 +67,22 @@ }, "dependencies": { "foreground-child": "^3.1.0", - "jackspeak": "^2.3.5", + "jackspeak": "^2.3.6", "minimatch": "^9.0.1", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0", - "path-scurry": "^1.10.1" + "minipass": "^7.0.4", + "path-scurry": "^1.10.2" }, "devDependencies": { - "@types/node": "^20.3.2", + "@types/node": "^20.11.30", "memfs": "^3.4.13", "mkdirp": "^3.0.1", "prettier": "^2.8.3", "rimraf": "^5.0.1", "sync-content": "^1.0.2", - "tap": "^18.1.4", - "tshy": "^1.2.2", - "typedoc": "^0.25.1", + "tap": "^18.7.2", + "ts-node": "^10.9.2", + "tshy": "^1.12.0", + "typedoc": "^0.25.12", "typescript": "^5.2.2" }, "tap": { diff --git a/deps/npm/node_modules/ini/lib/ini.js b/deps/npm/node_modules/ini/lib/ini.js index 724d69d85a0e45..0e8623ee699294 100644 --- a/deps/npm/node_modules/ini/lib/ini.js +++ b/deps/npm/node_modules/ini/lib/ini.js @@ -225,7 +225,7 @@ const safe = val => { return val.split(';').join('\\;').split('#').join('\\#') } -const unsafe = (val, doUnesc) => { +const unsafe = val => { val = (val || '').trim() if (isQuoted(val)) { // remove the single quotes before calling JSON.parse diff --git a/deps/npm/node_modules/ini/package.json b/deps/npm/node_modules/ini/package.json index c1a50e93c07f9a..caa36d223f09ca 100644 --- a/deps/npm/node_modules/ini/package.json +++ b/deps/npm/node_modules/ini/package.json @@ -2,7 +2,7 @@ "author": "GitHub Inc.", "name": "ini", "description": "An ini encoder/decoder for node", - "version": "4.1.1", + "version": "4.1.2", "repository": { "type": "git", "url": "https://github.com/npm/ini.git" @@ -10,7 +10,7 @@ "main": "lib/ini.js", "scripts": { "eslint": "eslint", - "lint": "eslint \"**/*.js\"", + "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "lintfix": "npm run lint -- --fix", "test": "tap", "snap": "tap", @@ -20,7 +20,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.15.1", + "@npmcli/template-oss": "4.21.3", "tap": "^16.0.1" }, "license": "ISC", @@ -33,7 +33,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by 
@npmcli/template-oss. Edits may be overwritten.", - "version": "4.15.1", + "version": "4.21.3", "publish": "true" }, "tap": { diff --git a/deps/npm/node_modules/init-package-json/lib/init-package-json.js b/deps/npm/node_modules/init-package-json/lib/init-package-json.js index 077ebd96ffc529..23fd3dc94dbe4b 100644 --- a/deps/npm/node_modules/init-package-json/lib/init-package-json.js +++ b/deps/npm/node_modules/init-package-json/lib/init-package-json.js @@ -1,29 +1,28 @@ const promzard = require('promzard') const path = require('path') -const fs = require('fs/promises') const semver = require('semver') -const read = require('read') +const { read } = require('read') const util = require('util') -const rpj = require('read-package-json') +const PackageJson = require('@npmcli/package-json') const def = require.resolve('./default-input.js') -// to validate the data object at the end as a worthwhile package -// and assign default values for things. -const _extraSet = rpj.extraSet -const _rpj = util.promisify(rpj) -const _rpjExtras = util.promisify(rpj.extras) -const readPkgJson = async (file, pkg) => { - // only do a few of these. no need for mans or contributors if they're in the files - rpj.extraSet = _extraSet.filter(f => f.name !== 'authors' && f.name !== 'mans') - const p = pkg ? _rpjExtras(file, pkg) : _rpj(file) - return p.catch(() => ({})).finally(() => rpj.extraSet = _extraSet) -} +const extras = [ + 'bundleDependencies', + 'gypfile', + 'serverjs', + 'scriptpath', + 'readme', + 'bin', + 'githead', + 'fillTypes', + 'normalizeData', +] const isYes = (c) => !!(c.get('yes') || c.get('y') || c.get('force') || c.get('f')) -const getConfig = (c = {}) => { +const getConfig = (c) => { // accept either a plain-jane object, or a config object with a "get" method. if (typeof c.get !== 'function') { const data = c @@ -35,25 +34,31 @@ const getConfig = (c = {}) => { return c } +// Coverage disabled because this is just walking back the fixPeople +// normalization from the normalizeData step and we don't need to re-test all +// of those paths. +/* istanbul ignore next */ const stringifyPerson = (p) => { - if (typeof p === 'string') { - return p - } - const { name = '', url, web, email, mail } = p + const { name, url, web, email, mail } = p const u = url || web const e = email || mail return `${name}${e ? ` <${e}>` : ''}${u ? ` (${u})` : ''}` } - -async function init (dir, input = def, c = {}) { +async function init (dir, + // TODO test for non-default definitions + /* istanbul ignore next */ + input = def, + c = {}) { const config = getConfig(c) const yes = isYes(config) const packageFile = path.resolve(dir, 'package.json') - const pkg = await readPkgJson(packageFile) + // read what's already there to inform our prompts + const pkg = await PackageJson.load(dir, { create: true }) + await pkg.normalize() - if (!semver.valid(pkg.version)) { - delete pkg.version + if (!semver.valid(pkg.content.version)) { + delete pkg.content.version } // make sure that the input is valid. 
if not, use the default @@ -61,73 +66,67 @@ async function init (dir, input = def, c = {}) { yes, config, filename: packageFile, - dirname: path.dirname(packageFile), - basename: path.basename(path.dirname(packageFile)), - package: pkg, + dirname: dir, + basename: path.basename(dir), + package: pkg.content, }, { backupFile: def }) for (const [k, v] of Object.entries(pzData)) { if (v != null) { - pkg[k] = v + pkg.content[k] = v } } - const pkgExtras = await readPkgJson(packageFile, pkg) + await pkg.normalize({ steps: extras }) - // turn the objects into somewhat more humane strings. - if (pkgExtras.author) { - pkgExtras.author = stringifyPerson(pkgExtras.author) - } - - for (const set of ['maintainers', 'contributors']) { - if (Array.isArray(pkgExtras[set])) { - pkgExtras[set] = pkgExtras[set].map(stringifyPerson) - } + // turn the objects back into somewhat more humane strings. + // "normalizeData" does this and there isn't a way to choose which of those steps happen + if (pkg.content.author) { + pkg.content.author = stringifyPerson(pkg.content.author) } // no need for the readme now. - delete pkgExtras.readme - delete pkgExtras.readmeFilename + delete pkg.content.readme + delete pkg.content.readmeFilename // really don't want to have this lying around in the file - delete pkgExtras._id + delete pkg.content._id // ditto - delete pkgExtras.gitHead + delete pkg.content.gitHead // if the repo is empty, remove it. - if (!pkgExtras.repository) { - delete pkgExtras.repository + if (!pkg.content.repository) { + delete pkg.content.repository } // readJson filters out empty descriptions, but init-package-json // traditionally leaves them alone - if (!pkgExtras.description) { - pkgExtras.description = pzData.description + if (!pkg.content.description) { + pkg.content.description = pzData.description } // optionalDependencies don't need to be repeated in two places - if (pkgExtras.dependencies) { - if (pkgExtras.optionalDependencies) { - for (const name of Object.keys(pkgExtras.optionalDependencies)) { - delete pkgExtras.dependencies[name] + if (pkg.content.dependencies) { + if (pkg.content.optionalDependencies) { + for (const name of Object.keys(pkg.content.optionalDependencies)) { + delete pkg.content.dependencies[name] } } - if (Object.keys(pkgExtras.dependencies).length === 0) { - delete pkgExtras.dependencies + if (Object.keys(pkg.content.dependencies).length === 0) { + delete pkg.content.dependencies } } - const stringified = JSON.stringify(pkgExtras, null, 2) + '\n' + const stringified = JSON.stringify(pkg.content, null, 2) + '\n' const msg = util.format('%s:\n\n%s\n', packageFile, stringified) - const write = () => fs.writeFile(packageFile, stringified, 'utf8') if (yes) { - await write() + await pkg.save() if (!config.get('silent')) { console.log(`Wrote to ${msg}`) } - return pkgExtras + return pkg.content } console.log(`About to write to ${msg}`) @@ -137,8 +136,8 @@ async function init (dir, input = def, c = {}) { return } - await write() - return pkgExtras + await pkg.save() + return pkg.content } module.exports = init diff --git a/deps/npm/node_modules/init-package-json/package.json b/deps/npm/node_modules/init-package-json/package.json index a164169a74df3c..e867964e101569 100644 --- a/deps/npm/node_modules/init-package-json/package.json +++ b/deps/npm/node_modules/init-package-json/package.json @@ -1,10 +1,10 @@ { "name": "init-package-json", - "version": "6.0.0", + "version": "6.0.2", "main": "lib/init-package-json.js", "scripts": { "test": "tap", - "lint": "eslint \"**/*.js\"", + 
"lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "postlint": "template-oss-check", "lintfix": "npm run lint -- --fix", "snap": "tap", @@ -19,28 +19,24 @@ "license": "ISC", "description": "A node module to get your node module started", "dependencies": { + "@npmcli/package-json": "^5.0.0", "npm-package-arg": "^11.0.0", "promzard": "^1.0.0", - "read": "^2.0.0", - "read-package-json": "^7.0.0", + "read": "^3.0.1", "semver": "^7.3.5", "validate-npm-package-license": "^3.0.4", "validate-npm-package-name": "^5.0.0" }, "devDependencies": { - "@npmcli/config": "^7.0.0", + "@npmcli/config": "^8.2.0", "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.18.0", + "@npmcli/template-oss": "4.21.3", "tap": "^16.0.1" }, "engines": { "node": "^16.14.0 || >=18.0.0" }, "tap": { - "statements": 95, - "branches": 78, - "lines": 94, - "jobs": 1, "test-ignore": "fixtures/", "nyc-arg": [ "--exclude", @@ -63,13 +59,7 @@ ], "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.18.0", - "publish": true, - "ciVersions": [ - "16.14.0", - "16.x", - "18.0.0", - "18.x" - ] + "version": "4.21.3", + "publish": true } } diff --git a/deps/npm/node_modules/libnpmaccess/package.json b/deps/npm/node_modules/libnpmaccess/package.json index 8b8459dcec251b..81a87d6395455e 100644 --- a/deps/npm/node_modules/libnpmaccess/package.json +++ b/deps/npm/node_modules/libnpmaccess/package.json @@ -1,6 +1,6 @@ { "name": "libnpmaccess", - "version": "8.0.2", + "version": "8.0.3", "description": "programmatic library for `npm access` commands", "author": "GitHub Inc.", "license": "ISC", @@ -30,7 +30,7 @@ "homepage": "https://npmjs.com/package/libnpmaccess", "dependencies": { "npm-package-arg": "^11.0.1", - "npm-registry-fetch": "^16.0.0" + "npm-registry-fetch": "^16.2.0" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/libnpmdiff/package.json b/deps/npm/node_modules/libnpmdiff/package.json index b130b128cf7372..f6ad40b482a934 100644 --- a/deps/npm/node_modules/libnpmdiff/package.json +++ b/deps/npm/node_modules/libnpmdiff/package.json @@ -1,6 +1,6 @@ { "name": "libnpmdiff", - "version": "6.0.7", + "version": "6.0.8", "description": "The registry diff", "repository": { "type": "git", @@ -49,12 +49,12 @@ "@npmcli/arborist": "^7.2.1", "@npmcli/disparity-colors": "^3.0.0", "@npmcli/installed-package-contents": "^2.0.2", - "binary-extensions": "^2.2.0", + "binary-extensions": "^2.3.0", "diff": "^5.1.0", - "minimatch": "^9.0.0", + "minimatch": "^9.0.4", "npm-package-arg": "^11.0.1", "pacote": "^17.0.4", - "tar": "^6.2.0" + "tar": "^6.2.1" }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. 
Edits may be overwritten.", diff --git a/deps/npm/node_modules/libnpmexec/lib/index.js b/deps/npm/node_modules/libnpmexec/lib/index.js index 34bb20769bc2c7..6f548b943e2e65 100644 --- a/deps/npm/node_modules/libnpmexec/lib/index.js +++ b/deps/npm/node_modules/libnpmexec/lib/index.js @@ -8,7 +8,7 @@ const log = require('proc-log') const npa = require('npm-package-arg') const npmlog = require('npmlog') const pacote = require('pacote') -const read = require('read') +const { read } = require('read') const semver = require('semver') const { fileExists, localFileExists } = require('./file-exists.js') diff --git a/deps/npm/node_modules/libnpmexec/package.json b/deps/npm/node_modules/libnpmexec/package.json index 2b4fb559ba4a2e..8b6a9d217a00d0 100644 --- a/deps/npm/node_modules/libnpmexec/package.json +++ b/deps/npm/node_modules/libnpmexec/package.json @@ -1,6 +1,6 @@ { "name": "libnpmexec", - "version": "7.0.8", + "version": "7.0.9", "files": [ "bin/", "lib/" @@ -66,7 +66,7 @@ "npmlog": "^7.0.1", "pacote": "^17.0.4", "proc-log": "^3.0.0", - "read": "^2.0.0", + "read": "^3.0.1", "read-package-json-fast": "^3.0.2", "semver": "^7.3.7", "walk-up-path": "^3.0.1" diff --git a/deps/npm/node_modules/libnpmfund/package.json b/deps/npm/node_modules/libnpmfund/package.json index 66889c1fa6d5b5..994538c89fce17 100644 --- a/deps/npm/node_modules/libnpmfund/package.json +++ b/deps/npm/node_modules/libnpmfund/package.json @@ -1,6 +1,6 @@ { "name": "libnpmfund", - "version": "5.0.5", + "version": "5.0.6", "main": "lib/index.js", "files": [ "bin/", diff --git a/deps/npm/node_modules/libnpmhook/package.json b/deps/npm/node_modules/libnpmhook/package.json index 0bd822abba2c3f..7613c1c86fbc1c 100644 --- a/deps/npm/node_modules/libnpmhook/package.json +++ b/deps/npm/node_modules/libnpmhook/package.json @@ -1,6 +1,6 @@ { "name": "libnpmhook", - "version": "10.0.1", + "version": "10.0.2", "description": "programmatic API for managing npm registry hooks", "main": "lib/index.js", "files": [ @@ -31,7 +31,7 @@ "license": "ISC", "dependencies": { "aproba": "^2.0.0", - "npm-registry-fetch": "^16.0.0" + "npm-registry-fetch": "^16.2.0" }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", diff --git a/deps/npm/node_modules/libnpmorg/package.json b/deps/npm/node_modules/libnpmorg/package.json index a0aedb79b6084c..49671afd46371a 100644 --- a/deps/npm/node_modules/libnpmorg/package.json +++ b/deps/npm/node_modules/libnpmorg/package.json @@ -1,6 +1,6 @@ { "name": "libnpmorg", - "version": "6.0.2", + "version": "6.0.3", "description": "Programmatic api for `npm org` commands", "author": "GitHub Inc.", "main": "lib/index.js", @@ -42,7 +42,7 @@ "homepage": "https://npmjs.com/package/libnpmorg", "dependencies": { "aproba": "^2.0.0", - "npm-registry-fetch": "^16.0.0" + "npm-registry-fetch": "^16.2.0" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/libnpmpack/package.json b/deps/npm/node_modules/libnpmpack/package.json index 1d504fca4ec9b6..d49a17aa39f838 100644 --- a/deps/npm/node_modules/libnpmpack/package.json +++ b/deps/npm/node_modules/libnpmpack/package.json @@ -1,6 +1,6 @@ { "name": "libnpmpack", - "version": "6.0.7", + "version": "6.0.8", "description": "Programmatic API for the bits behind npm pack", "author": "GitHub Inc.", "main": "lib/index.js", diff --git a/deps/npm/node_modules/libnpmpublish/package.json b/deps/npm/node_modules/libnpmpublish/package.json index 67f63816d90d59..34f642794af40f 100644 --- a/deps/npm/node_modules/libnpmpublish/package.json +++ 
b/deps/npm/node_modules/libnpmpublish/package.json @@ -1,6 +1,6 @@ { "name": "libnpmpublish", - "version": "9.0.4", + "version": "9.0.5", "description": "Programmatic API for the bits behind npm publish and unpublish", "author": "GitHub Inc.", "main": "lib/index.js", @@ -41,7 +41,7 @@ "ci-info": "^4.0.0", "normalize-package-data": "^6.0.0", "npm-package-arg": "^11.0.1", - "npm-registry-fetch": "^16.0.0", + "npm-registry-fetch": "^16.2.0", "proc-log": "^3.0.0", "semver": "^7.3.7", "sigstore": "^2.2.0", diff --git a/deps/npm/node_modules/libnpmsearch/package.json b/deps/npm/node_modules/libnpmsearch/package.json index 42cb78839081cd..c27673d2202c06 100644 --- a/deps/npm/node_modules/libnpmsearch/package.json +++ b/deps/npm/node_modules/libnpmsearch/package.json @@ -1,6 +1,6 @@ { "name": "libnpmsearch", - "version": "7.0.1", + "version": "7.0.2", "description": "Programmatic API for searching in npm and compatible registries.", "author": "GitHub Inc.", "main": "lib/index.js", @@ -38,7 +38,7 @@ "bugs": "https://github.com/npm/libnpmsearch/issues", "homepage": "https://npmjs.com/package/libnpmsearch", "dependencies": { - "npm-registry-fetch": "^16.0.0" + "npm-registry-fetch": "^16.2.0" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/libnpmteam/package.json b/deps/npm/node_modules/libnpmteam/package.json index bafeeb3fcdc4c6..110304fa0a156d 100644 --- a/deps/npm/node_modules/libnpmteam/package.json +++ b/deps/npm/node_modules/libnpmteam/package.json @@ -1,7 +1,7 @@ { "name": "libnpmteam", "description": "npm Team management APIs", - "version": "6.0.1", + "version": "6.0.2", "author": "GitHub Inc.", "license": "ISC", "main": "lib/index.js", @@ -32,7 +32,7 @@ "homepage": "https://npmjs.com/package/libnpmteam", "dependencies": { "aproba": "^2.0.0", - "npm-registry-fetch": "^16.0.0" + "npm-registry-fetch": "^16.2.0" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/minimatch/dist/cjs/assert-valid-pattern.js b/deps/npm/node_modules/minimatch/dist/commonjs/assert-valid-pattern.js similarity index 100% rename from deps/npm/node_modules/minimatch/dist/cjs/assert-valid-pattern.js rename to deps/npm/node_modules/minimatch/dist/commonjs/assert-valid-pattern.js diff --git a/deps/npm/node_modules/minimatch/dist/cjs/ast.js b/deps/npm/node_modules/minimatch/dist/commonjs/ast.js similarity index 99% rename from deps/npm/node_modules/minimatch/dist/cjs/ast.js rename to deps/npm/node_modules/minimatch/dist/commonjs/ast.js index 0b0cc8f3c50b3d..9e1f9e765c597e 100644 --- a/deps/npm/node_modules/minimatch/dist/cjs/ast.js +++ b/deps/npm/node_modules/minimatch/dist/commonjs/ast.js @@ -338,6 +338,9 @@ class AST { _glob: glob, }); } + get options() { + return this.#options; + } // returns the string match, the regexp source, whether there's magic // in the regexp (so a regular expression is required) and whether or // not the uflag is needed for the regular expression (for posix classes) diff --git a/deps/npm/node_modules/minimatch/dist/cjs/brace-expressions.js b/deps/npm/node_modules/minimatch/dist/commonjs/brace-expressions.js similarity index 100% rename from deps/npm/node_modules/minimatch/dist/cjs/brace-expressions.js rename to deps/npm/node_modules/minimatch/dist/commonjs/brace-expressions.js diff --git a/deps/npm/node_modules/minimatch/dist/cjs/escape.js b/deps/npm/node_modules/minimatch/dist/commonjs/escape.js similarity index 100% rename from deps/npm/node_modules/minimatch/dist/cjs/escape.js rename to 
deps/npm/node_modules/minimatch/dist/commonjs/escape.js diff --git a/deps/npm/node_modules/minimatch/dist/cjs/index.js b/deps/npm/node_modules/minimatch/dist/commonjs/index.js similarity index 99% rename from deps/npm/node_modules/minimatch/dist/cjs/index.js rename to deps/npm/node_modules/minimatch/dist/commonjs/index.js index d70e681fef5d7d..d05f8b47f1efb4 100644 --- a/deps/npm/node_modules/minimatch/dist/cjs/index.js +++ b/deps/npm/node_modules/minimatch/dist/commonjs/index.js @@ -344,6 +344,7 @@ class Minimatch { globParts = this.levelOneOptimize(globParts); } else { + // just collapse multiple ** portions into one globParts = this.adjascentGlobstarOptimize(globParts); } return globParts; @@ -833,7 +834,11 @@ class Minimatch { fastTest = dotStarTest; } const re = ast_js_1.AST.fromGlob(pattern, this.options).toMMPattern(); - return fastTest ? Object.assign(re, { test: fastTest }) : re; + if (fastTest && typeof re === 'object') { + // Avoids overriding in frozen environments + Reflect.defineProperty(re, 'test', { value: fastTest }); + } + return re; } makeRe() { if (this.regexp || this.regexp === false) diff --git a/deps/npm/node_modules/minimatch/dist/cjs/package.json b/deps/npm/node_modules/minimatch/dist/commonjs/package.json similarity index 100% rename from deps/npm/node_modules/minimatch/dist/cjs/package.json rename to deps/npm/node_modules/minimatch/dist/commonjs/package.json diff --git a/deps/npm/node_modules/minimatch/dist/cjs/unescape.js b/deps/npm/node_modules/minimatch/dist/commonjs/unescape.js similarity index 100% rename from deps/npm/node_modules/minimatch/dist/cjs/unescape.js rename to deps/npm/node_modules/minimatch/dist/commonjs/unescape.js diff --git a/deps/npm/node_modules/minimatch/dist/mjs/assert-valid-pattern.js b/deps/npm/node_modules/minimatch/dist/esm/assert-valid-pattern.js similarity index 100% rename from deps/npm/node_modules/minimatch/dist/mjs/assert-valid-pattern.js rename to deps/npm/node_modules/minimatch/dist/esm/assert-valid-pattern.js diff --git a/deps/npm/node_modules/minimatch/dist/mjs/ast.js b/deps/npm/node_modules/minimatch/dist/esm/ast.js similarity index 99% rename from deps/npm/node_modules/minimatch/dist/mjs/ast.js rename to deps/npm/node_modules/minimatch/dist/esm/ast.js index 7fb1f83e6182a0..02c6bda68427fc 100644 --- a/deps/npm/node_modules/minimatch/dist/mjs/ast.js +++ b/deps/npm/node_modules/minimatch/dist/esm/ast.js @@ -335,6 +335,9 @@ export class AST { _glob: glob, }); } + get options() { + return this.#options; + } // returns the string match, the regexp source, whether there's magic // in the regexp (so a regular expression is required) and whether or // not the uflag is needed for the regular expression (for posix classes) diff --git a/deps/npm/node_modules/minimatch/dist/mjs/brace-expressions.js b/deps/npm/node_modules/minimatch/dist/esm/brace-expressions.js similarity index 100% rename from deps/npm/node_modules/minimatch/dist/mjs/brace-expressions.js rename to deps/npm/node_modules/minimatch/dist/esm/brace-expressions.js diff --git a/deps/npm/node_modules/minimatch/dist/mjs/escape.js b/deps/npm/node_modules/minimatch/dist/esm/escape.js similarity index 100% rename from deps/npm/node_modules/minimatch/dist/mjs/escape.js rename to deps/npm/node_modules/minimatch/dist/esm/escape.js diff --git a/deps/npm/node_modules/minimatch/dist/mjs/index.js b/deps/npm/node_modules/minimatch/dist/esm/index.js similarity index 99% rename from deps/npm/node_modules/minimatch/dist/mjs/index.js rename to 
deps/npm/node_modules/minimatch/dist/esm/index.js index 831b6a67f63fb4..ff6319369ccd01 100644 --- a/deps/npm/node_modules/minimatch/dist/mjs/index.js +++ b/deps/npm/node_modules/minimatch/dist/esm/index.js @@ -332,6 +332,7 @@ export class Minimatch { globParts = this.levelOneOptimize(globParts); } else { + // just collapse multiple ** portions into one globParts = this.adjascentGlobstarOptimize(globParts); } return globParts; @@ -821,7 +822,11 @@ export class Minimatch { fastTest = dotStarTest; } const re = AST.fromGlob(pattern, this.options).toMMPattern(); - return fastTest ? Object.assign(re, { test: fastTest }) : re; + if (fastTest && typeof re === 'object') { + // Avoids overriding in frozen environments + Reflect.defineProperty(re, 'test', { value: fastTest }); + } + return re; } makeRe() { if (this.regexp || this.regexp === false) diff --git a/deps/npm/node_modules/minimatch/dist/mjs/package.json b/deps/npm/node_modules/minimatch/dist/esm/package.json similarity index 100% rename from deps/npm/node_modules/minimatch/dist/mjs/package.json rename to deps/npm/node_modules/minimatch/dist/esm/package.json diff --git a/deps/npm/node_modules/minimatch/dist/mjs/unescape.js b/deps/npm/node_modules/minimatch/dist/esm/unescape.js similarity index 100% rename from deps/npm/node_modules/minimatch/dist/mjs/unescape.js rename to deps/npm/node_modules/minimatch/dist/esm/unescape.js diff --git a/deps/npm/node_modules/minimatch/package.json b/deps/npm/node_modules/minimatch/package.json index 061c3b9f343306..2c82c03981152f 100644 --- a/deps/npm/node_modules/minimatch/package.json +++ b/deps/npm/node_modules/minimatch/package.json @@ -2,23 +2,23 @@ "author": "Isaac Z. Schlueter (http://blog.izs.me)", "name": "minimatch", "description": "a glob matcher in javascript", - "version": "9.0.3", + "version": "9.0.4", "repository": { "type": "git", "url": "git://github.com/isaacs/minimatch.git" }, - "main": "./dist/cjs/index.js", - "module": "./dist/mjs/index.js", - "types": "./dist/cjs/index.d.ts", + "main": "./dist/commonjs/index.js", + "types": "./dist/commonjs/index.d.ts", "exports": { + "./package.json": "./package.json", ".": { "import": { - "types": "./dist/mjs/index.d.ts", - "default": "./dist/mjs/index.js" + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" }, "require": { - "types": "./dist/cjs/index.d.ts", - "default": "./dist/cjs/index.js" + "types": "./dist/commonjs/index.d.ts", + "default": "./dist/commonjs/index.js" } } }, @@ -29,13 +29,11 @@ "preversion": "npm test", "postversion": "npm publish", "prepublishOnly": "git push origin --follow-tags", - "preprepare": "rm -rf dist", - "prepare": "tsc -p tsconfig.json && tsc -p tsconfig-esm.json", - "postprepare": "bash fixup.sh", + "prepare": "tshy", "pretest": "npm run prepare", "presnap": "npm run prepare", - "test": "c8 tap", - "snap": "c8 tap", + "test": "tap", + "snap": "tap", "format": "prettier --write . 
--loglevel warn", "benchmark": "node benchmark/index.js", "typedoc": "typedoc --tsconfig tsconfig-esm.json ./src/*.ts" @@ -61,26 +59,24 @@ "@types/brace-expansion": "^1.1.0", "@types/node": "^18.15.11", "@types/tap": "^15.0.8", - "c8": "^7.12.0", "eslint-config-prettier": "^8.6.0", "mkdirp": "1", "prettier": "^2.8.2", - "tap": "^16.3.7", + "tap": "^18.7.2", "ts-node": "^10.9.1", + "tshy": "^1.12.0", "typedoc": "^0.23.21", "typescript": "^4.9.3" }, - "tap": { - "coverage": false, - "node-arg": [ - "--no-warnings", - "--loader", - "ts-node/esm" - ], - "ts": false - }, "funding": { "url": "https://github.com/sponsors/isaacs" }, - "license": "ISC" + "license": "ISC", + "tshy": { + "exports": { + "./package.json": "./package.json", + ".": "./src/index.ts" + } + }, + "type": "module" } diff --git a/deps/npm/node_modules/node-gyp/.release-please-manifest.json b/deps/npm/node_modules/node-gyp/.release-please-manifest.json new file mode 100644 index 00000000000000..1842506cfa97f8 --- /dev/null +++ b/deps/npm/node_modules/node-gyp/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "10.1.0" +} diff --git a/deps/npm/node_modules/node-gyp/CHANGELOG.md b/deps/npm/node_modules/node-gyp/CHANGELOG.md index 98315add5e0d46..9db4f9f95288fd 100644 --- a/deps/npm/node_modules/node-gyp/CHANGELOG.md +++ b/deps/npm/node_modules/node-gyp/CHANGELOG.md @@ -1,5 +1,34 @@ # Changelog +## [10.1.0](https://github.com/nodejs/node-gyp/compare/v10.0.1...v10.1.0) (2024-03-13) + + +### Features + +* improve visual studio detection ([#2957](https://github.com/nodejs/node-gyp/issues/2957)) ([109e3d4](https://github.com/nodejs/node-gyp/commit/109e3d4245504a7b75c99f578e1203c0ef4b518e)) + + +### Core + +* add support for locally installed headers ([#2964](https://github.com/nodejs/node-gyp/issues/2964)) ([3298731](https://github.com/nodejs/node-gyp/commit/329873141f0d3e3787d3c006801431da04e4ed0c)) +* **deps:** bump actions/setup-python from 4 to 5 ([#2960](https://github.com/nodejs/node-gyp/issues/2960)) ([3f0df7e](https://github.com/nodejs/node-gyp/commit/3f0df7e9334e49e8c7f6fdbbb9e1e6c5a8cca53b)) +* **deps:** bump google-github-actions/release-please-action ([#2961](https://github.com/nodejs/node-gyp/issues/2961)) ([b1f1808](https://github.com/nodejs/node-gyp/commit/b1f1808bfff0d51e6d3eb696ab6a5b89b7b9630c)) +* print Python executable path using UTF-8 ([#2995](https://github.com/nodejs/node-gyp/issues/2995)) ([c472912](https://github.com/nodejs/node-gyp/commit/c4729129daa9bb5204246b857826fb391ac961e1)) +* update supported vs versions ([#2959](https://github.com/nodejs/node-gyp/issues/2959)) ([391cc5b](https://github.com/nodejs/node-gyp/commit/391cc5b9b25cffe0cb2edcba3583414a771b4a15)) + + +### Doc + +* npm is currently v10 ([#2970](https://github.com/nodejs/node-gyp/issues/2970)) ([7705a22](https://github.com/nodejs/node-gyp/commit/7705a22f31a62076e9f8429780a459f4ad71ea4c)) +* remove outdated Node versions from readme ([#2955](https://github.com/nodejs/node-gyp/issues/2955)) ([ae8478e](https://github.com/nodejs/node-gyp/commit/ae8478ec32d9b2fa71b591ac22cdf867ef2e9a7d)) +* remove outdated update engines.node reference in 10.0.0 changelog ([b42e796](https://github.com/nodejs/node-gyp/commit/b42e7966177f006f3d1aab1d27885d8372c8ed01)) + + +### Miscellaneous + +* only run release please on push ([cff9ac2](https://github.com/nodejs/node-gyp/commit/cff9ac2c3083769a383e00bc60b91562f03116e3)) +* upgrade release please action from v2 to v4 ([#2982](https://github.com/nodejs/node-gyp/issues/2982)) 
([0035d8e](https://github.com/nodejs/node-gyp/commit/0035d8e9dc98b94f0bc8cd9023a6fa635003703e)) + ### [10.0.1](https://www.github.com/nodejs/node-gyp/compare/v10.0.0...v10.0.1) (2023-11-02) @@ -23,7 +52,6 @@ * the `Gyp` class exported is now created using ECMAScript classes and therefore might have small differences to classes that were previously created with `util.inherits`. * All internal functions have been coverted to return promises and no longer accept callbacks. This is not a breaking change for users but may be breaking to consumers of `node-gyp` if you are requiring internal functions directly. * `node-gyp` now supports node `^16.14.0 || >=18.0.0` -* update engines.node to ^14.17.0 || ^16.13.0 || >=18.0.0 ### Features @@ -706,11 +734,11 @@ Republish of v5.0.6 with unnecessary tarball removed from pack file. * [[`94c39c604e`](https://github.com/nodejs/node-gyp/commit/94c39c604e)] - **gyp**: fix ninja build failure (GYP patch) (Daniel Bevenius) [nodejs/node#12484](https://github.com/nodejs/node/pull/12484) * [[`e8ea74e0fa`](https://github.com/nodejs/node-gyp/commit/e8ea74e0fa)] - **tools**: patch gyp to avoid xcrun errors (Ujjwal Sharma) [nodejs/node#21520](https://github.com/nodejs/node/pull/21520) * [[`ea9aff44f2`](https://github.com/nodejs/node-gyp/commit/ea9aff44f2)] - **tools**: fix "the the" typos in comments (Masashi Hirano) [nodejs/node#20716](https://github.com/nodejs/node/pull/20716) -* [[`207e5aa4fd`](https://github.com/nodejs/node-gyp/commit/207e5aa4fd)] - **gyp**: implement LD/LDXX for ninja and FIPS (Sam Roberts) +* [[`207e5aa4fd`](https://github.com/nodejs/node-gyp/commit/207e5aa4fd)] - **gyp**: implement LD/LDXX for ninja and FIPS (Sam Roberts) * [[`b416c5f4b7`](https://github.com/nodejs/node-gyp/commit/b416c5f4b7)] - **gyp**: enable cctest to use objects (gyp part) (Daniel Bevenius) [nodejs/node#12450](https://github.com/nodejs/node/pull/12450) * [[`40692d016b`](https://github.com/nodejs/node-gyp/commit/40692d016b)] - **gyp**: add compile\_commands.json gyp generator (Ben Noordhuis) [nodejs/node#12450](https://github.com/nodejs/node/pull/12450) * [[`fc3c4e2b10`](https://github.com/nodejs/node-gyp/commit/fc3c4e2b10)] - **gyp**: float gyp patch for long filenames (Anna Henningsen) [nodejs/node#7963](https://github.com/nodejs/node/pull/7963) -* [[`8aedbfdef6`](https://github.com/nodejs/node-gyp/commit/8aedbfdef6)] - **gyp**: backport GYP fix to fix AIX shared suffix (Stewart Addison) +* [[`8aedbfdef6`](https://github.com/nodejs/node-gyp/commit/8aedbfdef6)] - **gyp**: backport GYP fix to fix AIX shared suffix (Stewart Addison) * [[`6cd84b84fc`](https://github.com/nodejs/node-gyp/commit/6cd84b84fc)] - **test**: formatting and minor fixes for execFileSync replacement (Rod Vagg) [#1521](https://github.com/nodejs/node-gyp/pull/1521) * [[`60e421363f`](https://github.com/nodejs/node-gyp/commit/60e421363f)] - **test**: added test/processExecSync.js for when execFileSync is not available. (Rohit Hazra) [#1492](https://github.com/nodejs/node-gyp/pull/1492) * [[`969447c5bd`](https://github.com/nodejs/node-gyp/commit/969447c5bd)] - **deps**: bump request to 2.8.7, fixes heok/hawk issues (Rohit Hazra) [#1492](https://github.com/nodejs/node-gyp/pull/1492) @@ -772,7 +800,7 @@ Republish of v5.0.6 with unnecessary tarball removed from pack file. 
## v3.5.0 2017-01-10 -* [[`762d19a39e`](https://github.com/nodejs/node-gyp/commit/762d19a39e)] - \[doc\] merge History.md and CHANGELOG.md (Rod Vagg) +* [[`762d19a39e`](https://github.com/nodejs/node-gyp/commit/762d19a39e)] - \[doc\] merge History.md and CHANGELOG.md (Rod Vagg) * [[`80fc5c3d31`](https://github.com/nodejs/node-gyp/commit/80fc5c3d31)] - Fix deprecated dependency warning (Simone Primarosa) [#1069](https://github.com/nodejs/node-gyp/pull/1069) * [[`05c44944fd`](https://github.com/nodejs/node-gyp/commit/05c44944fd)] - Open the build file with universal-newlines mode (Guy Margalit) [#1053](https://github.com/nodejs/node-gyp/pull/1053) * [[`37ae7be114`](https://github.com/nodejs/node-gyp/commit/37ae7be114)] - Try python launcher when stock python is python 3. (Ben Noordhuis) [#992](https://github.com/nodejs/node-gyp/pull/992) @@ -829,7 +857,7 @@ Republish of v5.0.6 with unnecessary tarball removed from pack file. * [[`0e2dfda1f3`](https://github.com/nodejs/node-gyp/commit/0e2dfda1f3)] - Fix test/test-options when run through `npm test`. (Ben Noordhuis) [#755](https://github.com/nodejs/node-gyp/pull/755) * [[`9bfa0876b4`](https://github.com/nodejs/node-gyp/commit/9bfa0876b4)] - Add support for AIX (Michael Dawson) [#753](https://github.com/nodejs/node-gyp/pull/753) * [[`a8d441a0a2`](https://github.com/nodejs/node-gyp/commit/a8d441a0a2)] - Update README for Windows 10 support. (Jason Williams) [#766](https://github.com/nodejs/node-gyp/pull/766) -* [[`d1d6015276`](https://github.com/nodejs/node-gyp/commit/d1d6015276)] - Update broken links and switch to HTTPS. (andrew morton) +* [[`d1d6015276`](https://github.com/nodejs/node-gyp/commit/d1d6015276)] - Update broken links and switch to HTTPS. (andrew morton) ## v3.1.0 2015-11-14 diff --git a/deps/npm/node_modules/node-gyp/README.md b/deps/npm/node_modules/node-gyp/README.md index f46ee06308db1e..9e4c608e20a73f 100644 --- a/deps/npm/node_modules/node-gyp/README.md +++ b/deps/npm/node_modules/node-gyp/README.md @@ -11,9 +11,8 @@ addons. Note that `node-gyp` is _not_ used to build Node.js itself. -Multiple target versions of Node.js are supported (i.e. `0.8`, ..., `4`, `5`, `6`, -etc.), regardless of what version of Node.js is actually installed on your system -(`node-gyp` downloads the necessary development files or headers for the target version). +All current and LTS target versions of Node.js are supported. Depending on what version of Node.js is actually installed on your system +`node-gyp` downloads the necessary development files or headers for the target version. List of stable Node.js versions can be found on [Node.js website](https://nodejs.org/en/about/previous-releases). ## Features @@ -50,9 +49,7 @@ Install the current [version of Python](https://devguide.python.org/versions/) f [Microsoft Store](https://apps.microsoft.com/store/search?publisher=Python+Software+Foundation). 
Install tools and configuration manually: - * Install Visual C++ Build Environment: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) - (using "Visual C++ build tools" if using a version older than VS2019, otherwise use "Desktop development with C++" workload) or [Visual Studio Community](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=Community) - (using the "Desktop development with C++" workload) + * Install Visual C++ Build Environment: For Visual Studio 2019 or later, use the `Desktop development with C++` workload from [Visual Studio Community](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=Community). For a version older than Visual Studio 2019, install [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) with the `Visual C++ build tools` option. If the above steps didn't work for you, please visit [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) for additional tips. @@ -60,6 +57,9 @@ Install tools and configuration manually: To use the native ARM64 C++ compiler on Windows on ARM, ensure that you have Visual Studio 2022 [17.4 or later](https://devblogs.microsoft.com/visualstudio/arm64-visual-studio-is-officially-here/) installed. +It's advised to install following Powershell module: [VSSetup](https://github.com/microsoft/vssetup.powershell) using `Install-Module VSSetup -Scope CurrentUser`. +This will make Visual Studio detection logic to use more flexible and accessible method, avoiding Powershell's `ConstrainedLanguage` mode. + ### Configuring Python Dependency `node-gyp` requires that you have installed a [supported version of Python](https://devguide.python.org/versions/). diff --git a/deps/npm/node_modules/node-gyp/gyp/README.md b/deps/npm/node_modules/node-gyp/gyp/README.md deleted file mode 100644 index be1d7b9ebf6611..00000000000000 --- a/deps/npm/node_modules/node-gyp/gyp/README.md +++ /dev/null @@ -1,30 +0,0 @@ -GYP can Generate Your Projects. -=================================== - -Documents are available at [gyp.gsrc.io](https://gyp.gsrc.io), or you can check out ```md-pages``` branch to read those documents offline. - -__gyp-next__ is [released](https://github.com/nodejs/gyp-next/releases) to the [__Python Packaging Index__](https://pypi.org/project/gyp-next) (PyPI) and can be installed with the command: -* `python3 -m pip install gyp-next` - -When used as a command line utility, __gyp-next__ can also be installed with [pipx](https://pypa.github.io/pipx): -* `pipx install gyp-next` -``` -Installing to a new venv 'gyp-next' - installed package gyp-next 0.13.0, installed using Python 3.10.6 - These apps are now globally available - - gyp -done! ✨ 🌟 ✨ -``` - -Or to run __gyp-next__ directly without installing it: -* `pipx run gyp-next --help` -``` -NOTE: running app 'gyp' from 'gyp-next' -usage: usage: gyp [options ...] [build_file ...] - -options: - -h, --help show this help message and exit - --build CONFIGS configuration for build after project generation - --check check format of gyp files - [ ... 
] -``` diff --git a/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py b/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py index d3c97c666db077..9a796702142e34 100644 --- a/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py +++ b/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py @@ -739,9 +739,9 @@ def ComputeOutput(self, spec): % (self.android_class, self.android_module) ) else: - path = "$(call intermediates-dir-for,{},{},,,$(GYP_VAR_PREFIX))".format( - self.android_class, - self.android_module, + path = ( + "$(call intermediates-dir-for," + f"{self.android_class},{self.android_module},,,$(GYP_VAR_PREFIX))" ) assert spec.get("product_dir") is None # TODO: not supported? diff --git a/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py b/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py index 82a07ddc6577be..625b6d65ca1a6d 100644 --- a/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py +++ b/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py @@ -49,10 +49,9 @@ def GenerateOutput(target_list, target_dicts, data, params): # Use a banner that looks like the stock Python one and like what # code.interact uses by default, but tack on something to indicate what # locals are available, and identify gypsh. - banner = "Python {} on {}\nlocals.keys() = {}\ngypsh".format( - sys.version, - sys.platform, - repr(sorted(locals.keys())), + banner = ( + f"Python {sys.version} on {sys.platform}\nlocals.keys() = " + f"{repr(sorted(locals.keys()))}\ngypsh" ) code.interact(banner, local=locals) diff --git a/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py b/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py index 13b0794b4dccc3..6f0f8c1ab64c9c 100644 --- a/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py +++ b/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py @@ -1778,11 +1778,9 @@ def _GetCopies(spec): outer_dir = posixpath.split(src_bare)[1] fixed_dst = _FixPath(dst) full_dst = f'"{fixed_dst}\\{outer_dir}\\"' - cmd = 'mkdir {} 2>nul & cd "{}" && xcopy /e /f /y "{}" {}'.format( - full_dst, - _FixPath(base_dir), - outer_dir, - full_dst, + cmd = ( + f'mkdir {full_dst} 2>nul & cd "{_FixPath(base_dir)}" && ' + f'xcopy /e /f /y "{outer_dir}" {full_dst}' ) copies.append( ( @@ -1794,10 +1792,9 @@ def _GetCopies(spec): ) else: fix_dst = _FixPath(cpy["destination"]) - cmd = 'mkdir "{}" 2>nul & set ERRORLEVEL=0 & copy /Y "{}" "{}"'.format( - fix_dst, - _FixPath(src), - _FixPath(dst), + cmd = ( + f'mkdir "{fix_dst}" 2>nul & set ERRORLEVEL=0 & ' + f'copy /Y "{_FixPath(src)}" "{_FixPath(dst)}"' ) copies.append(([src], [dst], cmd, f"Copying {src} to {fix_dst}")) return copies @@ -1899,9 +1896,8 @@ def _GetPlatformOverridesOfProject(spec): for config_name, c in spec["configurations"].items(): config_fullname = _ConfigFullName(config_name, c) platform = c.get("msvs_target_platform", _ConfigPlatform(c)) - fixed_config_fullname = "{}|{}".format( - _ConfigBaseName(config_name, _ConfigPlatform(c)), - platform, + fixed_config_fullname = ( + f"{_ConfigBaseName(config_name, _ConfigPlatform(c))}|{platform}" ) if spec["toolset"] == "host" and generator_supports_multiple_toolsets: fixed_config_fullname = f"{config_name}|x64" diff --git a/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py b/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py index 8f39519dee51fb..0b56c72750e6cd 100644 --- a/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py +++ 
b/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py @@ -1135,18 +1135,16 @@ def EvalCondition(condition, conditions_key, phase, variables, build_file): true_dict = condition[i + 1] if type(true_dict) is not dict: raise GypError( - "{} {} must be followed by a dictionary, not {}".format( - conditions_key, cond_expr, type(true_dict) - ) + f"{conditions_key} {cond_expr} must be followed by a dictionary, not " + f"{type(true_dict)}" ) if len(condition) > i + 2 and type(condition[i + 2]) is dict: false_dict = condition[i + 2] i = i + 3 if i != len(condition): raise GypError( - "{} {} has {} unexpected trailing items".format( - conditions_key, cond_expr, len(condition) - i - ) + f"{conditions_key} {cond_expr} has {len(condition) - i} " + "unexpected trailing items" ) else: false_dict = None diff --git a/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py b/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py index 38fa21dd666697..847d1b8dc1d4da 100644 --- a/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py +++ b/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py @@ -830,17 +830,14 @@ def _GetLdManifestFlags( ("VCLinkerTool", "UACUIAccess"), config, default="false" ) - inner = """ + inner = f""" - + -""".format( - execution_level_map[execution_level], - ui_access, - ) +""" # noqa: E501 else: inner = "" diff --git a/deps/npm/node_modules/node-gyp/gyp/pyproject.toml b/deps/npm/node_modules/node-gyp/gyp/pyproject.toml index 0c25d0b3c1a065..7183e07d3c2cd0 100644 --- a/deps/npm/node_modules/node-gyp/gyp/pyproject.toml +++ b/deps/npm/node_modules/node-gyp/gyp/pyproject.toml @@ -38,7 +38,7 @@ gyp = "gyp:script_main" "Homepage" = "https://github.com/nodejs/gyp-next" [tool.ruff] -select = [ +lint.select = [ "C4", # flake8-comprehensions "C90", # McCabe cyclomatic complexity "DTZ", # flake8-datetimez @@ -87,7 +87,7 @@ select = [ # "T20", # flake8-print # "TRY", # tryceratops ] -ignore = [ +lint.ignore = [ "E721", "PLC1901", "PLR0402", @@ -105,10 +105,10 @@ extend-exclude = ["pylib/packaging"] line-length = 88 target-version = "py37" -[tool.ruff.mccabe] +[tool.ruff.lint.mccabe] max-complexity = 101 -[tool.ruff.pylint] +[tool.ruff.lint.pylint] max-args = 11 max-branches = 108 max-returns = 10 diff --git a/deps/npm/node_modules/node-gyp/lib/configure.js b/deps/npm/node_modules/node-gyp/lib/configure.js index 8da41a849dfcf6..e4b8c94e3d2599 100644 --- a/deps/npm/node_modules/node-gyp/lib/configure.js +++ b/deps/npm/node_modules/node-gyp/lib/configure.js @@ -1,6 +1,6 @@ 'use strict' -const { promises: fs } = require('graceful-fs') +const { promises: fs, readFileSync } = require('graceful-fs') const path = require('path') const log = require('./log') const os = require('os') @@ -13,6 +13,10 @@ const { findAccessibleSync } = require('./util') const { findPython } = require('./find-python') const { findVisualStudio } = win ? require('./find-visualstudio') : {} +const majorRe = /^#define NODE_MAJOR_VERSION (\d+)/m +const minorRe = /^#define NODE_MINOR_VERSION (\d+)/m +const patchRe = /^#define NODE_PATCH_VERSION (\d+)/m + async function configure (gyp, argv) { const buildDir = path.resolve('build') const configNames = ['config.gypi', 'common.gypi'] @@ -27,6 +31,28 @@ async function configure (gyp, argv) { // 'python' should be set by now process.env.PYTHON = python + if (!gyp.opts.nodedir && + process.config.variables.use_prefix_to_find_headers) { + // check if the headers can be found using the prefix specified + // at build time. 
Use them if they match the version expected + const prefix = process.config.variables.node_prefix + let availVersion + try { + const nodeVersionH = readFileSync(path.join(prefix, + 'include', 'node', 'node_version.h'), { encoding: 'utf8' }) + const major = nodeVersionH.match(majorRe)[1] + const minor = nodeVersionH.match(minorRe)[1] + const patch = nodeVersionH.match(patchRe)[1] + availVersion = major + '.' + minor + '.' + patch + } catch {} + if (availVersion === release.version) { + // ok version matches, use the headers + gyp.opts.nodedir = prefix + log.verbose('using local node headers based on prefix', + 'setting nodedir to ' + gyp.opts.nodedir) + } + } + if (gyp.opts.nodedir) { // --nodedir was specified. use that for the dev files nodeDir = gyp.opts.nodedir.replace(/^~/, os.homedir()) diff --git a/deps/npm/node_modules/node-gyp/lib/find-python.js b/deps/npm/node_modules/node-gyp/lib/find-python.js index 615da57bb85723..a71c00c2b65bc1 100644 --- a/deps/npm/node_modules/node-gyp/lib/find-python.js +++ b/deps/npm/node_modules/node-gyp/lib/find-python.js @@ -41,7 +41,7 @@ class PythonFinder { static findPython = (...args) => new PythonFinder(...args).findPython() log = log.withPrefix('find Python') - argsExecutable = ['-c', 'import sys; print(sys.executable);'] + argsExecutable = ['-c', 'import sys; sys.stdout.buffer.write(sys.executable.encode(\'utf-8\'));'] argsVersion = ['-c', 'import sys; print("%s.%s.%s" % sys.version_info[:3]);'] semverRange = '>=3.6.0' diff --git a/deps/npm/node_modules/node-gyp/lib/find-visualstudio.js b/deps/npm/node_modules/node-gyp/lib/find-visualstudio.js index b57770259abde3..8c5ae96127504c 100644 --- a/deps/npm/node_modules/node-gyp/lib/find-visualstudio.js +++ b/deps/npm/node_modules/node-gyp/lib/find-visualstudio.js @@ -54,7 +54,10 @@ class VisualStudioFinder { } const checks = [ - () => this.findVisualStudio2017OrNewer(), + () => this.findVisualStudio2019OrNewerUsingSetupModule(), + () => this.findVisualStudio2019OrNewer(), + () => this.findVisualStudio2017UsingSetupModule(), + () => this.findVisualStudio2017(), () => this.findVisualStudio2015(), () => this.findVisualStudio2013() ] @@ -113,9 +116,84 @@ class VisualStudioFinder { throw new Error('Could not find any Visual Studio installation to use') } + async findVisualStudio2019OrNewerUsingSetupModule () { + return this.findNewVSUsingSetupModule([2019, 2022]) + } + + async findVisualStudio2017UsingSetupModule () { + if (this.nodeSemver.major >= 22) { + this.addLog( + 'not looking for VS2017 as it is only supported up to Node.js 21') + return null + } + return this.findNewVSUsingSetupModule([2017]) + } + + async findNewVSUsingSetupModule (supportedYears) { + const ps = path.join(process.env.SystemRoot, 'System32', + 'WindowsPowerShell', 'v1.0', 'powershell.exe') + const vcInstallDir = this.envVcInstallDir + + const checkModuleArgs = [ + '-NoProfile', + '-Command', + '&{@(Get-Module -ListAvailable -Name VSSetup).Version.ToString()}' + ] + this.log.silly('Running', ps, checkModuleArgs) + const [cErr] = await this.execFile(ps, checkModuleArgs) + if (cErr) { + this.addLog('VSSetup module doesn\'t seem to exist. You can install it via: "Install-Module VSSetup -Scope CurrentUser"') + this.log.silly('VSSetup error = %j', cErr && (cErr.stack || cErr)) + return null + } + const filterArg = vcInstallDir !== undefined ? 
`| where {$_.InstallationPath -eq '${vcInstallDir}' }` : '' + const psArgs = [ + '-NoProfile', + '-Command', + `&{Get-VSSetupInstance ${filterArg} | ConvertTo-Json -Depth 3}` + ] + + this.log.silly('Running', ps, psArgs) + const [err, stdout, stderr] = await this.execFile(ps, psArgs) + let parsedData = this.parseData(err, stdout, stderr) + if (parsedData === null) { + return null + } + this.log.silly('Parsed data', parsedData) + if (!Array.isArray(parsedData)) { + // if there are only 1 result, then Powershell will output non-array + parsedData = [parsedData] + } + // normalize output + parsedData = parsedData.map((info) => { + info.path = info.InstallationPath + info.version = `${info.InstallationVersion.Major}.${info.InstallationVersion.Minor}.${info.InstallationVersion.Build}.${info.InstallationVersion.Revision}` + info.packages = info.Packages.map((p) => p.Id) + return info + }) + // pass for further processing + return this.processData(parsedData, supportedYears) + } + + // Invoke the PowerShell script to get information about Visual Studio 2019 + // or newer installations + async findVisualStudio2019OrNewer () { + return this.findNewVS([2019, 2022]) + } + + // Invoke the PowerShell script to get information about Visual Studio 2017 + async findVisualStudio2017 () { + if (this.nodeSemver.major >= 22) { + this.addLog( + 'not looking for VS2017 as it is only supported up to Node.js 21') + return null + } + return this.findNewVS([2017]) + } + // Invoke the PowerShell script to get information about Visual Studio 2017 // or newer installations - async findVisualStudio2017OrNewer () { + async findNewVS (supportedYears) { const ps = path.join(process.env.SystemRoot, 'System32', 'WindowsPowerShell', 'v1.0', 'powershell.exe') const csFile = path.join(__dirname, 'Find-VisualStudio.cs') @@ -128,24 +206,35 @@ class VisualStudioFinder { ] this.log.silly('Running', ps, psArgs) - const [err, stdout, stderr] = await execFile(ps, psArgs, { encoding: 'utf8' }) - return this.parseData(err, stdout, stderr) + const [err, stdout, stderr] = await this.execFile(ps, psArgs) + const parsedData = this.parseData(err, stdout, stderr, { checkIsArray: true }) + if (parsedData === null) { + return null + } + return this.processData(parsedData, supportedYears) } - // Parse the output of the PowerShell script and look for an installation - // of Visual Studio 2017 or newer to use - parseData (err, stdout, stderr) { + // Parse the output of the PowerShell script, make sanity checks + parseData (err, stdout, stderr, sanityCheckOptions) { + const defaultOptions = { + checkIsArray: false + } + + // Merging provided options with the default options + const sanityOptions = { ...defaultOptions, ...sanityCheckOptions } + this.log.silly('PS stderr = %j', stderr) - const failPowershell = () => { + const failPowershell = (failureDetails) => { this.addLog( - 'could not use PowerShell to find Visual Studio 2017 or newer, try re-running with \'--loglevel silly\' for more details') + `could not use PowerShell to find Visual Studio 2017 or newer, try re-running with '--loglevel silly' for more details. 
\n + Failure details: ${failureDetails}`) return null } if (err) { this.log.silly('PS err = %j', err && (err.stack || err)) - return failPowershell() + return failPowershell(`${err}`.substring(0, 40)) } let vsInfo @@ -157,11 +246,16 @@ class VisualStudioFinder { return failPowershell() } - if (!Array.isArray(vsInfo)) { + if (sanityOptions.checkIsArray && !Array.isArray(vsInfo)) { this.log.silly('PS stdout = %j', stdout) - return failPowershell() + return failPowershell('Expected array as output of the PS script') } + return vsInfo + } + // Process parsed data containing information about VS installations + // Look for the required parts, extract and output them back + processData (vsInfo, supportedYears) { vsInfo = vsInfo.map((info) => { this.log.silly(`processing installation: "${info.path}"`) info.path = path.resolve(info.path) @@ -175,11 +269,12 @@ class VisualStudioFinder { this.log.silly('vsInfo:', vsInfo) // Remove future versions or errors parsing version number + // Also remove any unsupported versions vsInfo = vsInfo.filter((info) => { - if (info.versionYear) { + if (info.versionYear && supportedYears.indexOf(info.versionYear) !== -1) { return true } - this.addLog(`unknown version "${info.version}" found at "${info.path}"`) + this.addLog(`${info.versionYear ? 'unsupported' : 'unknown'} version "${info.version}" found at "${info.path}"`) return false }) @@ -438,6 +533,10 @@ class VisualStudioFinder { return true } + + async execFile (exec, args) { + return await execFile(exec, args, { encoding: 'utf8' }) + } } module.exports = VisualStudioFinder diff --git a/deps/npm/node_modules/node-gyp/package.json b/deps/npm/node_modules/node-gyp/package.json index 80c63f2e72c3d9..95f012fa5daf79 100644 --- a/deps/npm/node_modules/node-gyp/package.json +++ b/deps/npm/node_modules/node-gyp/package.json @@ -11,7 +11,7 @@ "bindings", "gyp" ], - "version": "10.0.1", + "version": "10.1.0", "installVersion": 11, "author": "Nathan Rajlich (http://tootallnate.net)", "repository": { diff --git a/deps/npm/node_modules/node-gyp/release-please-config.json b/deps/npm/node_modules/node-gyp/release-please-config.json new file mode 100644 index 00000000000000..94b8f8110e881a --- /dev/null +++ b/deps/npm/node_modules/node-gyp/release-please-config.json @@ -0,0 +1,40 @@ +{ + "packages": { + ".": { + "include-component-in-tag": false, + "release-type": "node", + "changelog-sections": [ + { "type": "feat", "section": "Features", "hidden": false }, + { "type": "fix", "section": "Bug Fixes", "hidden": false }, + { "type": "bin", "section": "Core", "hidden": false }, + { "type": "gyp", "section": "Core", "hidden": false }, + { "type": "lib", "section": "Core", "hidden": false }, + { "type": "src", "section": "Core", "hidden": false }, + { "type": "test", "section": "Tests", "hidden": false }, + { "type": "build", "section": "Core", "hidden": false }, + { "type": "clean", "section": "Core", "hidden": false }, + { "type": "configure", "section": "Core", "hidden": false }, + { "type": "install", "section": "Core", "hidden": false }, + { "type": "list", "section": "Core", "hidden": false }, + { "type": "rebuild", "section": "Core", "hidden": false }, + { "type": "remove", "section": "Core", "hidden": false }, + { "type": "deps", "section": "Core", "hidden": false }, + { "type": "python", "section": "Core", "hidden": false }, + { "type": "lin", "section": "Core", "hidden": false }, + { "type": "linux", "section": "Core", "hidden": false }, + { "type": "mac", "section": "Core", "hidden": false }, + { "type": "macos", 
"section": "Core", "hidden": false }, + { "type": "win", "section": "Core", "hidden": false }, + { "type": "windows", "section": "Core", "hidden": false }, + { "type": "zos", "section": "Core", "hidden": false }, + { "type": "doc", "section": "Doc", "hidden": false }, + { "type": "docs", "section": "Doc", "hidden": false }, + { "type": "readme", "section": "Doc", "hidden": false }, + { "type": "chore", "section": "Miscellaneous", "hidden": false }, + { "type": "refactor", "section": "Miscellaneous", "hidden": false }, + { "type": "ci", "section": "Miscellaneous", "hidden": false }, + { "type": "meta", "section": "Miscellaneous", "hidden": false } + ] + } + } +} diff --git a/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js b/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js index 066ac3c32420f2..183311d8403977 100644 --- a/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js +++ b/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js @@ -4,7 +4,7 @@ const errors = require('./errors.js') const { Response } = require('minipass-fetch') const defaultOpts = require('./default-opts.js') const log = require('proc-log') -const cleanUrl = require('./clean-url.js') +const { redact: cleanUrl } = require('@npmcli/redact') /* eslint-disable-next-line max-len */ const moreInfoUrl = 'https://github.com/npm/cli/wiki/No-auth-for-URI,-but-auth-present-for-scoped-registry' diff --git a/deps/npm/node_modules/npm-registry-fetch/lib/clean-url.js b/deps/npm/node_modules/npm-registry-fetch/lib/clean-url.js deleted file mode 100644 index 0c2656b5653a00..00000000000000 --- a/deps/npm/node_modules/npm-registry-fetch/lib/clean-url.js +++ /dev/null @@ -1,27 +0,0 @@ -const { URL } = require('url') - -const replace = '***' -const tokenRegex = /\bnpm_[a-zA-Z0-9]{36}\b/g -const guidRegex = /\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b/g - -const cleanUrl = (str) => { - if (typeof str !== 'string' || !str) { - return str - } - - try { - const url = new URL(str) - if (url.password) { - url.password = replace - str = url.toString() - } - } catch { - // ignore errors - } - - return str - .replace(tokenRegex, `npm_${replace}`) - .replace(guidRegex, `npm_${replace}`) -} - -module.exports = cleanUrl diff --git a/deps/npm/node_modules/npm-registry-fetch/lib/index.js b/deps/npm/node_modules/npm-registry-fetch/lib/index.js index bb413f862d92d0..1d77a77024bf50 100644 --- a/deps/npm/node_modules/npm-registry-fetch/lib/index.js +++ b/deps/npm/node_modules/npm-registry-fetch/lib/index.js @@ -10,6 +10,7 @@ const qs = require('querystring') const url = require('url') const zlib = require('minizlib') const { Minipass } = require('minipass') +const { redact: cleanUrl } = require('@npmcli/redact') const defaultOpts = require('./default-opts.js') @@ -246,4 +247,6 @@ function getHeaders (uri, auth, opts) { return headers } -module.exports.cleanUrl = require('./clean-url.js') +// export cleanUrl to avoid a breaking change +// TODO: next semver major remove this. 
Consumers should use @npmcli/redact instead +module.exports.cleanUrl = cleanUrl diff --git a/deps/npm/node_modules/npm-registry-fetch/package.json b/deps/npm/node_modules/npm-registry-fetch/package.json index b715d52391a933..88455a4971af0b 100644 --- a/deps/npm/node_modules/npm-registry-fetch/package.json +++ b/deps/npm/node_modules/npm-registry-fetch/package.json @@ -1,6 +1,6 @@ { "name": "npm-registry-fetch", - "version": "16.1.0", + "version": "16.2.0", "description": "Fetch-based http client for use with npm registry APIs", "main": "lib", "files": [ @@ -9,7 +9,7 @@ ], "scripts": { "eslint": "eslint", - "lint": "eslint \"**/*.js\"", + "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "lintfix": "npm run lint -- --fix", "test": "tap", "posttest": "npm run lint", @@ -31,6 +31,7 @@ "author": "GitHub Inc.", "license": "ISC", "dependencies": { + "@npmcli/redact": "^1.1.0", "make-fetch-happen": "^13.0.0", "minipass": "^7.0.2", "minipass-fetch": "^3.0.0", @@ -41,7 +42,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.19.0", + "@npmcli/template-oss": "4.21.3", "cacache": "^18.0.0", "nock": "^13.2.4", "require-inject": "^1.4.4", @@ -61,7 +62,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.19.0", + "version": "4.21.3", "publish": "true" } } diff --git a/deps/npm/node_modules/path-scurry/dist/cjs/index.js b/deps/npm/node_modules/path-scurry/dist/commonjs/index.js similarity index 99% rename from deps/npm/node_modules/path-scurry/dist/cjs/index.js rename to deps/npm/node_modules/path-scurry/dist/commonjs/index.js index 23eb5b0853ff28..6e330d400d2ac9 100644 --- a/deps/npm/node_modules/path-scurry/dist/cjs/index.js +++ b/deps/npm/node_modules/path-scurry/dist/commonjs/index.js @@ -75,21 +75,21 @@ const IFMT = 0b1111; // mask to unset low 4 bits const IFMT_UNKNOWN = ~IFMT; // set after successfully calling readdir() and getting entries. -const READDIR_CALLED = 16; +const READDIR_CALLED = 0b0000_0001_0000; // set after a successful lstat() -const LSTAT_CALLED = 32; +const LSTAT_CALLED = 0b0000_0010_0000; // set if an entry (or one of its parents) is definitely not a dir -const ENOTDIR = 64; +const ENOTDIR = 0b0000_0100_0000; // set if an entry (or one of its parents) does not exist // (can also be set on lstat errors like EACCES or ENAMETOOLONG) -const ENOENT = 128; +const ENOENT = 0b0000_1000_0000; // cannot have child entries -- also verify &IFMT is either IFDIR or IFLNK // set if we fail to readlink -const ENOREADLINK = 256; +const ENOREADLINK = 0b0001_0000_0000; // set if we know realpath() will fail -const ENOREALPATH = 512; +const ENOREALPATH = 0b0010_0000_0000; const ENOCHILD = ENOTDIR | ENOENT | ENOREALPATH; -const TYPEMASK = 1023; +const TYPEMASK = 0b0011_1111_1111; const entToType = (s) => s.isFile() ? 
IFREG : s.isDirectory() @@ -703,7 +703,7 @@ class PathBase { /* c8 ignore stop */ try { const read = await this.#fs.promises.readlink(this.fullpath()); - const linkTarget = this.parent.resolve(read); + const linkTarget = (await this.parent.realpath())?.resolve(read); if (linkTarget) { return (this.#linkTarget = linkTarget); } @@ -732,7 +732,7 @@ class PathBase { /* c8 ignore stop */ try { const read = this.#fs.readlinkSync(this.fullpath()); - const linkTarget = this.parent.resolve(read); + const linkTarget = (this.parent.realpathSync())?.resolve(read); if (linkTarget) { return (this.#linkTarget = linkTarget); } @@ -747,7 +747,9 @@ class PathBase { this.#type |= READDIR_CALLED; // mark all remaining provisional children as ENOENT for (let p = children.provisional; p < children.length; p++) { - children[p].#markENOENT(); + const c = children[p]; + if (c) + c.#markENOENT(); } } #markENOENT() { diff --git a/deps/npm/node_modules/path-scurry/dist/cjs/package.json b/deps/npm/node_modules/path-scurry/dist/commonjs/package.json similarity index 100% rename from deps/npm/node_modules/path-scurry/dist/cjs/package.json rename to deps/npm/node_modules/path-scurry/dist/commonjs/package.json diff --git a/deps/npm/node_modules/path-scurry/dist/mjs/index.js b/deps/npm/node_modules/path-scurry/dist/esm/index.js similarity index 99% rename from deps/npm/node_modules/path-scurry/dist/mjs/index.js rename to deps/npm/node_modules/path-scurry/dist/esm/index.js index 079253a6aee967..2ce978e64bb117 100644 --- a/deps/npm/node_modules/path-scurry/dist/mjs/index.js +++ b/deps/npm/node_modules/path-scurry/dist/esm/index.js @@ -49,21 +49,21 @@ const IFMT = 0b1111; // mask to unset low 4 bits const IFMT_UNKNOWN = ~IFMT; // set after successfully calling readdir() and getting entries. -const READDIR_CALLED = 16; +const READDIR_CALLED = 0b0000_0001_0000; // set after a successful lstat() -const LSTAT_CALLED = 32; +const LSTAT_CALLED = 0b0000_0010_0000; // set if an entry (or one of its parents) is definitely not a dir -const ENOTDIR = 64; +const ENOTDIR = 0b0000_0100_0000; // set if an entry (or one of its parents) does not exist // (can also be set on lstat errors like EACCES or ENAMETOOLONG) -const ENOENT = 128; +const ENOENT = 0b0000_1000_0000; // cannot have child entries -- also verify &IFMT is either IFDIR or IFLNK // set if we fail to readlink -const ENOREADLINK = 256; +const ENOREADLINK = 0b0001_0000_0000; // set if we know realpath() will fail -const ENOREALPATH = 512; +const ENOREALPATH = 0b0010_0000_0000; const ENOCHILD = ENOTDIR | ENOENT | ENOREALPATH; -const TYPEMASK = 1023; +const TYPEMASK = 0b0011_1111_1111; const entToType = (s) => s.isFile() ? 
IFREG : s.isDirectory() @@ -675,7 +675,7 @@ export class PathBase { /* c8 ignore stop */ try { const read = await this.#fs.promises.readlink(this.fullpath()); - const linkTarget = this.parent.resolve(read); + const linkTarget = (await this.parent.realpath())?.resolve(read); if (linkTarget) { return (this.#linkTarget = linkTarget); } @@ -704,7 +704,7 @@ export class PathBase { /* c8 ignore stop */ try { const read = this.#fs.readlinkSync(this.fullpath()); - const linkTarget = this.parent.resolve(read); + const linkTarget = (this.parent.realpathSync())?.resolve(read); if (linkTarget) { return (this.#linkTarget = linkTarget); } @@ -719,7 +719,9 @@ export class PathBase { this.#type |= READDIR_CALLED; // mark all remaining provisional children as ENOENT for (let p = children.provisional; p < children.length; p++) { - children[p].#markENOENT(); + const c = children[p]; + if (c) + c.#markENOENT(); } } #markENOENT() { diff --git a/deps/npm/node_modules/path-scurry/dist/mjs/package.json b/deps/npm/node_modules/path-scurry/dist/esm/package.json similarity index 100% rename from deps/npm/node_modules/path-scurry/dist/mjs/package.json rename to deps/npm/node_modules/path-scurry/dist/esm/package.json diff --git a/deps/npm/node_modules/path-scurry/package.json b/deps/npm/node_modules/path-scurry/package.json index af04f807fed2bc..90a4b468f04618 100644 --- a/deps/npm/node_modules/path-scurry/package.json +++ b/deps/npm/node_modules/path-scurry/package.json @@ -1,19 +1,20 @@ { "name": "path-scurry", - "version": "1.10.1", + "version": "1.10.2", "description": "walk paths fast and efficiently", "author": "Isaac Z. Schlueter (https://blog.izs.me)", - "main": "./dist/cjs/index.js", - "module": "./dist/mjs/index.js", + "main": "./dist/commonjs/index.js", + "type": "module", "exports": { + "./package.json": "./package.json", ".": { "import": { - "types": "./dist/mjs/index.d.ts", - "default": "./dist/mjs/index.js" + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" }, "require": { - "types": "./dist/cjs/index.d.ts", - "default": "./dist/cjs/index.js" + "types": "./dist/commonjs/index.d.ts", + "default": "./dist/commonjs/index.js" } } }, @@ -26,12 +27,11 @@ "postversion": "npm publish", "prepublishOnly": "git push origin --follow-tags", "preprepare": "rm -rf dist", - "prepare": "tsc -p tsconfig.json && tsc -p tsconfig-esm.json", - "postprepare": "bash ./scripts/fixup.sh", + "prepare": "tshy", "pretest": "npm run prepare", "presnap": "npm run prepare", - "test": "c8 tap", - "snap": "c8 tap", + "test": "tap", + "snap": "tap", "format": "prettier --write . 
--loglevel warn", "typedoc": "typedoc --tsconfig tsconfig-esm.json ./src/*.ts", "bench": "bash ./scripts/bench.sh" @@ -47,28 +47,19 @@ "arrowParens": "avoid", "endOfLine": "lf" }, - "tap": { - "coverage": false, - "node-arg": [ - "--no-warnings", - "--loader", - "ts-node/esm" - ], - "ts": false - }, "devDependencies": { "@nodelib/fs.walk": "^1.2.8", - "@types/node": "^20.1.4", - "@types/tap": "^15.0.7", + "@types/node": "^20.11.30", "c8": "^7.12.0", "eslint-config-prettier": "^8.6.0", "mkdirp": "^3.0.0", "prettier": "^2.8.3", "rimraf": "^5.0.1", - "tap": "^16.3.4", - "ts-node": "^10.9.1", - "typedoc": "^0.23.24", - "typescript": "^5.0.4" + "tap": "^18.7.2", + "ts-node": "^10.9.2", + "tshy": "^1.12.0", + "typedoc": "^0.25.12", + "typescript": "^5.4.3" }, "engines": { "node": ">=16 || 14 >=14.17" @@ -81,7 +72,14 @@ "url": "git+https://github.com/isaacs/path-scurry" }, "dependencies": { - "lru-cache": "^9.1.1 || ^10.0.0", + "lru-cache": "^10.2.0", "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - } + }, + "tshy": { + "exports": { + "./package.json": "./package.json", + ".": "./src/index.ts" + } + }, + "types": "./dist/commonjs/index.d.ts" } diff --git a/deps/npm/node_modules/promzard/lib/index.js b/deps/npm/node_modules/promzard/lib/index.js index 2244cbbbacdb02..52c8a3c828313d 100644 --- a/deps/npm/node_modules/promzard/lib/index.js +++ b/deps/npm/node_modules/promzard/lib/index.js @@ -4,7 +4,7 @@ const { promisify } = require('util') const { randomBytes } = require('crypto') const { Module } = require('module') const { dirname, basename } = require('path') -const read = require('read') +const { read } = require('read') const files = {} diff --git a/deps/npm/node_modules/promzard/package.json b/deps/npm/node_modules/promzard/package.json index a48764dd5441b8..a4253193232b87 100644 --- a/deps/npm/node_modules/promzard/package.json +++ b/deps/npm/node_modules/promzard/package.json @@ -2,23 +2,23 @@ "author": "GitHub Inc.", "name": "promzard", "description": "prompting wizardly", - "version": "1.0.0", + "version": "1.0.1", "repository": { "url": "https://github.com/npm/promzard.git", "type": "git" }, "dependencies": { - "read": "^2.0.0" + "read": "^3.0.1" }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.11.0", + "@npmcli/template-oss": "4.21.3", "tap": "^16.3.0" }, "main": "lib/index.js", "scripts": { "test": "tap", - "lint": "eslint \"**/*.js\"", + "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "postlint": "template-oss-check", "template-oss-apply": "template-oss-apply --force", "lintfix": "npm run lint -- --fix", @@ -35,7 +35,8 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.11.0" + "version": "4.21.3", + "publish": true }, "tap": { "jobs": 1, diff --git a/deps/npm/node_modules/read/dist/commonjs/package.json b/deps/npm/node_modules/read/dist/commonjs/package.json new file mode 100644 index 00000000000000..5bbefffbabee39 --- /dev/null +++ b/deps/npm/node_modules/read/dist/commonjs/package.json @@ -0,0 +1,3 @@ +{ + "type": "commonjs" +} diff --git a/deps/npm/node_modules/read/dist/commonjs/read.js b/deps/npm/node_modules/read/dist/commonjs/read.js new file mode 100644 index 00000000000000..bab433d8a1155f --- /dev/null +++ b/deps/npm/node_modules/read/dist/commonjs/read.js @@ -0,0 +1,95 @@ +"use strict"; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? 
mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.read = void 0; +const mute_stream_1 = __importDefault(require("mute-stream")); +const readline_1 = require("readline"); +async function read({ default: def, input = process.stdin, output = process.stdout, completer, prompt = '', silent, timeout, edit, terminal, replace, }) { + if (typeof def !== 'undefined' && + typeof def !== 'string' && + typeof def !== 'number') { + throw new Error('default value must be string or number'); + } + let editDef = false; + const defString = def?.toString(); + prompt = prompt.trim() + ' '; + terminal = !!(terminal || output.isTTY); + if (defString) { + if (silent) { + prompt += '(

[The remainder of the read.js hunk and the header of the diff to deps/npm/docs/output/commands/npm-audit.html are not recoverable here. The npm-audit.html hunk edits the rendered "Keys response" list (expires, keydid, keytype, scheme), removing a stray '"' from the expires entry's ISO 8601 link, mirroring the npm-audit.1 man-page hunk further below.]
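For context only: the "Keys response" described in that hunk is the JSON served from the registry's /-/npm/v1/keys route named in the npm-audit.1 hunk further below. A minimal, abridged sketch of such a response body, assuming a top-level "keys" array and reusing the field names exactly as the docs list them, with placeholder values rather than real key material:

    {
      "keys": [
        {
          "expires": null,
          "keydid": "<sha256-fingerprint-of-public-key>",
          "keytype": "ecdsa-sha2-nistp256",
          "scheme": "ecdsa-sha2-nistp256"
        }
      ]
    }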
diff --git a/deps/npm/docs/output/commands/npm-ls.html b/deps/npm/docs/output/commands/npm-ls.html
index 0f4beb45d09397..7449728edf37fe 100644
--- a/deps/npm/docs/output/commands/npm-ls.html
+++ b/deps/npm/docs/output/commands/npm-ls.html
@@ -160,7 +160,7 @@
[Rendered hunk: in the "npm ls promzard" example output, "npm@10.5.1 /path/to/npm" becomes "npm@10.5.2 /path/to/npm"; the "└─┬ init-package-json@0.0.4 / └── promzard@0.1.5" tree below it is unchanged context.]
diff --git a/deps/npm/docs/output/commands/npm.html b/deps/npm/docs/output/commands/npm.html
index 4be373a7159e9f..d79ded11c70ba2 100644
--- a/deps/npm/docs/output/commands/npm.html
+++ b/deps/npm/docs/output/commands/npm.html
@@ -150,7 +150,7 @@
[Rendered hunk: the "Version" section changes from 10.5.1 to 10.5.2; the surrounding "Table of contents", "Note: This command is unaware of workspaces." and "Description" headings are unchanged context, and the Description text continues:]

      npm is the package manager for the Node JavaScript platform. It puts modules in place so that node can find them, and manages dependency diff --git a/deps/npm/lib/base-command.js b/deps/npm/lib/base-command.js index e4a7bbbec724a6..cdf7971b5aaf92 100644 --- a/deps/npm/lib/base-command.js +++ b/deps/npm/lib/base-command.js @@ -3,7 +3,6 @@ const { relative } = require('path') const { definitions } = require('@npmcli/config/lib/definitions') -const getWorkspaces = require('./workspaces/get-workspaces.js') const { aliases: cmdAliases } = require('./utils/cmd-list') const log = require('./utils/log-shim.js') @@ -170,6 +169,7 @@ class BaseCommand { const relativeFrom = prefixInsideCwd ? this.npm.localPrefix : process.cwd() const filters = this.npm.config.get('workspace') + const getWorkspaces = require('./workspaces/get-workspaces.js') const ws = await getWorkspaces(filters, { path: this.npm.localPrefix, includeWorkspaceRoot, diff --git a/deps/npm/lib/commands/owner.js b/deps/npm/lib/commands/owner.js index 5b54dd41f3d607..e530e1c51c8e1f 100644 --- a/deps/npm/lib/commands/owner.js +++ b/deps/npm/lib/commands/owner.js @@ -5,6 +5,7 @@ const log = require('../utils/log-shim') const otplease = require('../utils/otplease.js') const pkgJson = require('@npmcli/package-json') const BaseCommand = require('../base-command.js') +const { redact } = require('@npmcli/redact') const readJson = async (path) => { try { @@ -119,7 +120,7 @@ class Owner extends BaseCommand { this.npm.output(maintainers.map(m => `${m.name} <${m.email}>`).join('\n')) } } catch (err) { - log.error('owner ls', "Couldn't get owner data", npmFetch.cleanUrl(pkg)) + log.error('owner ls', "Couldn't get owner data", redact(pkg)) throw err } } diff --git a/deps/npm/lib/commands/ping.js b/deps/npm/lib/commands/ping.js index c79e6a96cee405..2d60f5d69a8da6 100644 --- a/deps/npm/lib/commands/ping.js +++ b/deps/npm/lib/commands/ping.js @@ -1,4 +1,4 @@ -const { cleanUrl } = require('npm-registry-fetch') +const { redact } = require('@npmcli/redact') const log = require('../utils/log-shim') const pingUtil = require('../utils/ping.js') const BaseCommand = require('../base-command.js') @@ -9,7 +9,7 @@ class Ping extends BaseCommand { static name = 'ping' async exec (args) { - const cleanRegistry = cleanUrl(this.npm.config.get('registry')) + const cleanRegistry = redact(this.npm.config.get('registry')) log.notice('PING', cleanRegistry) const start = Date.now() const details = await pingUtil({ ...this.npm.flatOptions }) diff --git a/deps/npm/lib/commands/publish.js b/deps/npm/lib/commands/publish.js index 0456fd7e8320e6..cf6b50cce3c21c 100644 --- a/deps/npm/lib/commands/publish.js +++ b/deps/npm/lib/commands/publish.js @@ -220,7 +220,12 @@ class Publish extends BaseCommand { }) } if (manifest.publishConfig) { - flatten(manifest.publishConfig, opts) + const cliFlags = this.npm.config.data.get('cli').raw + // Filter out properties set in CLI flags to prioritize them over + // corresponding `publishConfig` settings + const filteredPublishConfig = Object.fromEntries( + Object.entries(manifest.publishConfig).filter(([key]) => !(key in cliFlags))) + flatten(filteredPublishConfig, opts) } return manifest } diff --git a/deps/npm/lib/commands/unpublish.js b/deps/npm/lib/commands/unpublish.js index a9c20900534c3a..a4d445a035b622 100644 --- a/deps/npm/lib/commands/unpublish.js +++ b/deps/npm/lib/commands/unpublish.js @@ -141,7 +141,12 @@ class Unpublish extends BaseCommand { // If localPrefix has a package.json with a name that matches the package // being 
unpublished, load up the publishConfig if (manifest?.name === spec.name && manifest.publishConfig) { - flatten(manifest.publishConfig, opts) + const cliFlags = this.npm.config.data.get('cli').raw + // Filter out properties set in CLI flags to prioritize them over + // corresponding `publishConfig` settings + const filteredPublishConfig = Object.fromEntries( + Object.entries(manifest.publishConfig).filter(([key]) => !(key in cliFlags))) + flatten(filteredPublishConfig, opts) } const versions = await Unpublish.getKeysOfVersions(spec.name, opts) diff --git a/deps/npm/lib/utils/error-message.js b/deps/npm/lib/utils/error-message.js index fc7be8301662e1..348bb63e2d5abd 100644 --- a/deps/npm/lib/utils/error-message.js +++ b/deps/npm/lib/utils/error-message.js @@ -1,6 +1,5 @@ const { format } = require('util') const { resolve } = require('path') -const nameValidator = require('validate-npm-package-name') const { redactLog: replaceInfo } = require('@npmcli/redact') const { report } = require('./explain-eresolve.js') const log = require('./log-shim') @@ -215,6 +214,7 @@ const errorMessage = (er, npm) => { detail.push(['404', '']) detail.push(['404', '', `'${replaceInfo(er.pkgid)}' is not in this registry.`]) + const nameValidator = require('validate-npm-package-name') const valResult = nameValidator(pkg) if (!valResult.validForNewPackages) { diff --git a/deps/npm/lib/utils/log-file.js b/deps/npm/lib/utils/log-file.js index 8c06f5647e761e..1a46b7da0d6604 100644 --- a/deps/npm/lib/utils/log-file.js +++ b/deps/npm/lib/utils/log-file.js @@ -1,7 +1,6 @@ const os = require('os') const { join, dirname, basename } = require('path') const { format } = require('util') -const { glob } = require('glob') const { Minipass } = require('minipass') const fsMiniPass = require('fs-minipass') const fs = require('fs/promises') @@ -9,7 +8,6 @@ const log = require('./log-shim') const Display = require('./display') const padZero = (n, length) => n.toString().padStart(length.toString().length, '0') -const globify = pattern => pattern.split('\\').join('/') class LogFiles { // Default to a plain minipass stream so we can buffer @@ -199,17 +197,41 @@ class LogFiles { try { const logPath = this.#getLogFilePath() - const logGlob = join(dirname(logPath), basename(logPath) + const patternFileName = basename(logPath) // tell glob to only match digits - .replace(/\d/g, '[0123456789]') + .replace(/\d/g, 'd') // Handle the old (prior to 8.2.0) log file names which did not have a // counter suffix - .replace(/-\.log$/, '*.log') - ) + .replace('-.log', '') + + let files = await fs.readdir( + dirname(logPath), { + withFileTypes: true, + encoding: 'utf-8', + }) + files = files.sort((a, b) => basename(a.name).localeCompare(basename(b.name), 'en')) + + const logFiles = [] + + for (const file of files) { + if (!file.isFile()) { + continue + } + + const genericFileName = file.name.replace(/\d/g, 'd') + const filePath = join(dirname(logPath), basename(file.name)) + + // Always ignore the currently written files + if ( + genericFileName.includes(patternFileName) + && genericFileName.endsWith('.log') + && !this.#files.includes(filePath) + ) { + logFiles.push(filePath) + } + } - // Always ignore the currently written files - const files = await glob(globify(logGlob), { ignore: this.#files.map(globify), silent: true }) - const toDelete = files.length - this.#logsMax + const toDelete = logFiles.length - this.#logsMax if (toDelete <= 0) { return @@ -217,7 +239,7 @@ class LogFiles { log.silly('logfile', `start cleaning logs, removing ${toDelete} 
files`) - for (const file of files.slice(0, toDelete)) { + for (const file of logFiles.slice(0, toDelete)) { try { await fs.rm(file, { force: true }) } catch (e) { diff --git a/deps/npm/lib/utils/update-notifier.js b/deps/npm/lib/utils/update-notifier.js index 7c9611e4773f97..7481b65d562217 100644 --- a/deps/npm/lib/utils/update-notifier.js +++ b/deps/npm/lib/utils/update-notifier.js @@ -3,7 +3,9 @@ // Check daily for betas, and weekly otherwise. const ciInfo = require('ci-info') -const semver = require('semver') +const gt = require('semver/functions/gt') +const gte = require('semver/functions/gte') +const parse = require('semver/functions/parse') const { stat, writeFile } = require('fs/promises') const { resolve } = require('path') @@ -38,12 +40,12 @@ const updateCheck = async (npm, spec, version, current) => { // and should get the updates from that release train. // Note that this isn't another http request over the network, because // the packument will be cached by pacote from previous request. - if (semver.gt(version, latest) && spec === 'latest') { + if (gt(version, latest) && spec === 'latest') { return updateNotifier(npm, `^${version}`) } // if we already have something >= the desired spec, then we're done - if (semver.gte(version, latest)) { + if (gte(version, latest)) { return null } @@ -53,7 +55,7 @@ const updateCheck = async (npm, spec, version, current) => { // ok! notify the user about this update they should get. // The message is saved for printing at process exit so it will not get // lost in any other messages being printed as part of the command. - const update = semver.parse(mani.version) + const update = parse(mani.version) const type = update.major !== current.major ? 'major' : update.minor !== current.minor ? 'minor' : update.patch !== current.patch ? 'patch' @@ -79,7 +81,7 @@ const updateNotifier = async (npm, spec = 'latest') => { // if we're on a prerelease train, then updates are coming fast // check for a new one daily. otherwise, weekly. const { version } = npm - const current = semver.parse(version) + const current = parse(version) // if we're on a beta train, always get the next beta if (current.prerelease.length) { diff --git a/deps/npm/man/man1/npm-audit.1 b/deps/npm/man/man1/npm-audit.1 index e8d71147400c45..5832cb09ff9980 100644 --- a/deps/npm/man/man1/npm-audit.1 +++ b/deps/npm/man/man1/npm-audit.1 @@ -80,7 +80,7 @@ Public signing keys are provided at \fBregistry-host.tld/-/npm/v1/keys\fR in the Keys response: .RS 0 .IP \(bu 4 -\fBexpires\fR: null or a simplified extended \fBISO 8601 format\fR \fI\(lahttps://en.wikipedia.org/wiki/ISO_8601"\(ra\fR: \fBYYYY-MM-DDTHH:mm:ss.sssZ\fR +\fBexpires\fR: null or a simplified extended \fBISO 8601 format\fR \fI\(lahttps://en.wikipedia.org/wiki/ISO_8601\(ra\fR: \fBYYYY-MM-DDTHH:mm:ss.sssZ\fR .IP \(bu 4 \fBkeydid\fR: sha256 fingerprint of the public key .IP \(bu 4 diff --git a/deps/npm/man/man1/npm-ls.1 b/deps/npm/man/man1/npm-ls.1 index a9f7e4af68b87b..9c8a25d0c9237a 100644 --- a/deps/npm/man/man1/npm-ls.1 +++ b/deps/npm/man/man1/npm-ls.1 @@ -20,7 +20,7 @@ Positional arguments are \fBname@version-range\fR identifiers, which will limit .P .RS 2 .nf -npm@10.5.1 /path/to/npm +npm@10.5.2 /path/to/npm └─┬ init-package-json@0.0.4 └── promzard@0.1.5 .fi diff --git a/deps/npm/man/man1/npm.1 b/deps/npm/man/man1/npm.1 index 6ac1a88baf121d..0df8bb7e2a1962 100644 --- a/deps/npm/man/man1/npm.1 +++ b/deps/npm/man/man1/npm.1 @@ -12,7 +12,7 @@ npm Note: This command is unaware of workspaces. 
.SS "Version" .P -10.5.1 +10.5.2 .SS "Description" .P npm is the package manager for the Node JavaScript platform. It puts modules in place so that node can find them, and manages dependency conflicts intelligently. diff --git a/deps/npm/node_modules/@npmcli/agent/lib/agents.js b/deps/npm/node_modules/@npmcli/agent/lib/agents.js index ffd299f3d2ba69..c541b93001517e 100644 --- a/deps/npm/node_modules/@npmcli/agent/lib/agents.js +++ b/deps/npm/node_modules/@npmcli/agent/lib/agents.js @@ -65,7 +65,10 @@ module.exports = class Agent extends AgentBase { ProxyAgent = this.isSecureEndpoint(options) ? ProxyAgent[1] : ProxyAgent[0] } - const proxyAgent = new ProxyAgent(proxy, this.#options) + const proxyAgent = new ProxyAgent(proxy, { + ...this.#options, + socketOptions: { family: this.#options.family }, + }) proxyCache.set(cacheKey, proxyAgent) return proxyAgent diff --git a/deps/npm/node_modules/@npmcli/agent/package.json b/deps/npm/node_modules/@npmcli/agent/package.json index ce240b283a42c9..ef5b4e3228cc46 100644 --- a/deps/npm/node_modules/@npmcli/agent/package.json +++ b/deps/npm/node_modules/@npmcli/agent/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/agent", - "version": "2.2.1", + "version": "2.2.2", "description": "the http/https agent used by the npm cli", "main": "lib/index.js", "scripts": { @@ -36,7 +36,7 @@ "http-proxy-agent": "^7.0.0", "https-proxy-agent": "^7.0.1", "lru-cache": "^10.0.1", - "socks-proxy-agent": "^8.0.1" + "socks-proxy-agent": "^8.0.3" }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js index 8ceb6b72123f68..75e4d373259a09 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js @@ -12,7 +12,7 @@ const { readdirScoped } = require('@npmcli/fs') const { lstat, readlink } = require('fs/promises') const { depth } = require('treeverse') const log = require('proc-log') -const { cleanUrl } = require('npm-registry-fetch') +const { redact } = require('@npmcli/redact') const { OK, @@ -1213,7 +1213,7 @@ This is a one-time fix-up, please be patient... 
if (this.#manifests.has(spec.raw)) { return this.#manifests.get(spec.raw) } else { - const cleanRawSpec = cleanUrl(spec.rawSpec) + const cleanRawSpec = redact(spec.rawSpec) log.silly('fetch manifest', spec.raw.replace(spec.rawSpec, cleanRawSpec)) const o = { ...options, diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/place-dep.js b/deps/npm/node_modules/@npmcli/arborist/lib/place-dep.js index e757d0c38a6d70..bf0fef6525343a 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/place-dep.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/place-dep.js @@ -9,7 +9,7 @@ const localeCompare = require('@isaacs/string-locale-compare')('en') const log = require('proc-log') -const { cleanUrl } = require('npm-registry-fetch') +const { redact } = require('@npmcli/redact') const deepestNestingTarget = require('./deepest-nesting-target.js') const CanPlaceDep = require('./can-place-dep.js') const { @@ -188,7 +188,7 @@ class PlaceDep { `${this.dep.name}@${this.dep.version}`, this.canPlace.description, `for: ${this.edge.from.package._id || this.edge.from.location}`, - `want: ${cleanUrl(this.edge.spec || '*')}` + `want: ${redact(this.edge.spec || '*')}` ) const placementType = this.canPlace.canPlace === CONFLICT diff --git a/deps/npm/node_modules/@npmcli/arborist/package.json b/deps/npm/node_modules/@npmcli/arborist/package.json index d7c393d99dfa57..3a92e669d4bb68 100644 --- a/deps/npm/node_modules/@npmcli/arborist/package.json +++ b/deps/npm/node_modules/@npmcli/arborist/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/arborist", - "version": "7.4.1", + "version": "7.4.2", "description": "Manage node_modules trees", "dependencies": { "@isaacs/string-locale-compare": "^1.1.0", @@ -12,6 +12,7 @@ "@npmcli/node-gyp": "^3.0.0", "@npmcli/package-json": "^5.0.0", "@npmcli/query": "^3.1.0", + "@npmcli/redact": "^1.1.0", "@npmcli/run-script": "^7.0.2", "bin-links": "^4.0.1", "cacache": "^18.0.0", diff --git a/deps/npm/node_modules/@npmcli/config/lib/definitions/definitions.js b/deps/npm/node_modules/@npmcli/config/lib/definitions/definitions.js index 09b0eceeea6b21..3565cdb4feb44b 100644 --- a/deps/npm/node_modules/@npmcli/config/lib/definitions/definitions.js +++ b/deps/npm/node_modules/@npmcli/config/lib/definitions/definitions.js @@ -1,11 +1,8 @@ -const definitions = {} -module.exports = definitions - const Definition = require('./definition.js') const ciInfo = require('ci-info') -const querystring = require('querystring') -const { join } = require('path') +const querystring = require('node:querystring') +const { join } = require('node:path') const isWindows = process.platform === 'win32' @@ -91,20 +88,12 @@ const cache = `${cacheRoot}/${cacheExtra}` // weird to pull out of the config module. // TODO: use better type definition/validation API, nopt's is so weird. const { - semver: { type: semver }, + semver: { type: Semver }, Umask: { type: Umask }, url: { type: url }, path: { type: path }, } = require('../type-defs.js') -const define = (key, def) => { - /* istanbul ignore if - this should never happen, prevents mistakes below */ - if (definitions[key]) { - throw new Error(`defining key more than once: ${key}`) - } - definitions[key] = new Definition(key, def) -} - // basic flattening function, just copy it over camelCase const flatten = (key, obj, flatOptions) => { const camel = key.replace(/-([a-z])/g, (_0, _1) => _1.toUpperCase()) @@ -135,12 +124,20 @@ const flatten = (key, obj, flatOptions) => { // while fixing some Arborist bug, we won't have to hunt through too // many places. 
-// Define all config keys we know about +// XXX: We should really deprecate all these `--save-blah` switches +// in favor of a single `--save-type` option. The unfortunate shortcut +// we took for `--save-peer --save-optional` being `--save-type=peerOptional` +// makes this tricky, and likely a breaking change. + +// Define all config keys we know about. They are indexed by their own key for +// ease of lookup later. This duplication is an optimization so that we don't +// have to do an extra function call just to "reuse" the key in both places. -define('_auth', { - default: null, - type: [null, String], - description: ` +const definitions = { + _auth: new Definition('_auth', { + default: null, + type: [null, String], + description: ` A basic-auth string to use when authenticating against the npm registry. This will ONLY be used to authenticate against the npm registry. For other registries you will need to scope it like "//other-registry.tld/:_auth" @@ -149,16 +146,15 @@ define('_auth', { is safer to use a registry-provided authentication bearer token stored in the ~/.npmrc file by running \`npm login\`. `, - flatten, -}) - -define('access', { - default: null, - defaultDescription: ` + flatten, + }), + access: new Definition('access', { + default: null, + defaultDescription: ` 'public' for new packages, existing packages it will not change the current level `, - type: [null, 'restricted', 'public'], - description: ` + type: [null, 'restricted', 'public'], + description: ` If you do not want your scoped package to be publicly viewable (and installable) set \`--access=restricted\`. @@ -169,131 +165,121 @@ define('access', { publish will change the access for an existing package the same way that \`npm access set status\` would. `, - flatten, -}) - -define('all', { - default: false, - type: Boolean, - short: 'a', - description: ` + flatten, + }), + all: new Definition('all', { + default: false, + type: Boolean, + short: 'a', + description: ` When running \`npm outdated\` and \`npm ls\`, setting \`--all\` will show all outdated or installed packages, rather than only those directly depended upon by the current project. `, - flatten, -}) - -define('allow-same-version', { - default: false, - type: Boolean, - description: ` + flatten, + }), + 'allow-same-version': new Definition('allow-same-version', { + default: false, + type: Boolean, + description: ` Prevents throwing an error when \`npm version\` is used to set the new version to the same value as the current version. `, - flatten, -}) - -define('also', { - default: null, - type: [null, 'dev', 'development'], - description: ` - When set to \`dev\` or \`development\`, this is an alias for - \`--include=dev\`. - `, - deprecated: 'Please use --include=dev instead.', - flatten (key, obj, flatOptions) { - definitions.omit.flatten('omit', obj, flatOptions) - }, -}) - -define('audit', { - default: true, - type: Boolean, - description: ` - When "true" submit audit reports alongside the current npm command to the - default registry and all registries configured for scopes. See the - documentation for [\`npm audit\`](/commands/npm-audit) for details on what - is submitted. - `, - flatten, -}) - -define('audit-level', { - default: null, - type: [null, 'info', 'low', 'moderate', 'high', 'critical', 'none'], - description: ` + flatten, + }), + also: new Definition('also', { + default: null, + type: [null, 'dev', 'development'], + description: ` + When set to \`dev\` or \`development\`, this is an alias for + \`--include=dev\`. 
+ `, + deprecated: 'Please use --include=dev instead.', + flatten (key, obj, flatOptions) { + definitions.omit.flatten('omit', obj, flatOptions) + }, + }), + audit: new Definition('audit', { + default: true, + type: Boolean, + description: ` + When "true" submit audit reports alongside the current npm command to the + default registry and all registries configured for scopes. See the + documentation for [\`npm audit\`](/commands/npm-audit) for details on what + is submitted. + `, + flatten, + }), + 'audit-level': new Definition('audit-level', { + default: null, + type: [null, 'info', 'low', 'moderate', 'high', 'critical', 'none'], + description: ` The minimum level of vulnerability for \`npm audit\` to exit with a non-zero exit code. - `, - flatten, -}) - -define('auth-type', { - default: 'web', - type: ['legacy', 'web'], - description: ` - What authentication strategy to use with \`login\`. - Note that if an \`otp\` config is given, this value will always be set to \`legacy\`. - `, - flatten, -}) - -define('before', { - default: null, - type: [null, Date], - description: ` - If passed to \`npm install\`, will rebuild the npm tree such that only - versions that were available **on or before** the \`--before\` time get - installed. If there's no versions available for the current set of - direct dependencies, the command will error. - - If the requested version is a \`dist-tag\` and the given tag does not - pass the \`--before\` filter, the most recent version less than or equal - to that tag will be used. For example, \`foo@latest\` might install - \`foo@1.2\` even though \`latest\` is \`2.0\`. - `, - flatten, -}) - -define('bin-links', { - default: true, - type: Boolean, - description: ` - Tells npm to create symlinks (or \`.cmd\` shims on Windows) for package - executables. - - Set to false to have it not do this. This can be used to work around the - fact that some file systems don't support symlinks, even on ostensibly - Unix systems. - `, - flatten, -}) - -define('browser', { - default: null, - defaultDescription: ` + `, + flatten, + }), + 'auth-type': new Definition('auth-type', { + default: 'web', + type: ['legacy', 'web'], + description: ` + What authentication strategy to use with \`login\`. + Note that if an \`otp\` config is given, this value will always be set to \`legacy\`. + `, + flatten, + }), + before: new Definition('before', { + default: null, + type: [null, Date], + description: ` + If passed to \`npm install\`, will rebuild the npm tree such that only + versions that were available **on or before** the \`--before\` time get + installed. If there's no versions available for the current set of + direct dependencies, the command will error. + + If the requested version is a \`dist-tag\` and the given tag does not + pass the \`--before\` filter, the most recent version less than or equal + to that tag will be used. For example, \`foo@latest\` might install + \`foo@1.2\` even though \`latest\` is \`2.0\`. + `, + flatten, + }), + 'bin-links': new Definition('bin-links', { + default: true, + type: Boolean, + description: ` + Tells npm to create symlinks (or \`.cmd\` shims on Windows) for package + executables. + + Set to false to have it not do this. This can be used to work around the + fact that some file systems don't support symlinks, even on ostensibly + Unix systems. 
+ `, + flatten, + }), + browser: new Definition('browser', { + default: null, + defaultDescription: ` OS X: \`"open"\`, Windows: \`"start"\`, Others: \`"xdg-open"\` - `, - type: [null, Boolean, String], - description: ` + `, + type: [null, Boolean, String], + description: ` The browser that is called by npm commands to open websites. Set to \`false\` to suppress browser behavior and instead print urls to terminal. Set to \`true\` to use default system URL opener. - `, - flatten, -}) - -define('ca', { - default: null, - type: [null, String, Array], - description: ` + `, + flatten, + }), + ca: new Definition('ca', { + default: null, + type: [null, String, Array], + description: ` The Certificate Authority signing certificate that is trusted for SSL connections to the registry. Values should be in PEM format (Windows - calls it "Base-64 encoded X.509 (.CER)") with newlines replaced by the + calls it "Base-64 encoded X.509 (.CER)") with newlines replaced by the string "\\n". For example: \`\`\`ini @@ -312,2124 +298,1979 @@ define('ca', { See also the \`strict-ssl\` config. `, - flatten, -}) - -define('cache', { - default: cache, - defaultDescription: ` - Windows: \`%LocalAppData%\\npm-cache\`, Posix: \`~/.npm\` - `, - type: path, - description: ` - The location of npm's cache directory. - `, - flatten (key, obj, flatOptions) { - flatOptions.cache = join(obj.cache, '_cacache') - flatOptions.npxCache = join(obj.cache, '_npx') - flatOptions.tufCache = join(obj.cache, '_tuf') - }, -}) - -define('cache-max', { - default: Infinity, - type: Number, - description: ` - \`--cache-max=0\` is an alias for \`--prefer-online\` - `, - deprecated: ` - This option has been deprecated in favor of \`--prefer-online\` - `, - flatten (key, obj, flatOptions) { - if (obj[key] <= 0) { - flatOptions.preferOnline = true - } - }, -}) - -define('cache-min', { - default: 0, - type: Number, - description: ` - \`--cache-min=9999 (or bigger)\` is an alias for \`--prefer-offline\`. - `, - deprecated: ` - This option has been deprecated in favor of \`--prefer-offline\`. - `, - flatten (key, obj, flatOptions) { - if (obj[key] >= 9999) { - flatOptions.preferOffline = true - } - }, -}) - -define('cafile', { - default: null, - type: path, - description: ` - A path to a file containing one or multiple Certificate Authority signing - certificates. Similar to the \`ca\` setting, but allows for multiple - CA's, as well as for the CA information to be stored in a file on disk. - `, - flatten (key, obj, flatOptions) { - // always set to null in defaults - if (!obj.cafile) { - return - } - - const raw = maybeReadFile(obj.cafile) - if (!raw) { - return - } - - const delim = '-----END CERTIFICATE-----' - flatOptions.ca = raw.replace(/\r\n/g, '\n').split(delim) - .filter(section => section.trim()) - .map(section => section.trimLeft() + delim) - }, -}) - -define('call', { - default: '', - type: String, - short: 'c', - description: ` - Optional companion option for \`npm exec\`, \`npx\` that allows for - specifying a custom command to be run along with the installed packages. - - \`\`\`bash - npm exec --package yo --package generator-node --call "yo node" - \`\`\` - `, - flatten, -}) - -define('cert', { - default: null, - type: [null, String], - description: ` - A client certificate to pass when accessing the registry. Values should - be in PEM format (Windows calls it "Base-64 encoded X.509 (.CER)") with - newlines replaced by the string "\\n". 
For example: - - \`\`\`ini - cert="-----BEGIN CERTIFICATE-----\\nXXXX\\nXXXX\\n-----END CERTIFICATE-----" - \`\`\` - - It is _not_ the path to a certificate file, though you can set a registry-scoped - "certfile" path like "//other-registry.tld/:certfile=/path/to/cert.pem". - `, - deprecated: ` - \`key\` and \`cert\` are no longer used for most registry operations. - Use registry scoped \`keyfile\` and \`certfile\` instead. - Example: - //other-registry.tld/:keyfile=/path/to/key.pem - //other-registry.tld/:certfile=/path/to/cert.crt - `, - flatten, -}) - -define('cidr', { - default: null, - type: [null, String, Array], - description: ` - This is a list of CIDR address to be used when configuring limited access - tokens with the \`npm token create\` command. - `, - flatten, -}) - -// This should never be directly used, the flattened value is the derived value -// and is sent to other modules, and is also exposed as `npm.color` for use -// inside npm itself. -define('color', { - default: !process.env.NO_COLOR || process.env.NO_COLOR === '0', - usage: '--color|--no-color|--color always', - defaultDescription: ` - true unless the NO_COLOR environ is set to something other than '0' - `, - type: ['always', Boolean], - description: ` - If false, never shows colors. If \`"always"\` then always shows colors. - If true, then only prints color codes for tty file descriptors. - `, - flatten (key, obj, flatOptions) { - flatOptions.color = !obj.color ? false - : obj.color === 'always' ? true - : !!process.stdout.isTTY - flatOptions.logColor = !obj.color ? false - : obj.color === 'always' ? true - : !!process.stderr.isTTY - }, -}) - -define('commit-hooks', { - default: true, - type: Boolean, - description: ` - Run git commit hooks when using the \`npm version\` command. - `, - flatten, -}) - -define('cpu', { - default: null, - type: [null, String], - description: ` - Override CPU architecture of native modules to install. - Acceptable values are same as \`cpu\` field of package.json, - which comes from \`process.arch\`. - `, - flatten, -}) - -define('os', { - default: null, - type: [null, String], - description: ` - Override OS of native modules to install. - Acceptable values are same as \`os\` field of package.json, - which comes from \`process.platform\`. - `, - flatten, -}) - -define('libc', { - default: null, - type: [null, String], - description: ` - Override libc of native modules to install. - Acceptable values are same as \`libc\` field of package.json - `, - flatten, -}) - -define('depth', { - default: null, - defaultDescription: ` - \`Infinity\` if \`--all\` is set, otherwise \`1\` - `, - type: [null, Number], - description: ` - The depth to go when recursing packages for \`npm ls\`. - - If not set, \`npm ls\` will show only the immediate dependencies of the - root project. If \`--all\` is set, then npm will show all dependencies - by default. - `, - flatten, -}) - -define('description', { - default: true, - type: Boolean, - usage: '--no-description', - description: ` - Show the description in \`npm search\` - `, - flatten (key, obj, flatOptions) { - flatOptions.search = flatOptions.search || { limit: 20 } - flatOptions.search[key] = obj[key] - }, -}) - -define('dev', { - default: false, - type: Boolean, - description: ` - Alias for \`--include=dev\`. 
- `, - deprecated: 'Please use --include=dev instead.', - flatten (key, obj, flatOptions) { - definitions.omit.flatten('omit', obj, flatOptions) - }, -}) - -define('diff', { - default: [], - hint: '', - type: [String, Array], - description: ` - Define arguments to compare in \`npm diff\`. - `, - flatten, -}) - -define('diff-ignore-all-space', { - default: false, - type: Boolean, - description: ` - Ignore whitespace when comparing lines in \`npm diff\`. - `, - flatten, -}) - -define('diff-name-only', { - default: false, - type: Boolean, - description: ` - Prints only filenames when using \`npm diff\`. - `, - flatten, -}) - -define('diff-no-prefix', { - default: false, - type: Boolean, - description: ` - Do not show any source or destination prefix in \`npm diff\` output. - - Note: this causes \`npm diff\` to ignore the \`--diff-src-prefix\` and - \`--diff-dst-prefix\` configs. - `, - flatten, -}) - -define('diff-dst-prefix', { - default: 'b/', - hint: '', - type: String, - description: ` - Destination prefix to be used in \`npm diff\` output. - `, - flatten, -}) - -define('diff-src-prefix', { - default: 'a/', - hint: '', - type: String, - description: ` - Source prefix to be used in \`npm diff\` output. - `, - flatten, -}) - -define('diff-text', { - default: false, - type: Boolean, - description: ` - Treat all files as text in \`npm diff\`. - `, - flatten, -}) - -define('diff-unified', { - default: 3, - type: Number, - description: ` - The number of lines of context to print in \`npm diff\`. - `, - flatten, -}) - -define('dry-run', { - default: false, - type: Boolean, - description: ` - Indicates that you don't want npm to make any changes and that it should - only report what it would have done. This can be passed into any of the - commands that modify your local installation, eg, \`install\`, - \`update\`, \`dedupe\`, \`uninstall\`, as well as \`pack\` and - \`publish\`. - - Note: This is NOT honored by other network related commands, eg - \`dist-tags\`, \`owner\`, etc. - `, - flatten, -}) - -define('editor', { - default: editor, - defaultDescription: ` - The EDITOR or VISUAL environment variables, or '%SYSTEMROOT%\\notepad.exe' on Windows, - or 'vi' on Unix systems - `, - type: String, - description: ` - The command to run for \`npm edit\` and \`npm config edit\`. - `, - flatten, -}) - -define('engine-strict', { - default: false, - type: Boolean, - description: ` - If set to true, then npm will stubbornly refuse to install (or even - consider installing) any package that claims to not be compatible with - the current Node.js version. - - This can be overridden by setting the \`--force\` flag. - `, - flatten, -}) - -define('expect-results', { - default: null, - type: [null, Boolean], - exclusive: ['expect-result-count'], - description: ` - Tells npm whether or not to expect results from the command. - Can be either true (expect some results) or false (expect no results). - `, -}) - -define('expect-result-count', { - default: null, - type: [null, Number], - hint: '', - exclusive: ['expect-results'], - description: ` - Tells to expect a specific number of results from the command. - `, -}) - -define('fetch-retries', { - default: 2, - type: Number, - description: ` - The "retries" config for the \`retry\` module to use when fetching - packages from the registry. - - npm will retry idempotent read requests to the registry in the case - of network failures or 5xx HTTP errors. 
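Reviewer note: the `fetch-retries` option above, together with the `fetch-retry-factor`, `fetch-retry-mintimeout`, and `fetch-retry-maxtimeout` definitions that follow, is flattened into a single `retry` object for the fetcher. A minimal sketch of the backoff schedule those defaults would produce, assuming the usual exponential formula from the `retry` package (minTimeout * factor^attempt, capped at maxTimeout, randomization ignored); illustrative only, not npm's code:

```js
// Approximate retry delays from the flattened options (illustrative sketch).
const retryOpts = { retries: 2, factor: 10, minTimeout: 10000, maxTimeout: 60000 }

function backoffDelays ({ retries, factor, minTimeout, maxTimeout }) {
  const delays = []
  for (let attempt = 0; attempt < retries; attempt++) {
    // exponential backoff, capped at maxTimeout
    delays.push(Math.min(minTimeout * Math.pow(factor, attempt), maxTimeout))
  }
  return delays
}

console.log(backoffDelays(retryOpts)) // [ 10000, 60000 ] with the defaults shown here
```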
- `, - flatten (key, obj, flatOptions) { - flatOptions.retry = flatOptions.retry || {} - flatOptions.retry.retries = obj[key] - }, -}) - -define('fetch-retry-factor', { - default: 10, - type: Number, - description: ` - The "factor" config for the \`retry\` module to use when fetching - packages. - `, - flatten (key, obj, flatOptions) { - flatOptions.retry = flatOptions.retry || {} - flatOptions.retry.factor = obj[key] - }, -}) - -define('fetch-retry-maxtimeout', { - default: 60000, - defaultDescription: '60000 (1 minute)', - type: Number, - description: ` - The "maxTimeout" config for the \`retry\` module to use when fetching - packages. - `, - flatten (key, obj, flatOptions) { - flatOptions.retry = flatOptions.retry || {} - flatOptions.retry.maxTimeout = obj[key] - }, -}) - -define('fetch-retry-mintimeout', { - default: 10000, - defaultDescription: '10000 (10 seconds)', - type: Number, - description: ` - The "minTimeout" config for the \`retry\` module to use when fetching - packages. - `, - flatten (key, obj, flatOptions) { - flatOptions.retry = flatOptions.retry || {} - flatOptions.retry.minTimeout = obj[key] - }, -}) - -define('fetch-timeout', { - default: 5 * 60 * 1000, - defaultDescription: `${5 * 60 * 1000} (5 minutes)`, - type: Number, - description: ` - The maximum amount of time to wait for HTTP requests to complete. - `, - flatten (key, obj, flatOptions) { - flatOptions.timeout = obj[key] - }, -}) - -define('force', { - default: false, - type: Boolean, - short: 'f', - description: ` - Removes various protections against unfortunate side effects, common - mistakes, unnecessary performance degradation, and malicious input. - - * Allow clobbering non-npm files in global installs. - * Allow the \`npm version\` command to work on an unclean git repository. - * Allow deleting the cache folder with \`npm cache clean\`. - * Allow installing packages that have an \`engines\` declaration - requiring a different version of npm. - * Allow installing packages that have an \`engines\` declaration - requiring a different version of \`node\`, even if \`--engine-strict\` - is enabled. - * Allow \`npm audit fix\` to install modules outside your stated - dependency range (including SemVer-major changes). - * Allow unpublishing all versions of a published package. - * Allow conflicting peerDependencies to be installed in the root project. - * Implicitly set \`--yes\` during \`npm init\`. - * Allow clobbering existing values in \`npm pkg\` - * Allow unpublishing of entire packages (not just a single version). - - If you don't have a clear idea of what you want to do, it is strongly - recommended that you do not use this option! - `, - flatten, -}) - -define('foreground-scripts', { - default: false, - defaultDescription: `\`false\` unless when using \`npm pack\` or \`npm publish\` where it - defaults to \`true\``, - type: Boolean, - description: ` - Run all build scripts (ie, \`preinstall\`, \`install\`, and - \`postinstall\`) scripts for installed packages in the foreground - process, sharing standard input, output, and error with the main npm - process. - - Note that this will generally make installs run slower, and be much - noisier, but can be useful for debugging. - `, - flatten, -}) - -define('format-package-lock', { - default: true, - type: Boolean, - description: ` - Format \`package-lock.json\` or \`npm-shrinkwrap.json\` as a human - readable file. 
- `, - flatten, -}) - -define('fund', { - default: true, - type: Boolean, - description: ` - When "true" displays the message at the end of each \`npm install\` - acknowledging the number of dependencies looking for funding. - See [\`npm fund\`](/commands/npm-fund) for details. - `, - flatten, -}) - -define('git', { - default: 'git', - type: String, - description: ` - The command to use for git commands. If git is installed on the - computer, but is not in the \`PATH\`, then set this to the full path to - the git binary. - `, - flatten, -}) - -define('git-tag-version', { - default: true, - type: Boolean, - description: ` - Tag the commit when using the \`npm version\` command. Setting this to - false results in no commit being made at all. - `, - flatten, -}) - -define('global', { - default: false, - type: Boolean, - short: 'g', - description: ` - Operates in "global" mode, so that packages are installed into the - \`prefix\` folder instead of the current working directory. See - [folders](/configuring-npm/folders) for more on the differences in - behavior. - - * packages are installed into the \`{prefix}/lib/node_modules\` folder, - instead of the current working directory. - * bin files are linked to \`{prefix}/bin\` - * man pages are linked to \`{prefix}/share/man\` - `, - flatten: (key, obj, flatOptions) => { - flatten(key, obj, flatOptions) - if (flatOptions.global) { - flatOptions.location = 'global' - } - }, -}) - -// the globalconfig has its default defined outside of this module -define('globalconfig', { - type: path, - default: '', - defaultDescription: ` - The global --prefix setting plus 'etc/npmrc'. For example, - '/usr/local/etc/npmrc' - `, - description: ` - The config file to read for global config options. - `, - flatten, -}) - -define('global-style', { - default: false, - type: Boolean, - description: ` - Only install direct dependencies in the top level \`node_modules\`, - but hoist on deeper dependencies. - Sets \`--install-strategy=shallow\`. - `, - deprecated: ` - This option has been deprecated in favor of \`--install-strategy=shallow\` - `, - flatten (key, obj, flatOptions) { - if (obj[key]) { - obj['install-strategy'] = 'shallow' - flatOptions.installStrategy = 'shallow' - } - }, -}) - -define('heading', { - default: 'npm', - type: String, - description: ` - The string that starts all the debugging log output. - `, - flatten, -}) - -define('https-proxy', { - default: null, - type: [null, url], - description: ` - A proxy to use for outgoing https requests. If the \`HTTPS_PROXY\` or - \`https_proxy\` or \`HTTP_PROXY\` or \`http_proxy\` environment variables - are set, proxy settings will be honored by the underlying - \`make-fetch-happen\` library. - `, - flatten, -}) - -define('if-present', { - default: false, - type: Boolean, - envExport: false, - description: ` - If true, npm will not exit with an error code when \`run-script\` is - invoked for a script that isn't defined in the \`scripts\` section of - \`package.json\`. This option can be used when it's desirable to - optionally run a script when it's present and fail if the script fails. - This is useful, for example, when running scripts that may only apply for - some builds in an otherwise generic CI setup. - `, - flatten, -}) - -define('ignore-scripts', { - default: false, - type: Boolean, - description: ` - If true, npm does not run scripts specified in package.json files. 
- - Note that commands explicitly intended to run a particular script, such - as \`npm start\`, \`npm stop\`, \`npm restart\`, \`npm test\`, and \`npm - run-script\` will still run their intended script if \`ignore-scripts\` is - set, but they will *not* run any pre- or post-scripts. - `, - flatten, -}) - -define('include', { - default: [], - type: [Array, 'prod', 'dev', 'optional', 'peer'], - description: ` - Option that allows for defining which types of dependencies to install. - - This is the inverse of \`--omit=\`. - - Dependency types specified in \`--include\` will not be omitted, - regardless of the order in which omit/include are specified on the - command-line. - `, - flatten (key, obj, flatOptions) { - // just call the omit flattener, it reads from obj.include - definitions.omit.flatten('omit', obj, flatOptions) - }, -}) - -define('include-staged', { - default: false, - type: Boolean, - description: ` - Allow installing "staged" published packages, as defined by [npm RFC PR - #92](https://github.com/npm/rfcs/pull/92). - - This is experimental, and not implemented by the npm public registry. - `, - flatten, -}) - -define('include-workspace-root', { - default: false, - type: Boolean, - envExport: false, - description: ` - Include the workspace root when workspaces are enabled for a command. - - When false, specifying individual workspaces via the \`workspace\` config, - or all workspaces via the \`workspaces\` flag, will cause npm to operate only - on the specified workspaces, and not on the root project. - `, - flatten, -}) - -define('init-author-email', { - default: '', - hint: '', - type: String, - description: ` - The value \`npm init\` should use by default for the package author's - email. - `, -}) - -define('init-author-name', { - default: '', - hint: '', - type: String, - description: ` - The value \`npm init\` should use by default for the package author's name. - `, -}) - -define('init-author-url', { - default: '', - type: ['', url], - hint: '', - description: ` - The value \`npm init\` should use by default for the package author's homepage. - `, -}) - -define('init-license', { - default: 'ISC', - hint: '', - type: String, - description: ` - The value \`npm init\` should use by default for the package license. - `, -}) - -define('init-module', { - default: '~/.npm-init.js', - type: path, - hint: '', - description: ` - A module that will be loaded by the \`npm init\` command. See the - documentation for the - [init-package-json](https://github.com/npm/init-package-json) module for - more information, or [npm init](/commands/npm-init). - `, -}) - -define('init-version', { - default: '1.0.0', - type: semver, - hint: '', - description: ` - The value that \`npm init\` should use by default for the package - version number, if not already set in package.json. - `, -}) - -// these "aliases" are historically supported in .npmrc files, unfortunately -// They should be removed in a future npm version. -define('init.author.email', { - default: '', - type: String, - deprecated: ` - Use \`--init-author-email\` instead.`, - description: ` - Alias for \`--init-author-email\` - `, -}) - -define('init.author.name', { - default: '', - type: String, - deprecated: ` - Use \`--init-author-name\` instead. - `, - description: ` - Alias for \`--init-author-name\` - `, -}) - -define('init.author.url', { - default: '', - type: ['', url], - deprecated: ` - Use \`--init-author-url\` instead. 
- `, - description: ` - Alias for \`--init-author-url\` - `, -}) - -define('init.license', { - default: 'ISC', - type: String, - deprecated: ` - Use \`--init-license\` instead. - `, - description: ` - Alias for \`--init-license\` - `, -}) - -define('init.module', { - default: '~/.npm-init.js', - type: path, - deprecated: ` - Use \`--init-module\` instead. - `, - description: ` - Alias for \`--init-module\` - `, -}) - -define('init.version', { - default: '1.0.0', - type: semver, - deprecated: ` - Use \`--init-version\` instead. - `, - description: ` - Alias for \`--init-version\` - `, -}) - -define('install-links', { - default: false, - type: Boolean, - description: ` - When set file: protocol dependencies will be packed and installed as - regular dependencies instead of creating a symlink. This option has - no effect on workspaces. - `, - flatten, -}) - -define('install-strategy', { - default: 'hoisted', - type: ['hoisted', 'nested', 'shallow', 'linked'], - description: ` - Sets the strategy for installing packages in node_modules. - hoisted (default): Install non-duplicated in top-level, and duplicated as - necessary within directory structure. - nested: (formerly --legacy-bundling) install in place, no hoisting. - shallow (formerly --global-style) only install direct deps at top-level. - linked: (experimental) install in node_modules/.store, link in place, - unhoisted. - `, - flatten, -}) - -define('json', { - default: false, - type: Boolean, - description: ` - Whether or not to output JSON data, rather than the normal output. - - * In \`npm pkg set\` it enables parsing set values with JSON.parse() - before saving them to your \`package.json\`. - - Not supported by all npm commands. - `, - flatten, -}) - -define('key', { - default: null, - type: [null, String], - description: ` - A client key to pass when accessing the registry. Values should be in - PEM format with newlines replaced by the string "\\n". For example: - - \`\`\`ini - key="-----BEGIN PRIVATE KEY-----\\nXXXX\\nXXXX\\n-----END PRIVATE KEY-----" - \`\`\` - - It is _not_ the path to a key file, though you can set a registry-scoped - "keyfile" path like "//other-registry.tld/:keyfile=/path/to/key.pem". - `, - deprecated: ` - \`key\` and \`cert\` are no longer used for most registry operations. - Use registry scoped \`keyfile\` and \`certfile\` instead. - Example: - //other-registry.tld/:keyfile=/path/to/key.pem - //other-registry.tld/:certfile=/path/to/cert.crt - `, - flatten, -}) - -define('legacy-bundling', { - default: false, - type: Boolean, - description: ` - Instead of hoisting package installs in \`node_modules\`, install packages - in the same manner that they are depended on. This may cause very deep - directory structures and duplicate package installs as there is no - de-duplicating. - Sets \`--install-strategy=nested\`. - `, - deprecated: ` - This option has been deprecated in favor of \`--install-strategy=nested\` - `, - flatten (key, obj, flatOptions) { - if (obj[key]) { - obj['install-strategy'] = 'nested' - flatOptions.installStrategy = 'nested' - } - }, -}) - -define('legacy-peer-deps', { - default: false, - type: Boolean, - description: ` - Causes npm to completely ignore \`peerDependencies\` when building a - package tree, as in npm versions 3 through 6. - - If a package cannot be installed because of overly strict - \`peerDependencies\` that collide, it provides a way to move forward - resolving the situation. 
- - This differs from \`--omit=peer\`, in that \`--omit=peer\` will avoid - unpacking \`peerDependencies\` on disk, but will still design a tree such - that \`peerDependencies\` _could_ be unpacked in a correct place. - - Use of \`legacy-peer-deps\` is not recommended, as it will not enforce - the \`peerDependencies\` contract that meta-dependencies may rely on. - `, - flatten, -}) - -define('link', { - default: false, - type: Boolean, - description: ` - Used with \`npm ls\`, limiting output to only those packages that are - linked. - `, -}) - -define('local-address', { - default: null, - type: getLocalAddresses(), - typeDescription: 'IP Address', - description: ` - The IP address of the local interface to use when making connections to - the npm registry. Must be IPv4 in versions of Node prior to 0.12. - `, - flatten, -}) - -define('sbom-format', { - default: null, - type: [ - 'cyclonedx', - 'spdx', - ], - description: ` - SBOM format to use when generating SBOMs. - `, - flatten, -}) - -define('sbom-type', { - default: 'library', - type: [ - 'library', - 'application', - 'framework', - ], - description: ` - The type of package described by the generated SBOM. For SPDX, this is the - value for the \`primaryPackagePurpose\` field. For CycloneDX, this is the - value for the \`type\` field. - `, - flatten, -}) - -define('location', { - default: 'user', - short: 'L', - type: [ - 'global', - 'user', - 'project', - ], - defaultDescription: ` - "user" unless \`--global\` is passed, which will also set this value to "global" - `, - description: ` - When passed to \`npm config\` this refers to which config file to use. - - When set to "global" mode, packages are installed into the \`prefix\` folder - instead of the current working directory. See - [folders](/configuring-npm/folders) for more on the differences in behavior. - - * packages are installed into the \`{prefix}/lib/node_modules\` folder, - instead of the current working directory. - * bin files are linked to \`{prefix}/bin\` - * man pages are linked to \`{prefix}/share/man\` - `, - flatten: (key, obj, flatOptions) => { - flatten(key, obj, flatOptions) - if (flatOptions.global) { - flatOptions.location = 'global' - } - if (obj.location === 'global') { - flatOptions.global = true - } - }, -}) - -define('lockfile-version', { - default: null, - type: [null, 1, 2, 3, '1', '2', '3'], - defaultDescription: ` - Version 3 if no lockfile, auto-converting v1 lockfiles to v3, otherwise - maintain current lockfile version.`, - description: ` - Set the lockfile format version to be used in package-lock.json and - npm-shrinkwrap-json files. Possible options are: - - 1: The lockfile version used by npm versions 5 and 6. Lacks some data that - is used during the install, resulting in slower and possibly less - deterministic installs. Prevents lockfile churn when interoperating with - older npm versions. - - 2: The default lockfile version used by npm version 7 and 8. Includes both - the version 1 lockfile data and version 3 lockfile data, for maximum - determinism and interoperability, at the expense of more bytes on disk. - - 3: Only the new lockfile information introduced in npm version 7. Smaller - on disk than lockfile version 2, but not interoperable with older npm - versions. Ideal if all users are on npm version 7 and higher. 
- `, - flatten: (key, obj, flatOptions) => { - flatOptions.lockfileVersion = obj[key] && parseInt(obj[key], 10) - }, -}) - -define('loglevel', { - default: 'notice', - type: [ - 'silent', - 'error', - 'warn', - 'notice', - 'http', - 'info', - 'verbose', - 'silly', - ], - description: ` - What level of logs to report. All logs are written to a debug log, - with the path to that file printed if the execution of a command fails. - - Any logs of a higher level than the setting are shown. The default is - "notice". - - See also the \`foreground-scripts\` config. - `, - flatten (key, obj, flatOptions) { - flatOptions.silent = obj[key] === 'silent' - }, -}) - -define('logs-dir', { - default: null, - type: [null, path], - defaultDescription: ` - A directory named \`_logs\` inside the cache -`, - description: ` - The location of npm's log directory. See [\`npm - logging\`](/using-npm/logging) for more information. - `, -}) - -define('logs-max', { - default: 10, - type: Number, - description: ` - The maximum number of log files to store. - - If set to 0, no log files will be written for the current run. - `, -}) - -define('long', { - default: false, - type: Boolean, - short: 'l', - description: ` - Show extended information in \`ls\`, \`search\`, and \`help-search\`. - `, -}) - -define('maxsockets', { - default: 15, - type: Number, - description: ` - The maximum number of connections to use per origin (protocol/host/port - combination). - `, - flatten (key, obj, flatOptions) { - flatOptions.maxSockets = obj[key] - }, -}) - -define('message', { - default: '%s', - type: String, - short: 'm', - description: ` - Commit message which is used by \`npm version\` when creating version commit. - - Any "%s" in the message will be replaced with the version number. - `, - flatten, -}) - -define('node-options', { - default: null, - type: [null, String], - description: ` - Options to pass through to Node.js via the \`NODE_OPTIONS\` environment - variable. This does not impact how npm itself is executed but it does - impact how lifecycle scripts are called. - `, -}) - -define('noproxy', { - default: '', - defaultDescription: ` - The value of the NO_PROXY environment variable - `, - type: [String, Array], - description: ` - Domain extensions that should bypass any proxies. - - Also accepts a comma-delimited string. - `, - flatten (key, obj, flatOptions) { - if (Array.isArray(obj[key])) { - flatOptions.noProxy = obj[key].join(',') - } else { - flatOptions.noProxy = obj[key] - } - }, -}) - -define('offline', { - default: false, - type: Boolean, - description: ` - Force offline mode: no network requests will be done during install. To allow - the CLI to fill in missing cache data, see \`--prefer-offline\`. - `, - flatten, -}) - -define('omit', { - default: process.env.NODE_ENV === 'production' ? ['dev'] : [], - defaultDescription: ` - 'dev' if the \`NODE_ENV\` environment variable is set to 'production', - otherwise empty. - `, - type: [Array, 'dev', 'optional', 'peer'], - description: ` - Dependency types to omit from the installation tree on disk. - - Note that these dependencies _are_ still resolved and added to the - \`package-lock.json\` or \`npm-shrinkwrap.json\` file. They are just - not physically installed on disk. - - If a package type appears in both the \`--include\` and \`--omit\` - lists, then it will be included. - - If the resulting omit list includes \`'dev'\`, then the \`NODE_ENV\` - environment variable will be set to \`'production'\` for all lifecycle - scripts. 
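Reviewer note: the rule spelled out above (anything listed in `--include` wins over `--omit`, and a final omit list containing `dev` forces `NODE_ENV=production` for lifecycle scripts) is implemented by `buildOmitList`, which is referenced just below but not part of this hunk. A hedged sketch of that documented behaviour:

```js
// Illustrative only; the real helper is buildOmitList(), which is not shown in
// this diff. This simply encodes the include-beats-omit rule described above.
function resolveOmit ({ omit = [], include = [] }) {
  // dependency types explicitly included are never omitted
  const result = omit.filter(type => !include.includes(type))
  if (result.includes('dev')) {
    // omitting dev deps means lifecycle scripts run with a production env
    process.env.NODE_ENV = 'production'
  }
  return result
}

console.log(resolveOmit({ omit: ['dev', 'optional'], include: ['optional'] })) // [ 'dev' ]
```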
- `, - flatten (key, obj, flatOptions) { - flatOptions.omit = buildOmitList(obj) - }, -}) - -define('omit-lockfile-registry-resolved', { - default: false, - type: Boolean, - description: ` - This option causes npm to create lock files without a \`resolved\` key for - registry dependencies. Subsequent installs will need to resolve tarball - endpoints with the configured registry, likely resulting in a longer install - time. - `, - flatten, -}) - -define('only', { - default: null, - type: [null, 'prod', 'production'], - deprecated: ` - Use \`--omit=dev\` to omit dev dependencies from the install. - `, - description: ` - When set to \`prod\` or \`production\`, this is an alias for - \`--omit=dev\`. - `, - flatten (key, obj, flatOptions) { - definitions.omit.flatten('omit', obj, flatOptions) - }, -}) - -define('optional', { - default: null, - type: [null, Boolean], - deprecated: ` - Use \`--omit=optional\` to exclude optional dependencies, or - \`--include=optional\` to include them. - - Default value does install optional deps unless otherwise omitted. - `, - description: ` - Alias for --include=optional or --omit=optional - `, - flatten (key, obj, flatOptions) { - definitions.omit.flatten('omit', obj, flatOptions) - }, -}) - -define('otp', { - default: null, - type: [null, String], - description: ` - This is a one-time password from a two-factor authenticator. It's needed - when publishing or changing package permissions with \`npm access\`. - - If not set, and a registry response fails with a challenge for a one-time - password, npm will prompt on the command line for one. - `, - flatten (key, obj, flatOptions) { - flatten(key, obj, flatOptions) - if (obj.otp) { - obj['auth-type'] = 'legacy' - flatten('auth-type', obj, flatOptions) - } - }, -}) - -define('package', { - default: [], - hint: '', - type: [String, Array], - description: ` - The package or packages to install for [\`npm exec\`](/commands/npm-exec) - `, - flatten, -}) - -define('package-lock', { - default: true, - type: Boolean, - description: ` - If set to false, then ignore \`package-lock.json\` files when installing. - This will also prevent _writing_ \`package-lock.json\` if \`save\` is - true. - `, - flatten: (key, obj, flatOptions) => { - flatten(key, obj, flatOptions) - if (flatOptions.packageLockOnly) { - flatOptions.packageLock = true - } - }, -}) - -define('package-lock-only', { - default: false, - type: Boolean, - description: ` - If set to true, the current operation will only use the \`package-lock.json\`, - ignoring \`node_modules\`. - - For \`update\` this means only the \`package-lock.json\` will be updated, - instead of checking \`node_modules\` and downloading dependencies. - - For \`list\` this means the output will be based on the tree described by the - \`package-lock.json\`, rather than the contents of \`node_modules\`. - `, - flatten: (key, obj, flatOptions) => { - flatten(key, obj, flatOptions) - if (flatOptions.packageLockOnly) { - flatOptions.packageLock = true - } - }, -}) - -define('pack-destination', { - default: '.', - type: String, - description: ` - Directory in which \`npm pack\` will save tarballs. - `, - flatten, -}) - -define('parseable', { - default: false, - type: Boolean, - short: 'p', - description: ` - Output parseable results from commands that write to standard output. For - \`npm search\`, this will be tab-separated table format. 
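Reviewer note: `package-lock` and `package-lock-only` above share the same flattening tail, so `--package-lock-only` re-enables `packageLock` even if `--no-package-lock` was given. A standalone sketch of that interaction, with a stand-in for the module's default `flatten` helper:

```js
// Sketch of the shared flatten logic above; camelCaseFlatten stands in for the
// module's default flatten helper and is not npm's actual implementation.
const camelCaseFlatten = (key, obj, flat) => {
  flat[key.replace(/-([a-z])/g, (_, c) => c.toUpperCase())] = obj[key]
}

const flatOptions = {}
const obj = { 'package-lock': false, 'package-lock-only': true }

for (const key of ['package-lock', 'package-lock-only']) {
  camelCaseFlatten(key, obj, flatOptions)
  if (flatOptions.packageLockOnly) {
    flatOptions.packageLock = true // --package-lock-only implies a lockfile
  }
}

console.log(flatOptions) // { packageLock: true, packageLockOnly: true }
```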
- `, - flatten, -}) - -define('prefer-dedupe', { - default: false, - type: Boolean, - description: ` - Prefer to deduplicate packages if possible, rather than - choosing a newer version of a dependency. - `, - flatten, -}) - -define('prefer-offline', { - default: false, - type: Boolean, - description: ` - If true, staleness checks for cached data will be bypassed, but missing - data will be requested from the server. To force full offline mode, use - \`--offline\`. - `, - flatten, -}) - -define('prefer-online', { - default: false, - type: Boolean, - description: ` - If true, staleness checks for cached data will be forced, making the CLI - look for updates immediately even for fresh package data. - `, - flatten, -}) - -// `prefix` has its default defined outside of this module -define('prefix', { - type: path, - short: 'C', - default: '', - defaultDescription: ` - In global mode, the folder where the node executable is installed. - Otherwise, the nearest parent folder containing either a package.json - file or a node_modules folder. - `, - description: ` - The location to install global items. If set on the command line, then - it forces non-global commands to run in the specified folder. - `, -}) - -define('preid', { - default: '', - hint: 'prerelease-id', - type: String, - description: ` - The "prerelease identifier" to use as a prefix for the "prerelease" part - of a semver. Like the \`rc\` in \`1.2.0-rc.8\`. - `, - flatten, -}) - -define('production', { - default: null, - type: [null, Boolean], - deprecated: 'Use `--omit=dev` instead.', - description: 'Alias for `--omit=dev`', - flatten (key, obj, flatOptions) { - definitions.omit.flatten('omit', obj, flatOptions) - }, -}) - -define('progress', { - default: !ciInfo.isCI, - defaultDescription: ` - \`true\` unless running in a known CI system - `, - type: Boolean, - description: ` - When set to \`true\`, npm will display a progress bar during time - intensive operations, if \`process.stderr\` is a TTY. - - Set to \`false\` to suppress the progress bar. - `, - flatten (key, obj, flatOptions) { - flatOptions.progress = !obj.progress ? false - : !!process.stderr.isTTY && process.env.TERM !== 'dumb' - }, -}) - -define('provenance', { - default: false, - type: Boolean, - exclusive: ['provenance-file'], - description: ` - When publishing from a supported cloud CI/CD system, the package will be - publicly linked to where it was built and published from. - `, - flatten, -}) - -define('provenance-file', { - default: null, - type: path, - hint: '', - exclusive: ['provenance'], - description: ` - When publishing, the provenance bundle at the given path will be used. - `, - flatten, -}) - -define('proxy', { - default: null, - type: [null, false, url], // allow proxy to be disabled explicitly - description: ` - A proxy to use for outgoing http requests. If the \`HTTP_PROXY\` or - \`http_proxy\` environment variables are set, proxy settings will be - honored by the underlying \`request\` library. - `, - flatten, -}) - -define('read-only', { - default: false, - type: Boolean, - description: ` - This is used to mark a token as unable to publish when configuring - limited access tokens with the \`npm token create\` command. - `, - flatten, -}) - -define('rebuild-bundle', { - default: true, - type: Boolean, - description: ` - Rebuild bundled dependencies after installation. - `, - flatten, -}) - -define('registry', { - default: 'https://registry.npmjs.org/', - type: url, - description: ` - The base URL of the npm registry. 
- `, - flatten, -}) - -define('replace-registry-host', { - default: 'npmjs', - hint: ' | hostname', - type: ['npmjs', 'never', 'always', String], - description: ` - Defines behavior for replacing the registry host in a lockfile with the - configured registry. - - The default behavior is to replace package dist URLs from the default - registry (https://registry.npmjs.org) to the configured registry. If set to - "never", then use the registry value. If set to "always", then replace the - registry host with the configured host every time. - - You may also specify a bare hostname (e.g., "registry.npmjs.org"). - `, - flatten, -}) - -define('save', { - default: true, - defaultDescription: `\`true\` unless when using \`npm update\` where it - defaults to \`false\``, - usage: '-S|--save|--no-save|--save-prod|--save-dev|--save-optional|--save-peer|--save-bundle', - type: Boolean, - short: 'S', - description: ` - Save installed packages to a \`package.json\` file as dependencies. - - When used with the \`npm rm\` command, removes the dependency from - \`package.json\`. - - Will also prevent writing to \`package-lock.json\` if set to \`false\`. - `, - flatten, -}) - -define('save-bundle', { - default: false, - type: Boolean, - short: 'B', - description: ` - If a package would be saved at install time by the use of \`--save\`, - \`--save-dev\`, or \`--save-optional\`, then also put it in the - \`bundleDependencies\` list. - - Ignored if \`--save-peer\` is set, since peerDependencies cannot be bundled. - `, - flatten (key, obj, flatOptions) { - // XXX update arborist to just ignore it if resulting saveType is peer - // otherwise this won't have the expected effect: - // - // npm config set save-peer true - // npm i foo --save-bundle --save-prod <-- should bundle - flatOptions.saveBundle = obj['save-bundle'] && !obj['save-peer'] - }, -}) + flatten, + }), + cache: new Definition('cache', { + default: cache, + defaultDescription: ` + Windows: \`%LocalAppData%\\npm-cache\`, Posix: \`~/.npm\` + `, + type: path, + description: ` + The location of npm's cache directory. + `, + flatten (key, obj, flatOptions) { + flatOptions.cache = join(obj.cache, '_cacache') + flatOptions.npxCache = join(obj.cache, '_npx') + flatOptions.tufCache = join(obj.cache, '_tuf') + }, + }), + 'cache-max': new Definition('cache-max', { + default: Infinity, + type: Number, + description: ` + \`--cache-max=0\` is an alias for \`--prefer-online\` + `, + deprecated: ` + This option has been deprecated in favor of \`--prefer-online\` + `, + flatten (key, obj, flatOptions) { + if (obj[key] <= 0) { + flatOptions.preferOnline = true + } + }, + }), + 'cache-min': new Definition('cache-min', { + default: 0, + type: Number, + description: ` + \`--cache-min=9999 (or bigger)\` is an alias for \`--prefer-offline\`. + `, + deprecated: ` + This option has been deprecated in favor of \`--prefer-offline\`. + `, + flatten (key, obj, flatOptions) { + if (obj[key] >= 9999) { + flatOptions.preferOffline = true + } + }, + }), + cafile: new Definition('cafile', { + default: null, + type: path, + description: ` + A path to a file containing one or multiple Certificate Authority signing + certificates. Similar to the \`ca\` setting, but allows for multiple + CA's, as well as for the CA information to be stored in a file on disk. + `, + flatten (key, obj, flatOptions) { + // always set to null in defaults + if (!obj.cafile) { + return + } -// XXX: We should really deprecate all these `--save-blah` switches -// in favor of a single `--save-type` option. 
The unfortunate shortcut -// we took for `--save-peer --save-optional` being `--save-type=peerOptional` -// makes this tricky, and likely a breaking change. + const raw = maybeReadFile(obj.cafile) + if (!raw) { + return + } -define('save-dev', { - default: false, - type: Boolean, - short: 'D', - description: ` - Save installed packages to a package.json file as \`devDependencies\`. - `, - flatten (key, obj, flatOptions) { - if (!obj[key]) { - if (flatOptions.saveType === 'dev') { - delete flatOptions.saveType + const delim = '-----END CERTIFICATE-----' + flatOptions.ca = raw.replace(/\r\n/g, '\n').split(delim) + .filter(section => section.trim()) + .map(section => section.trimLeft() + delim) + }, + }), + call: new Definition('call', { + default: '', + type: String, + short: 'c', + description: ` + Optional companion option for \`npm exec\`, \`npx\` that allows for + specifying a custom command to be run along with the installed packages. + + \`\`\`bash + npm exec --package yo --package generator-node --call "yo node" + \`\`\` + `, + flatten, + }), + cert: new Definition('cert', { + default: null, + type: [null, String], + description: ` + A client certificate to pass when accessing the registry. Values should + be in PEM format (Windows calls it "Base-64 encoded X.509 (.CER)") with + newlines replaced by the string "\\n". For example: + + \`\`\`ini + cert="-----BEGIN CERTIFICATE-----\\nXXXX\\nXXXX\\n-----END CERTIFICATE-----" + \`\`\` + + It is _not_ the path to a certificate file, though you can set a registry-scoped + "certfile" path like "//other-registry.tld/:certfile=/path/to/cert.pem". + `, + deprecated: ` + \`key\` and \`cert\` are no longer used for most registry operations. + Use registry scoped \`keyfile\` and \`certfile\` instead. + Example: + //other-registry.tld/:keyfile=/path/to/key.pem + //other-registry.tld/:certfile=/path/to/cert.crt + `, + flatten, + }), + cidr: new Definition('cidr', { + default: null, + type: [null, String, Array], + description: ` + This is a list of CIDR address to be used when configuring limited access + tokens with the \`npm token create\` command. + `, + flatten, + }), + // This should never be directly used, the flattened value is the derived value + // and is sent to other modules, and is also exposed as `npm.color` for use + // inside npm itself. + color: new Definition('color', { + default: !process.env.NO_COLOR || process.env.NO_COLOR === '0', + usage: '--color|--no-color|--color always', + defaultDescription: ` + true unless the NO_COLOR environ is set to something other than '0' + `, + type: ['always', Boolean], + description: ` + If false, never shows colors. If \`"always"\` then always shows colors. + If true, then only prints color codes for tty file descriptors. + `, + flatten (key, obj, flatOptions) { + flatOptions.color = !obj.color ? false + : obj.color === 'always' ? true + : !!process.stdout.isTTY + flatOptions.logColor = !obj.color ? false + : obj.color === 'always' ? true + : !!process.stderr.isTTY + }, + }), + 'commit-hooks': new Definition('commit-hooks', { + default: true, + type: Boolean, + description: ` + Run git commit hooks when using the \`npm version\` command. + `, + flatten, + }), + cpu: new Definition('cpu', { + default: null, + type: [null, String], + description: ` + Override CPU architecture of native modules to install. + Acceptable values are same as \`cpu\` field of package.json, + which comes from \`process.arch\`. 
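Reviewer note: the `cafile` flattener shown earlier in this hunk turns a single PEM bundle into the same array shape that the `ca` option accepts. A self-contained sketch of that transformation, with the bundle inlined instead of read from disk via `maybeReadFile`:

```js
// Sketch of the cafile -> ca transformation above, with the PEM bundle inlined
// instead of being read from disk (maybeReadFile is not reproduced here).
const raw = [
  '-----BEGIN CERTIFICATE-----\nAAAA\n-----END CERTIFICATE-----\n',
  '-----BEGIN CERTIFICATE-----\nBBBB\n-----END CERTIFICATE-----\n',
].join('')

const delim = '-----END CERTIFICATE-----'
const ca = raw.replace(/\r\n/g, '\n').split(delim)
  .filter(section => section.trim())
  .map(section => section.trimStart() + delim)

console.log(ca.length) // 2: one array entry per certificate, each ending with the delimiter
```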
+ `, + flatten, + }), + depth: new Definition('depth', { + default: null, + defaultDescription: ` + \`Infinity\` if \`--all\` is set, otherwise \`1\` + `, + type: [null, Number], + description: ` + The depth to go when recursing packages for \`npm ls\`. + + If not set, \`npm ls\` will show only the immediate dependencies of the + root project. If \`--all\` is set, then npm will show all dependencies + by default. + `, + flatten, + }), + description: new Definition('description', { + default: true, + type: Boolean, + usage: '--no-description', + description: ` + Show the description in \`npm search\` + `, + flatten (key, obj, flatOptions) { + flatOptions.search = flatOptions.search || { limit: 20 } + flatOptions.search[key] = obj[key] + }, + }), + dev: new Definition('dev', { + default: false, + type: Boolean, + description: ` + Alias for \`--include=dev\`. + `, + deprecated: 'Please use --include=dev instead.', + flatten (key, obj, flatOptions) { + definitions.omit.flatten('omit', obj, flatOptions) + }, + }), + diff: new Definition('diff', { + default: [], + hint: '', + type: [String, Array], + description: ` + Define arguments to compare in \`npm diff\`. + `, + flatten, + }), + 'diff-ignore-all-space': new Definition('diff-ignore-all-space', { + default: false, + type: Boolean, + description: ` + Ignore whitespace when comparing lines in \`npm diff\`. + `, + flatten, + }), + 'diff-name-only': new Definition('diff-name-only', { + default: false, + type: Boolean, + description: ` + Prints only filenames when using \`npm diff\`. + `, + flatten, + }), + 'diff-no-prefix': new Definition('diff-no-prefix', { + default: false, + type: Boolean, + description: ` + Do not show any source or destination prefix in \`npm diff\` output. + + Note: this causes \`npm diff\` to ignore the \`--diff-src-prefix\` and + \`--diff-dst-prefix\` configs. + `, + flatten, + }), + 'diff-dst-prefix': new Definition('diff-dst-prefix', { + default: 'b/', + hint: '', + type: String, + description: ` + Destination prefix to be used in \`npm diff\` output. + `, + flatten, + }), + 'diff-src-prefix': new Definition('diff-src-prefix', { + default: 'a/', + hint: '', + type: String, + description: ` + Source prefix to be used in \`npm diff\` output. + `, + flatten, + }), + 'diff-text': new Definition('diff-text', { + default: false, + type: Boolean, + description: ` + Treat all files as text in \`npm diff\`. + `, + flatten, + }), + 'diff-unified': new Definition('diff-unified', { + default: 3, + type: Number, + description: ` + The number of lines of context to print in \`npm diff\`. + `, + flatten, + }), + 'dry-run': new Definition('dry-run', { + default: false, + type: Boolean, + description: ` + Indicates that you don't want npm to make any changes and that it should + only report what it would have done. This can be passed into any of the + commands that modify your local installation, eg, \`install\`, + \`update\`, \`dedupe\`, \`uninstall\`, as well as \`pack\` and + \`publish\`. + + Note: This is NOT honored by other network related commands, eg + \`dist-tags\`, \`owner\`, etc. + `, + flatten, + }), + editor: new Definition('editor', { + default: editor, + defaultDescription: ` + The EDITOR or VISUAL environment variables, or '%SYSTEMROOT%\\notepad.exe' on Windows, + or 'vi' on Unix systems + `, + type: String, + description: ` + The command to run for \`npm edit\` and \`npm config edit\`. 
+ `, + flatten, + }), + 'engine-strict': new Definition('engine-strict', { + default: false, + type: Boolean, + description: ` + If set to true, then npm will stubbornly refuse to install (or even + consider installing) any package that claims to not be compatible with + the current Node.js version. + + This can be overridden by setting the \`--force\` flag. + `, + flatten, + }), + 'expect-result-count': new Definition('expect-result-count', { + default: null, + type: [null, Number], + hint: '', + exclusive: ['expect-results'], + description: ` + Tells to expect a specific number of results from the command. + `, + }), + 'expect-results': new Definition('expect-results', { + default: null, + type: [null, Boolean], + exclusive: ['expect-result-count'], + description: ` + Tells npm whether or not to expect results from the command. + Can be either true (expect some results) or false (expect no results). + `, + }), + 'fetch-retries': new Definition('fetch-retries', { + default: 2, + type: Number, + description: ` + The "retries" config for the \`retry\` module to use when fetching + packages from the registry. + + npm will retry idempotent read requests to the registry in the case + of network failures or 5xx HTTP errors. + `, + flatten (key, obj, flatOptions) { + flatOptions.retry = flatOptions.retry || {} + flatOptions.retry.retries = obj[key] + }, + }), + 'fetch-retry-factor': new Definition('fetch-retry-factor', { + default: 10, + type: Number, + description: ` + The "factor" config for the \`retry\` module to use when fetching + packages. + `, + flatten (key, obj, flatOptions) { + flatOptions.retry = flatOptions.retry || {} + flatOptions.retry.factor = obj[key] + }, + }), + 'fetch-retry-maxtimeout': new Definition('fetch-retry-maxtimeout', { + default: 60000, + defaultDescription: '60000 (1 minute)', + type: Number, + description: ` + The "maxTimeout" config for the \`retry\` module to use when fetching + packages. + `, + flatten (key, obj, flatOptions) { + flatOptions.retry = flatOptions.retry || {} + flatOptions.retry.maxTimeout = obj[key] + }, + }), + 'fetch-retry-mintimeout': new Definition('fetch-retry-mintimeout', { + default: 10000, + defaultDescription: '10000 (10 seconds)', + type: Number, + description: ` + The "minTimeout" config for the \`retry\` module to use when fetching + packages. + `, + flatten (key, obj, flatOptions) { + flatOptions.retry = flatOptions.retry || {} + flatOptions.retry.minTimeout = obj[key] + }, + }), + 'fetch-timeout': new Definition('fetch-timeout', { + default: 5 * 60 * 1000, + defaultDescription: `${5 * 60 * 1000} (5 minutes)`, + type: Number, + description: ` + The maximum amount of time to wait for HTTP requests to complete. + `, + flatten (key, obj, flatOptions) { + flatOptions.timeout = obj[key] + }, + }), + force: new Definition('force', { + default: false, + type: Boolean, + short: 'f', + description: ` + Removes various protections against unfortunate side effects, common + mistakes, unnecessary performance degradation, and malicious input. + + * Allow clobbering non-npm files in global installs. + * Allow the \`npm version\` command to work on an unclean git repository. + * Allow deleting the cache folder with \`npm cache clean\`. + * Allow installing packages that have an \`engines\` declaration + requiring a different version of npm. + * Allow installing packages that have an \`engines\` declaration + requiring a different version of \`node\`, even if \`--engine-strict\` + is enabled. 
+ * Allow \`npm audit fix\` to install modules outside your stated + dependency range (including SemVer-major changes). + * Allow unpublishing all versions of a published package. + * Allow conflicting peerDependencies to be installed in the root project. + * Implicitly set \`--yes\` during \`npm init\`. + * Allow clobbering existing values in \`npm pkg\` + * Allow unpublishing of entire packages (not just a single version). + + If you don't have a clear idea of what you want to do, it is strongly + recommended that you do not use this option! + `, + flatten, + }), + 'foreground-scripts': new Definition('foreground-scripts', { + default: false, + defaultDescription: `\`false\` unless when using \`npm pack\` or \`npm publish\` where it + defaults to \`true\``, + type: Boolean, + description: ` + Run all build scripts (ie, \`preinstall\`, \`install\`, and + \`postinstall\`) scripts for installed packages in the foreground + process, sharing standard input, output, and error with the main npm + process. + + Note that this will generally make installs run slower, and be much + noisier, but can be useful for debugging. + `, + flatten, + }), + 'format-package-lock': new Definition('format-package-lock', { + default: true, + type: Boolean, + description: ` + Format \`package-lock.json\` or \`npm-shrinkwrap.json\` as a human + readable file. + `, + flatten, + }), + fund: new Definition('fund', { + default: true, + type: Boolean, + description: ` + When "true" displays the message at the end of each \`npm install\` + acknowledging the number of dependencies looking for funding. + See [\`npm fund\`](/commands/npm-fund) for details. + `, + flatten, + }), + git: new Definition('git', { + default: 'git', + type: String, + description: ` + The command to use for git commands. If git is installed on the + computer, but is not in the \`PATH\`, then set this to the full path to + the git binary. + `, + flatten, + }), + 'git-tag-version': new Definition('git-tag-version', { + default: true, + type: Boolean, + description: ` + Tag the commit when using the \`npm version\` command. Setting this to + false results in no commit being made at all. + `, + flatten, + }), + global: new Definition('global', { + default: false, + type: Boolean, + short: 'g', + description: ` + Operates in "global" mode, so that packages are installed into the + \`prefix\` folder instead of the current working directory. See + [folders](/configuring-npm/folders) for more on the differences in + behavior. + + * packages are installed into the \`{prefix}/lib/node_modules\` folder, + instead of the current working directory. + * bin files are linked to \`{prefix}/bin\` + * man pages are linked to \`{prefix}/share/man\` + `, + flatten: (key, obj, flatOptions) => { + flatten(key, obj, flatOptions) + if (flatOptions.global) { + flatOptions.location = 'global' + } + }, + }), + // the globalconfig has its default defined outside of this module + globalconfig: new Definition('globalconfig', { + type: path, + default: '', + defaultDescription: ` + The global --prefix setting plus 'etc/npmrc'. For example, + '/usr/local/etc/npmrc' + `, + description: ` + The config file to read for global config options. + `, + flatten, + }), + 'global-style': new Definition('global-style', { + default: false, + type: Boolean, + description: ` + Only install direct dependencies in the top level \`node_modules\`, + but hoist on deeper dependencies. + Sets \`--install-strategy=shallow\`. 
+ `, + deprecated: ` + This option has been deprecated in favor of \`--install-strategy=shallow\` + `, + flatten (key, obj, flatOptions) { + if (obj[key]) { + obj['install-strategy'] = 'shallow' + flatOptions.installStrategy = 'shallow' + } + }, + }), + heading: new Definition('heading', { + default: 'npm', + type: String, + description: ` + The string that starts all the debugging log output. + `, + flatten, + }), + 'https-proxy': new Definition('https-proxy', { + default: null, + type: [null, url], + description: ` + A proxy to use for outgoing https requests. If the \`HTTPS_PROXY\` or + \`https_proxy\` or \`HTTP_PROXY\` or \`http_proxy\` environment variables + are set, proxy settings will be honored by the underlying + \`make-fetch-happen\` library. + `, + flatten, + }), + 'if-present': new Definition('if-present', { + default: false, + type: Boolean, + envExport: false, + description: ` + If true, npm will not exit with an error code when \`run-script\` is + invoked for a script that isn't defined in the \`scripts\` section of + \`package.json\`. This option can be used when it's desirable to + optionally run a script when it's present and fail if the script fails. + This is useful, for example, when running scripts that may only apply for + some builds in an otherwise generic CI setup. + `, + flatten, + }), + 'ignore-scripts': new Definition('ignore-scripts', { + default: false, + type: Boolean, + description: ` + If true, npm does not run scripts specified in package.json files. + + Note that commands explicitly intended to run a particular script, such + as \`npm start\`, \`npm stop\`, \`npm restart\`, \`npm test\`, and \`npm + run-script\` will still run their intended script if \`ignore-scripts\` is + set, but they will *not* run any pre- or post-scripts. + `, + flatten, + }), + include: new Definition('include', { + default: [], + type: [Array, 'prod', 'dev', 'optional', 'peer'], + description: ` + Option that allows for defining which types of dependencies to install. + + This is the inverse of \`--omit=\`. + + Dependency types specified in \`--include\` will not be omitted, + regardless of the order in which omit/include are specified on the + command-line. + `, + flatten (key, obj, flatOptions) { + // just call the omit flattener, it reads from obj.include + definitions.omit.flatten('omit', obj, flatOptions) + }, + }), + 'include-staged': new Definition('include-staged', { + default: false, + type: Boolean, + description: ` + Allow installing "staged" published packages, as defined by [npm RFC PR + #92](https://github.com/npm/rfcs/pull/92). + + This is experimental, and not implemented by the npm public registry. + `, + flatten, + }), + 'include-workspace-root': new Definition('include-workspace-root', { + default: false, + type: Boolean, + envExport: false, + description: ` + Include the workspace root when workspaces are enabled for a command. + + When false, specifying individual workspaces via the \`workspace\` config, + or all workspaces via the \`workspaces\` flag, will cause npm to operate only + on the specified workspaces, and not on the root project. + `, + flatten, + }), + 'init-author-email': new Definition('init-author-email', { + default: '', + hint: '', + type: String, + description: ` + The value \`npm init\` should use by default for the package author's + email. 
+ `, + }), + 'init-author-name': new Definition('init-author-name', { + default: '', + hint: '', + type: String, + description: ` + The value \`npm init\` should use by default for the package author's name. + `, + }), + 'init-author-url': new Definition('init-author-url', { + default: '', + type: ['', url], + hint: '', + description: ` + The value \`npm init\` should use by default for the package author's homepage. + `, + }), + 'init-license': new Definition('init-license', { + default: 'ISC', + hint: '', + type: String, + description: ` + The value \`npm init\` should use by default for the package license. + `, + }), + 'init-module': new Definition('init-module', { + default: '~/.npm-init.js', + type: path, + hint: '', + description: ` + A module that will be loaded by the \`npm init\` command. See the + documentation for the + [init-package-json](https://github.com/npm/init-package-json) module for + more information, or [npm init](/commands/npm-init). + `, + }), + 'init-version': new Definition('init-version', { + default: '1.0.0', + type: Semver, + hint: '', + description: ` + The value that \`npm init\` should use by default for the package + version number, if not already set in package.json. + `, + }), + // these "aliases" are historically supported in .npmrc files, unfortunately + // They should be removed in a future npm version. + 'init.author.email': new Definition('init.author.email', { + default: '', + type: String, + deprecated: ` + Use \`--init-author-email\` instead.`, + description: ` + Alias for \`--init-author-email\` + `, + }), + 'init.author.name': new Definition('init.author.name', { + default: '', + type: String, + deprecated: ` + Use \`--init-author-name\` instead. + `, + description: ` + Alias for \`--init-author-name\` + `, + }), + 'init.author.url': new Definition('init.author.url', { + default: '', + type: ['', url], + deprecated: ` + Use \`--init-author-url\` instead. + `, + description: ` + Alias for \`--init-author-url\` + `, + }), + 'init.license': new Definition('init.license', { + default: 'ISC', + type: String, + deprecated: ` + Use \`--init-license\` instead. + `, + description: ` + Alias for \`--init-license\` + `, + }), + 'init.module': new Definition('init.module', { + default: '~/.npm-init.js', + type: path, + deprecated: ` + Use \`--init-module\` instead. + `, + description: ` + Alias for \`--init-module\` + `, + }), + 'init.version': new Definition('init.version', { + default: '1.0.0', + type: Semver, + deprecated: ` + Use \`--init-version\` instead. + `, + description: ` + Alias for \`--init-version\` + `, + }), + 'install-links': new Definition('install-links', { + default: false, + type: Boolean, + description: ` + When set file: protocol dependencies will be packed and installed as + regular dependencies instead of creating a symlink. This option has + no effect on workspaces. + `, + flatten, + }), + 'install-strategy': new Definition('install-strategy', { + default: 'hoisted', + type: ['hoisted', 'nested', 'shallow', 'linked'], + description: ` + Sets the strategy for installing packages in node_modules. + hoisted (default): Install non-duplicated in top-level, and duplicated as + necessary within directory structure. + nested: (formerly --legacy-bundling) install in place, no hoisting. + shallow (formerly --global-style) only install direct deps at top-level. + linked: (experimental) install in node_modules/.store, link in place, + unhoisted. 
+ `, + flatten, + }), + json: new Definition('json', { + default: false, + type: Boolean, + description: ` + Whether or not to output JSON data, rather than the normal output. + + * In \`npm pkg set\` it enables parsing set values with JSON.parse() + before saving them to your \`package.json\`. + + Not supported by all npm commands. + `, + flatten, + }), + key: new Definition('key', { + default: null, + type: [null, String], + description: ` + A client key to pass when accessing the registry. Values should be in + PEM format with newlines replaced by the string "\\n". For example: + + \`\`\`ini + key="-----BEGIN PRIVATE KEY-----\\nXXXX\\nXXXX\\n-----END PRIVATE KEY-----" + \`\`\` + + It is _not_ the path to a key file, though you can set a registry-scoped + "keyfile" path like "//other-registry.tld/:keyfile=/path/to/key.pem". + `, + deprecated: ` + \`key\` and \`cert\` are no longer used for most registry operations. + Use registry scoped \`keyfile\` and \`certfile\` instead. + Example: + //other-registry.tld/:keyfile=/path/to/key.pem + //other-registry.tld/:certfile=/path/to/cert.crt + `, + flatten, + }), + 'legacy-bundling': new Definition('legacy-bundling', { + default: false, + type: Boolean, + description: ` + Instead of hoisting package installs in \`node_modules\`, install packages + in the same manner that they are depended on. This may cause very deep + directory structures and duplicate package installs as there is no + de-duplicating. + Sets \`--install-strategy=nested\`. + `, + deprecated: ` + This option has been deprecated in favor of \`--install-strategy=nested\` + `, + flatten (key, obj, flatOptions) { + if (obj[key]) { + obj['install-strategy'] = 'nested' + flatOptions.installStrategy = 'nested' + } + }, + }), + 'legacy-peer-deps': new Definition('legacy-peer-deps', { + default: false, + type: Boolean, + description: ` + Causes npm to completely ignore \`peerDependencies\` when building a + package tree, as in npm versions 3 through 6. + + If a package cannot be installed because of overly strict + \`peerDependencies\` that collide, it provides a way to move forward + resolving the situation. + + This differs from \`--omit=peer\`, in that \`--omit=peer\` will avoid + unpacking \`peerDependencies\` on disk, but will still design a tree such + that \`peerDependencies\` _could_ be unpacked in a correct place. + + Use of \`legacy-peer-deps\` is not recommended, as it will not enforce + the \`peerDependencies\` contract that meta-dependencies may rely on. + `, + flatten, + }), + libc: new Definition('libc', { + default: null, + type: [null, String], + description: ` + Override libc of native modules to install. + Acceptable values are same as \`libc\` field of package.json + `, + flatten, + }), + link: new Definition('link', { + default: false, + type: Boolean, + description: ` + Used with \`npm ls\`, limiting output to only those packages that are + linked. + `, + }), + 'local-address': new Definition('local-address', { + default: null, + type: getLocalAddresses(), + typeDescription: 'IP Address', + description: ` + The IP address of the local interface to use when making connections to + the npm registry. Must be IPv4 in versions of Node prior to 0.12. 
+ `, + flatten, + }), + location: new Definition('location', { + default: 'user', + short: 'L', + type: [ + 'global', + 'user', + 'project', + ], + defaultDescription: ` + "user" unless \`--global\` is passed, which will also set this value to "global" + `, + description: ` + When passed to \`npm config\` this refers to which config file to use. + + When set to "global" mode, packages are installed into the \`prefix\` folder + instead of the current working directory. See + [folders](/configuring-npm/folders) for more on the differences in behavior. + + * packages are installed into the \`{prefix}/lib/node_modules\` folder, + instead of the current working directory. + * bin files are linked to \`{prefix}/bin\` + * man pages are linked to \`{prefix}/share/man\` + `, + flatten: (key, obj, flatOptions) => { + flatten(key, obj, flatOptions) + if (flatOptions.global) { + flatOptions.location = 'global' + } + if (obj.location === 'global') { + flatOptions.global = true + } + }, + }), + 'lockfile-version': new Definition('lockfile-version', { + default: null, + type: [null, 1, 2, 3, '1', '2', '3'], + defaultDescription: ` + Version 3 if no lockfile, auto-converting v1 lockfiles to v3, otherwise + maintain current lockfile version.`, + description: ` + Set the lockfile format version to be used in package-lock.json and + npm-shrinkwrap-json files. Possible options are: + + 1: The lockfile version used by npm versions 5 and 6. Lacks some data that + is used during the install, resulting in slower and possibly less + deterministic installs. Prevents lockfile churn when interoperating with + older npm versions. + + 2: The default lockfile version used by npm version 7 and 8. Includes both + the version 1 lockfile data and version 3 lockfile data, for maximum + determinism and interoperability, at the expense of more bytes on disk. + + 3: Only the new lockfile information introduced in npm version 7. Smaller + on disk than lockfile version 2, but not interoperable with older npm + versions. Ideal if all users are on npm version 7 and higher. + `, + flatten: (key, obj, flatOptions) => { + flatOptions.lockfileVersion = obj[key] && parseInt(obj[key], 10) + }, + }), + loglevel: new Definition('loglevel', { + default: 'notice', + type: [ + 'silent', + 'error', + 'warn', + 'notice', + 'http', + 'info', + 'verbose', + 'silly', + ], + description: ` + What level of logs to report. All logs are written to a debug log, + with the path to that file printed if the execution of a command fails. + + Any logs of a higher level than the setting are shown. The default is + "notice". + + See also the \`foreground-scripts\` config. + `, + flatten (key, obj, flatOptions) { + flatOptions.silent = obj[key] === 'silent' + }, + }), + 'logs-dir': new Definition('logs-dir', { + default: null, + type: [null, path], + defaultDescription: ` + A directory named \`_logs\` inside the cache + `, + description: ` + The location of npm's log directory. See [\`npm + logging\`](/using-npm/logging) for more information. + `, + }), + 'logs-max': new Definition('logs-max', { + default: 10, + type: Number, + description: ` + The maximum number of log files to store. + + If set to 0, no log files will be written for the current run. + `, + }), + long: new Definition('long', { + default: false, + type: Boolean, + short: 'l', + description: ` + Show extended information in \`ls\`, \`search\`, and \`help-search\`. 
+ `, + }), + maxsockets: new Definition('maxsockets', { + default: 15, + type: Number, + description: ` + The maximum number of connections to use per origin (protocol/host/port + combination). + `, + flatten (key, obj, flatOptions) { + flatOptions.maxSockets = obj[key] + }, + }), + message: new Definition('message', { + default: '%s', + type: String, + short: 'm', + description: ` + Commit message which is used by \`npm version\` when creating version commit. + + Any "%s" in the message will be replaced with the version number. + `, + flatten, + }), + 'node-options': new Definition('node-options', { + default: null, + type: [null, String], + description: ` + Options to pass through to Node.js via the \`NODE_OPTIONS\` environment + variable. This does not impact how npm itself is executed but it does + impact how lifecycle scripts are called. + `, + }), + noproxy: new Definition('noproxy', { + default: '', + defaultDescription: ` + The value of the NO_PROXY environment variable + `, + type: [String, Array], + description: ` + Domain extensions that should bypass any proxies. + + Also accepts a comma-delimited string. + `, + flatten (key, obj, flatOptions) { + if (Array.isArray(obj[key])) { + flatOptions.noProxy = obj[key].join(',') + } else { + flatOptions.noProxy = obj[key] + } + }, + }), + offline: new Definition('offline', { + default: false, + type: Boolean, + description: ` + Force offline mode: no network requests will be done during install. To allow + the CLI to fill in missing cache data, see \`--prefer-offline\`. + `, + flatten, + }), + omit: new Definition('omit', { + default: process.env.NODE_ENV === 'production' ? ['dev'] : [], + defaultDescription: ` + 'dev' if the \`NODE_ENV\` environment variable is set to 'production', + otherwise empty. + `, + type: [Array, 'dev', 'optional', 'peer'], + description: ` + Dependency types to omit from the installation tree on disk. + + Note that these dependencies _are_ still resolved and added to the + \`package-lock.json\` or \`npm-shrinkwrap.json\` file. They are just + not physically installed on disk. + + If a package type appears in both the \`--include\` and \`--omit\` + lists, then it will be included. + + If the resulting omit list includes \`'dev'\`, then the \`NODE_ENV\` + environment variable will be set to \`'production'\` for all lifecycle + scripts. + `, + flatten (key, obj, flatOptions) { + flatOptions.omit = buildOmitList(obj) + }, + }), + 'omit-lockfile-registry-resolved': new Definition('omit-lockfile-registry-resolved', { + default: false, + type: Boolean, + description: ` + This option causes npm to create lock files without a \`resolved\` key for + registry dependencies. Subsequent installs will need to resolve tarball + endpoints with the configured registry, likely resulting in a longer install + time. + `, + flatten, + }), + only: new Definition('only', { + default: null, + type: [null, 'prod', 'production'], + deprecated: ` + Use \`--omit=dev\` to omit dev dependencies from the install. + `, + description: ` + When set to \`prod\` or \`production\`, this is an alias for + \`--omit=dev\`. + `, + flatten (key, obj, flatOptions) { + definitions.omit.flatten('omit', obj, flatOptions) + }, + }), + optional: new Definition('optional', { + default: null, + type: [null, Boolean], + deprecated: ` + Use \`--omit=optional\` to exclude optional dependencies, or + \`--include=optional\` to include them. + + Default value does install optional deps unless otherwise omitted. 
+ `, + description: ` + Alias for --include=optional or --omit=optional + `, + flatten (key, obj, flatOptions) { + definitions.omit.flatten('omit', obj, flatOptions) + }, + }), + os: new Definition('os', { + default: null, + type: [null, String], + description: ` + Override OS of native modules to install. + Acceptable values are same as \`os\` field of package.json, + which comes from \`process.platform\`. + `, + flatten, + }), + otp: new Definition('otp', { + default: null, + type: [null, String], + description: ` + This is a one-time password from a two-factor authenticator. It's needed + when publishing or changing package permissions with \`npm access\`. + + If not set, and a registry response fails with a challenge for a one-time + password, npm will prompt on the command line for one. + `, + flatten (key, obj, flatOptions) { + flatten(key, obj, flatOptions) + if (obj.otp) { + obj['auth-type'] = 'legacy' + flatten('auth-type', obj, flatOptions) + } + }, + }), + package: new Definition('package', { + default: [], + hint: '', + type: [String, Array], + description: ` + The package or packages to install for [\`npm exec\`](/commands/npm-exec) + `, + flatten, + }), + 'package-lock': new Definition('package-lock', { + default: true, + type: Boolean, + description: ` + If set to false, then ignore \`package-lock.json\` files when installing. + This will also prevent _writing_ \`package-lock.json\` if \`save\` is + true. + `, + flatten: (key, obj, flatOptions) => { + flatten(key, obj, flatOptions) + if (flatOptions.packageLockOnly) { + flatOptions.packageLock = true + } + }, + }), + 'package-lock-only': new Definition('package-lock-only', { + default: false, + type: Boolean, + description: ` + If set to true, the current operation will only use the \`package-lock.json\`, + ignoring \`node_modules\`. + + For \`update\` this means only the \`package-lock.json\` will be updated, + instead of checking \`node_modules\` and downloading dependencies. + + For \`list\` this means the output will be based on the tree described by the + \`package-lock.json\`, rather than the contents of \`node_modules\`. + `, + flatten: (key, obj, flatOptions) => { + flatten(key, obj, flatOptions) + if (flatOptions.packageLockOnly) { + flatOptions.packageLock = true + } + }, + }), + 'pack-destination': new Definition('pack-destination', { + default: '.', + type: String, + description: ` + Directory in which \`npm pack\` will save tarballs. + `, + flatten, + }), + parseable: new Definition('parseable', { + default: false, + type: Boolean, + short: 'p', + description: ` + Output parseable results from commands that write to standard output. For + \`npm search\`, this will be tab-separated table format. + `, + flatten, + }), + 'prefer-dedupe': new Definition('prefer-dedupe', { + default: false, + type: Boolean, + description: ` + Prefer to deduplicate packages if possible, rather than + choosing a newer version of a dependency. + `, + flatten, + }), + 'prefer-offline': new Definition('prefer-offline', { + default: false, + type: Boolean, + description: ` + If true, staleness checks for cached data will be bypassed, but missing + data will be requested from the server. To force full offline mode, use + \`--offline\`. + `, + flatten, + }), + 'prefer-online': new Definition('prefer-online', { + default: false, + type: Boolean, + description: ` + If true, staleness checks for cached data will be forced, making the CLI + look for updates immediately even for fresh package data. 
+ `, + flatten, + }), + // `prefix` has its default defined outside of this module + prefix: new Definition('prefix', { + type: path, + short: 'C', + default: '', + defaultDescription: ` + In global mode, the folder where the node executable is installed. + Otherwise, the nearest parent folder containing either a package.json + file or a node_modules folder. + `, + description: ` + The location to install global items. If set on the command line, then + it forces non-global commands to run in the specified folder. + `, + }), + preid: new Definition('preid', { + default: '', + hint: 'prerelease-id', + type: String, + description: ` + The "prerelease identifier" to use as a prefix for the "prerelease" part + of a semver. Like the \`rc\` in \`1.2.0-rc.8\`. + `, + flatten, + }), + production: new Definition('production', { + default: null, + type: [null, Boolean], + deprecated: 'Use `--omit=dev` instead.', + description: 'Alias for `--omit=dev`', + flatten (key, obj, flatOptions) { + definitions.omit.flatten('omit', obj, flatOptions) + }, + }), + progress: new Definition('progress', { + default: !ciInfo.isCI, + defaultDescription: ` + \`true\` unless running in a known CI system + `, + type: Boolean, + description: ` + When set to \`true\`, npm will display a progress bar during time + intensive operations, if \`process.stderr\` is a TTY. + + Set to \`false\` to suppress the progress bar. + `, + flatten (key, obj, flatOptions) { + flatOptions.progress = !obj.progress ? false + : !!process.stderr.isTTY && process.env.TERM !== 'dumb' + }, + }), + provenance: new Definition('provenance', { + default: false, + type: Boolean, + exclusive: ['provenance-file'], + description: ` + When publishing from a supported cloud CI/CD system, the package will be + publicly linked to where it was built and published from. + `, + flatten, + }), + 'provenance-file': new Definition('provenance-file', { + default: null, + type: path, + hint: '', + exclusive: ['provenance'], + description: ` + When publishing, the provenance bundle at the given path will be used. + `, + flatten, + }), + proxy: new Definition('proxy', { + default: null, + type: [null, false, url], // allow proxy to be disabled explicitly + description: ` + A proxy to use for outgoing http requests. If the \`HTTP_PROXY\` or + \`http_proxy\` environment variables are set, proxy settings will be + honored by the underlying \`request\` library. + `, + flatten, + }), + 'read-only': new Definition('read-only', { + default: false, + type: Boolean, + description: ` + This is used to mark a token as unable to publish when configuring + limited access tokens with the \`npm token create\` command. + `, + flatten, + }), + 'rebuild-bundle': new Definition('rebuild-bundle', { + default: true, + type: Boolean, + description: ` + Rebuild bundled dependencies after installation. + `, + flatten, + }), + registry: new Definition('registry', { + default: 'https://registry.npmjs.org/', + type: url, + description: ` + The base URL of the npm registry. + `, + flatten, + }), + 'replace-registry-host': new Definition('replace-registry-host', { + default: 'npmjs', + hint: ' | hostname', + type: ['npmjs', 'never', 'always', String], + description: ` + Defines behavior for replacing the registry host in a lockfile with the + configured registry. + + The default behavior is to replace package dist URLs from the default + registry (https://registry.npmjs.org) to the configured registry. If set to + "never", then use the registry value. 
If set to "always", then replace the + registry host with the configured host every time. + + You may also specify a bare hostname (e.g., "registry.npmjs.org"). + `, + flatten, + }), + save: new Definition('save', { + default: true, + defaultDescription: `\`true\` unless when using \`npm update\` where it + defaults to \`false\``, + usage: '-S|--save|--no-save|--save-prod|--save-dev|--save-optional|--save-peer|--save-bundle', + type: Boolean, + short: 'S', + description: ` + Save installed packages to a \`package.json\` file as dependencies. + + When used with the \`npm rm\` command, removes the dependency from + \`package.json\`. + + Will also prevent writing to \`package-lock.json\` if set to \`false\`. + `, + flatten, + }), + 'save-bundle': new Definition('save-bundle', { + default: false, + type: Boolean, + short: 'B', + description: ` + If a package would be saved at install time by the use of \`--save\`, + \`--save-dev\`, or \`--save-optional\`, then also put it in the + \`bundleDependencies\` list. + + Ignored if \`--save-peer\` is set, since peerDependencies cannot be bundled. + `, + flatten (key, obj, flatOptions) { + // XXX update arborist to just ignore it if resulting saveType is peer + // otherwise this won't have the expected effect: + // + // npm config set save-peer true + // npm i foo --save-bundle --save-prod <-- should bundle + flatOptions.saveBundle = obj['save-bundle'] && !obj['save-peer'] + }, + }), + 'save-dev': new Definition('save-dev', { + default: false, + type: Boolean, + short: 'D', + description: ` + Save installed packages to a package.json file as \`devDependencies\`. + `, + flatten (key, obj, flatOptions) { + if (!obj[key]) { + if (flatOptions.saveType === 'dev') { + delete flatOptions.saveType + } + return } - return - } - flatOptions.saveType = 'dev' - }, -}) - -define('save-exact', { - default: false, - type: Boolean, - short: 'E', - description: ` - Dependencies saved to package.json will be configured with an exact - version rather than using npm's default semver range operator. - `, - flatten (key, obj, flatOptions) { - // just call the save-prefix flattener, it reads from obj['save-exact'] - definitions['save-prefix'].flatten('save-prefix', obj, flatOptions) - }, -}) - -define('save-optional', { - default: false, - type: Boolean, - short: 'O', - description: ` - Save installed packages to a package.json file as - \`optionalDependencies\`. - `, - flatten (key, obj, flatOptions) { - if (!obj[key]) { - if (flatOptions.saveType === 'optional') { - delete flatOptions.saveType - } else if (flatOptions.saveType === 'peerOptional') { - flatOptions.saveType = 'peer' + flatOptions.saveType = 'dev' + }, + }), + 'save-exact': new Definition('save-exact', { + default: false, + type: Boolean, + short: 'E', + description: ` + Dependencies saved to package.json will be configured with an exact + version rather than using npm's default semver range operator. + `, + flatten (key, obj, flatOptions) { + // just call the save-prefix flattener, it reads from obj['save-exact'] + definitions['save-prefix'].flatten('save-prefix', obj, flatOptions) + }, + }), + 'save-optional': new Definition('save-optional', { + default: false, + type: Boolean, + short: 'O', + description: ` + Save installed packages to a package.json file as + \`optionalDependencies\`. 
+ `, + flatten (key, obj, flatOptions) { + if (!obj[key]) { + if (flatOptions.saveType === 'optional') { + delete flatOptions.saveType + } else if (flatOptions.saveType === 'peerOptional') { + flatOptions.saveType = 'peer' + } + return } - return - } - if (flatOptions.saveType === 'peerOptional') { - return - } + if (flatOptions.saveType === 'peerOptional') { + return + } - if (flatOptions.saveType === 'peer') { - flatOptions.saveType = 'peerOptional' - } else { - flatOptions.saveType = 'optional' - } - }, -}) - -define('save-peer', { - default: false, - type: Boolean, - description: ` - Save installed packages to a package.json file as \`peerDependencies\` - `, - flatten (key, obj, flatOptions) { - if (!obj[key]) { if (flatOptions.saveType === 'peer') { - delete flatOptions.saveType - } else if (flatOptions.saveType === 'peerOptional') { + flatOptions.saveType = 'peerOptional' + } else { flatOptions.saveType = 'optional' } - return - } - - if (flatOptions.saveType === 'peerOptional') { - return - } - - if (flatOptions.saveType === 'optional') { - flatOptions.saveType = 'peerOptional' - } else { - flatOptions.saveType = 'peer' - } - }, -}) - -define('save-prefix', { - default: '^', - type: String, - description: ` - Configure how versions of packages installed to a package.json file via - \`--save\` or \`--save-dev\` get prefixed. - - For example if a package has version \`1.2.3\`, by default its version is - set to \`^1.2.3\` which allows minor upgrades for that package, but after - \`npm config set save-prefix='~'\` it would be set to \`~1.2.3\` which - only allows patch upgrades. - `, - flatten (key, obj, flatOptions) { - flatOptions.savePrefix = obj['save-exact'] ? '' : obj['save-prefix'] - obj['save-prefix'] = flatOptions.savePrefix - }, -}) - -define('save-prod', { - default: false, - type: Boolean, - short: 'P', - description: ` - Save installed packages into \`dependencies\` specifically. This is - useful if a package already exists in \`devDependencies\` or - \`optionalDependencies\`, but you want to move it to be a non-optional - production dependency. - - This is the default behavior if \`--save\` is true, and neither - \`--save-dev\` or \`--save-optional\` are true. - `, - flatten (key, obj, flatOptions) { - if (!obj[key]) { - if (flatOptions.saveType === 'prod') { - delete flatOptions.saveType + }, + }), + 'save-peer': new Definition('save-peer', { + default: false, + type: Boolean, + description: ` + Save installed packages to a package.json file as \`peerDependencies\` + `, + flatten (key, obj, flatOptions) { + if (!obj[key]) { + if (flatOptions.saveType === 'peer') { + delete flatOptions.saveType + } else if (flatOptions.saveType === 'peerOptional') { + flatOptions.saveType = 'optional' + } + return } - return - } - - flatOptions.saveType = 'prod' - }, -}) - -define('scope', { - default: '', - defaultDescription: ` - the scope of the current project, if any, or "" - `, - type: String, - hint: '<@scope>', - description: ` - Associate an operation with a scope for a scoped registry. - - Useful when logging in to or out of a private registry: - - \`\`\` - # log in, linking the scope to the custom registry - npm login --scope=@mycorp --registry=https://registry.mycorp.com - # log out, removing the link and the auth token - npm logout --scope=@mycorp - \`\`\` - - This will cause \`@mycorp\` to be mapped to the registry for future - installation of packages specified according to the pattern - \`@mycorp/package\`. - - This will also cause \`npm init\` to create a scoped package. 
- - \`\`\` - # accept all defaults, and create a package named "@foo/whatever", - # instead of just named "whatever" - npm init --scope=@foo --yes - \`\`\` - `, - flatten (key, obj, flatOptions) { - const value = obj[key] - const scope = value && !/^@/.test(value) ? `@${value}` : value - flatOptions.scope = scope - // projectScope is kept for compatibility with npm-registry-fetch - flatOptions.projectScope = scope - }, -}) - -define('script-shell', { - default: null, - defaultDescription: ` - '/bin/sh' on POSIX systems, 'cmd.exe' on Windows - `, - type: [null, String], - description: ` - The shell to use for scripts run with the \`npm exec\`, - \`npm run\` and \`npm init \` commands. - `, - flatten (key, obj, flatOptions) { - flatOptions.scriptShell = obj[key] || undefined - }, -}) - -define('searchexclude', { - default: '', - type: String, - description: ` - Space-separated options that limit the results from search. - `, - flatten (key, obj, flatOptions) { - flatOptions.search = flatOptions.search || { limit: 20 } - flatOptions.search.exclude = obj[key].toLowerCase() - }, -}) - -define('searchlimit', { - default: 20, - type: Number, - description: ` - Number of items to limit search results to. Will not apply at all to - legacy searches. - `, - flatten (key, obj, flatOptions) { - flatOptions.search = flatOptions.search || {} - flatOptions.search.limit = obj[key] - }, -}) - -define('searchopts', { - default: '', - type: String, - description: ` - Space-separated options that are always passed to search. - `, - flatten (key, obj, flatOptions) { - flatOptions.search = flatOptions.search || { limit: 20 } - flatOptions.search.opts = querystring.parse(obj[key]) - }, -}) - -define('searchstaleness', { - default: 15 * 60, - type: Number, - description: ` - The age of the cache, in seconds, before another registry request is made - if using legacy search endpoint. - `, - flatten (key, obj, flatOptions) { - flatOptions.search = flatOptions.search || { limit: 20 } - flatOptions.search.staleness = obj[key] - }, -}) - -define('shell', { - default: shell, - defaultDescription: ` - SHELL environment variable, or "bash" on Posix, or "cmd.exe" on Windows - `, - type: String, - description: ` - The shell to run for the \`npm explore\` command. - `, - flatten, -}) - -define('shrinkwrap', { - default: true, - type: Boolean, - deprecated: ` - Use the --package-lock setting instead. - `, - description: ` - Alias for --package-lock - `, - flatten (key, obj, flatOptions) { - obj['package-lock'] = obj.shrinkwrap - definitions['package-lock'].flatten('package-lock', obj, flatOptions) - }, -}) - -define('sign-git-commit', { - default: false, - type: Boolean, - description: ` - If set to true, then the \`npm version\` command will commit the new - package version using \`-S\` to add a signature. - - Note that git requires you to have set up GPG keys in your git configs - for this to work properly. - `, - flatten, -}) - -define('sign-git-tag', { - default: false, - type: Boolean, - description: ` - If set to true, then the \`npm version\` command will tag the version - using \`-s\` to add a signature. - - Note that git requires you to have set up GPG keys in your git configs - for this to work properly. 
- `, - flatten, -}) - -define('strict-peer-deps', { - default: false, - type: Boolean, - description: ` - If set to \`true\`, and \`--legacy-peer-deps\` is not set, then _any_ - conflicting \`peerDependencies\` will be treated as an install failure, - even if npm could reasonably guess the appropriate resolution based on - non-peer dependency relationships. - - By default, conflicting \`peerDependencies\` deep in the dependency graph - will be resolved using the nearest non-peer dependency specification, - even if doing so will result in some packages receiving a peer dependency - outside the range set in their package's \`peerDependencies\` object. - - When such an override is performed, a warning is printed, explaining the - conflict and the packages involved. If \`--strict-peer-deps\` is set, - then this warning is treated as a failure. - `, - flatten, -}) - -define('strict-ssl', { - default: true, - type: Boolean, - description: ` - Whether or not to do SSL key validation when making requests to the - registry via https. - - See also the \`ca\` config. - `, - flatten (key, obj, flatOptions) { - flatOptions.strictSSL = obj[key] - }, -}) - -define('tag', { - default: 'latest', - type: String, - description: ` - If you ask npm to install a package and don't tell it a specific version, - then it will install the specified tag. - - Also the tag that is added to the package@version specified by the \`npm - tag\` command, if no explicit tag is given. - - When used by the \`npm diff\` command, this is the tag used to fetch the - tarball that will be compared with the local files by default. - `, - flatten (key, obj, flatOptions) { - flatOptions.defaultTag = obj[key] - }, -}) - -define('tag-version-prefix', { - default: 'v', - type: String, - description: ` - If set, alters the prefix used when tagging a new version when performing - a version increment using \`npm version\`. To remove the prefix - altogether, set it to the empty string: \`""\`. - - Because other tools may rely on the convention that npm version tags look - like \`v1.0.0\`, _only use this property if it is absolutely necessary_. - In particular, use care when overriding this setting for public packages. - `, - flatten, -}) - -define('timing', { - default: false, - type: Boolean, - description: ` - If true, writes timing information to a process specific json file in - the cache or \`logs-dir\`. The file name ends with \`-timing.json\`. - - You can quickly view it with this [json](https://npm.im/json) command - line: \`cat ~/.npm/_logs/*-timing.json | npm exec -- json -g\`. - - Timing information will also be reported in the terminal. To suppress this - while still writing the timing file, use \`--silent\`. - `, -}) - -define('umask', { - default: 0, - type: Umask, - description: ` - The "umask" value to use when setting the file creation mode on files and - folders. - - Folders and executables are given a mode which is \`0o777\` masked - against this value. Other files are given a mode which is \`0o666\` - masked against this value. - - Note that the underlying system will _also_ apply its own umask value to - files and folders that are created, and npm does not circumvent this, but - rather adds the \`--umask\` config to it. - - Thus, the effective default umask value on most POSIX systems is 0o22, - meaning that folders and executables are created with a mode of 0o755 and - other files are created with a mode of 0o644. 
- `, - flatten, -}) - -define('unicode', { - default: unicode, - defaultDescription: ` - false on windows, true on mac/unix systems with a unicode locale, as - defined by the \`LC_ALL\`, \`LC_CTYPE\`, or \`LANG\` environment variables. - `, - type: Boolean, - description: ` - When set to true, npm uses unicode characters in the tree output. When - false, it uses ascii characters instead of unicode glyphs. - `, - flatten, -}) - -define('update-notifier', { - default: true, - type: Boolean, - description: ` - Set to false to suppress the update notification when using an older - version of npm than the latest. - `, -}) - -define('usage', { - default: false, - type: Boolean, - short: ['?', 'H', 'h'], - description: ` - Show short usage output about the command specified. - `, -}) - -define('user-agent', { - default: 'npm/{npm-version} ' + - 'node/{node-version} ' + - '{platform} ' + - '{arch} ' + - 'workspaces/{workspaces} ' + - '{ci}', - type: String, - description: ` - Sets the User-Agent request header. The following fields are replaced - with their actual counterparts: - - * \`{npm-version}\` - The npm version in use - * \`{node-version}\` - The Node.js version in use - * \`{platform}\` - The value of \`process.platform\` - * \`{arch}\` - The value of \`process.arch\` - * \`{workspaces}\` - Set to \`true\` if the \`workspaces\` or \`workspace\` - options are set. - * \`{ci}\` - The value of the \`ci-name\` config, if set, prefixed with - \`ci/\`, or an empty string if \`ci-name\` is empty. - `, - flatten (key, obj, flatOptions) { - const value = obj[key] - const ciName = ciInfo.name?.toLowerCase().split(' ').join('-') || null - let inWorkspaces = false - if (obj.workspaces || obj.workspace && obj.workspace.length) { - inWorkspaces = true - } - flatOptions.userAgent = - value.replace(/\{node-version\}/gi, process.version) - .replace(/\{npm-version\}/gi, obj['npm-version']) - .replace(/\{platform\}/gi, process.platform) - .replace(/\{arch\}/gi, process.arch) - .replace(/\{workspaces\}/gi, inWorkspaces) - .replace(/\{ci\}/gi, ciName ? `ci/${ciName}` : '') - .trim() - - // We can't clobber the original or else subsequent flattening will fail - // (i.e. when we change the underlying config values) - // obj[key] = flatOptions.userAgent - - // user-agent is a unique kind of config item that gets set from a template - // and ends up translated. Because of this, the normal "should we set this - // to process.env also doesn't work - process.env.npm_config_user_agent = flatOptions.userAgent - }, -}) - -define('userconfig', { - default: '~/.npmrc', - type: path, - description: ` - The location of user-level configuration settings. - - This may be overridden by the \`npm_config_userconfig\` environment - variable or the \`--userconfig\` command line option, but may _not_ - be overridden by settings in the \`globalconfig\` file. - `, -}) - -define('version', { - default: false, - type: Boolean, - short: 'v', - description: ` - If true, output the npm version and exit successfully. - - Only relevant when specified explicitly on the command line. - `, -}) - -define('versions', { - default: false, - type: Boolean, - description: ` - If true, output the npm version as well as node's \`process.versions\` - map and the version in the current working directory's \`package.json\` - file if one exists, and exit successfully. + if (flatOptions.saveType === 'peerOptional') { + return + } - Only relevant when specified explicitly on the command line. 
- `, -}) + if (flatOptions.saveType === 'optional') { + flatOptions.saveType = 'peerOptional' + } else { + flatOptions.saveType = 'peer' + } + }, + }), + 'save-prefix': new Definition('save-prefix', { + default: '^', + type: String, + description: ` + Configure how versions of packages installed to a package.json file via + \`--save\` or \`--save-dev\` get prefixed. + + For example if a package has version \`1.2.3\`, by default its version is + set to \`^1.2.3\` which allows minor upgrades for that package, but after + \`npm config set save-prefix='~'\` it would be set to \`~1.2.3\` which + only allows patch upgrades. + `, + flatten (key, obj, flatOptions) { + flatOptions.savePrefix = obj['save-exact'] ? '' : obj['save-prefix'] + obj['save-prefix'] = flatOptions.savePrefix + }, + }), + 'save-prod': new Definition('save-prod', { + default: false, + type: Boolean, + short: 'P', + description: ` + Save installed packages into \`dependencies\` specifically. This is + useful if a package already exists in \`devDependencies\` or + \`optionalDependencies\`, but you want to move it to be a non-optional + production dependency. + + This is the default behavior if \`--save\` is true, and neither + \`--save-dev\` or \`--save-optional\` are true. + `, + flatten (key, obj, flatOptions) { + if (!obj[key]) { + if (flatOptions.saveType === 'prod') { + delete flatOptions.saveType + } + return + } -define('viewer', { - default: isWindows ? 'browser' : 'man', - defaultDescription: ` - "man" on Posix, "browser" on Windows - `, - type: String, - description: ` - The program to use to view help content. + flatOptions.saveType = 'prod' + }, + }), + 'sbom-format': new Definition('sbom-format', { + default: null, + type: [ + 'cyclonedx', + 'spdx', + ], + description: ` + SBOM format to use when generating SBOMs. + `, + flatten, + }), + 'sbom-type': new Definition('sbom-type', { + default: 'library', + type: [ + 'library', + 'application', + 'framework', + ], + description: ` + The type of package described by the generated SBOM. For SPDX, this is the + value for the \`primaryPackagePurpose\` field. For CycloneDX, this is the + value for the \`type\` field. + `, + flatten, + }), + scope: new Definition('scope', { + default: '', + defaultDescription: ` + the scope of the current project, if any, or "" + `, + type: String, + hint: '<@scope>', + description: ` + Associate an operation with a scope for a scoped registry. + + Useful when logging in to or out of a private registry: + + \`\`\` + # log in, linking the scope to the custom registry + npm login --scope=@mycorp --registry=https://registry.mycorp.com + + # log out, removing the link and the auth token + npm logout --scope=@mycorp + \`\`\` + + This will cause \`@mycorp\` to be mapped to the registry for future + installation of packages specified according to the pattern + \`@mycorp/package\`. + + This will also cause \`npm init\` to create a scoped package. + + \`\`\` + # accept all defaults, and create a package named "@foo/whatever", + # instead of just named "whatever" + npm init --scope=@foo --yes + \`\`\` + `, + flatten (key, obj, flatOptions) { + const value = obj[key] + const scope = value && !/^@/.test(value) ? 
`@${value}` : value + flatOptions.scope = scope + // projectScope is kept for compatibility with npm-registry-fetch + flatOptions.projectScope = scope + }, + }), + 'script-shell': new Definition('script-shell', { + default: null, + defaultDescription: ` + '/bin/sh' on POSIX systems, 'cmd.exe' on Windows + `, + type: [null, String], + description: ` + The shell to use for scripts run with the \`npm exec\`, + \`npm run\` and \`npm init \` commands. + `, + flatten (key, obj, flatOptions) { + flatOptions.scriptShell = obj[key] || undefined + }, + }), + searchexclude: new Definition('searchexclude', { + default: '', + type: String, + description: ` + Space-separated options that limit the results from search. + `, + flatten (key, obj, flatOptions) { + flatOptions.search = flatOptions.search || { limit: 20 } + flatOptions.search.exclude = obj[key].toLowerCase() + }, + }), + searchlimit: new Definition('searchlimit', { + default: 20, + type: Number, + description: ` + Number of items to limit search results to. Will not apply at all to + legacy searches. + `, + flatten (key, obj, flatOptions) { + flatOptions.search = flatOptions.search || {} + flatOptions.search.limit = obj[key] + }, + }), + searchopts: new Definition('searchopts', { + default: '', + type: String, + description: ` + Space-separated options that are always passed to search. + `, + flatten (key, obj, flatOptions) { + flatOptions.search = flatOptions.search || { limit: 20 } + flatOptions.search.opts = querystring.parse(obj[key]) + }, + }), + searchstaleness: new Definition('searchstaleness', { + default: 15 * 60, + type: Number, + description: ` + The age of the cache, in seconds, before another registry request is made + if using legacy search endpoint. + `, + flatten (key, obj, flatOptions) { + flatOptions.search = flatOptions.search || { limit: 20 } + flatOptions.search.staleness = obj[key] + }, + }), + shell: new Definition('shell', { + default: shell, + defaultDescription: ` + SHELL environment variable, or "bash" on Posix, or "cmd.exe" on Windows + `, + type: String, + description: ` + The shell to run for the \`npm explore\` command. + `, + flatten, + }), + shrinkwrap: new Definition('shrinkwrap', { + default: true, + type: Boolean, + deprecated: ` + Use the --package-lock setting instead. + `, + description: ` + Alias for --package-lock + `, + flatten (key, obj, flatOptions) { + obj['package-lock'] = obj.shrinkwrap + definitions['package-lock'].flatten('package-lock', obj, flatOptions) + }, + }), + 'sign-git-commit': new Definition('sign-git-commit', { + default: false, + type: Boolean, + description: ` + If set to true, then the \`npm version\` command will commit the new + package version using \`-S\` to add a signature. + + Note that git requires you to have set up GPG keys in your git configs + for this to work properly. + `, + flatten, + }), + 'sign-git-tag': new Definition('sign-git-tag', { + default: false, + type: Boolean, + description: ` + If set to true, then the \`npm version\` command will tag the version + using \`-s\` to add a signature. + + Note that git requires you to have set up GPG keys in your git configs + for this to work properly. 
+ `, + flatten, + }), + 'strict-peer-deps': new Definition('strict-peer-deps', { + default: false, + type: Boolean, + description: ` + If set to \`true\`, and \`--legacy-peer-deps\` is not set, then _any_ + conflicting \`peerDependencies\` will be treated as an install failure, + even if npm could reasonably guess the appropriate resolution based on + non-peer dependency relationships. + + By default, conflicting \`peerDependencies\` deep in the dependency graph + will be resolved using the nearest non-peer dependency specification, + even if doing so will result in some packages receiving a peer dependency + outside the range set in their package's \`peerDependencies\` object. + + When such an override is performed, a warning is printed, explaining the + conflict and the packages involved. If \`--strict-peer-deps\` is set, + then this warning is treated as a failure. + `, + flatten, + }), + 'strict-ssl': new Definition('strict-ssl', { + default: true, + type: Boolean, + description: ` + Whether or not to do SSL key validation when making requests to the + registry via https. + + See also the \`ca\` config. + `, + flatten (key, obj, flatOptions) { + flatOptions.strictSSL = obj[key] + }, + }), + tag: new Definition('tag', { + default: 'latest', + type: String, + description: ` + If you ask npm to install a package and don't tell it a specific version, + then it will install the specified tag. + + Also the tag that is added to the package@version specified by the \`npm + tag\` command, if no explicit tag is given. + + When used by the \`npm diff\` command, this is the tag used to fetch the + tarball that will be compared with the local files by default. + `, + flatten (key, obj, flatOptions) { + flatOptions.defaultTag = obj[key] + }, + }), + 'tag-version-prefix': new Definition('tag-version-prefix', { + default: 'v', + type: String, + description: ` + If set, alters the prefix used when tagging a new version when performing + a version increment using \`npm version\`. To remove the prefix + altogether, set it to the empty string: \`""\`. + + Because other tools may rely on the convention that npm version tags look + like \`v1.0.0\`, _only use this property if it is absolutely necessary_. + In particular, use care when overriding this setting for public packages. + `, + flatten, + }), + timing: new Definition('timing', { + default: false, + type: Boolean, + description: ` + If true, writes timing information to a process specific json file in + the cache or \`logs-dir\`. The file name ends with \`-timing.json\`. + + You can quickly view it with this [json](https://npm.im/json) command + line: \`cat ~/.npm/_logs/*-timing.json | npm exec -- json -g\`. + + Timing information will also be reported in the terminal. To suppress this + while still writing the timing file, use \`--silent\`. + `, + }), + umask: new Definition('umask', { + default: 0, + type: Umask, + description: ` + The "umask" value to use when setting the file creation mode on files and + folders. + + Folders and executables are given a mode which is \`0o777\` masked + against this value. Other files are given a mode which is \`0o666\` + masked against this value. + + Note that the underlying system will _also_ apply its own umask value to + files and folders that are created, and npm does not circumvent this, but + rather adds the \`--umask\` config to it. 
+ + Thus, the effective default umask value on most POSIX systems is 0o22, + meaning that folders and executables are created with a mode of 0o755 and + other files are created with a mode of 0o644. + `, + flatten, + }), + unicode: new Definition('unicode', { + default: unicode, + defaultDescription: ` + false on windows, true on mac/unix systems with a unicode locale, as + defined by the \`LC_ALL\`, \`LC_CTYPE\`, or \`LANG\` environment variables. + `, + type: Boolean, + description: ` + When set to true, npm uses unicode characters in the tree output. When + false, it uses ascii characters instead of unicode glyphs. + `, + flatten, + }), + 'update-notifier': new Definition('update-notifier', { + default: true, + type: Boolean, + description: ` + Set to false to suppress the update notification when using an older + version of npm than the latest. + `, + }), + usage: new Definition('usage', { + default: false, + type: Boolean, + short: ['?', 'H', 'h'], + description: ` + Show short usage output about the command specified. + `, + }), + 'user-agent': new Definition('user-agent', { + default: 'npm/{npm-version} ' + + 'node/{node-version} ' + + '{platform} ' + + '{arch} ' + + 'workspaces/{workspaces} ' + + '{ci}', + type: String, + description: ` + Sets the User-Agent request header. The following fields are replaced + with their actual counterparts: + + * \`{npm-version}\` - The npm version in use + * \`{node-version}\` - The Node.js version in use + * \`{platform}\` - The value of \`process.platform\` + * \`{arch}\` - The value of \`process.arch\` + * \`{workspaces}\` - Set to \`true\` if the \`workspaces\` or \`workspace\` + options are set. + * \`{ci}\` - The value of the \`ci-name\` config, if set, prefixed with + \`ci/\`, or an empty string if \`ci-name\` is empty. + `, + flatten (key, obj, flatOptions) { + const value = obj[key] + const ciName = ciInfo.name?.toLowerCase().split(' ').join('-') || null + let inWorkspaces = false + if (obj.workspaces || obj.workspace && obj.workspace.length) { + inWorkspaces = true + } + flatOptions.userAgent = + value.replace(/\{node-version\}/gi, process.version) + .replace(/\{npm-version\}/gi, obj['npm-version']) + .replace(/\{platform\}/gi, process.platform) + .replace(/\{arch\}/gi, process.arch) + .replace(/\{workspaces\}/gi, inWorkspaces) + .replace(/\{ci\}/gi, ciName ? `ci/${ciName}` : '') + .trim() + + // We can't clobber the original or else subsequent flattening will fail + // (i.e. when we change the underlying config values) + // obj[key] = flatOptions.userAgent + + // user-agent is a unique kind of config item that gets set from a template + // and ends up translated. Because of this, the normal "should we set this + // to process.env also doesn't work + process.env.npm_config_user_agent = flatOptions.userAgent + }, + }), + userconfig: new Definition('userconfig', { + default: '~/.npmrc', + type: path, + description: ` + The location of user-level configuration settings. + + This may be overridden by the \`npm_config_userconfig\` environment + variable or the \`--userconfig\` command line option, but may _not_ + be overridden by settings in the \`globalconfig\` file. + `, + }), + version: new Definition('version', { + default: false, + type: Boolean, + short: 'v', + description: ` + If true, output the npm version and exit successfully. + + Only relevant when specified explicitly on the command line. 
+ `, + }), + versions: new Definition('versions', { + default: false, + type: Boolean, + description: ` + If true, output the npm version as well as node's \`process.versions\` + map and the version in the current working directory's \`package.json\` + file if one exists, and exit successfully. + + Only relevant when specified explicitly on the command line. + `, + }), + viewer: new Definition('viewer', { + default: isWindows ? 'browser' : 'man', + defaultDescription: ` + "man" on Posix, "browser" on Windows + `, + type: String, + description: ` + The program to use to view help content. + + Set to \`"browser"\` to view html help content in the default web browser. + `, + }), + which: new Definition('which', { + default: null, + hint: '', + type: [null, Number], + description: ` + If there are multiple funding sources, which 1-indexed source URL to open. + `, + }), + workspace: new Definition('workspace', { + default: [], + type: [String, Array], + hint: '', + short: 'w', + envExport: false, + description: ` + Enable running a command in the context of the configured workspaces of the + current project while filtering by running only the workspaces defined by + this configuration option. + + Valid values for the \`workspace\` config are either: + + * Workspace names + * Path to a workspace directory + * Path to a parent workspace directory (will result in selecting all + workspaces within that folder) + + When set for the \`npm init\` command, this may be set to the folder of + a workspace which does not yet exist, to create the folder and set it + up as a brand new workspace within the project. + `, + flatten: (key, obj, flatOptions) => { + definitions['user-agent'].flatten('user-agent', obj, flatOptions) + }, + }), + workspaces: new Definition('workspaces', { + default: null, + type: [null, Boolean], + short: 'ws', + envExport: false, + description: ` + Set to true to run the command in the context of **all** configured + workspaces. + + Explicitly setting this to false will cause commands like \`install\` to + ignore workspaces altogether. + When not set explicitly: + + - Commands that operate on the \`node_modules\` tree (install, update, + etc.) will link workspaces into the \`node_modules\` folder. + - Commands that do other things (test, exec, publish, etc.) will operate + on the root project, _unless_ one or more workspaces are specified in + the \`workspace\` config. + `, + flatten: (key, obj, flatOptions) => { + definitions['user-agent'].flatten('user-agent', obj, flatOptions) + + // TODO: this is a derived value, and should be reworked when we have a + // pattern for derived value + + // workspacesEnabled is true whether workspaces is null or true + // commands contextually work with workspaces or not regardless of + // configuration, so we need an option specifically to disable workspaces + flatOptions.workspacesEnabled = obj[key] !== false + }, + }), + 'workspaces-update': new Definition('workspaces-update', { + default: true, + type: Boolean, + description: ` + If set to true, the npm cli will run an update after operations that may + possibly change the workspaces installed to the \`node_modules\` folder. + `, + flatten, + }), + yes: new Definition('yes', { + default: null, + type: [null, Boolean], + short: 'y', + description: ` + Automatically answer "yes" to any prompts that npm might print on + the command line. + `, + }), +} - Set to \`"browser"\` to view html help content in the default web browser. 
- `, -}) - -define('which', { - default: null, - hint: '', - type: [null, Number], - description: ` - If there are multiple funding sources, which 1-indexed source URL to open. - `, -}) - -define('workspace', { - default: [], - type: [String, Array], - hint: '', - short: 'w', - envExport: false, - description: ` - Enable running a command in the context of the configured workspaces of the - current project while filtering by running only the workspaces defined by - this configuration option. - - Valid values for the \`workspace\` config are either: - - * Workspace names - * Path to a workspace directory - * Path to a parent workspace directory (will result in selecting all - workspaces within that folder) - - When set for the \`npm init\` command, this may be set to the folder of - a workspace which does not yet exist, to create the folder and set it - up as a brand new workspace within the project. - `, - flatten: (key, obj, flatOptions) => { - definitions['user-agent'].flatten('user-agent', obj, flatOptions) - }, -}) - -define('workspaces', { - default: null, - type: [null, Boolean], - short: 'ws', - envExport: false, - description: ` - Set to true to run the command in the context of **all** configured - workspaces. - - Explicitly setting this to false will cause commands like \`install\` to - ignore workspaces altogether. - When not set explicitly: - - - Commands that operate on the \`node_modules\` tree (install, update, - etc.) will link workspaces into the \`node_modules\` folder. - - Commands that do other things (test, exec, publish, etc.) will operate - on the root project, _unless_ one or more workspaces are specified in - the \`workspace\` config. - `, - flatten: (key, obj, flatOptions) => { - definitions['user-agent'].flatten('user-agent', obj, flatOptions) - - // TODO: this is a derived value, and should be reworked when we have a - // pattern for derived value - - // workspacesEnabled is true whether workspaces is null or true - // commands contextually work with workspaces or not regardless of - // configuration, so we need an option specifically to disable workspaces - flatOptions.workspacesEnabled = obj[key] !== false - }, -}) - -define('workspaces-update', { - default: true, - type: Boolean, - description: ` - If set to true, the npm cli will run an update after operations that may - possibly change the workspaces installed to the \`node_modules\` folder. - `, - flatten, -}) - -define('yes', { - default: null, - type: [null, Boolean], - short: 'y', - description: ` - Automatically answer "yes" to any prompts that npm might print on - the command line. 
- `, -}) +module.exports = definitions diff --git a/deps/npm/node_modules/@npmcli/config/lib/index.js b/deps/npm/node_modules/@npmcli/config/lib/index.js index b09ecc478f64fd..1ff19c128696ca 100644 --- a/deps/npm/node_modules/@npmcli/config/lib/index.js +++ b/deps/npm/node_modules/@npmcli/config/lib/index.js @@ -2,12 +2,10 @@ const { walkUp } = require('walk-up-path') const ini = require('ini') const nopt = require('nopt') -const mapWorkspaces = require('@npmcli/map-workspaces') -const rpj = require('read-package-json-fast') const log = require('proc-log') -const { resolve, dirname, join } = require('path') -const { homedir } = require('os') +const { resolve, dirname, join } = require('node:path') +const { homedir } = require('node:os') const { readFile, writeFile, @@ -28,35 +26,12 @@ const dirExists = (...p) => stat(resolve(...p)) const hasOwnProperty = (obj, key) => Object.prototype.hasOwnProperty.call(obj, key) -// define a custom getter, but turn into a normal prop -// if we set it. otherwise it can't be set on child objects -const settableGetter = (obj, key, get) => { - Object.defineProperty(obj, key, { - get, - set (value) { - Object.defineProperty(obj, key, { - value, - configurable: true, - writable: true, - enumerable: true, - }) - }, - configurable: true, - enumerable: true, - }) -} - const typeDefs = require('./type-defs.js') const nerfDart = require('./nerf-dart.js') const envReplace = require('./env-replace.js') const parseField = require('./parse-field.js') -const typeDescription = require('./type-description.js') const setEnvs = require('./set-envs.js') -const { - ErrInvalidAuth, -} = require('./errors.js') - // types that can be saved back to const confFileTypes = new Set([ 'global', @@ -329,7 +304,21 @@ class Config { // default the globalconfig file to that location, instead of the default // global prefix. It's weird that `npm get globalconfig --prefix=/foo` // returns `/foo/etc/npmrc`, but better to not change it at this point. - settableGetter(data, 'globalconfig', () => resolve(this.#get('prefix'), 'etc/npmrc')) + // define a custom getter, but turn into a normal prop + // if we set it. 
otherwise it can't be set on child objects + Object.defineProperty(data, 'globalconfig', { + get: () => resolve(this.#get('prefix'), 'etc/npmrc'), + set (value) { + Object.defineProperty(data, 'globalconfig', { + value, + configurable: true, + writable: true, + enumerable: true, + }) + }, + configurable: true, + enumerable: true, + }) } loadHome () { @@ -444,6 +433,7 @@ class Config { } if (authProblems.length) { + const { ErrInvalidAuth } = require('./errors.js') throw new ErrInvalidAuth(authProblems) } @@ -512,6 +502,7 @@ class Config { } invalidHandler (k, val, type, source, where) { + const typeDescription = require('./type-description.js') log.warn( 'invalid config', k + '=' + JSON.stringify(val), @@ -696,6 +687,7 @@ class Config { } if (this.localPrefix && hasPackageJson) { + const rpj = require('read-package-json-fast') // if we already set localPrefix but this dir has a package.json // then we need to see if `p` is a workspace root by reading its package.json // however, if reading it fails then we should just move on @@ -704,6 +696,7 @@ class Config { continue } + const mapWorkspaces = require('@npmcli/map-workspaces') const workspaces = await mapWorkspaces({ cwd: p, pkg }) for (const w of workspaces.values()) { if (w === this.localPrefix) { diff --git a/deps/npm/node_modules/@npmcli/config/lib/nerf-dart.js b/deps/npm/node_modules/@npmcli/config/lib/nerf-dart.js index d6ae4aa2aa7e2a..030d92a82270d2 100644 --- a/deps/npm/node_modules/@npmcli/config/lib/nerf-dart.js +++ b/deps/npm/node_modules/@npmcli/config/lib/nerf-dart.js @@ -1,4 +1,4 @@ -const { URL } = require('url') +const { URL } = require('node:url') /** * Maps a URL to an identifier. diff --git a/deps/npm/node_modules/@npmcli/config/lib/parse-field.js b/deps/npm/node_modules/@npmcli/config/lib/parse-field.js index 099b0b4eaf1a83..9ac3d21cae8b71 100644 --- a/deps/npm/node_modules/@npmcli/config/lib/parse-field.js +++ b/deps/npm/node_modules/@npmcli/config/lib/parse-field.js @@ -1,7 +1,7 @@ // Parse a field, coercing it to the best type available. 
const typeDefs = require('./type-defs.js') const envReplace = require('./env-replace.js') -const { resolve } = require('path') +const { resolve } = require('node:path') const { parse: umaskParse } = require('./umask.js') diff --git a/deps/npm/node_modules/@npmcli/config/lib/type-defs.js b/deps/npm/node_modules/@npmcli/config/lib/type-defs.js index 20a827c3d164e9..3c9dfe19ded113 100644 --- a/deps/npm/node_modules/@npmcli/config/lib/type-defs.js +++ b/deps/npm/node_modules/@npmcli/config/lib/type-defs.js @@ -1,10 +1,12 @@ const nopt = require('nopt') -const { Umask, validate: validateUmask } = require('./umask.js') +const { validate: validateUmask } = require('./umask.js') -const semver = require('semver') +class Umask {} +class Semver {} +const semverValid = require('semver/functions/valid') const validateSemver = (data, k, val) => { - const valid = semver.valid(val) + const valid = semverValid(val) if (!valid) { return false } @@ -23,7 +25,7 @@ const validatePath = (data, k, val) => { module.exports = { ...nopt.typeDefs, semver: { - type: semver, + type: Semver, validate: validateSemver, description: 'full valid SemVer string', }, diff --git a/deps/npm/node_modules/@npmcli/config/lib/umask.js b/deps/npm/node_modules/@npmcli/config/lib/umask.js index 4d9ebbdc965451..2ddc5ca7822326 100644 --- a/deps/npm/node_modules/@npmcli/config/lib/umask.js +++ b/deps/npm/node_modules/@npmcli/config/lib/umask.js @@ -1,4 +1,3 @@ -class Umask {} const parse = val => { // this is run via nopt and parse field where everything is // converted to a string first, ignoring coverage for now @@ -33,4 +32,4 @@ const validate = (data, k, val) => { } } -module.exports = { Umask, parse, validate } +module.exports = { parse, validate } diff --git a/deps/npm/node_modules/@npmcli/config/package.json b/deps/npm/node_modules/@npmcli/config/package.json index b5c73e1b13a9b1..797c32f7ee4a63 100644 --- a/deps/npm/node_modules/@npmcli/config/package.json +++ b/deps/npm/node_modules/@npmcli/config/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/config", - "version": "8.2.1", + "version": "8.2.2", "files": [ "bin/", "lib/" diff --git a/deps/npm/node_modules/@npmcli/git/lib/is.js b/deps/npm/node_modules/@npmcli/git/lib/is.js index e2542f21577272..f5a0e8754f10dc 100644 --- a/deps/npm/node_modules/@npmcli/git/lib/is.js +++ b/deps/npm/node_modules/@npmcli/git/lib/is.js @@ -1,6 +1,4 @@ // not an airtight indicator, but a good gut-check to even bother trying -const { promisify } = require('util') -const fs = require('fs') -const stat = promisify(fs.stat) +const { stat } = require('fs/promises') module.exports = ({ cwd = process.cwd() } = {}) => stat(cwd + '/.git').then(() => true, () => false) diff --git a/deps/npm/node_modules/@npmcli/git/package.json b/deps/npm/node_modules/@npmcli/git/package.json index 485c1f43dddb90..7493ec7fb0effb 100644 --- a/deps/npm/node_modules/@npmcli/git/package.json +++ b/deps/npm/node_modules/@npmcli/git/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/git", - "version": "5.0.4", + "version": "5.0.5", "main": "lib/index.js", "files": [ "bin/", diff --git a/deps/npm/node_modules/@npmcli/map-workspaces/lib/index.js b/deps/npm/node_modules/@npmcli/map-workspaces/lib/index.js index 501dad870a2b75..b20bf5de5d631e 100644 --- a/deps/npm/node_modules/@npmcli/map-workspaces/lib/index.js +++ b/deps/npm/node_modules/@npmcli/map-workspaces/lib/index.js @@ -5,23 +5,49 @@ const { minimatch } = require('minimatch') const rpj = require('read-package-json-fast') const { glob } = require('glob') -function 
appendNegatedPatterns (patterns) { - const results = [] - for (let pattern of patterns) { +function appendNegatedPatterns (allPatterns) { + const patterns = [] + const negatedPatterns = [] + for (let pattern of allPatterns) { const excl = pattern.match(/^!+/) if (excl) { pattern = pattern.slice(excl[0].length) } - // strip off any / from the start of the pattern. /foo => foo - pattern = pattern.replace(/^\/+/, '') + // strip off any / or ./ from the start of the pattern. /foo => foo + pattern = pattern.replace(/^\.?\/+/, '') // an odd number of ! means a negated pattern. !!foo ==> foo const negate = excl && excl[0].length % 2 === 1 - results.push({ pattern, negate }) + if (negate) { + negatedPatterns.push(pattern) + } else { + // remove negated patterns that appeared before this pattern to avoid + // ignoring paths that were matched afterwards + // e.g: ['packages/**', '!packages/b/**', 'packages/b/a'] + // in the above list, the last pattern overrides the negated pattern + // right before it. In effect, the above list would become: + // ['packages/**', 'packages/b/a'] + // The order matters here which is why we must do it inside the loop + // as opposed to doing it all together at the end. + for (let i = 0; i < negatedPatterns.length; ++i) { + const negatedPattern = negatedPatterns[i] + if (minimatch(pattern, negatedPattern)) { + negatedPatterns.splice(i, 1) + } + } + patterns.push(pattern) + } } - return results + // use the negated patterns to eagerly remove all the patterns that + // can be removed to avoid unnecessary crawling + for (const negated of negatedPatterns) { + for (const pattern of minimatch.match(patterns, negated)) { + patterns.splice(patterns.indexOf(pattern), 1) + } + } + return { patterns, negatedPatterns } } function getPatterns (workspaces) { @@ -77,11 +103,11 @@ async function mapWorkspaces (opts = {}) { } const { workspaces = [] } = opts.pkg - const patterns = getPatterns(workspaces) + const { patterns, negatedPatterns } = getPatterns(workspaces) const results = new Map() const seen = new Map() - if (!patterns.length) { + if (!patterns.length && !negatedPatterns.length) { return results } @@ -89,52 +115,54 @@ async function mapWorkspaces (opts = {}) { ...opts, ignore: [ ...opts.ignore || [], - ...['**/node_modules/**'], + '**/node_modules/**', + // just ignore the negated patterns to avoid unnecessary crawling + ...negatedPatterns, ], }) const getPackagePathname = pkgPathmame(opts) - for (const item of patterns) { - let matches = await glob(getGlobPattern(item.pattern), getGlobOpts()) - // preserves glob@8 behavior - matches = matches.sort((a, b) => a.localeCompare(b, 'en')) - - for (const match of matches) { - let pkg - const packageJsonPathname = getPackagePathname(match, 'package.json') - const packagePathname = path.dirname(packageJsonPathname) - - try { - pkg = await rpj(packageJsonPathname) - } catch (err) { - if (err.code === 'ENOENT') { - continue - } else { - throw err - } - } + let matches = await glob(patterns.map((p) => getGlobPattern(p)), getGlobOpts()) + // preserves glob@8 behavior + matches = matches.sort((a, b) => a.localeCompare(b, 'en')) + + // we must preserve the order of results according to the given list of + // workspace patterns + const orderedMatches = [] + for (const pattern of patterns) { + orderedMatches.push(...matches.filter((m) => { + return minimatch(m, pattern, { partial: true, windowsPathsNoEscape: true }) + })) + } - const name = getPackageName(pkg, packagePathname) + for (const match of orderedMatches) { + let pkg + const 
packageJsonPathname = getPackagePathname(match, 'package.json') - let seenPackagePathnames = seen.get(name) - if (!seenPackagePathnames) { - seenPackagePathnames = new Set() - seen.set(name, seenPackagePathnames) - } - if (item.negate) { - seenPackagePathnames.delete(packagePathname) + try { + pkg = await rpj(packageJsonPathname) + } catch (err) { + if (err.code === 'ENOENT') { + continue } else { - seenPackagePathnames.add(packagePathname) + throw err } } + + const packagePathname = path.dirname(packageJsonPathname) + const name = getPackageName(pkg, packagePathname) + + let seenPackagePathnames = seen.get(name) + if (!seenPackagePathnames) { + seenPackagePathnames = new Set() + seen.set(name, seenPackagePathnames) + } + seenPackagePathnames.add(packagePathname) } const errorMessageArray = ['must not have multiple workspaces with the same name'] for (const [packageName, seenPackagePathnames] of seen) { - if (seenPackagePathnames.size === 0) { - continue - } if (seenPackagePathnames.size > 1) { addDuplicateErrorMessages(errorMessageArray, packageName, seenPackagePathnames) } else { @@ -177,30 +205,25 @@ mapWorkspaces.virtual = function (opts = {}) { const { workspaces = [] } = packages[''] || {} // uses a pathname-keyed map in order to negate the exact items const results = new Map() - const patterns = getPatterns(workspaces) - if (!patterns.length) { + const { patterns, negatedPatterns } = getPatterns(workspaces) + if (!patterns.length && !negatedPatterns.length) { return results } - patterns.push({ pattern: '**/node_modules/**', negate: true }) - - const getPackagePathname = pkgPathmame(opts) + negatedPatterns.push('**/node_modules/**') - for (const packageKey of Object.keys(packages)) { - if (packageKey === '') { - continue + const packageKeys = Object.keys(packages) + for (const pattern of negatedPatterns) { + for (const packageKey of minimatch.match(packageKeys, pattern)) { + packageKeys.splice(packageKeys.indexOf(packageKey), 1) } + } - for (const item of patterns) { - if (minimatch(packageKey, item.pattern)) { - const packagePathname = getPackagePathname(packageKey) - const name = getPackageName(packages[packageKey], packagePathname) - - if (item.negate) { - results.delete(packagePathname) - } else { - results.set(packagePathname, name) - } - } + const getPackagePathname = pkgPathmame(opts) + for (const pattern of patterns) { + for (const packageKey of minimatch.match(packageKeys, pattern)) { + const packagePathname = getPackagePathname(packageKey) + const name = getPackageName(packages[packageKey], packagePathname) + results.set(packagePathname, name) } } diff --git a/deps/npm/node_modules/@npmcli/map-workspaces/package.json b/deps/npm/node_modules/@npmcli/map-workspaces/package.json index 64cb7f9d3f1177..e6292b06bd2b43 100644 --- a/deps/npm/node_modules/@npmcli/map-workspaces/package.json +++ b/deps/npm/node_modules/@npmcli/map-workspaces/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/map-workspaces", - "version": "3.0.4", + "version": "3.0.6", "main": "lib/index.js", "files": [ "bin/", @@ -25,7 +25,7 @@ "author": "GitHub Inc.", "license": "ISC", "scripts": { - "lint": "eslint \"**/*.js\"", + "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "pretest": "npm run lint", "test": "tap", "snap": "tap", @@ -43,7 +43,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.14.1", + "@npmcli/template-oss": "4.21.3", "tap": "^16.0.1" }, "dependencies": { @@ -54,7 +54,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by 
@npmcli/template-oss. Edits may be overwritten.", - "version": "4.14.1", + "version": "4.21.3", "publish": "true" } } diff --git a/deps/npm/node_modules/@npmcli/package-json/lib/normalize.js b/deps/npm/node_modules/@npmcli/package-json/lib/normalize.js index 204d4d8a8e7dd6..350b3f3d7cb8f0 100644 --- a/deps/npm/node_modules/@npmcli/package-json/lib/normalize.js +++ b/deps/npm/node_modules/@npmcli/package-json/lib/normalize.js @@ -1,12 +1,30 @@ -const semver = require('semver') +const valid = require('semver/functions/valid') +const clean = require('semver/functions/clean') const fs = require('fs/promises') -const { glob } = require('glob') -const legacyFixer = require('normalize-package-data/lib/fixer.js') -const legacyMakeWarning = require('normalize-package-data/lib/make_warning.js') const path = require('path') const log = require('proc-log') -const git = require('@npmcli/git') -const hostedGitInfo = require('hosted-git-info') + +/** + * @type {import('hosted-git-info')} + */ +let _hostedGitInfo +function lazyHostedGitInfo () { + if (!_hostedGitInfo) { + _hostedGitInfo = require('hosted-git-info') + } + return _hostedGitInfo +} + +/** + * @type {import('glob').glob} + */ +let _glob +function lazyLoadGlob () { + if (!_glob) { + _glob = require('glob').glob + } + return _glob +} // used to be npm-normalize-package-bin function normalizePackageBin (pkg, changes) { @@ -130,10 +148,10 @@ const normalize = async (pkg, { strict, steps, root, changes, allowLegacyCase }) if (!data.version) { data.version = '' } else { - if (!semver.valid(data.version, loose)) { + if (!valid(data.version, loose)) { throw new Error(`Invalid version: "${data.version}"`) } - const version = semver.clean(data.version, loose) + const version = clean(data.version, loose) if (version !== data.version) { changes?.push(`"version" was cleaned and set to "${version}"`) data.version = version @@ -208,7 +226,7 @@ const normalize = async (pkg, { strict, steps, root, changes, allowLegacyCase }) // add "install" attribute if any "*.gyp" files exist if (steps.includes('gypfile')) { if (!scripts.install && !scripts.preinstall && data.gypfile !== false) { - const files = await glob('*.gyp', { cwd: pkg.path }) + const files = await lazyLoadGlob()('*.gyp', { cwd: pkg.path }) if (files.length) { scripts.install = 'node-gyp rebuild' data.scripts = scripts @@ -275,7 +293,11 @@ const normalize = async (pkg, { strict, steps, root, changes, allowLegacyCase }) // populate "readme" attribute if (steps.includes('readme') && !data.readme) { const mdre = /\.m?a?r?k?d?o?w?n?$/i - const files = await glob('{README,README.*}', { cwd: pkg.path, nocase: true, mark: true }) + const files = await lazyLoadGlob()('{README,README.*}', { + cwd: pkg.path, + nocase: true, + mark: true, + }) let readmeFile for (const file of files) { // don't accept directories. 
@@ -306,7 +328,7 @@ const normalize = async (pkg, { strict, steps, root, changes, allowLegacyCase }) if (steps.includes('mans') && !data.man && data.directories?.man) { const manDir = data.directories.man const cwd = path.resolve(pkg.path, manDir) - const files = await glob('**/*.[0-9]', { cwd }) + const files = await lazyLoadGlob()('**/*.[0-9]', { cwd }) data.man = files.map(man => path.relative(pkg.path, path.join(cwd, man)).split(path.sep).join('/') ) @@ -319,7 +341,7 @@ const normalize = async (pkg, { strict, steps, root, changes, allowLegacyCase }) // expand "directories.bin" if (steps.includes('binDir') && data.directories?.bin && !data.bin) { const binsDir = path.resolve(pkg.path, path.join('.', path.join('/', data.directories.bin))) - const bins = await glob('**', { cwd: binsDir }) + const bins = await lazyLoadGlob()('**', { cwd: binsDir }) data.bin = bins.reduce((acc, binFile) => { if (binFile && !binFile.startsWith('.')) { const binName = path.basename(binFile) @@ -333,6 +355,7 @@ const normalize = async (pkg, { strict, steps, root, changes, allowLegacyCase }) // populate "gitHead" attribute if (steps.includes('gitHead') && !data.gitHead) { + const git = require('@npmcli/git') const gitRoot = await git.find({ cwd: pkg.path, root }) let head if (gitRoot) { @@ -446,7 +469,7 @@ const normalize = async (pkg, { strict, steps, root, changes, allowLegacyCase }) } } if (data.repository.url) { - const hosted = hostedGitInfo.fromUrl(data.repository.url) + const hosted = lazyHostedGitInfo().fromUrl(data.repository.url) let r if (hosted) { if (hosted.getDefaultRepresentation() === 'shortcut') { @@ -506,7 +529,7 @@ const normalize = async (pkg, { strict, steps, root, changes, allowLegacyCase }) changes?.push(`Removed invalid "${deps}.${d}"`) delete data[deps][d] } - const hosted = hostedGitInfo.fromUrl(data[deps][d])?.toString() + const hosted = lazyHostedGitInfo().fromUrl(data[deps][d])?.toString() if (hosted && hosted !== data[deps][d]) { changes?.push(`Normalized git reference to "${deps}.${d}"`) data[deps][d] = hosted.toString() @@ -518,6 +541,8 @@ const normalize = async (pkg, { strict, steps, root, changes, allowLegacyCase }) } if (steps.includes('normalizeData')) { + const legacyFixer = require('normalize-package-data/lib/fixer.js') + const legacyMakeWarning = require('normalize-package-data/lib/make_warning.js') legacyFixer.warn = function () { changes?.push(legacyMakeWarning.apply(null, arguments)) } diff --git a/deps/npm/node_modules/@npmcli/package-json/package.json b/deps/npm/node_modules/@npmcli/package-json/package.json index ab320e8695ca3d..4f7a29d2e4c597 100644 --- a/deps/npm/node_modules/@npmcli/package-json/package.json +++ b/deps/npm/node_modules/@npmcli/package-json/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/package-json", - "version": "5.0.0", + "version": "5.0.2", "description": "Programmatic API to update package.json", "main": "lib/index.js", "files": [ @@ -10,7 +10,7 @@ "scripts": { "snap": "tap", "test": "tap", - "lint": "eslint \"**/*.js\"", + "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "lintfix": "npm run lint -- --fix", "posttest": "npm run lint", "postsnap": "npm run lintfix --", @@ -25,8 +25,8 @@ "license": "ISC", "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.18.0", - "read-package-json": "^6.0.4", + "@npmcli/template-oss": "4.21.3", + "read-package-json": "^7.0.0", "read-package-json-fast": "^3.0.2", "tap": "^16.0.1" }, @@ -48,14 +48,8 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially 
managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.18.0", - "publish": "true", - "ciVersions": [ - "16.14.0", - "16.x", - "18.0.0", - "18.x" - ] + "version": "4.21.3", + "publish": "true" }, "tap": { "nyc-arg": [ diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/build.js b/deps/npm/node_modules/@sigstore/bundle/dist/build.js index 6990f5451a2d33..65c71b100ad58f 100644 --- a/deps/npm/node_modules/@sigstore/bundle/dist/build.js +++ b/deps/npm/node_modules/@sigstore/bundle/dist/build.js @@ -21,7 +21,9 @@ const bundle_1 = require("./bundle"); // Message signature bundle - $case: 'messageSignature' function toMessageSignatureBundle(options) { return { - mediaType: bundle_1.BUNDLE_V02_MEDIA_TYPE, + mediaType: options.singleCertificate + ? bundle_1.BUNDLE_V03_MEDIA_TYPE + : bundle_1.BUNDLE_V02_MEDIA_TYPE, content: { $case: 'messageSignature', messageSignature: { @@ -39,7 +41,9 @@ exports.toMessageSignatureBundle = toMessageSignatureBundle; // DSSE envelope bundle - $case: 'dsseEnvelope' function toDSSEBundle(options) { return { - mediaType: bundle_1.BUNDLE_V02_MEDIA_TYPE, + mediaType: options.singleCertificate + ? bundle_1.BUNDLE_V03_MEDIA_TYPE + : bundle_1.BUNDLE_V02_MEDIA_TYPE, content: { $case: 'dsseEnvelope', dsseEnvelope: toEnvelope(options), @@ -71,12 +75,20 @@ function toVerificationMaterial(options) { } function toKeyContent(options) { if (options.certificate) { - return { - $case: 'x509CertificateChain', - x509CertificateChain: { - certificates: [{ rawBytes: options.certificate }], - }, - }; + if (options.singleCertificate) { + return { + $case: 'certificate', + certificate: { rawBytes: options.certificate }, + }; + } + else { + return { + $case: 'x509CertificateChain', + x509CertificateChain: { + certificates: [{ rawBytes: options.certificate }], + }, + }; + } } else { return { diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/bundle.js b/deps/npm/node_modules/@sigstore/bundle/dist/bundle.js index 60574b309c09cb..dbd35df2ca2bb3 100644 --- a/deps/npm/node_modules/@sigstore/bundle/dist/bundle.js +++ b/deps/npm/node_modules/@sigstore/bundle/dist/bundle.js @@ -1,9 +1,10 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.isBundleWithDsseEnvelope = exports.isBundleWithMessageSignature = exports.isBundleWithPublicKey = exports.isBundleWithCertificateChain = exports.BUNDLE_V03_MEDIA_TYPE = exports.BUNDLE_V02_MEDIA_TYPE = exports.BUNDLE_V01_MEDIA_TYPE = void 0; +exports.isBundleWithDsseEnvelope = exports.isBundleWithMessageSignature = exports.isBundleWithPublicKey = exports.isBundleWithCertificateChain = exports.BUNDLE_V03_MEDIA_TYPE = exports.BUNDLE_V03_LEGACY_MEDIA_TYPE = exports.BUNDLE_V02_MEDIA_TYPE = exports.BUNDLE_V01_MEDIA_TYPE = void 0; exports.BUNDLE_V01_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle+json;version=0.1'; exports.BUNDLE_V02_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle+json;version=0.2'; -exports.BUNDLE_V03_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle+json;version=0.3'; +exports.BUNDLE_V03_LEGACY_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle+json;version=0.3'; +exports.BUNDLE_V03_MEDIA_TYPE = 'application/vnd.dev.sigstore.bundle.v0.3+json'; // Type guards for bundle variants. 
function isBundleWithCertificateChain(b) { return b.verificationMaterial.content.$case === 'x509CertificateChain'; diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/index.js b/deps/npm/node_modules/@sigstore/bundle/dist/index.js index f2b50994e9b1f5..1b012acad4d85b 100644 --- a/deps/npm/node_modules/@sigstore/bundle/dist/index.js +++ b/deps/npm/node_modules/@sigstore/bundle/dist/index.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.isBundleV01 = exports.assertBundleV02 = exports.assertBundleV01 = exports.assertBundleLatest = exports.assertBundle = exports.envelopeToJSON = exports.envelopeFromJSON = exports.bundleToJSON = exports.bundleFromJSON = exports.ValidationError = exports.isBundleWithPublicKey = exports.isBundleWithMessageSignature = exports.isBundleWithDsseEnvelope = exports.isBundleWithCertificateChain = exports.BUNDLE_V03_MEDIA_TYPE = exports.BUNDLE_V02_MEDIA_TYPE = exports.BUNDLE_V01_MEDIA_TYPE = exports.toMessageSignatureBundle = exports.toDSSEBundle = void 0; +exports.isBundleV01 = exports.assertBundleV02 = exports.assertBundleV01 = exports.assertBundleLatest = exports.assertBundle = exports.envelopeToJSON = exports.envelopeFromJSON = exports.bundleToJSON = exports.bundleFromJSON = exports.ValidationError = exports.isBundleWithPublicKey = exports.isBundleWithMessageSignature = exports.isBundleWithDsseEnvelope = exports.isBundleWithCertificateChain = exports.BUNDLE_V03_MEDIA_TYPE = exports.BUNDLE_V03_LEGACY_MEDIA_TYPE = exports.BUNDLE_V02_MEDIA_TYPE = exports.BUNDLE_V01_MEDIA_TYPE = exports.toMessageSignatureBundle = exports.toDSSEBundle = void 0; /* Copyright 2023 The Sigstore Authors. @@ -22,6 +22,7 @@ Object.defineProperty(exports, "toMessageSignatureBundle", { enumerable: true, g var bundle_1 = require("./bundle"); Object.defineProperty(exports, "BUNDLE_V01_MEDIA_TYPE", { enumerable: true, get: function () { return bundle_1.BUNDLE_V01_MEDIA_TYPE; } }); Object.defineProperty(exports, "BUNDLE_V02_MEDIA_TYPE", { enumerable: true, get: function () { return bundle_1.BUNDLE_V02_MEDIA_TYPE; } }); +Object.defineProperty(exports, "BUNDLE_V03_LEGACY_MEDIA_TYPE", { enumerable: true, get: function () { return bundle_1.BUNDLE_V03_LEGACY_MEDIA_TYPE; } }); Object.defineProperty(exports, "BUNDLE_V03_MEDIA_TYPE", { enumerable: true, get: function () { return bundle_1.BUNDLE_V03_MEDIA_TYPE; } }); Object.defineProperty(exports, "isBundleWithCertificateChain", { enumerable: true, get: function () { return bundle_1.isBundleWithCertificateChain; } }); Object.defineProperty(exports, "isBundleWithDsseEnvelope", { enumerable: true, get: function () { return bundle_1.isBundleWithDsseEnvelope; } }); diff --git a/deps/npm/node_modules/@sigstore/bundle/dist/validate.js b/deps/npm/node_modules/@sigstore/bundle/dist/validate.js index 6a59ecc230f4ac..67079cd1f680a9 100644 --- a/deps/npm/node_modules/@sigstore/bundle/dist/validate.js +++ b/deps/npm/node_modules/@sigstore/bundle/dist/validate.js @@ -74,7 +74,8 @@ function validateBundleBase(b) { const invalidValues = []; // Media type validation if (b.mediaType === undefined || - !b.mediaType.startsWith('application/vnd.dev.sigstore.bundle+json;version=')) { + (!b.mediaType.match(/^application\/vnd\.dev\.sigstore\.bundle\+json;version=\d\.\d/) && + !b.mediaType.match(/^application\/vnd\.dev\.sigstore\.bundle\.v\d\.\d\+json/))) { invalidValues.push('mediaType'); } // Content-related validation diff --git a/deps/npm/node_modules/@sigstore/bundle/package.json 
b/deps/npm/node_modules/@sigstore/bundle/package.json index 2cac185f73895b..ab96ba7e8b2ca6 100644 --- a/deps/npm/node_modules/@sigstore/bundle/package.json +++ b/deps/npm/node_modules/@sigstore/bundle/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/bundle", - "version": "2.2.0", + "version": "2.3.1", "description": "Sigstore bundle type", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -27,7 +27,7 @@ "provenance": true }, "dependencies": { - "@sigstore/protobuf-specs": "^0.3.0" + "@sigstore/protobuf-specs": "^0.3.1" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/@sigstore/core/dist/crypto.js b/deps/npm/node_modules/@sigstore/core/dist/crypto.js index c5d899d003e1d4..dbe65b165d3574 100644 --- a/deps/npm/node_modules/@sigstore/core/dist/crypto.js +++ b/deps/npm/node_modules/@sigstore/core/dist/crypto.js @@ -21,12 +21,12 @@ limitations under the License. */ const crypto_1 = __importDefault(require("crypto")); const SHA256_ALGORITHM = 'sha256'; -function createPublicKey(key) { +function createPublicKey(key, type = 'spki') { if (typeof key === 'string') { return crypto_1.default.createPublicKey(key); } else { - return crypto_1.default.createPublicKey({ key, format: 'der', type: 'spki' }); + return crypto_1.default.createPublicKey({ key, format: 'der', type: type }); } } exports.createPublicKey = createPublicKey; diff --git a/deps/npm/node_modules/@sigstore/core/package.json b/deps/npm/node_modules/@sigstore/core/package.json index b9f901652ef0fd..621ff1715bcd1c 100644 --- a/deps/npm/node_modules/@sigstore/core/package.json +++ b/deps/npm/node_modules/@sigstore/core/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/core", - "version": "1.0.0", + "version": "1.1.0", "description": "Base library for Sigstore", "main": "dist/index.js", "types": "dist/index.d.ts", diff --git a/deps/npm/node_modules/@sigstore/protobuf-specs/dist/__generated__/sigstore_trustroot.js b/deps/npm/node_modules/@sigstore/protobuf-specs/dist/__generated__/sigstore_trustroot.js index 05e566767cdb24..9984f7879c73f1 100644 --- a/deps/npm/node_modules/@sigstore/protobuf-specs/dist/__generated__/sigstore_trustroot.js +++ b/deps/npm/node_modules/@sigstore/protobuf-specs/dist/__generated__/sigstore_trustroot.js @@ -1,6 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); -exports.TrustedRoot = exports.CertificateAuthority = exports.TransparencyLogInstance = void 0; +exports.ClientTrustConfig = exports.SigningConfig = exports.TrustedRoot = exports.CertificateAuthority = exports.TransparencyLogInstance = void 0; /* eslint-disable */ const sigstore_common_1 = require("./sigstore_common"); function createBaseTransparencyLogInstance() { @@ -98,6 +98,58 @@ exports.TrustedRoot = { return obj; }, }; +function createBaseSigningConfig() { + return { caUrl: "", oidcUrl: "", tlogUrls: [], tsaUrls: [] }; +} +exports.SigningConfig = { + fromJSON(object) { + return { + caUrl: isSet(object.caUrl) ? String(object.caUrl) : "", + oidcUrl: isSet(object.oidcUrl) ? String(object.oidcUrl) : "", + tlogUrls: Array.isArray(object?.tlogUrls) ? object.tlogUrls.map((e) => String(e)) : [], + tsaUrls: Array.isArray(object?.tsaUrls) ? 
object.tsaUrls.map((e) => String(e)) : [], + }; + }, + toJSON(message) { + const obj = {}; + message.caUrl !== undefined && (obj.caUrl = message.caUrl); + message.oidcUrl !== undefined && (obj.oidcUrl = message.oidcUrl); + if (message.tlogUrls) { + obj.tlogUrls = message.tlogUrls.map((e) => e); + } + else { + obj.tlogUrls = []; + } + if (message.tsaUrls) { + obj.tsaUrls = message.tsaUrls.map((e) => e); + } + else { + obj.tsaUrls = []; + } + return obj; + }, +}; +function createBaseClientTrustConfig() { + return { mediaType: "", trustedRoot: undefined, signingConfig: undefined }; +} +exports.ClientTrustConfig = { + fromJSON(object) { + return { + mediaType: isSet(object.mediaType) ? String(object.mediaType) : "", + trustedRoot: isSet(object.trustedRoot) ? exports.TrustedRoot.fromJSON(object.trustedRoot) : undefined, + signingConfig: isSet(object.signingConfig) ? exports.SigningConfig.fromJSON(object.signingConfig) : undefined, + }; + }, + toJSON(message) { + const obj = {}; + message.mediaType !== undefined && (obj.mediaType = message.mediaType); + message.trustedRoot !== undefined && + (obj.trustedRoot = message.trustedRoot ? exports.TrustedRoot.toJSON(message.trustedRoot) : undefined); + message.signingConfig !== undefined && + (obj.signingConfig = message.signingConfig ? exports.SigningConfig.toJSON(message.signingConfig) : undefined); + return obj; + }, +}; function isSet(value) { return value !== null && value !== undefined; } diff --git a/deps/npm/node_modules/@sigstore/protobuf-specs/package.json b/deps/npm/node_modules/@sigstore/protobuf-specs/package.json index 047a67a7a2e208..4fefe51c761fd8 100644 --- a/deps/npm/node_modules/@sigstore/protobuf-specs/package.json +++ b/deps/npm/node_modules/@sigstore/protobuf-specs/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/protobuf-specs", - "version": "0.3.0", + "version": "0.3.1", "description": "code-signing for npm packages", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -21,11 +21,11 @@ }, "homepage": "https://github.com/sigstore/protobuf-specs#readme", "devDependencies": { - "@tsconfig/node14": "^1.0.3", + "@tsconfig/node16": "^16.1.1", "@types/node": "^18.14.0", "typescript": "^4.9.5" }, "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + "node": "^16.14.0 || >=18.0.0" } } diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/bundle.js b/deps/npm/node_modules/@sigstore/sign/dist/bundler/bundle.js index f01aac252b304a..7c2ca9164f0dfe 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/bundler/bundle.js +++ b/deps/npm/node_modules/@sigstore/sign/dist/bundler/bundle.js @@ -56,7 +56,7 @@ function toMessageSignatureBundle(artifact, signature) { } exports.toMessageSignatureBundle = toMessageSignatureBundle; // DSSE envelope bundle - $case: 'dsseEnvelope' -function toDSSEBundle(artifact, signature) { +function toDSSEBundle(artifact, signature, singleCertificate) { return sigstore.toDSSEBundle({ artifact: artifact.data, artifactType: artifact.type, @@ -65,6 +65,7 @@ function toDSSEBundle(artifact, signature) { ? util_1.pem.toDER(signature.key.certificate) : undefined, keyHint: signature.key.$case === 'publicKey' ? 
signature.key.hint : undefined, + singleCertificate, }); } exports.toDSSEBundle = toDSSEBundle; diff --git a/deps/npm/node_modules/@sigstore/sign/dist/bundler/dsse.js b/deps/npm/node_modules/@sigstore/sign/dist/bundler/dsse.js index 486d289aea38cb..621700df93842a 100644 --- a/deps/npm/node_modules/@sigstore/sign/dist/bundler/dsse.js +++ b/deps/npm/node_modules/@sigstore/sign/dist/bundler/dsse.js @@ -23,6 +23,7 @@ const bundle_1 = require("./bundle"); class DSSEBundleBuilder extends base_1.BaseBundleBuilder { constructor(options) { super(options); + this.singleCertificate = options.singleCertificate ?? false; } // DSSE requires the artifact to be pre-encoded with the payload type // before the signature is generated. @@ -32,7 +33,7 @@ class DSSEBundleBuilder extends base_1.BaseBundleBuilder { } // Packages the artifact and signature into a DSSE bundle async package(artifact, signature) { - return (0, bundle_1.toDSSEBundle)(artifactDefaults(artifact), signature); + return (0, bundle_1.toDSSEBundle)(artifactDefaults(artifact), signature, this.singleCertificate); } } exports.DSSEBundleBuilder = DSSEBundleBuilder; diff --git a/deps/npm/node_modules/@sigstore/sign/package.json b/deps/npm/node_modules/@sigstore/sign/package.json index 4302f6e07a2a82..09eea0a39e8771 100644 --- a/deps/npm/node_modules/@sigstore/sign/package.json +++ b/deps/npm/node_modules/@sigstore/sign/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/sign", - "version": "2.2.3", + "version": "2.3.0", "description": "Sigstore signing library", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -27,14 +27,14 @@ }, "devDependencies": { "@sigstore/jest": "^0.0.0", - "@sigstore/mock": "^0.6.5", + "@sigstore/mock": "^0.7.0", "@sigstore/rekor-types": "^2.0.0", "@types/make-fetch-happen": "^10.0.4" }, "dependencies": { - "@sigstore/bundle": "^2.2.0", + "@sigstore/bundle": "^2.3.0", "@sigstore/core": "^1.0.0", - "@sigstore/protobuf-specs": "^0.3.0", + "@sigstore/protobuf-specs": "^0.3.1", "make-fetch-happen": "^13.0.0" }, "engines": { diff --git a/deps/npm/node_modules/@sigstore/verify/dist/tlog/intoto.js b/deps/npm/node_modules/@sigstore/verify/dist/tlog/intoto.js index e706887a95043a..74c7f50d763e1d 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/tlog/intoto.js +++ b/deps/npm/node_modules/@sigstore/verify/dist/tlog/intoto.js @@ -42,11 +42,12 @@ function verifyIntoto002TLogBody(tlogEntry, content) { // Signature is double-base64-encoded in the tlog entry const tlogSig = base64Decode(tlogEntry.spec.content.envelope.signatures[0].sig); // Ensure that the signature in the bundle's DSSE matches tlog entry - if (!content.compareSignature(Buffer.from(tlogSig, 'base64'))) + if (!content.compareSignature(Buffer.from(tlogSig, 'base64'))) { throw new error_1.VerificationError({ code: 'TLOG_BODY_ERROR', message: 'tlog entry signature mismatch', }); + } // Ensure the digest of the bundle's DSSE payload matches the digest in the // tlog entry const tlogHash = tlogEntry.spec.content.payloadHash?.value || ''; diff --git a/deps/npm/node_modules/@sigstore/verify/dist/trust/index.js b/deps/npm/node_modules/@sigstore/verify/dist/trust/index.js index 7991f351949a00..954de558415902 100644 --- a/deps/npm/node_modules/@sigstore/verify/dist/trust/index.js +++ b/deps/npm/node_modules/@sigstore/verify/dist/trust/index.js @@ -17,6 +17,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ const core_1 = require("@sigstore/core"); +const protobuf_specs_1 = require("@sigstore/protobuf-specs"); const error_1 = require("../error"); const BEGINNING_OF_TIME = new Date(0); const END_OF_TIME = new Date(8640000000000000); @@ -35,9 +36,17 @@ function toTrustMaterial(root, keys) { } exports.toTrustMaterial = toTrustMaterial; function createTLogAuthority(tlogInstance) { + const keyDetails = tlogInstance.publicKey.keyDetails; + const keyType = keyDetails === protobuf_specs_1.PublicKeyDetails.PKCS1_RSA_PKCS1V5 || + keyDetails === protobuf_specs_1.PublicKeyDetails.PKIX_RSA_PKCS1V5 || + keyDetails === protobuf_specs_1.PublicKeyDetails.PKIX_RSA_PKCS1V15_2048_SHA256 || + keyDetails === protobuf_specs_1.PublicKeyDetails.PKIX_RSA_PKCS1V15_3072_SHA256 || + keyDetails === protobuf_specs_1.PublicKeyDetails.PKIX_RSA_PKCS1V15_4096_SHA256 + ? 'pkcs1' + : 'spki'; return { logID: tlogInstance.logId.keyId, - publicKey: core_1.crypto.createPublicKey(tlogInstance.publicKey.rawBytes), + publicKey: core_1.crypto.createPublicKey(tlogInstance.publicKey.rawBytes, keyType), validFor: { start: tlogInstance.publicKey.validFor?.start || BEGINNING_OF_TIME, end: tlogInstance.publicKey.validFor?.end || END_OF_TIME, diff --git a/deps/npm/node_modules/@sigstore/verify/package.json b/deps/npm/node_modules/@sigstore/verify/package.json index dcfb587e084a6c..edd6566e10ece7 100644 --- a/deps/npm/node_modules/@sigstore/verify/package.json +++ b/deps/npm/node_modules/@sigstore/verify/package.json @@ -1,6 +1,6 @@ { "name": "@sigstore/verify", - "version": "1.1.0", + "version": "1.2.0", "description": "Verification of Sigstore signatures", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -26,9 +26,9 @@ "provenance": true }, "dependencies": { - "@sigstore/protobuf-specs": "^0.3.0", - "@sigstore/bundle": "^2.2.0", - "@sigstore/core": "^1.0.0" + "@sigstore/protobuf-specs": "^0.3.1", + "@sigstore/bundle": "^2.3.1", + "@sigstore/core": "^1.1.0" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/builtins/index.js b/deps/npm/node_modules/builtins/index.js index b715278437cbcf..01d23876aae325 100644 --- a/deps/npm/node_modules/builtins/index.js +++ b/deps/npm/node_modules/builtins/index.js @@ -1,6 +1,6 @@ 'use strict' -const semver = require('semver') +const satisfies = require('semver/functions/satisfies') const permanentModules = [ 'assert', @@ -60,7 +60,7 @@ module.exports = ({ version = process.version, experimental = false } = {}) => { const builtins = [...permanentModules] for (const [name, semverRange] of Object.entries(versionLockedModules)) { - if (version === '*' || semver.satisfies(version, semverRange)) { + if (version === '*' || satisfies(version, semverRange)) { builtins.push(name) } } @@ -69,7 +69,7 @@ module.exports = ({ version = process.version, experimental = false } = {}) => { for (const [name, semverRange] of Object.entries(experimentalModules)) { if ( !builtins.includes(name) && - (version === '*' || semver.satisfies(version, semverRange)) + (version === '*' || satisfies(version, semverRange)) ) { builtins.push(name) } diff --git a/deps/npm/node_modules/builtins/package.json b/deps/npm/node_modules/builtins/package.json index 1c43660c7483fe..d37e02e0768180 100644 --- a/deps/npm/node_modules/builtins/package.json +++ b/deps/npm/node_modules/builtins/package.json @@ -1,20 +1,19 @@ { "name": "builtins", - "version": "5.0.1", + "version": "5.1.0", "description": "List of node.js builtin modules", "repository": "juliangruber/builtins", "license": "MIT", "main": "index.js", 
"files": [], "scripts": { - "test": "prettier-standard && standard && node-core-test" + "test": "standard --fix && node--test" }, "dependencies": { "semver": "^7.0.0" }, "devDependencies": { - "node-core-test": "^1.4.0", - "prettier-standard": "^15.0.1", - "standard": "^14.3.4" + "standard": "^17.0.0", + "test": "^3.0.0" } } diff --git a/deps/npm/node_modules/cidr-regex/package.json b/deps/npm/node_modules/cidr-regex/package.json index 4f743464075e3e..262da56e2ee676 100644 --- a/deps/npm/node_modules/cidr-regex/package.json +++ b/deps/npm/node_modules/cidr-regex/package.json @@ -1,6 +1,6 @@ { "name": "cidr-regex", - "version": "4.0.3", + "version": "4.0.5", "description": "Regular expression for matching IP addresses in CIDR notation", "author": "silverwind ", "contributors": [ @@ -22,11 +22,12 @@ "ip-regex": "^5.0.0" }, "devDependencies": { - "eslint": "8.37.0", - "eslint-config-silverwind": "65.1.3", - "tsd": "0.28.1", - "updates": "13.2.9", - "versions": "10.4.2", - "vitest": "0.29.8" + "eslint": "8.57.0", + "eslint-config-silverwind": "83.0.1", + "tsd": "0.31.0", + "updates": "16.0.0", + "versions": "12.0.1", + "vitest": "1.4.0", + "vitest-config-silverwind": "7.0.3" } } diff --git a/deps/npm/node_modules/hasown/package.json b/deps/npm/node_modules/hasown/package.json index 1b03e9d3018bde..8502e13dd5c835 100644 --- a/deps/npm/node_modules/hasown/package.json +++ b/deps/npm/node_modules/hasown/package.json @@ -1,6 +1,6 @@ { "name": "hasown", - "version": "2.0.1", + "version": "2.0.2", "description": "A robust, ES3 compatible, \"has own property\" predicate.", "main": "index.js", "exports": { @@ -18,6 +18,7 @@ "postlint": "npm run tsc", "pretest": "npm run lint", "tsc": "tsc -p .", + "posttsc": "attw -P", "tests-only": "nyc tape 'test/**/*.js'", "test": "npm run tests-only", "posttest": "aud --production", @@ -50,7 +51,9 @@ "function-bind": "^1.1.2" }, "devDependencies": { + "@arethetypeswrong/cli": "^0.15.1", "@ljharb/eslint-config": "^21.1.0", + "@ljharb/tsconfig": "^0.2.0", "@types/function-bind": "^1.1.10", "@types/mock-property": "^1.0.2", "@types/tape": "^5.6.4", @@ -63,7 +66,7 @@ "npmignore": "^0.3.1", "nyc": "^10.3.2", "safe-publish-latest": "^2.0.0", - "tape": "^5.7.4", + "tape": "^5.7.5", "typescript": "next" }, "engines": { diff --git a/deps/npm/node_modules/hasown/tsconfig.json b/deps/npm/node_modules/hasown/tsconfig.json index fdab34fe311577..0930c565850326 100644 --- a/deps/npm/node_modules/hasown/tsconfig.json +++ b/deps/npm/node_modules/hasown/tsconfig.json @@ -1,49 +1,6 @@ { - "compilerOptions": { - /* Visit https://aka.ms/tsconfig to read more about this file */ - - /* Projects */ - - /* Language and Environment */ - "target": "es2016", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */ - // "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */ - // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */ - "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */ - // "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */ - - /* Modules */ - "module": "commonjs", /* Specify what module code is generated. */ - // "rootDir": "./", /* Specify the root folder within your source files. */ - // "moduleResolution": "node10", /* Specify how TypeScript looks up a file from a given module specifier. 
*/ - // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */ - // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */ - // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */ - "typeRoots": ["types"], /* Specify multiple folders that act like './node_modules/@types'. */ - "resolveJsonModule": true, /* Enable importing .json files. */ - // "allowArbitraryExtensions": true, /* Enable importing files with any extension, provided a declaration file is present. */ - - /* JavaScript Support */ - "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */ - "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */ - "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */ - - /* Emit */ - "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */ - "declarationMap": true, /* Create sourcemaps for d.ts files. */ - "noEmit": true, /* Disable emitting files from a compilation. */ - - /* Interop Constraints */ - "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */ - "esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */ - "forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */ - - /* Type Checking */ - "strict": true, /* Enable all strict type-checking options. */ - - /* Completeness */ - //"skipLibCheck": true /* Skip type checking all .d.ts files. */ - }, + "extends": "@ljharb/tsconfig", "exclude": [ - "coverage" - ] + "coverage", + ], } diff --git a/deps/npm/node_modules/is-cidr/LICENSE b/deps/npm/node_modules/is-cidr/LICENSE deleted file mode 100644 index 9669c20f85511d..00000000000000 --- a/deps/npm/node_modules/is-cidr/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) silverwind -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/deps/npm/node_modules/is-cidr/package.json b/deps/npm/node_modules/is-cidr/package.json index d6971b3b33bd83..baf6fa55fe4522 100644 --- a/deps/npm/node_modules/is-cidr/package.json +++ b/deps/npm/node_modules/is-cidr/package.json @@ -1,6 +1,6 @@ { "name": "is-cidr", - "version": "5.0.3", + "version": "5.0.5", "description": "Check if a string is an IP address in CIDR notation", "author": "silverwind ", "contributors": [ @@ -19,14 +19,15 @@ "index.d.ts" ], "dependencies": { - "cidr-regex": "4.0.3" + "cidr-regex": "^4.0.4" }, "devDependencies": { - "eslint": "8.37.0", - "eslint-config-silverwind": "65.1.3", - "tsd": "0.28.1", - "updates": "13.2.9", - "versions": "10.4.2", - "vitest": "0.29.8" + "eslint": "8.57.0", + "eslint-config-silverwind": "83.0.1", + "tsd": "0.31.0", + "updates": "16.0.0", + "versions": "12.0.1", + "vitest": "1.4.0", + "vitest-config-silverwind": "7.0.3" } } diff --git a/deps/npm/node_modules/libnpmdiff/package.json b/deps/npm/node_modules/libnpmdiff/package.json index f6ad40b482a934..98229e99bd5618 100644 --- a/deps/npm/node_modules/libnpmdiff/package.json +++ b/deps/npm/node_modules/libnpmdiff/package.json @@ -1,6 +1,6 @@ { "name": "libnpmdiff", - "version": "6.0.8", + "version": "6.0.9", "description": "The registry diff", "repository": { "type": "git", diff --git a/deps/npm/node_modules/libnpmexec/package.json b/deps/npm/node_modules/libnpmexec/package.json index 8b6a9d217a00d0..39f12270e35a7e 100644 --- a/deps/npm/node_modules/libnpmexec/package.json +++ b/deps/npm/node_modules/libnpmexec/package.json @@ -1,6 +1,6 @@ { "name": "libnpmexec", - "version": "7.0.9", + "version": "7.0.10", "files": [ "bin/", "lib/" diff --git a/deps/npm/node_modules/libnpmfund/package.json b/deps/npm/node_modules/libnpmfund/package.json index 994538c89fce17..978252999e92eb 100644 --- a/deps/npm/node_modules/libnpmfund/package.json +++ b/deps/npm/node_modules/libnpmfund/package.json @@ -1,6 +1,6 @@ { "name": "libnpmfund", - "version": "5.0.6", + "version": "5.0.7", "main": "lib/index.js", "files": [ "bin/", diff --git a/deps/npm/node_modules/libnpmpack/package.json b/deps/npm/node_modules/libnpmpack/package.json index d49a17aa39f838..1782ab7143186a 100644 --- a/deps/npm/node_modules/libnpmpack/package.json +++ b/deps/npm/node_modules/libnpmpack/package.json @@ -1,6 +1,6 @@ { "name": "libnpmpack", - "version": "6.0.8", + "version": "6.0.9", "description": "Programmatic API for the bits behind npm pack", "author": "GitHub Inc.", "main": "lib/index.js", diff --git a/deps/npm/node_modules/postcss-selector-parser/package.json b/deps/npm/node_modules/postcss-selector-parser/package.json index d1f6be84cc5c77..0d1af5d373f20e 100644 --- a/deps/npm/node_modules/postcss-selector-parser/package.json +++ b/deps/npm/node_modules/postcss-selector-parser/package.json @@ -1,6 +1,6 @@ { "name": "postcss-selector-parser", - "version": "6.0.15", + "version": "6.0.16", "devDependencies": { "@babel/cli": "^7.11.6", "@babel/core": "^7.11.6", @@ -33,7 +33,8 @@ "!**/__tests__" ], "scripts": { - "pretest": "eslint src && tsc --noEmit postcss-selector-parser.d.ts", + "typecheck": "tsc --noEmit --strict postcss-selector-parser.d.ts postcss-selector-parser.test.ts", + "pretest": "eslint src && npm run typecheck", "prepare": "del-cli dist && BABEL_ENV=publish babel src --out-dir dist --ignore /__tests__/", "lintfix": "eslint --fix src", "report": "nyc report --reporter=html", diff --git a/deps/npm/node_modules/sigstore/package.json b/deps/npm/node_modules/sigstore/package.json index 
3dca00636b8d9d..f39fc63b5a91de 100644 --- a/deps/npm/node_modules/sigstore/package.json +++ b/deps/npm/node_modules/sigstore/package.json @@ -1,6 +1,6 @@ { "name": "sigstore", - "version": "2.2.2", + "version": "2.3.0", "description": "code-signing for npm packages", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -29,17 +29,17 @@ "devDependencies": { "@sigstore/rekor-types": "^2.0.0", "@sigstore/jest": "^0.0.0", - "@sigstore/mock": "^0.6.5", + "@sigstore/mock": "^0.7.0", "@tufjs/repo-mock": "^2.0.0", "@types/make-fetch-happen": "^10.0.4" }, "dependencies": { - "@sigstore/bundle": "^2.2.0", + "@sigstore/bundle": "^2.3.1", "@sigstore/core": "^1.0.0", - "@sigstore/protobuf-specs": "^0.3.0", - "@sigstore/sign": "^2.2.3", + "@sigstore/protobuf-specs": "^0.3.1", + "@sigstore/sign": "^2.3.0", "@sigstore/tuf": "^2.3.1", - "@sigstore/verify": "^1.1.0" + "@sigstore/verify": "^1.2.0" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/socks-proxy-agent/LICENSE b/deps/npm/node_modules/socks-proxy-agent/LICENSE new file mode 100644 index 00000000000000..008728cb51847d --- /dev/null +++ b/deps/npm/node_modules/socks-proxy-agent/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2013 Nathan Rajlich + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/deps/npm/node_modules/socks-proxy-agent/dist/index.js b/deps/npm/node_modules/socks-proxy-agent/dist/index.js index 102ecd64c0b9d6..efd03ec9a7fe76 100644 --- a/deps/npm/node_modules/socks-proxy-agent/dist/index.js +++ b/deps/npm/node_modules/socks-proxy-agent/dist/index.js @@ -94,6 +94,7 @@ class SocksProxyAgent extends agent_base_1.Agent { this.shouldLookup = lookup; this.proxy = proxy; this.timeout = opts?.timeout ?? null; + this.socketOptions = opts?.socketOptions ?? null; } /** * Initiates a SOCKS connection to the specified SOCKS proxy server, @@ -128,6 +129,9 @@ class SocksProxyAgent extends agent_base_1.Agent { }, command: 'connect', timeout: timeout ?? undefined, + // @ts-expect-error the type supplied by socks for socket_options is wider + // than necessary since socks will always override the host and port + socket_options: this.socketOptions ?? 
undefined, }; const cleanup = (tlsSocket) => { req.destroy(); diff --git a/deps/npm/node_modules/socks-proxy-agent/package.json b/deps/npm/node_modules/socks-proxy-agent/package.json index 6e1c2c12de3674..090ec3e46b2798 100644 --- a/deps/npm/node_modules/socks-proxy-agent/package.json +++ b/deps/npm/node_modules/socks-proxy-agent/package.json @@ -1,6 +1,6 @@ { "name": "socks-proxy-agent", - "version": "8.0.2", + "version": "8.0.3", "description": "A SOCKS proxy `http.Agent` implementation for HTTP and HTTPS", "main": "./dist/index.js", "types": "./dist/index.d.ts", @@ -107,7 +107,7 @@ "socks5h" ], "dependencies": { - "agent-base": "^7.0.2", + "agent-base": "^7.1.1", "debug": "^4.3.4", "socks": "^2.7.1" }, diff --git a/deps/npm/node_modules/socks/build/common/helpers.js b/deps/npm/node_modules/socks/build/common/helpers.js index 65bd95bdc62215..1ae44e4159a155 100644 --- a/deps/npm/node_modules/socks/build/common/helpers.js +++ b/deps/npm/node_modules/socks/build/common/helpers.js @@ -152,7 +152,11 @@ function ipToBuffer(ip) { else if (net.isIPv6(ip)) { // Handle IPv6 addresses const address = new ip_address_1.Address6(ip); - return Buffer.from(address.toByteArray()); + return Buffer.from(address + .canonicalForm() + .split(':') + .map((segment) => segment.padStart(4, '0')) + .join(''), 'hex'); } else { throw new Error('Invalid IP address format'); diff --git a/deps/npm/node_modules/socks/package.json b/deps/npm/node_modules/socks/package.json index dbda909fd0787c..5cc2a6836072e5 100644 --- a/deps/npm/node_modules/socks/package.json +++ b/deps/npm/node_modules/socks/package.json @@ -1,7 +1,7 @@ { "name": "socks", "private": false, - "version": "2.8.0", + "version": "2.8.3", "description": "Fully featured SOCKS proxy client supporting SOCKSv4, SOCKSv4a, and SOCKSv5. Includes Bind and Associate functionality.", "main": "build/index.js", "typings": "typings/index.d.ts", @@ -23,7 +23,7 @@ "socks5" ], "engines": { - "node": ">= 16.0.0", + "node": ">= 10.0.0", "npm": ">= 3.0.0" }, "author": "Josh Glazebrook", @@ -52,6 +52,7 @@ "test": "NODE_ENV=test mocha --recursive --require ts-node/register test/**/*.ts", "prettier": "prettier --write ./src/**/*.ts --config .prettierrc.yaml", "lint": "eslint 'src/**/*.ts'", - "build": "rm -rf build typings && prettier --write ./src/**/*.ts --config .prettierrc.yaml && tsc -p ." + "build": "rm -rf build typings && prettier --write ./src/**/*.ts --config .prettierrc.yaml && tsc -p .", + "build-raw": "rm -rf build typings && tsc -p ." } } diff --git a/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/AUTHORS b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/AUTHORS new file mode 100644 index 00000000000000..257a76b9484c12 --- /dev/null +++ b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/AUTHORS @@ -0,0 +1,4 @@ +C. Scott Ananian (http://cscott.net) +Kyle E. Mitchell (https://kemitchell.com) +Shinnosuke Watanabe +Antoine Motet diff --git a/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/LICENSE b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/LICENSE new file mode 100644 index 00000000000000..831618eaba6c89 --- /dev/null +++ b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2015 Kyle E. 
Mitchell & other authors listed in AUTHORS + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/index.js b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/index.js new file mode 100644 index 00000000000000..52fab560aea707 --- /dev/null +++ b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/index.js @@ -0,0 +1,8 @@ +'use strict' + +var scan = require('./scan') +var parse = require('./parse') + +module.exports = function (source) { + return parse(scan(source)) +} diff --git a/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/package.json b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/package.json new file mode 100644 index 00000000000000..c9edc9f939cdf6 --- /dev/null +++ b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/package.json @@ -0,0 +1,39 @@ +{ + "name": "spdx-expression-parse", + "description": "parse SPDX license expressions", + "version": "3.0.1", + "author": "Kyle E. Mitchell (https://kemitchell.com)", + "files": [ + "AUTHORS", + "index.js", + "parse.js", + "scan.js" + ], + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + }, + "devDependencies": { + "defence-cli": "^3.0.1", + "replace-require-self": "^1.0.0", + "standard": "^14.1.0" + }, + "keywords": [ + "SPDX", + "law", + "legal", + "license", + "metadata", + "package", + "package.json", + "standards" + ], + "license": "MIT", + "repository": "jslicense/spdx-expression-parse.js", + "scripts": { + "lint": "standard", + "test:readme": "defence -i javascript README.md | replace-require-self | node", + "test:suite": "node test.js", + "test": "npm run test:suite && npm run test:readme" + } +} diff --git a/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/parse.js b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/parse.js new file mode 100644 index 00000000000000..5a00b45c5799c4 --- /dev/null +++ b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/parse.js @@ -0,0 +1,138 @@ +'use strict' + +// The ABNF grammar in the spec is totally ambiguous. +// +// This parser follows the operator precedence defined in the +// `Order of Precedence and Parentheses` section. + +module.exports = function (tokens) { + var index = 0 + + function hasMore () { + return index < tokens.length + } + + function token () { + return hasMore() ? 
tokens[index] : null + } + + function next () { + if (!hasMore()) { + throw new Error() + } + index++ + } + + function parseOperator (operator) { + var t = token() + if (t && t.type === 'OPERATOR' && operator === t.string) { + next() + return t.string + } + } + + function parseWith () { + if (parseOperator('WITH')) { + var t = token() + if (t && t.type === 'EXCEPTION') { + next() + return t.string + } + throw new Error('Expected exception after `WITH`') + } + } + + function parseLicenseRef () { + // TODO: Actually, everything is concatenated into one string + // for backward-compatibility but it could be better to return + // a nice structure. + var begin = index + var string = '' + var t = token() + if (t.type === 'DOCUMENTREF') { + next() + string += 'DocumentRef-' + t.string + ':' + if (!parseOperator(':')) { + throw new Error('Expected `:` after `DocumentRef-...`') + } + } + t = token() + if (t.type === 'LICENSEREF') { + next() + string += 'LicenseRef-' + t.string + return { license: string } + } + index = begin + } + + function parseLicense () { + var t = token() + if (t && t.type === 'LICENSE') { + next() + var node = { license: t.string } + if (parseOperator('+')) { + node.plus = true + } + var exception = parseWith() + if (exception) { + node.exception = exception + } + return node + } + } + + function parseParenthesizedExpression () { + var left = parseOperator('(') + if (!left) { + return + } + + var expr = parseExpression() + + if (!parseOperator(')')) { + throw new Error('Expected `)`') + } + + return expr + } + + function parseAtom () { + return ( + parseParenthesizedExpression() || + parseLicenseRef() || + parseLicense() + ) + } + + function makeBinaryOpParser (operator, nextParser) { + return function parseBinaryOp () { + var left = nextParser() + if (!left) { + return + } + + if (!parseOperator(operator)) { + return left + } + + var right = parseBinaryOp() + if (!right) { + throw new Error('Expected expression') + } + return { + left: left, + conjunction: operator.toLowerCase(), + right: right + } + } + } + + var parseAnd = makeBinaryOpParser('AND', parseAtom) + var parseExpression = makeBinaryOpParser('OR', parseAnd) + + var node = parseExpression() + if (!node || hasMore()) { + throw new Error('Syntax error') + } + return node +} diff --git a/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/scan.js b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/scan.js new file mode 100644 index 00000000000000..b74fce2e2c6632 --- /dev/null +++ b/deps/npm/node_modules/spdx-correct/node_modules/spdx-expression-parse/scan.js @@ -0,0 +1,131 @@ +'use strict' + +var licenses = [] + .concat(require('spdx-license-ids')) + .concat(require('spdx-license-ids/deprecated')) +var exceptions = require('spdx-exceptions') + +module.exports = function (source) { + var index = 0 + + function hasMore () { + return index < source.length + } + + // `value` can be a regexp or a string. + // If it is recognized, the matching source string is returned and + // the index is incremented. Otherwise `undefined` is returned. 
+ function read (value) { + if (value instanceof RegExp) { + var chars = source.slice(index) + var match = chars.match(value) + if (match) { + index += match[0].length + return match[0] + } + } else { + if (source.indexOf(value, index) === index) { + index += value.length + return value + } + } + } + + function skipWhitespace () { + read(/[ ]*/) + } + + function operator () { + var string + var possibilities = ['WITH', 'AND', 'OR', '(', ')', ':', '+'] + for (var i = 0; i < possibilities.length; i++) { + string = read(possibilities[i]) + if (string) { + break + } + } + + if (string === '+' && index > 1 && source[index - 2] === ' ') { + throw new Error('Space before `+`') + } + + return string && { + type: 'OPERATOR', + string: string + } + } + + function idstring () { + return read(/[A-Za-z0-9-.]+/) + } + + function expectIdstring () { + var string = idstring() + if (!string) { + throw new Error('Expected idstring at offset ' + index) + } + return string + } + + function documentRef () { + if (read('DocumentRef-')) { + var string = expectIdstring() + return { type: 'DOCUMENTREF', string: string } + } + } + + function licenseRef () { + if (read('LicenseRef-')) { + var string = expectIdstring() + return { type: 'LICENSEREF', string: string } + } + } + + function identifier () { + var begin = index + var string = idstring() + + if (licenses.indexOf(string) !== -1) { + return { + type: 'LICENSE', + string: string + } + } else if (exceptions.indexOf(string) !== -1) { + return { + type: 'EXCEPTION', + string: string + } + } + + index = begin + } + + // Tries to read the next token. Returns `undefined` if no token is + // recognized. + function parseToken () { + // Ordering matters + return ( + operator() || + documentRef() || + licenseRef() || + identifier() + ) + } + + var tokens = [] + while (hasMore()) { + skipWhitespace() + if (!hasMore()) { + break + } + + var token = parseToken() + if (!token) { + throw new Error('Unexpected `' + source[index] + + '` at offset ' + index) + } + + tokens.push(token) + } + return tokens +} diff --git a/deps/npm/node_modules/spdx-expression-parse/package.json b/deps/npm/node_modules/spdx-expression-parse/package.json index c9edc9f939cdf6..c3a22afcf7dfcb 100644 --- a/deps/npm/node_modules/spdx-expression-parse/package.json +++ b/deps/npm/node_modules/spdx-expression-parse/package.json @@ -1,7 +1,7 @@ { "name": "spdx-expression-parse", "description": "parse SPDX license expressions", - "version": "3.0.1", + "version": "4.0.0", "author": "Kyle E. 
Mitchell (https://kemitchell.com)", "files": [ "AUTHORS", diff --git a/deps/npm/node_modules/spdx-expression-parse/scan.js b/deps/npm/node_modules/spdx-expression-parse/scan.js index b74fce2e2c6632..528522282703c6 100644 --- a/deps/npm/node_modules/spdx-expression-parse/scan.js +++ b/deps/npm/node_modules/spdx-expression-parse/scan.js @@ -37,7 +37,7 @@ module.exports = function (source) { function operator () { var string - var possibilities = ['WITH', 'AND', 'OR', '(', ')', ':', '+'] + var possibilities = [/^WITH/i, /^AND/i, /^OR/i, '(', ')', ':', '+'] for (var i = 0; i < possibilities.length; i++) { string = read(possibilities[i]) if (string) { @@ -51,7 +51,7 @@ module.exports = function (source) { return string && { type: 'OPERATOR', - string: string + string: string.toUpperCase() } } diff --git a/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/AUTHORS b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/AUTHORS new file mode 100644 index 00000000000000..257a76b9484c12 --- /dev/null +++ b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/AUTHORS @@ -0,0 +1,4 @@ +C. Scott Ananian (http://cscott.net) +Kyle E. Mitchell (https://kemitchell.com) +Shinnosuke Watanabe +Antoine Motet diff --git a/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/LICENSE b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/LICENSE new file mode 100644 index 00000000000000..831618eaba6c89 --- /dev/null +++ b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2015 Kyle E. Mitchell & other authors listed in AUTHORS + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/index.js b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/index.js new file mode 100644 index 00000000000000..52fab560aea707 --- /dev/null +++ b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/index.js @@ -0,0 +1,8 @@ +'use strict' + +var scan = require('./scan') +var parse = require('./parse') + +module.exports = function (source) { + return parse(scan(source)) +} diff --git a/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/package.json b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/package.json new file mode 100644 index 00000000000000..c9edc9f939cdf6 --- /dev/null +++ b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/package.json @@ -0,0 +1,39 @@ +{ + "name": "spdx-expression-parse", + "description": "parse SPDX license expressions", + "version": "3.0.1", + "author": "Kyle E. Mitchell (https://kemitchell.com)", + "files": [ + "AUTHORS", + "index.js", + "parse.js", + "scan.js" + ], + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + }, + "devDependencies": { + "defence-cli": "^3.0.1", + "replace-require-self": "^1.0.0", + "standard": "^14.1.0" + }, + "keywords": [ + "SPDX", + "law", + "legal", + "license", + "metadata", + "package", + "package.json", + "standards" + ], + "license": "MIT", + "repository": "jslicense/spdx-expression-parse.js", + "scripts": { + "lint": "standard", + "test:readme": "defence -i javascript README.md | replace-require-self | node", + "test:suite": "node test.js", + "test": "npm run test:suite && npm run test:readme" + } +} diff --git a/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/parse.js b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/parse.js new file mode 100644 index 00000000000000..5a00b45c5799c4 --- /dev/null +++ b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/parse.js @@ -0,0 +1,138 @@ +'use strict' + +// The ABNF grammar in the spec is totally ambiguous. +// +// This parser follows the operator precedence defined in the +// `Order of Precedence and Parentheses` section. + +module.exports = function (tokens) { + var index = 0 + + function hasMore () { + return index < tokens.length + } + + function token () { + return hasMore() ? tokens[index] : null + } + + function next () { + if (!hasMore()) { + throw new Error() + } + index++ + } + + function parseOperator (operator) { + var t = token() + if (t && t.type === 'OPERATOR' && operator === t.string) { + next() + return t.string + } + } + + function parseWith () { + if (parseOperator('WITH')) { + var t = token() + if (t && t.type === 'EXCEPTION') { + next() + return t.string + } + throw new Error('Expected exception after `WITH`') + } + } + + function parseLicenseRef () { + // TODO: Actually, everything is concatenated into one string + // for backward-compatibility but it could be better to return + // a nice structure. 
+ var begin = index + var string = '' + var t = token() + if (t.type === 'DOCUMENTREF') { + next() + string += 'DocumentRef-' + t.string + ':' + if (!parseOperator(':')) { + throw new Error('Expected `:` after `DocumentRef-...`') + } + } + t = token() + if (t.type === 'LICENSEREF') { + next() + string += 'LicenseRef-' + t.string + return { license: string } + } + index = begin + } + + function parseLicense () { + var t = token() + if (t && t.type === 'LICENSE') { + next() + var node = { license: t.string } + if (parseOperator('+')) { + node.plus = true + } + var exception = parseWith() + if (exception) { + node.exception = exception + } + return node + } + } + + function parseParenthesizedExpression () { + var left = parseOperator('(') + if (!left) { + return + } + + var expr = parseExpression() + + if (!parseOperator(')')) { + throw new Error('Expected `)`') + } + + return expr + } + + function parseAtom () { + return ( + parseParenthesizedExpression() || + parseLicenseRef() || + parseLicense() + ) + } + + function makeBinaryOpParser (operator, nextParser) { + return function parseBinaryOp () { + var left = nextParser() + if (!left) { + return + } + + if (!parseOperator(operator)) { + return left + } + + var right = parseBinaryOp() + if (!right) { + throw new Error('Expected expression') + } + return { + left: left, + conjunction: operator.toLowerCase(), + right: right + } + } + } + + var parseAnd = makeBinaryOpParser('AND', parseAtom) + var parseExpression = makeBinaryOpParser('OR', parseAnd) + + var node = parseExpression() + if (!node || hasMore()) { + throw new Error('Syntax error') + } + return node +} diff --git a/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/scan.js b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/scan.js new file mode 100644 index 00000000000000..b74fce2e2c6632 --- /dev/null +++ b/deps/npm/node_modules/validate-npm-package-license/node_modules/spdx-expression-parse/scan.js @@ -0,0 +1,131 @@ +'use strict' + +var licenses = [] + .concat(require('spdx-license-ids')) + .concat(require('spdx-license-ids/deprecated')) +var exceptions = require('spdx-exceptions') + +module.exports = function (source) { + var index = 0 + + function hasMore () { + return index < source.length + } + + // `value` can be a regexp or a string. + // If it is recognized, the matching source string is returned and + // the index is incremented. Otherwise `undefined` is returned. 
+ function read (value) { + if (value instanceof RegExp) { + var chars = source.slice(index) + var match = chars.match(value) + if (match) { + index += match[0].length + return match[0] + } + } else { + if (source.indexOf(value, index) === index) { + index += value.length + return value + } + } + } + + function skipWhitespace () { + read(/[ ]*/) + } + + function operator () { + var string + var possibilities = ['WITH', 'AND', 'OR', '(', ')', ':', '+'] + for (var i = 0; i < possibilities.length; i++) { + string = read(possibilities[i]) + if (string) { + break + } + } + + if (string === '+' && index > 1 && source[index - 2] === ' ') { + throw new Error('Space before `+`') + } + + return string && { + type: 'OPERATOR', + string: string + } + } + + function idstring () { + return read(/[A-Za-z0-9-.]+/) + } + + function expectIdstring () { + var string = idstring() + if (!string) { + throw new Error('Expected idstring at offset ' + index) + } + return string + } + + function documentRef () { + if (read('DocumentRef-')) { + var string = expectIdstring() + return { type: 'DOCUMENTREF', string: string } + } + } + + function licenseRef () { + if (read('LicenseRef-')) { + var string = expectIdstring() + return { type: 'LICENSEREF', string: string } + } + } + + function identifier () { + var begin = index + var string = idstring() + + if (licenses.indexOf(string) !== -1) { + return { + type: 'LICENSE', + string: string + } + } else if (exceptions.indexOf(string) !== -1) { + return { + type: 'EXCEPTION', + string: string + } + } + + index = begin + } + + // Tries to read the next token. Returns `undefined` if no token is + // recognized. + function parseToken () { + // Ordering matters + return ( + operator() || + documentRef() || + licenseRef() || + identifier() + ) + } + + var tokens = [] + while (hasMore()) { + skipWhitespace() + if (!hasMore()) { + break + } + + var token = parseToken() + if (!token) { + throw new Error('Unexpected `' + source[index] + + '` at offset ' + index) + } + + tokens.push(token) + } + return tokens +} diff --git a/deps/npm/package.json b/deps/npm/package.json index 5fcd785d31bffd..d157883a10bbc7 100644 --- a/deps/npm/package.json +++ b/deps/npm/package.json @@ -1,5 +1,5 @@ { - "version": "10.5.1", + "version": "10.5.2", "name": "npm", "description": "a package manager for JavaScript", "workspaces": [ @@ -55,8 +55,8 @@ "@npmcli/arborist": "^7.2.1", "@npmcli/config": "^8.0.2", "@npmcli/fs": "^3.1.0", - "@npmcli/map-workspaces": "^3.0.4", - "@npmcli/package-json": "^5.0.0", + "@npmcli/map-workspaces": "^3.0.6", + "@npmcli/package-json": "^5.0.2", "@npmcli/promise-spawn": "^7.0.1", "@npmcli/redact": "^1.1.0", "@npmcli/run-script": "^7.0.4", @@ -76,7 +76,7 @@ "hosted-git-info": "^7.0.1", "ini": "^4.1.2", "init-package-json": "^6.0.2", - "is-cidr": "^5.0.3", + "is-cidr": "^5.0.5", "json-parse-even-better-errors": "^3.0.1", "libnpmaccess": "^8.0.1", "libnpmdiff": "^6.0.3", @@ -112,7 +112,7 @@ "qrcode-terminal": "^0.12.0", "read": "^3.0.1", "semver": "^7.6.0", - "spdx-expression-parse": "^3.0.1", + "spdx-expression-parse": "^4.0.0", "ssri": "^10.0.5", "supports-color": "^9.4.0", "tar": "^6.2.1", @@ -199,7 +199,7 @@ "devDependencies": { "@npmcli/docs": "^1.0.0", "@npmcli/eslint-config": "^4.0.2", - "@npmcli/git": "^5.0.4", + "@npmcli/git": "^5.0.5", "@npmcli/mock-globals": "^1.0.0", "@npmcli/mock-registry": "^1.0.0", "@npmcli/template-oss": "4.21.3", diff --git a/deps/npm/tap-snapshots/test/lib/commands/config.js.test.cjs 
b/deps/npm/tap-snapshots/test/lib/commands/config.js.test.cjs index 79518c2b8c8670..9d67091f7a0d4e 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/config.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/config.js.test.cjs @@ -34,8 +34,6 @@ exports[`test/lib/commands/config.js TAP config list --json > output matches sna "color": true, "commit-hooks": true, "cpu": null, - "os": null, - "libc": null, "depth": null, "description": true, "dev": false, @@ -50,8 +48,8 @@ exports[`test/lib/commands/config.js TAP config list --json > output matches sna "dry-run": false, "editor": "{EDITOR}", "engine-strict": false, - "expect-results": null, "expect-result-count": null, + "expect-results": null, "fetch-retries": 2, "fetch-retry-factor": 10, "fetch-retry-maxtimeout": 60000, @@ -90,10 +88,9 @@ exports[`test/lib/commands/config.js TAP config list --json > output matches sna "key": null, "legacy-bundling": false, "legacy-peer-deps": false, + "libc": null, "link": false, "local-address": null, - "sbom-format": null, - "sbom-type": "library", "location": "user", "lockfile-version": null, "loglevel": "notice", @@ -111,6 +108,7 @@ exports[`test/lib/commands/config.js TAP config list --json > output matches sna "omit-lockfile-registry-resolved": false, "only": null, "optional": null, + "os": null, "otp": null, "package": [], "package-lock": true, @@ -138,6 +136,8 @@ exports[`test/lib/commands/config.js TAP config list --json > output matches sna "save-peer": false, "save-prefix": "^", "save-prod": false, + "sbom-format": null, + "sbom-type": "library", "scope": "", "script-shell": null, "searchexclude": "", diff --git a/deps/npm/tap-snapshots/test/lib/commands/publish.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/publish.js.test.cjs index 45406c994002aa..4f947be5cf5dec 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/publish.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/publish.js.test.cjs @@ -350,6 +350,10 @@ Array [ ] ` +exports[`test/lib/commands/publish.js TAP prioritize CLI flags over publishConfig > new package version 1`] = ` ++ test-package@1.0.0 +` + exports[`test/lib/commands/publish.js TAP public access > must match snapshot 1`] = ` Array [ Array [ diff --git a/deps/npm/tap-snapshots/test/lib/docs.js.test.cjs b/deps/npm/tap-snapshots/test/lib/docs.js.test.cjs index e396fce2475ea3..64dad96903ec32 100644 --- a/deps/npm/tap-snapshots/test/lib/docs.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/docs.js.test.cjs @@ -2078,8 +2078,6 @@ Array [ "color", "commit-hooks", "cpu", - "os", - "libc", "depth", "description", "dev", @@ -2094,8 +2092,8 @@ Array [ "dry-run", "editor", "engine-strict", - "expect-results", "expect-result-count", + "expect-results", "fetch-retries", "fetch-retry-factor", "fetch-retry-maxtimeout", @@ -2135,10 +2133,9 @@ Array [ "key", "legacy-bundling", "legacy-peer-deps", + "libc", "link", "local-address", - "sbom-format", - "sbom-type", "location", "lockfile-version", "loglevel", @@ -2154,6 +2151,7 @@ Array [ "omit-lockfile-registry-resolved", "only", "optional", + "os", "otp", "package", "package-lock", @@ -2182,6 +2180,8 @@ Array [ "save-peer", "save-prefix", "save-prod", + "sbom-format", + "sbom-type", "scope", "script-shell", "searchexclude", @@ -2238,8 +2238,6 @@ Array [ "color", "commit-hooks", "cpu", - "os", - "libc", "depth", "description", "dev", @@ -2281,9 +2279,8 @@ Array [ "key", "legacy-bundling", "legacy-peer-deps", + "libc", "local-address", - "sbom-format", - "sbom-type", "location", "lockfile-version", "loglevel", @@ 
-2295,6 +2292,7 @@ Array [ "omit-lockfile-registry-resolved", "only", "optional", + "os", "otp", "package", "package-lock", @@ -2322,6 +2320,8 @@ Array [ "save-peer", "save-prefix", "save-prod", + "sbom-format", + "sbom-type", "scope", "script-shell", "searchexclude", @@ -2347,8 +2347,8 @@ Array [ exports[`test/lib/docs.js TAP config > keys that are not flattened 1`] = ` Array [ - "expect-results", "expect-result-count", + "expect-results", "init-author-email", "init-author-name", "init-author-url", diff --git a/deps/npm/test/lib/commands/publish.js b/deps/npm/test/lib/commands/publish.js index ec7299e9eec530..751cd97d8acf6e 100644 --- a/deps/npm/test/lib/commands/publish.js +++ b/deps/npm/test/lib/commands/publish.js @@ -131,6 +131,58 @@ t.test('re-loads publishConfig.registry if added during script process', async t t.matchSnapshot(joinedOutput(), 'new package version') }) +t.test('prioritize CLI flags over publishConfig', async t => { + const publishConfig = { registry: 'http://publishconfig' } + const { joinedOutput, npm } = await loadMockNpm(t, { + config: { + [`${alternateRegistry.slice(6)}/:_authToken`]: 'test-other-token', + }, + prefixDir: { + 'package.json': JSON.stringify({ + ...pkgJson, + scripts: { + prepare: 'cp new.json package.json', + }, + }, null, 2), + 'new.json': JSON.stringify({ + ...pkgJson, + publishConfig, + }), + }, + argv: ['--registry', alternateRegistry], + }) + const registry = new MockRegistry({ + tap: t, + registry: alternateRegistry, + authorization: 'test-other-token', + }) + registry.nock.put(`/${pkg}`, body => { + return t.match(body, { + _id: pkg, + name: pkg, + 'dist-tags': { latest: '1.0.0' }, + access: null, + versions: { + '1.0.0': { + name: pkg, + version: '1.0.0', + _id: `${pkg}@1.0.0`, + dist: { + shasum: /\.*/, + tarball: `http:${alternateRegistry.slice(6)}/test-package/-/test-package-1.0.0.tgz`, + }, + publishConfig, + }, + }, + _attachments: { + [`${pkg}-1.0.0.tgz`]: {}, + }, + }) + }).reply(200, {}) + await npm.exec('publish', []) + t.matchSnapshot(joinedOutput(), 'new package version') +}) + t.test('json', async t => { const { joinedOutput, npm, logs } = await loadMockNpm(t, { config: { diff --git a/deps/npm/test/lib/commands/unpublish.js b/deps/npm/test/lib/commands/unpublish.js index 097309393a2585..31dc77ea46cd0c 100644 --- a/deps/npm/test/lib/commands/unpublish.js +++ b/deps/npm/test/lib/commands/unpublish.js @@ -408,6 +408,36 @@ t.test('publishConfig no spec', async t => { t.equal(joinedOutput(), '- test-package') }) +t.test('prioritize CLI flags over publishConfig no spec', async t => { + const alternateRegistry = 'https://other.registry.npmjs.org' + const publishConfig = { registry: 'http://publishconfig' } + const { joinedOutput, npm } = await loadMockNpm(t, { + config: { + force: true, + '//other.registry.npmjs.org/:_authToken': 'test-other-token', + }, + prefixDir: { + 'package.json': JSON.stringify({ + name: pkg, + version: '1.0.0', + publishConfig, + }, null, 2), + }, + argv: ['--registry', alternateRegistry], + }) + + const registry = new MockRegistry({ + tap: t, + registry: alternateRegistry, + authorization: 'test-other-token', + }) + const manifest = registry.manifest({ name: pkg }) + await registry.package({ manifest, query: { write: true }, times: 2 }) + registry.unpublish({ manifest }) + await npm.exec('unpublish', []) + t.equal(joinedOutput(), '- test-package') +}) + t.test('publishConfig with spec', async t => { const alternateRegistry = 'https://other.registry.npmjs.org' const { joinedOutput, npm } = await loadMockNpm(t, 
{ diff --git a/deps/npm/test/lib/utils/log-file.js b/deps/npm/test/lib/utils/log-file.js index c02f338a84ee01..f34dda8f524337 100644 --- a/deps/npm/test/lib/utils/log-file.js +++ b/deps/npm/test/lib/utils/log-file.js @@ -57,8 +57,10 @@ const loadLogFile = async (t, { buffer = [], mocks, testdir = {}, ...options } = logFile, LogFile, readLogs: async () => { - const logDir = await fs.readdir(root) - const logFiles = logDir.map((f) => path.join(root, f)) + const logDir = await fs.readdir(root, { withFileTypes: true }) + const logFiles = logDir + .filter(f => f.isFile()) + .map((f) => path.join(root, f.name)) .filter((f) => _fs.existsSync(f)) return Promise.all(logFiles.map(async (f) => { const content = await fs.readFile(f, 'utf8') @@ -202,6 +204,22 @@ t.test('cleans logs', async t => { t.equal(logs.length, logsMax + 1) }) +t.test('cleans logs even when find folder inside logs folder', async t => { + const logsMax = 5 + const { readLogs } = await loadLogFile(t, { + logsMax, + testdir: { + ...makeOldLogs(10), + ignore_folder: { + 'ignored-file.txt': 'hello', + }, + }, + }) + + const logs = await readLogs() + t.equal(logs.length, logsMax + 1) +}) + t.test('doesnt clean current log by default', async t => { const logsMax = 1 const { readLogs, logFile } = await loadLogFile(t, { @@ -240,35 +258,6 @@ t.test('doesnt need to clean', async t => { t.equal(logs.length, oldLogs + 1) }) -t.test('glob error', async t => { - const { readLogs } = await loadLogFile(t, { - logsMax: 5, - mocks: { - glob: { glob: () => { - throw new Error('bad glob') - } }, - }, - }) - - const logs = await readLogs() - t.equal(logs.length, 1) - t.match(last(logs).content, /error cleaning log files .* bad glob/) -}) - -t.test('do not log cleaning errors when logging is disabled', async t => { - const { readLogs } = await loadLogFile(t, { - logsMax: 0, - mocks: { - glob: () => { - throw new Error('should not be logged') - }, - }, - }) - - const logs = await readLogs() - t.equal(logs.length, 0) -}) - t.test('cleans old style logs too', async t => { const logsMax = 5 const oldLogs = 10 @@ -290,6 +279,7 @@ t.test('rimraf error', async t => { testdir: makeOldLogs(oldLogs), mocks: { 'fs/promises': { + readdir: fs.readdir, rm: async (...args) => { if (count >= 3) { throw new Error('bad rimraf') From 28c0c78c9a97e0672ad3002266ce44980ff79cf7 Mon Sep 17 00:00:00 2001 From: James M Snell Date: Tue, 26 Dec 2023 06:48:01 -0800 Subject: [PATCH 25/41] deps: update ngtcp2 and nghttp3 PR-URL: https://github.com/nodejs/node/pull/51291 Reviewed-By: Stephen Belanger Reviewed-By: Jiawen Geng --- .../nghttp3/lib/includes/nghttp3/nghttp3.h | 795 +++-- deps/ngtcp2/nghttp3/lib/nghttp3_conn.c | 305 +- deps/ngtcp2/nghttp3/lib/nghttp3_conn.h | 10 +- deps/ngtcp2/nghttp3/lib/nghttp3_conv.c | 26 +- deps/ngtcp2/nghttp3/lib/nghttp3_conv.h | 71 +- deps/ngtcp2/nghttp3/lib/nghttp3_err.c | 5 +- deps/ngtcp2/nghttp3/lib/nghttp3_frame.c | 47 +- deps/ngtcp2/nghttp3/lib/nghttp3_frame.h | 57 +- deps/ngtcp2/nghttp3/lib/nghttp3_http.c | 744 +--- deps/ngtcp2/nghttp3/lib/nghttp3_http.h | 63 +- deps/ngtcp2/nghttp3/lib/nghttp3_ksl.c | 38 +- deps/ngtcp2/nghttp3/lib/nghttp3_ksl.h | 4 +- deps/ngtcp2/nghttp3/lib/nghttp3_map.c | 2 + deps/ngtcp2/nghttp3/lib/nghttp3_map.h | 2 + deps/ngtcp2/nghttp3/lib/nghttp3_objalloc.h | 29 +- deps/ngtcp2/nghttp3/lib/nghttp3_qpack.c | 89 +- deps/ngtcp2/nghttp3/lib/nghttp3_ringbuf.c | 3 +- deps/ngtcp2/nghttp3/lib/nghttp3_stream.c | 108 +- deps/ngtcp2/nghttp3/lib/nghttp3_stream.h | 29 +- deps/ngtcp2/nghttp3/lib/nghttp3_tnode.c | 31 +- 
deps/ngtcp2/nghttp3/lib/nghttp3_tnode.h | 23 +- deps/ngtcp2/nghttp3/lib/nghttp3_unreachable.c | 72 + deps/ngtcp2/nghttp3/lib/nghttp3_unreachable.h | 53 + deps/ngtcp2/nghttp3/lib/sfparse.c | 1146 ++++++ deps/ngtcp2/nghttp3/lib/sfparse.h | 409 +++ deps/ngtcp2/ngtcp2.gyp | 23 +- .../ngtcp2/crypto/boringssl/boringssl.c | 116 +- .../crypto/includes/ngtcp2/ngtcp2_crypto.h | 304 +- .../includes/ngtcp2/ngtcp2_crypto_boringssl.h | 16 +- .../includes/ngtcp2/ngtcp2_crypto_picotls.h | 20 +- ...ypto_openssl.h => ngtcp2_crypto_quictls.h} | 59 +- .../includes/ngtcp2/ngtcp2_crypto_wolfssl.h | 16 +- deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c | 112 +- .../{openssl/openssl.c => quictls/quictls.c} | 341 +- deps/ngtcp2/ngtcp2/crypto/shared.c | 190 +- deps/ngtcp2/ngtcp2/crypto/shared.h | 89 +- deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c | 82 +- .../ngtcp2/lib/includes/ngtcp2/ngtcp2.h | 2386 +++++++------ deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c | 20 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h | 5 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.c | 8 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h | 6 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c | 1658 ++++++--- deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h | 172 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr2.c | 1486 -------- deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr2.h | 149 - deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c | 524 ++- deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h | 75 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c | 3157 +++++++++-------- deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h | 194 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h | 132 + deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c | 95 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h | 82 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c | 66 + deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.h | 71 + deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c | 536 ++- deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h | 48 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_err.c | 5 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c | 220 ++ deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h | 171 + deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c | 38 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c | 271 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h | 25 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h | 5 + deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c | 2 + deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h | 2 + deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h | 33 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h | 29 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c | 565 ++- deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h | 198 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h | 62 + deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c | 14 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h | 4 - deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c | 122 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c | 27 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h | 6 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c | 6 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h | 8 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c | 13 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h | 5 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c | 346 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h | 154 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_str.c | 5 + deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h | 7 + deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c | 44 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h | 53 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h | 68 + deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c | 71 + deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h | 52 + deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c | 45 +- 92 files changed, 10428 insertions(+), 8655 deletions(-) create mode 100644 deps/ngtcp2/nghttp3/lib/nghttp3_unreachable.c create mode 100644 
deps/ngtcp2/nghttp3/lib/nghttp3_unreachable.h create mode 100644 deps/ngtcp2/nghttp3/lib/sfparse.c create mode 100644 deps/ngtcp2/nghttp3/lib/sfparse.h rename deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/{ngtcp2_crypto_openssl.h => ngtcp2_crypto_quictls.h} (63%) rename deps/ngtcp2/ngtcp2/crypto/{openssl/openssl.c => quictls/quictls.c} (73%) delete mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr2.c delete mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr2.h create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.h create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c create mode 100644 deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h diff --git a/deps/ngtcp2/nghttp3/lib/includes/nghttp3/nghttp3.h b/deps/ngtcp2/nghttp3/lib/includes/nghttp3/nghttp3.h index cd10e4def7019b..77eb1fbf263815 100644 --- a/deps/ngtcp2/nghttp3/lib/includes/nghttp3/nghttp3.h +++ b/deps/ngtcp2/nghttp3/lib/includes/nghttp3/nghttp3.h @@ -68,6 +68,12 @@ extern "C" { # endif /* !BUILDING_NGHTTP3 */ #endif /* !defined(WIN32) */ +#ifdef _MSC_VER +# define NGHTTP3_ALIGN(N) __declspec(align(N)) +#else /* !_MSC_VER */ +# define NGHTTP3_ALIGN(N) __attribute__((aligned(N))) +#endif /* !_MSC_VER */ + /** * @typedef * @@ -97,166 +103,159 @@ typedef ptrdiff_t nghttp3_ssize; * argument is invalid. */ #define NGHTTP3_ERR_INVALID_ARGUMENT -101 -/** - * @macro - * - * :macro:`NGHTTP3_ERR_NOBUF` indicates that a provided buffer does - * not have enough space to store data. - */ -#define NGHTTP3_ERR_NOBUF -102 /** * @macro * * :macro:`NGHTTP3_ERR_INVALID_STATE` indicates that a requested * operation is not allowed at the current connection state. */ -#define NGHTTP3_ERR_INVALID_STATE -103 +#define NGHTTP3_ERR_INVALID_STATE -102 /** * @macro * * :macro:`NGHTTP3_ERR_WOULDBLOCK` indicates that an operation might * block. */ -#define NGHTTP3_ERR_WOULDBLOCK -104 +#define NGHTTP3_ERR_WOULDBLOCK -103 /** * @macro * * :macro:`NGHTTP3_ERR_STREAM_IN_USE` indicates that a stream ID is * already in use. */ -#define NGHTTP3_ERR_STREAM_IN_USE -105 +#define NGHTTP3_ERR_STREAM_IN_USE -104 /** * @macro * * :macro:`NGHTTP3_ERR_MALFORMED_HTTP_HEADER` indicates that an HTTP * header field is malformed. */ -#define NGHTTP3_ERR_MALFORMED_HTTP_HEADER -107 +#define NGHTTP3_ERR_MALFORMED_HTTP_HEADER -105 /** * @macro * * :macro:`NGHTTP3_ERR_REMOVE_HTTP_HEADER` indicates that an HTTP * header field is discarded. */ -#define NGHTTP3_ERR_REMOVE_HTTP_HEADER -108 +#define NGHTTP3_ERR_REMOVE_HTTP_HEADER -106 /** * @macro * * :macro:`NGHTTP3_ERR_MALFORMED_HTTP_MESSAGING` indicates that HTTP * messaging is malformed. */ -#define NGHTTP3_ERR_MALFORMED_HTTP_MESSAGING -109 +#define NGHTTP3_ERR_MALFORMED_HTTP_MESSAGING -107 /** * @macro * * :macro:`NGHTTP3_ERR_QPACK_FATAL` indicates that a fatal error is - * occurred during QPACK processing and it cannot be recoverable. + * occurred during QPACK processing, and it cannot be recoverable. */ -#define NGHTTP3_ERR_QPACK_FATAL -111 +#define NGHTTP3_ERR_QPACK_FATAL -108 /** * @macro * * :macro:`NGHTTP3_ERR_QPACK_HEADER_TOO_LARGE` indicates that a header * field is too large to process. 
*/ -#define NGHTTP3_ERR_QPACK_HEADER_TOO_LARGE -112 +#define NGHTTP3_ERR_QPACK_HEADER_TOO_LARGE -109 /** * @macro * * :macro:`NGHTTP3_ERR_STREAM_NOT_FOUND` indicates that a stream is * not found. */ -#define NGHTTP3_ERR_STREAM_NOT_FOUND -114 +#define NGHTTP3_ERR_STREAM_NOT_FOUND -110 /** * @macro * * :macro:`NGHTTP3_ERR_CONN_CLOSING` indicates that a connection is * closing state. */ -#define NGHTTP3_ERR_CONN_CLOSING -116 +#define NGHTTP3_ERR_CONN_CLOSING -111 /** * @macro * * :macro:`NGHTTP3_ERR_STREAM_DATA_OVERFLOW` indicates that the length - * of stream data is too long and causes overflow. + * of stream data is too long, and causes overflow. */ -#define NGHTTP3_ERR_STREAM_DATA_OVERFLOW -117 +#define NGHTTP3_ERR_STREAM_DATA_OVERFLOW -112 /** * @macro * * :macro:`NGHTTP3_ERR_QPACK_DECOMPRESSION_FAILED` indicates that a * QPACK decompression failed. */ -#define NGHTTP3_ERR_QPACK_DECOMPRESSION_FAILED -402 +#define NGHTTP3_ERR_QPACK_DECOMPRESSION_FAILED -401 /** * @macro * * :macro:`NGHTTP3_ERR_QPACK_ENCODER_STREAM_ERROR` indicates that an * error occurred while reading QPACK encoder stream. */ -#define NGHTTP3_ERR_QPACK_ENCODER_STREAM_ERROR -403 +#define NGHTTP3_ERR_QPACK_ENCODER_STREAM_ERROR -402 /** * @macro * * :macro:`NGHTTP3_ERR_QPACK_DECODER_STREAM_ERROR` indicates that an * error occurred while reading QPACK decoder stream. */ -#define NGHTTP3_ERR_QPACK_DECODER_STREAM_ERROR -404 +#define NGHTTP3_ERR_QPACK_DECODER_STREAM_ERROR -403 /** * @macro * * :macro:`NGHTTP3_ERR_H3_FRAME_UNEXPECTED` indicates that an * unexpected HTTP/3 frame is received. */ -#define NGHTTP3_ERR_H3_FRAME_UNEXPECTED -408 +#define NGHTTP3_ERR_H3_FRAME_UNEXPECTED -601 /** * @macro * * :macro:`NGHTTP3_ERR_H3_FRAME_ERROR` indicates that an HTTP/3 frame * is malformed. */ -#define NGHTTP3_ERR_H3_FRAME_ERROR -409 +#define NGHTTP3_ERR_H3_FRAME_ERROR -602 /** * @macro * * :macro:`NGHTTP3_ERR_H3_MISSING_SETTINGS` indicates that an HTTP/3 * SETTINGS frame is missing. */ -#define NGHTTP3_ERR_H3_MISSING_SETTINGS -665 +#define NGHTTP3_ERR_H3_MISSING_SETTINGS -603 /** * @macro * * :macro:`NGHTTP3_ERR_H3_INTERNAL_ERROR` indicates an internal error. */ -#define NGHTTP3_ERR_H3_INTERNAL_ERROR -667 +#define NGHTTP3_ERR_H3_INTERNAL_ERROR -604 /** * @macro * * :macro:`NGHTTP3_ERR_H3_CLOSED_CRITICAL_STREAM` indicates that a * critical stream is closed. */ -#define NGHTTP3_ERR_H3_CLOSED_CRITICAL_STREAM -668 +#define NGHTTP3_ERR_H3_CLOSED_CRITICAL_STREAM -605 /** * @macro * * :macro:`NGHTTP3_ERR_H3_GENERAL_PROTOCOL_ERROR` indicates a general * protocol error. This is typically a catch-all error. */ -#define NGHTTP3_ERR_H3_GENERAL_PROTOCOL_ERROR -669 +#define NGHTTP3_ERR_H3_GENERAL_PROTOCOL_ERROR -606 /** * @macro * * :macro:`NGHTTP3_ERR_H3_ID_ERROR` indicates that an ID related error * occurred. */ -#define NGHTTP3_ERR_H3_ID_ERROR -670 +#define NGHTTP3_ERR_H3_ID_ERROR -607 /** * @macro * * :macro:`NGHTTP3_ERR_H3_SETTINGS_ERROR` indicates that an HTTP/3 * SETTINGS frame is malformed. */ -#define NGHTTP3_ERR_H3_SETTINGS_ERROR -671 +#define NGHTTP3_ERR_H3_SETTINGS_ERROR -608 /** * @macro * @@ -264,7 +263,7 @@ typedef ptrdiff_t nghttp3_ssize; * remote endpoint attempts to create a new stream which is not * allowed. */ -#define NGHTTP3_ERR_H3_STREAM_CREATION_ERROR -672 +#define NGHTTP3_ERR_H3_STREAM_CREATION_ERROR -609 /** * @macro * @@ -479,7 +478,7 @@ typedef void *(*nghttp3_realloc)(void *ptr, size_t size, void *user_data); * per-session memory pool. 
* * In the following example code, ``my_malloc``, ``my_free``, - * ``my_calloc`` and ``my_realloc`` are the replacement of the + * ``my_calloc``, and ``my_realloc`` are the replacement of the * standard allocators :manpage:`malloc(3)`, :manpage:`free(3)`, * :manpage:`calloc(3)` and :manpage:`realloc(3)` respectively:: * @@ -512,8 +511,8 @@ typedef void *(*nghttp3_realloc)(void *ptr, size_t size, void *user_data); */ typedef struct nghttp3_mem { /** - * :member:`user_data` is an arbitrary user supplied data. This - * is passed to each allocator function. + * :member:`user_data` is an arbitrary user supplied data. This is + * passed to each allocator function. */ void *user_data; /** @@ -559,7 +558,7 @@ typedef struct nghttp3_vec { uint8_t *base; /** * :member:`len` is the number of bytes which the buffer pointed by - * base contains. + * :member:`base` contains. */ size_t len; } nghttp3_vec; @@ -626,8 +625,8 @@ typedef struct nghttp3_buf { uint8_t *end; /** * :member:`pos` pointers to the start of data. Typically, this - * points to the point that next data should be read. Initially, it - * points to :member:`begin`. + * points to the address that next data should be read. Initially, + * it points to :member:`begin`. */ uint8_t *pos; /** @@ -685,7 +684,7 @@ NGHTTP3_EXTERN void nghttp3_buf_reset(nghttp3_buf *buf); /** * @macrosection * - * Flags for header field name/value pair + * Flags for HTTP field name/value pair */ /** @@ -708,8 +707,8 @@ NGHTTP3_EXTERN void nghttp3_buf_reset(nghttp3_buf *buf); * @macro * * :macro:`NGHTTP3_NV_FLAG_NO_COPY_NAME` is set solely by application. - * If this flag is set, the library does not make a copy of header - * field name. This could improve performance. + * If this flag is set, the library does not make a copy of field + * name. This could improve performance. */ #define NGHTTP3_NV_FLAG_NO_COPY_NAME 0x02u @@ -718,25 +717,35 @@ NGHTTP3_EXTERN void nghttp3_buf_reset(nghttp3_buf *buf); * * :macro:`NGHTTP3_NV_FLAG_NO_COPY_VALUE` is set solely by * application. If this flag is set, the library does not make a copy - * of header field value. This could improve performance. + * of field value. This could improve performance. */ #define NGHTTP3_NV_FLAG_NO_COPY_VALUE 0x04u +/** + * @macro + * + * :macro:`NGHTTP3_NV_FLAG_TRY_INDEX` gives a hint to QPACK encoder to + * index an HTTP field which is not indexed by default. This is just + * a hint, and QPACK encoder might not encode the field in various + * reasons. + */ +#define NGHTTP3_NV_FLAG_TRY_INDEX 0x08u + /** * @struct * * :type:`nghttp3_nv` is the name/value pair, which mainly used to - * represent header fields. + * represent HTTP fields. */ typedef struct nghttp3_nv { /** - * :member:`name` is the header field name. + * :member:`name` is the HTTP field name. */ - uint8_t *name; + const uint8_t *name; /** - * :member:`value` is the header field value. + * :member:`value` is the HTTP field value. */ - uint8_t *value; + const uint8_t *value; /** * :member:`namelen` is the length of the |name|, excluding * terminating NULL. @@ -758,8 +767,8 @@ typedef struct nghttp3_nv { /** * @enum * - * :type:`nghttp3_qpack_token` defines HTTP header field name tokens - * to identify field name quickly. It appears in + * :type:`nghttp3_qpack_token` defines HTTP field name tokens to + * identify field name quickly. It appears in * :member:`nghttp3_qpack_nv.token`. 
*/ typedef enum nghttp3_qpack_token { @@ -1005,7 +1014,7 @@ typedef enum nghttp3_qpack_token { */ NGHTTP3_QPACK_TOKEN_X_FRAME_OPTIONS = 96, - /* Additional header fields for HTTP messaging validation */ + /* Additional HTTP fields for HTTP messaging validation */ /** * :enum:`NGHTTP3_QPACK_TOKEN_HOST` is a token for ``host``. @@ -1053,26 +1062,25 @@ typedef enum nghttp3_qpack_token { /** * @struct * - * :type:`nghttp3_qpack_nv` represents header field name/value pair - * just like :type:`nghttp3_nv`. It is an extended version of - * :type:`nghttp3_nv` and has reference counted buffers and tokens - * which might be useful for applications. + * :type:`nghttp3_qpack_nv` represents HTTP field name/value pair just + * like :type:`nghttp3_nv`. It is an extended version of + * :type:`nghttp3_nv`, and has reference counted buffers and tokens. */ typedef struct nghttp3_qpack_nv { /** - * :member:`name` is the buffer containing header field name. + * :member:`name` is the buffer containing HTTP field name. * NULL-termination is guaranteed. */ nghttp3_rcbuf *name; /** - * :member:`value` is the buffer containing header field value. + * :member:`value` is the buffer containing HTTP field value. * NULL-termination is guaranteed. */ nghttp3_rcbuf *value; /** * :member:`token` is :type:`nghttp3_qpack_token` value of - * :member:`name`. It could be -1 if we have no token for that - * header field name. + * :member:`name`. It could be -1 if we have no token for that HTTP + * field name. */ int32_t token; /** @@ -1085,7 +1093,8 @@ typedef struct nghttp3_qpack_nv { /** * @struct * - * :type:`nghttp3_qpack_encoder` is QPACK encoder. + * :type:`nghttp3_qpack_encoder` is QPACK encoder. The details of + * this structure are intentionally hidden from the public API. */ typedef struct nghttp3_qpack_encoder nghttp3_qpack_encoder; @@ -1096,7 +1105,7 @@ typedef struct nghttp3_qpack_encoder nghttp3_qpack_encoder; * must be non-NULL pointer. |hard_max_dtable_capacity| is the upper * bound of the dynamic table capacity. |mem| is a memory allocator. * This function allocates memory for :type:`nghttp3_qpack_encoder` - * itself and assigns its pointer to |*pencoder| if it succeeds. + * itself, and assigns its pointer to |*pencoder| if it succeeds. * * The maximum dynamic table capacity is still 0. In order to change * the maximum dynamic table capacity, call @@ -1116,21 +1125,22 @@ NGHTTP3_EXTERN int nghttp3_qpack_encoder_new(nghttp3_qpack_encoder **pencoder, * @function * * `nghttp3_qpack_encoder_del` frees memory allocated for |encoder|. - * This function frees memory pointed by |encoder| itself. + * This function also frees memory pointed by |encoder| itself. This + * function does nothing if |encoder| is NULL. */ NGHTTP3_EXTERN void nghttp3_qpack_encoder_del(nghttp3_qpack_encoder *encoder); /** * @function * - * `nghttp3_qpack_encoder_encode` encodes the list of header fields + * `nghttp3_qpack_encoder_encode` encodes the list of HTTP fields * |nva|. |nvlen| is the length of |nva|. |stream_id| is the - * identifier of the stream which this header fields belong to. This - * function writes header block prefix, encoded header fields, and - * encoder stream to |pbuf|, |rbuf|, and |ebuf| respectively. The - * :member:`nghttp3_buf.last` will be adjusted when data is written. - * An application should write |pbuf| and |rbuf| to the request stream - * in this order. + * identifier of the stream which these HTTP fields belong to. 
This + * function writes field section prefix, encoded HTTP field section, + * and encoder stream to |pbuf|, |rbuf|, and |ebuf| respectively. + * Each :member:`nghttp3_buf.last` will be adjusted when data is + * written. An application should write |pbuf| and |rbuf| to the + * request stream in this order. * * The buffer pointed by |pbuf|, |rbuf|, and |ebuf| can be empty * buffer. It is fine to pass a buffer initialized by @@ -1139,7 +1149,7 @@ NGHTTP3_EXTERN void nghttp3_qpack_encoder_del(nghttp3_qpack_encoder *encoder); * frees and expands buffer if the current capacity of buffer is not * enough. If :member:`nghttp3_buf.begin` of any buffer is not NULL, * it must be allocated by the same memory allocator passed to - * `nghttp3_qpack_encoder_new()`. + * `nghttp3_qpack_encoder_new`. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -1147,7 +1157,7 @@ NGHTTP3_EXTERN void nghttp3_qpack_encoder_del(nghttp3_qpack_encoder *encoder); * :macro:`NGHTTP3_ERR_NOMEM` * Out of memory * :macro:`NGHTTP3_ERR_QPACK_FATAL` - * |encoder| is in unrecoverable error state and cannot be used + * |encoder| is in unrecoverable error state, and cannot be used * anymore. */ NGHTTP3_EXTERN int nghttp3_qpack_encoder_encode( @@ -1166,7 +1176,7 @@ NGHTTP3_EXTERN int nghttp3_qpack_encoder_encode( * :macro:`NGHTTP3_ERR_NOMEM` * Out of memory * :macro:`NGHTTP3_ERR_QPACK_FATAL` - * |encoder| is in unrecoverable error state and cannot be used + * |encoder| is in unrecoverable error state, and cannot be used * anymore. * :macro:`NGHTTP3_ERR_QPACK_DECODER_STREAM` * |encoder| is unable to process input because it is malformed. @@ -1178,8 +1188,8 @@ NGHTTP3_EXTERN nghttp3_ssize nghttp3_qpack_encoder_read_decoder( * @function * * `nghttp3_qpack_encoder_set_max_dtable_capacity` sets max dynamic - * table capacity to |max_dtable_capacity|. If |max_dtable_capacity| is - * larger than ``hard_max_dtable_capacity`` parameter of + * table capacity to |max_dtable_capacity|. If |max_dtable_capacity| + * is larger than ``hard_max_dtable_capacity`` parameter of * `nghttp3_qpack_encoder_new`, it is truncated to the latter. */ NGHTTP3_EXTERN void @@ -1200,9 +1210,10 @@ nghttp3_qpack_encoder_set_max_blocked_streams(nghttp3_qpack_encoder *encoder, * @function * * `nghttp3_qpack_encoder_ack_everything` tells |encoder| that all - * encoded header blocks are acknowledged. This function is provided - * for debugging purpose only. In HTTP/3, |encoder| knows this by - * reading decoder stream with `nghttp3_qpack_encoder_read_decoder()`. + * encoded HTTP field sections are acknowledged. This function is + * provided for debugging purpose only. In HTTP/3, |encoder| knows + * this by reading decoder stream with + * `nghttp3_qpack_encoder_read_decoder`. */ NGHTTP3_EXTERN void nghttp3_qpack_encoder_ack_everything(nghttp3_qpack_encoder *encoder); @@ -1220,9 +1231,10 @@ nghttp3_qpack_encoder_get_num_blocked_streams(nghttp3_qpack_encoder *encoder); * @struct * * :type:`nghttp3_qpack_stream_context` is a decoder context for an - * individual stream. Its state is per header block. In order to - * reuse this object for another header block, call - * `nghttp3_qpack_stream_context_reset`. + * individual stream. Its state is per HTTP field section. In order + * to reuse this object for another HTTP field section, call + * `nghttp3_qpack_stream_context_reset`. The details of this + * structure are intentionally hidden from the public API. 
*/ typedef struct nghttp3_qpack_stream_context nghttp3_qpack_stream_context; @@ -1232,8 +1244,8 @@ typedef struct nghttp3_qpack_stream_context nghttp3_qpack_stream_context; * `nghttp3_qpack_stream_context_new` initializes stream context. * |psctx| must be non-NULL pointer. |stream_id| is stream ID. |mem| * is a memory allocator. This function allocates memory for - * :type:`nghttp3_qpack_stream_context` itself and assigns its pointer - * to |*psctx| if it succeeds. + * :type:`nghttp3_qpack_stream_context` itself, and assigns its + * pointer to |*psctx| if it succeeds. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -1249,7 +1261,8 @@ nghttp3_qpack_stream_context_new(nghttp3_qpack_stream_context **psctx, * @function * * `nghttp3_qpack_stream_context_del` frees memory allocated for - * |sctx|. This function frees memory pointed by |sctx| itself. + * |sctx|. This function frees memory pointed by |sctx| itself. This + * function does nothing if |sctx| is NULL. */ NGHTTP3_EXTERN void nghttp3_qpack_stream_context_del(nghttp3_qpack_stream_context *sctx); @@ -1267,8 +1280,8 @@ nghttp3_qpack_stream_context_get_ricnt(nghttp3_qpack_stream_context *sctx); * @function * * `nghttp3_qpack_stream_context_reset` resets the state of |sctx|. - * Then it can be reused for an another header block in the same - * stream. + * Then it can be reused for decoding an another HTTP field section in + * the same stream. */ NGHTTP3_EXTERN void nghttp3_qpack_stream_context_reset(nghttp3_qpack_stream_context *sctx); @@ -1276,7 +1289,8 @@ void nghttp3_qpack_stream_context_reset(nghttp3_qpack_stream_context *sctx); /** * @struct * - * :type:`nghttp3_qpack_decoder` is QPACK decoder. + * :type:`nghttp3_qpack_decoder` is QPACK decoder. The details of + * this structure are intentionally hidden from the public API. */ typedef struct nghttp3_qpack_decoder nghttp3_qpack_decoder; @@ -1288,7 +1302,7 @@ typedef struct nghttp3_qpack_decoder nghttp3_qpack_decoder; * bound of the dynamic table capacity. |max_blocked_streams| is the * maximum number of streams which can be blocked. |mem| is a memory * allocator. This function allocates memory for - * :type:`nghttp3_qpack_decoder` itself and assigns its pointer to + * :type:`nghttp3_qpack_decoder` itself, and assigns its pointer to * |*pdecoder| if it succeeds. * * This function returns 0 if it succeeds, or one of the following @@ -1306,7 +1320,8 @@ NGHTTP3_EXTERN int nghttp3_qpack_decoder_new(nghttp3_qpack_decoder **pdecoder, * @function * * `nghttp3_qpack_decoder_del` frees memory allocated for |decoder|. - * This function frees memory pointed by |decoder| itself. + * This function frees memory pointed by |decoder| itself. This + * function does nothing if |decoder| is NULL. */ NGHTTP3_EXTERN void nghttp3_qpack_decoder_del(nghttp3_qpack_decoder *decoder); @@ -1322,7 +1337,7 @@ NGHTTP3_EXTERN void nghttp3_qpack_decoder_del(nghttp3_qpack_decoder *decoder); * :macro:`NGHTTP3_ERR_NOMEM` * Out of memory. * :macro:`NGHTTP3_ERR_QPACK_FATAL` - * |decoder| is in unrecoverable error state and cannot be used + * |decoder| is in unrecoverable error state, and cannot be used * anymore. * :macro:`NGHTTP3_ERR_QPACK_ENCODER_STREAM` * Could not interpret encoder stream instruction. @@ -1354,7 +1369,7 @@ nghttp3_qpack_decoder_get_icnt(const nghttp3_qpack_decoder *decoder); /** * @macro * - * :macro:`NGHTTP3_QPACK_DECODE_FLAG_EMIT` indicates that a header + * :macro:`NGHTTP3_QPACK_DECODE_FLAG_EMIT` indicates that an HTTP * field is successfully decoded. 
*/ #define NGHTTP3_QPACK_DECODE_FLAG_EMIT 0x01u @@ -1362,8 +1377,8 @@ nghttp3_qpack_decoder_get_icnt(const nghttp3_qpack_decoder *decoder); /** * @macro * - * :macro:`NGHTTP3_QPACK_DECODE_FLAG_FINAL` indicates that all header - * fields have been decoded. + * :macro:`NGHTTP3_QPACK_DECODE_FLAG_FINAL` indicates that an entire + * HTTP field section has been decoded. */ #define NGHTTP3_QPACK_DECODE_FLAG_FINAL 0x02u @@ -1380,32 +1395,32 @@ nghttp3_qpack_decoder_get_icnt(const nghttp3_qpack_decoder *decoder); * * `nghttp3_qpack_decoder_read_request` reads request stream. The * request stream is given as the buffer pointed by |src| of length - * |srclen|. |sctx| is the stream context and it must be created by - * `nghttp3_qpack_stream_context_new()`. |*pflags| must be non-NULL + * |srclen|. |sctx| is the stream context, and it must be created by + * `nghttp3_qpack_stream_context_new`. |*pflags| must be non-NULL * pointer. |nv| must be non-NULL pointer. * * If this function succeeds, it assigns flags to |*pflags|. If * |*pflags| has :macro:`NGHTTP3_QPACK_DECODE_FLAG_EMIT` set, a - * decoded header field is assigned to |nv|. If |*pflags| has - * :macro:`NGHTTP3_QPACK_DECODE_FLAG_FINAL` set, all header fields - * have been successfully decoded. If |*pflags| has + * decoded HTTP field is assigned to |nv|. If |*pflags| has + * :macro:`NGHTTP3_QPACK_DECODE_FLAG_FINAL` set, an entire HTTP field + * section has been successfully decoded. If |*pflags| has * :macro:`NGHTTP3_QPACK_DECODE_FLAG_BLOCKED` set, decoding is blocked * due to required insert count. * - * When a header field is decoded, an application receives it in |nv|. + * When an HTTP field is decoded, an application receives it in |nv|. * :member:`nv->name ` and :member:`nv->value * ` are reference counted buffer, and their * reference counts are already incremented for application use. - * Therefore, when application finishes processing the header field, - * it must call `nghttp3_rcbuf_decref(nv->name) - * ` and `nghttp3_rcbuf_decref(nv->value) - * ` or memory leak might occur. These - * :type:`nghttp3_rcbuf` objects hold the pointer to - * :type:`nghttp3_mem` that is passed to `nghttp3_qpack_decoder_new` - * (or either `nghttp3_conn_client_new` or `nghttp3_conn_server_new` - * if it is used indirectly). As long as these objects are alive, the - * pointed :type:`nghttp3_mem` object must be available. Otherwise, - * `nghttp3_rcbuf_decref` will cause undefined behavior. + * Therefore, when application finishes processing |nv|, it must call + * `nghttp3_rcbuf_decref(nv->name) ` and + * `nghttp3_rcbuf_decref(nv->value) `, or memory + * leak might occur. These :type:`nghttp3_rcbuf` objects hold the + * pointer to :type:`nghttp3_mem` that is passed to + * `nghttp3_qpack_decoder_new` (or either `nghttp3_conn_client_new` or + * `nghttp3_conn_server_new` if it is used indirectly). As long as + * these objects are alive, the pointed :type:`nghttp3_mem` object + * must be available. Otherwise, `nghttp3_rcbuf_decref` will cause + * undefined behavior. * * This function returns the number of bytes read, or one of the * following negative error codes: @@ -1413,12 +1428,12 @@ nghttp3_qpack_decoder_get_icnt(const nghttp3_qpack_decoder *decoder); * :macro:`NGHTTP3_ERR_NOMEM` * Out of memory. * :macro:`NGHTTP3_ERR_QPACK_FATAL` - * |decoder| is in unrecoverable error state and cannot be used + * |decoder| is in unrecoverable error state, and cannot be used * anymore. 
* :macro:`NGHTTP3_ERR_QPACK_DECOMPRESSION_FAILED` - * Could not interpret header block instruction. + * Could not interpret field line representations. * :macro:`NGHTTP3_ERR_QPACK_HEADER_TOO_LARGE` - * Header field is too large. + * HTTP field is too large. */ NGHTTP3_EXTERN nghttp3_ssize nghttp3_qpack_decoder_read_request( nghttp3_qpack_decoder *decoder, nghttp3_qpack_stream_context *sctx, @@ -1444,7 +1459,7 @@ nghttp3_qpack_decoder_write_decoder(nghttp3_qpack_decoder *decoder, * @function * * `nghttp3_qpack_decoder_get_decoder_streamlen` returns the length of - * decoder stream. + * decoder stream that is currently pending. */ NGHTTP3_EXTERN size_t nghttp3_qpack_decoder_get_decoder_streamlen(nghttp3_qpack_decoder *decoder); @@ -1452,8 +1467,8 @@ nghttp3_qpack_decoder_get_decoder_streamlen(nghttp3_qpack_decoder *decoder); /** * @function * - * `nghttp3_qpack_decoder_cancel_stream` cancels header decoding for - * stream denoted by |stream_id|. + * `nghttp3_qpack_decoder_cancel_stream` cancels HTTP field section + * decoding for stream denoted by |stream_id|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -1472,14 +1487,14 @@ nghttp3_qpack_decoder_cancel_stream(nghttp3_qpack_decoder *decoder, * * `nghttp3_qpack_decoder_set_max_dtable_capacity` sets * |max_dtable_capacity| as maximum dynamic table size. - * |max_dtable_capacity| must be equal to or smaller than + * |max_dtable_capacity| must be equal to, or smaller than * ``hard_max_dtable_capacity`` parameter of * `nghttp3_qpack_decoder_new`. Normally, the maximum capacity is * communicated in encoder stream. This function is provided for * debugging and testing purpose. * - * This function returns 0 if it succeeds, or one of the - * following negative error codes: + * This function returns 0 if it succeeds, or one of the following + * negative error codes: * * :macro:`NGHTTP3_ERR_INVALID_ARGUMENT` * |max_dtable_capacity| exceeds the upper bound of the dynamic @@ -1535,8 +1550,8 @@ typedef void (*nghttp3_debug_vprintf_callback)(const char *format, * * `nghttp3_set_debug_vprintf_callback` sets a debug output callback * called by the library when built with :macro:`DEBUGBUILD` macro - * defined. If this option is not used, debug log is written into - * standard error output. + * defined. If a callback function is not set by this function, debug + * log is written into standard error output. * * For builds without :macro:`DEBUGBUILD` macro defined, this function * is noop. @@ -1564,7 +1579,7 @@ NGHTTP3_EXTERN void nghttp3_set_debug_vprintf_callback( /** * @macro * - * :macro:`NGHTTP3_SHUTDOWN_NOTICE_STREAM_ID` specifies stream id sent + * :macro:`NGHTTP3_SHUTDOWN_NOTICE_STREAM_ID` specifies stream ID sent * by a server when it initiates graceful shutdown of the connection * via `nghttp3_conn_submit_shutdown_notice`. */ @@ -1573,19 +1588,72 @@ NGHTTP3_EXTERN void nghttp3_set_debug_vprintf_callback( /** * @macro * - * :macro:`NGHTTP3_SHUTDOWN_NOTICE_PUSH_ID` specifies push id sent - * by a client when it initiates graceful shutdown of the connection - * via `nghttp3_conn_submit_shutdown_notice`. + * :macro:`NGHTTP3_SHUTDOWN_NOTICE_PUSH_ID` specifies push ID sent by + * a client when it initiates graceful shutdown of the connection via + * `nghttp3_conn_submit_shutdown_notice`. Note that libnghttp3 does + * not implement HTTP/3 Server Push. */ #define NGHTTP3_SHUTDOWN_NOTICE_PUSH_ID ((1ull << 62) - 1) /** * @struct * - * :type:`nghttp3_conn` represents a single HTTP/3 connection. 
+ * :type:`nghttp3_conn` represents a single HTTP/3 connection. The + * details of this structure are intentionally hidden from the public + * API. */ typedef struct nghttp3_conn nghttp3_conn; +#define NGHTTP3_SETTINGS_V1 1 +#define NGHTTP3_SETTINGS_VERSION NGHTTP3_SETTINGS_V1 + +/** + * @struct + * + * :type:`nghttp3_settings` defines HTTP/3 settings. + */ +typedef struct nghttp3_settings { + /** + * :member:`max_field_section_size` specifies the maximum header + * section (block) size. + */ + uint64_t max_field_section_size; + /** + * :member:`qpack_max_dtable_capacity` is the maximum size of QPACK + * dynamic table. + */ + size_t qpack_max_dtable_capacity; + /** + * :member:`qpack_encoder_max_dtable_capacity` is the upper bound of + * QPACK dynamic table capacity that the QPACK encoder is willing to + * use. The effective maximum dynamic table capacity is the minimum + * of this field and the value of the received + * SETTINGS_QPACK_MAX_TABLE_CAPACITY. If this field is set to 0, + * the encoder does not use the dynamic table. + * + * When :type:`nghttp3_settings` is passed to + * :member:`nghttp3_callbacks.recv_settings` callback, this field + * should be ignored. + */ + size_t qpack_encoder_max_dtable_capacity; + /** + * :member:`qpack_blocked_streams` is the maximum number of streams + * which can be blocked while they are being decoded. + */ + size_t qpack_blocked_streams; + /** + * :member:`enable_connect_protocol`, if set to nonzero, enables + * Extended CONNECT Method (see :rfc:`9220`). Client ignores this + * field. + */ + uint8_t enable_connect_protocol; + /** + * :member:`h3_datagram`, if set to nonzero, enables HTTP/3 + * Datagrams (see :rfc:`9297`). + */ + uint8_t h3_datagram; +} nghttp3_settings; + /** * @functypedef * @@ -1607,8 +1675,9 @@ typedef int (*nghttp3_acked_stream_data)(nghttp3_conn *conn, int64_t stream_id, * @functypedef * * :type:`nghttp3_conn_stream_close` is a callback function which is - * invoked when a stream identified by |stream_id| is closed. - * |app_error_code| indicates the reason of this closure. + * invoked when a stream identified by |stream_id| is closed. QUIC + * application error code |app_error_code| indicates the reason of + * this closure. * * The implementation of this callback must return 0 if it succeeds. * Returning :macro:`NGHTTP3_ERR_CALLBACK_FAILURE` will return to the @@ -1625,11 +1694,11 @@ typedef int (*nghttp3_stream_close)(nghttp3_conn *conn, int64_t stream_id, * * :type:`nghttp3_recv_data` is a callback function which is invoked * when a part of request or response body on stream identified by - * |stream_id| is received. |data| points to the received data and + * |stream_id| is received. |data| points to the received data, and * its length is |datalen|. * * The application is responsible for increasing flow control credit - * by |datalen| bytes. + * (say, increasing by |datalen| bytes). * * The implementation of this callback must return 0 if it succeeds. * Returning :macro:`NGHTTP3_ERR_CALLBACK_FAILURE` will return to the @@ -1646,9 +1715,9 @@ typedef int (*nghttp3_recv_data)(nghttp3_conn *conn, int64_t stream_id, * :type:`nghttp3_deferred_consume` is a callback function which is * invoked when the library consumed |consumed| bytes for a stream * identified by |stream_id|. This callback is used to notify the - * consumed bytes for stream blocked by QPACK decoder. The - * application is responsible for increasing flow control credit by - * |consumed| bytes. 
+ * consumed bytes for stream blocked due to synchronization between + * streams. The application is responsible for increasing flow + * control credit by |consumed| bytes. * * The implementation of this callback must return 0 if it succeeds. * Returning :macro:`NGHTTP3_ERR_CALLBACK_FAILURE` will return to the @@ -1663,11 +1732,11 @@ typedef int (*nghttp3_deferred_consume)(nghttp3_conn *conn, int64_t stream_id, * @functypedef * * :type:`nghttp3_begin_headers` is a callback function which is - * invoked when an incoming header block section is started on a - * stream denoted by |stream_id|. Each header field is passed to - * application by :type:`nghttp3_recv_header` callback. And then - * :type:`nghttp3_end_headers` is called when a whole header block is - * processed. + * invoked when an incoming HTTP field section is started on a stream + * denoted by |stream_id|. Each HTTP field is passed to application + * by :type:`nghttp3_recv_header` callback. And then + * :type:`nghttp3_end_headers` is called when a whole HTTP field + * section is processed. * * The implementation of this callback must return 0 if it succeeds. * Returning :macro:`NGHTTP3_ERR_CALLBACK_FAILURE` will return to the @@ -1682,8 +1751,8 @@ typedef int (*nghttp3_begin_headers)(nghttp3_conn *conn, int64_t stream_id, * @functypedef * * :type:`nghttp3_recv_header` is a callback function which is invoked - * when a header field is received on a stream denoted by |stream_id|. - * |name| contains a field name and |value| contains a field value. + * when an HTTP field is received on a stream denoted by |stream_id|. + * |name| contains a field name, and |value| contains a field value. * |token| is one of token defined in :type:`nghttp3_qpack_token` or * -1 if no token is defined for |name|. |flags| is bitwise OR of * zero or more of :macro:`NGHTTP3_NV_FLAG_* `. @@ -1708,9 +1777,10 @@ typedef int (*nghttp3_recv_header)(nghttp3_conn *conn, int64_t stream_id, * @functypedef * * :type:`nghttp3_end_headers` is a callback function which is invoked - * when an incoming header block has ended. + * when an incoming HTTP field section has ended. * - * If the stream ends with this header block, |fin| is set to nonzero. + * If the stream ends with this HTTP field section, |fin| is set to + * nonzero. * * The implementation of this callback must return 0 if it succeeds. * Returning :macro:`NGHTTP3_ERR_CALLBACK_FAILURE` will return to the @@ -1743,8 +1813,8 @@ typedef int (*nghttp3_end_stream)(nghttp3_conn *conn, int64_t stream_id, * * :type:`nghttp3_stop_sending` is a callback function which is * invoked when the library asks application to send STOP_SENDING to - * the stream identified by |stream_id|. |app_error_code| indicates - * the reason for this action. + * the stream identified by |stream_id|. QUIC application error code + * |app_error_code| indicates the reason for this action. * * The implementation of this callback must return 0 if it succeeds. * Returning :macro:`NGHTTP3_ERR_CALLBACK_FAILURE` will return to the @@ -1761,8 +1831,8 @@ typedef int (*nghttp3_stop_sending)(nghttp3_conn *conn, int64_t stream_id, * * :type:`nghttp3_reset_stream` is a callback function which is * invoked when the library asks application to reset stream - * identified by |stream_id|. |app_error_code| indicates the reason - * for this action. + * identified by |stream_id|. QUIC application error code + * |app_error_code| indicates the reason for this action. * * The implementation of this callback must return 0 if it succeeds. 
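A hedged sketch of a field-receiving callback matching the nghttp3_recv_header typedef referenced above (illustrative only; nghttp3_rcbuf_get_buf is assumed to expose the name/value bytes without copying):

  #include <stdio.h>
  #include <nghttp3/nghttp3.h>

  /* Illustrative only: log each received HTTP field. */
  static int on_recv_header(nghttp3_conn *conn, int64_t stream_id,
                            int32_t token, nghttp3_rcbuf *name,
                            nghttp3_rcbuf *value, uint8_t flags,
                            void *conn_user_data, void *stream_user_data) {
    nghttp3_vec k = nghttp3_rcbuf_get_buf(name);
    nghttp3_vec v = nghttp3_rcbuf_get_buf(value);
    (void)conn; (void)token; (void)flags;
    (void)conn_user_data; (void)stream_user_data;

    fprintf(stderr, "stream %lld: %.*s: %.*s\n", (long long)stream_id,
            (int)k.len, (const char *)k.base,
            (int)v.len, (const char *)v.base);

    return 0; /* nonzero is treated as NGHTTP3_ERR_CALLBACK_FAILURE */
  }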
* Returning :macro:`NGHTTP3_ERR_CALLBACK_FAILURE` will return to the @@ -1779,13 +1849,14 @@ typedef int (*nghttp3_reset_stream)(nghttp3_conn *conn, int64_t stream_id, * * :type:`nghttp3_shutdown` is a callback function which is invoked * when a shutdown is initiated by the remote endpoint. For client, - * |id| contains a stream id of a client initiated stream, for server, - * it contains a push id. All client streams with stream id or pushes - * with push id equal to or larger than |id| are guaranteed to not be - * processed by the remote endpoint. + * |id| contains a stream ID of a client initiated stream, for server, + * it contains a push ID. All client streams with stream ID, or pushes + * with push ID equal to, or larger than |ID| are guaranteed to not be + * processed by the remote endpoint. Note that libnghttp3 does not + * implement Server Push. * * Parameter |id| for client can contain a special value - * :macro:`NGHTTP3_SHUTDOWN_NOTICE_STREAM_ID` and for server it can + * :macro:`NGHTTP3_SHUTDOWN_NOTICE_STREAM_ID`, and for server it can * contain special value * :macro:`NGHTTP3_SHUTDOWN_NOTICE_PUSH_ID`. These values signal * request for graceful shutdown of the connection, triggered by @@ -1804,8 +1875,24 @@ typedef int (*nghttp3_reset_stream)(nghttp3_conn *conn, int64_t stream_id, typedef int (*nghttp3_shutdown)(nghttp3_conn *conn, int64_t id, void *conn_user_data); -#define NGHTTP3_CALLBACKS_VERSION_V1 1 -#define NGHTTP3_CALLBACKS_VERSION NGHTTP3_CALLBACKS_VERSION_V1 +/** + * @functypedef + * + * :type:`nghttp3_recv_settings` is a callback function which is + * invoked when SETTINGS frame is received. |settings| is a received + * remote HTTP/3 settings. + * + * The implementation of this callback must return 0 if it succeeds. + * Returning :macro:`NGHTTP3_ERR_CALLBACK_FAILURE` will return to the + * caller immediately. Any values other than 0 is treated as + * :macro:`NGHTTP3_ERR_CALLBACK_FAILURE`. + */ +typedef int (*nghttp3_recv_settings)(nghttp3_conn *conn, + const nghttp3_settings *settings, + void *conn_user_data); + +#define NGHTTP3_CALLBACKS_V1 1 +#define NGHTTP3_CALLBACKS_VERSION NGHTTP3_CALLBACKS_V1 /** * @struct @@ -1837,32 +1924,38 @@ typedef struct nghttp3_callbacks { nghttp3_deferred_consume deferred_consume; /** * :member:`begin_headers` is a callback function which is invoked - * when a header block has started on a particular stream. + * when an HTTP header field section has started on a particular + * stream. */ nghttp3_begin_headers begin_headers; /** * :member:`recv_header` is a callback function which is invoked - * when a single header field is received on a particular stream. + * when a single HTTP header field is received on a particular + * stream. */ nghttp3_recv_header recv_header; /** * :member:`end_headers` is a callback function which is invoked - * when a header block has ended on a particular stream. + * when an HTTP header field section has ended on a particular + * stream. */ nghttp3_end_headers end_headers; /** * :member:`begin_trailers` is a callback function which is invoked - * when a trailer block has started on a particular stream. + * when an HTTP trailer field section has started on a particular + * stream. */ nghttp3_begin_headers begin_trailers; /** * :member:`recv_trailer` is a callback function which is invoked - * when a single trailer field is received on a particular stream. + * when a single HTTP trailer field is received on a particular + * stream. 
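To make the new nghttp3_recv_settings hook concrete, a minimal sketch (illustrative only; the app_context type and its fields are hypothetical, not part of nghttp3):

  #include <stdint.h>
  #include <nghttp3/nghttp3.h>

  /* Hypothetical application state. */
  typedef struct app_context {
    uint64_t peer_max_field_section_size;
    uint8_t peer_h3_datagram;
  } app_context;

  /* Illustrative only: record the peer's SETTINGS when received. */
  static int on_recv_settings(nghttp3_conn *conn,
                              const nghttp3_settings *settings,
                              void *conn_user_data) {
    app_context *ctx = conn_user_data;
    (void)conn;

    ctx->peer_max_field_section_size = settings->max_field_section_size;
    ctx->peer_h3_datagram = settings->h3_datagram;

    return 0; /* nonzero is treated as NGHTTP3_ERR_CALLBACK_FAILURE */
  }

It would then be assigned to the recv_settings member of nghttp3_callbacks alongside the other callbacks listed here.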
*/ nghttp3_recv_header recv_trailer; /** * :member:`end_trailers` is a callback function which is invoked - * when a trailer block has ended on a particular stream. + * when an HTTP trailer field section has ended on a particular + * stream. */ nghttp3_end_headers end_trailers; /** @@ -1884,52 +1977,16 @@ typedef struct nghttp3_callbacks { nghttp3_reset_stream reset_stream; /** * :member:`shutdown` is a callback function which is invoked when - * the remote endpoint has signalled initiation of connection shutdown. + * the remote endpoint has signalled initiation of connection + * shutdown. */ nghttp3_shutdown shutdown; -} nghttp3_callbacks; - -#define NGHTTP3_SETTINGS_VERSION_V1 1 -#define NGHTTP3_SETTINGS_VERSION NGHTTP3_SETTINGS_VERSION_V1 - -/** - * @struct - * - * :type:`nghttp3_settings` defines HTTP/3 settings. - */ -typedef struct nghttp3_settings { - /** - * :member:`max_field_section_size` specifies the maximum header - * section (block) size. - */ - uint64_t max_field_section_size; - /** - * :member:`qpack_max_dtable_capacity` is the maximum size of QPACK - * dynamic table. - */ - size_t qpack_max_dtable_capacity; - /** - * :member:`qpack_encoder_max_dtable_capacity` is the upper bound of - * QPACK dynamic table capacity that the QPACK encoder is willing to - * use. The effective maximum dynamic table capacity is the minimum - * of this field and the value of the received - * SETTINGS_QPACK_MAX_TABLE_CAPACITY. If this field is set to 0, - * the encoder does not use the dynamic table. - */ - size_t qpack_encoder_max_dtable_capacity; /** - * :member:`qpack_blocked_streams` is the maximum number of streams - * which can be blocked while they are being decoded. + * :member:`recv_settings` is a callback function which is invoked + * when SETTINGS frame is received. */ - size_t qpack_blocked_streams; - /** - * :member:`enable_connect_protocol`, if set to nonzero, enables - * Extended CONNECT Method (see - * https://www.ietf.org/archive/id/draft-ietf-httpbis-h3-websockets-00.html). - * Client ignores this field. - */ - int enable_connect_protocol; -} nghttp3_settings; + nghttp3_recv_settings recv_settings; +} nghttp3_callbacks; /** * @function @@ -1955,10 +2012,16 @@ nghttp3_settings_default_versioned(int settings_version, /** * @function * - * `nghttp3_conn_client_new` creates :type:`nghttp3_conn` and + * `nghttp3_conn_client_new` creates :type:`nghttp3_conn`, and * initializes it for client use. The pointer to the object is stored * in |*pconn|. If |mem| is ``NULL``, the memory allocator returned * by `nghttp3_mem_default` is used. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_client_new_versioned(nghttp3_conn **pconn, int callbacks_version, @@ -1970,10 +2033,16 @@ nghttp3_conn_client_new_versioned(nghttp3_conn **pconn, int callbacks_version, /** * @function * - * `nghttp3_conn_server_new` creates :type:`nghttp3_conn` and + * `nghttp3_conn_server_new` creates :type:`nghttp3_conn`, and * initializes it for server use. The pointer to the object is stored * in |*pconn|. If |mem| is ``NULL``, the memory allocator returned * by `nghttp3_mem_default` is used. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. 
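Tying the NGHTTP3_ERR_NOMEM case above to code, a hedged sketch of server-side construction through the convenience macro that wraps the _versioned constructor (callback and settings setup elided):

  #include <stddef.h>
  #include <nghttp3/nghttp3.h>

  /* Illustrative only: create a server-side connection object. */
  static nghttp3_conn *example_server_conn(const nghttp3_callbacks *callbacks,
                                           const nghttp3_settings *settings,
                                           void *user_data) {
    nghttp3_conn *conn;
    int rv = nghttp3_conn_server_new(&conn, callbacks, settings,
                                     NULL /* use nghttp3_mem_default() */,
                                     user_data);
    if (rv != 0) {
      return NULL; /* per the error list above, rv is NGHTTP3_ERR_NOMEM */
    }
    return conn;
  }

  /* Later, nghttp3_conn_del(conn) releases it; passing NULL is a no-op. */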
*/ NGHTTP3_EXTERN int nghttp3_conn_server_new_versioned(nghttp3_conn **pconn, int callbacks_version, @@ -1985,7 +2054,9 @@ nghttp3_conn_server_new_versioned(nghttp3_conn **pconn, int callbacks_version, /** * @function * - * `nghttp3_conn_del` frees resources allocated for |conn|. + * `nghttp3_conn_del` frees resources allocated for |conn|. This + * function also frees memory pointed by |conn| itself. This function + * does nothing if |conn| is NULL. */ NGHTTP3_EXTERN void nghttp3_conn_del(nghttp3_conn *conn); @@ -2000,8 +2071,8 @@ NGHTTP3_EXTERN void nghttp3_conn_del(nghttp3_conn *conn); * * :macro:`NGHTTP3_ERR_INVALID_STATE` * Control stream has already corresponding stream ID. - * - * TBD + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_bind_control_stream(nghttp3_conn *conn, int64_t stream_id); @@ -2010,7 +2081,7 @@ NGHTTP3_EXTERN int nghttp3_conn_bind_control_stream(nghttp3_conn *conn, * @function * * `nghttp3_conn_bind_qpack_streams` binds stream denoted by - * |qenc_stream_id| to outgoing QPACK encoder stream and stream + * |qenc_stream_id| to outgoing QPACK encoder stream, and stream * denoted by |qdec_stream_id| to outgoing QPACK encoder stream. * * This function returns 0 if it succeeds, or one of the following @@ -2019,8 +2090,8 @@ NGHTTP3_EXTERN int nghttp3_conn_bind_control_stream(nghttp3_conn *conn, * :macro:`NGHTTP3_ERR_INVALID_STATE` * QPACK encoder/decoder stream have already corresponding stream * IDs. - * - * TBD + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_bind_qpack_streams(nghttp3_conn *conn, int64_t qenc_stream_id, @@ -2038,6 +2109,18 @@ NGHTTP3_EXTERN int nghttp3_conn_bind_qpack_streams(nghttp3_conn *conn, * any control or QPACK unidirectional streams) . See * :type:`nghttp3_recv_data` to handle those bytes. If |fin| is * nonzero, this is the last data from remote endpoint in this stream. + * + * This function returns the number of bytes consumed, or one of the + * following negative error codes: + * + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. + * :macro:`NGHTTP3_ERR_CALLBACK_FAILURE` + * User callback failed. + * + * It may return the other error codes. In general, the negative + * error code means that |conn| encountered a connection error, and + * the connection should be closed. */ NGHTTP3_EXTERN nghttp3_ssize nghttp3_conn_read_stream(nghttp3_conn *conn, int64_t stream_id, @@ -2048,18 +2131,30 @@ NGHTTP3_EXTERN nghttp3_ssize nghttp3_conn_read_stream(nghttp3_conn *conn, * @function * * `nghttp3_conn_writev_stream` stores stream data to send to |vec| of - * length |veccnt| and returns the number of nghttp3_vec object in + * length |veccnt|, and returns the number of nghttp3_vec object in * which it stored data. It stores stream ID to |*pstream_id|. An * application has to call `nghttp3_conn_add_write_offset` to inform * |conn| of the actual number of bytes that underlying QUIC stack * accepted. |*pfin| will be nonzero if this is the last data to * send. If there is no stream to write data or send fin, this * function returns 0, and -1 is assigned to |*pstream_id|. This - * function may return 0 and |*pstream_id| is not -1 and |*pfin| is - * nonzero. It means 0 length data to |*pstream_id| and it is the + * function may return 0, and |*pstream_id| is not -1, and |*pfin| is + * nonzero. It means 0 length data to |*pstream_id|, and it is the * last data to the stream. 
They must be passed to QUIC stack, and * they are accepted, the application has to call - * `nghttp3_conn_add_write_offset`. + * `nghttp3_conn_add_write_offset` with 0 byte. + * + * This function returns the number of bytes consumed, or one of the + * following negative error codes: + * + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. + * :macro:`NGHTTP3_ERR_CALLBACK_FAILURE` + * User callback failed. + * + * It may return the other error codes. In general, the negative + * error code means that |conn| encountered a connection error, and + * the connection should be closed. */ NGHTTP3_EXTERN nghttp3_ssize nghttp3_conn_writev_stream(nghttp3_conn *conn, int64_t *pstream_id, @@ -2081,6 +2176,15 @@ NGHTTP3_EXTERN nghttp3_ssize nghttp3_conn_writev_stream(nghttp3_conn *conn, * `nghttp3_conn_writev_stream` must be called before calling this * function to get data to send, and those data must be fed into QUIC * stack. + * + * If a stream denoted by |stream_id| is not found, this function + * returns 0. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_add_write_offset(nghttp3_conn *conn, int64_t stream_id, size_t n); @@ -2090,6 +2194,15 @@ NGHTTP3_EXTERN int nghttp3_conn_add_write_offset(nghttp3_conn *conn, * * `nghttp3_conn_add_ack_offset` tells |conn| the number of bytes |n| * for stream denoted by |stream_id| QUIC stack has acknowledged. + * + * If a stream denoted by |stream_id| is not found, this function + * returns 0. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_CALLBACK_FAILURE` + * User callback failed. */ NGHTTP3_EXTERN int nghttp3_conn_add_ack_offset(nghttp3_conn *conn, int64_t stream_id, uint64_t n); @@ -2107,8 +2220,17 @@ NGHTTP3_EXTERN void nghttp3_conn_block_stream(nghttp3_conn *conn, * @function * * `nghttp3_conn_unblock_stream` tells the library that stream - * identified by |stream_id| which was blocked by QUIC flow control is - * unblocked. + * identified by |stream_id| which was blocked by QUIC flow control + * (see `nghttp3_conn_block_stream`) is unblocked. + * + * If a stream denoted by |stream_id| is not found, this function + * returns 0. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_unblock_stream(nghttp3_conn *conn, int64_t stream_id); @@ -2148,9 +2270,24 @@ NGHTTP3_EXTERN void nghttp3_conn_shutdown_stream_write(nghttp3_conn *conn, * @function * * `nghttp3_conn_shutdown_stream_read` tells the library that - * read-side of stream denoted by |stream_id| is abruptly closed and + * read-side of stream denoted by |stream_id| is abruptly closed, and * any further incoming data and pending stream data should be * discarded. + * + * If a stream denoted by |stream_id| is not client bidirectional + * stream, this function returns 0. If the stream has already + * shutdown read-side stream, this function returns 0. + * + * This function does not fail if a stream denoted by |stream_id| is + * not found, although it may fail with the other reasons. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. + * :macro:`NGHTTP3_ERR_QPACK_FATAL` + * QPACK decoder stream overflow. 
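A hedged sketch of the send loop these functions describe (illustrative only; quic_send_stream is a hypothetical stand-in for whatever the QUIC stack provides, not an nghttp3 API):

  #include <nghttp3/nghttp3.h>

  /* Hypothetical QUIC-stack hook: returns how many bytes of the
   * supplied vectors it accepted. */
  extern size_t quic_send_stream(int64_t stream_id, const nghttp3_vec *vec,
                                 size_t veccnt, int fin);

  /* Illustrative only: drain pending HTTP/3 stream data into QUIC. */
  static int example_write(nghttp3_conn *conn) {
    nghttp3_vec vec[16];
    int64_t stream_id;
    int fin;

    for (;;) {
      nghttp3_ssize veccnt =
          nghttp3_conn_writev_stream(conn, &stream_id, &fin, vec, 16);
      if (veccnt < 0) {
        return (int)veccnt; /* connection error; tear the connection down */
      }
      if (veccnt == 0 && stream_id == -1) {
        return 0; /* nothing to send for now */
      }

      /* Hand the data (possibly zero length, fin only) to the QUIC
       * stack, then report how much it actually accepted. */
      size_t accepted = quic_send_stream(stream_id, vec, (size_t)veccnt, fin);

      int rv = nghttp3_conn_add_write_offset(conn, stream_id, accepted);
      if (rv != 0) {
        return rv;
      }
    }
  }

When the QUIC stack later acknowledges those bytes, `nghttp3_conn_add_ack_offset` reports the acknowledged length in the same way.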
*/ NGHTTP3_EXTERN int nghttp3_conn_shutdown_stream_read(nghttp3_conn *conn, int64_t stream_id); @@ -2159,7 +2296,17 @@ NGHTTP3_EXTERN int nghttp3_conn_shutdown_stream_read(nghttp3_conn *conn, * @function * * `nghttp3_conn_resume_stream` resumes stream identified by - * |stream_id| which was previously unable to provide data. + * |stream_id| which was previously unable to provide data. See + * :type:`nghttp3_read_data_callback`. + * + * If a stream denoted by |stream_id| is not found, this function + * returns 0. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_resume_stream(nghttp3_conn *conn, int64_t stream_id); @@ -2168,7 +2315,8 @@ NGHTTP3_EXTERN int nghttp3_conn_resume_stream(nghttp3_conn *conn, * @function * * `nghttp3_conn_close_stream` closes stream identified by - * |stream_id|. |app_error_code| is the reason of the closure. + * |stream_id|. QUIC application error code |app_error_code| is the + * reason of the closure. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -2213,9 +2361,9 @@ NGHTTP3_EXTERN int nghttp3_conn_close_stream(nghttp3_conn *conn, * :macro:`NGHTTP3_DATA_FLAG_NO_END_STREAM` indicates that sending * side of stream is not closed even if :macro:`NGHTTP3_DATA_FLAG_EOF` * is set. Usually this flag is used to send trailer fields with - * `nghttp3_conn_submit_trailers()`. If - * `nghttp3_conn_submit_trailers()` has been called, regardless of - * this flag, the submitted trailer fields are sent. + * `nghttp3_conn_submit_trailers`. If `nghttp3_conn_submit_trailers` + * has been called, regardless of this flag, the submitted trailer + * fields are sent. */ #define NGHTTP3_DATA_FLAG_NO_END_STREAM 0x02u @@ -2260,8 +2408,8 @@ nghttp3_conn_set_max_concurrent_streams(nghttp3_conn *conn, * :macro:`NGHTTP3_DATA_FLAG_EOF` to |*pflags|. * * If the application is unable to provide data temporarily, return - * :macro:`NGHTTP3_ERR_WOULDBLOCK`. When it is ready to provide - * data, call `nghttp3_conn_resume_stream()`. + * :macro:`NGHTTP3_ERR_WOULDBLOCK`. When it is ready to provide data, + * call `nghttp3_conn_resume_stream`. * * The callback should return the number of objects in |vec| that the * application filled if it succeeds, or @@ -2298,6 +2446,18 @@ typedef struct nghttp3_data_reader { * request body, specify NULL. If |dr| is NULL, it implies the end of * stream. |stream_user_data| is an opaque pointer attached to the * stream. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_INVALID_ARGUMENT` + * |stream_id| identifies unidirectional stream. + * :macro:`NGHTTP3_ERR_CONN_CLOSING` + * Connection is shutting down, and no new stream is allowed. + * :macro:`NGHTTP3_ERR_STREAM_IN_USE` + * Stream has already been opened. + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_submit_request( nghttp3_conn *conn, int64_t stream_id, const nghttp3_nv *nva, size_t nvlen, @@ -2309,6 +2469,14 @@ NGHTTP3_EXTERN int nghttp3_conn_submit_request( * `nghttp3_conn_submit_info` submits HTTP non-final response header * fields on the stream identified by |stream_id|. |nva| of length * |nvlen| specifies HTTP response header fields. 
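Looping back to `nghttp3_conn_submit_request` above, a minimal client-side sketch of a bodyless GET (illustrative only; the header values and stream ID are placeholders, and the usual name/value/namelen/valuelen/flags layout of nghttp3_nv is assumed):

  #include <stdint.h>
  #include <nghttp3/nghttp3.h>

  #define EX_NV(NAME, VALUE)                                                \
    {                                                                       \
      (uint8_t *)(NAME), (uint8_t *)(VALUE), sizeof(NAME) - 1,              \
        sizeof(VALUE) - 1, NGHTTP3_NV_FLAG_NONE                             \
    }

  /* Illustrative only: the stream ID must come from the QUIC stack. */
  static int example_submit_get(nghttp3_conn *conn, int64_t stream_id) {
    const nghttp3_nv nva[] = {
        EX_NV(":method", "GET"),
        EX_NV(":scheme", "https"),
        EX_NV(":authority", "example.com"),
        EX_NV(":path", "/"),
    };

    /* NULL data reader: the request ends with the header section. */
    return nghttp3_conn_submit_request(conn, stream_id, nva,
                                       sizeof(nva) / sizeof(nva[0]), NULL,
                                       NULL /* stream_user_data */);
  }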
+ * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_STREAM_NOT_FOUND` + * Stream not found + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_submit_info(nghttp3_conn *conn, int64_t stream_id, @@ -2323,6 +2491,14 @@ NGHTTP3_EXTERN int nghttp3_conn_submit_info(nghttp3_conn *conn, * |nvlen| specifies HTTP response header fields. |dr| specifies a * response body. If there is no response body, specify NULL. If * |dr| is NULL, it implies the end of stream. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_STREAM_NOT_FOUND` + * Stream not found + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_submit_response(nghttp3_conn *conn, int64_t stream_id, @@ -2337,6 +2513,16 @@ NGHTTP3_EXTERN int nghttp3_conn_submit_response(nghttp3_conn *conn, * stream identified by |stream_id|. |nva| of length |nvlen| * specifies HTTP trailer fields. Calling this function implies the * end of stream. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_STREAM_NOT_FOUND` + * Stream not found + * :macro:`NGHTTP3_ERR_INVALID_STATE` + * Application has already submitted fin to stream. + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_submit_trailers(nghttp3_conn *conn, int64_t stream_id, @@ -2349,6 +2535,12 @@ NGHTTP3_EXTERN int nghttp3_conn_submit_trailers(nghttp3_conn *conn, * `nghttp3_conn_submit_shutdown_notice` notifies the other endpoint * to stop creating new stream. After a couple of RTTs later, call * `nghttp3_conn_shutdown` to start graceful shutdown. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_submit_shutdown_notice(nghttp3_conn *conn); @@ -2357,9 +2549,15 @@ NGHTTP3_EXTERN int nghttp3_conn_submit_shutdown_notice(nghttp3_conn *conn); * * `nghttp3_conn_shutdown` starts graceful shutdown. It should be * called after `nghttp3_conn_submit_shutdown_notice` and a couple of - * RTT. After calling this function, the local endpoint starts + * RTTs. After calling this function, the local endpoint starts * rejecting new incoming streams. The existing streams are processed - * normally. + * normally. See also `nghttp3_conn_is_drained`. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ NGHTTP3_EXTERN int nghttp3_conn_shutdown(nghttp3_conn *conn); @@ -2368,6 +2566,12 @@ NGHTTP3_EXTERN int nghttp3_conn_shutdown(nghttp3_conn *conn); * * `nghttp3_conn_set_stream_user_data` sets |stream_user_data| to the * stream identified by |stream_id|. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_STREAM_NOT_FOUND` + * Stream not found. */ NGHTTP3_EXTERN int nghttp3_conn_set_stream_user_data(nghttp3_conn *conn, int64_t stream_id, @@ -2378,7 +2582,9 @@ NGHTTP3_EXTERN int nghttp3_conn_set_stream_user_data(nghttp3_conn *conn, * * `nghttp3_conn_get_frame_payload_left` returns the number of bytes * left to read current frame payload for a stream denoted by - * |stream_id|. If no such stream is found, it returns 0. + * |stream_id|. 
If no such stream is found, or |stream_id| identifies + * neither client bidirectional stream nor remote control stream, it + * returns 0. */ NGHTTP3_EXTERN uint64_t nghttp3_conn_get_frame_payload_left(nghttp3_conn *conn, int64_t stream_id); @@ -2417,12 +2623,15 @@ NGHTTP3_EXTERN uint64_t nghttp3_conn_get_frame_payload_left(nghttp3_conn *conn, */ #define NGHTTP3_URGENCY_LEVELS (NGHTTP3_URGENCY_LOW + 1) +#define NGHTTP3_PRI_V1 1 +#define NGHTTP3_PRI_VERSION NGHTTP3_PRI_V1 + /** * @struct * * :type:`nghttp3_pri` represents HTTP priority. */ -typedef struct nghttp3_pri { +typedef struct NGHTTP3_ALIGN(8) nghttp3_pri { /** * :member:`urgency` is the urgency of a stream, it must be in * [:macro:`NGHTTP3_URGENCY_HIGH`, :macro:`NGHTTP3_URGENCY_LOW`], @@ -2431,11 +2640,11 @@ typedef struct nghttp3_pri { uint32_t urgency; /** * :member:`inc` indicates that a content can be processed - * incrementally or not. If inc is 0, it cannot be processed - * incrementally. If inc is 1, it can be processed incrementally. + * incrementally or not. If it is 0, it cannot be processed + * incrementally. If it is 1, it can be processed incrementally. * Other value is not permitted. */ - int inc; + uint8_t inc; } nghttp3_pri; /** @@ -2452,26 +2661,25 @@ typedef struct nghttp3_pri { * This function returns 0 if it succeeds, or one of the following * negative error codes: * + * :macro:`NGHTTP3_ERR_INVALID_ARGUMENT` + * |stream_id| is not a client initiated bidirectional stream ID. * :macro:`NGHTTP3_ERR_STREAM_NOT_FOUND` * Stream not found. */ -NGHTTP3_EXTERN int nghttp3_conn_get_stream_priority(nghttp3_conn *conn, - nghttp3_pri *dest, - int64_t stream_id); +NGHTTP3_EXTERN int nghttp3_conn_get_stream_priority_versioned( + nghttp3_conn *conn, int pri_version, nghttp3_pri *dest, int64_t stream_id); /** * @function * - * `nghttp3_conn_set_stream_priority` updates priority of a stream - * denoted by |stream_id| with the value pointed by |pri|. - * |stream_id| must identify client initiated bidirectional stream. - * - * Both client and server can update stream priority with this - * function. + * `nghttp3_conn_set_client_stream_priority` updates priority of a + * stream denoted by |stream_id| with the value pointed by |data| of + * length |datalen|, which should be a serialized :rfc:`9218` priority + * field value. |stream_id| must identify client initiated + * bidirectional stream. * - * If server updates stream priority with this function, it completely - * overrides stream priority set by client and the attempts to update - * priority by client are ignored. + * This function must not be called if |conn| is initialized as + * server. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -2483,20 +2691,37 @@ NGHTTP3_EXTERN int nghttp3_conn_get_stream_priority(nghttp3_conn *conn, * :macro:`NGHTTP3_ERR_NOMEM` * Out of memory. */ -NGHTTP3_EXTERN int nghttp3_conn_set_stream_priority(nghttp3_conn *conn, - int64_t stream_id, - const nghttp3_pri *pri); +NGHTTP3_EXTERN int nghttp3_conn_set_client_stream_priority(nghttp3_conn *conn, + int64_t stream_id, + const uint8_t *data, + size_t datalen); /** * @function * - * `nghttp3_conn_is_remote_qpack_encoder_stream` returns nonzero if a - * stream denoted by |stream_id| is QPACK encoder stream of a remote - * endpoint. + * `nghttp3_conn_set_server_stream_priority` updates priority of a + * stream denoted by |stream_id| with the value pointed by |pri|. + * |stream_id| must identify client initiated bidirectional stream. 
+ * + * This function must not be called if |conn| is initialized as + * client. + * + * This function completely overrides stream priority set by client, + * and any attempts to update priority by client are ignored. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_INVALID_ARGUMENT` + * |stream_id| is not a client initiated bidirectional stream ID. + * :macro:`NGHTTP3_ERR_STREAM_NOT_FOUND` + * Stream not found. + * :macro:`NGHTTP3_ERR_NOMEM` + * Out of memory. */ -NGHTTP3_EXTERN int -nghttp3_conn_is_remote_qpack_encoder_stream(nghttp3_conn *conn, - int64_t stream_id); +NGHTTP3_EXTERN int nghttp3_conn_set_server_stream_priority_versioned( + nghttp3_conn *conn, int64_t stream_id, int pri_version, + const nghttp3_pri *pri); /** * @function @@ -2509,11 +2734,11 @@ NGHTTP3_EXTERN uint64_t nghttp3_vec_len(const nghttp3_vec *vec, size_t cnt); /** * @function * - * `nghttp3_check_header_name` returns nonzero if HTTP header field - * name |name| of length |len| is valid according to + * `nghttp3_check_header_name` returns nonzero if HTTP field name + * |name| of length |len| is valid according to * :rfc:`7230#section-3.2`. * - * Because this is a header field name in HTTP/3, the upper cased + * Because this is an HTTP field name in HTTP/3, the upper cased * alphabet is treated as error. */ NGHTTP3_EXTERN int nghttp3_check_header_name(const uint8_t *name, size_t len); @@ -2521,8 +2746,8 @@ NGHTTP3_EXTERN int nghttp3_check_header_name(const uint8_t *name, size_t len); /** * @function * - * `nghttp3_check_header_value` returns nonzero if HTTP header field - * value |value| of length |len| is valid according to + * `nghttp3_check_header_value` returns nonzero if HTTP field value + * |value| of length |len| is valid according to * :rfc:`7230#section-3.2`. */ NGHTTP3_EXTERN int nghttp3_check_header_value(const uint8_t *value, size_t len); @@ -2530,21 +2755,34 @@ NGHTTP3_EXTERN int nghttp3_check_header_value(const uint8_t *value, size_t len); /** * @function * - * `nghttp3_http_parse_priority` parses priority HTTP header field - * stored in the buffer pointed by |value| of length |len|. If it - * successfully processed header field value, it stores the result - * into |*dest|. This function just overwrites what it sees in the - * header field value and does not initialize any field in |*dest|. + * `nghttp3_conn_is_drained` returns nonzero if + * `nghttp3_conn_shutdown` has been called, and there is no active + * remote streams. This function is for server use only. + */ +NGHTTP3_EXTERN int nghttp3_conn_is_drained(nghttp3_conn *conn); + +/** + * @function + * + * `nghttp3_pri_parse_priority` parses Priority header field value + * pointed by |value| of length |len|, and stores the result in the + * object pointed by |dest|. Priority header field is defined in + * :rfc:`9218`. + * + * This function does not initialize the object pointed by |dest| + * before storing the result. It only assigns the values that the + * parser correctly extracted to fields. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * :macro:`NGHTTP3_ERR_INVALID_ARGUMENT` - * The function could not parse the provided value. + * Failed to parse the header field value. 
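A sketch of how the parse helper and the new server-side setter fit together (illustrative only; the RFC 9218 default urgency of 3 is filled in by hand because, as noted above, the parser only assigns the fields it extracts):

  #include <nghttp3/nghttp3.h>

  /* Illustrative only: parse a Priority header field value and apply
   * it to a client bidirectional stream on the server. */
  static int example_apply_priority(nghttp3_conn *conn, int64_t stream_id,
                                    const uint8_t *value, size_t valuelen) {
    nghttp3_pri pri = {
        .urgency = 3, /* RFC 9218 default urgency */
        .inc = 0,     /* not incremental by default */
    };

    if (nghttp3_pri_parse_priority(&pri, value, valuelen) != 0) {
      /* NGHTTP3_ERR_INVALID_ARGUMENT: malformed value; policy decides
         whether to keep defaults or ignore the field. */
      return 0;
    }

    /* Overrides any priority signalled by the client for this stream. */
    return nghttp3_conn_set_server_stream_priority(conn, stream_id, &pri);
  }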
*/ -NGHTTP3_EXTERN int nghttp3_http_parse_priority(nghttp3_pri *dest, - const uint8_t *value, - size_t len); +NGHTTP3_EXTERN int nghttp3_pri_parse_priority_versioned(int pri_version, + nghttp3_pri *dest, + const uint8_t *value, + size_t len); /** * @macrosection @@ -2562,7 +2800,7 @@ NGHTTP3_EXTERN int nghttp3_http_parse_priority(nghttp3_pri *dest, /** * @struct * - * :type:`nghttp3_info` is what `nghttp3_version()` returns. It holds + * :type:`nghttp3_info` is what `nghttp3_version` returns. It holds * information about the particular nghttp3 version. */ typedef struct nghttp3_info { @@ -2574,7 +2812,7 @@ typedef struct nghttp3_info { int age; /** * :member:`version_num` is the :macro:`NGHTTP3_VERSION_NUM` number - * (since age ==1) + * (since age == 1) */ int version_num; /** @@ -2591,7 +2829,7 @@ typedef struct nghttp3_info { * `nghttp3_version` returns a pointer to a :type:`nghttp3_info` * struct with version information about the run-time library in use. * The |least_version| argument can be set to a 24 bit numerical value - * for the least accepted version number and if the condition is not + * for the least accepted version number, and if the condition is not * met, this function will return a ``NULL``. Pass in 0 to skip the * version checking. */ @@ -2639,6 +2877,33 @@ NGHTTP3_EXTERN int nghttp3_err_is_fatal(int liberr); (CALLBACKS), NGHTTP3_SETTINGS_VERSION, \ (SETTINGS), (MEM), (USER_DATA)) +/* + * `nghttp3_conn_set_server_stream_priority` is a wrapper around + * `nghttp3_conn_set_server_stream_priority_versioned` to set the + * correct struct version. + */ +#define nghttp3_conn_set_server_stream_priority(CONN, STREAM_ID, PRI) \ + nghttp3_conn_set_server_stream_priority_versioned( \ + (CONN), (STREAM_ID), NGHTTP3_PRI_VERSION, (PRI)) + +/* + * `nghttp3_conn_get_stream_priority` is a wrapper around + * `nghttp3_conn_get_stream_priority_versioned` to set the correct + * struct version. + */ +#define nghttp3_conn_get_stream_priority(CONN, DEST, STREAM_ID) \ + nghttp3_conn_get_stream_priority_versioned((CONN), NGHTTP3_PRI_VERSION, \ + (DEST), (STREAM_ID)) + +/* + * `nghttp3_pri_parse_priority` is a wrapper around + * `nghttp3_pri_parse_priority_versioned` to set the correct struct + * version. + */ +#define nghttp3_pri_parse_priority(DEST, VALUE, LEN) \ + nghttp3_pri_parse_priority_versioned(NGHTTP3_PRI_VERSION, (DEST), (VALUE), \ + (LEN)) + #ifdef __cplusplus } #endif diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_conn.c b/deps/ngtcp2/nghttp3/lib/nghttp3_conn.c index 1fbb72c98af2f2..25aaf685734cb1 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_conn.c +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_conn.c @@ -33,11 +33,14 @@ #include "nghttp3_err.h" #include "nghttp3_conv.h" #include "nghttp3_http.h" +#include "nghttp3_unreachable.h" /* NGHTTP3_QPACK_ENCODER_MAX_DTABLE_CAPACITY is the upper bound of the dynamic table capacity that QPACK encoder is willing to use. */ #define NGHTTP3_QPACK_ENCODER_MAX_DTABLE_CAPACITY 4096 +nghttp3_objalloc_def(chunk, nghttp3_chunk, oplent); + /* * conn_remote_stream_uni returns nonzero if |stream_id| is remote * unidirectional stream ID. 
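For completeness, a hedged sketch of the run-time version check described by `nghttp3_version` above (illustrative only; version_str is assumed to be the human-readable member that completes the nghttp3_info struct quoted here):

  #include <stdio.h>
  #include <nghttp3/nghttp3.h>

  /* Illustrative only: fail fast if the run-time libnghttp3 is older
   * than the headers this binary was compiled against. */
  static int example_check_version(void) {
    const nghttp3_info *info = nghttp3_version(NGHTTP3_VERSION_NUM);
    if (info == NULL) {
      fprintf(stderr, "libnghttp3 run-time library is too old\n");
      return -1;
    }
    fprintf(stderr, "using nghttp3 %s\n", info->version_str);
    return 0;
  }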
@@ -56,7 +59,7 @@ static int conn_call_begin_headers(nghttp3_conn *conn, nghttp3_stream *stream) { return 0; } - rv = conn->callbacks.begin_headers(conn, stream->node.nid.id, conn->user_data, + rv = conn->callbacks.begin_headers(conn, stream->node.id, conn->user_data, stream->user_data); if (rv != 0) { /* TODO Allow ignore headers */ @@ -74,8 +77,8 @@ static int conn_call_end_headers(nghttp3_conn *conn, nghttp3_stream *stream, return 0; } - rv = conn->callbacks.end_headers(conn, stream->node.nid.id, fin, - conn->user_data, stream->user_data); + rv = conn->callbacks.end_headers(conn, stream->node.id, fin, conn->user_data, + stream->user_data); if (rv != 0) { /* TODO Allow ignore headers */ return NGHTTP3_ERR_CALLBACK_FAILURE; @@ -92,8 +95,8 @@ static int conn_call_begin_trailers(nghttp3_conn *conn, return 0; } - rv = conn->callbacks.begin_trailers(conn, stream->node.nid.id, - conn->user_data, stream->user_data); + rv = conn->callbacks.begin_trailers(conn, stream->node.id, conn->user_data, + stream->user_data); if (rv != 0) { /* TODO Allow ignore headers */ return NGHTTP3_ERR_CALLBACK_FAILURE; @@ -110,8 +113,8 @@ static int conn_call_end_trailers(nghttp3_conn *conn, nghttp3_stream *stream, return 0; } - rv = conn->callbacks.end_trailers(conn, stream->node.nid.id, fin, - conn->user_data, stream->user_data); + rv = conn->callbacks.end_trailers(conn, stream->node.id, fin, conn->user_data, + stream->user_data); if (rv != 0) { /* TODO Allow ignore headers */ return NGHTTP3_ERR_CALLBACK_FAILURE; @@ -127,7 +130,7 @@ static int conn_call_end_stream(nghttp3_conn *conn, nghttp3_stream *stream) { return 0; } - rv = conn->callbacks.end_stream(conn, stream->node.nid.id, conn->user_data, + rv = conn->callbacks.end_stream(conn, stream->node.id, conn->user_data, stream->user_data); if (rv != 0) { return NGHTTP3_ERR_CALLBACK_FAILURE; @@ -144,7 +147,7 @@ static int conn_call_stop_sending(nghttp3_conn *conn, nghttp3_stream *stream, return 0; } - rv = conn->callbacks.stop_sending(conn, stream->node.nid.id, app_error_code, + rv = conn->callbacks.stop_sending(conn, stream->node.id, app_error_code, conn->user_data, stream->user_data); if (rv != 0) { return NGHTTP3_ERR_CALLBACK_FAILURE; @@ -161,7 +164,7 @@ static int conn_call_reset_stream(nghttp3_conn *conn, nghttp3_stream *stream, return 0; } - rv = conn->callbacks.reset_stream(conn, stream->node.nid.id, app_error_code, + rv = conn->callbacks.reset_stream(conn, stream->node.id, app_error_code, conn->user_data, stream->user_data); if (rv != 0) { return NGHTTP3_ERR_CALLBACK_FAILURE; @@ -179,7 +182,7 @@ static int conn_call_deferred_consume(nghttp3_conn *conn, return 0; } - rv = conn->callbacks.deferred_consume(conn, stream->node.nid.id, nconsumed, + rv = conn->callbacks.deferred_consume(conn, stream->node.id, nconsumed, conn->user_data, stream->user_data); if (rv != 0) { return NGHTTP3_ERR_CALLBACK_FAILURE; @@ -188,6 +191,22 @@ static int conn_call_deferred_consume(nghttp3_conn *conn, return 0; } +static int conn_call_recv_settings(nghttp3_conn *conn) { + int rv; + + if (!conn->callbacks.recv_settings) { + return 0; + } + + rv = conn->callbacks.recv_settings(conn, &conn->remote.settings, + conn->user_data); + if (rv != 0) { + return NGHTTP3_ERR_CALLBACK_FAILURE; + } + + return 0; +} + static int ricnt_less(const nghttp3_pq_entry *lhsx, const nghttp3_pq_entry *rhsx) { nghttp3_stream *lhs = @@ -204,7 +223,7 @@ static int cycle_less(const nghttp3_pq_entry *lhsx, const nghttp3_tnode *rhs = nghttp3_struct_of(rhsx, nghttp3_tnode, pe); if (lhs->cycle == rhs->cycle) { - 
return lhs->seq < rhs->seq; + return lhs->id < rhs->id; } return rhs->cycle - lhs->cycle <= NGHTTP3_TNODE_MAX_CYCLE_GAP; @@ -264,7 +283,6 @@ static int conn_new(nghttp3_conn **pconn, int server, int callbacks_version, nghttp3_settings_default(&conn->remote.settings); conn->mem = mem; conn->user_data = user_data; - conn->next_seq = 0; conn->server = server; conn->rx.goaway_id = NGHTTP3_VARINT_MAX + 1; conn->tx.goaway_id = NGHTTP3_VARINT_MAX + 1; @@ -454,12 +472,6 @@ nghttp3_ssize nghttp3_conn_read_stream(nghttp3_conn *conn, int64_t stream_id, stream->tx.hstate = NGHTTP3_HTTP_STATE_REQ_INITIAL; } } - } else if (nghttp3_stream_uni(stream_id) && - stream->type == NGHTTP3_STREAM_TYPE_PUSH) { - if (stream->rx.hstate == NGHTTP3_HTTP_STATE_NONE) { - stream->rx.hstate = NGHTTP3_HTTP_STATE_RESP_INITIAL; - stream->tx.hstate = NGHTTP3_HTTP_STATE_RESP_INITIAL; - } } if (srclen == 0 && !fin) { @@ -603,8 +615,7 @@ nghttp3_ssize nghttp3_conn_read_uni(nghttp3_conn *conn, nghttp3_stream *stream, } break; default: - /* unreachable */ - assert(0); + nghttp3_unreachable(); } if (nconsumed < 0) { @@ -686,6 +697,11 @@ nghttp3_ssize nghttp3_conn_read_control(nghttp3_conn *conn, case NGHTTP3_FRAME_SETTINGS: /* SETTINGS frame might be empty. */ if (rstate->left == 0) { + rv = conn_call_recv_settings(conn); + if (rv != 0) { + return rv; + } + nghttp3_stream_read_state_reset(rstate); break; } @@ -735,11 +751,21 @@ nghttp3_ssize nghttp3_conn_read_control(nghttp3_conn *conn, } break; case NGHTTP3_CTRL_STREAM_STATE_SETTINGS: - for (; p != end;) { + for (;;) { if (rstate->left == 0) { + rv = conn_call_recv_settings(conn); + if (rv != 0) { + return rv; + } + nghttp3_stream_read_state_reset(rstate); break; } + + if (p == end) { + return (nghttp3_ssize)nconsumed; + } + /* Read Identifier */ len = (size_t)nghttp3_min(rstate->left, (int64_t)(end - p)); assert(len > 0); @@ -845,6 +871,11 @@ nghttp3_ssize nghttp3_conn_read_control(nghttp3_conn *conn, break; } + rv = conn_call_recv_settings(conn); + if (rv != 0) { + return rv; + } + nghttp3_stream_read_state_reset(rstate); break; case NGHTTP3_CTRL_STREAM_STATE_GOAWAY: @@ -1013,8 +1044,7 @@ nghttp3_ssize nghttp3_conn_read_control(nghttp3_conn *conn, nghttp3_stream_read_state_reset(rstate); break; default: - /* unreachable */ - assert(0); + nghttp3_unreachable(); } } @@ -1022,7 +1052,7 @@ nghttp3_ssize nghttp3_conn_read_control(nghttp3_conn *conn, } static int conn_delete_stream(nghttp3_conn *conn, nghttp3_stream *stream) { - int bidi = nghttp3_client_stream_bidi(stream->node.nid.id); + int bidi = nghttp3_client_stream_bidi(stream->node.id); int rv; rv = conn_call_deferred_consume(conn, stream, @@ -1032,16 +1062,21 @@ static int conn_delete_stream(nghttp3_conn *conn, nghttp3_stream *stream) { } if (bidi && conn->callbacks.stream_close) { - rv = conn->callbacks.stream_close(conn, stream->node.nid.id, - stream->error_code, conn->user_data, - stream->user_data); + rv = conn->callbacks.stream_close(conn, stream->node.id, stream->error_code, + conn->user_data, stream->user_data); if (rv != 0) { return NGHTTP3_ERR_CALLBACK_FAILURE; } } - rv = nghttp3_map_remove(&conn->streams, - (nghttp3_map_key_type)stream->node.nid.id); + if (conn->server && nghttp3_client_stream_bidi(stream->node.id)) { + assert(conn->remote.bidi.num_streams > 0); + + --conn->remote.bidi.num_streams; + } + + rv = + nghttp3_map_remove(&conn->streams, (nghttp3_map_key_type)stream->node.id); assert(0 == rv); @@ -1058,7 +1093,7 @@ static int conn_process_blocked_stream_data(nghttp3_conn *conn, int rv; size_t len; - 
assert(nghttp3_client_stream_bidi(stream->node.nid.id)); + assert(nghttp3_client_stream_bidi(stream->node.id)); for (;;) { len = nghttp3_ringbuf_len(&stream->inq); @@ -1149,16 +1184,17 @@ static nghttp3_tnode *stream_get_sched_node(nghttp3_stream *stream) { } static int conn_update_stream_priority(nghttp3_conn *conn, - nghttp3_stream *stream, uint8_t pri) { - assert(nghttp3_client_stream_bidi(stream->node.nid.id)); + nghttp3_stream *stream, + const nghttp3_pri *pri) { + assert(nghttp3_client_stream_bidi(stream->node.id)); - if (stream->node.pri == pri) { + if (nghttp3_pri_eq(&stream->node.pri, pri)) { return 0; } nghttp3_conn_unschedule_stream(conn, stream); - stream->node.pri = pri; + stream->node.pri = *pri; if (nghttp3_stream_require_schedule(stream)) { return nghttp3_conn_schedule_stream(conn, stream); @@ -1286,8 +1322,7 @@ nghttp3_ssize nghttp3_conn_read_bidi(nghttp3_conn *conn, size_t *pnproc, rv = conn_call_begin_trailers(conn, stream); break; default: - /* Unreachable */ - assert(0); + nghttp3_unreachable(); } if (rv != 0) { @@ -1377,9 +1412,7 @@ nghttp3_ssize nghttp3_conn_read_bidi(nghttp3_conn *conn, size_t *pnproc, rv = 0; break; default: - /* Unreachable */ - assert(0); - abort(); + nghttp3_unreachable(); } if (rv != 0) { @@ -1398,7 +1431,7 @@ nghttp3_ssize nghttp3_conn_read_bidi(nghttp3_conn *conn, size_t *pnproc, (stream->rx.http.flags & NGHTTP3_HTTP_FLAG_PRIORITY) && !(stream->flags & NGHTTP3_STREAM_FLAG_PRIORITY_UPDATE_RECVED) && !(stream->flags & NGHTTP3_STREAM_FLAG_SERVER_PRIORITY_SET)) { - rv = conn_update_stream_priority(conn, stream, stream->rx.http.pri); + rv = conn_update_stream_priority(conn, stream, &stream->rx.http.pri); if (rv != 0) { return rv; } @@ -1412,8 +1445,7 @@ nghttp3_ssize nghttp3_conn_read_bidi(nghttp3_conn *conn, size_t *pnproc, rv = conn_call_end_trailers(conn, stream, p == end && fin); break; default: - /* Unreachable */ - assert(0); + nghttp3_unreachable(); } if (rv != 0) { @@ -1505,7 +1537,7 @@ int nghttp3_conn_on_data(nghttp3_conn *conn, nghttp3_stream *stream, return 0; } - rv = conn->callbacks.recv_data(conn, stream->node.nid.id, data, datalen, + rv = conn->callbacks.recv_data(conn, stream->node.id, data, datalen, conn->user_data, stream->user_data); if (rv != 0) { return NGHTTP3_ERR_CALLBACK_FAILURE; @@ -1515,11 +1547,9 @@ int nghttp3_conn_on_data(nghttp3_conn *conn, nghttp3_stream *stream, } static nghttp3_pq *conn_get_sched_pq(nghttp3_conn *conn, nghttp3_tnode *tnode) { - uint32_t urgency = nghttp3_pri_uint8_urgency(tnode->pri); + assert(tnode->pri.urgency < NGHTTP3_URGENCY_LEVELS); - assert(urgency < NGHTTP3_URGENCY_LEVELS); - - return &conn->sched[urgency].spq; + return &conn->sched[tnode->pri.urgency].spq; } static nghttp3_ssize conn_decode_headers(nghttp3_conn *conn, @@ -1552,8 +1582,7 @@ static nghttp3_ssize conn_decode_headers(nghttp3_conn *conn, recv_header = conn->callbacks.recv_trailer; break; default: - /* Unreachable */ - assert(0); + nghttp3_unreachable(); } http = &stream->rx.http; @@ -1606,17 +1635,15 @@ static nghttp3_ssize conn_decode_headers(nghttp3_conn *conn, break; case 0: if (recv_header) { - rv = recv_header(conn, stream->node.nid.id, nv.token, nv.name, - nv.value, nv.flags, conn->user_data, - stream->user_data); + rv = recv_header(conn, stream->node.id, nv.token, nv.name, nv.value, + nv.flags, conn->user_data, stream->user_data); if (rv != 0) { rv = NGHTTP3_ERR_CALLBACK_FAILURE; } } break; default: - /* Unreachable */ - assert(0); + nghttp3_unreachable(); } nghttp3_rcbuf_decref(nv.name); @@ -1684,13 +1711,32 @@ int 
nghttp3_conn_on_settings_entry_received(nghttp3_conn *conn, if (!conn->server) { break; } - if (ent->value != 0 && ent->value != 1) { + + switch (ent->value) { + case 0: + if (dest->enable_connect_protocol) { + return NGHTTP3_ERR_H3_SETTINGS_ERROR; + } + + break; + case 1: + break; + default: return NGHTTP3_ERR_H3_SETTINGS_ERROR; } - if (ent->value == 0 && dest->enable_connect_protocol) { + + dest->enable_connect_protocol = (uint8_t)ent->value; + break; + case NGHTTP3_SETTINGS_ID_H3_DATAGRAM: + switch (ent->value) { + case 0: + case 1: + break; + default: return NGHTTP3_ERR_H3_SETTINGS_ERROR; } - dest->enable_connect_protocol = (int)ent->value; + + dest->h3_datagram = (uint8_t)ent->value; break; case NGHTTP3_H2_SETTINGS_ID_ENABLE_PUSH: case NGHTTP3_H2_SETTINGS_ID_MAX_CONCURRENT_STREAMS: @@ -1744,7 +1790,7 @@ conn_on_priority_update_stream(nghttp3_conn *conn, return rv; } - stream->node.pri = nghttp3_pri_to_uint8(&fr->pri); + stream->node.pri = fr->pri; stream->flags |= NGHTTP3_STREAM_FLAG_PRIORITY_UPDATE_RECVED; return 0; @@ -1756,8 +1802,7 @@ conn_on_priority_update_stream(nghttp3_conn *conn, stream->flags |= NGHTTP3_STREAM_FLAG_PRIORITY_UPDATE_RECVED; - return conn_update_stream_priority(conn, stream, - nghttp3_pri_to_uint8(&fr->pri)); + return conn_update_stream_priority(conn, stream, &fr->pri); } int nghttp3_conn_on_priority_update(nghttp3_conn *conn, @@ -1794,7 +1839,7 @@ int nghttp3_conn_create_stream(nghttp3_conn *conn, nghttp3_stream **pstream, conn_stream_acked_data, }; - rv = nghttp3_stream_new(&stream, stream_id, conn->next_seq, &callbacks, + rv = nghttp3_stream_new(&stream, stream_id, &callbacks, &conn->out_chunk_objalloc, &conn->stream_objalloc, conn->mem); if (rv != 0) { @@ -1803,14 +1848,17 @@ int nghttp3_conn_create_stream(nghttp3_conn *conn, nghttp3_stream **pstream, stream->conn = conn; - rv = nghttp3_map_insert(&conn->streams, - (nghttp3_map_key_type)stream->node.nid.id, stream); + rv = nghttp3_map_insert(&conn->streams, (nghttp3_map_key_type)stream->node.id, + stream); if (rv != 0) { nghttp3_stream_del(stream); return rv; } - ++conn->next_seq; + if (conn->server && nghttp3_client_stream_bidi(stream_id)) { + ++conn->remote.bidi.num_streams; + } + *pstream = stream; return 0; @@ -1897,7 +1945,7 @@ static nghttp3_ssize conn_writev_stream(nghttp3_conn *conn, int64_t *pstream_id, int *pfin, nghttp3_vec *vec, size_t veccnt, nghttp3_stream *stream) { int rv; - nghttp3_ssize n; + size_t n; assert(veccnt > 0); @@ -1910,31 +1958,25 @@ static nghttp3_ssize conn_writev_stream(nghttp3_conn *conn, int64_t *pstream_id, } } - if (!nghttp3_stream_uni(stream->node.nid.id) && conn->tx.qenc && + if (!nghttp3_stream_uni(stream->node.id) && conn->tx.qenc && !nghttp3_stream_is_blocked(conn->tx.qenc)) { n = nghttp3_stream_writev(conn->tx.qenc, pfin, vec, veccnt); - if (n < 0) { - return n; - } if (n) { - *pstream_id = conn->tx.qenc->node.nid.id; - return n; + *pstream_id = conn->tx.qenc->node.id; + return (nghttp3_ssize)n; } } n = nghttp3_stream_writev(stream, pfin, vec, veccnt); - if (n < 0) { - return n; - } /* We might just want to write stream fin without sending any stream data. 
*/ if (n == 0 && *pfin == 0) { return 0; } - *pstream_id = stream->node.nid.id; + *pstream_id = stream->node.id; - return n; + return (nghttp3_ssize)n; } nghttp3_ssize nghttp3_conn_writev_stream(nghttp3_conn *conn, @@ -1990,7 +2032,7 @@ nghttp3_ssize nghttp3_conn_writev_stream(nghttp3_conn *conn, return ncnt; } - if (nghttp3_client_stream_bidi(stream->node.nid.id) && + if (nghttp3_client_stream_bidi(stream->node.id) && !nghttp3_stream_require_schedule(stream)) { nghttp3_conn_unschedule_stream(conn, stream); } @@ -2020,20 +2062,16 @@ nghttp3_stream *nghttp3_conn_get_next_tx_stream(nghttp3_conn *conn) { int nghttp3_conn_add_write_offset(nghttp3_conn *conn, int64_t stream_id, size_t n) { nghttp3_stream *stream = nghttp3_conn_find_stream(conn, stream_id); - int rv; if (stream == NULL) { return 0; } - rv = nghttp3_stream_add_outq_offset(stream, n); - if (rv != 0) { - return rv; - } + nghttp3_stream_add_outq_offset(stream, n); stream->unscheduled_nwrite += n; - if (!nghttp3_client_stream_bidi(stream->node.nid.id)) { + if (!nghttp3_client_stream_bidi(stream->node.id)) { return 0; } @@ -2065,7 +2103,7 @@ static int conn_submit_headers_data(nghttp3_conn *conn, nghttp3_stream *stream, const nghttp3_data_reader *dr) { int rv; nghttp3_nv *nnva; - nghttp3_frame_entry frent; + nghttp3_frame_entry frent = {0}; rv = nghttp3_nva_copy(&nnva, nva, nvlen, conn->mem); if (rv != 0) { @@ -2235,7 +2273,7 @@ int nghttp3_conn_submit_trailers(nghttp3_conn *conn, int64_t stream_id, } int nghttp3_conn_submit_shutdown_notice(nghttp3_conn *conn) { - nghttp3_frame_entry frent; + nghttp3_frame_entry frent = {0}; int rv; assert(conn->tx.ctrl); @@ -2258,7 +2296,7 @@ int nghttp3_conn_submit_shutdown_notice(nghttp3_conn *conn) { } int nghttp3_conn_shutdown(nghttp3_conn *conn) { - nghttp3_frame_entry frent; + nghttp3_frame_entry frent = {0}; int rv; assert(conn->tx.ctrl); @@ -2279,7 +2317,8 @@ int nghttp3_conn_shutdown(nghttp3_conn *conn) { } conn->tx.goaway_id = frent.fr.goaway.id; - conn->flags |= NGHTTP3_CONN_FLAG_GOAWAY_QUEUED; + conn->flags |= + NGHTTP3_CONN_FLAG_GOAWAY_QUEUED | NGHTTP3_CONN_FLAG_SHUTDOWN_COMMENCED; return 0; } @@ -2305,7 +2344,7 @@ void nghttp3_conn_block_stream(nghttp3_conn *conn, int64_t stream_id) { stream->flags |= NGHTTP3_STREAM_FLAG_FC_BLOCKED; stream->unscheduled_nwrite = 0; - if (nghttp3_client_stream_bidi(stream->node.nid.id)) { + if (nghttp3_client_stream_bidi(stream->node.id)) { nghttp3_conn_unschedule_stream(conn, stream); } } @@ -2320,7 +2359,7 @@ void nghttp3_conn_shutdown_stream_write(nghttp3_conn *conn, int64_t stream_id) { stream->flags |= NGHTTP3_STREAM_FLAG_SHUT_WR; stream->unscheduled_nwrite = 0; - if (nghttp3_client_stream_bidi(stream->node.nid.id)) { + if (nghttp3_client_stream_bidi(stream->node.id)) { nghttp3_conn_unschedule_stream(conn, stream); } } @@ -2334,7 +2373,7 @@ int nghttp3_conn_unblock_stream(nghttp3_conn *conn, int64_t stream_id) { stream->flags &= (uint16_t)~NGHTTP3_STREAM_FLAG_FC_BLOCKED; - if (nghttp3_client_stream_bidi(stream->node.nid.id) && + if (nghttp3_client_stream_bidi(stream->node.id) && nghttp3_stream_require_schedule(stream)) { return nghttp3_conn_ensure_stream_scheduled(conn, stream); } @@ -2364,7 +2403,7 @@ int nghttp3_conn_resume_stream(nghttp3_conn *conn, int64_t stream_id) { stream->flags &= (uint16_t)~NGHTTP3_STREAM_FLAG_READ_DATA_BLOCKED; - if (nghttp3_client_stream_bidi(stream->node.nid.id) && + if (nghttp3_client_stream_bidi(stream->node.id) && nghttp3_stream_require_schedule(stream)) { return nghttp3_conn_ensure_stream_scheduled(conn, stream); } 
@@ -2381,7 +2420,6 @@ int nghttp3_conn_close_stream(nghttp3_conn *conn, int64_t stream_id, } if (nghttp3_stream_uni(stream_id) && - stream->type != NGHTTP3_STREAM_TYPE_PUSH && stream->type != NGHTTP3_STREAM_TYPE_UNKNOWN) { return NGHTTP3_ERR_H3_CLOSED_CRITICAL_STREAM; } @@ -2459,18 +2497,34 @@ int nghttp3_conn_set_stream_user_data(nghttp3_conn *conn, int64_t stream_id, uint64_t nghttp3_conn_get_frame_payload_left(nghttp3_conn *conn, int64_t stream_id) { - nghttp3_stream *stream = nghttp3_conn_find_stream(conn, stream_id); + nghttp3_stream *stream; + int uni = 0; + + if (!nghttp3_client_stream_bidi(stream_id)) { + uni = conn_remote_stream_uni(conn, stream_id); + if (!uni) { + return 0; + } + } + stream = nghttp3_conn_find_stream(conn, stream_id); if (stream == NULL) { return 0; } + if (uni && stream->type != NGHTTP3_STREAM_TYPE_CONTROL) { + return 0; + } + return (uint64_t)stream->rstate.left; } -int nghttp3_conn_get_stream_priority(nghttp3_conn *conn, nghttp3_pri *dest, - int64_t stream_id) { +int nghttp3_conn_get_stream_priority_versioned(nghttp3_conn *conn, + int pri_version, + nghttp3_pri *dest, + int64_t stream_id) { nghttp3_stream *stream; + (void)pri_version; assert(conn->server); @@ -2483,19 +2537,20 @@ int nghttp3_conn_get_stream_priority(nghttp3_conn *conn, nghttp3_pri *dest, return NGHTTP3_ERR_STREAM_NOT_FOUND; } - dest->urgency = nghttp3_pri_uint8_urgency(stream->node.pri); - dest->inc = nghttp3_pri_uint8_inc(stream->node.pri); + *dest = stream->node.pri; return 0; } -int nghttp3_conn_set_stream_priority(nghttp3_conn *conn, int64_t stream_id, - const nghttp3_pri *pri) { +int nghttp3_conn_set_client_stream_priority(nghttp3_conn *conn, + int64_t stream_id, + const uint8_t *data, + size_t datalen) { nghttp3_stream *stream; - nghttp3_frame_entry frent; + nghttp3_frame_entry frent = {0}; + uint8_t *buf = NULL; - assert(pri->urgency < NGHTTP3_URGENCY_LEVELS); - assert(pri->inc == 0 || pri->inc == 1); + assert(!conn->server); if (!nghttp3_client_stream_bidi(stream_id)) { return NGHTTP3_ERR_INVALID_ARGUMENT; @@ -2506,29 +2561,55 @@ int nghttp3_conn_set_stream_priority(nghttp3_conn *conn, int64_t stream_id, return NGHTTP3_ERR_STREAM_NOT_FOUND; } - if (conn->server) { - stream->flags |= NGHTTP3_STREAM_FLAG_SERVER_PRIORITY_SET; + if (datalen) { + buf = nghttp3_mem_malloc(conn->mem, datalen); + if (buf == NULL) { + return NGHTTP3_ERR_NOMEM; + } - return conn_update_stream_priority(conn, stream, nghttp3_pri_to_uint8(pri)); + memcpy(buf, data, datalen); } frent.fr.hd.type = NGHTTP3_FRAME_PRIORITY_UPDATE; frent.fr.priority_update.pri_elem_id = stream_id; - frent.fr.priority_update.pri = *pri; + frent.fr.priority_update.data = buf; + frent.fr.priority_update.datalen = datalen; return nghttp3_stream_frq_add(conn->tx.ctrl, &frent); } -int nghttp3_conn_is_remote_qpack_encoder_stream(nghttp3_conn *conn, - int64_t stream_id) { +int nghttp3_conn_set_server_stream_priority_versioned(nghttp3_conn *conn, + int64_t stream_id, + int pri_version, + const nghttp3_pri *pri) { nghttp3_stream *stream; + (void)pri_version; - if (!conn_remote_stream_uni(conn, stream_id)) { - return 0; + assert(conn->server); + assert(pri->urgency < NGHTTP3_URGENCY_LEVELS); + assert(pri->inc == 0 || pri->inc == 1); + + if (!nghttp3_client_stream_bidi(stream_id)) { + return NGHTTP3_ERR_INVALID_ARGUMENT; } stream = nghttp3_conn_find_stream(conn, stream_id); - return stream && stream->type == NGHTTP3_STREAM_TYPE_QPACK_ENCODER; + if (stream == NULL) { + return NGHTTP3_ERR_STREAM_NOT_FOUND; + } + + stream->flags |= 
NGHTTP3_STREAM_FLAG_SERVER_PRIORITY_SET; + + return conn_update_stream_priority(conn, stream, pri); +} + +int nghttp3_conn_is_drained(nghttp3_conn *conn) { + assert(conn->server); + + return (conn->flags & NGHTTP3_CONN_FLAG_SHUTDOWN_COMMENCED) && + conn->remote.bidi.num_streams == 0 && + nghttp3_stream_outq_write_done(conn->tx.ctrl) && + nghttp3_ringbuf_len(&conn->tx.ctrl->frq) == 0; } void nghttp3_settings_default_versioned(int settings_version, diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_conn.h b/deps/ngtcp2/nghttp3/lib/nghttp3_conn.h index fa7071e4b1ddb7..74f47583ce825c 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_conn.h +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_conn.h @@ -62,6 +62,9 @@ /* NGHTTP3_CONN_FLAG_QPACK_DECODER_OPENED is set when a QPACK decoder stream has opened. */ #define NGHTTP3_CONN_FLAG_QPACK_DECODER_OPENED 0x0008u +/* NGHTTP3_CONN_FLAG_SHUTDOWN_COMMENCED is set when graceful shutdown + has started. */ +#define NGHTTP3_CONN_FLAG_SHUTDOWN_COMMENCED 0x0010u /* NGHTTP3_CONN_FLAG_GOAWAY_RECVED indicates that GOAWAY frame has received. */ #define NGHTTP3_CONN_FLAG_GOAWAY_RECVED 0x0020u @@ -73,7 +76,7 @@ typedef struct nghttp3_chunk { nghttp3_opl_entry oplent; } nghttp3_chunk; -nghttp3_objalloc_def(chunk, nghttp3_chunk, oplent); +nghttp3_objalloc_decl(chunk, nghttp3_chunk, oplent); struct nghttp3_conn { nghttp3_objalloc out_chunk_objalloc; @@ -90,7 +93,6 @@ struct nghttp3_conn { void *user_data; int server; uint16_t flags; - uint64_t next_seq; struct { nghttp3_settings settings; @@ -109,6 +111,10 @@ struct nghttp3_conn { initiated bidirectional stream ID the remote endpoint can issue. This field is used on server side only. */ uint64_t max_client_streams; + /* num_streams is the number of client initiated bidirectional + streams that are currently open. This field is for server + use only. 
*/ + size_t num_streams; } bidi; nghttp3_settings settings; } remote; diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_conv.c b/deps/ngtcp2/nghttp3/lib/nghttp3_conv.c index cb340ab5a11363..edd0adc4d0ff0a 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_conv.c +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_conv.c @@ -29,6 +29,7 @@ #include #include "nghttp3_str.h" +#include "nghttp3_unreachable.h" int64_t nghttp3_get_varint(size_t *plen, const uint8_t *p) { union { @@ -38,7 +39,7 @@ int64_t nghttp3_get_varint(size_t *plen, const uint8_t *p) { uint64_t n64; } n; - *plen = 1u << (*p >> 6); + *plen = (size_t)(1u << (*p >> 6)); switch (*plen) { case 1: @@ -57,34 +58,25 @@ int64_t nghttp3_get_varint(size_t *plen, const uint8_t *p) { return (int64_t)nghttp3_ntohl64(n.n64); } - assert(0); - abort(); + nghttp3_unreachable(); } int64_t nghttp3_get_varint_fb(const uint8_t *p) { return *p & 0x3f; } -size_t nghttp3_get_varint_len(const uint8_t *p) { return 1u << (*p >> 6); } +size_t nghttp3_get_varintlen(const uint8_t *p) { + return (size_t)(1u << (*p >> 6)); +} uint8_t *nghttp3_put_uint64be(uint8_t *p, uint64_t n) { n = nghttp3_htonl64(n); return nghttp3_cpymem(p, (const uint8_t *)&n, sizeof(n)); } -uint8_t *nghttp3_put_uint48be(uint8_t *p, uint64_t n) { - n = nghttp3_htonl64(n); - return nghttp3_cpymem(p, ((const uint8_t *)&n) + 2, 6); -} - uint8_t *nghttp3_put_uint32be(uint8_t *p, uint32_t n) { n = htonl(n); return nghttp3_cpymem(p, (const uint8_t *)&n, sizeof(n)); } -uint8_t *nghttp3_put_uint24be(uint8_t *p, uint32_t n) { - n = htonl(n); - return nghttp3_cpymem(p, ((const uint8_t *)&n) + 1, 3); -} - uint8_t *nghttp3_put_uint16be(uint8_t *p, uint16_t n) { n = htons(n); return nghttp3_cpymem(p, (const uint8_t *)&n, sizeof(n)); @@ -112,7 +104,7 @@ uint8_t *nghttp3_put_varint(uint8_t *p, int64_t n) { return rv; } -size_t nghttp3_put_varint_len(int64_t n) { +size_t nghttp3_put_varintlen(int64_t n) { if (n < 64) { return 1; } @@ -129,7 +121,3 @@ size_t nghttp3_put_varint_len(int64_t n) { uint64_t nghttp3_ord_stream_id(int64_t stream_id) { return (uint64_t)(stream_id >> 2) + 1; } - -uint8_t nghttp3_pri_to_uint8(const nghttp3_pri *pri) { - return (uint8_t)((uint32_t)pri->inc << 7 | pri->urgency); -} diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_conv.h b/deps/ngtcp2/nghttp3/lib/nghttp3_conv.h index 23555be7cac027..5522bc735bfd37 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_conv.h +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_conv.h @@ -50,15 +50,11 @@ # include #endif /* HAVE_SYS_ENDIAN_H */ -#include +#if defined(__APPLE__) +# include +#endif // __APPLE__ -#if defined(HAVE_BSWAP_64) || \ - (defined(HAVE_DECL_BSWAP_64) && HAVE_DECL_BSWAP_64 > 0) -# define nghttp3_bswap64 bswap_64 -#else /* !HAVE_BSWAP_64 */ -# define nghttp3_bswap64(N) \ - ((uint64_t)(ntohl((uint32_t)(N))) << 32 | ntohl((uint32_t)((N) >> 32))) -#endif /* !HAVE_BSWAP_64 */ +#include #if defined(HAVE_BE64TOH) || \ (defined(HAVE_DECL_BE64TOH) && HAVE_DECL_BE64TOH > 0) @@ -69,6 +65,17 @@ # define nghttp3_ntohl64(N) (N) # define nghttp3_htonl64(N) (N) # else /* !WORDS_BIGENDIAN */ +# if defined(HAVE_BSWAP_64) || \ + (defined(HAVE_DECL_BSWAP_64) && HAVE_DECL_BSWAP_64 > 0) +# define nghttp3_bswap64 bswap_64 +# elif defined(WIN32) +# define nghttp3_bswap64 _byteswap_uint64 +# elif defined(__APPLE__) +# define nghttp3_bswap64 OSSwapInt64 +# else /* !HAVE_BSWAP_64 && !WIN32 && !__APPLE__ */ +# define nghttp3_bswap64(N) \ + ((uint64_t)(ntohl((uint32_t)(N))) << 32 | ntohl((uint32_t)((N) >> 32))) +# endif /* !HAVE_BSWAP_64 && !WIN32 && !__APPLE__ */ # define nghttp3_ntohl64(N) 
nghttp3_bswap64(N) # define nghttp3_htonl64(N) nghttp3_bswap64(N) # endif /* !WORDS_BIGENDIAN */ @@ -106,9 +113,9 @@ STIN uint16_t htons(uint16_t hostshort) { STIN uint32_t ntohl(uint32_t netlong) { uint32_t res; unsigned char *p = (unsigned char *)&netlong; - res = *p++ << 24; - res += *p++ << 16; - res += *p++ << 8; + res = (uint32_t)(*p++ << 24); + res += (uint32_t)(*p++ << 16); + res += (uint32_t)(*p++ << 8); res += *p; return res; } @@ -116,7 +123,7 @@ STIN uint32_t ntohl(uint32_t netlong) { STIN uint16_t ntohs(uint16_t netshort) { uint16_t res; unsigned char *p = (unsigned char *)&netshort; - res = *p++ << 8; + res = (uint16_t)(*p++ << 8); res += *p; return res; } @@ -137,10 +144,10 @@ int64_t nghttp3_get_varint(size_t *plen, const uint8_t *p); int64_t nghttp3_get_varint_fb(const uint8_t *p); /* - * nghttp3_get_varint_len returns the required number of bytes to read + * nghttp3_get_varintlen returns the required number of bytes to read * variable-length integer starting at |p|. */ -size_t nghttp3_get_varint_len(const uint8_t *p); +size_t nghttp3_get_varintlen(const uint8_t *p); /* * nghttp3_put_uint64be writes |n| in host byte order in |p| in @@ -149,13 +156,6 @@ size_t nghttp3_get_varint_len(const uint8_t *p); */ uint8_t *nghttp3_put_uint64be(uint8_t *p, uint64_t n); -/* - * nghttp3_put_uint48be writes |n| in host byte order in |p| in - * network byte order. It writes only least significant 48 bits. It - * returns the one beyond of the last written position. - */ -uint8_t *nghttp3_put_uint48be(uint8_t *p, uint64_t n); - /* * nghttp3_put_uint32be writes |n| in host byte order in |p| in * network byte order. It returns the one beyond of the last written @@ -163,13 +163,6 @@ uint8_t *nghttp3_put_uint48be(uint8_t *p, uint64_t n); */ uint8_t *nghttp3_put_uint32be(uint8_t *p, uint32_t n); -/* - * nghttp3_put_uint24be writes |n| in host byte order in |p| in - * network byte order. It writes only least significant 24 bits. It - * returns the one beyond of the last written position. - */ -uint8_t *nghttp3_put_uint24be(uint8_t *p, uint32_t n); - /* * nghttp3_put_uint16be writes |n| in host byte order in |p| in * network byte order. It returns the one beyond of the last written @@ -184,10 +177,10 @@ uint8_t *nghttp3_put_uint16be(uint8_t *p, uint16_t n); uint8_t *nghttp3_put_varint(uint8_t *p, int64_t n); /* - * nghttp3_put_varint_len returns the required number of bytes to + * nghttp3_put_varintlen returns the required number of bytes to * encode |n|. */ -size_t nghttp3_put_varint_len(int64_t n); +size_t nghttp3_put_varintlen(int64_t n); /* * nghttp3_ord_stream_id returns the ordinal number of |stream_id|. @@ -200,22 +193,4 @@ uint64_t nghttp3_ord_stream_id(int64_t stream_id); */ #define NGHTTP3_PRI_INC_MASK (1 << 7) -/* - * nghttp3_pri_to_uint8 encodes |pri| into uint8_t variable. - */ -uint8_t nghttp3_pri_to_uint8(const nghttp3_pri *pri); - -/* - * nghttp3_pri_uint8_urgency extracts urgency from |PRI| which is - * supposed to be constructed by nghttp3_pri_to_uint8. - */ -#define nghttp3_pri_uint8_urgency(PRI) \ - ((uint32_t)((PRI) & ~NGHTTP3_PRI_INC_MASK)) - -/* - * nghttp3_pri_uint8_inc extracts inc from |PRI| which is supposed to - * be constructed by nghttp3_pri_to_uint8. 
- */ -#define nghttp3_pri_uint8_inc(PRI) (((PRI)&NGHTTP3_PRI_INC_MASK) != 0) - #endif /* NGHTTP3_CONV_H */ diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_err.c b/deps/ngtcp2/nghttp3/lib/nghttp3_err.c index 5cf94db852f71f..0d596bfab6d29d 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_err.c +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_err.c @@ -28,8 +28,6 @@ const char *nghttp3_strerror(int liberr) { switch (liberr) { case NGHTTP3_ERR_INVALID_ARGUMENT: return "ERR_INVALID_ARGUMENT"; - case NGHTTP3_ERR_NOBUF: - return "ERR_NOBUF"; case NGHTTP3_ERR_INVALID_STATE: return "ERR_INVALID_STATE"; case NGHTTP3_ERR_WOULDBLOCK: @@ -104,6 +102,9 @@ uint64_t nghttp3_err_infer_quic_app_error_code(int liberr) { case NGHTTP3_ERR_H3_INTERNAL_ERROR: case NGHTTP3_ERR_NOMEM: case NGHTTP3_ERR_CALLBACK_FAILURE: + case NGHTTP3_ERR_QPACK_FATAL: + case NGHTTP3_ERR_QPACK_HEADER_TOO_LARGE: + case NGHTTP3_ERR_STREAM_DATA_OVERFLOW: return NGHTTP3_H3_INTERNAL_ERROR; case NGHTTP3_ERR_H3_CLOSED_CRITICAL_STREAM: return NGHTTP3_H3_CLOSED_CRITICAL_STREAM; diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_frame.c b/deps/ngtcp2/nghttp3/lib/nghttp3_frame.c index 38c395ebe16162..923a78f90f826f 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_frame.c +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_frame.c @@ -38,7 +38,7 @@ uint8_t *nghttp3_frame_write_hd(uint8_t *p, const nghttp3_frame_hd *hd) { } size_t nghttp3_frame_write_hd_len(const nghttp3_frame_hd *hd) { - return nghttp3_put_varint_len(hd->type) + nghttp3_put_varint_len(hd->length); + return nghttp3_put_varintlen(hd->type) + nghttp3_put_varintlen(hd->length); } uint8_t *nghttp3_frame_write_settings(uint8_t *p, @@ -61,14 +61,14 @@ size_t nghttp3_frame_write_settings_len(int64_t *ppayloadlen, size_t i; for (i = 0; i < fr->niv; ++i) { - payloadlen += nghttp3_put_varint_len((int64_t)fr->iv[i].id) + - nghttp3_put_varint_len((int64_t)fr->iv[i].value); + payloadlen += nghttp3_put_varintlen((int64_t)fr->iv[i].id) + + nghttp3_put_varintlen((int64_t)fr->iv[i].value); } *ppayloadlen = (int64_t)payloadlen; - return nghttp3_put_varint_len(NGHTTP3_FRAME_SETTINGS) + - nghttp3_put_varint_len((int64_t)payloadlen) + payloadlen; + return nghttp3_put_varintlen(NGHTTP3_FRAME_SETTINGS) + + nghttp3_put_varintlen((int64_t)payloadlen) + payloadlen; } uint8_t *nghttp3_frame_write_goaway(uint8_t *p, @@ -81,12 +81,12 @@ uint8_t *nghttp3_frame_write_goaway(uint8_t *p, size_t nghttp3_frame_write_goaway_len(int64_t *ppayloadlen, const nghttp3_frame_goaway *fr) { - size_t payloadlen = nghttp3_put_varint_len(fr->id); + size_t payloadlen = nghttp3_put_varintlen(fr->id); *ppayloadlen = (int64_t)payloadlen; - return nghttp3_put_varint_len(NGHTTP3_FRAME_GOAWAY) + - nghttp3_put_varint_len((int64_t)payloadlen) + payloadlen; + return nghttp3_put_varintlen(NGHTTP3_FRAME_GOAWAY) + + nghttp3_put_varintlen((int64_t)payloadlen) + payloadlen; } uint8_t * @@ -94,17 +94,8 @@ nghttp3_frame_write_priority_update(uint8_t *p, const nghttp3_frame_priority_update *fr) { p = nghttp3_frame_write_hd(p, &fr->hd); p = nghttp3_put_varint(p, fr->pri_elem_id); - - assert(fr->pri.urgency <= NGHTTP3_URGENCY_LOW); - - *p++ = 'u'; - *p++ = '='; - *p++ = (uint8_t)('0' + fr->pri.urgency); - - if (fr->pri.inc) { -#define NGHTTP3_PRIORITY_INCREMENTAL ", i" - p = nghttp3_cpymem(p, (const uint8_t *)NGHTTP3_PRIORITY_INCREMENTAL, - sizeof(NGHTTP3_PRIORITY_INCREMENTAL) - 1); + if (fr->datalen) { + p = nghttp3_cpymem(p, fr->data, fr->datalen); } return p; @@ -112,13 +103,12 @@ nghttp3_frame_write_priority_update(uint8_t *p, size_t nghttp3_frame_write_priority_update_len( int64_t 
*ppayloadlen, const nghttp3_frame_priority_update *fr) { - size_t payloadlen = nghttp3_put_varint_len(fr->pri_elem_id) + sizeof("u=U") - - 1 + (fr->pri.inc ? sizeof(", i") - 1 : 0); + size_t payloadlen = nghttp3_put_varintlen(fr->pri_elem_id) + fr->datalen; *ppayloadlen = (int64_t)payloadlen; - return nghttp3_put_varint_len(fr->hd.type) + - nghttp3_put_varint_len((int64_t)payloadlen) + payloadlen; + return nghttp3_put_varintlen(fr->hd.type) + + nghttp3_put_varintlen((int64_t)payloadlen) + payloadlen; } int nghttp3_nva_copy(nghttp3_nv **pnva, const nghttp3_nv *nva, size_t nvlen, @@ -164,11 +154,11 @@ int nghttp3_nva_copy(nghttp3_nv **pnva, const nghttp3_nv *nva, size_t nvlen, } else { if (nva[i].namelen) { memcpy(data, nva[i].name, nva[i].namelen); + nghttp3_downcase(data, nva[i].namelen); } p->name = data; p->namelen = nva[i].namelen; data[p->namelen] = '\0'; - nghttp3_downcase(p->name, p->namelen); data += nva[i].namelen + 1; } @@ -202,3 +192,12 @@ void nghttp3_frame_headers_free(nghttp3_frame_headers *fr, nghttp3_nva_del(fr->nva, mem); } + +void nghttp3_frame_priority_update_free(nghttp3_frame_priority_update *fr, + const nghttp3_mem *mem) { + if (fr == NULL) { + return; + } + + nghttp3_mem_free(mem, fr->data); +} diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_frame.h b/deps/ngtcp2/nghttp3/lib/nghttp3_frame.h index b64bbc4ecb9667..1079673d150ce3 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_frame.h +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_frame.h @@ -34,26 +34,23 @@ #include "nghttp3_buf.h" -typedef enum nghttp3_frame_type { - NGHTTP3_FRAME_DATA = 0x00, - NGHTTP3_FRAME_HEADERS = 0x01, - NGHTTP3_FRAME_CANCEL_PUSH = 0x03, - NGHTTP3_FRAME_SETTINGS = 0x04, - NGHTTP3_FRAME_PUSH_PROMISE = 0x05, - NGHTTP3_FRAME_GOAWAY = 0x07, - NGHTTP3_FRAME_MAX_PUSH_ID = 0x0d, - /* PRIORITY_UPDATE: - https://tools.ietf.org/html/draft-ietf-httpbis-priority-03 */ - NGHTTP3_FRAME_PRIORITY_UPDATE = 0x0f0700, - NGHTTP3_FRAME_PRIORITY_UPDATE_PUSH_ID = 0x0f0701, -} nghttp3_frame_type; - -typedef enum nghttp3_h2_reserved_type { - NGHTTP3_H2_FRAME_PRIORITY = 0x02, - NGHTTP3_H2_FRAME_PING = 0x06, - NGHTTP3_H2_FRAME_WINDOW_UPDATE = 0x08, - NGHTTP3_H2_FRAME_CONTINUATION = 0x9, -} nghttp3_h2_reserved_type; +#define NGHTTP3_FRAME_DATA 0x00 +#define NGHTTP3_FRAME_HEADERS 0x01 +#define NGHTTP3_FRAME_CANCEL_PUSH 0x03 +#define NGHTTP3_FRAME_SETTINGS 0x04 +#define NGHTTP3_FRAME_PUSH_PROMISE 0x05 +#define NGHTTP3_FRAME_GOAWAY 0x07 +#define NGHTTP3_FRAME_MAX_PUSH_ID 0x0d +/* PRIORITY_UPDATE: https://datatracker.ietf.org/doc/html/rfc9218 */ +#define NGHTTP3_FRAME_PRIORITY_UPDATE 0x0f0700 +#define NGHTTP3_FRAME_PRIORITY_UPDATE_PUSH_ID 0x0f0701 + +/* Frame types that are reserved for HTTP/2, and must not be used in + HTTP/3. */ +#define NGHTTP3_H2_FRAME_PRIORITY 0x02 +#define NGHTTP3_H2_FRAME_PING 0x06 +#define NGHTTP3_H2_FRAME_WINDOW_UPDATE 0x08 +#define NGHTTP3_H2_FRAME_CONTINUATION 0x9 typedef struct nghttp3_frame_hd { int64_t type; @@ -74,6 +71,7 @@ typedef struct nghttp3_frame_headers { #define NGHTTP3_SETTINGS_ID_QPACK_MAX_TABLE_CAPACITY 0x01 #define NGHTTP3_SETTINGS_ID_QPACK_BLOCKED_STREAMS 0x07 #define NGHTTP3_SETTINGS_ID_ENABLE_CONNECT_PROTOCOL 0x08 +#define NGHTTP3_SETTINGS_ID_H3_DATAGRAM 0x33 #define NGHTTP3_H2_SETTINGS_ID_ENABLE_PUSH 0x2 #define NGHTTP3_H2_SETTINGS_ID_MAX_CONCURRENT_STREAMS 0x3 @@ -103,7 +101,17 @@ typedef struct nghttp3_frame_priority_update { NGHTTP3_FRAME_PRIORITY_UPDATE_PUSH_ID. It is undefined otherwise. 
*/ int64_t pri_elem_id; - nghttp3_pri pri; + /* When sending this frame, data should point to the buffer + containing a serialized priority field value and its length is + set to datalen. On reception, pri contains the decoded priority + header value. */ + union { + nghttp3_pri pri; + struct { + uint8_t *data; + size_t datalen; + }; + }; } nghttp3_frame_priority_update; typedef union nghttp3_frame { @@ -212,4 +220,11 @@ void nghttp3_nva_del(nghttp3_nv *nva, const nghttp3_mem *mem); void nghttp3_frame_headers_free(nghttp3_frame_headers *fr, const nghttp3_mem *mem); +/* + * nghttp3_frame_priority_update_free frees memory allocated for |fr|. + * This function should only be called for an outgoing frame. + */ +void nghttp3_frame_priority_update_free(nghttp3_frame_priority_update *fr, + const nghttp3_mem *mem); + #endif /* NGHTTP3_FRAME_H */ diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_http.c b/deps/ngtcp2/nghttp3/lib/nghttp3_http.c index 5e06d8c47658e1..963134f13df946 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_http.c +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_http.c @@ -31,6 +31,8 @@ #include "nghttp3_stream.h" #include "nghttp3_macro.h" #include "nghttp3_conv.h" +#include "nghttp3_unreachable.h" +#include "sfparse.h" static uint8_t downcase(uint8_t c) { return 'A' <= c && c <= 'Z' ? (uint8_t)(c - 'A' + 'a') : c; @@ -105,722 +107,74 @@ static int check_path_flags(nghttp3_http_state *http) { (http->flags & NGHTTP3_HTTP_FLAG_PATH_ASTERISK))); } -/* Generated by genchartbl.py */ -static const int SF_KEY_CHARS[] = { - 0 /* NUL */, 0 /* SOH */, 0 /* STX */, 0 /* ETX */, 0 /* EOT */, - 0 /* ENQ */, 0 /* ACK */, 0 /* BEL */, 0 /* BS */, 0 /* HT */, - 0 /* LF */, 0 /* VT */, 0 /* FF */, 0 /* CR */, 0 /* SO */, - 0 /* SI */, 0 /* DLE */, 0 /* DC1 */, 0 /* DC2 */, 0 /* DC3 */, - 0 /* DC4 */, 0 /* NAK */, 0 /* SYN */, 0 /* ETB */, 0 /* CAN */, - 0 /* EM */, 0 /* SUB */, 0 /* ESC */, 0 /* FS */, 0 /* GS */, - 0 /* RS */, 0 /* US */, 0 /* SPC */, 0 /* ! */, 0 /* " */, - 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, - 0 /* ( */, 0 /* ) */, 1 /* * */, 0 /* + */, 0 /* , */, - 1 /* - */, 1 /* . */, 0 /* / */, 1 /* 0 */, 1 /* 1 */, - 1 /* 2 */, 1 /* 3 */, 1 /* 4 */, 1 /* 5 */, 1 /* 6 */, - 1 /* 7 */, 1 /* 8 */, 1 /* 9 */, 0 /* : */, 0 /* ; */, - 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, - 0 /* A */, 0 /* B */, 0 /* C */, 0 /* D */, 0 /* E */, - 0 /* F */, 0 /* G */, 0 /* H */, 0 /* I */, 0 /* J */, - 0 /* K */, 0 /* L */, 0 /* M */, 0 /* N */, 0 /* O */, - 0 /* P */, 0 /* Q */, 0 /* R */, 0 /* S */, 0 /* T */, - 0 /* U */, 0 /* V */, 0 /* W */, 0 /* X */, 0 /* Y */, - 0 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, - 1 /* _ */, 0 /* ` */, 1 /* a */, 1 /* b */, 1 /* c */, - 1 /* d */, 1 /* e */, 1 /* f */, 1 /* g */, 1 /* h */, - 1 /* i */, 1 /* j */, 1 /* k */, 1 /* l */, 1 /* m */, - 1 /* n */, 1 /* o */, 1 /* p */, 1 /* q */, 1 /* r */, - 1 /* s */, 1 /* t */, 1 /* u */, 1 /* v */, 1 /* w */, - 1 /* x */, 1 /* y */, 1 /* z */, 0 /* { */, 0 /* | */, - 0 /* } */, 0 /* ~ */, 0 /* DEL */, 0 /* 0x80 */, 0 /* 0x81 */, - 0 /* 0x82 */, 0 /* 0x83 */, 0 /* 0x84 */, 0 /* 0x85 */, 0 /* 0x86 */, - 0 /* 0x87 */, 0 /* 0x88 */, 0 /* 0x89 */, 0 /* 0x8a */, 0 /* 0x8b */, - 0 /* 0x8c */, 0 /* 0x8d */, 0 /* 0x8e */, 0 /* 0x8f */, 0 /* 0x90 */, - 0 /* 0x91 */, 0 /* 0x92 */, 0 /* 0x93 */, 0 /* 0x94 */, 0 /* 0x95 */, - 0 /* 0x96 */, 0 /* 0x97 */, 0 /* 0x98 */, 0 /* 0x99 */, 0 /* 0x9a */, - 0 /* 0x9b */, 0 /* 0x9c */, 0 /* 0x9d */, 0 /* 0x9e */, 0 /* 0x9f */, - 0 /* 0xa0 */, 0 /* 0xa1 */, 0 /* 0xa2 */, 0 /* 0xa3 */, 0 /* 0xa4 */, - 0 /* 0xa5 */, 0 /* 0xa6 */, 0 /* 0xa7 */, 0 /* 0xa8 */, 0 /* 0xa9 */, - 0 /* 0xaa */, 0 /* 0xab */, 0 /* 0xac */, 0 /* 0xad */, 0 /* 0xae */, - 0 /* 0xaf */, 0 /* 0xb0 */, 0 /* 0xb1 */, 0 /* 0xb2 */, 0 /* 0xb3 */, - 0 /* 0xb4 */, 0 /* 0xb5 */, 0 /* 0xb6 */, 0 /* 0xb7 */, 0 /* 0xb8 */, - 0 /* 0xb9 */, 0 /* 0xba */, 0 /* 0xbb */, 0 /* 0xbc */, 0 /* 0xbd */, - 0 /* 0xbe */, 0 /* 0xbf */, 0 /* 0xc0 */, 0 /* 0xc1 */, 0 /* 0xc2 */, - 0 /* 0xc3 */, 0 /* 0xc4 */, 0 /* 0xc5 */, 0 /* 0xc6 */, 0 /* 0xc7 */, - 0 /* 0xc8 */, 0 /* 0xc9 */, 0 /* 0xca */, 0 /* 0xcb */, 0 /* 0xcc */, - 0 /* 0xcd */, 0 /* 0xce */, 0 /* 0xcf */, 0 /* 0xd0 */, 0 /* 0xd1 */, - 0 /* 0xd2 */, 0 /* 0xd3 */, 0 /* 0xd4 */, 0 /* 0xd5 */, 0 /* 0xd6 */, - 0 /* 0xd7 */, 0 /* 0xd8 */, 0 /* 0xd9 */, 0 /* 0xda */, 0 /* 0xdb */, - 0 /* 0xdc */, 0 /* 0xdd */, 0 /* 0xde */, 0 /* 0xdf */, 0 /* 0xe0 */, - 0 /* 0xe1 */, 0 /* 0xe2 */, 0 /* 0xe3 */, 0 /* 0xe4 */, 0 /* 0xe5 */, - 0 /* 0xe6 */, 0 /* 0xe7 */, 0 /* 0xe8 */, 0 /* 0xe9 */, 0 /* 0xea */, - 0 /* 0xeb */, 0 /* 0xec */, 0 /* 0xed */, 0 /* 0xee */, 0 /* 0xef */, - 0 /* 0xf0 */, 0 /* 0xf1 */, 0 /* 0xf2 */, 0 /* 0xf3 */, 0 /* 0xf4 */, - 0 /* 0xf5 */, 0 /* 0xf6 */, 0 /* 0xf7 */, 0 /* 0xf8 */, 0 /* 0xf9 */, - 0 /* 0xfa */, 0 /* 0xfb */, 0 /* 0xfc */, 0 /* 0xfd */, 0 /* 0xfe */, - 0 /* 0xff */, -}; - -static nghttp3_ssize sf_parse_key(const uint8_t *begin, const uint8_t *end) { - const uint8_t *p = begin; - - if ((*p < 'a' || 'z' < *p) && *p != '*') { - return -1; - } - - for (; p != end && SF_KEY_CHARS[*p]; ++p) - ; - - return p - begin; -} - -static nghttp3_ssize sf_parse_integer_or_decimal(nghttp3_sf_value *dest, - const uint8_t *begin, - const uint8_t *end) { - const uint8_t *p = begin; - int sign = 1; - int64_t value = 0; - int type = NGHTTP3_SF_VALUE_TYPE_INTEGER; - size_t len = 0; - size_t fpos = 0; - size_t i; - - if (*p == '-') { - if (++p == end) { - return -1; - } - - sign = -1; - } - - if (*p < '0' || '9' < *p) { - return -1; - } - - for (; p != end; ++p) { - switch (*p) { - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - value *= 10; - value += *p - '0'; - - if (++len > 15) { - return -1; - } - - break; - case '.': - if (type != NGHTTP3_SF_VALUE_TYPE_INTEGER) { - 
goto fin; - } - - if (len > 12) { - return -1; - } - fpos = len; - type = NGHTTP3_SF_VALUE_TYPE_DECIMAL; - - break; - default: - goto fin; - }; - } - -fin: - switch (type) { - case NGHTTP3_SF_VALUE_TYPE_INTEGER: - if (dest) { - dest->type = (uint8_t)type; - dest->i = value * sign; - } - - return p - begin; - case NGHTTP3_SF_VALUE_TYPE_DECIMAL: - if (fpos == len || len - fpos > 3) { - return -1; - } - - if (dest) { - dest->type = (uint8_t)type; - dest->d = (double)value; - for (i = len - fpos; i > 0; --i) { - dest->d /= (double)10; - } - dest->d *= sign; - } - - return p - begin; - default: - assert(0); - abort(); - } -} - -/* Generated by genchartbl.py */ -static const int SF_DQUOTE_CHARS[] = { - 0 /* NUL */, 0 /* SOH */, 0 /* STX */, 0 /* ETX */, 0 /* EOT */, - 0 /* ENQ */, 0 /* ACK */, 0 /* BEL */, 0 /* BS */, 0 /* HT */, - 0 /* LF */, 0 /* VT */, 0 /* FF */, 0 /* CR */, 0 /* SO */, - 0 /* SI */, 0 /* DLE */, 0 /* DC1 */, 0 /* DC2 */, 0 /* DC3 */, - 0 /* DC4 */, 0 /* NAK */, 0 /* SYN */, 0 /* ETB */, 0 /* CAN */, - 0 /* EM */, 0 /* SUB */, 0 /* ESC */, 0 /* FS */, 0 /* GS */, - 0 /* RS */, 0 /* US */, 1 /* SPC */, 1 /* ! */, 0 /* " */, - 1 /* # */, 1 /* $ */, 1 /* % */, 1 /* & */, 1 /* ' */, - 1 /* ( */, 1 /* ) */, 1 /* * */, 1 /* + */, 1 /* , */, - 1 /* - */, 1 /* . */, 1 /* / */, 1 /* 0 */, 1 /* 1 */, - 1 /* 2 */, 1 /* 3 */, 1 /* 4 */, 1 /* 5 */, 1 /* 6 */, - 1 /* 7 */, 1 /* 8 */, 1 /* 9 */, 1 /* : */, 1 /* ; */, - 1 /* < */, 1 /* = */, 1 /* > */, 1 /* ? */, 1 /* @ */, - 1 /* A */, 1 /* B */, 1 /* C */, 1 /* D */, 1 /* E */, - 1 /* F */, 1 /* G */, 1 /* H */, 1 /* I */, 1 /* J */, - 1 /* K */, 1 /* L */, 1 /* M */, 1 /* N */, 1 /* O */, - 1 /* P */, 1 /* Q */, 1 /* R */, 1 /* S */, 1 /* T */, - 1 /* U */, 1 /* V */, 1 /* W */, 1 /* X */, 1 /* Y */, - 1 /* Z */, 1 /* [ */, 0 /* \ */, 1 /* ] */, 1 /* ^ */, - 1 /* _ */, 1 /* ` */, 1 /* a */, 1 /* b */, 1 /* c */, - 1 /* d */, 1 /* e */, 1 /* f */, 1 /* g */, 1 /* h */, - 1 /* i */, 1 /* j */, 1 /* k */, 1 /* l */, 1 /* m */, - 1 /* n */, 1 /* o */, 1 /* p */, 1 /* q */, 1 /* r */, - 1 /* s */, 1 /* t */, 1 /* u */, 1 /* v */, 1 /* w */, - 1 /* x */, 1 /* y */, 1 /* z */, 1 /* { */, 1 /* | */, - 1 /* } */, 1 /* ~ */, 0 /* DEL */, 0 /* 0x80 */, 0 /* 0x81 */, - 0 /* 0x82 */, 0 /* 0x83 */, 0 /* 0x84 */, 0 /* 0x85 */, 0 /* 0x86 */, - 0 /* 0x87 */, 0 /* 0x88 */, 0 /* 0x89 */, 0 /* 0x8a */, 0 /* 0x8b */, - 0 /* 0x8c */, 0 /* 0x8d */, 0 /* 0x8e */, 0 /* 0x8f */, 0 /* 0x90 */, - 0 /* 0x91 */, 0 /* 0x92 */, 0 /* 0x93 */, 0 /* 0x94 */, 0 /* 0x95 */, - 0 /* 0x96 */, 0 /* 0x97 */, 0 /* 0x98 */, 0 /* 0x99 */, 0 /* 0x9a */, - 0 /* 0x9b */, 0 /* 0x9c */, 0 /* 0x9d */, 0 /* 0x9e */, 0 /* 0x9f */, - 0 /* 0xa0 */, 0 /* 0xa1 */, 0 /* 0xa2 */, 0 /* 0xa3 */, 0 /* 0xa4 */, - 0 /* 0xa5 */, 0 /* 0xa6 */, 0 /* 0xa7 */, 0 /* 0xa8 */, 0 /* 0xa9 */, - 0 /* 0xaa */, 0 /* 0xab */, 0 /* 0xac */, 0 /* 0xad */, 0 /* 0xae */, - 0 /* 0xaf */, 0 /* 0xb0 */, 0 /* 0xb1 */, 0 /* 0xb2 */, 0 /* 0xb3 */, - 0 /* 0xb4 */, 0 /* 0xb5 */, 0 /* 0xb6 */, 0 /* 0xb7 */, 0 /* 0xb8 */, - 0 /* 0xb9 */, 0 /* 0xba */, 0 /* 0xbb */, 0 /* 0xbc */, 0 /* 0xbd */, - 0 /* 0xbe */, 0 /* 0xbf */, 0 /* 0xc0 */, 0 /* 0xc1 */, 0 /* 0xc2 */, - 0 /* 0xc3 */, 0 /* 0xc4 */, 0 /* 0xc5 */, 0 /* 0xc6 */, 0 /* 0xc7 */, - 0 /* 0xc8 */, 0 /* 0xc9 */, 0 /* 0xca */, 0 /* 0xcb */, 0 /* 0xcc */, - 0 /* 0xcd */, 0 /* 0xce */, 0 /* 0xcf */, 0 /* 0xd0 */, 0 /* 0xd1 */, - 0 /* 0xd2 */, 0 /* 0xd3 */, 0 /* 0xd4 */, 0 /* 0xd5 */, 0 /* 0xd6 */, - 0 /* 0xd7 */, 0 /* 0xd8 */, 0 /* 0xd9 */, 0 /* 0xda */, 0 /* 0xdb */, - 0 /* 0xdc 
*/, 0 /* 0xdd */, 0 /* 0xde */, 0 /* 0xdf */, 0 /* 0xe0 */, - 0 /* 0xe1 */, 0 /* 0xe2 */, 0 /* 0xe3 */, 0 /* 0xe4 */, 0 /* 0xe5 */, - 0 /* 0xe6 */, 0 /* 0xe7 */, 0 /* 0xe8 */, 0 /* 0xe9 */, 0 /* 0xea */, - 0 /* 0xeb */, 0 /* 0xec */, 0 /* 0xed */, 0 /* 0xee */, 0 /* 0xef */, - 0 /* 0xf0 */, 0 /* 0xf1 */, 0 /* 0xf2 */, 0 /* 0xf3 */, 0 /* 0xf4 */, - 0 /* 0xf5 */, 0 /* 0xf6 */, 0 /* 0xf7 */, 0 /* 0xf8 */, 0 /* 0xf9 */, - 0 /* 0xfa */, 0 /* 0xfb */, 0 /* 0xfc */, 0 /* 0xfd */, 0 /* 0xfe */, - 0 /* 0xff */, -}; - -static nghttp3_ssize sf_parse_string(nghttp3_sf_value *dest, - const uint8_t *begin, const uint8_t *end) { - const uint8_t *p = begin; - - if (*p++ != '"') { - return -1; - } - - for (; p != end; ++p) { - switch (*p) { - case '\\': - if (++p == end) { - return -1; - } - - switch (*p) { - case '"': - case '\\': - break; - default: - return -1; - } - - break; - case '"': - if (dest) { - dest->type = NGHTTP3_SF_VALUE_TYPE_STRING; - dest->s.base = begin + 1; - dest->s.len = (size_t)(p - dest->s.base); - } - - ++p; - - return p - begin; - default: - if (!SF_DQUOTE_CHARS[*p]) { - return -1; - } - } - } - - return -1; -} - -/* Generated by genchartbl.py */ -static const int SF_TOKEN_CHARS[] = { - 0 /* NUL */, 0 /* SOH */, 0 /* STX */, 0 /* ETX */, 0 /* EOT */, - 0 /* ENQ */, 0 /* ACK */, 0 /* BEL */, 0 /* BS */, 0 /* HT */, - 0 /* LF */, 0 /* VT */, 0 /* FF */, 0 /* CR */, 0 /* SO */, - 0 /* SI */, 0 /* DLE */, 0 /* DC1 */, 0 /* DC2 */, 0 /* DC3 */, - 0 /* DC4 */, 0 /* NAK */, 0 /* SYN */, 0 /* ETB */, 0 /* CAN */, - 0 /* EM */, 0 /* SUB */, 0 /* ESC */, 0 /* FS */, 0 /* GS */, - 0 /* RS */, 0 /* US */, 0 /* SPC */, 1 /* ! */, 0 /* " */, - 1 /* # */, 1 /* $ */, 1 /* % */, 1 /* & */, 1 /* ' */, - 0 /* ( */, 0 /* ) */, 1 /* * */, 1 /* + */, 0 /* , */, - 1 /* - */, 1 /* . */, 1 /* / */, 1 /* 0 */, 1 /* 1 */, - 1 /* 2 */, 1 /* 3 */, 1 /* 4 */, 1 /* 5 */, 1 /* 6 */, - 1 /* 7 */, 1 /* 8 */, 1 /* 9 */, 1 /* : */, 0 /* ; */, - 0 /* < */, 0 /* = */, 0 /* > */, 0 /* ? 
*/, 0 /* @ */, - 1 /* A */, 1 /* B */, 1 /* C */, 1 /* D */, 1 /* E */, - 1 /* F */, 1 /* G */, 1 /* H */, 1 /* I */, 1 /* J */, - 1 /* K */, 1 /* L */, 1 /* M */, 1 /* N */, 1 /* O */, - 1 /* P */, 1 /* Q */, 1 /* R */, 1 /* S */, 1 /* T */, - 1 /* U */, 1 /* V */, 1 /* W */, 1 /* X */, 1 /* Y */, - 1 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 1 /* ^ */, - 1 /* _ */, 1 /* ` */, 1 /* a */, 1 /* b */, 1 /* c */, - 1 /* d */, 1 /* e */, 1 /* f */, 1 /* g */, 1 /* h */, - 1 /* i */, 1 /* j */, 1 /* k */, 1 /* l */, 1 /* m */, - 1 /* n */, 1 /* o */, 1 /* p */, 1 /* q */, 1 /* r */, - 1 /* s */, 1 /* t */, 1 /* u */, 1 /* v */, 1 /* w */, - 1 /* x */, 1 /* y */, 1 /* z */, 0 /* { */, 1 /* | */, - 0 /* } */, 1 /* ~ */, 0 /* DEL */, 0 /* 0x80 */, 0 /* 0x81 */, - 0 /* 0x82 */, 0 /* 0x83 */, 0 /* 0x84 */, 0 /* 0x85 */, 0 /* 0x86 */, - 0 /* 0x87 */, 0 /* 0x88 */, 0 /* 0x89 */, 0 /* 0x8a */, 0 /* 0x8b */, - 0 /* 0x8c */, 0 /* 0x8d */, 0 /* 0x8e */, 0 /* 0x8f */, 0 /* 0x90 */, - 0 /* 0x91 */, 0 /* 0x92 */, 0 /* 0x93 */, 0 /* 0x94 */, 0 /* 0x95 */, - 0 /* 0x96 */, 0 /* 0x97 */, 0 /* 0x98 */, 0 /* 0x99 */, 0 /* 0x9a */, - 0 /* 0x9b */, 0 /* 0x9c */, 0 /* 0x9d */, 0 /* 0x9e */, 0 /* 0x9f */, - 0 /* 0xa0 */, 0 /* 0xa1 */, 0 /* 0xa2 */, 0 /* 0xa3 */, 0 /* 0xa4 */, - 0 /* 0xa5 */, 0 /* 0xa6 */, 0 /* 0xa7 */, 0 /* 0xa8 */, 0 /* 0xa9 */, - 0 /* 0xaa */, 0 /* 0xab */, 0 /* 0xac */, 0 /* 0xad */, 0 /* 0xae */, - 0 /* 0xaf */, 0 /* 0xb0 */, 0 /* 0xb1 */, 0 /* 0xb2 */, 0 /* 0xb3 */, - 0 /* 0xb4 */, 0 /* 0xb5 */, 0 /* 0xb6 */, 0 /* 0xb7 */, 0 /* 0xb8 */, - 0 /* 0xb9 */, 0 /* 0xba */, 0 /* 0xbb */, 0 /* 0xbc */, 0 /* 0xbd */, - 0 /* 0xbe */, 0 /* 0xbf */, 0 /* 0xc0 */, 0 /* 0xc1 */, 0 /* 0xc2 */, - 0 /* 0xc3 */, 0 /* 0xc4 */, 0 /* 0xc5 */, 0 /* 0xc6 */, 0 /* 0xc7 */, - 0 /* 0xc8 */, 0 /* 0xc9 */, 0 /* 0xca */, 0 /* 0xcb */, 0 /* 0xcc */, - 0 /* 0xcd */, 0 /* 0xce */, 0 /* 0xcf */, 0 /* 0xd0 */, 0 /* 0xd1 */, - 0 /* 0xd2 */, 0 /* 0xd3 */, 0 /* 0xd4 */, 0 /* 0xd5 */, 0 /* 0xd6 */, - 0 /* 0xd7 */, 0 /* 0xd8 */, 0 /* 0xd9 */, 0 /* 0xda */, 0 /* 0xdb */, - 0 /* 0xdc */, 0 /* 0xdd */, 0 /* 0xde */, 0 /* 0xdf */, 0 /* 0xe0 */, - 0 /* 0xe1 */, 0 /* 0xe2 */, 0 /* 0xe3 */, 0 /* 0xe4 */, 0 /* 0xe5 */, - 0 /* 0xe6 */, 0 /* 0xe7 */, 0 /* 0xe8 */, 0 /* 0xe9 */, 0 /* 0xea */, - 0 /* 0xeb */, 0 /* 0xec */, 0 /* 0xed */, 0 /* 0xee */, 0 /* 0xef */, - 0 /* 0xf0 */, 0 /* 0xf1 */, 0 /* 0xf2 */, 0 /* 0xf3 */, 0 /* 0xf4 */, - 0 /* 0xf5 */, 0 /* 0xf6 */, 0 /* 0xf7 */, 0 /* 0xf8 */, 0 /* 0xf9 */, - 0 /* 0xfa */, 0 /* 0xfb */, 0 /* 0xfc */, 0 /* 0xfd */, 0 /* 0xfe */, - 0 /* 0xff */, -}; - -static nghttp3_ssize sf_parse_token(nghttp3_sf_value *dest, - const uint8_t *begin, const uint8_t *end) { - const uint8_t *p = begin; - - if ((*p < 'A' || 'Z' < *p) && (*p < 'a' || 'z' < *p) && *p != '*') { - return -1; - } - - for (; p != end && SF_TOKEN_CHARS[*p]; ++p) - ; - - if (dest) { - dest->type = NGHTTP3_SF_VALUE_TYPE_TOKEN; - dest->s.base = begin; - dest->s.len = (size_t)(p - begin); - } - - return p - begin; -} - -/* Generated by genchartbl.py */ -static const int SF_BYTESEQ_CHARS[] = { - 0 /* NUL */, 0 /* SOH */, 0 /* STX */, 0 /* ETX */, 0 /* EOT */, - 0 /* ENQ */, 0 /* ACK */, 0 /* BEL */, 0 /* BS */, 0 /* HT */, - 0 /* LF */, 0 /* VT */, 0 /* FF */, 0 /* CR */, 0 /* SO */, - 0 /* SI */, 0 /* DLE */, 0 /* DC1 */, 0 /* DC2 */, 0 /* DC3 */, - 0 /* DC4 */, 0 /* NAK */, 0 /* SYN */, 0 /* ETB */, 0 /* CAN */, - 0 /* EM */, 0 /* SUB */, 0 /* ESC */, 0 /* FS */, 0 /* GS */, - 0 /* RS */, 0 /* US */, 0 /* SPC */, 0 /* ! 
*/, 0 /* " */, - 0 /* # */, 0 /* $ */, 0 /* % */, 0 /* & */, 0 /* ' */, - 0 /* ( */, 0 /* ) */, 0 /* * */, 1 /* + */, 0 /* , */, - 0 /* - */, 0 /* . */, 1 /* / */, 1 /* 0 */, 1 /* 1 */, - 1 /* 2 */, 1 /* 3 */, 1 /* 4 */, 1 /* 5 */, 1 /* 6 */, - 1 /* 7 */, 1 /* 8 */, 1 /* 9 */, 0 /* : */, 0 /* ; */, - 0 /* < */, 1 /* = */, 0 /* > */, 0 /* ? */, 0 /* @ */, - 1 /* A */, 1 /* B */, 1 /* C */, 1 /* D */, 1 /* E */, - 1 /* F */, 1 /* G */, 1 /* H */, 1 /* I */, 1 /* J */, - 1 /* K */, 1 /* L */, 1 /* M */, 1 /* N */, 1 /* O */, - 1 /* P */, 1 /* Q */, 1 /* R */, 1 /* S */, 1 /* T */, - 1 /* U */, 1 /* V */, 1 /* W */, 1 /* X */, 1 /* Y */, - 1 /* Z */, 0 /* [ */, 0 /* \ */, 0 /* ] */, 0 /* ^ */, - 0 /* _ */, 0 /* ` */, 1 /* a */, 1 /* b */, 1 /* c */, - 1 /* d */, 1 /* e */, 1 /* f */, 1 /* g */, 1 /* h */, - 1 /* i */, 1 /* j */, 1 /* k */, 1 /* l */, 1 /* m */, - 1 /* n */, 1 /* o */, 1 /* p */, 1 /* q */, 1 /* r */, - 1 /* s */, 1 /* t */, 1 /* u */, 1 /* v */, 1 /* w */, - 1 /* x */, 1 /* y */, 1 /* z */, 0 /* { */, 0 /* | */, - 0 /* } */, 0 /* ~ */, 0 /* DEL */, 0 /* 0x80 */, 0 /* 0x81 */, - 0 /* 0x82 */, 0 /* 0x83 */, 0 /* 0x84 */, 0 /* 0x85 */, 0 /* 0x86 */, - 0 /* 0x87 */, 0 /* 0x88 */, 0 /* 0x89 */, 0 /* 0x8a */, 0 /* 0x8b */, - 0 /* 0x8c */, 0 /* 0x8d */, 0 /* 0x8e */, 0 /* 0x8f */, 0 /* 0x90 */, - 0 /* 0x91 */, 0 /* 0x92 */, 0 /* 0x93 */, 0 /* 0x94 */, 0 /* 0x95 */, - 0 /* 0x96 */, 0 /* 0x97 */, 0 /* 0x98 */, 0 /* 0x99 */, 0 /* 0x9a */, - 0 /* 0x9b */, 0 /* 0x9c */, 0 /* 0x9d */, 0 /* 0x9e */, 0 /* 0x9f */, - 0 /* 0xa0 */, 0 /* 0xa1 */, 0 /* 0xa2 */, 0 /* 0xa3 */, 0 /* 0xa4 */, - 0 /* 0xa5 */, 0 /* 0xa6 */, 0 /* 0xa7 */, 0 /* 0xa8 */, 0 /* 0xa9 */, - 0 /* 0xaa */, 0 /* 0xab */, 0 /* 0xac */, 0 /* 0xad */, 0 /* 0xae */, - 0 /* 0xaf */, 0 /* 0xb0 */, 0 /* 0xb1 */, 0 /* 0xb2 */, 0 /* 0xb3 */, - 0 /* 0xb4 */, 0 /* 0xb5 */, 0 /* 0xb6 */, 0 /* 0xb7 */, 0 /* 0xb8 */, - 0 /* 0xb9 */, 0 /* 0xba */, 0 /* 0xbb */, 0 /* 0xbc */, 0 /* 0xbd */, - 0 /* 0xbe */, 0 /* 0xbf */, 0 /* 0xc0 */, 0 /* 0xc1 */, 0 /* 0xc2 */, - 0 /* 0xc3 */, 0 /* 0xc4 */, 0 /* 0xc5 */, 0 /* 0xc6 */, 0 /* 0xc7 */, - 0 /* 0xc8 */, 0 /* 0xc9 */, 0 /* 0xca */, 0 /* 0xcb */, 0 /* 0xcc */, - 0 /* 0xcd */, 0 /* 0xce */, 0 /* 0xcf */, 0 /* 0xd0 */, 0 /* 0xd1 */, - 0 /* 0xd2 */, 0 /* 0xd3 */, 0 /* 0xd4 */, 0 /* 0xd5 */, 0 /* 0xd6 */, - 0 /* 0xd7 */, 0 /* 0xd8 */, 0 /* 0xd9 */, 0 /* 0xda */, 0 /* 0xdb */, - 0 /* 0xdc */, 0 /* 0xdd */, 0 /* 0xde */, 0 /* 0xdf */, 0 /* 0xe0 */, - 0 /* 0xe1 */, 0 /* 0xe2 */, 0 /* 0xe3 */, 0 /* 0xe4 */, 0 /* 0xe5 */, - 0 /* 0xe6 */, 0 /* 0xe7 */, 0 /* 0xe8 */, 0 /* 0xe9 */, 0 /* 0xea */, - 0 /* 0xeb */, 0 /* 0xec */, 0 /* 0xed */, 0 /* 0xee */, 0 /* 0xef */, - 0 /* 0xf0 */, 0 /* 0xf1 */, 0 /* 0xf2 */, 0 /* 0xf3 */, 0 /* 0xf4 */, - 0 /* 0xf5 */, 0 /* 0xf6 */, 0 /* 0xf7 */, 0 /* 0xf8 */, 0 /* 0xf9 */, - 0 /* 0xfa */, 0 /* 0xfb */, 0 /* 0xfc */, 0 /* 0xfd */, 0 /* 0xfe */, - 0 /* 0xff */, -}; - -static nghttp3_ssize sf_parse_byteseq(nghttp3_sf_value *dest, - const uint8_t *begin, - const uint8_t *end) { - const uint8_t *p = begin; - - if (*p++ != ':') { - return -1; - } - - for (; p != end; ++p) { - switch (*p) { - case ':': - if (dest) { - dest->type = NGHTTP3_SF_VALUE_TYPE_BYTESEQ; - dest->s.base = begin + 1; - dest->s.len = (size_t)(p - dest->s.base); - } - - ++p; - - return p - begin; - default: - if (!SF_BYTESEQ_CHARS[*p]) { - return -1; - } - } - } - - return -1; -} - -static nghttp3_ssize sf_parse_boolean(nghttp3_sf_value *dest, - const uint8_t *begin, - const uint8_t *end) { - const uint8_t *p = 
begin; - int b; - - if (*p++ != '?') { - return -1; - } - - if (p == end) { - return -1; - } - - switch (*p++) { - case '0': - b = 0; - break; - case '1': - b = 1; - break; - default: - return -1; - } - - if (dest) { - dest->type = NGHTTP3_SF_VALUE_TYPE_BOOLEAN; - dest->b = b; - } - - return p - begin; -} - -static nghttp3_ssize sf_parse_bare_item(nghttp3_sf_value *dest, - const uint8_t *begin, - const uint8_t *end) { - switch (*begin) { - case '-': - case '0': - case '1': - case '2': - case '3': - case '4': - case '5': - case '6': - case '7': - case '8': - case '9': - return sf_parse_integer_or_decimal(dest, begin, end); - case '"': - return sf_parse_string(dest, begin, end); - case '*': - return sf_parse_token(dest, begin, end); - case ':': - return sf_parse_byteseq(dest, begin, end); - case '?': - return sf_parse_boolean(dest, begin, end); +static int is_ws(uint8_t c) { + switch (c) { + case ' ': + case '\t': + return 1; default: - if (('A' <= *begin && *begin <= 'Z') || ('a' <= *begin && *begin <= 'z')) { - return sf_parse_token(dest, begin, end); - } - return -1; - } -} - -#define sf_discard_sp_end_err(BEGIN, END, ERR) \ - for (;; ++(BEGIN)) { \ - if ((BEGIN) == (END)) { \ - return (ERR); \ - } \ - if (*(BEGIN) != ' ') { \ - break; \ - } \ - } - -static nghttp3_ssize sf_parse_params(const uint8_t *begin, const uint8_t *end) { - const uint8_t *p = begin; - nghttp3_ssize slen; - - for (; p != end && *p == ';';) { - ++p; - - sf_discard_sp_end_err(p, end, -1); - - slen = sf_parse_key(p, end); - if (slen < 0) { - return -1; - } - - p += slen; - - if (p == end || *p != '=') { - /* Boolean true */ - } else if (++p == end) { - return -1; - } else { - slen = sf_parse_bare_item(NULL, p, end); - if (slen < 0) { - return -1; - } - - p += slen; - } - } - - return p - begin; -} - -static nghttp3_ssize sf_parse_item(nghttp3_sf_value *dest, const uint8_t *begin, - const uint8_t *end) { - const uint8_t *p = begin; - nghttp3_ssize slen; - - slen = sf_parse_bare_item(dest, p, end); - if (slen < 0) { - return -1; - } - - p += slen; - - slen = sf_parse_params(p, end); - if (slen < 0) { - return -1; - } - - p += slen; - - return p - begin; -} - -nghttp3_ssize nghttp3_sf_parse_item(nghttp3_sf_value *dest, - const uint8_t *begin, const uint8_t *end) { - return sf_parse_item(dest, begin, end); -} - -static nghttp3_ssize sf_parse_inner_list(nghttp3_sf_value *dest, - const uint8_t *begin, - const uint8_t *end) { - const uint8_t *p = begin; - nghttp3_ssize slen; - - if (*p++ != '(') { - return -1; - } - - for (;;) { - sf_discard_sp_end_err(p, end, -1); - - if (*p == ')') { - ++p; - - slen = sf_parse_params(p, end); - if (slen < 0) { - return -1; - } - - p += slen; - - if (dest) { - dest->type = NGHTTP3_SF_VALUE_TYPE_INNER_LIST; - } - - return p - begin; - } - - slen = sf_parse_item(NULL, p, end); - if (slen < 0) { - return -1; - } - - p += slen; - - if (p == end || (*p != ' ' && *p != ')')) { - return -1; - } - } -} - -nghttp3_ssize nghttp3_sf_parse_inner_list(nghttp3_sf_value *dest, - const uint8_t *begin, - const uint8_t *end) { - return sf_parse_inner_list(dest, begin, end); -} - -static nghttp3_ssize sf_parse_item_or_inner_list(nghttp3_sf_value *dest, - const uint8_t *begin, - const uint8_t *end) { - if (*begin == '(') { - return sf_parse_inner_list(dest, begin, end); + return 0; } - - return sf_parse_item(dest, begin, end); } -#define sf_discard_ows(BEGIN, END) \ - for (;; ++(BEGIN)) { \ - if ((BEGIN) == (END)) { \ - goto fin; \ - } \ - if (*(BEGIN) != ' ' && *(BEGIN) != '\t') { \ - break; \ - } \ - } - 
-#define sf_discard_ows_end_err(BEGIN, END, ERR) \ - for (;; ++(BEGIN)) { \ - if ((BEGIN) == (END)) { \ - return (ERR); \ - } \ - if (*(BEGIN) != ' ' && *(BEGIN) != '\t') { \ - break; \ - } \ - } - int nghttp3_http_parse_priority(nghttp3_pri *dest, const uint8_t *value, size_t valuelen) { - const uint8_t *p = value, *end = value + valuelen; - nghttp3_ssize slen; - nghttp3_sf_value val; nghttp3_pri pri = *dest; - const uint8_t *key; - size_t keylen; + sf_parser sfp; + sf_vec key; + sf_value val; + int rv; - for (; p != end && *p == ' '; ++p) - ; + sf_parser_init(&sfp, value, valuelen); + + for (;;) { + rv = sf_parser_dict(&sfp, &key, &val); + if (rv != 0) { + if (rv == SF_ERR_EOF) { + break; + } - for (; p != end;) { - slen = sf_parse_key(p, end); - if (slen < 0) { return NGHTTP3_ERR_INVALID_ARGUMENT; } - key = p; - keylen = (size_t)slen; - - p += slen; - - if (p == end || *p != '=') { - /* Boolean true */ - val.type = NGHTTP3_SF_VALUE_TYPE_BOOLEAN; - val.b = 1; + if (key.len != 1) { + continue; + } - slen = sf_parse_params(p, end); - if (slen < 0) { + switch (key.base[0]) { + case 'i': + if (val.type != SF_TYPE_BOOLEAN) { return NGHTTP3_ERR_INVALID_ARGUMENT; } - } else if (++p == end) { - return NGHTTP3_ERR_INVALID_ARGUMENT; - } else { - slen = sf_parse_item_or_inner_list(&val, p, end); - if (slen < 0) { - return NGHTTP3_ERR_INVALID_ARGUMENT; - } - } - - p += slen; - - if (keylen == 1) { - switch (key[0]) { - case 'i': - if (val.type != NGHTTP3_SF_VALUE_TYPE_BOOLEAN) { - return NGHTTP3_ERR_INVALID_ARGUMENT; - } - - pri.inc = val.b; - - break; - case 'u': - if (val.type != NGHTTP3_SF_VALUE_TYPE_INTEGER || - val.i < NGHTTP3_URGENCY_HIGH || NGHTTP3_URGENCY_LOW < val.i) { - return NGHTTP3_ERR_INVALID_ARGUMENT; - } - pri.urgency = (uint32_t)val.i; + pri.inc = (uint8_t)val.boolean; - break; + break; + case 'u': + if (val.type != SF_TYPE_INTEGER || val.integer < NGHTTP3_URGENCY_HIGH || + NGHTTP3_URGENCY_LOW < val.integer) { + return NGHTTP3_ERR_INVALID_ARGUMENT; } - } - sf_discard_ows(p, end); + pri.urgency = (uint32_t)val.integer; - if (*p++ != ',') { - return NGHTTP3_ERR_INVALID_ARGUMENT; + break; } - - sf_discard_ows_end_err(p, end, NGHTTP3_ERR_INVALID_ARGUMENT); } -fin: *dest = pri; return 0; } +int nghttp3_pri_parse_priority_versioned(int pri_version, nghttp3_pri *dest, + const uint8_t *value, + size_t valuelen) { + (void)pri_version; + + return nghttp3_http_parse_priority(dest, value, valuelen); +} + static int http_request_on_header(nghttp3_http_state *http, nghttp3_qpack_nv *nv, int trailers, int connect_protocol) { @@ -931,11 +285,10 @@ static int http_request_on_header(nghttp3_http_state *http, break; case NGHTTP3_QPACK_TOKEN_PRIORITY: if (!trailers && !(http->flags & NGHTTP3_HTTP_FLAG_BAD_PRIORITY)) { - pri.urgency = nghttp3_pri_uint8_urgency(http->pri); - pri.inc = nghttp3_pri_uint8_inc(http->pri); + pri = http->pri; if (nghttp3_http_parse_priority(&pri, nv->value->base, nv->value->len) == 0) { - http->pri = nghttp3_pri_to_uint8(&pri); + http->pri = pri; http->flags |= NGHTTP3_HTTP_FLAG_PRIORITY; } else { http->flags &= ~NGHTTP3_HTTP_FLAG_PRIORITY; @@ -1637,10 +990,9 @@ int nghttp3_check_header_value(const uint8_t *value, size_t len) { case 0: return 1; case 1: - return !(*value == ' ' || *value == '\t'); + return !is_ws(*value); default: - if (*value == ' ' || *value == '\t' || *(value + len - 1) == ' ' || - *(value + len - 1) == '\t') { + if (is_ws(*value) || is_ws(*(value + len - 1))) { return 0; } } @@ -1652,3 +1004,7 @@ int nghttp3_check_header_value(const uint8_t *value, size_t 
len) { } return 1; } + +int nghttp3_pri_eq(const nghttp3_pri *a, const nghttp3_pri *b) { + return a->urgency == b->urgency && a->inc == b->inc; +} diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_http.h b/deps/ngtcp2/nghttp3/lib/nghttp3_http.h index 1617348ad14d78..575d9c267e1b68 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_http.h +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_http.h @@ -150,53 +150,24 @@ int nghttp3_http_on_data_chunk(nghttp3_stream *stream, size_t n); void nghttp3_http_record_request_method(nghttp3_stream *stream, const nghttp3_nv *nva, size_t nvlen); -/* - * RFC 8941 Structured Field Values. - */ -typedef enum nghttp3_sf_value_type { - NGHTTP3_SF_VALUE_TYPE_BOOLEAN, - NGHTTP3_SF_VALUE_TYPE_INTEGER, - NGHTTP3_SF_VALUE_TYPE_DECIMAL, - NGHTTP3_SF_VALUE_TYPE_STRING, - NGHTTP3_SF_VALUE_TYPE_TOKEN, - NGHTTP3_SF_VALUE_TYPE_BYTESEQ, - NGHTTP3_SF_VALUE_TYPE_INNER_LIST, -} nghttp3_sf_value_type; - -/* - * nghttp3_sf_value stores Structured Field Values item. For Inner - * List, only type is set to NGHTTP3_SF_VALUE_TYPE_INNER_LIST. - */ -typedef struct nghttp3_sf_value { - uint8_t type; - union { - int b; - int64_t i; - double d; - struct { - const uint8_t *base; - size_t len; - } s; - }; -} nghttp3_sf_value; - -/* - * nghttp3_sf_parse_item parses the input sequence [|begin|, |end|) - * and stores the parsed an Item in |dest|. It returns the number of - * bytes consumed if it succeeds, or -1. This function is declared - * here for unit tests. +/** + * @function + * + * `nghttp3_http_parse_priority` parses priority HTTP header field + * stored in the buffer pointed by |value| of length |len|. If it + * successfully processed header field value, it stores the result + * into |*dest|. This function just overwrites what it sees in the + * header field value and does not initialize any field in |*dest|. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGHTTP3_ERR_INVALID_ARGUMENT` + * The function could not parse the provided value. */ -nghttp3_ssize nghttp3_sf_parse_item(nghttp3_sf_value *dest, - const uint8_t *begin, const uint8_t *end); +int nghttp3_http_parse_priority(nghttp3_pri *dest, const uint8_t *value, + size_t len); -/* - * nghttp3_sf_parse_inner_list parses the input sequence [|begin|, |end|) - * and stores the parsed an Inner List in |dest|. It returns the number of - * bytes consumed if it succeeds, or -1. This function is declared - * here for unit tests. 
- */ -nghttp3_ssize nghttp3_sf_parse_inner_list(nghttp3_sf_value *dest, - const uint8_t *begin, - const uint8_t *end); +int nghttp3_pri_eq(const nghttp3_pri *a, const nghttp3_pri *b); #endif /* NGHTTP3_HTTP_H */ diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_ksl.c b/deps/ngtcp2/nghttp3/lib/nghttp3_ksl.c index adea677abe1f1c..d7420a5d8a1e5d 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_ksl.c +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_ksl.c @@ -36,6 +36,8 @@ static nghttp3_ksl_blk null_blk = {{{NULL, NULL, 0, 0, {0}}}}; +nghttp3_objalloc_def(ksl_blk, nghttp3_ksl_blk, oplent); + static size_t ksl_nodelen(size_t keylen) { return (sizeof(nghttp3_ksl_node) + keylen - sizeof(uint64_t) + 0xfu) & ~(uintptr_t)0xfu; @@ -722,6 +724,24 @@ void nghttp3_ksl_update_key(nghttp3_ksl *ksl, const nghttp3_ksl_key *old_key, } } +size_t nghttp3_ksl_len(nghttp3_ksl *ksl) { return ksl->n; } + +void nghttp3_ksl_clear(nghttp3_ksl *ksl) { + if (!ksl->head) { + return; + } + +#ifdef NOMEMPOOL + ksl_free_blk(ksl, ksl->head); +#endif /* NOMEMPOOL */ + + ksl->front = ksl->back = ksl->head = NULL; + ksl->n = 0; + + nghttp3_objalloc_clear(&ksl->blkalloc); +} + +#ifndef WIN32 static void ksl_print(nghttp3_ksl *ksl, nghttp3_ksl_blk *blk, size_t level) { size_t i; nghttp3_ksl_node *node; @@ -742,23 +762,6 @@ static void ksl_print(nghttp3_ksl *ksl, nghttp3_ksl_blk *blk, size_t level) { } } -size_t nghttp3_ksl_len(nghttp3_ksl *ksl) { return ksl->n; } - -void nghttp3_ksl_clear(nghttp3_ksl *ksl) { - if (!ksl->head) { - return; - } - -#ifdef NOMEMPOOL - ksl_free_blk(ksl, ksl->head); -#endif /* NOMEMPOOL */ - - ksl->front = ksl->back = ksl->head = NULL; - ksl->n = 0; - - nghttp3_objalloc_clear(&ksl->blkalloc); -} - void nghttp3_ksl_print(nghttp3_ksl *ksl) { if (!ksl->head) { return; @@ -766,6 +769,7 @@ void nghttp3_ksl_print(nghttp3_ksl *ksl) { ksl_print(ksl, ksl->head, 0); } +#endif /* !WIN32 */ nghttp3_ksl_it nghttp3_ksl_begin(const nghttp3_ksl *ksl) { nghttp3_ksl_it it; diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_ksl.h b/deps/ngtcp2/nghttp3/lib/nghttp3_ksl.h index 0bc10e846fe418..d513bdd672c750 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_ksl.h +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_ksl.h @@ -108,7 +108,7 @@ struct nghttp3_ksl_blk { }; }; -nghttp3_objalloc_def(ksl_blk, nghttp3_ksl_blk, oplent); +nghttp3_objalloc_decl(ksl_blk, nghttp3_ksl_blk, oplent); /* * nghttp3_ksl_compar is a function type which returns nonzero if key @@ -265,12 +265,14 @@ void nghttp3_ksl_clear(nghttp3_ksl *ksl); #define nghttp3_ksl_nth_node(KSL, BLK, N) \ ((nghttp3_ksl_node *)(void *)((BLK)->nodes + (KSL)->nodelen * (N))) +#ifndef WIN32 /* * nghttp3_ksl_print prints its internal state in stderr. It assumes * that the key is of type int64_t. This function should be used for * the debugging purpose only. */ void nghttp3_ksl_print(nghttp3_ksl *ksl); +#endif /* !WIN32 */ /* * nghttp3_ksl_it_init initializes |it|. 
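For orientation, a minimal usage sketch of the priority-parsing API whose documentation is added in the nghttp3_http.h hunk above. It is illustrative only and not part of the patch: the function name, the nghttp3_pri fields, and the NGHTTP3_ERR_INVALID_ARGUMENT error code are taken from the diffs in this series, while the includes and the standalone main() wrapper are assumptions (in-tree callers would normally go through the nghttp3_pri_parse_priority_versioned entry point added earlier in this patch). It would only build inside the library tree where the internal header is available.

/*
 * Illustrative sketch (not part of the patch): parse an RFC 9218
 * priority field value such as "u=5, i" with
 * nghttp3_http_parse_priority() as declared in the nghttp3_http.h
 * hunk above.
 */
#include <stdio.h>

#include "nghttp3_http.h" /* internal header patched above (assumed include) */

int main(void) {
  static const uint8_t value[] = "u=5, i";
  /* The parser only overwrites the keys it sees in the field value and
     does not initialize |*dest|, so seed the defaults first. */
  nghttp3_pri pri = {.urgency = NGHTTP3_DEFAULT_URGENCY, .inc = 0};

  if (nghttp3_http_parse_priority(&pri, value, sizeof(value) - 1) != 0) {
    /* NGHTTP3_ERR_INVALID_ARGUMENT: the value is not a valid priority field. */
    fprintf(stderr, "invalid priority field value\n");
    return 1;
  }

  /* For "u=5, i" this prints urgency=5 inc=1. */
  printf("urgency=%u inc=%u\n", (unsigned)pri.urgency, (unsigned)pri.inc);

  return 0;
}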
diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_map.c b/deps/ngtcp2/nghttp3/lib/nghttp3_map.c index fcfc31ae41e8e0..b93fdfd3d488f5 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_map.c +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_map.c @@ -127,6 +127,7 @@ static void map_bucket_set_data(nghttp3_map_bucket *bkt, uint32_t hash, bkt->data = data; } +#ifndef WIN32 void nghttp3_map_print_distance(nghttp3_map *map) { uint32_t i; size_t idx; @@ -146,6 +147,7 @@ void nghttp3_map_print_distance(nghttp3_map *map) { distance(map->tablelen, map->tablelenbits, bkt, idx)); } } +#endif /* !WIN32 */ static int insert(nghttp3_map_bucket *table, uint32_t tablelen, uint32_t tablelenbits, uint32_t hash, diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_map.h b/deps/ngtcp2/nghttp3/lib/nghttp3_map.h index 79dff0286bc3cc..7683cfeef3f33e 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_map.h +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_map.h @@ -132,6 +132,8 @@ size_t nghttp3_map_size(nghttp3_map *map); int nghttp3_map_each(nghttp3_map *map, int (*func)(void *data, void *ptr), void *ptr); +#ifndef WIN32 void nghttp3_map_print_distance(nghttp3_map *map); +#endif /* !WIN32 */ #endif /* NGHTTP3_MAP_H */ diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_objalloc.h b/deps/ngtcp2/nghttp3/lib/nghttp3_objalloc.h index da39447a872b02..02dff285f24060 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_objalloc.h +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_objalloc.h @@ -66,15 +66,25 @@ void nghttp3_objalloc_free(nghttp3_objalloc *objalloc); void nghttp3_objalloc_clear(nghttp3_objalloc *objalloc); #ifndef NOMEMPOOL -# define nghttp3_objalloc_def(NAME, TYPE, OPLENTFIELD) \ +# define nghttp3_objalloc_decl(NAME, TYPE, OPLENTFIELD) \ inline static void nghttp3_objalloc_##NAME##_init( \ nghttp3_objalloc *objalloc, size_t nmemb, const nghttp3_mem *mem) { \ nghttp3_objalloc_init( \ objalloc, ((sizeof(TYPE) + 0xfu) & ~(uintptr_t)0xfu) * nmemb, mem); \ } \ \ - inline static TYPE *nghttp3_objalloc_##NAME##_get( \ - nghttp3_objalloc *objalloc) { \ + TYPE *nghttp3_objalloc_##NAME##_get(nghttp3_objalloc *objalloc); \ + \ + TYPE *nghttp3_objalloc_##NAME##_len_get(nghttp3_objalloc *objalloc, \ + size_t len); \ + \ + inline static void nghttp3_objalloc_##NAME##_release( \ + nghttp3_objalloc *objalloc, TYPE *obj) { \ + nghttp3_opl_push(&objalloc->opl, &obj->OPLENTFIELD); \ + } + +# define nghttp3_objalloc_def(NAME, TYPE, OPLENTFIELD) \ + TYPE *nghttp3_objalloc_##NAME##_get(nghttp3_objalloc *objalloc) { \ nghttp3_opl_entry *oplent = nghttp3_opl_pop(&objalloc->opl); \ TYPE *obj; \ int rv; \ @@ -92,8 +102,8 @@ void nghttp3_objalloc_clear(nghttp3_objalloc *objalloc); return nghttp3_struct_of(oplent, TYPE, OPLENTFIELD); \ } \ \ - inline static TYPE *nghttp3_objalloc_##NAME##_len_get( \ - nghttp3_objalloc *objalloc, size_t len) { \ + TYPE *nghttp3_objalloc_##NAME##_len_get(nghttp3_objalloc *objalloc, \ + size_t len) { \ nghttp3_opl_entry *oplent = nghttp3_opl_pop(&objalloc->opl); \ TYPE *obj; \ int rv; \ @@ -108,14 +118,9 @@ void nghttp3_objalloc_clear(nghttp3_objalloc *objalloc); } \ \ return nghttp3_struct_of(oplent, TYPE, OPLENTFIELD); \ - } \ - \ - inline static void nghttp3_objalloc_##NAME##_release( \ - nghttp3_objalloc *objalloc, TYPE *obj) { \ - nghttp3_opl_push(&objalloc->opl, &obj->OPLENTFIELD); \ } #else /* NOMEMPOOL */ -# define nghttp3_objalloc_def(NAME, TYPE, OPLENTFIELD) \ +# define nghttp3_objalloc_decl(NAME, TYPE, OPLENTFIELD) \ inline static void nghttp3_objalloc_##NAME##_init( \ nghttp3_objalloc *objalloc, size_t nmemb, const nghttp3_mem *mem) { \ nghttp3_objalloc_init( \ 
@@ -136,6 +141,8 @@ void nghttp3_objalloc_clear(nghttp3_objalloc *objalloc); nghttp3_objalloc *objalloc, TYPE *obj) { \ nghttp3_mem_free(objalloc->balloc.mem, obj); \ } + +# define nghttp3_objalloc_def(NAME, TYPE, OPLENTFIELD) #endif /* NOMEMPOOL */ #endif /* NGHTTP3_OBJALLOC_H */ diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_qpack.c b/deps/ngtcp2/nghttp3/lib/nghttp3_qpack.c index ddb3dd6d84bf8a..428c06a82c6bfb 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_qpack.c +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_qpack.c @@ -32,6 +32,7 @@ #include "nghttp3_str.h" #include "nghttp3_macro.h" #include "nghttp3_debug.h" +#include "nghttp3_unreachable.h" /* NGHTTP3_QPACK_MAX_QPACK_STREAMS is the maximum number of concurrent nghttp3_qpack_stream object to handle a client which never cancel @@ -1099,7 +1100,7 @@ static void qpack_encoder_remove_stream(nghttp3_qpack_encoder *encoder, /* * reserve_buf_internal ensures that |buf| contains at least * |extra_size| of free space. In other words, if this function - * succeeds, nghttp2_buf_left(buf) >= extra_size holds. |min_size| is + * succeeds, nghttp3_buf_left(buf) >= extra_size holds. |min_size| is * the minimum size of buffer. The allocated buffer has at least * |min_size| bytes. * @@ -1281,6 +1282,19 @@ int nghttp3_qpack_encoder_stream_is_blocked(nghttp3_qpack_encoder *encoder, return stream && encoder->krcnt < nghttp3_qpack_stream_get_max_cnt(stream); } +static uint32_t qpack_hash_name(const nghttp3_nv *nv) { + /* 32 bit FNV-1a: http://isthe.com/chongo/tech/comp/fnv/ */ + uint32_t h = 2166136261u; + size_t i; + + for (i = 0; i < nv->namelen; ++i) { + h ^= nv->name[i]; + h += (h << 1) + (h << 4) + (h << 7) + (h << 8) + (h << 24); + } + + return h; +} + /* * qpack_encoder_decide_indexing_mode determines and returns indexing * mode for header field |nv|. 
|token| is a token of header field @@ -1310,6 +1324,10 @@ qpack_encoder_decide_indexing_mode(nghttp3_qpack_encoder *encoder, case NGHTTP3_QPACK_TOKEN_IF_NONE_MATCH: case NGHTTP3_QPACK_TOKEN_LOCATION: case NGHTTP3_QPACK_TOKEN_SET_COOKIE: + if (nv->flags & NGHTTP3_NV_FLAG_TRY_INDEX) { + break; + } + return NGHTTP3_QPACK_INDEXING_MODE_LITERAL; case NGHTTP3_QPACK_TOKEN_HOST: case NGHTTP3_QPACK_TOKEN_TE: @@ -1317,6 +1335,10 @@ qpack_encoder_decide_indexing_mode(nghttp3_qpack_encoder *encoder, case NGHTTP3_QPACK_TOKEN_PRIORITY: break; default: + if (nv->flags & NGHTTP3_NV_FLAG_TRY_INDEX) { + break; + } + if (token >= 1000) { return NGHTTP3_QPACK_INDEXING_MODE_LITERAL; } @@ -1428,6 +1450,17 @@ int nghttp3_qpack_encoder_encode_nv(nghttp3_qpack_encoder *encoder, token = qpack_lookup_token(nv->name, nv->namelen); static_entry = token != -1 && (size_t)token < nghttp3_arraylen(token_stable); + + indexing_mode = qpack_encoder_decide_indexing_mode(encoder, nv, token); + + if (static_entry) { + sres = nghttp3_qpack_lookup_stable(nv, token, indexing_mode); + if (sres.index != -1 && sres.name_value_match) { + return nghttp3_qpack_encoder_write_static_indexed(encoder, rbuf, + (size_t)sres.index); + } + } + if (static_entry) { hash = token_stable[token].hash; } else { @@ -1444,21 +1477,12 @@ int nghttp3_qpack_encoder_encode_nv(nghttp3_qpack_encoder *encoder, case NGHTTP3_QPACK_TOKEN_PRIORITY: hash = 2498028297u; break; + default: + hash = qpack_hash_name(nv); } } - indexing_mode = qpack_encoder_decide_indexing_mode(encoder, nv, token); - - if (static_entry) { - sres = nghttp3_qpack_lookup_stable(nv, token, indexing_mode); - if (sres.index != -1 && sres.name_value_match) { - return nghttp3_qpack_encoder_write_static_indexed(encoder, rbuf, - (size_t)sres.index); - } - } - - if (hash && - nghttp3_map_size(&encoder->streams) < NGHTTP3_QPACK_MAX_QPACK_STREAMS) { + if (nghttp3_map_size(&encoder->streams) < NGHTTP3_QPACK_MAX_QPACK_STREAMS) { dres = nghttp3_qpack_encoder_lookup_dtable(encoder, nv, token, hash, indexing_mode, encoder->krcnt, allow_blocking); @@ -2552,18 +2576,14 @@ nghttp3_ssize nghttp3_qpack_encoder_read_decoder(nghttp3_qpack_encoder *encoder, (int64_t)encoder->rstate.left); break; default: - /* unreachable */ - assert(0); - break; + nghttp3_unreachable(); } encoder->state = NGHTTP3_QPACK_DS_STATE_OPCODE; nghttp3_qpack_read_state_reset(&encoder->rstate); break; default: - /* unreachable */ - assert(0); - break; + nghttp3_unreachable(); } } @@ -2838,24 +2858,26 @@ nghttp3_ssize nghttp3_qpack_decoder_read_encoder(nghttp3_qpack_decoder *decoder, goto fail; } - if (decoder->opcode == NGHTTP3_QPACK_ES_OPCODE_DUPLICATE) { + switch (decoder->opcode) { + case NGHTTP3_QPACK_ES_OPCODE_DUPLICATE: rv = nghttp3_qpack_decoder_dtable_duplicate_add(decoder); if (rv != 0) { goto fail; } + decoder->state = NGHTTP3_QPACK_ES_STATE_OPCODE; nghttp3_qpack_read_state_reset(&decoder->rstate); - break; - } - if (decoder->opcode == NGHTTP3_QPACK_ES_OPCODE_INSERT_INDEXED) { + break; + case NGHTTP3_QPACK_ES_OPCODE_INSERT_INDEXED: decoder->rstate.prefix = 7; decoder->state = NGHTTP3_QPACK_ES_STATE_CHECK_VALUE_HUFFMAN; + break; + default: + nghttp3_unreachable(); } - /* Unreachable */ - assert(0); break; case NGHTTP3_QPACK_ES_STATE_CHECK_NAME_HUFFMAN: qpack_read_state_check_huffman(&decoder->rstate, *p); @@ -3010,9 +3032,7 @@ nghttp3_ssize nghttp3_qpack_decoder_read_encoder(nghttp3_qpack_decoder *decoder, rv = nghttp3_qpack_decoder_dtable_literal_add(decoder); break; default: - /* Unreachable */ - assert(0); - abort(); + 
nghttp3_unreachable(); } if (rv != 0) { goto fail; @@ -3045,9 +3065,7 @@ nghttp3_ssize nghttp3_qpack_decoder_read_encoder(nghttp3_qpack_decoder *decoder, rv = nghttp3_qpack_decoder_dtable_literal_add(decoder); break; default: - /* Unreachable */ - assert(0); - abort(); + nghttp3_unreachable(); } if (rv != 0) { goto fail; @@ -3430,8 +3448,7 @@ nghttp3_qpack_decoder_read_request(nghttp3_qpack_decoder *decoder, sctx->state = NGHTTP3_QPACK_RS_STATE_CHECK_VALUE_HUFFMAN; break; default: - /* Unreachable */ - assert(0); + nghttp3_unreachable(); } break; case NGHTTP3_QPACK_RS_STATE_CHECK_NAME_HUFFMAN: @@ -3589,8 +3606,7 @@ nghttp3_qpack_decoder_read_request(nghttp3_qpack_decoder *decoder, nghttp3_qpack_decoder_emit_literal(decoder, sctx, nv); break; default: - /* Unreachable */ - assert(0); + nghttp3_unreachable(); } *pflags |= NGHTTP3_QPACK_DECODE_FLAG_EMIT; @@ -3627,8 +3643,7 @@ nghttp3_qpack_decoder_read_request(nghttp3_qpack_decoder *decoder, nghttp3_qpack_decoder_emit_literal(decoder, sctx, nv); break; default: - /* Unreachable */ - assert(0); + nghttp3_unreachable(); } *pflags |= NGHTTP3_QPACK_DECODE_FLAG_EMIT; diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_ringbuf.c b/deps/ngtcp2/nghttp3/lib/nghttp3_ringbuf.c index 5e7775f1a5a597..61a7d06cad306f 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_ringbuf.c +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_ringbuf.c @@ -33,7 +33,8 @@ #include "nghttp3_macro.h" -#if defined(_MSC_VER) && !defined(__clang__) && (defined(_M_ARM) || defined(_M_ARM64)) +#if defined(_MSC_VER) && !defined(__clang__) && \ + (defined(_M_ARM) || defined(_M_ARM64)) unsigned int __popcnt(unsigned int x) { unsigned int c = 0; for (; x; ++c) { diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_stream.c b/deps/ngtcp2/nghttp3/lib/nghttp3_stream.c index e655a7ec01d10b..6188a141dd123b 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_stream.c +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_stream.c @@ -35,6 +35,7 @@ #include "nghttp3_str.h" #include "nghttp3_http.h" #include "nghttp3_vec.h" +#include "nghttp3_unreachable.h" /* NGHTTP3_STREAM_MAX_COPY_THRES is the maximum size of buffer which makes a copy to outq. 
*/ @@ -43,13 +44,14 @@ /* NGHTTP3_MIN_RBLEN is the minimum length of nghttp3_ringbuf */ #define NGHTTP3_MIN_RBLEN 4 +nghttp3_objalloc_def(stream, nghttp3_stream, oplent); + int nghttp3_stream_new(nghttp3_stream **pstream, int64_t stream_id, - uint64_t seq, const nghttp3_stream_callbacks *callbacks, + const nghttp3_stream_callbacks *callbacks, nghttp3_objalloc *out_chunk_objalloc, nghttp3_objalloc *stream_objalloc, const nghttp3_mem *mem) { nghttp3_stream *stream = nghttp3_objalloc_stream_get(stream_objalloc); - nghttp3_node_id nid; if (stream == NULL) { return NGHTTP3_ERR_NOMEM; @@ -60,10 +62,7 @@ int nghttp3_stream_new(nghttp3_stream **pstream, int64_t stream_id, stream->out_chunk_objalloc = out_chunk_objalloc; stream->stream_objalloc = stream_objalloc; - nghttp3_tnode_init( - &stream->node, - nghttp3_node_id_init(&nid, NGHTTP3_NODE_ID_TYPE_STREAM, stream_id), seq, - NGHTTP3_DEFAULT_URGENCY); + nghttp3_tnode_init(&stream->node, stream_id); nghttp3_ringbuf_init(&stream->frq, 0, sizeof(nghttp3_frame_entry), mem); nghttp3_ringbuf_init(&stream->chunks, 0, sizeof(nghttp3_buf), mem); @@ -77,7 +76,7 @@ int nghttp3_stream_new(nghttp3_stream **pstream, int64_t stream_id, stream->tx.offset = 0; stream->rx.http.status_code = -1; stream->rx.http.content_length = -1; - stream->rx.http.pri = NGHTTP3_DEFAULT_URGENCY; + stream->rx.http.pri.urgency = NGHTTP3_DEFAULT_URGENCY; stream->error_code = NGHTTP3_H3_NO_ERROR; if (callbacks) { @@ -146,6 +145,9 @@ static void delete_frq(nghttp3_ringbuf *frq, const nghttp3_mem *mem) { case NGHTTP3_FRAME_HEADERS: nghttp3_frame_headers_free(&frent->fr.headers, mem); break; + case NGHTTP3_FRAME_PRIORITY_UPDATE: + nghttp3_frame_priority_update_free(&frent->fr.priority_update, mem); + break; default: break; } @@ -188,7 +190,7 @@ nghttp3_ssize nghttp3_read_varint(nghttp3_varint_read_state *rvint, if (rvint->left == 0) { assert(rvint->acc == 0); - rvint->left = nghttp3_get_varint_len(src); + rvint->left = nghttp3_get_varintlen(src); if (rvint->left <= srclen) { rvint->acc = nghttp3_get_varint(&nread, src); rvint->left = 0; @@ -248,7 +250,7 @@ int nghttp3_stream_fill_outq(nghttp3_stream *stream) { int data_eof; int rv; - for (; nghttp3_ringbuf_len(frq) && !nghttp3_stream_outq_is_full(stream) && + for (; nghttp3_ringbuf_len(frq) && stream->unsent_bytes < NGHTTP3_MIN_UNSENT_BYTES;) { frent = nghttp3_ringbuf_get(frq, 0); @@ -289,6 +291,8 @@ int nghttp3_stream_fill_outq(nghttp3_stream *stream) { if (rv != 0) { return rv; } + nghttp3_frame_priority_update_free(&frent->fr.priority_update, + stream->mem); break; default: /* TODO Not implemented */ @@ -308,7 +312,7 @@ static void typed_buf_shared_init(nghttp3_typed_buf *tbuf, } int nghttp3_stream_write_stream_type(nghttp3_stream *stream) { - size_t len = nghttp3_put_varint_len((int64_t)stream->type); + size_t len = nghttp3_put_varintlen((int64_t)stream->type); nghttp3_buf *chunk; nghttp3_typed_buf tbuf; int rv; @@ -351,10 +355,18 @@ int nghttp3_stream_write_settings(nghttp3_stream *stream, iv[2].id = NGHTTP3_SETTINGS_ID_QPACK_BLOCKED_STREAMS; iv[2].value = local_settings->qpack_blocked_streams; + if (local_settings->h3_datagram) { + iv[fr.settings.niv].id = NGHTTP3_SETTINGS_ID_H3_DATAGRAM; + iv[fr.settings.niv].value = 1; + + ++fr.settings.niv; + } + if (local_settings->enable_connect_protocol) { + iv[fr.settings.niv].id = NGHTTP3_SETTINGS_ID_ENABLE_CONNECT_PROTOCOL; + iv[fr.settings.niv].value = 1; + ++fr.settings.niv; - iv[3].id = NGHTTP3_SETTINGS_ID_ENABLE_CONNECT_PROTOCOL; - iv[3].value = 1; } len = 
nghttp3_frame_write_settings_len(&fr.settings.hd.length, &fr.settings); @@ -453,8 +465,8 @@ int nghttp3_stream_write_header_block(nghttp3_stream *stream, nghttp3_buf_wrap_init(&pbuf, raw_pbuf, sizeof(raw_pbuf)); - rv = nghttp3_qpack_encoder_encode(qenc, &pbuf, rbuf, ebuf, - stream->node.nid.id, nva, nvlen); + rv = nghttp3_qpack_encoder_encode(qenc, &pbuf, rbuf, ebuf, stream->node.id, + nva, nvlen); if (rv != 0) { goto fail; } @@ -574,8 +586,8 @@ int nghttp3_stream_write_data(nghttp3_stream *stream, int *peof, *peof = 0; - sveccnt = read_data(conn, stream->node.nid.id, vec, nghttp3_arraylen(vec), - &flags, conn->user_data, stream->user_data); + sveccnt = read_data(conn, stream->node.id, vec, nghttp3_arraylen(vec), &flags, + conn->user_data, stream->user_data); if (sveccnt < 0) { if (sveccnt == NGHTTP3_ERR_WOULDBLOCK) { stream->flags |= NGHTTP3_STREAM_FLAG_READ_DATA_BLOCKED; @@ -691,11 +703,6 @@ int nghttp3_stream_write_qpack_decoder_stream(nghttp3_stream *stream) { return nghttp3_stream_outq_add(stream, &tbuf); } -int nghttp3_stream_outq_is_full(nghttp3_stream *stream) { - /* TODO Verify that the limit is reasonable. */ - return nghttp3_ringbuf_len(&stream->outq) >= 1024; -} - int nghttp3_stream_outq_add(nghttp3_stream *stream, const nghttp3_typed_buf *tbuf) { nghttp3_ringbuf *outq = &stream->outq; @@ -809,11 +816,11 @@ int nghttp3_stream_require_schedule(nghttp3_stream *stream) { !(stream->flags & NGHTTP3_STREAM_FLAG_READ_DATA_BLOCKED)); } -nghttp3_ssize nghttp3_stream_writev(nghttp3_stream *stream, int *pfin, - nghttp3_vec *vec, size_t veccnt) { +size_t nghttp3_stream_writev(nghttp3_stream *stream, int *pfin, + nghttp3_vec *vec, size_t veccnt) { nghttp3_ringbuf *outq = &stream->outq; size_t len = nghttp3_ringbuf_len(outq); - size_t i; + size_t i = stream->outq_idx; uint64_t offset = stream->outq_offset; size_t buflen; nghttp3_vec *vbegin = vec, *vend = vec + veccnt; @@ -821,25 +828,27 @@ nghttp3_ssize nghttp3_stream_writev(nghttp3_stream *stream, int *pfin, assert(veccnt > 0); - for (i = stream->outq_idx; i < len; ++i) { + if (i < len) { tbuf = nghttp3_ringbuf_get(outq, i); buflen = nghttp3_buf_len(&tbuf->buf); - if (offset >= buflen) { - offset -= buflen; - continue; + + if (offset < buflen) { + vec->base = tbuf->buf.pos + offset; + vec->len = (size_t)(buflen - offset); + ++vec; + } else { + /* This is the only case that satisfies offset >= buflen */ + assert(0 == offset); + assert(0 == buflen); } - vec->base = tbuf->buf.pos + offset; - vec->len = (size_t)(buflen - offset); - ++vec; ++i; - break; - } - for (; i < len && vec != vend; ++i, ++vec) { - tbuf = nghttp3_ringbuf_get(outq, i); - vec->base = tbuf->buf.pos; - vec->len = nghttp3_buf_len(&tbuf->buf); + for (; i < len && vec != vend; ++i, ++vec) { + tbuf = nghttp3_ringbuf_get(outq, i); + vec->base = tbuf->buf.pos; + vec->len = nghttp3_buf_len(&tbuf->buf); + } } /* TODO Rework this if we have finished implementing HTTP @@ -847,10 +856,10 @@ nghttp3_ssize nghttp3_stream_writev(nghttp3_stream *stream, int *pfin, *pfin = nghttp3_ringbuf_len(&stream->frq) == 0 && i == len && (stream->flags & NGHTTP3_STREAM_FLAG_WRITE_END_STREAM); - return vec - vbegin; + return (size_t)(vec - vbegin); } -int nghttp3_stream_add_outq_offset(nghttp3_stream *stream, size_t n) { +void nghttp3_stream_add_outq_offset(nghttp3_stream *stream, size_t n) { nghttp3_ringbuf *outq = &stream->outq; size_t i; size_t len = nghttp3_ringbuf_len(outq); @@ -874,8 +883,6 @@ int nghttp3_stream_add_outq_offset(nghttp3_stream *stream, size_t n) { stream->unsent_bytes -= n; 
stream->outq_idx = i; stream->outq_offset = offset; - - return 0; } int nghttp3_stream_outq_write_done(nghttp3_stream *stream) { @@ -885,8 +892,8 @@ int nghttp3_stream_outq_write_done(nghttp3_stream *stream) { return len == 0 || stream->outq_idx >= len; } -static int stream_pop_outq_entry(nghttp3_stream *stream, - nghttp3_typed_buf *tbuf) { +static void stream_pop_outq_entry(nghttp3_stream *stream, + nghttp3_typed_buf *tbuf) { nghttp3_ringbuf *chunks = &stream->chunks; nghttp3_buf *chunk; @@ -915,13 +922,10 @@ static int stream_pop_outq_entry(nghttp3_stream *stream, } break; default: - assert(0); - abort(); + nghttp3_unreachable(); }; nghttp3_ringbuf_pop_front(&stream->outq); - - return 0; } int nghttp3_stream_add_ack_offset(nghttp3_stream *stream, uint64_t n) { @@ -940,7 +944,7 @@ int nghttp3_stream_add_ack_offset(nghttp3_stream *stream, uint64_t n) { if (tbuf->type == NGHTTP3_BUF_TYPE_ALIEN) { nack = nghttp3_min(offset, (uint64_t)buflen) - stream->ack_done; if (stream->callbacks.acked_data) { - rv = stream->callbacks.acked_data(stream, stream->node.nid.id, nack, + rv = stream->callbacks.acked_data(stream, stream->node.id, nack, stream->user_data); if (rv != 0) { return NGHTTP3_ERR_CALLBACK_FAILURE; @@ -950,10 +954,7 @@ int nghttp3_stream_add_ack_offset(nghttp3_stream *stream, uint64_t n) { } if (offset >= buflen) { - rv = stream_pop_outq_entry(stream, tbuf); - if (rv != 0) { - return rv; - } + stream_pop_outq_entry(stream, tbuf); offset -= buflen; ++npopped; @@ -1221,8 +1222,7 @@ int nghttp3_stream_transit_rx_http_state(nghttp3_stream *stream, case NGHTTP3_HTTP_STATE_RESP_END: return NGHTTP3_ERR_MALFORMED_HTTP_MESSAGING; default: - assert(0); - abort(); + nghttp3_unreachable(); } } diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_stream.h b/deps/ngtcp2/nghttp3/lib/nghttp3_stream.h index 06292738a17e93..03a57697b232b3 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_stream.h +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_stream.h @@ -50,13 +50,13 @@ #define NGHTTP3_STREAM_MIN_WRITELEN 800 /* nghttp3_stream_type is unidirectional stream type. */ -typedef enum nghttp3_stream_type { - NGHTTP3_STREAM_TYPE_CONTROL = 0x00, - NGHTTP3_STREAM_TYPE_PUSH = 0x01, - NGHTTP3_STREAM_TYPE_QPACK_ENCODER = 0x02, - NGHTTP3_STREAM_TYPE_QPACK_DECODER = 0x03, - NGHTTP3_STREAM_TYPE_UNKNOWN = UINT64_MAX, -} nghttp3_stream_type; +typedef uint64_t nghttp3_stream_type; + +#define NGHTTP3_STREAM_TYPE_CONTROL 0x00 +#define NGHTTP3_STREAM_TYPE_PUSH 0x01 +#define NGHTTP3_STREAM_TYPE_QPACK_ENCODER 0x02 +#define NGHTTP3_STREAM_TYPE_QPACK_DECODER 0x03 +#define NGHTTP3_STREAM_TYPE_UNKNOWN UINT64_MAX typedef enum nghttp3_ctrl_stream_state { NGHTTP3_CTRL_STREAM_STATE_FRAME_TYPE, @@ -195,9 +195,8 @@ typedef struct nghttp3_http_state { /* recv_content_length is the number of body bytes received so far. */ int64_t recv_content_length; + nghttp3_pri pri; uint32_t flags; - /* pri is a stream priority produced by nghttp3_pri_to_uint8. 
*/ - uint8_t pri; } nghttp3_http_state; struct nghttp3_stream { @@ -257,7 +256,7 @@ struct nghttp3_stream { }; }; -nghttp3_objalloc_def(stream, nghttp3_stream, oplent); +nghttp3_objalloc_decl(stream, nghttp3_stream, oplent); typedef struct nghttp3_frame_entry { nghttp3_frame fr; @@ -272,7 +271,7 @@ typedef struct nghttp3_frame_entry { } nghttp3_frame_entry; int nghttp3_stream_new(nghttp3_stream **pstream, int64_t stream_id, - uint64_t seq, const nghttp3_stream_callbacks *callbacks, + const nghttp3_stream_callbacks *callbacks, nghttp3_objalloc *out_chunk_objalloc, nghttp3_objalloc *stream_objalloc, const nghttp3_mem *mem); @@ -293,13 +292,11 @@ int nghttp3_stream_fill_outq(nghttp3_stream *stream); int nghttp3_stream_write_stream_type(nghttp3_stream *stream); -nghttp3_ssize nghttp3_stream_writev(nghttp3_stream *stream, int *pfin, - nghttp3_vec *vec, size_t veccnt); +size_t nghttp3_stream_writev(nghttp3_stream *stream, int *pfin, + nghttp3_vec *vec, size_t veccnt); int nghttp3_stream_write_qpack_decoder_stream(nghttp3_stream *stream); -int nghttp3_stream_outq_is_full(nghttp3_stream *stream); - int nghttp3_stream_outq_add(nghttp3_stream *stream, const nghttp3_typed_buf *tbuf); @@ -331,7 +328,7 @@ nghttp3_buf *nghttp3_stream_get_chunk(nghttp3_stream *stream); int nghttp3_stream_is_blocked(nghttp3_stream *stream); -int nghttp3_stream_add_outq_offset(nghttp3_stream *stream, size_t n); +void nghttp3_stream_add_outq_offset(nghttp3_stream *stream, size_t n); /* * nghttp3_stream_outq_write_done returns nonzero if all contents in diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_tnode.c b/deps/ngtcp2/nghttp3/lib/nghttp3_tnode.c index 36e738c3469aca..d9c5e598699512 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_tnode.c +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_tnode.c @@ -31,26 +31,12 @@ #include "nghttp3_conn.h" #include "nghttp3_conv.h" -nghttp3_node_id *nghttp3_node_id_init(nghttp3_node_id *nid, - nghttp3_node_id_type type, int64_t id) { - nid->type = type; - nid->id = id; - return nid; -} - -int nghttp3_node_id_eq(const nghttp3_node_id *a, const nghttp3_node_id *b) { - return a->type == b->type && a->id == b->id; -} - -void nghttp3_tnode_init(nghttp3_tnode *tnode, const nghttp3_node_id *nid, - uint64_t seq, uint8_t pri) { - assert(nghttp3_pri_uint8_urgency(pri) < NGHTTP3_URGENCY_LEVELS); - +void nghttp3_tnode_init(nghttp3_tnode *tnode, int64_t id) { tnode->pe.index = NGHTTP3_PQ_BAD_INDEX; - tnode->nid = *nid; - tnode->seq = seq; + tnode->id = id; tnode->cycle = 0; - tnode->pri = pri; + tnode->pri.urgency = NGHTTP3_DEFAULT_URGENCY; + tnode->pri.inc = 0; } void nghttp3_tnode_free(nghttp3_tnode *tnode) { (void)tnode; } @@ -86,12 +72,11 @@ int nghttp3_tnode_schedule(nghttp3_tnode *tnode, nghttp3_pq *pq, uint64_t penalty = nwrite / NGHTTP3_STREAM_MIN_WRITELEN; if (tnode->pe.index == NGHTTP3_PQ_BAD_INDEX) { - tnode->cycle = pq_get_first_cycle(pq) + - ((nwrite == 0 || !nghttp3_pri_uint8_inc(tnode->pri)) - ? 0 - : nghttp3_max(1, penalty)); + tnode->cycle = + pq_get_first_cycle(pq) + + ((nwrite == 0 || !tnode->pri.inc) ? 
0 : nghttp3_max(1, penalty)); } else if (nwrite > 0) { - if (!nghttp3_pri_uint8_inc(tnode->pri) || nghttp3_pq_size(pq) == 1) { + if (!tnode->pri.inc || nghttp3_pq_size(pq) == 1) { return 0; } diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_tnode.h b/deps/ngtcp2/nghttp3/lib/nghttp3_tnode.h index f71dcf5ee31ad6..1abc1e62519381 100644 --- a/deps/ngtcp2/nghttp3/lib/nghttp3_tnode.h +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_tnode.h @@ -35,33 +35,16 @@ #define NGHTTP3_TNODE_MAX_CYCLE_GAP (1llu << 24) -typedef enum nghttp3_node_id_type { - NGHTTP3_NODE_ID_TYPE_STREAM = 0x00, - NGHTTP3_NODE_ID_TYPE_PUSH = 0x01, -} nghttp3_node_id_type; - -typedef struct nghttp3_node_id { - nghttp3_node_id_type type; - int64_t id; -} nghttp3_node_id; - -nghttp3_node_id *nghttp3_node_id_init(nghttp3_node_id *nid, - nghttp3_node_id_type type, int64_t id); - -int nghttp3_node_id_eq(const nghttp3_node_id *a, const nghttp3_node_id *b); - typedef struct nghttp3_tnode { nghttp3_pq_entry pe; size_t num_children; - nghttp3_node_id nid; - uint64_t seq; + int64_t id; uint64_t cycle; /* pri is a stream priority produced by nghttp3_pri_to_uint8. */ - uint8_t pri; + nghttp3_pri pri; } nghttp3_tnode; -void nghttp3_tnode_init(nghttp3_tnode *tnode, const nghttp3_node_id *nid, - uint64_t seq, uint8_t pri); +void nghttp3_tnode_init(nghttp3_tnode *tnode, int64_t id); void nghttp3_tnode_free(nghttp3_tnode *tnode); diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_unreachable.c b/deps/ngtcp2/nghttp3/lib/nghttp3_unreachable.c new file mode 100644 index 00000000000000..6fea89b802b12d --- /dev/null +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_unreachable.c @@ -0,0 +1,72 @@ +/* + * nghttp3 + * + * Copyright (c) 2022 nghttp3 contributors + * Copyright (c) 2022 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
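/* Illustrative sketch: the cycle computation performed by
 * nghttp3_tnode_schedule above, isolated for clarity. With the switch to
 * nghttp3_pri, the scheduler reads pri.inc directly instead of unpacking a
 * uint8_t. The helper name next_cycle is invented for this example;
 * NGHTTP3_STREAM_MIN_WRITELEN is 800 (see nghttp3_stream.h above). */
#include <stdint.h>

static uint64_t next_cycle(uint64_t first_cycle, uint64_t nwrite, int inc) {
  uint64_t penalty = nwrite / 800; /* NGHTTP3_STREAM_MIN_WRITELEN */

  if (nwrite == 0 || !inc) {
    /* non-incremental streams (and streams that wrote nothing) rejoin at
       the head of the priority queue */
    return first_cycle;
  }

  /* incremental streams are pushed back by at least one cycle, more if they
     just wrote a lot: nwrite == 4000 gives penalty == 5 */
  return first_cycle + (penalty < 1 ? 1 : penalty);
}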
+ */ +#include "nghttp3_unreachable.h" + +#include +#include +#ifdef HAVE_UNISTD_H +# include +#endif /* HAVE_UNISTD_H */ +#include +#ifdef WIN32 +# include +#endif /* WIN32 */ + +void nghttp3_unreachable_fail(const char *file, int line, const char *func) { + char *buf; + size_t buflen; + int rv; + +#define NGHTTP3_UNREACHABLE_TEMPLATE "%s:%d %s: Unreachable.\n" + + rv = snprintf(NULL, 0, NGHTTP3_UNREACHABLE_TEMPLATE, file, line, func); + if (rv < 0) { + abort(); + } + + /* here we explicitly use system malloc */ + buflen = (size_t)rv + 1; + buf = malloc(buflen); + if (buf == NULL) { + abort(); + } + + rv = snprintf(buf, buflen, NGHTTP3_UNREACHABLE_TEMPLATE, file, line, func); + if (rv < 0) { + abort(); + } + +#ifndef WIN32 + while (write(STDERR_FILENO, buf, (size_t)rv) == -1 && errno == EINTR) + ; +#else /* WIN32 */ + _write(_fileno(stderr), buf, (unsigned int)rv); +#endif /* WIN32 */ + + free(buf); + + abort(); +} diff --git a/deps/ngtcp2/nghttp3/lib/nghttp3_unreachable.h b/deps/ngtcp2/nghttp3/lib/nghttp3_unreachable.h new file mode 100644 index 00000000000000..6360f52d3aa857 --- /dev/null +++ b/deps/ngtcp2/nghttp3/lib/nghttp3_unreachable.h @@ -0,0 +1,53 @@ +/* + * nghttp3 + * + * Copyright (c) 2022 nghttp3 contributors + * Copyright (c) 2022 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef NGHTTP3_UNREACHABLE_H +#define NGHTTP3_UNREACHABLE_H + +#ifdef HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include + +#ifdef __FILE_NAME__ +# define NGHTTP3_FILE_NAME __FILE_NAME__ +#else /* !__FILE_NAME__ */ +# define NGHTTP3_FILE_NAME "(file)" +#endif /* !__FILE_NAME__ */ + +#define nghttp3_unreachable() \ + nghttp3_unreachable_fail(NGHTTP3_FILE_NAME, __LINE__, __func__) + +#ifdef _MSC_VER +__declspec(noreturn) +#endif /* _MSC_VER */ + void nghttp3_unreachable_fail(const char *file, int line, const char *func) +#ifndef _MSC_VER + __attribute__((noreturn)) +#endif /* !_MSC_VER */ + ; + +#endif /* NGHTTP3_UNREACHABLE_H */ diff --git a/deps/ngtcp2/nghttp3/lib/sfparse.c b/deps/ngtcp2/nghttp3/lib/sfparse.c new file mode 100644 index 00000000000000..efa2850c9d661d --- /dev/null +++ b/deps/ngtcp2/nghttp3/lib/sfparse.c @@ -0,0 +1,1146 @@ +/* + * sfparse + * + * Copyright (c) 2023 sfparse contributors + * Copyright (c) 2019 nghttp3 contributors + * Copyright (c) 2015 nghttp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
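/* Illustrative sketch of how the nghttp3_unreachable() macro above is meant
 * to be used in place of the former assert(0); abort() pairs. Unlike
 * assert, it still reports the location (file, line, function) before
 * aborting in builds compiled with NDEBUG. The switch and its values are
 * invented for this example. */
#include "nghttp3_unreachable.h"

static const char *ctrl_state_name(int state) {
  switch (state) {
  case 0:
    return "frame-type";
  case 1:
    return "frame-length";
  default:
    /* writes "<file>:<line> ctrl_state_name: Unreachable." to stderr,
       then abort()s */
    nghttp3_unreachable();
  }
}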
+ */ +#include "sfparse.h" + +#include +#include +#include + +#define SF_STATE_DICT 0x08u +#define SF_STATE_LIST 0x10u +#define SF_STATE_ITEM 0x18u + +#define SF_STATE_INNER_LIST 0x04u + +#define SF_STATE_BEFORE 0x00u +#define SF_STATE_BEFORE_PARAMS 0x01u +#define SF_STATE_PARAMS 0x02u +#define SF_STATE_AFTER 0x03u + +#define SF_STATE_OP_MASK 0x03u + +#define SF_SET_STATE_AFTER(NAME) (SF_STATE_##NAME | SF_STATE_AFTER) +#define SF_SET_STATE_BEFORE_PARAMS(NAME) \ + (SF_STATE_##NAME | SF_STATE_BEFORE_PARAMS) +#define SF_SET_STATE_INNER_LIST_BEFORE(NAME) \ + (SF_STATE_##NAME | SF_STATE_INNER_LIST | SF_STATE_BEFORE) + +#define SF_STATE_DICT_AFTER SF_SET_STATE_AFTER(DICT) +#define SF_STATE_DICT_BEFORE_PARAMS SF_SET_STATE_BEFORE_PARAMS(DICT) +#define SF_STATE_DICT_INNER_LIST_BEFORE SF_SET_STATE_INNER_LIST_BEFORE(DICT) + +#define SF_STATE_LIST_AFTER SF_SET_STATE_AFTER(LIST) +#define SF_STATE_LIST_BEFORE_PARAMS SF_SET_STATE_BEFORE_PARAMS(LIST) +#define SF_STATE_LIST_INNER_LIST_BEFORE SF_SET_STATE_INNER_LIST_BEFORE(LIST) + +#define SF_STATE_ITEM_AFTER SF_SET_STATE_AFTER(ITEM) +#define SF_STATE_ITEM_BEFORE_PARAMS SF_SET_STATE_BEFORE_PARAMS(ITEM) +#define SF_STATE_ITEM_INNER_LIST_BEFORE SF_SET_STATE_INNER_LIST_BEFORE(ITEM) + +#define SF_STATE_INITIAL 0x00u + +#define DIGIT_CASES \ + case '0': \ + case '1': \ + case '2': \ + case '3': \ + case '4': \ + case '5': \ + case '6': \ + case '7': \ + case '8': \ + case '9' + +#define LCALPHA_CASES \ + case 'a': \ + case 'b': \ + case 'c': \ + case 'd': \ + case 'e': \ + case 'f': \ + case 'g': \ + case 'h': \ + case 'i': \ + case 'j': \ + case 'k': \ + case 'l': \ + case 'm': \ + case 'n': \ + case 'o': \ + case 'p': \ + case 'q': \ + case 'r': \ + case 's': \ + case 't': \ + case 'u': \ + case 'v': \ + case 'w': \ + case 'x': \ + case 'y': \ + case 'z' + +#define UCALPHA_CASES \ + case 'A': \ + case 'B': \ + case 'C': \ + case 'D': \ + case 'E': \ + case 'F': \ + case 'G': \ + case 'H': \ + case 'I': \ + case 'J': \ + case 'K': \ + case 'L': \ + case 'M': \ + case 'N': \ + case 'O': \ + case 'P': \ + case 'Q': \ + case 'R': \ + case 'S': \ + case 'T': \ + case 'U': \ + case 'V': \ + case 'W': \ + case 'X': \ + case 'Y': \ + case 'Z' + +#define ALPHA_CASES \ + UCALPHA_CASES: \ + LCALPHA_CASES + +#define X20_21_CASES \ + case ' ': \ + case '!' 
+ +#define X23_5B_CASES \ + case '#': \ + case '$': \ + case '%': \ + case '&': \ + case '\'': \ + case '(': \ + case ')': \ + case '*': \ + case '+': \ + case ',': \ + case '-': \ + case '.': \ + case '/': \ + DIGIT_CASES: \ + case ':': \ + case ';': \ + case '<': \ + case '=': \ + case '>': \ + case '?': \ + case '@': \ + UCALPHA_CASES: \ + case '[' + +#define X5D_7E_CASES \ + case ']': \ + case '^': \ + case '_': \ + case '`': \ + LCALPHA_CASES: \ + case '{': \ + case '|': \ + case '}': \ + case '~' + +static int is_ws(uint8_t c) { + switch (c) { + case ' ': + case '\t': + return 1; + default: + return 0; + } +} + +static int parser_eof(sf_parser *sfp) { return sfp->pos == sfp->end; } + +static void parser_discard_ows(sf_parser *sfp) { + for (; !parser_eof(sfp) && is_ws(*sfp->pos); ++sfp->pos) + ; +} + +static void parser_discard_sp(sf_parser *sfp) { + for (; !parser_eof(sfp) && *sfp->pos == ' '; ++sfp->pos) + ; +} + +static void parser_set_op_state(sf_parser *sfp, uint32_t op) { + sfp->state &= ~SF_STATE_OP_MASK; + sfp->state |= op; +} + +static void parser_unset_inner_list_state(sf_parser *sfp) { + sfp->state &= ~SF_STATE_INNER_LIST; +} + +static int parser_key(sf_parser *sfp, sf_vec *dest) { + const uint8_t *base; + + switch (*sfp->pos) { + case '*': + LCALPHA_CASES: + break; + default: + return SF_ERR_PARSE_ERROR; + } + + base = sfp->pos++; + + for (; !parser_eof(sfp); ++sfp->pos) { + switch (*sfp->pos) { + case '_': + case '-': + case '.': + case '*': + DIGIT_CASES: + LCALPHA_CASES: + continue; + } + + break; + } + + if (dest) { + dest->base = (uint8_t *)base; + dest->len = (size_t)(sfp->pos - dest->base); + } + + return 0; +} + +static int parser_number(sf_parser *sfp, sf_value *dest) { + int sign = 1; + int64_t value = 0; + size_t len = 0; + size_t fpos = 0; + + if (*sfp->pos == '-') { + ++sfp->pos; + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + sign = -1; + } + + assert(!parser_eof(sfp)); + + for (; !parser_eof(sfp); ++sfp->pos) { + switch (*sfp->pos) { + DIGIT_CASES: + if (++len > 15) { + return SF_ERR_PARSE_ERROR; + } + + value *= 10; + value += *sfp->pos - '0'; + + continue; + } + + break; + } + + if (len == 0) { + return SF_ERR_PARSE_ERROR; + } + + if (parser_eof(sfp) || *sfp->pos != '.') { + if (dest) { + dest->type = SF_TYPE_INTEGER; + dest->flags = SF_VALUE_FLAG_NONE; + dest->integer = value * sign; + } + + return 0; + } + + /* decimal */ + + if (len > 12) { + return SF_ERR_PARSE_ERROR; + } + + fpos = len; + + ++sfp->pos; + + for (; !parser_eof(sfp); ++sfp->pos) { + switch (*sfp->pos) { + DIGIT_CASES: + if (++len > 15) { + return SF_ERR_PARSE_ERROR; + } + + value *= 10; + value += *sfp->pos - '0'; + + continue; + } + + break; + } + + if (fpos == len || len - fpos > 3) { + return SF_ERR_PARSE_ERROR; + } + + if (dest) { + dest->type = SF_TYPE_DECIMAL; + dest->flags = SF_VALUE_FLAG_NONE; + dest->decimal.numer = value * sign; + + switch (len - fpos) { + case 1: + dest->decimal.denom = 10; + + break; + case 2: + dest->decimal.denom = 100; + + break; + case 3: + dest->decimal.denom = 1000; + + break; + } + } + + return 0; +} + +static int parser_date(sf_parser *sfp, sf_value *dest) { + int rv; + sf_value val; + + /* The first byte has already been validated by the caller. 
*/ + assert('@' == *sfp->pos); + + ++sfp->pos; + + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + rv = parser_number(sfp, &val); + if (rv != 0) { + return rv; + } + + if (val.type != SF_TYPE_INTEGER) { + return SF_ERR_PARSE_ERROR; + } + + if (dest) { + *dest = val; + dest->type = SF_TYPE_DATE; + } + + return 0; +} + +static int parser_string(sf_parser *sfp, sf_value *dest) { + const uint8_t *base; + uint32_t flags = SF_VALUE_FLAG_NONE; + + /* The first byte has already been validated by the caller. */ + assert('"' == *sfp->pos); + + base = ++sfp->pos; + + for (; !parser_eof(sfp); ++sfp->pos) { + switch (*sfp->pos) { + X20_21_CASES: + X23_5B_CASES: + X5D_7E_CASES: + break; + case '\\': + ++sfp->pos; + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + switch (*sfp->pos) { + case '"': + case '\\': + flags = SF_VALUE_FLAG_ESCAPED_STRING; + + break; + default: + return SF_ERR_PARSE_ERROR; + } + + break; + case '"': + if (dest) { + dest->type = SF_TYPE_STRING; + dest->flags = flags; + dest->vec.len = (size_t)(sfp->pos - base); + dest->vec.base = dest->vec.len == 0 ? NULL : (uint8_t *)base; + } + + ++sfp->pos; + + return 0; + default: + return SF_ERR_PARSE_ERROR; + } + } + + return SF_ERR_PARSE_ERROR; +} + +static int parser_token(sf_parser *sfp, sf_value *dest) { + const uint8_t *base; + + /* The first byte has already been validated by the caller. */ + base = sfp->pos++; + + for (; !parser_eof(sfp); ++sfp->pos) { + switch (*sfp->pos) { + case '!': + case '#': + case '$': + case '%': + case '&': + case '\'': + case '*': + case '+': + case '-': + case '.': + case '^': + case '_': + case '`': + case '|': + case '~': + case ':': + case '/': + DIGIT_CASES: + ALPHA_CASES: + continue; + } + + break; + } + + if (dest) { + dest->type = SF_TYPE_TOKEN; + dest->flags = SF_VALUE_FLAG_NONE; + dest->vec.base = (uint8_t *)base; + dest->vec.len = (size_t)(sfp->pos - base); + } + + return 0; +} + +static int parser_byteseq(sf_parser *sfp, sf_value *dest) { + const uint8_t *base; + + /* The first byte has already been validated by the caller. */ + assert(':' == *sfp->pos); + + base = ++sfp->pos; + + for (; !parser_eof(sfp); ++sfp->pos) { + switch (*sfp->pos) { + case '+': + case '/': + DIGIT_CASES: + ALPHA_CASES: + continue; + case '=': + switch ((sfp->pos - base) & 0x3) { + case 0: + case 1: + return SF_ERR_PARSE_ERROR; + case 2: + switch (*(sfp->pos - 1)) { + case 'A': + case 'Q': + case 'g': + case 'w': + break; + default: + return SF_ERR_PARSE_ERROR; + } + + ++sfp->pos; + + if (parser_eof(sfp) || *sfp->pos != '=') { + return SF_ERR_PARSE_ERROR; + } + + break; + case 3: + switch (*(sfp->pos - 1)) { + case 'A': + case 'E': + case 'I': + case 'M': + case 'Q': + case 'U': + case 'Y': + case 'c': + case 'g': + case 'k': + case 'o': + case 's': + case 'w': + case '0': + case '4': + case '8': + break; + default: + return SF_ERR_PARSE_ERROR; + } + + break; + } + + ++sfp->pos; + + if (parser_eof(sfp) || *sfp->pos != ':') { + return SF_ERR_PARSE_ERROR; + } + + goto fin; + case ':': + if ((sfp->pos - base) & 0x3) { + return SF_ERR_PARSE_ERROR; + } + + goto fin; + default: + return SF_ERR_PARSE_ERROR; + } + } + + return SF_ERR_PARSE_ERROR; + +fin: + if (dest) { + dest->type = SF_TYPE_BYTESEQ; + dest->flags = SF_VALUE_FLAG_NONE; + dest->vec.len = (size_t)(sfp->pos - base); + dest->vec.base = dest->vec.len == 0 ? 
NULL : (uint8_t *)base; + } + + ++sfp->pos; + + return 0; +} + +static int parser_boolean(sf_parser *sfp, sf_value *dest) { + int b; + + /* The first byte has already been validated by the caller. */ + assert('?' == *sfp->pos); + + ++sfp->pos; + + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + switch (*sfp->pos) { + case '0': + b = 0; + + break; + case '1': + b = 1; + + break; + default: + return SF_ERR_PARSE_ERROR; + } + + ++sfp->pos; + + if (dest) { + dest->type = SF_TYPE_BOOLEAN; + dest->flags = SF_VALUE_FLAG_NONE; + dest->boolean = b; + } + + return 0; +} + +static int parser_bare_item(sf_parser *sfp, sf_value *dest) { + switch (*sfp->pos) { + case '"': + return parser_string(sfp, dest); + case '-': + DIGIT_CASES: + return parser_number(sfp, dest); + case '@': + return parser_date(sfp, dest); + case ':': + return parser_byteseq(sfp, dest); + case '?': + return parser_boolean(sfp, dest); + case '*': + ALPHA_CASES: + return parser_token(sfp, dest); + default: + return SF_ERR_PARSE_ERROR; + } +} + +static int parser_skip_inner_list(sf_parser *sfp); + +int sf_parser_param(sf_parser *sfp, sf_vec *dest_key, sf_value *dest_value) { + int rv; + + switch (sfp->state & SF_STATE_OP_MASK) { + case SF_STATE_BEFORE: + rv = parser_skip_inner_list(sfp); + if (rv != 0) { + return rv; + } + + /* fall through */ + case SF_STATE_BEFORE_PARAMS: + parser_set_op_state(sfp, SF_STATE_PARAMS); + + break; + case SF_STATE_PARAMS: + break; + default: + assert(0); + abort(); + } + + if (parser_eof(sfp) || *sfp->pos != ';') { + parser_set_op_state(sfp, SF_STATE_AFTER); + + return SF_ERR_EOF; + } + + ++sfp->pos; + + parser_discard_sp(sfp); + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + rv = parser_key(sfp, dest_key); + if (rv != 0) { + return rv; + } + + if (parser_eof(sfp) || *sfp->pos != '=') { + if (dest_value) { + dest_value->type = SF_TYPE_BOOLEAN; + dest_value->flags = SF_VALUE_FLAG_NONE; + dest_value->boolean = 1; + } + + return 0; + } + + ++sfp->pos; + + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + return parser_bare_item(sfp, dest_value); +} + +static int parser_skip_params(sf_parser *sfp) { + int rv; + + for (;;) { + rv = sf_parser_param(sfp, NULL, NULL); + switch (rv) { + case 0: + break; + case SF_ERR_EOF: + return 0; + case SF_ERR_PARSE_ERROR: + return rv; + default: + assert(0); + abort(); + } + } +} + +int sf_parser_inner_list(sf_parser *sfp, sf_value *dest) { + int rv; + + switch (sfp->state & SF_STATE_OP_MASK) { + case SF_STATE_BEFORE: + parser_discard_sp(sfp); + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + break; + case SF_STATE_BEFORE_PARAMS: + rv = parser_skip_params(sfp); + if (rv != 0) { + return rv; + } + + /* Technically, we are entering SF_STATE_AFTER, but we will set + another state without reading the state. 
*/ + /* parser_set_op_state(sfp, SF_STATE_AFTER); */ + + /* fall through */ + case SF_STATE_AFTER: + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + switch (*sfp->pos) { + case ' ': + parser_discard_sp(sfp); + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + break; + case ')': + break; + default: + return SF_ERR_PARSE_ERROR; + } + + break; + default: + assert(0); + abort(); + } + + if (*sfp->pos == ')') { + ++sfp->pos; + + parser_unset_inner_list_state(sfp); + parser_set_op_state(sfp, SF_STATE_BEFORE_PARAMS); + + return SF_ERR_EOF; + } + + rv = parser_bare_item(sfp, dest); + if (rv != 0) { + return rv; + } + + parser_set_op_state(sfp, SF_STATE_BEFORE_PARAMS); + + return 0; +} + +static int parser_skip_inner_list(sf_parser *sfp) { + int rv; + + for (;;) { + rv = sf_parser_inner_list(sfp, NULL); + switch (rv) { + case 0: + break; + case SF_ERR_EOF: + return 0; + case SF_ERR_PARSE_ERROR: + return rv; + default: + assert(0); + abort(); + } + } +} + +static int parser_next_key_or_item(sf_parser *sfp) { + parser_discard_ows(sfp); + + if (parser_eof(sfp)) { + return SF_ERR_EOF; + } + + if (*sfp->pos != ',') { + return SF_ERR_PARSE_ERROR; + } + + ++sfp->pos; + + parser_discard_ows(sfp); + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + return 0; +} + +static int parser_dict_value(sf_parser *sfp, sf_value *dest) { + int rv; + + if (parser_eof(sfp) || *(sfp->pos) != '=') { + /* Boolean true */ + if (dest) { + dest->type = SF_TYPE_BOOLEAN; + dest->flags = SF_VALUE_FLAG_NONE; + dest->boolean = 1; + } + + sfp->state = SF_STATE_DICT_BEFORE_PARAMS; + + return 0; + } + + ++sfp->pos; + + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + if (*sfp->pos == '(') { + if (dest) { + dest->type = SF_TYPE_INNER_LIST; + dest->flags = SF_VALUE_FLAG_NONE; + } + + ++sfp->pos; + + sfp->state = SF_STATE_DICT_INNER_LIST_BEFORE; + + return 0; + } + + rv = parser_bare_item(sfp, dest); + if (rv != 0) { + return rv; + } + + sfp->state = SF_STATE_DICT_BEFORE_PARAMS; + + return 0; +} + +int sf_parser_dict(sf_parser *sfp, sf_vec *dest_key, sf_value *dest_value) { + int rv; + + switch (sfp->state) { + case SF_STATE_DICT_INNER_LIST_BEFORE: + rv = parser_skip_inner_list(sfp); + if (rv != 0) { + return rv; + } + + /* fall through */ + case SF_STATE_DICT_BEFORE_PARAMS: + rv = parser_skip_params(sfp); + if (rv != 0) { + return rv; + } + + /* fall through */ + case SF_STATE_DICT_AFTER: + rv = parser_next_key_or_item(sfp); + if (rv != 0) { + return rv; + } + + break; + case SF_STATE_INITIAL: + parser_discard_sp(sfp); + + if (parser_eof(sfp)) { + return SF_ERR_EOF; + } + + break; + default: + assert(0); + abort(); + } + + rv = parser_key(sfp, dest_key); + if (rv != 0) { + return rv; + } + + return parser_dict_value(sfp, dest_value); +} + +int sf_parser_list(sf_parser *sfp, sf_value *dest) { + int rv; + + switch (sfp->state) { + case SF_STATE_LIST_INNER_LIST_BEFORE: + rv = parser_skip_inner_list(sfp); + if (rv != 0) { + return rv; + } + + /* fall through */ + case SF_STATE_LIST_BEFORE_PARAMS: + rv = parser_skip_params(sfp); + if (rv != 0) { + return rv; + } + + /* fall through */ + case SF_STATE_LIST_AFTER: + rv = parser_next_key_or_item(sfp); + if (rv != 0) { + return rv; + } + + break; + case SF_STATE_INITIAL: + parser_discard_sp(sfp); + + if (parser_eof(sfp)) { + return SF_ERR_EOF; + } + + break; + default: + assert(0); + abort(); + } + + if (*sfp->pos == '(') { + if (dest) { + dest->type = SF_TYPE_INNER_LIST; + dest->flags = SF_VALUE_FLAG_NONE; + } + + ++sfp->pos; + + sfp->state = 
SF_STATE_LIST_INNER_LIST_BEFORE; + + return 0; + } + + rv = parser_bare_item(sfp, dest); + if (rv != 0) { + return rv; + } + + sfp->state = SF_STATE_LIST_BEFORE_PARAMS; + + return 0; +} + +int sf_parser_item(sf_parser *sfp, sf_value *dest) { + int rv; + + switch (sfp->state) { + case SF_STATE_INITIAL: + parser_discard_sp(sfp); + + if (parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + break; + case SF_STATE_ITEM_INNER_LIST_BEFORE: + rv = parser_skip_inner_list(sfp); + if (rv != 0) { + return rv; + } + + /* fall through */ + case SF_STATE_ITEM_BEFORE_PARAMS: + rv = parser_skip_params(sfp); + if (rv != 0) { + return rv; + } + + /* fall through */ + case SF_STATE_ITEM_AFTER: + parser_discard_sp(sfp); + + if (!parser_eof(sfp)) { + return SF_ERR_PARSE_ERROR; + } + + return SF_ERR_EOF; + default: + assert(0); + abort(); + } + + if (*sfp->pos == '(') { + if (dest) { + dest->type = SF_TYPE_INNER_LIST; + dest->flags = SF_VALUE_FLAG_NONE; + } + + ++sfp->pos; + + sfp->state = SF_STATE_ITEM_INNER_LIST_BEFORE; + + return 0; + } + + rv = parser_bare_item(sfp, dest); + if (rv != 0) { + return rv; + } + + sfp->state = SF_STATE_ITEM_BEFORE_PARAMS; + + return 0; +} + +void sf_parser_init(sf_parser *sfp, const uint8_t *data, size_t datalen) { + if (datalen == 0) { + sfp->pos = sfp->end = NULL; + } else { + sfp->pos = data; + sfp->end = data + datalen; + } + + sfp->state = SF_STATE_INITIAL; +} + +void sf_unescape(sf_vec *dest, const sf_vec *src) { + const uint8_t *p, *q; + uint8_t *o; + size_t len, slen; + + if (src->len == 0) { + *dest = *src; + + return; + } + + o = dest->base; + p = src->base; + len = src->len; + + for (;;) { + q = memchr(p, '\\', len); + if (q == NULL) { + if (len == src->len) { + *dest = *src; + + return; + } + + memcpy(o, p, len); + o += len; + + break; + } + + slen = (size_t)(q - p); + memcpy(o, p, slen); + o += slen; + + p = q + 1; + *o++ = *p++; + len -= slen + 2; + } + + dest->len = (size_t)(o - dest->base); +} + +void sf_base64decode(sf_vec *dest, const sf_vec *src) { + static const int index_tbl[] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, -1, -1, -1, -1, -1, -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1}; + uint8_t *o; + const uint8_t *p, *end; + uint32_t n; + size_t i; + int idx; + + assert((src->len & 0x3) == 0); + + if (src->len == 0) { + *dest = *src; + + return; + } + + o = dest->base; + p = src->base; + end = src->base + src->len; + + for (; p != end;) { + n = 0; + + for (i = 1; i <= 4; ++i, ++p) { + idx = index_tbl[*p]; + + if (idx == -1) { + assert(i > 2); + + if (i == 3) { + assert(*p == '=' && *(p + 1) == '=' && p + 2 == end); + + *o++ = 
(uint8_t)(n >> 16); + + goto fin; + } + + assert(*p == '=' && p + 1 == end); + + *o++ = (uint8_t)(n >> 16); + *o++ = (n >> 8) & 0xffu; + + goto fin; + } + + n += (uint32_t)(idx << (24 - i * 6)); + } + + *o++ = (uint8_t)(n >> 16); + *o++ = (n >> 8) & 0xffu; + *o++ = n & 0xffu; + } + +fin: + dest->len = (size_t)(o - dest->base); +} diff --git a/deps/ngtcp2/nghttp3/lib/sfparse.h b/deps/ngtcp2/nghttp3/lib/sfparse.h new file mode 100644 index 00000000000000..1474db1429acea --- /dev/null +++ b/deps/ngtcp2/nghttp3/lib/sfparse.h @@ -0,0 +1,409 @@ +/* + * sfparse + * + * Copyright (c) 2023 sfparse contributors + * Copyright (c) 2019 nghttp3 contributors + * Copyright (c) 2015 nghttp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef SFPARSE_H +#define SFPARSE_H + +/* Define WIN32 when build target is Win32 API (borrowed from + libcurl) */ +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32) +# define WIN32 +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(_MSC_VER) && (_MSC_VER < 1800) +/* MSVC < 2013 does not have inttypes.h because it is not C99 + compliant. See compiler macros and version number in + https://sourceforge.net/p/predef/wiki/Compilers/ */ +# include +#else /* !defined(_MSC_VER) || (_MSC_VER >= 1800) */ +# include +#endif /* !defined(_MSC_VER) || (_MSC_VER >= 1800) */ +#include +#include + +/** + * @enum + * + * :type:`sf_type` defines value type. + */ +typedef enum sf_type { + /** + * :enum:`SF_TYPE_BOOLEAN` indicates boolean type. + */ + SF_TYPE_BOOLEAN, + /** + * :enum:`SF_TYPE_INTEGER` indicates integer type. + */ + SF_TYPE_INTEGER, + /** + * :enum:`SF_TYPE_DECIMAL` indicates decimal type. + */ + SF_TYPE_DECIMAL, + /** + * :enum:`SF_TYPE_STRING` indicates string type. + */ + SF_TYPE_STRING, + /** + * :enum:`SF_TYPE_TOKEN` indicates token type. + */ + SF_TYPE_TOKEN, + /** + * :enum:`SF_TYPE_BYTESEQ` indicates byte sequence type. + */ + SF_TYPE_BYTESEQ, + /** + * :enum:`SF_TYPE_INNER_LIST` indicates inner list type. + */ + SF_TYPE_INNER_LIST, + /** + * :enum:`SF_TYPE_DATE` indicates date type. + */ + SF_TYPE_DATE +} sf_type; + +/** + * @macro + * + * :macro:`SF_ERR_PARSE_ERROR` indicates fatal parse error has + * occurred, and it is not possible to continue the processing. + */ +#define SF_ERR_PARSE_ERROR -1 + +/** + * @macro + * + * :macro:`SF_ERR_EOF` indicates that there is nothing left to read. + * The context of this error varies depending on the function that + * returns this error code. 
+ */ +#define SF_ERR_EOF -2 + +/** + * @struct + * + * :type:`sf_vec` stores sequence of bytes. + */ +typedef struct sf_vec { + /** + * :member:`base` points to the beginning of the sequence of bytes. + */ + uint8_t *base; + /** + * :member:`len` is the number of bytes contained in this sequence. + */ + size_t len; +} sf_vec; + +/** + * @macro + * + * :macro:`SF_VALUE_FLAG_NONE` indicates no flag set. + */ +#define SF_VALUE_FLAG_NONE 0x0u + +/** + * @macro + * + * :macro:`SF_VALUE_FLAG_ESCAPED_STRING` indicates that a string + * contains escaped character(s). + */ +#define SF_VALUE_FLAG_ESCAPED_STRING 0x1u + +/** + * @struct + * + * :type:`sf_decimal` contains decimal value. + */ +typedef struct sf_decimal { + /** + * :member:`numer` contains numerator of the decimal value. + */ + int64_t numer; + /** + * :member:`denom` contains denominator of the decimal value. + */ + int64_t denom; +} sf_decimal; + +/** + * @struct + * + * :type:`sf_value` stores a Structured Field item. For Inner List, + * only type is set to :enum:`sf_type.SF_TYPE_INNER_LIST`. In order + * to read the items contained in an inner list, call + * `sf_parser_inner_list`. + */ +typedef struct sf_value { + /** + * :member:`type` is the type of the value contained in this + * particular object. + */ + sf_type type; + /** + * :member:`flags` is bitwise OR of one or more of + * :macro:`SF_VALUE_FLAG_* `. + */ + uint32_t flags; + /** + * @anonunion_start + * + * @sf_value_value + */ + union { + /** + * :member:`boolean` contains boolean value if :member:`type` == + * :enum:`sf_type.SF_TYPE_BOOLEAN`. 1 indicates true, and 0 + * indicates false. + */ + int boolean; + /** + * :member:`integer` contains integer value if :member:`type` is + * either :enum:`sf_type.SF_TYPE_INTEGER` or + * :enum:`sf_type.SF_TYPE_DATE`. + */ + int64_t integer; + /** + * :member:`decimal` contains decimal value if :member:`type` == + * :enum:`sf_type.SF_TYPE_DECIMAL`. + */ + sf_decimal decimal; + /** + * :member:`vec` contains sequence of bytes if :member:`type` is + * either :enum:`sf_type.SF_TYPE_STRING`, + * :enum:`sf_type.SF_TYPE_TOKEN`, or + * :enum:`sf_type.SF_TYPE_BYTESEQ`. + * + * For :enum:`sf_type.SF_TYPE_STRING`, this field contains one or + * more escaped characters if :member:`flags` has + * :macro:`SF_VALUE_FLAG_ESCAPED_STRING` set. To unescape the + * string, use `sf_unescape`. + * + * For :enum:`sf_type.SF_TYPE_BYTESEQ`, this field contains base64 + * encoded string. To decode this byte string, use + * `sf_base64decode`. + * + * If :member:`vec.len ` == 0, :member:`vec.base + * ` is guaranteed to be NULL. + */ + sf_vec vec; + /** + * @anonunion_end + */ + }; +} sf_value; + +/** + * @struct + * + * :type:`sf_parser` is the Structured Field Values parser. Use + * `sf_parser_init` to initialize it. + */ +typedef struct sf_parser { + /* all fields are private */ + const uint8_t *pos; + const uint8_t *end; + uint32_t state; +} sf_parser; + +/** + * @function + * + * `sf_parser_init` initializes |sfp| with the given buffer pointed by + * |data| of length |datalen|. + */ +void sf_parser_init(sf_parser *sfp, const uint8_t *data, size_t datalen); + +/** + * @function + * + * `sf_parser_param` reads a parameter. If this function returns 0, + * it stores parameter key and value in |dest_key| and |dest_value| + * respectively, if they are not NULL. + * + * This function does no effort to find duplicated keys. Same key may + * be reported more than once. + * + * Caller should keep calling this function until it returns negative + * error code. 
If it returns :macro:`SF_ERR_EOF`, all parameters have + * read, and caller can continue to read rest of the values. If it + * returns :macro:`SF_ERR_PARSE_ERROR`, it encountered fatal error + * while parsing field value. + */ +int sf_parser_param(sf_parser *sfp, sf_vec *dest_key, sf_value *dest_value); + +/** + * @function + * + * `sf_parser_dict` reads the next dictionary key and value pair. If + * this function returns 0, it stores the key and value in |dest_key| + * and |dest_value| respectively, if they are not NULL. + * + * Caller can optionally read parameters attached to the pair by + * calling `sf_parser_param`. + * + * This function does no effort to find duplicated keys. Same key may + * be reported more than once. + * + * Caller should keep calling this function until it returns negative + * error code. If it returns :macro:`SF_ERR_EOF`, all key and value + * pairs have been read, and there is nothing left to read. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`SF_ERR_EOF` + * All values in the dictionary have read. + * :macro:`SF_ERR_PARSE_ERROR` + * It encountered fatal error while parsing field value. + */ +int sf_parser_dict(sf_parser *sfp, sf_vec *dest_key, sf_value *dest_value); + +/** + * @function + * + * `sf_parser_list` reads the next list item. If this function + * returns 0, it stores the item in |dest| if it is not NULL. + * + * Caller can optionally read parameters attached to the item by + * calling `sf_parser_param`. + * + * Caller should keep calling this function until it returns negative + * error code. If it returns :macro:`SF_ERR_EOF`, all values in the + * list have been read, and there is nothing left to read. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`SF_ERR_EOF` + * All values in the list have read. + * :macro:`SF_ERR_PARSE_ERROR` + * It encountered fatal error while parsing field value. + */ +int sf_parser_list(sf_parser *sfp, sf_value *dest); + +/** + * @function + * + * `sf_parser_item` reads a single item. If this function returns 0, + * it stores the item in |dest| if it is not NULL. + * + * This function is only used for the field value that consists of a + * single item. + * + * Caller can optionally read parameters attached to the item by + * calling `sf_parser_param`. + * + * Caller should call this function again to make sure that there is + * nothing left to read. If this 2nd function call returns + * :macro:`SF_ERR_EOF`, all data have been processed successfully. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`SF_ERR_EOF` + * There is nothing left to read. + * :macro:`SF_ERR_PARSE_ERROR` + * It encountered fatal error while parsing field value. + */ +int sf_parser_item(sf_parser *sfp, sf_value *dest); + +/** + * @function + * + * `sf_parser_inner_list` reads the next inner list item. If this + * function returns 0, it stores the item in |dest| if it is not NULL. + * + * Caller can optionally read parameters attached to the item by + * calling `sf_parser_param`. + * + * Caller should keep calling this function until it returns negative + * error code. If it returns :macro:`SF_ERR_EOF`, all values in this + * inner list have been read, and caller can optionally read + * parameters attached to this inner list by calling + * `sf_parser_param`. Then caller can continue to read rest of the + * values. 
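/* Illustrative sketch: driving sf_parser_dict as documented above to read an
 * RFC 9218 style priority field value such as "u=3, i". Everything outside
 * the sf_parser_* / sf_vec / sf_value names (the function name, parameters
 * and defaults) is an assumption made for this example; callers are expected
 * to initialize *urgency and *inc before calling it. */
#include <string.h>

static int parse_priority(const uint8_t *value, size_t valuelen,
                          uint32_t *urgency, int *inc) {
  sf_parser sfp;
  sf_vec key;
  sf_value val;
  int rv;

  sf_parser_init(&sfp, value, valuelen);

  for (;;) {
    rv = sf_parser_dict(&sfp, &key, &val);
    if (rv == SF_ERR_EOF) {
      return 0; /* the whole field value has been consumed */
    }
    if (rv != 0) {
      return -1; /* SF_ERR_PARSE_ERROR */
    }

    if (key.len == 1 && key.base[0] == 'u' && val.type == SF_TYPE_INTEGER) {
      *urgency = (uint32_t)val.integer;
    } else if (key.len == 1 && key.base[0] == 'i' &&
               val.type == SF_TYPE_BOOLEAN) {
      *inc = val.boolean; /* a bare "i" member parses as boolean true */
    }
    /* unknown keys and any attached parameters are skipped by the next
       sf_parser_dict call */
  }
}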
+ * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`SF_ERR_EOF` + * All values in the inner list have read. + * :macro:`SF_ERR_PARSE_ERROR` + * It encountered fatal error while parsing field value. + */ +int sf_parser_inner_list(sf_parser *sfp, sf_value *dest); + +/** + * @function + * + * `sf_unescape` copies |src| to |dest| by removing escapes (``\``). + * |src| should be the pointer to :member:`sf_value.vec` of type + * :enum:`sf_type.SF_TYPE_STRING` produced by either `sf_parser_dict`, + * `sf_parser_list`, `sf_parser_inner_list`, `sf_parser_item`, or + * `sf_parser_param`, otherwise the behavior is undefined. + * + * :member:`dest->base ` must point to the buffer that + * has sufficient space to store the unescaped string. + * + * If there is no escape character in |src|, |*src| is assigned to + * |*dest|. This includes the case that :member:`src->len + * ` == 0. + * + * This function sets the length of unescaped string to + * :member:`dest->len `. + */ +void sf_unescape(sf_vec *dest, const sf_vec *src); + +/** + * @function + * + * `sf_base64decode` decodes Base64 encoded string |src| and writes + * the result into |dest|. |src| should be the pointer to + * :member:`sf_value.vec` of type :enum:`sf_type.SF_TYPE_BYTESEQ` + * produced by either `sf_parser_dict`, `sf_parser_list`, + * `sf_parser_inner_list`, `sf_parser_item`, or `sf_parser_param`, + * otherwise the behavior is undefined. + * + * :member:`dest->base ` must point to the buffer that + * has sufficient space to store the decoded byte string. + * + * If :member:`src->len ` == 0, |*src| is assigned to + * |*dest|. + * + * This function sets the length of decoded byte string to + * :member:`dest->len `. + */ +void sf_base64decode(sf_vec *dest, const sf_vec *src); + +#ifdef __cplusplus +} +#endif + +#endif /* SFPARSE_H */ diff --git a/deps/ngtcp2/ngtcp2.gyp b/deps/ngtcp2/ngtcp2.gyp index e53b7f61ea387b..0f2929b75478f1 100644 --- a/deps/ngtcp2/ngtcp2.gyp +++ b/deps/ngtcp2/ngtcp2.gyp @@ -8,14 +8,15 @@ 'ngtcp2/lib/ngtcp2_addr.c', 'ngtcp2/lib/ngtcp2_balloc.c', 'ngtcp2/lib/ngtcp2_bbr.c', - 'ngtcp2/lib/ngtcp2_bbr2.c', 'ngtcp2/lib/ngtcp2_buf.c', 'ngtcp2/lib/ngtcp2_cc.c', 'ngtcp2/lib/ngtcp2_cid.c', 'ngtcp2/lib/ngtcp2_conn.c', 'ngtcp2/lib/ngtcp2_conv.c', + 'ngtcp2/lib/ngtcp2_conversion.c', 'ngtcp2/lib/ngtcp2_crypto.c', 'ngtcp2/lib/ngtcp2_err.c', + 'ngtcp2/lib/ngtcp2_frame_chain.c', 'ngtcp2/lib/ngtcp2_gaptr.c', 'ngtcp2/lib/ngtcp2_idtr.c', 'ngtcp2/lib/ngtcp2_ksl.c', @@ -38,13 +39,14 @@ 'ngtcp2/lib/ngtcp2_rtb.c', 'ngtcp2/lib/ngtcp2_str.c', 'ngtcp2/lib/ngtcp2_strm.c', + 'ngtcp2/lib/ngtcp2_unreachable.c', 'ngtcp2/lib/ngtcp2_vec.c', 'ngtcp2/lib/ngtcp2_version.c', 'ngtcp2/lib/ngtcp2_window_filter.c', 'ngtcp2/crypto/shared.c' ], - 'ngtcp2_sources_openssl': [ - 'ngtcp2/crypto/openssl/openssl.c' + 'ngtcp2_sources_quictls': [ + 'ngtcp2/crypto/quictls/quictls.c' ], 'ngtcp2_sources_boringssl': [ 'ngtcp2/crypto/boringssl/boringssl.c' @@ -75,8 +77,11 @@ 'nghttp3/lib/nghttp3_str.c', 'nghttp3/lib/nghttp3_stream.c', 'nghttp3/lib/nghttp3_tnode.c', + 'nghttp3/lib/nghttp3_unreachable.c', 'nghttp3/lib/nghttp3_vec.c', - 'nghttp3/lib/nghttp3_version.c' + 'nghttp3/lib/nghttp3_version.c', + # sfparse is also used by nghttp2 and is included by nghttp2.gyp + # 'nghttp3/lib/sfparse.c' ] }, 'targets': [ @@ -100,6 +105,9 @@ '../openssl/openssl.gyp:openssl' ] }], + ['OS!="win"', { + 'defines': ['HAVE_UNISTD_H'] + }], ['OS=="win"', { 'defines': [ 'WIN32', @@ -132,7 +140,7 @@ }, 'sources': [ 
'<@(ngtcp2_sources)', - '<@(ngtcp2_sources_openssl)', + '<@(ngtcp2_sources_quictls)', ] }, { @@ -144,7 +152,7 @@ ], 'defines': [ 'BUILDING_NGHTTP3', - 'NGHTTP3_STATICLIB' + 'NGHTTP3_STATICLIB', ], 'dependencies': [ 'ngtcp2' @@ -162,6 +170,9 @@ }, }, }], + ['OS!="win"', { + 'defines': ['HAVE_UNISTD_H'] + }], ['OS=="linux" or OS=="android"', { 'defines': [ 'HAVE_ARPA_INET_H', diff --git a/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c b/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c index 015032d41ca0ce..50b89110e36ff7 100644 --- a/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c +++ b/deps/ngtcp2/ngtcp2/crypto/boringssl/boringssl.c @@ -92,8 +92,8 @@ ngtcp2_crypto_aead *ngtcp2_crypto_aead_retry(ngtcp2_crypto_aead *aead) { return ngtcp2_crypto_aead_init(aead, (void *)EVP_aead_aes_128_gcm()); } -static const EVP_AEAD *crypto_ssl_get_aead(SSL *ssl) { - switch (SSL_CIPHER_get_id(SSL_get_current_cipher(ssl))) { +static const EVP_AEAD *crypto_cipher_id_get_aead(uint32_t cipher_id) { + switch (cipher_id) { case TLS1_CK_AES_128_GCM_SHA256: return EVP_aead_aes_128_gcm(); case TLS1_CK_AES_256_GCM_SHA384: @@ -105,8 +105,8 @@ static const EVP_AEAD *crypto_ssl_get_aead(SSL *ssl) { } } -static uint64_t crypto_ssl_get_aead_max_encryption(SSL *ssl) { - switch (SSL_CIPHER_get_id(SSL_get_current_cipher(ssl))) { +static uint64_t crypto_cipher_id_get_aead_max_encryption(uint32_t cipher_id) { + switch (cipher_id) { case TLS1_CK_AES_128_GCM_SHA256: case TLS1_CK_AES_256_GCM_SHA384: return NGTCP2_CRYPTO_MAX_ENCRYPTION_AES_GCM; @@ -117,8 +117,9 @@ static uint64_t crypto_ssl_get_aead_max_encryption(SSL *ssl) { } } -static uint64_t crypto_ssl_get_aead_max_decryption_failure(SSL *ssl) { - switch (SSL_CIPHER_get_id(SSL_get_current_cipher(ssl))) { +static uint64_t +crypto_cipher_id_get_aead_max_decryption_failure(uint32_t cipher_id) { + switch (cipher_id) { case TLS1_CK_AES_128_GCM_SHA256: case TLS1_CK_AES_256_GCM_SHA384: return NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_AES_GCM; @@ -129,8 +130,9 @@ static uint64_t crypto_ssl_get_aead_max_decryption_failure(SSL *ssl) { } } -static const ngtcp2_crypto_boringssl_cipher *crypto_ssl_get_hp(SSL *ssl) { - switch (SSL_CIPHER_get_id(SSL_get_current_cipher(ssl))) { +static const ngtcp2_crypto_boringssl_cipher * +crypto_cipher_id_get_hp(uint32_t cipher_id) { + switch (cipher_id) { case TLS1_CK_AES_128_GCM_SHA256: return &crypto_cipher_aes_128; case TLS1_CK_AES_256_GCM_SHA384: @@ -142,8 +144,8 @@ static const ngtcp2_crypto_boringssl_cipher *crypto_ssl_get_hp(SSL *ssl) { } } -static const EVP_MD *crypto_ssl_get_md(SSL *ssl) { - switch (SSL_CIPHER_get_id(SSL_get_current_cipher(ssl))) { +static const EVP_MD *crypto_cipher_id_get_md(uint32_t cipher_id) { + switch (cipher_id) { case TLS1_CK_AES_128_GCM_SHA256: case TLS1_CK_CHACHA20_POLY1305_SHA256: return EVP_sha256(); @@ -154,15 +156,47 @@ static const EVP_MD *crypto_ssl_get_md(SSL *ssl) { } } +static int supported_cipher_id(uint32_t cipher_id) { + switch (cipher_id) { + case TLS1_CK_AES_128_GCM_SHA256: + case TLS1_CK_AES_256_GCM_SHA384: + case TLS1_CK_CHACHA20_POLY1305_SHA256: + return 1; + default: + return 0; + } +} + +static ngtcp2_crypto_ctx *crypto_ctx_cipher_id(ngtcp2_crypto_ctx *ctx, + uint32_t cipher_id) { + ngtcp2_crypto_aead_init(&ctx->aead, + (void *)crypto_cipher_id_get_aead(cipher_id)); + ctx->md.native_handle = (void *)crypto_cipher_id_get_md(cipher_id); + ctx->hp.native_handle = (void *)crypto_cipher_id_get_hp(cipher_id); + ctx->max_encryption = crypto_cipher_id_get_aead_max_encryption(cipher_id); + 
ctx->max_decryption_failure = + crypto_cipher_id_get_aead_max_decryption_failure(cipher_id); + + return ctx; +} + ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls(ngtcp2_crypto_ctx *ctx, void *tls_native_handle) { SSL *ssl = tls_native_handle; - ngtcp2_crypto_aead_init(&ctx->aead, (void *)crypto_ssl_get_aead(ssl)); - ctx->md.native_handle = (void *)crypto_ssl_get_md(ssl); - ctx->hp.native_handle = (void *)crypto_ssl_get_hp(ssl); - ctx->max_encryption = crypto_ssl_get_aead_max_encryption(ssl); - ctx->max_decryption_failure = crypto_ssl_get_aead_max_decryption_failure(ssl); - return ctx; + const SSL_CIPHER *cipher = SSL_get_current_cipher(ssl); + uint32_t cipher_id; + + if (cipher == NULL) { + return NULL; + } + + cipher_id = SSL_CIPHER_get_id(cipher); + + if (!supported_cipher_id(cipher_id)) { + return NULL; + } + + return crypto_ctx_cipher_id(ctx, cipher_id); } ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls_early(ngtcp2_crypto_ctx *ctx, @@ -394,15 +428,17 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, } } -int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, - ngtcp2_crypto_level crypto_level, - const uint8_t *data, size_t datalen) { +int ngtcp2_crypto_read_write_crypto_data( + ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, + const uint8_t *data, size_t datalen) { SSL *ssl = ngtcp2_conn_get_tls_native_handle(conn); int rv; int err; if (SSL_provide_quic_data( - ssl, ngtcp2_crypto_boringssl_from_ngtcp2_crypto_level(crypto_level), + ssl, + ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level( + encryption_level), data, datalen) != 1) { return -1; } @@ -423,7 +459,10 @@ int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, SSL_reset_early_data_reject(ssl); - ngtcp2_conn_early_data_rejected(conn); + rv = ngtcp2_conn_tls_early_data_rejected(conn); + if (rv != 0) { + return -1; + } goto retry; default: @@ -435,7 +474,7 @@ int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, return 0; } - ngtcp2_conn_handshake_completed(conn); + ngtcp2_conn_tls_handshake_completed(conn); } rv = SSL_process_quic_post_handshake(ssl); @@ -464,7 +503,7 @@ int ngtcp2_crypto_set_remote_transport_params(ngtcp2_conn *conn, void *tls) { SSL_get_peer_quic_transport_params(ssl, &tp, &tplen); - rv = ngtcp2_conn_decode_remote_transport_params(conn, tp, tplen); + rv = ngtcp2_conn_decode_and_set_remote_transport_params(conn, tp, tplen); if (rv != 0) { ngtcp2_conn_set_tls_error(conn, rv); return -1; @@ -482,33 +521,34 @@ int ngtcp2_crypto_set_local_transport_params(void *tls, const uint8_t *buf, return 0; } -ngtcp2_crypto_level ngtcp2_crypto_boringssl_from_ssl_encryption_level( +ngtcp2_encryption_level ngtcp2_crypto_boringssl_from_ssl_encryption_level( enum ssl_encryption_level_t ssl_level) { switch (ssl_level) { case ssl_encryption_initial: - return NGTCP2_CRYPTO_LEVEL_INITIAL; + return NGTCP2_ENCRYPTION_LEVEL_INITIAL; case ssl_encryption_early_data: - return NGTCP2_CRYPTO_LEVEL_EARLY; + return NGTCP2_ENCRYPTION_LEVEL_0RTT; case ssl_encryption_handshake: - return NGTCP2_CRYPTO_LEVEL_HANDSHAKE; + return NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE; case ssl_encryption_application: - return NGTCP2_CRYPTO_LEVEL_APPLICATION; + return NGTCP2_ENCRYPTION_LEVEL_1RTT; default: assert(0); abort(); } } -enum ssl_encryption_level_t ngtcp2_crypto_boringssl_from_ngtcp2_crypto_level( - ngtcp2_crypto_level crypto_level) { - switch (crypto_level) { - case NGTCP2_CRYPTO_LEVEL_INITIAL: +enum ssl_encryption_level_t +ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level( + ngtcp2_encryption_level 
encryption_level) { + switch (encryption_level) { + case NGTCP2_ENCRYPTION_LEVEL_INITIAL: return ssl_encryption_initial; - case NGTCP2_CRYPTO_LEVEL_HANDSHAKE: + case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: return ssl_encryption_handshake; - case NGTCP2_CRYPTO_LEVEL_APPLICATION: + case NGTCP2_ENCRYPTION_LEVEL_1RTT: return ssl_encryption_application; - case NGTCP2_CRYPTO_LEVEL_EARLY: + case NGTCP2_ENCRYPTION_LEVEL_0RTT: return ssl_encryption_early_data; default: assert(0); @@ -541,7 +581,7 @@ static int set_read_secret(SSL *ssl, enum ssl_encryption_level_t bssl_level, size_t secretlen) { ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); - ngtcp2_crypto_level level = + ngtcp2_encryption_level level = ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); (void)cipher; @@ -558,7 +598,7 @@ static int set_write_secret(SSL *ssl, enum ssl_encryption_level_t bssl_level, size_t secretlen) { ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); - ngtcp2_crypto_level level = + ngtcp2_encryption_level level = ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); (void)cipher; @@ -574,7 +614,7 @@ static int add_handshake_data(SSL *ssl, enum ssl_encryption_level_t bssl_level, const uint8_t *data, size_t datalen) { ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); - ngtcp2_crypto_level level = + ngtcp2_encryption_level level = ngtcp2_crypto_boringssl_from_ssl_encryption_level(bssl_level); int rv; diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h index 4736b51c3cb48d..06427d7a7cac70 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto.h @@ -38,39 +38,16 @@ extern "C" { # include #endif /* WIN32 */ -/** - * @macro - * - * :macro:`NGTCP2_CRYPTO_INITIAL_SECRETLEN` is the length of secret - * for Initial packets. - */ -#define NGTCP2_CRYPTO_INITIAL_SECRETLEN 32 - -/** - * @macro - * - * :macro:`NGTCP2_CRYPTO_INITIAL_KEYLEN` is the length of key for - * Initial packets. - */ -#define NGTCP2_CRYPTO_INITIAL_KEYLEN 16 - -/** - * @macro - * - * :macro:`NGTCP2_CRYPTO_INITIAL_IVLEN` is the length of IV for - * Initial packets. - */ -#define NGTCP2_CRYPTO_INITIAL_IVLEN 12 - /** * @function * * `ngtcp2_crypto_ctx_tls` initializes |ctx| by extracting negotiated * ciphers and message digests from native TLS session * |tls_native_handle|. This is used for encrypting/decrypting - * Handshake and Short header packets. + * Handshake and 1-RTT packets. If it is unable to obtain necessary + * data from |tls_native_handle|, this function returns NULL. * - * If libngtcp2_crypto_openssl is linked, |tls_native_handle| must be + * If libngtcp2_crypto_quictls is linked, |tls_native_handle| must be * a pointer to SSL object. */ NGTCP2_EXTERN ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls(ngtcp2_crypto_ctx *ctx, @@ -81,10 +58,11 @@ NGTCP2_EXTERN ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls(ngtcp2_crypto_ctx *ctx, * * `ngtcp2_crypto_ctx_tls_early` initializes |ctx| by extracting early * ciphers and message digests from native TLS session - * |tls_native_handle|. This is used for encrypting/decrypting 0RTT - * packets. + * |tls_native_handle|. This is used for encrypting/decrypting 0-RTT + * packets. If it is unable to obtain necessary data from + * |tls_native_handle|, this function returns NULL. 
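/* Illustrative sketch of the caller-side consequence of the NULL return
 * documented above: with this change, ngtcp2_crypto_ctx_tls (and
 * ngtcp2_crypto_ctx_tls_early) can fail when no cipher suite has been
 * negotiated yet or the negotiated suite is unsupported. The helper name
 * and the |ssl| handle are assumptions made for this example. */
static int query_crypto_ctx(void *ssl, ngtcp2_crypto_ctx *ctx) {
  if (ngtcp2_crypto_ctx_tls(ctx, ssl) == NULL) {
    return -1; /* try again once the TLS handshake has progressed */
  }

  /* ctx->aead, ctx->md, ctx->hp, ctx->max_encryption and
     ctx->max_decryption_failure now describe the negotiated cipher suite */
  return 0;
}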
* - * If libngtcp2_crypto_openssl is linked, |tls_native_handle| must be + * If libngtcp2_crypto_quictls is linked, |tls_native_handle| must be * a pointer to SSL object. */ NGTCP2_EXTERN ngtcp2_crypto_ctx * @@ -96,7 +74,7 @@ ngtcp2_crypto_ctx_tls_early(ngtcp2_crypto_ctx *ctx, void *tls_native_handle); * `ngtcp2_crypto_md_init` initializes |md| with the provided * |md_native_handle| which is an underlying message digest object. * - * If libngtcp2_crypto_openssl is linked, |md_native_handle| must be a + * If libngtcp2_crypto_quictls is linked, |md_native_handle| must be a * pointer to EVP_MD. * * If libngtcp2_crypto_gnutls is linked, |md_native_handle| must be @@ -134,10 +112,12 @@ ngtcp2_crypto_aead_noncelen(const ngtcp2_crypto_aead *aead); /** * @function * - * `ngtcp2_crypto_hkdf_extract` performs HKDF extract operation. The - * result is the length of |md| and is stored to the buffer pointed by - * |dest|. The caller is responsible to specify the buffer that can - * store the output. + * `ngtcp2_crypto_hkdf_extract` performs HKDF extract operation. + * + * The length of output is `ngtcp2_crypto_md_hashlen(md) + * `. The output is stored in the buffer + * pointed by |dest|. The caller is responsible to specify the buffer + * that has enough capacity to store the output. * * This function returns 0 if it succeeds, or -1. */ @@ -150,7 +130,7 @@ ngtcp2_crypto_hkdf_extract(uint8_t *dest, const ngtcp2_crypto_md *md, * @function * * `ngtcp2_crypto_hkdf_expand` performs HKDF expand operation. The - * result is |destlen| bytes long and is stored to the buffer pointed + * result is |destlen| bytes long, and is stored in the buffer pointed * by |dest|. * * This function returns 0 if it succeeds, or -1. @@ -166,7 +146,8 @@ NGTCP2_EXTERN int ngtcp2_crypto_hkdf_expand(uint8_t *dest, size_t destlen, * @function * * `ngtcp2_crypto_hkdf` performs HKDF operation. The result is - * |destlen| bytes long and is stored to the buffer pointed by |dest|. + * |destlen| bytes long, and is stored in the buffer pointed by + * |dest|. * * This function returns 0 if it succeeds, or -1. */ @@ -176,41 +157,6 @@ NGTCP2_EXTERN int ngtcp2_crypto_hkdf(uint8_t *dest, size_t destlen, const uint8_t *salt, size_t saltlen, const uint8_t *info, size_t infolen); -/** - * @function - * - * `ngtcp2_crypto_hkdf_expand_label` performs HKDF expand label. The - * result is |destlen| bytes long and is stored to the buffer pointed - * by |dest|. - * - * This function returns 0 if it succeeds, or -1. - */ -NGTCP2_EXTERN int ngtcp2_crypto_hkdf_expand_label(uint8_t *dest, size_t destlen, - const ngtcp2_crypto_md *md, - const uint8_t *secret, - size_t secretlen, - const uint8_t *label, - size_t labellen); - -/** - * @enum - * - * :type:`ngtcp2_crypto_side` indicates which side the application - * implements; client or server. - */ -typedef enum ngtcp2_crypto_side { - /** - * :enum:`NGTCP2_CRYPTO_SIDE_CLIENT` indicates that the application - * is client. - */ - NGTCP2_CRYPTO_SIDE_CLIENT, - /** - * :enum:`NGTCP2_CRYPTO_SIDE_SERVER` indicates that the application - * is server. - */ - NGTCP2_CRYPTO_SIDE_SERVER -} ngtcp2_crypto_side; - /** * @function * @@ -225,11 +171,10 @@ ngtcp2_crypto_packet_protection_ivlen(const ngtcp2_crypto_aead *aead); * * `ngtcp2_crypto_encrypt` encrypts |plaintext| of length * |plaintextlen| and writes the ciphertext into the buffer pointed by - * |dest|. The length of ciphertext is plaintextlen + + * |dest|. The length of ciphertext is |plaintextlen| + * :member:`aead->max_overhead ` * bytes long. 
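/* A small sketch (not part of the upstream diff) of the sizing rule stated
 * above: the ciphertext buffer needs plaintextlen + aead->max_overhead
 * bytes.  The allocation strategy here is an assumption for illustration. */
#include <stdint.h>
#include <stdlib.h>
#include <ngtcp2/ngtcp2_crypto.h>

static uint8_t *alloc_ciphertext(const ngtcp2_crypto_aead *aead,
                                 size_t plaintextlen) {
  /* It is also valid to encrypt in place, since |dest| and |plaintext| may
   * point to the same buffer. */
  return malloc(plaintextlen + aead->max_overhead);
}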
|dest| must have enough capacity to store the - * ciphertext. It is allowed to specify the same value to |dest| and - * |plaintext|. + * ciphertext. |dest| and |plaintext| may point to the same buffer. * * This function returns 0 if it succeeds, or -1. */ @@ -263,11 +208,10 @@ ngtcp2_crypto_encrypt_cb(uint8_t *dest, const ngtcp2_crypto_aead *aead, * * `ngtcp2_crypto_decrypt` decrypts |ciphertext| of length * |ciphertextlen| and writes the plaintext into the buffer pointed by - * |dest|. The length of plaintext is ciphertextlen - + * |dest|. The length of plaintext is |ciphertextlen| - * :member:`aead->max_overhead ` * bytes long. |dest| must have enough capacity to store the - * plaintext. It is allowed to specify the same value to |dest| and - * |ciphertext|. + * plaintext. |dest| and |ciphertext| may point to the same buffer. * * This function returns 0 if it succeeds, or -1. */ @@ -299,7 +243,7 @@ ngtcp2_crypto_decrypt_cb(uint8_t *dest, const ngtcp2_crypto_aead *aead, /** * @function * - * `ngtcp2_crypto_hp_mask` generates mask which is used in packet + * `ngtcp2_crypto_hp_mask` generates a mask which is used in packet * header encryption. The mask is written to the buffer pointed by * |dest|. The sample is passed as |sample| which is * :macro:`NGTCP2_HP_SAMPLELEN` bytes long. The length of mask must @@ -333,15 +277,14 @@ ngtcp2_crypto_hp_mask_cb(uint8_t *dest, const ngtcp2_crypto_cipher *hp, /** * @function * - * `ngtcp2_crypto_derive_and_install_rx_key` derives the rx keys from - * |secret| and installs new keys to |conn|. + * `ngtcp2_crypto_derive_and_install_rx_key` derives the decryption + * keying materials from |secret|, and installs them to |conn|. * - * If |key| is not NULL, the derived packet protection key for - * decryption is written to the buffer pointed by |key|. If |iv| is - * not NULL, the derived packet protection IV for decryption is - * written to the buffer pointed by |iv|. If |hp| is not NULL, the - * derived header protection key for decryption is written to the - * buffer pointed by |hp|. + * If |key| is not NULL, the derived packet protection key is written + * to the buffer pointed by |key|. If |iv| is not NULL, the derived + * packet protection IV is written to the buffer pointed by |iv|. If + * |hp| is not NULL, the derived header protection key is written to + * the buffer pointed by |hp|. * * |secretlen| specifies the length of |secret|. * @@ -351,44 +294,44 @@ ngtcp2_crypto_hp_mask_cb(uint8_t *dest, const ngtcp2_crypto_cipher *hp, * `ngtcp2_crypto_packet_protection_ivlen(ctx->aead) * ` where ctx is obtained by * `ngtcp2_crypto_ctx_tls` (or `ngtcp2_crypto_ctx_tls_early` if - * |level| == :enum:`ngtcp2_crypto_level.NGTCP2_CRYPTO_LEVEL_EARLY`). + * |level| == + * :enum:`ngtcp2_encryption_level.NGTCP2_ENCRYPTION_LEVEL_0RTT`). * * In the first call of this function, it calls * `ngtcp2_conn_set_crypto_ctx` (or `ngtcp2_conn_set_early_crypto_ctx` * if |level| == - * :enum:`ngtcp2_crypto_level.NGTCP2_CRYPTO_LEVEL_EARLY`) to set - * negotiated AEAD and message digest algorithm. After the successful - * call of this function, application can use + * :enum:`ngtcp2_encryption_level.NGTCP2_ENCRYPTION_LEVEL_0RTT`) to + * set negotiated AEAD and message digest algorithm. After the + * successful call of this function, application can use * `ngtcp2_conn_get_crypto_ctx` (or `ngtcp2_conn_get_early_crypto_ctx` * if |level| == - * :enum:`ngtcp2_crypto_level.NGTCP2_CRYPTO_LEVEL_EARLY`) to get - * :type:`ngtcp2_crypto_ctx`. 
+ * :enum:`ngtcp2_encryption_level.NGTCP2_ENCRYPTION_LEVEL_0RTT`) to + * get :type:`ngtcp2_crypto_ctx`. * * If |conn| is initialized as client, and |level| is - * :enum:`ngtcp2_crypto_level.NGTCP2_CRYPTO_LEVEL_APPLICATION`, this + * :enum:`ngtcp2_encryption_level.NGTCP2_ENCRYPTION_LEVEL_1RTT`, this * function retrieves a remote QUIC transport parameters extension - * from an object obtained by `ngtcp2_conn_get_tls_native_handle` and + * from an object obtained by `ngtcp2_conn_get_tls_native_handle`, and * sets it to |conn| by calling - * `ngtcp2_conn_decode_remote_transport_params`. + * `ngtcp2_conn_decode_and_set_remote_transport_params`. * * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_derive_and_install_rx_key( ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp, - ngtcp2_crypto_level level, const uint8_t *secret, size_t secretlen); + ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen); /** * @function * - * `ngtcp2_crypto_derive_and_install_tx_key` derives the tx keys from - * |secret| and installs new keys to |conn|. + * `ngtcp2_crypto_derive_and_install_tx_key` derives the encryption + * keying materials from |secret|, and installs new keys to |conn|. * - * If |key| is not NULL, the derived packet protection key for - * encryption is written to the buffer pointed by |key|. If |iv| is - * not NULL, the derived packet protection IV for encryption is - * written to the buffer pointed by |iv|. If |hp| is not NULL, the - * derived header protection key for encryption is written to the - * buffer pointed by |hp|. + * If |key| is not NULL, the derived packet protection key is written + * to the buffer pointed by |key|. If |iv| is not NULL, the derived + * packet protection IV is written to the buffer pointed by |iv|. If + * |hp| is not NULL, the derived header protection key is written to + * the buffer pointed by |hp|. * * |secretlen| specifies the length of |secret|. * @@ -398,58 +341,59 @@ NGTCP2_EXTERN int ngtcp2_crypto_derive_and_install_rx_key( * `ngtcp2_crypto_packet_protection_ivlen(ctx->aead) * ` where ctx is obtained by * `ngtcp2_crypto_ctx_tls` (or `ngtcp2_crypto_ctx_tls_early` if - * |level| == :enum:`ngtcp2_crypto_level.NGTCP2_CRYPTO_LEVEL_EARLY`). + * |level| == + * :enum:`ngtcp2_encryption_level.NGTCP2_ENCRYPTION_LEVEL_0RTT`). * * In the first call of this function, it calls * `ngtcp2_conn_set_crypto_ctx` (or `ngtcp2_conn_set_early_crypto_ctx` * if |level| == - * :enum:`ngtcp2_crypto_level.NGTCP2_CRYPTO_LEVEL_EARLY`) to set - * negotiated AEAD and message digest algorithm. After the successful - * call of this function, application can use + * :enum:`ngtcp2_encryption_level.NGTCP2_ENCRYPTION_LEVEL_0RTT`) to + * set negotiated AEAD and message digest algorithm. After the + * successful call of this function, application can use * `ngtcp2_conn_get_crypto_ctx` (or `ngtcp2_conn_get_early_crypto_ctx` * if |level| == - * :enum:`ngtcp2_crypto_level.NGTCP2_CRYPTO_LEVEL_EARLY`) to get - * :type:`ngtcp2_crypto_ctx`. + * :enum:`ngtcp2_encryption_level.NGTCP2_ENCRYPTION_LEVEL_0RTT`) to + * get :type:`ngtcp2_crypto_ctx`. 
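/* A sketch (not part of the upstream diff), assuming a TLS integration that
 * receives per-level traffic secrets; it forwards them to the helpers
 * documented above, using the renamed ngtcp2_encryption_level type and the
 * optional NULL output buffers. */
#include <ngtcp2/ngtcp2.h>
#include <ngtcp2/ngtcp2_crypto.h>

static int on_traffic_secrets(ngtcp2_conn *conn,
                              ngtcp2_encryption_level level,
                              const uint8_t *rx_secret,
                              const uint8_t *tx_secret, size_t secretlen) {
  if (rx_secret &&
      ngtcp2_crypto_derive_and_install_rx_key(conn, NULL, NULL, NULL, level,
                                              rx_secret, secretlen) != 0) {
    return -1;
  }

  if (tx_secret &&
      ngtcp2_crypto_derive_and_install_tx_key(conn, NULL, NULL, NULL, level,
                                              tx_secret, secretlen) != 0) {
    return -1;
  }

  return 0;
}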
* * If |conn| is initialized as server, and |level| is - * :enum:`ngtcp2_crypto_level.NGTCP2_CRYPTO_LEVEL_APPLICATION`, this + * :enum:`ngtcp2_encryption_level.NGTCP2_ENCRYPTION_LEVEL_1RTT`, this * function retrieves a remote QUIC transport parameters extension - * from an object obtained by `ngtcp2_conn_get_tls_native_handle` and + * from an object obtained by `ngtcp2_conn_get_tls_native_handle`, and * sets it to |conn| by calling - * `ngtcp2_conn_decode_remote_transport_params`. + * `ngtcp2_conn_decode_and_set_remote_transport_params`. * * This function returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int ngtcp2_crypto_derive_and_install_tx_key( ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp, - ngtcp2_crypto_level level, const uint8_t *secret, size_t secretlen); + ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen); /** * @function * * `ngtcp2_crypto_update_key` updates traffic keying materials. * - * The new traffic secret for decryption is written to the buffer - * pointed by |rx_secret|. The length of secret is |secretlen| bytes, - * and |rx_secret| must point to the buffer which has enough capacity. + * The new decryption traffic secret is written to the buffer pointed + * by |rx_secret|. The length of secret is |secretlen| bytes, and + * |rx_secret| must point to the buffer which has enough capacity. * - * The new traffic secret for encryption is written to the buffer - * pointed by |tx_secret|. The length of secret is |secretlen| bytes, - * and |tx_secret| must point to the buffer which has enough capacity. + * The new encryption traffic secret is written to the buffer pointed + * by |tx_secret|. The length of secret is |secretlen| bytes, and + * |tx_secret| must point to the buffer which has enough capacity. * - * The derived packet protection key for decryption is written to the - * buffer pointed by |rx_key|. The derived packet protection IV for - * decryption is written to the buffer pointed by |rx_iv|. - * |rx_aead_ctx| must be constructed with |rx_key|. + * The derived decryption packet protection key is written to the + * buffer pointed by |rx_key|. The derived decryption packet + * protection IV is written to the buffer pointed by |rx_iv|. + * |rx_aead_ctx| is initialized with the derived key and IV. * - * The derived packet protection key for encryption is written to the - * buffer pointed by |tx_key|. The derived packet protection IV for - * encryption is written to the buffer pointed by |tx_iv|. - * |tx_aead_ctx| must be constructed with |rx_key|. + * The derived encryption packet protection key is written to the + * buffer pointed by |tx_key|. The derived encryption packet + * protection IV is written to the buffer pointed by |tx_iv|. + * |tx_aead_ctx| is initialized with the derived key and IV. * - * |current_rx_secret| and |current_tx_secret| are the current traffic - * secrets for decryption and encryption. |secretlen| specifies the - * length of |rx_secret| and |tx_secret|. + * |current_rx_secret| and |current_tx_secret| are the current + * decryption and encryption traffic secrets respectively. They share + * the same length with |rx_secret| and |tx_secret|. * * The length of packet protection key and header protection key is * `ngtcp2_crypto_aead_keylen(ctx->aead) `, @@ -488,7 +432,7 @@ NGTCP2_EXTERN int ngtcp2_crypto_update_key_cb( * @function * * `ngtcp2_crypto_client_initial_cb` installs initial secrets and - * encryption keys and sets QUIC transport parameters. + * encryption keys, and sets QUIC transport parameters. 
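/* A client-side wiring sketch (not part of the upstream diff).  It sets a
 * subset of ngtcp2_callbacks members directly to the helper functions from
 * this header; the remaining mandatory callbacks (rand,
 * get_new_connection_id, and so on) are application specific and omitted. */
#include <ngtcp2/ngtcp2.h>
#include <ngtcp2/ngtcp2_crypto.h>

static void set_crypto_callbacks(ngtcp2_callbacks *callbacks) {
  callbacks->client_initial = ngtcp2_crypto_client_initial_cb;
  callbacks->recv_retry = ngtcp2_crypto_recv_retry_cb;
  callbacks->recv_crypto_data = ngtcp2_crypto_recv_crypto_data_cb;
  callbacks->encrypt = ngtcp2_crypto_encrypt_cb;
  callbacks->decrypt = ngtcp2_crypto_decrypt_cb;
  callbacks->hp_mask = ngtcp2_crypto_hp_mask_cb;
  callbacks->update_key = ngtcp2_crypto_update_key_cb;
  callbacks->delete_crypto_aead_ctx = ngtcp2_crypto_delete_crypto_aead_ctx_cb;
  callbacks->get_path_challenge_data =
      ngtcp2_crypto_get_path_challenge_data_cb;
  callbacks->version_negotiation = ngtcp2_crypto_version_negotiation_cb;
}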
* * This function can be directly passed to * :member:`ngtcp2_callbacks.client_initial` field. It is only used @@ -507,8 +451,8 @@ NGTCP2_EXTERN int ngtcp2_crypto_client_initial_cb(ngtcp2_conn *conn, * response to incoming Retry packet. * * This function can be directly passed to - * :member:`ngtcp2_callbacks.recv_retry` field. It is only used - * by client. + * :member:`ngtcp2_callbacks.recv_retry` field. It is only used by + * client. * * This function returns 0 if it succeeds, or * :macro:`NGTCP2_ERR_CALLBACK_FAILURE`. @@ -525,8 +469,8 @@ NGTCP2_EXTERN int ngtcp2_crypto_recv_retry_cb(ngtcp2_conn *conn, * transport parameters. * * This function can be directly passed to - * :member:`ngtcp2_callbacks.recv_client_initial` field. It is - * only used by server. + * :member:`ngtcp2_callbacks.recv_client_initial` field. It is only + * used by server. * * This function returns 0 if it succeeds, or * :macro:`NGTCP2_ERR_CALLBACK_FAILURE`. @@ -539,21 +483,21 @@ NGTCP2_EXTERN int ngtcp2_crypto_recv_client_initial_cb(ngtcp2_conn *conn, * @function * * `ngtcp2_crypto_read_write_crypto_data` reads CRYPTO data |data| of - * length |datalen| in encryption level |crypto_level| and may feed - * outgoing CRYPTO data to |conn|. This function can drive handshake. - * This function can be also used after handshake completes. It is - * allowed to call this function with |datalen| == 0. In this case, - * no additional read operation is done. + * length |datalen| in an encryption level |encryption_level|, and may + * feed outgoing CRYPTO data to |conn|. This function can drive + * handshake. This function can be also used after handshake + * completes. It is allowed to call this function with |datalen| == + * 0. In this case, no additional read operation is done. * * This function returns 0 if it succeeds, or a negative error code. * The generic error code is -1 if a specific error code is not * suitable. The error codes less than -10000 are specific to - * underlying TLS implementation. For OpenSSL, the error codes are - * defined in *ngtcp2_crypto_openssl.h*. + * underlying TLS implementation. For quictls, the error codes are + * defined in *ngtcp2_crypto_quictls.h*. */ NGTCP2_EXTERN int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, - ngtcp2_crypto_level crypto_level, + ngtcp2_encryption_level encryption_level, const uint8_t *data, size_t datalen); /** @@ -570,17 +514,17 @@ ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, * codes. */ NGTCP2_EXTERN int ngtcp2_crypto_recv_crypto_data_cb( - ngtcp2_conn *conn, ngtcp2_crypto_level crypto_level, uint64_t offset, - const uint8_t *data, size_t datalen, void *user_data); + ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, + uint64_t offset, const uint8_t *data, size_t datalen, void *user_data); /** * @function * * `ngtcp2_crypto_generate_stateless_reset_token` generates a * stateless reset token using HKDF extraction using the given |cid| - * and static key |secret| as input. The token will be written to - * the buffer pointed by |token| and it must have a capacity of at - * least :macro:`NGTCP2_STATELESS_RESET_TOKENLEN` bytes. + * and |secret| as input. The token will be written to the buffer + * pointed by |token|, and it must have a capacity of at least + * :macro:`NGTCP2_STATELESS_RESET_TOKENLEN` bytes. * * This function returns 0 if it succeeds, or -1. */ @@ -644,12 +588,12 @@ NGTCP2_EXTERN int ngtcp2_crypto_generate_stateless_reset_token( * :macro:`NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN` bytes long. 
The * successfully generated token starts with * :macro:`NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY`. |secret| of length - * |secretlen| is an initial keying material to generate keys to - * encrypt the token. |version| is QUIC version. |remote_addr| of - * length |remote_addrlen| is an address of client. |retry_scid| is a - * Source Connection ID chosen by server and set in Retry packet. - * |odcid| is a Destination Connection ID in Initial packet sent by - * client. |ts| is the timestamp when the token is generated. + * |secretlen| is a keying material to generate keys to encrypt the + * token. |version| is QUIC version. |remote_addr| of length + * |remote_addrlen| is an address of client. |retry_scid| is a Source + * Connection ID chosen by server, and set in Retry packet. |odcid| + * is a Destination Connection ID in Initial packet sent by client. + * |ts| is the timestamp when the token is generated. * * This function returns the length of generated token if it succeeds, * or -1. @@ -664,16 +608,16 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_retry_token( * * `ngtcp2_crypto_verify_retry_token` verifies Retry token stored in * the buffer pointed by |token| of length |tokenlen|. |secret| of - * length |secretlen| is an initial keying material to generate keys - * to decrypt the token. |version| is QUIC version of the Initial - * packet that contains this token. |remote_addr| of length - * |remote_addrlen| is an address of client. |dcid| is a Destination - * Connection ID in Initial packet sent by client. |timeout| is the - * period during which the token is valid. |ts| is the current - * timestamp. When validation succeeds, the extracted Destination - * Connection ID (which is the Destination Connection ID in Initial - * packet sent by client that triggered Retry packet) is stored to the - * buffer pointed by |odcid|. + * length |secretlen| is a keying material to generate keys to decrypt + * the token. |version| is QUIC version of the Initial packet that + * contains this token. |remote_addr| of length |remote_addrlen| is + * an address of client. |dcid| is a Destination Connection ID in + * Initial packet sent by client. |timeout| is the period during + * which the token is valid. |ts| is the current timestamp. When + * validation succeeds, the extracted Destination Connection ID (which + * is the Destination Connection ID in Initial packet sent by client + * that triggered Retry packet) is stored in the buffer pointed by + * |odcid|. * * This function returns 0 if it succeeds, or -1. */ @@ -692,10 +636,9 @@ NGTCP2_EXTERN int ngtcp2_crypto_verify_retry_token( * :macro:`NGTCP2_CRYPTO_MAX_REGULAR_TOKENLEN` bytes long. The * successfully generated token starts with * :macro:`NGTCP2_CRYPTO_TOKEN_MAGIC_REGULAR`. |secret| of length - * |secretlen| is an initial keying material to generate keys to - * encrypt the token. |remote_addr| of length |remote_addrlen| is an - * address of client. |ts| is the timestamp when the token is - * generated. + * |secretlen| is a keying material to generate keys to encrypt the + * token. |remote_addr| of length |remote_addrlen| is an address of + * client. |ts| is the timestamp when the token is generated. * * This function returns the length of generated token if it succeeds, * or -1. @@ -710,8 +653,8 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_generate_regular_token( * * `ngtcp2_crypto_verify_regular_token` verifies a regular token * stored in the buffer pointed by |token| of length |tokenlen|. 
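/* A dispatch sketch (not part of the upstream diff) using the token magic
 * prefixes described above.  The calls to the verify functions themselves
 * are elided because their full argument lists are not shown in this hunk. */
#include <ngtcp2/ngtcp2_crypto.h>

/* Returns 1 for a Retry token, 2 for a regular token, -1 otherwise. */
static int classify_token(const uint8_t *token, size_t tokenlen) {
  if (tokenlen == 0) {
    return -1;
  }

  switch (token[0]) {
  case NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY:
    return 1; /* validate with ngtcp2_crypto_verify_retry_token() */
  case NGTCP2_CRYPTO_TOKEN_MAGIC_REGULAR:
    return 2; /* validate with ngtcp2_crypto_verify_regular_token() */
  default:
    return -1;
  }
}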
- * |secret| of length |secretlen| is an initial keying material to - * generate keys to decrypt the token. |remote_addr| of length + * |secret| of length |secretlen| is a keying material to generate + * keys to decrypt the token. |remote_addr| of length * |remote_addrlen| is an address of client. |timeout| is the period * during which the token is valid. |ts| is the current timestamp. * @@ -750,9 +693,12 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_crypto_write_connection_close( * @function * * `ngtcp2_crypto_write_retry` writes Retry packet to the buffer - * pointed by |dest| of length |destlen|. |odcid| specifies Original - * Destination Connection ID. |token| specifies Retry Token, and - * |tokenlen| specifies its length. + * pointed by |dest| of length |destlen|. |dcid| is the Connection ID + * which appeared in a packet as a Source Connection ID sent by + * client. |scid| is a server chosen Source Connection ID. |odcid| + * specifies Original Destination Connection ID which appeared in a + * packet as a Destination Connection ID sent by client. |token| + * specifies Retry Token, and |tokenlen| specifies its length. * * This function wraps around `ngtcp2_pkt_write_retry` for easier use. * @@ -783,7 +729,7 @@ ngtcp2_crypto_aead_ctx_encrypt_init(ngtcp2_crypto_aead_ctx *aead_ctx, * * `ngtcp2_crypto_aead_ctx_decrypt_init` initializes |aead_ctx| with * new AEAD cipher context object for decryption which is constructed - * to use |key| as encryption key. |aead| specifies AEAD cipher to + * to use |key| as decryption key. |aead| specifies AEAD cipher to * use. |noncelen| is the length of nonce. * * This function returns 0 if it succeeds, or -1. @@ -806,7 +752,8 @@ ngtcp2_crypto_aead_ctx_free(ngtcp2_crypto_aead_ctx *aead_ctx); /** * @function * - * `ngtcp2_crypto_delete_crypto_aead_ctx_cb` deletes the given |aead_ctx|. + * `ngtcp2_crypto_delete_crypto_aead_ctx_cb` deletes the given + * |aead_ctx|. * * This function can be directly passed to * :member:`ngtcp2_callbacks.delete_crypto_aead_ctx` field. @@ -845,7 +792,8 @@ NGTCP2_EXTERN int ngtcp2_crypto_get_path_challenge_data_cb(ngtcp2_conn *conn, * * `ngtcp2_crypto_version_negotiation_cb` installs Initial keys for * |version| which is negotiated or being negotiated. |client_dcid| - * is the destination connection ID in first Initial packet of client. + * is the destination connection ID in first Initial packet from + * client. * * This function can be directly passed to * :member:`ngtcp2_callbacks.version_negotiation` field. @@ -872,7 +820,7 @@ typedef ngtcp2_conn *(*ngtcp2_crypto_get_conn)( * * :type:`ngtcp2_crypto_conn_ref` is a structure to get a pointer to * :type:`ngtcp2_conn`. It is meant to be set to TLS native handle as - * an application specific data (e.g. SSL_set_app_data in OpenSSL). + * an application specific data (e.g. SSL_set_app_data in quictls). */ typedef struct ngtcp2_crypto_conn_ref { /** diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h index 6497c09e79840d..43a3c36f03a382 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_boringssl.h @@ -37,23 +37,23 @@ extern "C" { * @function * * `ngtcp2_crypto_boringssl_from_ssl_encryption_level` translates - * |ssl_level| to :type:`ngtcp2_crypto_level`. This function is only - * available for BoringSSL backend. + * |ssl_level| to :type:`ngtcp2_encryption_level`. 
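/* A sketch (not part of the upstream diff) of the ngtcp2_crypto_conn_ref
 * pattern referenced above for quictls/BoringSSL style backends; storing the
 * ngtcp2_conn pointer in the user_data member is an application-level
 * assumption. */
#include <openssl/ssl.h>
#include <ngtcp2/ngtcp2.h>
#include <ngtcp2/ngtcp2_crypto.h>

static ngtcp2_conn *get_conn(ngtcp2_crypto_conn_ref *conn_ref) {
  return conn_ref->user_data;
}

static void attach_conn_ref(SSL *ssl, ngtcp2_crypto_conn_ref *conn_ref,
                            ngtcp2_conn *conn) {
  conn_ref->get_conn = get_conn;
  conn_ref->user_data = conn;

  /* The crypto helper callbacks later retrieve the connection via
   * SSL_get_app_data() -> conn_ref->get_conn(conn_ref). */
  SSL_set_app_data(ssl, conn_ref);
}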
This function is + * only available for BoringSSL backend. */ -NGTCP2_EXTERN ngtcp2_crypto_level +NGTCP2_EXTERN ngtcp2_encryption_level ngtcp2_crypto_boringssl_from_ssl_encryption_level( enum ssl_encryption_level_t ssl_level); /** * @function * - * `ngtcp2_crypto_boringssl_from_ngtcp2_crypto_level` translates - * |crypto_level| to ssl_encryption_level_t. This function is only - * available for BoringSSL backend. + * `ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level` translates + * |encryption_level| to ssl_encryption_level_t. This function is + * only available for BoringSSL backend. */ NGTCP2_EXTERN enum ssl_encryption_level_t -ngtcp2_crypto_boringssl_from_ngtcp2_crypto_level( - ngtcp2_crypto_level crypto_level); +ngtcp2_crypto_boringssl_from_ngtcp2_encryption_level( + ngtcp2_encryption_level encryption_level); /** * @function diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h index d4b551c382fd69..61020bb3a8f376 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_picotls.h @@ -36,8 +36,8 @@ extern "C" { /** * @struct * - * :type:`ngtcp2_crypto_picotls_ctx` contains per-connection state - * of Picotls objects and must be an object to bet set to + * :type:`ngtcp2_crypto_picotls_ctx` contains per-connection state of + * Picotls objects and must be an object to bet set to * `ngtcp2_conn_set_tls_native_handle`. */ typedef struct ngtcp2_crypto_picotls_ctx { @@ -65,21 +65,21 @@ ngtcp2_crypto_picotls_ctx_init(ngtcp2_crypto_picotls_ctx *cptls); * @function * * `ngtcp2_crypto_picotls_from_epoch` translates |epoch| to - * :type:`ngtcp2_crypto_level`. This function is only available for - * Picotls backend. + * :type:`ngtcp2_encryption_level`. This function is only available + * for Picotls backend. */ -NGTCP2_EXTERN ngtcp2_crypto_level +NGTCP2_EXTERN ngtcp2_encryption_level ngtcp2_crypto_picotls_from_epoch(size_t epoch); /** * @function * - * `ngtcp2_crypto_picotls_from_ngtcp2_crypto_level` translates - * |crypto_level| to epoch. This function is only available for + * `ngtcp2_crypto_picotls_from_ngtcp2_encryption_level` translates + * |encryption_level| to epoch. This function is only available for * Picotls backend. */ -NGTCP2_EXTERN size_t ngtcp2_crypto_picotls_from_ngtcp2_crypto_level( - ngtcp2_crypto_level crypto_level); +NGTCP2_EXTERN size_t ngtcp2_crypto_picotls_from_ngtcp2_encryption_level( + ngtcp2_encryption_level encryption_level); /** * @function @@ -206,7 +206,7 @@ ngtcp2_crypto_picotls_configure_client_session(ngtcp2_crypto_picotls_ctx *cptls, * * `ngtcp2_crypto_picotls_deconfigure_session` frees the resources * allocated for |cptls| during QUIC connection. It frees the - * following data using :manpage:`free(3)`. 
+ * following data using :manpage:`free(3)`: * * - handshake_properties.max_early_data_size * - handshake_properties.additional_extensions[0].data.base diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_openssl.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_quictls.h similarity index 63% rename from deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_openssl.h rename to deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_quictls.h index 844081bfa8b055..b25c13b81c8b18 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_openssl.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_quictls.h @@ -22,8 +22,8 @@ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#ifndef NGTCP2_CRYPTO_OPENSSL_H -#define NGTCP2_CRYPTO_OPENSSL_H +#ifndef NGTCP2_CRYPTO_QUICTLS_H +#define NGTCP2_CRYPTO_QUICTLS_H #include @@ -36,57 +36,57 @@ extern "C" { /** * @macrosection * - * OpenSSL specific error codes + * quictls specific error codes */ /** * @macro * - * :macro:`NGTCP2_CRYPTO_OPENSSL_ERR_TLS_WANT_X509_LOOKUP` is the + * :macro:`NGTCP2_CRYPTO_QUICTLS_ERR_TLS_WANT_X509_LOOKUP` is the * error code which indicates that TLS handshake routine is * interrupted by X509 certificate lookup. See * :macro:`SSL_ERROR_WANT_X509_LOOKUP` error description from * `SSL_do_handshake`. */ -#define NGTCP2_CRYPTO_OPENSSL_ERR_TLS_WANT_X509_LOOKUP -10001 +#define NGTCP2_CRYPTO_QUICTLS_ERR_TLS_WANT_X509_LOOKUP -10001 /** * @macro * - * :macro:`NGTCP2_CRYPTO_OPENSSL_ERR_TLS_WANT_CLIENT_HELLO_CB` is the + * :macro:`NGTCP2_CRYPTO_QUICTLS_ERR_TLS_WANT_CLIENT_HELLO_CB` is the * error code which indicates that TLS handshake routine is * interrupted by client hello callback. See * :macro:`SSL_ERROR_WANT_CLIENT_HELLO_CB` error description from * `SSL_do_handshake`. */ -#define NGTCP2_CRYPTO_OPENSSL_ERR_TLS_WANT_CLIENT_HELLO_CB -10002 +#define NGTCP2_CRYPTO_QUICTLS_ERR_TLS_WANT_CLIENT_HELLO_CB -10002 /** * @function * - * `ngtcp2_crypto_openssl_from_ossl_encryption_level` translates - * |ossl_level| to :type:`ngtcp2_crypto_level`. This function is only - * available for OpenSSL backend. + * `ngtcp2_crypto_quictls_from_ossl_encryption_level` translates + * |ossl_level| to :type:`ngtcp2_encryption_level`. This function is + * only available for quictls backend. */ -NGTCP2_EXTERN ngtcp2_crypto_level -ngtcp2_crypto_openssl_from_ossl_encryption_level( +NGTCP2_EXTERN ngtcp2_encryption_level +ngtcp2_crypto_quictls_from_ossl_encryption_level( OSSL_ENCRYPTION_LEVEL ossl_level); /** * @function * - * `ngtcp2_crypto_openssl_from_ngtcp2_crypto_level` translates - * |crypto_level| to OSSL_ENCRYPTION_LEVEL. This function is only - * available for OpenSSL backend. + * `ngtcp2_crypto_quictls_from_ngtcp2_encryption_level` translates + * |encryption_level| to OSSL_ENCRYPTION_LEVEL. This function is only + * available for quictls backend. */ NGTCP2_EXTERN OSSL_ENCRYPTION_LEVEL -ngtcp2_crypto_openssl_from_ngtcp2_crypto_level( - ngtcp2_crypto_level crypto_level); +ngtcp2_crypto_quictls_from_ngtcp2_encryption_level( + ngtcp2_encryption_level encryption_level); /** * @function * - * `ngtcp2_crypto_openssl_configure_server_context` configures + * `ngtcp2_crypto_quictls_configure_server_context` configures * |ssl_ctx| for server side QUIC connection. It performs the * following modifications: * @@ -102,12 +102,12 @@ ngtcp2_crypto_openssl_from_ngtcp2_crypto_level( * It returns 0 if it succeeds, or -1. 
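/* A sketch (not part of the upstream diff) of how a caller might treat the
 * renamed quictls-specific error codes returned by
 * ngtcp2_crypto_read_write_crypto_data(); the retry policy is an assumption. */
#include <ngtcp2/ngtcp2.h>
#include <ngtcp2/ngtcp2_crypto.h>
#include <ngtcp2/ngtcp2_crypto_quictls.h>

static int feed_crypto_data(ngtcp2_conn *conn, ngtcp2_encryption_level level,
                            const uint8_t *data, size_t datalen) {
  int rv = ngtcp2_crypto_read_write_crypto_data(conn, level, data, datalen);

  switch (rv) {
  case 0:
    return 0;
  case NGTCP2_CRYPTO_QUICTLS_ERR_TLS_WANT_X509_LOOKUP:
  case NGTCP2_CRYPTO_QUICTLS_ERR_TLS_WANT_CLIENT_HELLO_CB:
    /* The handshake is paused by an asynchronous certificate lookup or
     * client hello callback; call this function again once it finishes. */
    return rv;
  default:
    /* Any other negative value, including TLS stack specific codes below
     * -10000, is treated as fatal here. */
    return -1;
  }
}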
*/ NGTCP2_EXTERN int -ngtcp2_crypto_openssl_configure_server_context(SSL_CTX *ssl_ctx); +ngtcp2_crypto_quictls_configure_server_context(SSL_CTX *ssl_ctx); /** * @function * - * `ngtcp2_crypto_openssl_configure_client_context` configures + * `ngtcp2_crypto_quictls_configure_client_context` configures * |ssl_ctx| for client side QUIC connection. It performs the * following modifications: * @@ -123,10 +123,25 @@ ngtcp2_crypto_openssl_configure_server_context(SSL_CTX *ssl_ctx); * It returns 0 if it succeeds, or -1. */ NGTCP2_EXTERN int -ngtcp2_crypto_openssl_configure_client_context(SSL_CTX *ssl_ctx); +ngtcp2_crypto_quictls_configure_client_context(SSL_CTX *ssl_ctx); + +/** + * @function + * + * `ngtcp2_crypto_quictls_init` initializes libngtcp2_crypto_quictls + * library. This initialization is optional. For quictls >= 3.0, it + * is highly recommended to call this function before any use of + * libngtcp2_crypto library API to workaround the performance + * regression. Note that calling this function does not solve all + * performance issues introduced in 3.x. For quictls 1.1.1, this + * function does nothing, and always succeeds. + * + * This function returns 0 if it succeeds, or -1. + */ +NGTCP2_EXTERN int ngtcp2_crypto_quictls_init(void); #ifdef __cplusplus } #endif -#endif /* NGTCP2_CRYPTO_OPENSSL_H */ +#endif /* NGTCP2_CRYPTO_QUICTLS_H */ diff --git a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h index 3b10802c25b5e8..e1d621adce94d8 100644 --- a/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h +++ b/deps/ngtcp2/ngtcp2/crypto/includes/ngtcp2/ngtcp2_crypto_wolfssl.h @@ -39,23 +39,23 @@ extern "C" { * @function * * `ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level` translates - * |wolfssl_level| to :type:`ngtcp2_crypto_level`. This function is only - * available for wolfSSL backend. + * |wolfssl_level| to :type:`ngtcp2_encryption_level`. This function + * is only available for wolfSSL backend. */ -NGTCP2_EXTERN ngtcp2_crypto_level +NGTCP2_EXTERN ngtcp2_encryption_level ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( WOLFSSL_ENCRYPTION_LEVEL wolfssl_level); /** * @function * - * `ngtcp2_crypto_wolfssl_from_ngtcp2_crypto_level` translates - * |crypto_level| to WOLFSSL_ENCRYPTION_LEVEL. This function is only - * available for wolfSSL backend. + * `ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level` translates + * |encryption_level| to WOLFSSL_ENCRYPTION_LEVEL. This function is + * only available for wolfSSL backend. 
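/* A sketch (not part of the upstream diff) of the initialization order
 * suggested above for the quictls backend; certificate, ALPN and transport
 * parameter setup are application specific and omitted. */
#include <openssl/ssl.h>
#include <ngtcp2/ngtcp2_crypto_quictls.h>

static SSL_CTX *new_client_ssl_ctx(void) {
  SSL_CTX *ssl_ctx;

  /* Optional, but recommended once per process with OpenSSL 3.x so that the
   * crypto helpers reuse pre-fetched algorithms. */
  if (ngtcp2_crypto_quictls_init() != 0) {
    return NULL;
  }

  ssl_ctx = SSL_CTX_new(TLS_client_method());
  if (ssl_ctx == NULL) {
    return NULL;
  }

  /* Restricts the context to TLSv1.3 and installs the QUIC callbacks. */
  if (ngtcp2_crypto_quictls_configure_client_context(ssl_ctx) != 0) {
    SSL_CTX_free(ssl_ctx);
    return NULL;
  }

  return ssl_ctx;
}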
*/ NGTCP2_EXTERN WOLFSSL_ENCRYPTION_LEVEL -ngtcp2_crypto_wolfssl_from_ngtcp2_crypto_level( - ngtcp2_crypto_level crypto_level); +ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level( + ngtcp2_encryption_level encryption_level); /** * @function diff --git a/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c b/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c index 32d17adc6c3a35..35bfb7b2f8fa19 100644 --- a/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c +++ b/deps/ngtcp2/ngtcp2/crypto/picotls/picotls.c @@ -68,45 +68,40 @@ ngtcp2_crypto_aead *ngtcp2_crypto_aead_retry(ngtcp2_crypto_aead *aead) { return ngtcp2_crypto_aead_init(aead, (void *)&ptls_openssl_aes128gcm); } -static const ptls_aead_algorithm_t *crypto_ptls_get_aead(ptls_t *ptls) { - ptls_cipher_suite_t *cs = ptls_get_cipher(ptls); - - return cs->aead; -} - -static uint64_t crypto_ptls_get_aead_max_encryption(ptls_t *ptls) { - ptls_cipher_suite_t *cs = ptls_get_cipher(ptls); - +static uint64_t +crypto_cipher_suite_get_aead_max_encryption(ptls_cipher_suite_t *cs) { if (cs->aead == &ptls_openssl_aes128gcm || cs->aead == &ptls_openssl_aes256gcm) { return NGTCP2_CRYPTO_MAX_ENCRYPTION_AES_GCM; } +#ifdef PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 if (cs->aead == &ptls_openssl_chacha20poly1305) { return NGTCP2_CRYPTO_MAX_ENCRYPTION_CHACHA20_POLY1305; } +#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ return 0; } -static uint64_t crypto_ptls_get_aead_max_decryption_failure(ptls_t *ptls) { - ptls_cipher_suite_t *cs = ptls_get_cipher(ptls); - +static uint64_t +crypto_cipher_suite_get_aead_max_decryption_failure(ptls_cipher_suite_t *cs) { if (cs->aead == &ptls_openssl_aes128gcm || cs->aead == &ptls_openssl_aes256gcm) { return NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_AES_GCM; } +#ifdef PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 if (cs->aead == &ptls_openssl_chacha20poly1305) { return NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_CHACHA20_POLY1305; } +#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ return 0; } -static const ptls_cipher_algorithm_t *crypto_ptls_get_hp(ptls_t *ptls) { - ptls_cipher_suite_t *cs = ptls_get_cipher(ptls); - +static const ptls_cipher_algorithm_t * +crypto_cipher_suite_get_hp(ptls_cipher_suite_t *cs) { if (cs->aead == &ptls_openssl_aes128gcm) { return &ptls_openssl_aes128ctr; } @@ -115,29 +110,43 @@ static const ptls_cipher_algorithm_t *crypto_ptls_get_hp(ptls_t *ptls) { return &ptls_openssl_aes256ctr; } +#ifdef PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 if (cs->aead == &ptls_openssl_chacha20poly1305) { return &ptls_openssl_chacha20; } +#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ return NULL; } -static const ptls_hash_algorithm_t *crypto_ptls_get_md(ptls_t *ptls) { - ptls_cipher_suite_t *cs = ptls_get_cipher(ptls); - - return cs->hash; +static int supported_cipher_suite(ptls_cipher_suite_t *cs) { + return cs->aead == &ptls_openssl_aes128gcm || + cs->aead == &ptls_openssl_aes256gcm +#ifdef PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 + || cs->aead == &ptls_openssl_chacha20poly1305 +#endif /* PTLS_OPENSSL_HAVE_CHACHA20_POLY1305 */ + ; } ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls(ngtcp2_crypto_ctx *ctx, void *tls_native_handle) { ngtcp2_crypto_picotls_ctx *cptls = tls_native_handle; - ngtcp2_crypto_aead_init(&ctx->aead, - (void *)crypto_ptls_get_aead(cptls->ptls)); - ctx->md.native_handle = (void *)crypto_ptls_get_md(cptls->ptls); - ctx->hp.native_handle = (void *)crypto_ptls_get_hp(cptls->ptls); - ctx->max_encryption = crypto_ptls_get_aead_max_encryption(cptls->ptls); + ptls_cipher_suite_t *cs = ptls_get_cipher(cptls->ptls); + + if (cs == NULL) { + return NULL; + } + + if 
(!supported_cipher_suite(cs)) { + return NULL; + } + + ngtcp2_crypto_aead_init(&ctx->aead, (void *)cs->aead); + ctx->md.native_handle = (void *)cs->hash; + ctx->hp.native_handle = (void *)crypto_cipher_suite_get_hp(cs); + ctx->max_encryption = crypto_cipher_suite_get_aead_max_encryption(cs); ctx->max_decryption_failure = - crypto_ptls_get_aead_max_decryption_failure(cptls->ptls); + crypto_cipher_suite_get_aead_max_decryption_failure(cs); return ctx; } @@ -350,13 +359,14 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, return 0; } -int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, - ngtcp2_crypto_level crypto_level, - const uint8_t *data, size_t datalen) { +int ngtcp2_crypto_read_write_crypto_data( + ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, + const uint8_t *data, size_t datalen) { ngtcp2_crypto_picotls_ctx *cptls = ngtcp2_conn_get_tls_native_handle(conn); ptls_buffer_t sendbuf; size_t epoch_offsets[5] = {0}; - size_t epoch = ngtcp2_crypto_picotls_from_ngtcp2_crypto_level(crypto_level); + size_t epoch = + ngtcp2_crypto_picotls_from_ngtcp2_encryption_level(encryption_level); size_t epoch_datalen; size_t i; int rv; @@ -379,7 +389,11 @@ int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, if (!ngtcp2_conn_is_server(conn) && cptls->handshake_properties.client.early_data_acceptance == PTLS_EARLY_DATA_REJECTED) { - ngtcp2_conn_early_data_rejected(conn); + rv = ngtcp2_conn_tls_early_data_rejected(conn); + if (rv != 0) { + rv = -1; + goto fin; + } } for (i = 0; i < 4; ++i) { @@ -399,7 +413,7 @@ int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, } if (rv == 0) { - ngtcp2_conn_handshake_completed(conn); + ngtcp2_conn_tls_handshake_completed(conn); } rv = 0; @@ -432,32 +446,32 @@ int ngtcp2_crypto_set_local_transport_params(void *tls, const uint8_t *buf, return 0; } -ngtcp2_crypto_level ngtcp2_crypto_picotls_from_epoch(size_t epoch) { +ngtcp2_encryption_level ngtcp2_crypto_picotls_from_epoch(size_t epoch) { switch (epoch) { case 0: - return NGTCP2_CRYPTO_LEVEL_INITIAL; + return NGTCP2_ENCRYPTION_LEVEL_INITIAL; case 1: - return NGTCP2_CRYPTO_LEVEL_EARLY; + return NGTCP2_ENCRYPTION_LEVEL_0RTT; case 2: - return NGTCP2_CRYPTO_LEVEL_HANDSHAKE; + return NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE; case 3: - return NGTCP2_CRYPTO_LEVEL_APPLICATION; + return NGTCP2_ENCRYPTION_LEVEL_1RTT; default: assert(0); abort(); } } -size_t ngtcp2_crypto_picotls_from_ngtcp2_crypto_level( - ngtcp2_crypto_level crypto_level) { - switch (crypto_level) { - case NGTCP2_CRYPTO_LEVEL_INITIAL: +size_t ngtcp2_crypto_picotls_from_ngtcp2_encryption_level( + ngtcp2_encryption_level encryption_level) { + switch (encryption_level) { + case NGTCP2_ENCRYPTION_LEVEL_INITIAL: return 0; - case NGTCP2_CRYPTO_LEVEL_EARLY: + case NGTCP2_ENCRYPTION_LEVEL_0RTT: return 1; - case NGTCP2_CRYPTO_LEVEL_HANDSHAKE: + case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: return 2; - case NGTCP2_CRYPTO_LEVEL_APPLICATION: + case NGTCP2_ENCRYPTION_LEVEL_1RTT: return 3; default: assert(0); @@ -543,8 +557,8 @@ int ngtcp2_crypto_picotls_collected_extensions( conn_ref = *ptls_get_data_ptr(ptls); conn = conn_ref->get_conn(conn_ref); - rv = ngtcp2_conn_decode_remote_transport_params(conn, extensions->data.base, - extensions->data.len); + rv = ngtcp2_conn_decode_and_set_remote_transport_params( + conn, extensions->data.base, extensions->data.len); if (rv != 0) { ngtcp2_conn_set_tls_error(conn, rv); return -1; @@ -561,7 +575,7 @@ static int update_traffic_key_server_cb(ptls_update_traffic_key_t *self, const void 
*secret) { ngtcp2_crypto_conn_ref *conn_ref = *ptls_get_data_ptr(ptls); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); - ngtcp2_crypto_level level = ngtcp2_crypto_picotls_from_epoch(epoch); + ngtcp2_encryption_level level = ngtcp2_crypto_picotls_from_epoch(epoch); ptls_cipher_suite_t *cipher = ptls_get_cipher(ptls); size_t secretlen = cipher->hash->digest_size; ngtcp2_crypto_picotls_ctx *cptls; @@ -574,7 +588,7 @@ static int update_traffic_key_server_cb(ptls_update_traffic_key_t *self, return -1; } - if (level == NGTCP2_CRYPTO_LEVEL_HANDSHAKE) { + if (level == NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE) { /* libngtcp2 allows an application to change QUIC transport * parameters before installing Handshake tx key. We need to * wait for the key to get the correct local transport @@ -606,7 +620,7 @@ static int update_traffic_key_cb(ptls_update_traffic_key_t *self, ptls_t *ptls, int is_enc, size_t epoch, const void *secret) { ngtcp2_crypto_conn_ref *conn_ref = *ptls_get_data_ptr(ptls); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); - ngtcp2_crypto_level level = ngtcp2_crypto_picotls_from_epoch(epoch); + ngtcp2_encryption_level level = ngtcp2_crypto_picotls_from_epoch(epoch); ptls_cipher_suite_t *cipher = ptls_get_cipher(ptls); size_t secretlen = cipher->hash->digest_size; @@ -660,7 +674,7 @@ int ngtcp2_crypto_picotls_configure_client_session( ngtcp2_crypto_picotls_ctx *cptls, ngtcp2_conn *conn) { ptls_handshake_properties_t *hsprops = &cptls->handshake_properties; - hsprops->client.max_early_data_size = calloc(1, sizeof(uint32_t)); + hsprops->client.max_early_data_size = calloc(1, sizeof(size_t)); if (hsprops->client.max_early_data_size == NULL) { return -1; } diff --git a/deps/ngtcp2/ngtcp2/crypto/openssl/openssl.c b/deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c similarity index 73% rename from deps/ngtcp2/ngtcp2/crypto/openssl/openssl.c rename to deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c index 466d9e11ca6415..330ca687b44666 100644 --- a/deps/ngtcp2/ngtcp2/crypto/openssl/openssl.c +++ b/deps/ngtcp2/ngtcp2/crypto/quictls/quictls.c @@ -29,7 +29,7 @@ #include #include -#include +#include #include #include @@ -42,6 +42,168 @@ #include "shared.h" +#if OPENSSL_VERSION_NUMBER >= 0x30000000L +static int crypto_initialized; +static EVP_CIPHER *crypto_aes_128_gcm; +static EVP_CIPHER *crypto_aes_256_gcm; +static EVP_CIPHER *crypto_chacha20_poly1305; +static EVP_CIPHER *crypto_aes_128_ccm; +static EVP_CIPHER *crypto_aes_128_ctr; +static EVP_CIPHER *crypto_aes_256_ctr; +static EVP_CIPHER *crypto_chacha20; +static EVP_MD *crypto_sha256; +static EVP_MD *crypto_sha384; +static EVP_KDF *crypto_hkdf; + +int ngtcp2_crypto_quictls_init(void) { + crypto_aes_128_gcm = EVP_CIPHER_fetch(NULL, "AES-128-GCM", NULL); + if (crypto_aes_128_gcm == NULL) { + return -1; + } + + crypto_aes_256_gcm = EVP_CIPHER_fetch(NULL, "AES-256-GCM", NULL); + if (crypto_aes_256_gcm == NULL) { + return -1; + } + + crypto_chacha20_poly1305 = EVP_CIPHER_fetch(NULL, "ChaCha20-Poly1305", NULL); + if (crypto_chacha20_poly1305 == NULL) { + return -1; + } + + crypto_aes_128_ccm = EVP_CIPHER_fetch(NULL, "AES-128-CCM", NULL); + if (crypto_aes_128_ccm == NULL) { + return -1; + } + + crypto_aes_128_ctr = EVP_CIPHER_fetch(NULL, "AES-128-CTR", NULL); + if (crypto_aes_128_ctr == NULL) { + return -1; + } + + crypto_aes_256_ctr = EVP_CIPHER_fetch(NULL, "AES-256-CTR", NULL); + if (crypto_aes_256_ctr == NULL) { + return -1; + } + + crypto_chacha20 = EVP_CIPHER_fetch(NULL, "ChaCha20", NULL); + if (crypto_chacha20 == NULL) { + return -1; + } + + crypto_sha256 = 
EVP_MD_fetch(NULL, "sha256", NULL); + if (crypto_sha256 == NULL) { + return -1; + } + + crypto_sha384 = EVP_MD_fetch(NULL, "sha384", NULL); + if (crypto_sha384 == NULL) { + return -1; + } + + crypto_hkdf = EVP_KDF_fetch(NULL, "hkdf", NULL); + if (crypto_hkdf == NULL) { + return -1; + } + + crypto_initialized = 1; + + return 0; +} + +static const EVP_CIPHER *crypto_aead_aes_128_gcm(void) { + if (crypto_aes_128_gcm) { + return crypto_aes_128_gcm; + } + + return EVP_aes_128_gcm(); +} + +static const EVP_CIPHER *crypto_aead_aes_256_gcm(void) { + if (crypto_aes_256_gcm) { + return crypto_aes_256_gcm; + } + + return EVP_aes_256_gcm(); +} + +static const EVP_CIPHER *crypto_aead_chacha20_poly1305(void) { + if (crypto_chacha20_poly1305) { + return crypto_chacha20_poly1305; + } + + return EVP_chacha20_poly1305(); +} + +static const EVP_CIPHER *crypto_aead_aes_128_ccm(void) { + if (crypto_aes_128_ccm) { + return crypto_aes_128_ccm; + } + + return EVP_aes_128_ccm(); +} + +static const EVP_CIPHER *crypto_cipher_aes_128_ctr(void) { + if (crypto_aes_128_ctr) { + return crypto_aes_128_ctr; + } + + return EVP_aes_128_ctr(); +} + +static const EVP_CIPHER *crypto_cipher_aes_256_ctr(void) { + if (crypto_aes_256_ctr) { + return crypto_aes_256_ctr; + } + + return EVP_aes_256_ctr(); +} + +static const EVP_CIPHER *crypto_cipher_chacha20(void) { + if (crypto_chacha20) { + return crypto_chacha20; + } + + return EVP_chacha20(); +} + +static const EVP_MD *crypto_md_sha256(void) { + if (crypto_sha256) { + return crypto_sha256; + } + + return EVP_sha256(); +} + +static const EVP_MD *crypto_md_sha384(void) { + if (crypto_sha384) { + return crypto_sha384; + } + + return EVP_sha384(); +} + +static EVP_KDF *crypto_kdf_hkdf(void) { + if (crypto_hkdf) { + return crypto_hkdf; + } + + return EVP_KDF_fetch(NULL, "hkdf", NULL); +} +#else /* !(OPENSSL_VERSION_NUMBER >= 0x30000000L) */ +# define crypto_aead_aes_128_gcm EVP_aes_128_gcm +# define crypto_aead_aes_256_gcm EVP_aes_256_gcm +# define crypto_aead_chacha20_poly1305 EVP_chacha20_poly1305 +# define crypto_aead_aes_128_ccm EVP_aes_128_ccm +# define crypto_cipher_aes_128_ctr EVP_aes_128_ctr +# define crypto_cipher_aes_256_ctr EVP_aes_256_ctr +# define crypto_cipher_chacha20 EVP_chacha20 +# define crypto_md_sha256 EVP_sha256 +# define crypto_md_sha384 EVP_sha384 + +int ngtcp2_crypto_quictls_init(void) { return 0; } +#endif /* !(OPENSSL_VERSION_NUMBER >= 0x30000000L) */ + static size_t crypto_aead_max_overhead(const EVP_CIPHER *aead) { switch (EVP_CIPHER_nid(aead)) { case NID_aes_128_gcm: @@ -58,18 +220,18 @@ static size_t crypto_aead_max_overhead(const EVP_CIPHER *aead) { } ngtcp2_crypto_aead *ngtcp2_crypto_aead_aes_128_gcm(ngtcp2_crypto_aead *aead) { - return ngtcp2_crypto_aead_init(aead, (void *)EVP_aes_128_gcm()); + return ngtcp2_crypto_aead_init(aead, (void *)crypto_aead_aes_128_gcm()); } ngtcp2_crypto_md *ngtcp2_crypto_md_sha256(ngtcp2_crypto_md *md) { - md->native_handle = (void *)EVP_sha256(); + md->native_handle = (void *)crypto_md_sha256(); return md; } ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_initial(ngtcp2_crypto_ctx *ctx) { - ngtcp2_crypto_aead_init(&ctx->aead, (void *)EVP_aes_128_gcm()); - ctx->md.native_handle = (void *)EVP_sha256(); - ctx->hp.native_handle = (void *)EVP_aes_128_ctr(); + ngtcp2_crypto_aead_init(&ctx->aead, (void *)crypto_aead_aes_128_gcm()); + ctx->md.native_handle = (void *)crypto_md_sha256(); + ctx->hp.native_handle = (void *)crypto_cipher_aes_128_ctr(); ctx->max_encryption = 0; ctx->max_decryption_failure = 0; return ctx; @@ -83,26 +245,26 @@ 
ngtcp2_crypto_aead *ngtcp2_crypto_aead_init(ngtcp2_crypto_aead *aead, } ngtcp2_crypto_aead *ngtcp2_crypto_aead_retry(ngtcp2_crypto_aead *aead) { - return ngtcp2_crypto_aead_init(aead, (void *)EVP_aes_128_gcm()); + return ngtcp2_crypto_aead_init(aead, (void *)crypto_aead_aes_128_gcm()); } -static const EVP_CIPHER *crypto_ssl_get_aead(SSL *ssl) { - switch (SSL_CIPHER_get_id(SSL_get_current_cipher(ssl))) { +static const EVP_CIPHER *crypto_cipher_id_get_aead(uint32_t cipher_id) { + switch (cipher_id) { case TLS1_3_CK_AES_128_GCM_SHA256: - return EVP_aes_128_gcm(); + return crypto_aead_aes_128_gcm(); case TLS1_3_CK_AES_256_GCM_SHA384: - return EVP_aes_256_gcm(); + return crypto_aead_aes_256_gcm(); case TLS1_3_CK_CHACHA20_POLY1305_SHA256: - return EVP_chacha20_poly1305(); + return crypto_aead_chacha20_poly1305(); case TLS1_3_CK_AES_128_CCM_SHA256: - return EVP_aes_128_ccm(); + return crypto_aead_aes_128_ccm(); default: return NULL; } } -static uint64_t crypto_ssl_get_aead_max_encryption(SSL *ssl) { - switch (SSL_CIPHER_get_id(SSL_get_current_cipher(ssl))) { +static uint64_t crypto_cipher_id_get_aead_max_encryption(uint32_t cipher_id) { + switch (cipher_id) { case TLS1_3_CK_AES_128_GCM_SHA256: case TLS1_3_CK_AES_256_GCM_SHA384: return NGTCP2_CRYPTO_MAX_ENCRYPTION_AES_GCM; @@ -115,8 +277,9 @@ static uint64_t crypto_ssl_get_aead_max_encryption(SSL *ssl) { } } -static uint64_t crypto_ssl_get_aead_max_decryption_failure(SSL *ssl) { - switch (SSL_CIPHER_get_id(SSL_get_current_cipher(ssl))) { +static uint64_t +crypto_cipher_id_get_aead_max_decryption_failure(uint32_t cipher_id) { + switch (cipher_id) { case TLS1_3_CK_AES_128_GCM_SHA256: case TLS1_3_CK_AES_256_GCM_SHA384: return NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_AES_GCM; @@ -129,42 +292,75 @@ static uint64_t crypto_ssl_get_aead_max_decryption_failure(SSL *ssl) { } } -static const EVP_CIPHER *crypto_ssl_get_hp(SSL *ssl) { - switch (SSL_CIPHER_get_id(SSL_get_current_cipher(ssl))) { +static const EVP_CIPHER *crypto_cipher_id_get_hp(uint32_t cipher_id) { + switch (cipher_id) { case TLS1_3_CK_AES_128_GCM_SHA256: case TLS1_3_CK_AES_128_CCM_SHA256: - return EVP_aes_128_ctr(); + return crypto_cipher_aes_128_ctr(); case TLS1_3_CK_AES_256_GCM_SHA384: - return EVP_aes_256_ctr(); + return crypto_cipher_aes_256_ctr(); case TLS1_3_CK_CHACHA20_POLY1305_SHA256: - return EVP_chacha20(); + return crypto_cipher_chacha20(); default: return NULL; } } -static const EVP_MD *crypto_ssl_get_md(SSL *ssl) { - switch (SSL_CIPHER_get_id(SSL_get_current_cipher(ssl))) { +static const EVP_MD *crypto_cipher_id_get_md(uint32_t cipher_id) { + switch (cipher_id) { case TLS1_3_CK_AES_128_GCM_SHA256: case TLS1_3_CK_CHACHA20_POLY1305_SHA256: case TLS1_3_CK_AES_128_CCM_SHA256: - return EVP_sha256(); + return crypto_md_sha256(); case TLS1_3_CK_AES_256_GCM_SHA384: - return EVP_sha384(); + return crypto_md_sha384(); default: return NULL; } } +static int supported_cipher_id(uint32_t cipher_id) { + switch (cipher_id) { + case TLS1_3_CK_AES_128_GCM_SHA256: + case TLS1_3_CK_AES_256_GCM_SHA384: + case TLS1_3_CK_CHACHA20_POLY1305_SHA256: + case TLS1_3_CK_AES_128_CCM_SHA256: + return 1; + default: + return 0; + } +} + +static ngtcp2_crypto_ctx *crypto_ctx_cipher_id(ngtcp2_crypto_ctx *ctx, + uint32_t cipher_id) { + ngtcp2_crypto_aead_init(&ctx->aead, + (void *)crypto_cipher_id_get_aead(cipher_id)); + ctx->md.native_handle = (void *)crypto_cipher_id_get_md(cipher_id); + ctx->hp.native_handle = (void *)crypto_cipher_id_get_hp(cipher_id); + ctx->max_encryption = 
crypto_cipher_id_get_aead_max_encryption(cipher_id); + ctx->max_decryption_failure = + crypto_cipher_id_get_aead_max_decryption_failure(cipher_id); + + return ctx; +} + ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls(ngtcp2_crypto_ctx *ctx, void *tls_native_handle) { SSL *ssl = tls_native_handle; - ngtcp2_crypto_aead_init(&ctx->aead, (void *)crypto_ssl_get_aead(ssl)); - ctx->md.native_handle = (void *)crypto_ssl_get_md(ssl); - ctx->hp.native_handle = (void *)crypto_ssl_get_hp(ssl); - ctx->max_encryption = crypto_ssl_get_aead_max_encryption(ssl); - ctx->max_decryption_failure = crypto_ssl_get_aead_max_decryption_failure(ssl); - return ctx; + const SSL_CIPHER *cipher = SSL_get_current_cipher(ssl); + uint32_t cipher_id; + + if (cipher == NULL) { + return NULL; + } + + cipher_id = (uint32_t)SSL_CIPHER_get_id(cipher); + + if (!supported_cipher_id(cipher_id)) { + return NULL; + } + + return crypto_ctx_cipher_id(ctx, cipher_id); } ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls_early(ngtcp2_crypto_ctx *ctx, @@ -327,7 +523,7 @@ int ngtcp2_crypto_hkdf_extract(uint8_t *dest, const ngtcp2_crypto_md *md, const uint8_t *salt, size_t saltlen) { #if OPENSSL_VERSION_NUMBER >= 0x30000000L const EVP_MD *prf = md->native_handle; - EVP_KDF *kdf = EVP_KDF_fetch(NULL, "hkdf", NULL); + EVP_KDF *kdf = crypto_kdf_hkdf(); EVP_KDF_CTX *kctx = EVP_KDF_CTX_new(kdf); int mode = EVP_KDF_HKDF_MODE_EXTRACT_ONLY; OSSL_PARAM params[] = { @@ -342,7 +538,9 @@ int ngtcp2_crypto_hkdf_extract(uint8_t *dest, const ngtcp2_crypto_md *md, }; int rv = 0; - EVP_KDF_free(kdf); + if (!crypto_initialized) { + EVP_KDF_free(kdf); + } if (EVP_KDF_derive(kctx, dest, (size_t)EVP_MD_size(prf), params) <= 0) { rv = -1; @@ -382,7 +580,7 @@ int ngtcp2_crypto_hkdf_expand(uint8_t *dest, size_t destlen, size_t infolen) { #if OPENSSL_VERSION_NUMBER >= 0x30000000L const EVP_MD *prf = md->native_handle; - EVP_KDF *kdf = EVP_KDF_fetch(NULL, "hkdf", NULL); + EVP_KDF *kdf = crypto_kdf_hkdf(); EVP_KDF_CTX *kctx = EVP_KDF_CTX_new(kdf); int mode = EVP_KDF_HKDF_MODE_EXPAND_ONLY; OSSL_PARAM params[] = { @@ -397,7 +595,9 @@ int ngtcp2_crypto_hkdf_expand(uint8_t *dest, size_t destlen, }; int rv = 0; - EVP_KDF_free(kdf); + if (!crypto_initialized) { + EVP_KDF_free(kdf); + } if (EVP_KDF_derive(kctx, dest, destlen, params) <= 0) { rv = -1; @@ -436,7 +636,7 @@ int ngtcp2_crypto_hkdf(uint8_t *dest, size_t destlen, const uint8_t *info, size_t infolen) { #if OPENSSL_VERSION_NUMBER >= 0x30000000L const EVP_MD *prf = md->native_handle; - EVP_KDF *kdf = EVP_KDF_fetch(NULL, "hkdf", NULL); + EVP_KDF *kdf = crypto_kdf_hkdf(); EVP_KDF_CTX *kctx = EVP_KDF_CTX_new(kdf); OSSL_PARAM params[] = { OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST, @@ -451,7 +651,9 @@ int ngtcp2_crypto_hkdf(uint8_t *dest, size_t destlen, }; int rv = 0; - EVP_KDF_free(kdf); + if (!crypto_initialized) { + EVP_KDF_free(kdf); + } if (EVP_KDF_derive(kctx, dest, destlen, params) <= 0) { rv = -1; @@ -591,15 +793,16 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, return 0; } -int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, - ngtcp2_crypto_level crypto_level, - const uint8_t *data, size_t datalen) { +int ngtcp2_crypto_read_write_crypto_data( + ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, + const uint8_t *data, size_t datalen) { SSL *ssl = ngtcp2_conn_get_tls_native_handle(conn); int rv; int err; if (SSL_provide_quic_data( - ssl, ngtcp2_crypto_openssl_from_ngtcp2_crypto_level(crypto_level), + ssl, + 
ngtcp2_crypto_quictls_from_ngtcp2_encryption_level(encryption_level), data, datalen) != 1) { return -1; } @@ -613,9 +816,9 @@ int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, case SSL_ERROR_WANT_WRITE: return 0; case SSL_ERROR_WANT_CLIENT_HELLO_CB: - return NGTCP2_CRYPTO_OPENSSL_ERR_TLS_WANT_CLIENT_HELLO_CB; + return NGTCP2_CRYPTO_QUICTLS_ERR_TLS_WANT_CLIENT_HELLO_CB; case SSL_ERROR_WANT_X509_LOOKUP: - return NGTCP2_CRYPTO_OPENSSL_ERR_TLS_WANT_X509_LOOKUP; + return NGTCP2_CRYPTO_QUICTLS_ERR_TLS_WANT_X509_LOOKUP; case SSL_ERROR_SSL: return -1; default: @@ -623,7 +826,7 @@ int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, } } - ngtcp2_conn_handshake_completed(conn); + ngtcp2_conn_tls_handshake_completed(conn); } rv = SSL_process_quic_post_handshake(ssl); @@ -652,7 +855,7 @@ int ngtcp2_crypto_set_remote_transport_params(ngtcp2_conn *conn, void *tls) { SSL_get_peer_quic_transport_params(ssl, &tp, &tplen); - rv = ngtcp2_conn_decode_remote_transport_params(conn, tp, tplen); + rv = ngtcp2_conn_decode_and_set_remote_transport_params(conn, tp, tplen); if (rv != 0) { ngtcp2_conn_set_tls_error(conn, rv); return -1; @@ -670,17 +873,17 @@ int ngtcp2_crypto_set_local_transport_params(void *tls, const uint8_t *buf, return 0; } -ngtcp2_crypto_level ngtcp2_crypto_openssl_from_ossl_encryption_level( +ngtcp2_encryption_level ngtcp2_crypto_quictls_from_ossl_encryption_level( OSSL_ENCRYPTION_LEVEL ossl_level) { switch (ossl_level) { case ssl_encryption_initial: - return NGTCP2_CRYPTO_LEVEL_INITIAL; + return NGTCP2_ENCRYPTION_LEVEL_INITIAL; case ssl_encryption_early_data: - return NGTCP2_CRYPTO_LEVEL_EARLY; + return NGTCP2_ENCRYPTION_LEVEL_0RTT; case ssl_encryption_handshake: - return NGTCP2_CRYPTO_LEVEL_HANDSHAKE; + return NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE; case ssl_encryption_application: - return NGTCP2_CRYPTO_LEVEL_APPLICATION; + return NGTCP2_ENCRYPTION_LEVEL_1RTT; default: assert(0); abort(); /* if NDEBUG is set */ @@ -688,16 +891,16 @@ ngtcp2_crypto_level ngtcp2_crypto_openssl_from_ossl_encryption_level( } OSSL_ENCRYPTION_LEVEL -ngtcp2_crypto_openssl_from_ngtcp2_crypto_level( - ngtcp2_crypto_level crypto_level) { - switch (crypto_level) { - case NGTCP2_CRYPTO_LEVEL_INITIAL: +ngtcp2_crypto_quictls_from_ngtcp2_encryption_level( + ngtcp2_encryption_level encryption_level) { + switch (encryption_level) { + case NGTCP2_ENCRYPTION_LEVEL_INITIAL: return ssl_encryption_initial; - case NGTCP2_CRYPTO_LEVEL_HANDSHAKE: + case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: return ssl_encryption_handshake; - case NGTCP2_CRYPTO_LEVEL_APPLICATION: + case NGTCP2_ENCRYPTION_LEVEL_1RTT: return ssl_encryption_application; - case NGTCP2_CRYPTO_LEVEL_EARLY: + case NGTCP2_ENCRYPTION_LEVEL_0RTT: return ssl_encryption_early_data; default: assert(0); @@ -730,8 +933,8 @@ static int set_encryption_secrets(SSL *ssl, OSSL_ENCRYPTION_LEVEL ossl_level, const uint8_t *tx_secret, size_t secretlen) { ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); - ngtcp2_crypto_level level = - ngtcp2_crypto_openssl_from_ossl_encryption_level(ossl_level); + ngtcp2_encryption_level level = + ngtcp2_crypto_quictls_from_ossl_encryption_level(ossl_level); if (rx_secret && ngtcp2_crypto_derive_and_install_rx_key(conn, NULL, NULL, NULL, level, @@ -752,8 +955,8 @@ static int add_handshake_data(SSL *ssl, OSSL_ENCRYPTION_LEVEL ossl_level, const uint8_t *data, size_t datalen) { ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); - 
ngtcp2_crypto_level level = - ngtcp2_crypto_openssl_from_ossl_encryption_level(ossl_level); + ngtcp2_encryption_level level = + ngtcp2_crypto_quictls_from_ossl_encryption_level(ossl_level); int rv; rv = ngtcp2_conn_submit_crypto_data(conn, level, data, datalen); @@ -786,22 +989,26 @@ static SSL_QUIC_METHOD quic_method = { add_handshake_data, flush_flight, send_alert, +#ifdef LIBRESSL_VERSION_NUMBER + NULL, + NULL, +#endif /* LIBRESSL_VERSION_NUMBER */ }; -static void crypto_openssl_configure_context(SSL_CTX *ssl_ctx) { +static void crypto_quictls_configure_context(SSL_CTX *ssl_ctx) { SSL_CTX_set_min_proto_version(ssl_ctx, TLS1_3_VERSION); SSL_CTX_set_max_proto_version(ssl_ctx, TLS1_3_VERSION); SSL_CTX_set_quic_method(ssl_ctx, &quic_method); } -int ngtcp2_crypto_openssl_configure_server_context(SSL_CTX *ssl_ctx) { - crypto_openssl_configure_context(ssl_ctx); +int ngtcp2_crypto_quictls_configure_server_context(SSL_CTX *ssl_ctx) { + crypto_quictls_configure_context(ssl_ctx); return 0; } -int ngtcp2_crypto_openssl_configure_client_context(SSL_CTX *ssl_ctx) { - crypto_openssl_configure_context(ssl_ctx); +int ngtcp2_crypto_quictls_configure_client_context(SSL_CTX *ssl_ctx) { + crypto_quictls_configure_context(ssl_ctx); return 0; } diff --git a/deps/ngtcp2/ngtcp2/crypto/shared.c b/deps/ngtcp2/ngtcp2/crypto/shared.c index 78252b852b4fab..162094a375cb8b 100644 --- a/deps/ngtcp2/ngtcp2/crypto/shared.c +++ b/deps/ngtcp2/ngtcp2/crypto/shared.c @@ -64,11 +64,9 @@ int ngtcp2_crypto_hkdf_expand_label(uint8_t *dest, size_t destlen, (size_t)(p - info)); } -#define NGTCP2_CRYPTO_INITIAL_SECRETLEN 32 - -int ngtcp2_crypto_derive_initial_secrets(uint32_t version, uint8_t *rx_secret, - uint8_t *tx_secret, +int ngtcp2_crypto_derive_initial_secrets(uint8_t *rx_secret, uint8_t *tx_secret, uint8_t *initial_secret, + uint32_t version, const ngtcp2_cid *client_dcid, ngtcp2_crypto_side side) { static const uint8_t CLABEL[] = "client in"; @@ -88,16 +86,14 @@ int ngtcp2_crypto_derive_initial_secrets(uint32_t version, uint8_t *rx_secret, switch (version) { case NGTCP2_PROTO_VER_V1: + default: salt = (const uint8_t *)NGTCP2_INITIAL_SALT_V1; saltlen = sizeof(NGTCP2_INITIAL_SALT_V1) - 1; break; - case NGTCP2_PROTO_VER_V2_DRAFT: - salt = (const uint8_t *)NGTCP2_INITIAL_SALT_V2_DRAFT; - saltlen = sizeof(NGTCP2_INITIAL_SALT_V2_DRAFT) - 1; + case NGTCP2_PROTO_VER_V2: + salt = (const uint8_t *)NGTCP2_INITIAL_SALT_V2; + saltlen = sizeof(NGTCP2_INITIAL_SALT_V2) - 1; break; - default: - salt = (const uint8_t *)NGTCP2_INITIAL_SALT_DRAFT; - saltlen = sizeof(NGTCP2_INITIAL_SALT_DRAFT) - 1; } if (ngtcp2_crypto_hkdf_extract(initial_secret, &ctx.md, client_dcid->data, @@ -139,9 +135,9 @@ int ngtcp2_crypto_derive_packet_protection_key( static const uint8_t KEY_LABEL_V1[] = "quic key"; static const uint8_t IV_LABEL_V1[] = "quic iv"; static const uint8_t HP_KEY_LABEL_V1[] = "quic hp"; - static const uint8_t KEY_LABEL_V2_DRAFT[] = "quicv2 key"; - static const uint8_t IV_LABEL_V2_DRAFT[] = "quicv2 iv"; - static const uint8_t HP_KEY_LABEL_V2_DRAFT[] = "quicv2 hp"; + static const uint8_t KEY_LABEL_V2[] = "quicv2 key"; + static const uint8_t IV_LABEL_V2[] = "quicv2 iv"; + static const uint8_t HP_KEY_LABEL_V2[] = "quicv2 hp"; size_t keylen = ngtcp2_crypto_aead_keylen(aead); size_t ivlen = ngtcp2_crypto_packet_protection_ivlen(aead); const uint8_t *key_label; @@ -152,13 +148,13 @@ int ngtcp2_crypto_derive_packet_protection_key( size_t hp_key_labellen; switch (version) { - case NGTCP2_PROTO_VER_V2_DRAFT: - key_label = KEY_LABEL_V2_DRAFT; - 
key_labellen = sizeof(KEY_LABEL_V2_DRAFT) - 1; - iv_label = IV_LABEL_V2_DRAFT; - iv_labellen = sizeof(IV_LABEL_V2_DRAFT) - 1; - hp_key_label = HP_KEY_LABEL_V2_DRAFT; - hp_key_labellen = sizeof(HP_KEY_LABEL_V2_DRAFT) - 1; + case NGTCP2_PROTO_VER_V2: + key_label = KEY_LABEL_V2; + key_labellen = sizeof(KEY_LABEL_V2) - 1; + iv_label = IV_LABEL_V2; + iv_labellen = sizeof(IV_LABEL_V2) - 1; + hp_key_label = HP_KEY_LABEL_V2; + hp_key_labellen = sizeof(HP_KEY_LABEL_V2) - 1; break; default: key_label = KEY_LABEL_V1; @@ -188,14 +184,27 @@ int ngtcp2_crypto_derive_packet_protection_key( return 0; } -int ngtcp2_crypto_update_traffic_secret(uint8_t *dest, +int ngtcp2_crypto_update_traffic_secret(uint8_t *dest, uint32_t version, const ngtcp2_crypto_md *md, const uint8_t *secret, size_t secretlen) { static const uint8_t LABEL[] = "quic ku"; + static const uint8_t LABEL_V2[] = "quicv2 ku"; + const uint8_t *label; + size_t labellen; + + switch (version) { + case NGTCP2_PROTO_VER_V2: + label = LABEL_V2; + labellen = sizeof(LABEL_V2) - 1; + break; + default: + label = LABEL; + labellen = sizeof(LABEL) - 1; + } if (ngtcp2_crypto_hkdf_expand_label(dest, secretlen, md, secret, secretlen, - LABEL, sizeof(LABEL) - 1) != 0) { + label, labellen) != 0) { return -1; } @@ -204,7 +213,7 @@ int ngtcp2_crypto_update_traffic_secret(uint8_t *dest, int ngtcp2_crypto_derive_and_install_rx_key(ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp_key, - ngtcp2_crypto_level level, + ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen) { const ngtcp2_crypto_ctx *ctx; @@ -220,7 +229,7 @@ int ngtcp2_crypto_derive_and_install_rx_key(ngtcp2_conn *conn, uint8_t *key, ngtcp2_crypto_ctx cctx; uint32_t version; - if (level == NGTCP2_CRYPTO_LEVEL_EARLY && !ngtcp2_conn_is_server(conn)) { + if (level == NGTCP2_ENCRYPTION_LEVEL_0RTT && !ngtcp2_conn_is_server(conn)) { return 0; } @@ -235,13 +244,16 @@ int ngtcp2_crypto_derive_and_install_rx_key(ngtcp2_conn *conn, uint8_t *key, } switch (level) { - case NGTCP2_CRYPTO_LEVEL_EARLY: - ngtcp2_crypto_ctx_tls_early(&cctx, tls); - ngtcp2_conn_set_early_crypto_ctx(conn, &cctx); - ctx = ngtcp2_conn_get_early_crypto_ctx(conn); + case NGTCP2_ENCRYPTION_LEVEL_0RTT: + if (ngtcp2_crypto_ctx_tls_early(&cctx, tls) == NULL) { + return -1; + } + + ngtcp2_conn_set_0rtt_crypto_ctx(conn, &cctx); + ctx = ngtcp2_conn_get_0rtt_crypto_ctx(conn); version = ngtcp2_conn_get_client_chosen_version(conn); break; - case NGTCP2_CRYPTO_LEVEL_HANDSHAKE: + case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: if (ngtcp2_conn_is_server(conn) && !ngtcp2_conn_get_negotiated_version(conn)) { rv = ngtcp2_crypto_set_remote_transport_params(conn, tls); @@ -250,15 +262,21 @@ int ngtcp2_crypto_derive_and_install_rx_key(ngtcp2_conn *conn, uint8_t *key, } } /* fall through */ - default: + case NGTCP2_ENCRYPTION_LEVEL_1RTT: ctx = ngtcp2_conn_get_crypto_ctx(conn); version = ngtcp2_conn_get_negotiated_version(conn); if (!ctx->aead.native_handle) { - ngtcp2_crypto_ctx_tls(&cctx, tls); + if (ngtcp2_crypto_ctx_tls(&cctx, tls) == NULL) { + return -1; + } + ngtcp2_conn_set_crypto_ctx(conn, &cctx); ctx = ngtcp2_conn_get_crypto_ctx(conn); } + break; + default: + return -1; } aead = &ctx->aead; @@ -280,20 +298,20 @@ int ngtcp2_crypto_derive_and_install_rx_key(ngtcp2_conn *conn, uint8_t *key, } switch (level) { - case NGTCP2_CRYPTO_LEVEL_EARLY: - rv = ngtcp2_conn_install_early_key(conn, &aead_ctx, iv, ivlen, &hp_ctx); + case NGTCP2_ENCRYPTION_LEVEL_0RTT: + rv = ngtcp2_conn_install_0rtt_key(conn, &aead_ctx, iv, ivlen, &hp_ctx); if (rv != 0) { 
goto fail; } break; - case NGTCP2_CRYPTO_LEVEL_HANDSHAKE: + case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: rv = ngtcp2_conn_install_rx_handshake_key(conn, &aead_ctx, iv, ivlen, &hp_ctx); if (rv != 0) { goto fail; } break; - case NGTCP2_CRYPTO_LEVEL_APPLICATION: + case NGTCP2_ENCRYPTION_LEVEL_1RTT: if (!ngtcp2_conn_is_server(conn)) { rv = ngtcp2_crypto_set_remote_transport_params(conn, tls); if (rv != 0) { @@ -345,7 +363,7 @@ static int crypto_set_local_transport_params(ngtcp2_conn *conn, void *tls) { int ngtcp2_crypto_derive_and_install_tx_key(ngtcp2_conn *conn, uint8_t *key, uint8_t *iv, uint8_t *hp_key, - ngtcp2_crypto_level level, + ngtcp2_encryption_level level, const uint8_t *secret, size_t secretlen) { const ngtcp2_crypto_ctx *ctx; @@ -361,7 +379,7 @@ int ngtcp2_crypto_derive_and_install_tx_key(ngtcp2_conn *conn, uint8_t *key, ngtcp2_crypto_ctx cctx; uint32_t version; - if (level == NGTCP2_CRYPTO_LEVEL_EARLY && ngtcp2_conn_is_server(conn)) { + if (level == NGTCP2_ENCRYPTION_LEVEL_0RTT && ngtcp2_conn_is_server(conn)) { return 0; } @@ -376,13 +394,16 @@ int ngtcp2_crypto_derive_and_install_tx_key(ngtcp2_conn *conn, uint8_t *key, } switch (level) { - case NGTCP2_CRYPTO_LEVEL_EARLY: - ngtcp2_crypto_ctx_tls_early(&cctx, tls); - ngtcp2_conn_set_early_crypto_ctx(conn, &cctx); - ctx = ngtcp2_conn_get_early_crypto_ctx(conn); + case NGTCP2_ENCRYPTION_LEVEL_0RTT: + if (ngtcp2_crypto_ctx_tls_early(&cctx, tls) == NULL) { + return -1; + } + + ngtcp2_conn_set_0rtt_crypto_ctx(conn, &cctx); + ctx = ngtcp2_conn_get_0rtt_crypto_ctx(conn); version = ngtcp2_conn_get_client_chosen_version(conn); break; - case NGTCP2_CRYPTO_LEVEL_HANDSHAKE: + case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: if (ngtcp2_conn_is_server(conn) && !ngtcp2_conn_get_negotiated_version(conn)) { rv = ngtcp2_crypto_set_remote_transport_params(conn, tls); @@ -391,15 +412,21 @@ int ngtcp2_crypto_derive_and_install_tx_key(ngtcp2_conn *conn, uint8_t *key, } } /* fall through */ - default: + case NGTCP2_ENCRYPTION_LEVEL_1RTT: ctx = ngtcp2_conn_get_crypto_ctx(conn); version = ngtcp2_conn_get_negotiated_version(conn); if (!ctx->aead.native_handle) { - ngtcp2_crypto_ctx_tls(&cctx, tls); + if (ngtcp2_crypto_ctx_tls(&cctx, tls) == NULL) { + return -1; + } + ngtcp2_conn_set_crypto_ctx(conn, &cctx); ctx = ngtcp2_conn_get_crypto_ctx(conn); } + break; + default: + return -1; } aead = &ctx->aead; @@ -421,13 +448,13 @@ int ngtcp2_crypto_derive_and_install_tx_key(ngtcp2_conn *conn, uint8_t *key, } switch (level) { - case NGTCP2_CRYPTO_LEVEL_EARLY: - rv = ngtcp2_conn_install_early_key(conn, &aead_ctx, iv, ivlen, &hp_ctx); + case NGTCP2_ENCRYPTION_LEVEL_0RTT: + rv = ngtcp2_conn_install_0rtt_key(conn, &aead_ctx, iv, ivlen, &hp_ctx); if (rv != 0) { goto fail; } break; - case NGTCP2_CRYPTO_LEVEL_HANDSHAKE: + case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: rv = ngtcp2_conn_install_tx_handshake_key(conn, &aead_ctx, iv, ivlen, &hp_ctx); if (rv != 0) { @@ -440,7 +467,7 @@ int ngtcp2_crypto_derive_and_install_tx_key(ngtcp2_conn *conn, uint8_t *key, } break; - case NGTCP2_CRYPTO_LEVEL_APPLICATION: + case NGTCP2_ENCRYPTION_LEVEL_1RTT: rv = ngtcp2_conn_install_tx_key(conn, secret, secretlen, &aead_ctx, iv, ivlen, &hp_ctx); if (rv != 0) { @@ -521,7 +548,7 @@ int ngtcp2_crypto_derive_and_install_initial_key( ngtcp2_conn_set_initial_crypto_ctx(conn, &ctx); if (ngtcp2_crypto_derive_initial_secrets( - version, rx_secret, tx_secret, initial_secret, client_dcid, + rx_secret, tx_secret, initial_secret, version, client_dcid, server ? 
NGTCP2_CRYPTO_SIDE_SERVER : NGTCP2_CRYPTO_SIDE_CLIENT) != 0) { return -1; @@ -564,16 +591,14 @@ int ngtcp2_crypto_derive_and_install_initial_key( switch (version) { case NGTCP2_PROTO_VER_V1: + default: retry_key = (const uint8_t *)NGTCP2_RETRY_KEY_V1; retry_noncelen = sizeof(NGTCP2_RETRY_NONCE_V1) - 1; break; - case NGTCP2_PROTO_VER_V2_DRAFT: - retry_key = (const uint8_t *)NGTCP2_RETRY_KEY_V2_DRAFT; - retry_noncelen = sizeof(NGTCP2_RETRY_NONCE_V2_DRAFT) - 1; + case NGTCP2_PROTO_VER_V2: + retry_key = (const uint8_t *)NGTCP2_RETRY_KEY_V2; + retry_noncelen = sizeof(NGTCP2_RETRY_NONCE_V2) - 1; break; - default: - retry_key = (const uint8_t *)NGTCP2_RETRY_KEY_DRAFT; - retry_noncelen = sizeof(NGTCP2_RETRY_NONCE_DRAFT) - 1; } if (ngtcp2_crypto_aead_ctx_encrypt_init(&retry_aead_ctx, &retry_aead, @@ -657,7 +682,7 @@ int ngtcp2_crypto_derive_and_install_vneg_initial_key( } if (ngtcp2_crypto_derive_initial_secrets( - version, rx_secret, tx_secret, initial_secret, client_dcid, + rx_secret, tx_secret, initial_secret, version, client_dcid, server ? NGTCP2_CRYPTO_SIDE_SERVER : NGTCP2_CRYPTO_SIDE_CLIENT) != 0) { return -1; @@ -725,8 +750,8 @@ int ngtcp2_crypto_update_key( size_t ivlen = ngtcp2_crypto_packet_protection_ivlen(aead); uint32_t version = ngtcp2_conn_get_negotiated_version(conn); - if (ngtcp2_crypto_update_traffic_secret(rx_secret, md, current_rx_secret, - secretlen) != 0) { + if (ngtcp2_crypto_update_traffic_secret(rx_secret, version, md, + current_rx_secret, secretlen) != 0) { return -1; } @@ -735,8 +760,8 @@ int ngtcp2_crypto_update_key( return -1; } - if (ngtcp2_crypto_update_traffic_secret(tx_secret, md, current_tx_secret, - secretlen) != 0) { + if (ngtcp2_crypto_update_traffic_secret(tx_secret, version, md, + current_tx_secret, secretlen) != 0) { return -1; } @@ -883,6 +908,7 @@ static size_t crypto_generate_retry_token_aad(uint8_t *dest, uint32_t version, version = ngtcp2_htonl(version); memcpy(p, &version, sizeof(version)); + p += sizeof(version); memcpy(p, sa, (size_t)salen); p += salen; memcpy(p, retry_scid->data, retry_scid->datalen); @@ -907,13 +933,15 @@ ngtcp2_ssize ngtcp2_crypto_generate_retry_token( ngtcp2_crypto_md md; ngtcp2_crypto_aead_ctx aead_ctx; size_t plaintextlen; - uint8_t aad[sizeof(version) + sizeof(ngtcp2_sockaddr_storage) + - NGTCP2_MAX_CIDLEN]; + uint8_t + aad[sizeof(version) + sizeof(ngtcp2_sockaddr_union) + NGTCP2_MAX_CIDLEN]; size_t aadlen; uint8_t *p = plaintext; ngtcp2_tstamp ts_be = ngtcp2_htonl64(ts); int rv; + assert((size_t)remote_addrlen <= sizeof(ngtcp2_sockaddr_union)); + memset(plaintext, 0, sizeof(plaintext)); *p++ = (uint8_t)odcid->datalen; @@ -984,8 +1012,8 @@ int ngtcp2_crypto_verify_retry_token( ngtcp2_crypto_aead_ctx aead_ctx; ngtcp2_crypto_aead aead; ngtcp2_crypto_md md; - uint8_t aad[sizeof(version) + sizeof(ngtcp2_sockaddr_storage) + - NGTCP2_MAX_CIDLEN]; + uint8_t + aad[sizeof(version) + sizeof(ngtcp2_sockaddr_union) + NGTCP2_MAX_CIDLEN]; size_t aadlen; const uint8_t *rand_data; const uint8_t *ciphertext; @@ -994,6 +1022,8 @@ int ngtcp2_crypto_verify_retry_token( int rv; ngtcp2_tstamp gen_ts; + assert((size_t)remote_addrlen <= sizeof(ngtcp2_sockaddr_union)); + if (tokenlen != NGTCP2_CRYPTO_MAX_RETRY_TOKENLEN || token[0] != NGTCP2_CRYPTO_TOKEN_MAGIC_RETRY) { return -1; @@ -1034,7 +1064,9 @@ int ngtcp2_crypto_verify_retry_token( cil = plaintext[0]; - assert(cil == 0 || (cil >= NGTCP2_MIN_CIDLEN && cil <= NGTCP2_MAX_CIDLEN)); + if (cil != 0 && (cil < NGTCP2_MIN_CIDLEN || cil > NGTCP2_MAX_CIDLEN)) { + return -1; + } memcpy(&gen_ts, plaintext + 
/* cid len = */ 1 + NGTCP2_MAX_CIDLEN, sizeof(gen_ts)); @@ -1055,11 +1087,11 @@ static size_t crypto_generate_regular_token_aad(uint8_t *dest, size_t addrlen; switch (sa->sa_family) { - case AF_INET: + case NGTCP2_AF_INET: addr = (const uint8_t *)&((const ngtcp2_sockaddr_in *)(void *)sa)->sin_addr; addrlen = sizeof(((const ngtcp2_sockaddr_in *)(void *)sa)->sin_addr); break; - case AF_INET6: + case NGTCP2_AF_INET6: addr = (const uint8_t *)&((const ngtcp2_sockaddr_in6 *)(void *)sa)->sin6_addr; addrlen = sizeof(((const ngtcp2_sockaddr_in6 *)(void *)sa)->sin6_addr); @@ -1234,8 +1266,8 @@ ngtcp2_ssize ngtcp2_crypto_write_connection_close( ngtcp2_crypto_ctx_initial(&ctx); - if (ngtcp2_crypto_derive_initial_secrets(version, rx_secret, tx_secret, - initial_secret, scid, + if (ngtcp2_crypto_derive_initial_secrets(rx_secret, tx_secret, initial_secret, + version, scid, NGTCP2_CRYPTO_SIDE_SERVER) != 0) { return -1; } @@ -1287,16 +1319,14 @@ ngtcp2_ssize ngtcp2_crypto_write_retry(uint8_t *dest, size_t destlen, switch (version) { case NGTCP2_PROTO_VER_V1: + default: key = (const uint8_t *)NGTCP2_RETRY_KEY_V1; noncelen = sizeof(NGTCP2_RETRY_NONCE_V1) - 1; break; - case NGTCP2_PROTO_VER_V2_DRAFT: - key = (const uint8_t *)NGTCP2_RETRY_KEY_V2_DRAFT; - noncelen = sizeof(NGTCP2_RETRY_NONCE_V2_DRAFT) - 1; + case NGTCP2_PROTO_VER_V2: + key = (const uint8_t *)NGTCP2_RETRY_KEY_V2; + noncelen = sizeof(NGTCP2_RETRY_NONCE_V2) - 1; break; - default: - key = (const uint8_t *)NGTCP2_RETRY_KEY_DRAFT; - noncelen = sizeof(NGTCP2_RETRY_NONCE_DRAFT) - 1; } if (ngtcp2_crypto_aead_ctx_encrypt_init(&aead_ctx, &aead, key, noncelen) != @@ -1331,8 +1361,8 @@ int ngtcp2_crypto_client_initial_cb(ngtcp2_conn *conn, void *user_data) { return NGTCP2_ERR_CALLBACK_FAILURE; } - if (ngtcp2_crypto_read_write_crypto_data(conn, NGTCP2_CRYPTO_LEVEL_INITIAL, - NULL, 0) != 0) { + if (ngtcp2_crypto_read_write_crypto_data( + conn, NGTCP2_ENCRYPTION_LEVEL_INITIAL, NULL, 0) != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -1398,15 +1428,15 @@ void ngtcp2_crypto_delete_crypto_cipher_ctx_cb( } int ngtcp2_crypto_recv_crypto_data_cb(ngtcp2_conn *conn, - ngtcp2_crypto_level crypto_level, + ngtcp2_encryption_level encryption_level, uint64_t offset, const uint8_t *data, size_t datalen, void *user_data) { int rv; (void)offset; (void)user_data; - if (ngtcp2_crypto_read_write_crypto_data(conn, crypto_level, data, datalen) != - 0) { + if (ngtcp2_crypto_read_write_crypto_data(conn, encryption_level, data, + datalen) != 0) { rv = ngtcp2_conn_get_tls_error(conn); if (rv) { return rv; diff --git a/deps/ngtcp2/ngtcp2/crypto/shared.h b/deps/ngtcp2/ngtcp2/crypto/shared.h index 02b948901ae40e..d69fd21212d7d2 100644 --- a/deps/ngtcp2/ngtcp2/crypto/shared.h +++ b/deps/ngtcp2/ngtcp2/crypto/shared.h @@ -31,16 +31,6 @@ #include -/** - * @macro - * - * :macro:`NGTCP2_INITIAL_SALT_DRAFT` is a salt value which is used to - * derive initial secret. It is used for QUIC draft versions. - */ -#define NGTCP2_INITIAL_SALT_DRAFT \ - "\xaf\xbf\xec\x28\x99\x93\xd2\x4c\x9e\x97\x86\xf1\x9c\x61\x11\xe0\x43\x90" \ - "\xa8\x99" - /** * @macro * @@ -54,12 +44,12 @@ /** * @macro * - * :macro:`NGTCP2_INITIAL_SALT_V2_DRAFT` is a salt value which is used to - * derive initial secret. It is used for QUIC v2 draft. + * :macro:`NGTCP2_INITIAL_SALT_V2` is a salt value which is used to + * derive initial secret. It is used for QUIC v2. 
*/ -#define NGTCP2_INITIAL_SALT_V2_DRAFT \ - "\xa7\x07\xc2\x03\xa5\x9b\x47\x18\x4a\x1d\x62\xca\x57\x04\x06\xea\x7a\xe3" \ - "\xe5\xd3" +#define NGTCP2_INITIAL_SALT_V2 \ + "\x0d\xed\xe3\xde\xf7\x00\xa6\xdb\x81\x93\x81\xbe\x6e\x26\x9d\xcb\xf9\xbd" \ + "\x2e\xd9" /* Maximum key usage (encryption) limits */ #define NGTCP2_CRYPTO_MAX_ENCRYPTION_AES_GCM (1ULL << 23) @@ -72,6 +62,30 @@ #define NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_CHACHA20_POLY1305 (1ULL << 36) #define NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_AES_CCM (2965820ULL) +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_INITIAL_SECRETLEN` is the length of secret + * for Initial packets. + */ +#define NGTCP2_CRYPTO_INITIAL_SECRETLEN 32 + +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_INITIAL_KEYLEN` is the length of key for + * Initial packets. + */ +#define NGTCP2_CRYPTO_INITIAL_KEYLEN 16 + +/** + * @macro + * + * :macro:`NGTCP2_CRYPTO_INITIAL_IVLEN` is the length of IV for + * Initial packets. + */ +#define NGTCP2_CRYPTO_INITIAL_IVLEN 12 + /** * @function * @@ -86,7 +100,7 @@ ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_initial(ngtcp2_crypto_ctx *ctx); * `ngtcp2_crypto_aead_init` initializes |aead| with the provided * |aead_native_handle| which is an underlying AEAD object. * - * If libngtcp2_crypto_openssl is linked, |aead_native_handle| must be + * If libngtcp2_crypto_quictls is linked, |aead_native_handle| must be * a pointer to EVP_CIPHER. * * If libngtcp2_crypto_gnutls is linked, |aead_native_handle| must be @@ -106,6 +120,25 @@ ngtcp2_crypto_aead *ngtcp2_crypto_aead_init(ngtcp2_crypto_aead *aead, */ ngtcp2_crypto_aead *ngtcp2_crypto_aead_retry(ngtcp2_crypto_aead *aead); +/** + * @enum + * + * :type:`ngtcp2_crypto_side` indicates which side the application + * implements; client or server. + */ +typedef enum ngtcp2_crypto_side { + /** + * :enum:`NGTCP2_CRYPTO_SIDE_CLIENT` indicates that the application + * is client. + */ + NGTCP2_CRYPTO_SIDE_CLIENT, + /** + * :enum:`NGTCP2_CRYPTO_SIDE_SERVER` indicates that the application + * is server. + */ + NGTCP2_CRYPTO_SIDE_SERVER +} ngtcp2_crypto_side; + /** * @function * @@ -122,9 +155,9 @@ ngtcp2_crypto_aead *ngtcp2_crypto_aead_retry(ngtcp2_crypto_aead *aead); * * This function returns 0 if it succeeds, or -1. */ -int ngtcp2_crypto_derive_initial_secrets(uint32_t version, uint8_t *rx_secret, - uint8_t *tx_secret, +int ngtcp2_crypto_derive_initial_secrets(uint8_t *rx_secret, uint8_t *tx_secret, uint8_t *initial_secret, + uint32_t version, const ngtcp2_cid *client_dcid, ngtcp2_crypto_side side); @@ -168,7 +201,7 @@ int ngtcp2_crypto_derive_packet_protection_key(uint8_t *key, uint8_t *iv, * * This function returns 0 if it succeeds, or -1. */ -int ngtcp2_crypto_update_traffic_secret(uint8_t *dest, +int ngtcp2_crypto_update_traffic_secret(uint8_t *dest, uint32_t version, const ngtcp2_crypto_md *md, const uint8_t *secret, size_t secretlen); @@ -181,7 +214,7 @@ int ngtcp2_crypto_update_traffic_secret(uint8_t *dest, * pointed by |buf| of length |len|, to the native handle |tls|. * * |tls| points to a implementation dependent TLS session object. If - * libngtcp2_crypto_openssl is linked, |tls| must be a pointer to SSL + * libngtcp2_crypto_quictls is linked, |tls| must be a pointer to SSL * object. * * This function returns 0 if it succeeds, or -1. @@ -197,7 +230,7 @@ int ngtcp2_crypto_set_local_transport_params(void *tls, const uint8_t *buf, * `ngtcp2_conn_set_remote_transport_params`. * * |tls| points to a implementation dependent TLS session object. 
If - * libngtcp2_crypto_openssl is linked, |tls| must be a pointer to SSL + * libngtcp2_crypto_quictls is linked, |tls| must be a pointer to SSL * object. * * This function returns 0 if it succeeds, or -1. @@ -347,4 +380,18 @@ ngtcp2_crypto_aead *ngtcp2_crypto_aead_aes_128_gcm(ngtcp2_crypto_aead *aead); */ int ngtcp2_crypto_random(uint8_t *data, size_t datalen); +/** + * @function + * + * `ngtcp2_crypto_hkdf_expand_label` performs HKDF expand label. The + * result is |destlen| bytes long, and is stored to the buffer pointed + * by |dest|. + * + * This function returns 0 if it succeeds, or -1. + */ +int ngtcp2_crypto_hkdf_expand_label(uint8_t *dest, size_t destlen, + const ngtcp2_crypto_md *md, + const uint8_t *secret, size_t secretlen, + const uint8_t *label, size_t labellen); + #endif /* NGTCP2_SHARED_H */ diff --git a/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c b/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c index 9a58b9be2b76e9..2b7b5321863915 100644 --- a/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c +++ b/deps/ngtcp2/ngtcp2/crypto/wolfssl/wolfssl.c @@ -73,9 +73,8 @@ ngtcp2_crypto_aead *ngtcp2_crypto_aead_retry(ngtcp2_crypto_aead *aead) { return ngtcp2_crypto_aead_init(aead, (void *)wolfSSL_EVP_aes_128_gcm()); } -static uint64_t crypto_wolfssl_get_aead_max_encryption(WOLFSSL *ssl) { - const WOLFSSL_EVP_CIPHER *aead = wolfSSL_quic_get_aead(ssl); - +static uint64_t +crypto_aead_get_aead_max_encryption(const WOLFSSL_EVP_CIPHER *aead) { if (wolfSSL_quic_aead_is_gcm(aead)) { return NGTCP2_CRYPTO_MAX_ENCRYPTION_AES_GCM; } @@ -88,9 +87,8 @@ static uint64_t crypto_wolfssl_get_aead_max_encryption(WOLFSSL *ssl) { return 0; } -static uint64_t crypto_wolfssl_get_aead_max_decryption_failure(WOLFSSL *ssl) { - const WOLFSSL_EVP_CIPHER *aead = wolfSSL_quic_get_aead(ssl); - +static uint64_t +crypto_aead_get_aead_max_decryption_failure(const WOLFSSL_EVP_CIPHER *aead) { if (wolfSSL_quic_aead_is_gcm(aead)) { return NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_AES_GCM; } @@ -103,16 +101,30 @@ static uint64_t crypto_wolfssl_get_aead_max_decryption_failure(WOLFSSL *ssl) { return 0; } +static int supported_aead(const WOLFSSL_EVP_CIPHER *aead) { + return wolfSSL_quic_aead_is_gcm(aead) || + wolfSSL_quic_aead_is_chacha20(aead) || wolfSSL_quic_aead_is_ccm(aead); +} + ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls(ngtcp2_crypto_ctx *ctx, void *tls_native_handle) { WOLFSSL *ssl = tls_native_handle; + const WOLFSSL_EVP_CIPHER *aead = wolfSSL_quic_get_aead(ssl); - ngtcp2_crypto_aead_init(&ctx->aead, (void *)wolfSSL_quic_get_aead(ssl)); + if (aead == NULL) { + return NULL; + } + + if (!supported_aead(aead)) { + return NULL; + } + + ngtcp2_crypto_aead_init(&ctx->aead, (void *)aead); ctx->md.native_handle = (void *)wolfSSL_quic_get_md(ssl); ctx->hp.native_handle = (void *)wolfSSL_quic_get_hp(ssl); - ctx->max_encryption = crypto_wolfssl_get_aead_max_encryption(ssl); + ctx->max_encryption = crypto_aead_get_aead_max_encryption(aead); ctx->max_decryption_failure = - crypto_wolfssl_get_aead_max_decryption_failure(ssl); + crypto_aead_get_aead_max_decryption_failure(aead); return ctx; } @@ -211,6 +223,7 @@ int ngtcp2_crypto_hkdf_extract(uint8_t *dest, const ngtcp2_crypto_md *md, const uint8_t *salt, size_t saltlen) { if (wolfSSL_quic_hkdf_extract(dest, md->native_handle, secret, secretlen, salt, saltlen) != WOLFSSL_SUCCESS) { + DEBUG_MSG("WOLFSSL: wolfSSL_quic_hkdf_extract FAILED\n"); return -1; } return 0; @@ -222,6 +235,7 @@ int ngtcp2_crypto_hkdf_expand(uint8_t *dest, size_t destlen, size_t infolen) { if (wolfSSL_quic_hkdf_expand(dest, 
destlen, md->native_handle, secret, secretlen, info, infolen) != WOLFSSL_SUCCESS) { + DEBUG_MSG("WOLFSSL: wolfSSL_quic_hkdf_expand FAILED\n"); return -1; } return 0; @@ -233,6 +247,7 @@ int ngtcp2_crypto_hkdf(uint8_t *dest, size_t destlen, const uint8_t *info, size_t infolen) { if (wolfSSL_quic_hkdf(dest, destlen, md->native_handle, secret, secretlen, salt, saltlen, info, infolen) != WOLFSSL_SUCCESS) { + DEBUG_MSG("WOLFSSL: wolfSSL_quic_hkdf FAILED\n"); return -1; } return 0; @@ -286,18 +301,19 @@ int ngtcp2_crypto_hp_mask(uint8_t *dest, const ngtcp2_crypto_cipher *hp, sizeof(PLAINTEXT) - 1) != WOLFSSL_SUCCESS || wolfSSL_EVP_EncryptFinal_ex(actx, dest + sizeof(PLAINTEXT) - 1, &len) != WOLFSSL_SUCCESS) { + DEBUG_MSG("WOLFSSL: hp_mask FAILED\n"); return -1; } return 0; } -int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, - ngtcp2_crypto_level crypto_level, - const uint8_t *data, size_t datalen) { +int ngtcp2_crypto_read_write_crypto_data( + ngtcp2_conn *conn, ngtcp2_encryption_level encryption_level, + const uint8_t *data, size_t datalen) { WOLFSSL *ssl = ngtcp2_conn_get_tls_native_handle(conn); WOLFSSL_ENCRYPTION_LEVEL level = - ngtcp2_crypto_wolfssl_from_ngtcp2_crypto_level(crypto_level); + ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level(encryption_level); int rv; int err; @@ -313,9 +329,9 @@ int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, if (!ngtcp2_conn_get_handshake_completed(conn)) { rv = wolfSSL_quic_do_handshake(ssl); - DEBUG_MSG("WOLFSSL: do_handshake, rv=%d\n", rv); if (rv <= 0) { err = wolfSSL_get_error(ssl, rv); + DEBUG_MSG("WOLFSSL: do_handshake, rv=%d, err=%d\n", rv, err); switch (err) { case SSL_ERROR_WANT_READ: case SSL_ERROR_WANT_WRITE: @@ -328,7 +344,7 @@ int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, } DEBUG_MSG("WOLFSSL: handshake done\n"); - ngtcp2_conn_handshake_completed(conn); + ngtcp2_conn_tls_handshake_completed(conn); } rv = wolfSSL_process_quic_post_handshake(ssl); @@ -359,7 +375,7 @@ int ngtcp2_crypto_set_remote_transport_params(ngtcp2_conn *conn, void *tls) { wolfSSL_get_peer_quic_transport_params(ssl, &tp, &tplen); DEBUG_MSG("WOLFSSL: get peer transport params, len=%lu\n", tplen); - rv = ngtcp2_conn_decode_remote_transport_params(conn, tp, tplen); + rv = ngtcp2_conn_decode_and_set_remote_transport_params(conn, tp, tplen); if (rv != 0) { DEBUG_MSG("WOLFSSL: decode peer transport params failed, rv=%d\n", rv); ngtcp2_conn_set_tls_error(conn, rv); @@ -380,17 +396,17 @@ int ngtcp2_crypto_set_local_transport_params(void *tls, const uint8_t *buf, return 0; } -ngtcp2_crypto_level ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( +ngtcp2_encryption_level ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( WOLFSSL_ENCRYPTION_LEVEL wolfssl_level) { switch (wolfssl_level) { case wolfssl_encryption_initial: - return NGTCP2_CRYPTO_LEVEL_INITIAL; + return NGTCP2_ENCRYPTION_LEVEL_INITIAL; case wolfssl_encryption_early_data: - return NGTCP2_CRYPTO_LEVEL_EARLY; + return NGTCP2_ENCRYPTION_LEVEL_0RTT; case wolfssl_encryption_handshake: - return NGTCP2_CRYPTO_LEVEL_HANDSHAKE; + return NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE; case wolfssl_encryption_application: - return NGTCP2_CRYPTO_LEVEL_APPLICATION; + return NGTCP2_ENCRYPTION_LEVEL_1RTT; default: assert(0); abort(); /* if NDEBUG is set */ @@ -398,16 +414,16 @@ ngtcp2_crypto_level ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level( } WOLFSSL_ENCRYPTION_LEVEL -ngtcp2_crypto_wolfssl_from_ngtcp2_crypto_level( - ngtcp2_crypto_level crypto_level) { - switch (crypto_level) { - case 
NGTCP2_CRYPTO_LEVEL_INITIAL: +ngtcp2_crypto_wolfssl_from_ngtcp2_encryption_level( + ngtcp2_encryption_level encryption_level) { + switch (encryption_level) { + case NGTCP2_ENCRYPTION_LEVEL_INITIAL: return wolfssl_encryption_initial; - case NGTCP2_CRYPTO_LEVEL_HANDSHAKE: + case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: return wolfssl_encryption_handshake; - case NGTCP2_CRYPTO_LEVEL_APPLICATION: + case NGTCP2_ENCRYPTION_LEVEL_1RTT: return wolfssl_encryption_application; - case NGTCP2_CRYPTO_LEVEL_EARLY: + case NGTCP2_ENCRYPTION_LEVEL_0RTT: return wolfssl_encryption_early_data; default: assert(0); @@ -441,7 +457,7 @@ static int set_encryption_secrets(WOLFSSL *ssl, const uint8_t *tx_secret, size_t secretlen) { ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); - ngtcp2_crypto_level level = + ngtcp2_encryption_level level = ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level(wolfssl_level); DEBUG_MSG("WOLFSSL: set encryption secrets, level=%d, rxlen=%lu, txlen=%lu\n", @@ -467,7 +483,7 @@ static int add_handshake_data(WOLFSSL *ssl, const uint8_t *data, size_t datalen) { ngtcp2_crypto_conn_ref *conn_ref = SSL_get_app_data(ssl); ngtcp2_conn *conn = conn_ref->get_conn(conn_ref); - ngtcp2_crypto_level level = + ngtcp2_encryption_level level = ngtcp2_crypto_wolfssl_from_wolfssl_encryption_level(wolfssl_level); int rv; @@ -514,11 +530,17 @@ static void crypto_wolfssl_configure_context(WOLFSSL_CTX *ssl_ctx) { int ngtcp2_crypto_wolfssl_configure_server_context(WOLFSSL_CTX *ssl_ctx) { crypto_wolfssl_configure_context(ssl_ctx); +#if PRINTF_DEBUG + wolfSSL_Debugging_ON(); +#endif return 0; } int ngtcp2_crypto_wolfssl_configure_client_context(WOLFSSL_CTX *ssl_ctx) { crypto_wolfssl_configure_context(ssl_ctx); wolfSSL_CTX_UseSessionTicket(ssl_ctx); +#if PRINTF_DEBUG + wolfSSL_Debugging_ON(); +#endif return 0; } diff --git a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h index ed71cb3ea0cb37..a8d4b4afd3a470 100644 --- a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h +++ b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h @@ -54,26 +54,13 @@ # ifdef WIN32 # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN -# endif +# endif /* WIN32_LEAN_AND_MEAN */ # include -# else +# else /* !WIN32 */ # include # include -# endif -#endif - -#ifdef AF_INET -# define NGTCP2_AF_INET AF_INET -#else -# define NGTCP2_AF_INET 2 -#endif - -#ifdef AF_INET6 -# define NGTCP2_AF_INET6 AF_INET6 -#else -# define NGTCP2_AF_INET6 23 -# define NGTCP2_USE_GENERIC_IPV6_SOCKADDR -#endif +# endif /* !WIN32 */ +#endif /* NGTCP2_USE_GENERIC_SOCKADDR */ #include @@ -181,7 +168,7 @@ typedef void *(*ngtcp2_realloc)(void *ptr, size_t size, void *user_data); * * void conn_new() { * ngtcp2_mem mem = {NULL, my_malloc_cb, my_free_cb, my_calloc_cb, - * my_realloc_cb}; + * my_realloc_cb}; * * ... * } @@ -223,7 +210,8 @@ typedef struct ngtcp2_mem { /** * @macro * - * :macro:`NGTCP2_SECONDS` is a count of tick which corresponds to 1 second. + * :macro:`NGTCP2_SECONDS` is a count of tick which corresponds to 1 + * second. */ #define NGTCP2_SECONDS ((ngtcp2_duration)1000000000ULL) @@ -267,34 +255,16 @@ typedef struct ngtcp2_mem { /** * @macro * - * :macro:`NGTCP2_PROTO_VER_V2_DRAFT` is the provisional version - * number for QUIC version 2 draft. - * - * https://quicwg.org/quic-v2/draft-ietf-quic-v2.html + * :macro:`NGTCP2_PROTO_VER_V2` is the QUIC version 2. See + * :rfc:`9369`. 
*/ -#define NGTCP2_PROTO_VER_V2_DRAFT ((uint32_t)0x709a50c4u) - -/** - * @macro - * - * :macro:`NGTCP2_PROTO_VER_DRAFT_MAX` is the maximum QUIC draft - * version that this library supports. - */ -#define NGTCP2_PROTO_VER_DRAFT_MAX 0xff000020u - -/** - * @macro - * - * :macro:`NGTCP2_PROTO_VER_DRAFT_MIN` is the minimum QUIC draft - * version that this library supports. - */ -#define NGTCP2_PROTO_VER_DRAFT_MIN 0xff00001du +#define NGTCP2_PROTO_VER_V2 ((uint32_t)0x6b3343cfu) /** * @macro * * :macro:`NGTCP2_PROTO_VER_MAX` is the highest QUIC version that this - * library supports. + * library supports. Deprecated since v1.1.0. */ #define NGTCP2_PROTO_VER_MAX NGTCP2_PROTO_VER_V1 @@ -302,9 +272,9 @@ typedef struct ngtcp2_mem { * @macro * * :macro:`NGTCP2_PROTO_VER_MIN` is the lowest QUIC version that this - * library supports. + * library supports. Deprecated since v1.1.0. */ -#define NGTCP2_PROTO_VER_MIN NGTCP2_PROTO_VER_DRAFT_MIN +#define NGTCP2_PROTO_VER_MIN NGTCP2_PROTO_VER_V1 /** * @macro @@ -324,7 +294,7 @@ typedef struct ngtcp2_mem { * @macro * * :macro:`NGTCP2_MAX_UDP_PAYLOAD_SIZE` is the default maximum UDP - * datagram payload size that this endpoint transmits. + * datagram payload size that the local endpoint transmits. */ #define NGTCP2_MAX_UDP_PAYLOAD_SIZE 1200 @@ -362,7 +332,7 @@ typedef struct ngtcp2_mem { * @macro * * :macro:`NGTCP2_MIN_STATELESS_RESET_RANDLEN` is the minimum length - * of random bytes (Unpredictable Bits) in Stateless Reset packet + * of random bytes (Unpredictable Bits) in Stateless Reset packet. */ #define NGTCP2_MIN_STATELESS_RESET_RANDLEN 5 @@ -374,24 +344,6 @@ typedef struct ngtcp2_mem { */ #define NGTCP2_PATH_CHALLENGE_DATALEN 8 -/** - * @macro - * - * :macro:`NGTCP2_RETRY_KEY_DRAFT` is an encryption key to create - * integrity tag of Retry packet. It is used for QUIC draft versions. - */ -#define NGTCP2_RETRY_KEY_DRAFT \ - "\xcc\xce\x18\x7e\xd0\x9a\x09\xd0\x57\x28\x15\x5a\x6c\xb9\x6b\xe1" - -/** - * @macro - * - * :macro:`NGTCP2_RETRY_NONCE_DRAFT` is nonce used when generating - * integrity tag of Retry packet. It is used for QUIC draft versions. - */ -#define NGTCP2_RETRY_NONCE_DRAFT \ - "\xe5\x49\x30\xf9\x7f\x21\x36\xf0\x53\x0a\x8c\x1c" - /** * @macro * @@ -404,32 +356,29 @@ typedef struct ngtcp2_mem { /** * @macro * - * :macro:`NGTCP2_RETRY_NONCE_V1` is nonce used when generating integrity - * tag of Retry packet. It is used for QUIC v1. + * :macro:`NGTCP2_RETRY_NONCE_V1` is nonce used when generating + * integrity tag of Retry packet. It is used for QUIC v1. */ #define NGTCP2_RETRY_NONCE_V1 "\x46\x15\x99\xd3\x5d\x63\x2b\xf2\x23\x98\x25\xbb" /** * @macro * - * :macro:`NGTCP2_RETRY_KEY_V2_DRAFT` is an encryption key to create - * integrity tag of Retry packet. It is used for QUIC v2 draft. - * - * https://quicwg.org/quic-v2/draft-ietf-quic-v2.html + * :macro:`NGTCP2_RETRY_KEY_V2` is an encryption key to create + * integrity tag of Retry packet. It is used for QUIC v2. See + * :rfc:`9369`. */ -#define NGTCP2_RETRY_KEY_V2_DRAFT \ - "\xba\x85\x8d\xc7\xb4\x3d\xe5\xdb\xf8\x76\x17\xff\x4a\xb2\x53\xdb" +#define NGTCP2_RETRY_KEY_V2 \ + "\x8f\xb4\xb0\x1b\x56\xac\x48\xe2\x60\xfb\xcb\xce\xad\x7c\xcc\x92" /** * @macro * - * :macro:`NGTCP2_RETRY_NONCE_V2_DRAFT` is nonce used when generating - * integrity tag of Retry packet. It is used for QUIC v2 draft. - * - * https://quicwg.org/quic-v2/draft-ietf-quic-v2.html + * :macro:`NGTCP2_RETRY_NONCE_V2` is nonce used when generating + * integrity tag of Retry packet. It is used for QUIC v2. See + * :rfc:`9369`. 
*/ -#define NGTCP2_RETRY_NONCE_V2_DRAFT \ - "\x14\x1b\x99\xc2\x39\xb0\x3e\x78\x5d\x6a\x2e\x9f" +#define NGTCP2_RETRY_NONCE_V2 "\xd8\x69\x69\xbc\x2d\x7c\x6d\x99\x90\xef\xb0\x4a" /** * @macro @@ -476,14 +425,6 @@ typedef struct ngtcp2_mem { */ #define NGTCP2_MIN_INITIAL_DCIDLEN 8 -/** - * @macro - * - * :macro:`NGTCP2_DEFAULT_HANDSHAKE_TIMEOUT` is the default handshake - * timeout. - */ -#define NGTCP2_DEFAULT_HANDSHAKE_TIMEOUT (10 * NGTCP2_SECONDS) - /** * @macrosection * @@ -525,8 +466,8 @@ typedef struct ngtcp2_mem { */ #define NGTCP2_ECN_MASK 0x3 -#define NGTCP2_PKT_INFO_VERSION_V1 1 -#define NGTCP2_PKT_INFO_VERSION NGTCP2_PKT_INFO_VERSION_V1 +#define NGTCP2_PKT_INFO_V1 1 +#define NGTCP2_PKT_INFO_VERSION NGTCP2_PKT_INFO_V1 /** * @struct @@ -535,12 +476,12 @@ typedef struct ngtcp2_mem { */ typedef struct NGTCP2_ALIGN(8) ngtcp2_pkt_info { /** - * :member:`ecn` is ECN marking and when passing - * `ngtcp2_conn_read_pkt()`, and it should be either + * :member:`ecn` is ECN marking, and when it is passed to + * `ngtcp2_conn_read_pkt()`, it should be either * :macro:`NGTCP2_ECN_NOT_ECT`, :macro:`NGTCP2_ECN_ECT_1`, * :macro:`NGTCP2_ECN_ECT_0`, or :macro:`NGTCP2_ECN_CE`. */ - uint32_t ecn; + uint8_t ecn; } ngtcp2_pkt_info; /** @@ -562,190 +503,190 @@ typedef struct NGTCP2_ALIGN(8) ngtcp2_pkt_info { * :macro:`NGTCP2_ERR_NOBUF` indicates that a provided buffer does not * have enough space to store data. */ -#define NGTCP2_ERR_NOBUF -203 +#define NGTCP2_ERR_NOBUF -202 /** * @macro * * :macro:`NGTCP2_ERR_PROTO` indicates a general protocol error. */ -#define NGTCP2_ERR_PROTO -205 +#define NGTCP2_ERR_PROTO -203 /** * @macro * * :macro:`NGTCP2_ERR_INVALID_STATE` indicates that a requested * operation is not allowed at the current connection state. */ -#define NGTCP2_ERR_INVALID_STATE -206 +#define NGTCP2_ERR_INVALID_STATE -204 /** * @macro * * :macro:`NGTCP2_ERR_ACK_FRAME` indicates that an invalid ACK frame * is received. */ -#define NGTCP2_ERR_ACK_FRAME -207 +#define NGTCP2_ERR_ACK_FRAME -205 /** * @macro * * :macro:`NGTCP2_ERR_STREAM_ID_BLOCKED` indicates that there is no * spare stream ID available. */ -#define NGTCP2_ERR_STREAM_ID_BLOCKED -208 +#define NGTCP2_ERR_STREAM_ID_BLOCKED -206 /** * @macro * * :macro:`NGTCP2_ERR_STREAM_IN_USE` indicates that a stream ID is * already in use. */ -#define NGTCP2_ERR_STREAM_IN_USE -209 +#define NGTCP2_ERR_STREAM_IN_USE -207 /** * @macro * * :macro:`NGTCP2_ERR_STREAM_DATA_BLOCKED` indicates that stream data * cannot be sent because of flow control. */ -#define NGTCP2_ERR_STREAM_DATA_BLOCKED -210 +#define NGTCP2_ERR_STREAM_DATA_BLOCKED -208 /** * @macro * * :macro:`NGTCP2_ERR_FLOW_CONTROL` indicates flow control error. */ -#define NGTCP2_ERR_FLOW_CONTROL -211 +#define NGTCP2_ERR_FLOW_CONTROL -209 /** * @macro * * :macro:`NGTCP2_ERR_CONNECTION_ID_LIMIT` indicates that the number * of received Connection ID exceeds acceptable limit. */ -#define NGTCP2_ERR_CONNECTION_ID_LIMIT -212 +#define NGTCP2_ERR_CONNECTION_ID_LIMIT -210 /** * @macro * * :macro:`NGTCP2_ERR_STREAM_LIMIT` indicates that a remote endpoint * opens more streams that is permitted. */ -#define NGTCP2_ERR_STREAM_LIMIT -213 +#define NGTCP2_ERR_STREAM_LIMIT -211 /** * @macro * * :macro:`NGTCP2_ERR_FINAL_SIZE` indicates that inconsistent final * size of a stream. */ -#define NGTCP2_ERR_FINAL_SIZE -214 +#define NGTCP2_ERR_FINAL_SIZE -212 /** * @macro * * :macro:`NGTCP2_ERR_CRYPTO` indicates crypto (TLS) related error. 
*/ -#define NGTCP2_ERR_CRYPTO -215 +#define NGTCP2_ERR_CRYPTO -213 /** * @macro * * :macro:`NGTCP2_ERR_PKT_NUM_EXHAUSTED` indicates that packet number * is exhausted. */ -#define NGTCP2_ERR_PKT_NUM_EXHAUSTED -216 +#define NGTCP2_ERR_PKT_NUM_EXHAUSTED -214 /** * @macro * * :macro:`NGTCP2_ERR_REQUIRED_TRANSPORT_PARAM` indicates that a * required transport parameter is missing. */ -#define NGTCP2_ERR_REQUIRED_TRANSPORT_PARAM -217 +#define NGTCP2_ERR_REQUIRED_TRANSPORT_PARAM -215 /** * @macro * * :macro:`NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM` indicates that a * transport parameter is malformed. */ -#define NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM -218 +#define NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM -216 /** * @macro * * :macro:`NGTCP2_ERR_FRAME_ENCODING` indicates there is an error in * frame encoding. */ -#define NGTCP2_ERR_FRAME_ENCODING -219 +#define NGTCP2_ERR_FRAME_ENCODING -217 /** * @macro * * :macro:`NGTCP2_ERR_DECRYPT` indicates a decryption failure. */ -#define NGTCP2_ERR_DECRYPT -220 +#define NGTCP2_ERR_DECRYPT -218 /** * @macro * * :macro:`NGTCP2_ERR_STREAM_SHUT_WR` indicates no more data can be * sent to a stream. */ -#define NGTCP2_ERR_STREAM_SHUT_WR -221 +#define NGTCP2_ERR_STREAM_SHUT_WR -219 /** * @macro * - * :macro:`NGTCP2_ERR_STREAM_NOT_FOUND` indicates that a stream was not - * found. + * :macro:`NGTCP2_ERR_STREAM_NOT_FOUND` indicates that a stream was + * not found. */ -#define NGTCP2_ERR_STREAM_NOT_FOUND -222 +#define NGTCP2_ERR_STREAM_NOT_FOUND -220 /** * @macro * * :macro:`NGTCP2_ERR_STREAM_STATE` indicates that a requested * operation is not allowed at the current stream state. */ -#define NGTCP2_ERR_STREAM_STATE -226 +#define NGTCP2_ERR_STREAM_STATE -221 /** * @macro * * :macro:`NGTCP2_ERR_RECV_VERSION_NEGOTIATION` indicates that Version * Negotiation packet was received. */ -#define NGTCP2_ERR_RECV_VERSION_NEGOTIATION -229 +#define NGTCP2_ERR_RECV_VERSION_NEGOTIATION -222 /** * @macro * * :macro:`NGTCP2_ERR_CLOSING` indicates that connection is in closing * state. */ -#define NGTCP2_ERR_CLOSING -230 +#define NGTCP2_ERR_CLOSING -223 /** * @macro * * :macro:`NGTCP2_ERR_DRAINING` indicates that connection is in * draining state. */ -#define NGTCP2_ERR_DRAINING -231 +#define NGTCP2_ERR_DRAINING -224 /** * @macro * * :macro:`NGTCP2_ERR_TRANSPORT_PARAM` indicates a general transport * parameter error. */ -#define NGTCP2_ERR_TRANSPORT_PARAM -234 +#define NGTCP2_ERR_TRANSPORT_PARAM -225 /** * @macro * * :macro:`NGTCP2_ERR_DISCARD_PKT` indicates a packet was discarded. */ -#define NGTCP2_ERR_DISCARD_PKT -235 +#define NGTCP2_ERR_DISCARD_PKT -226 /** * @macro * * :macro:`NGTCP2_ERR_CONN_ID_BLOCKED` indicates that there is no * spare Connection ID available. */ -#define NGTCP2_ERR_CONN_ID_BLOCKED -237 +#define NGTCP2_ERR_CONN_ID_BLOCKED -227 /** * @macro * * :macro:`NGTCP2_ERR_INTERNAL` indicates an internal error. */ -#define NGTCP2_ERR_INTERNAL -238 +#define NGTCP2_ERR_INTERNAL -228 /** * @macro * * :macro:`NGTCP2_ERR_CRYPTO_BUFFER_EXCEEDED` indicates that a crypto * buffer exceeded. */ -#define NGTCP2_ERR_CRYPTO_BUFFER_EXCEEDED -239 +#define NGTCP2_ERR_CRYPTO_BUFFER_EXCEEDED -229 /** * @macro * @@ -753,21 +694,21 @@ typedef struct NGTCP2_ALIGN(8) ngtcp2_pkt_info { * :macro:`NGTCP2_WRITE_STREAM_FLAG_MORE` is used and a function call * succeeded. */ -#define NGTCP2_ERR_WRITE_MORE -240 +#define NGTCP2_ERR_WRITE_MORE -230 /** * @macro * * :macro:`NGTCP2_ERR_RETRY` indicates that server should send Retry * packet. 
*/ -#define NGTCP2_ERR_RETRY -241 +#define NGTCP2_ERR_RETRY -231 /** * @macro * * :macro:`NGTCP2_ERR_DROP_CONN` indicates that an endpoint should * drop connection immediately. */ -#define NGTCP2_ERR_DROP_CONN -242 +#define NGTCP2_ERR_DROP_CONN -232 /** * @macro * @@ -775,7 +716,7 @@ typedef struct NGTCP2_ALIGN(8) ngtcp2_pkt_info { * limit is reached and key update is not available. An endpoint * should drop connection immediately. */ -#define NGTCP2_ERR_AEAD_LIMIT_REACHED -243 +#define NGTCP2_ERR_AEAD_LIMIT_REACHED -233 /** * @macro * @@ -783,41 +724,41 @@ typedef struct NGTCP2_ALIGN(8) ngtcp2_pkt_info { * could not probe that a path is capable of sending UDP datagram * payload of size at least 1200 bytes. */ -#define NGTCP2_ERR_NO_VIABLE_PATH -244 +#define NGTCP2_ERR_NO_VIABLE_PATH -234 /** * @macro * * :macro:`NGTCP2_ERR_VERSION_NEGOTIATION` indicates that server * should send Version Negotiation packet. */ -#define NGTCP2_ERR_VERSION_NEGOTIATION -245 +#define NGTCP2_ERR_VERSION_NEGOTIATION -235 /** * @macro * * :macro:`NGTCP2_ERR_HANDSHAKE_TIMEOUT` indicates that QUIC * connection is not established before the specified deadline. */ -#define NGTCP2_ERR_HANDSHAKE_TIMEOUT -246 +#define NGTCP2_ERR_HANDSHAKE_TIMEOUT -236 /** * @macro * * :macro:`NGTCP2_ERR_VERSION_NEGOTIATION_FAILURE` indicates the * version negotiation failed. */ -#define NGTCP2_ERR_VERSION_NEGOTIATION_FAILURE -247 +#define NGTCP2_ERR_VERSION_NEGOTIATION_FAILURE -237 /** * @macro * * :macro:`NGTCP2_ERR_IDLE_CLOSE` indicates the connection should be * closed silently because of idle timeout. */ -#define NGTCP2_ERR_IDLE_CLOSE -248 +#define NGTCP2_ERR_IDLE_CLOSE -238 /** * @macro * * :macro:`NGTCP2_ERR_FATAL` indicates that error codes less than this * value is fatal error. When this error is returned, an endpoint - * should drop connection immediately. + * should close connection immediately. */ #define NGTCP2_ERR_FATAL -500 /** @@ -892,7 +833,7 @@ typedef enum ngtcp2_pkt_type { */ NGTCP2_PKT_INITIAL = 0x10, /** - * :enum:`NGTCP2_PKT_0RTT` indicates 0RTT packet. + * :enum:`NGTCP2_PKT_0RTT` indicates 0-RTT packet. */ NGTCP2_PKT_0RTT = 0x11, /** @@ -1061,12 +1002,10 @@ typedef enum ngtcp2_pkt_type { /** * @macro * - * :macro:`NGTCP2_VERSION_NEGOTIATION_ERROR_DRAFT` is QUIC transport - * error code ``VERSION_NEGOTIATION_ERROR``. - * - * https://quicwg.org/quic-v2/draft-ietf-quic-v2.html + * :macro:`NGTCP2_VERSION_NEGOTIATION_ERROR` is QUIC transport error + * code ``VERSION_NEGOTIATION_ERROR``. See :rfc:`9368`. */ -#define NGTCP2_VERSION_NEGOTIATION_ERROR_DRAFT 0x53f8u +#define NGTCP2_VERSION_NEGOTIATION_ERROR 0x11 /** * @enum @@ -1096,6 +1035,8 @@ typedef enum ngtcp2_path_validation_result { * @typedef * * :type:`ngtcp2_tstamp` is a timestamp with nanosecond resolution. + * ``UINT64_MAX`` is an invalid value, and it is often used to + * indicate that no value is set. */ typedef uint64_t ngtcp2_tstamp; @@ -1103,7 +1044,8 @@ typedef uint64_t ngtcp2_tstamp; * @typedef * * :type:`ngtcp2_duration` is a period of time in nanosecond - * resolution. + * resolution. ``UINT64_MAX`` is an invalid value, and it is often + * used to indicate that no value is set. */ typedef uint64_t ngtcp2_duration; @@ -1178,10 +1120,15 @@ typedef struct ngtcp2_pkt_hd { */ int64_t pkt_num; /** - * :member:`token` contains token for Initial - * packet. + * :member:`token` contains token. Only Initial packet may contain + * token. NULL if no token is present. 
*/ - ngtcp2_vec token; + const uint8_t *token; + /** + * :member:`tokenlen` is the length of :member:`token`. 0 if no + * token is present. + */ + size_t tokenlen; /** * :member:`pkt_numlen` is the number of bytes spent to encode * :member:`pkt_num`. @@ -1197,8 +1144,10 @@ typedef struct ngtcp2_pkt_hd { */ uint32_t version; /** - * :member:`type` is a type of QUIC packet. See - * :type:`ngtcp2_pkt_type`. + * :member:`type` is a type of QUIC packet. This field does not + * have a QUIC packet type defined for a specific QUIC version. + * Instead, it contains version independent packet type defined by + * this library. See :type:`ngtcp2_pkt_type`. */ uint8_t type; /** @@ -1229,25 +1178,6 @@ typedef struct ngtcp2_pkt_stateless_reset { size_t randlen; } ngtcp2_pkt_stateless_reset; -/** - * @enum - * - * :type:`ngtcp2_transport_params_type` defines TLS message type which - * carries transport parameters. - */ -typedef enum ngtcp2_transport_params_type { - /** - * :enum:`NGTCP2_TRANSPORT_PARAMS_TYPE_CLIENT_HELLO` is Client Hello - * TLS message. - */ - NGTCP2_TRANSPORT_PARAMS_TYPE_CLIENT_HELLO, - /** - * :enum:`NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS` is - * Encrypted Extensions TLS message. - */ - NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS -} ngtcp2_transport_params_type; - /** * @macrosection * @@ -1296,14 +1226,96 @@ typedef enum ngtcp2_transport_params_type { */ #define NGTCP2_TLSEXT_QUIC_TRANSPORT_PARAMETERS_V1 0x39u +#ifdef NGTCP2_USE_GENERIC_SOCKADDR +# ifndef NGTCP2_AF_INET +# error NGTCP2_AF_INET must be defined +# endif /* !NGTCP2_AF_INET */ + +# ifndef NGTCP2_AF_INET6 +# error NGTCP2_AF_INET6 must be defined +# endif /* !NGTCP2_AF_INET6 */ + +typedef unsigned short int ngtcp2_sa_family; +typedef uint16_t ngtcp2_in_port; + +typedef struct ngtcp2_sockaddr { + ngtcp2_sa_family sa_family; + uint8_t sa_data[14]; +} ngtcp2_sockaddr; + +typedef struct ngtcp2_in_addr { + uint32_t s_addr; +} ngtcp2_in_addr; + +typedef struct ngtcp2_sockaddr_in { + ngtcp2_sa_family sin_family; + ngtcp2_in_port sin_port; + ngtcp2_in_addr sin_addr; + uint8_t sin_zero[8]; +} ngtcp2_sockaddr_in; + +typedef struct ngtcp2_in6_addr { + uint8_t in6_addr[16]; +} ngtcp2_in6_addr; + +typedef struct ngtcp2_sockaddr_in6 { + ngtcp2_sa_family sin6_family; + ngtcp2_in_port sin6_port; + uint32_t sin6_flowinfo; + ngtcp2_in6_addr sin6_addr; + uint32_t sin6_scope_id; +} ngtcp2_sockaddr_in6; + +typedef uint32_t ngtcp2_socklen; +#else /* !NGTCP2_USE_GENERIC_SOCKADDR */ +# define NGTCP2_AF_INET AF_INET +# define NGTCP2_AF_INET6 AF_INET6 + /** - * @macro + * @typedef * - * :macro:`NGTCP2_TLSEXT_QUIC_TRANSPORT_PARAMETERS_DRAFT` is TLS - * extension type of quic_transport_parameters used during draft - * development. + * :type:`ngtcp2_sockaddr` is typedefed to struct sockaddr. If + * :macro:`NGTCP2_USE_GENERIC_SOCKADDR` is defined, it is typedefed to + * the generic struct sockaddr defined in ngtcp2.h. + */ +typedef struct sockaddr ngtcp2_sockaddr; +/** + * @typedef + * + * :type:`ngtcp2_sockaddr_in` is typedefed to struct sockaddr_in. If + * :macro:`NGTCP2_USE_GENERIC_SOCKADDR` is defined, it is typedefed to + * the generic struct sockaddr_in defined in ngtcp2.h. + */ +typedef struct sockaddr_in ngtcp2_sockaddr_in; +/** + * @typedef + * + * :type:`ngtcp2_sockaddr_in6` is typedefed to struct sockaddr_in6. + * If :macro:`NGTCP2_USE_GENERIC_SOCKADDR` is defined, it is typedefed + * to the generic struct sockaddr_in6 defined in ngtcp2.h. 
+ */ +typedef struct sockaddr_in6 ngtcp2_sockaddr_in6; +/** + * @typedef + * + * :type:`ngtcp2_socklen` is typedefed to socklen_t. If + * :macro:`NGTCP2_USE_GENERIC_SOCKADDR` is defined, it is typedefed to + * uint32_t. + */ +typedef socklen_t ngtcp2_socklen; +#endif /* !NGTCP2_USE_GENERIC_SOCKADDR */ + +/** + * @struct + * + * :type:`ngtcp2_sockaddr_union` conveniently includes all supported + * address types. */ -#define NGTCP2_TLSEXT_QUIC_TRANSPORT_PARAMETERS_DRAFT 0xffa5u +typedef union ngtcp2_sockaddr_union { + ngtcp2_sockaddr sa; + ngtcp2_sockaddr_in in; + ngtcp2_sockaddr_in6 in6; +} ngtcp2_sockaddr_union; /** * @struct @@ -1317,29 +1329,21 @@ typedef struct ngtcp2_preferred_addr { */ ngtcp2_cid cid; /** - * :member:`ipv4_port` is a port of IPv4 address. - */ - uint16_t ipv4_port; - /** - * :member:`ipv6_port` is a port of IPv6 address. + * :member:`ipv4` contains IPv4 address and port. */ - uint16_t ipv6_port; + ngtcp2_sockaddr_in ipv4; /** - * :member:`ipv4_addr` contains IPv4 address in network byte order. + * :member:`ipv6` contains IPv6 address and port. */ - uint8_t ipv4_addr[4]; + ngtcp2_sockaddr_in6 ipv6; /** - * :member:`ipv6_addr` contains IPv6 address in network byte order. - */ - uint8_t ipv6_addr[16]; - /** - * :member:`ipv4_present` indicates that :member:`ipv4_addr` and - * :member:`ipv4_port` contain IPv4 address and port respectively. + * :member:`ipv4_present` indicates that :member:`ipv4` contains + * IPv4 address and port. */ uint8_t ipv4_present; /** - * :member:`ipv6_present` indicates that :member:`ipv6_addr` and - * :member:`ipv6_port` contain IPv6 address and port respectively. + * :member:`ipv6_present` indicates that :member:`ipv6` contains + * IPv6 address and port. */ uint8_t ipv6_present; /** @@ -1352,7 +1356,7 @@ typedef struct ngtcp2_preferred_addr { * @struct * * :type:`ngtcp2_version_info` represents version_information - * structure. + * structure. See :rfc:`9368`. */ typedef struct ngtcp2_version_info { /** @@ -1360,19 +1364,21 @@ typedef struct ngtcp2_version_info { */ uint32_t chosen_version; /** - * :member:`other_versions` points the wire image of other_versions - * field. The each version is therefore in network byte order. + * :member:`available_versions` points the wire image of + * available_versions field. The each version is therefore in + * network byte order. */ - uint8_t *other_versions; + const uint8_t *available_versions; /** - * :member:`other_versionslen` is the number of bytes pointed by - * :member:`other_versions`, not the number of versions included. + * :member:`available_versionslen` is the number of bytes pointed by + * :member:`available_versions`, not the number of versions + * included. */ - size_t other_versionslen; + size_t available_versionslen; } ngtcp2_version_info; -#define NGTCP2_TRANSPORT_PARAMS_VERSION_V1 1 -#define NGTCP2_TRANSPORT_PARAMS_VERSION NGTCP2_TRANSPORT_PARAMS_VERSION_V1 +#define NGTCP2_TRANSPORT_PARAMS_V1 1 +#define NGTCP2_TRANSPORT_PARAMS_VERSION NGTCP2_TRANSPORT_PARAMS_V1 /** * @struct @@ -1382,55 +1388,58 @@ typedef struct ngtcp2_version_info { */ typedef struct ngtcp2_transport_params { /** - * :member:`preferred_address` contains preferred address if - * :member:`preferred_address_present` is nonzero. + * :member:`preferred_addr` contains preferred address if + * :member:`preferred_addr_present` is nonzero. 
*/ - ngtcp2_preferred_addr preferred_address; + ngtcp2_preferred_addr preferred_addr; /** * :member:`original_dcid` is the Destination Connection ID field * from the first Initial packet from client. Server must specify - * this field. It is expected that application knows the original - * Destination Connection ID even if it sends Retry packet, for - * example, by including it in retry token. Otherwise, application - * should not specify this field. + * this field and set :member:`original_dcid_present` to nonzero. + * It is expected that application knows the original Destination + * Connection ID even if it sends Retry packet, for example, by + * including it in retry token. Otherwise, application should not + * specify this field. */ ngtcp2_cid original_dcid; /** * :member:`initial_scid` is the Source Connection ID field from the - * first Initial packet the endpoint sends. Application should not - * specify this field. + * first Initial packet the local endpoint sends. Application + * should not specify this field. If :member:`initial_scid_present` + * is set to nonzero, it indicates this field is set. */ ngtcp2_cid initial_scid; /** * :member:`retry_scid` is the Source Connection ID field from Retry * packet. Only server uses this field. If server application - * received Initial packet with retry token from client and server - * verified its token, server application must set Destination - * Connection ID field from the Initial packet to this field and set - * :member:`retry_scid_present` to nonzero. Server application must - * verify that the Destination Connection ID from Initial packet was - * sent in Retry packet by, for example, including the Connection ID - * in a token, or including it in AAD when encrypting a token. + * received Initial packet with retry token from client, and server + * successfully verified its token, server application must set + * Destination Connection ID field from the Initial packet to this + * field, and set :member:`retry_scid_present` to nonzero. Server + * application must verify that the Destination Connection ID from + * Initial packet was sent in Retry packet by, for example, + * including the Connection ID in a token, or including it in AAD + * when encrypting a token. */ ngtcp2_cid retry_scid; /** * :member:`initial_max_stream_data_bidi_local` is the size of flow * control window of locally initiated stream. This is the number - * of bytes that the remote endpoint can send and the local endpoint - * must ensure that it has enough buffer to receive them. + * of bytes that the remote endpoint can send, and the local + * endpoint must ensure that it has enough buffer to receive them. */ uint64_t initial_max_stream_data_bidi_local; /** * :member:`initial_max_stream_data_bidi_remote` is the size of flow * control window of remotely initiated stream. This is the number - * of bytes that the remote endpoint can send and the local endpoint - * must ensure that it has enough buffer to receive them. + * of bytes that the remote endpoint can send, and the local + * endpoint must ensure that it has enough buffer to receive them. */ uint64_t initial_max_stream_data_bidi_remote; /** * :member:`initial_max_stream_data_uni` is the size of flow control * window of remotely initiated unidirectional stream. This is the - * number of bytes that the remote endpoint can send and the local + * number of bytes that the remote endpoint can send, and the local * endpoint must ensure that it has enough buffer to receive them. 
*/ uint64_t initial_max_stream_data_uni; @@ -1451,12 +1460,13 @@ typedef struct ngtcp2_transport_params { uint64_t initial_max_streams_uni; /** * :member:`max_idle_timeout` is a duration during which sender - * allows quiescent. + * allows quiescent. 0 means no idle timeout. It must not be + * UINT64_MAX. */ ngtcp2_duration max_idle_timeout; /** - * :member:`max_udp_payload_size` is the maximum datagram size that - * the endpoint can receive. + * :member:`max_udp_payload_size` is the maximum UDP payload size + * that the local endpoint can receive. */ uint64_t max_udp_payload_size; /** @@ -1471,14 +1481,16 @@ typedef struct ngtcp2_transport_params { uint64_t ack_delay_exponent; /** * :member:`max_ack_delay` is the maximum acknowledgement delay by - * which the endpoint will delay sending acknowledgements. + * which the local endpoint will delay sending acknowledgements. It + * must be strictly less than (1 << 14) milliseconds. + * Sub-millisecond part is dropped when sending it in a QUIC + * transport parameter. */ ngtcp2_duration max_ack_delay; /** * :member:`max_datagram_frame_size` is the maximum size of DATAGRAM - * frame that this endpoint willingly receives. Specifying 0 - * disables DATAGRAM support. See - * https://datatracker.ietf.org/doc/html/rfc9221 + * frame that the local endpoint willingly receives. Specifying 0 + * disables DATAGRAM support. See :rfc:`9221`. */ uint64_t max_datagram_frame_size; /** @@ -1487,30 +1499,37 @@ typedef struct ngtcp2_transport_params { */ uint8_t stateless_reset_token_present; /** - * :member:`disable_active_migration` is nonzero if the endpoint - * does not support active connection migration. + * :member:`disable_active_migration` is nonzero if the local + * endpoint does not support active connection migration. */ uint8_t disable_active_migration; + /** + * :member:`original_dcid_present` is nonzero if + * :member:`original_dcid` field is set. + */ + uint8_t original_dcid_present; + /** + * :member:`initial_scid_present` is nonzero if + * :member:`initial_scid` field is set. + */ + uint8_t initial_scid_present; /** * :member:`retry_scid_present` is nonzero if :member:`retry_scid` * field is set. */ uint8_t retry_scid_present; /** - * :member:`preferred_address_present` is nonzero if + * :member:`preferred_addr_present` is nonzero if * :member:`preferred_address` is set. */ - uint8_t preferred_address_present; + uint8_t preferred_addr_present; /** * :member:`stateless_reset_token` contains stateless reset token. */ uint8_t stateless_reset_token[NGTCP2_STATELESS_RESET_TOKENLEN]; /** * :member:`grease_quic_bit` is nonzero if sender supports "Greasing - * the QUIC Bit" extension. See - * https://datatracker.ietf.org/doc/html/draft-ietf-quic-bit-grease. - * Note that the local endpoint always enables greasing QUIC bit - * regardless of this field value. + * the QUIC Bit" extension. See :rfc:`9287`. */ uint8_t grease_quic_bit; /** @@ -1527,44 +1546,15 @@ typedef struct ngtcp2_transport_params { uint8_t version_info_present; } ngtcp2_transport_params; -/** - * @enum - * - * :type:`ngtcp2_pktns_id` defines packet number space identifier. - */ -typedef enum ngtcp2_pktns_id { - /** - * :enum:`NGTCP2_PKTNS_ID_INITIAL` is the Initial packet number - * space. - */ - NGTCP2_PKTNS_ID_INITIAL, - /** - * :enum:`NGTCP2_PKTNS_ID_HANDSHAKE` is the Handshake packet number - * space. - */ - NGTCP2_PKTNS_ID_HANDSHAKE, - /** - * :enum:`NGTCP2_PKTNS_ID_APPLICATION` is the Application data - * packet number space. 
- */ - NGTCP2_PKTNS_ID_APPLICATION, - /** - * :enum:`NGTCP2_PKTNS_ID_MAX` is defined to get the number of - * packet number spaces. - */ - NGTCP2_PKTNS_ID_MAX -} ngtcp2_pktns_id; - -#define NGTCP2_CONN_STAT_VERSION_V1 1 -#define NGTCP2_CONN_STAT_VERSION NGTCP2_CONN_STAT_VERSION_V1 +#define NGTCP2_CONN_INFO_V1 1 +#define NGTCP2_CONN_INFO_VERSION NGTCP2_CONN_INFO_V1 /** * @struct * - * :type:`ngtcp2_conn_stat` holds various connection statistics, and - * computed data for recovery and congestion controller. + * :type:`ngtcp2_conn_info` holds various connection statistics. */ -typedef struct ngtcp2_conn_stat { +typedef struct ngtcp2_conn_info { /** * :member:`latest_rtt` is the latest RTT sample which is not * adjusted by acknowledgement delay. @@ -1583,35 +1573,6 @@ typedef struct ngtcp2_conn_stat { * :member:`rttvar` is a mean deviation of observed RTT. */ ngtcp2_duration rttvar; - /** - * :member:`initial_rtt` is the initial RTT which is used when no - * RTT sample is available. - */ - ngtcp2_duration initial_rtt; - /** - * :member:`first_rtt_sample_ts` is the timestamp when the first RTT - * sample is obtained. - */ - ngtcp2_tstamp first_rtt_sample_ts; - /** - * :member:`pto_count` is the count of successive PTO timer - * expiration. - */ - size_t pto_count; - /** - * :member:`loss_detection_timer` is the deadline of the current - * loss detection timer. - */ - ngtcp2_tstamp loss_detection_timer; - /** - * :member:`last_tx_pkt_ts` corresponds to - * time_of_last_ack_eliciting_packet in :rfc:`9002`. - */ - ngtcp2_tstamp last_tx_pkt_ts[NGTCP2_PKTNS_ID_MAX]; - /** - * :member:`loss_time` corresponds to loss_time in :rfc:`9002`. - */ - ngtcp2_tstamp loss_time[NGTCP2_PKTNS_ID_MAX]; /** * :member:`cwnd` is the size of congestion window. */ @@ -1620,38 +1581,12 @@ typedef struct ngtcp2_conn_stat { * :member:`ssthresh` is slow start threshold. */ uint64_t ssthresh; - /** - * :member:`congestion_recovery_start_ts` is the timestamp when - * congestion recovery started. - */ - ngtcp2_tstamp congestion_recovery_start_ts; /** * :member:`bytes_in_flight` is the number in bytes of all sent * packets which have not been acknowledged. */ uint64_t bytes_in_flight; - /** - * :member:`max_udp_payload_size` is the maximum size of UDP - * datagram payload that this endpoint transmits. It is used by - * congestion controller to compute congestion window. - */ - size_t max_udp_payload_size; - /** - * :member:`delivery_rate_sec` is the current sending rate measured - * in byte per second. - */ - uint64_t delivery_rate_sec; - /** - * :member:`pacing_rate` is the current packet sending rate. If - * pacing is disabled, 0 is set. - */ - double pacing_rate; - /** - * :member:`send_quantum` is the maximum size of a data aggregate - * scheduled and transmitted together. - */ - size_t send_quantum; -} ngtcp2_conn_stat; +} ngtcp2_conn_info; /** * @enum @@ -1668,15 +1603,9 @@ typedef enum ngtcp2_cc_algo { */ NGTCP2_CC_ALGO_CUBIC = 0x01, /** - * :enum:`NGTCP2_CC_ALGO_BBR` represents BBR. If BBR is chosen, - * packet pacing is enabled. + * :enum:`NGTCP2_CC_ALGO_BBR` represents BBR v2. */ - NGTCP2_CC_ALGO_BBR = 0x02, - /** - * :enum:`NGTCP2_CC_ALGO_BBR2` represents BBR v2. If BBR v2 is - * chosen, packet pacing is enabled. - */ - NGTCP2_CC_ALGO_BBR2 = 0x03 + NGTCP2_CC_ALGO_BBR = 0x02 } ngtcp2_cc_algo; /** @@ -1737,27 +1666,30 @@ typedef void (*ngtcp2_qlog_write)(void *user_data, uint32_t flags, const void *data, size_t datalen); /** - * @struct + * @enum * - * :type:`ngtcp2_qlog_settings` is a set of settings for qlog. 
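The trimmed-down ngtcp2_conn_info above drops the recovery internals that ngtcp2_conn_stat used to expose. A short polling sketch follows; it assumes the accessor is named ngtcp2_conn_get_conn_info() (the renamed counterpart of the old ngtcp2_conn_get_conn_stat()) and touches only members visible in the hunk above.

#include <inttypes.h>
#include <stdio.h>
#include <ngtcp2/ngtcp2.h>

/* Periodically log the statistics that survived the conn_stat -> conn_info
 * trim. */
static void log_conn_info(ngtcp2_conn *conn) {
  ngtcp2_conn_info ci;

  ngtcp2_conn_get_conn_info(conn, &ci);

  fprintf(stderr,
          "latest_rtt=%" PRIu64 "ns rttvar=%" PRIu64 "ns cwnd=%" PRIu64
          " ssthresh=%" PRIu64 " bytes_in_flight=%" PRIu64 "\n",
          ci.latest_rtt, ci.rttvar, ci.cwnd, ci.ssthresh, ci.bytes_in_flight);
}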
+ * :type:`ngtcp2_token_type` defines the type of token. */ -typedef struct ngtcp2_qlog_settings { +typedef enum ngtcp2_token_type { + /** + * :enum:`NGTCP2_TOKEN_TYPE_UNKNOWN` indicates that the type of + * token is unknown. + */ + NGTCP2_TOKEN_TYPE_UNKNOWN, /** - * :member:`odcid` is Original Destination Connection ID sent by - * client. It is used as group_id and ODCID fields. Client ignores - * this field and uses dcid parameter passed to - * `ngtcp2_conn_client_new()`. + * :enum:`NGTCP2_TOKEN_TYPE_RETRY` indicates that a token comes from + * Retry packet. */ - ngtcp2_cid odcid; + NGTCP2_TOKEN_TYPE_RETRY, /** - * :member:`write` is a callback function to write qlog. Setting - * ``NULL`` disables qlog. + * :enum:`NGTCP2_TOKEN_TYPE_NEW_TOKEN` indicates that a token comes + * from NEW_TOKEN frame. */ - ngtcp2_qlog_write write; -} ngtcp2_qlog_settings; + NGTCP2_TOKEN_TYPE_NEW_TOKEN +} ngtcp2_token_type; -#define NGTCP2_SETTINGS_VERSION_V1 1 -#define NGTCP2_SETTINGS_VERSION NGTCP2_SETTINGS_VERSION_V1 +#define NGTCP2_SETTINGS_V1 1 +#define NGTCP2_SETTINGS_VERSION NGTCP2_SETTINGS_V1 /** * @struct @@ -1766,9 +1698,10 @@ typedef struct ngtcp2_qlog_settings { */ typedef struct ngtcp2_settings { /** - * :member:`qlog` is qlog settings. + * :member:`qlog_write` is a callback function to write qlog. + * Setting ``NULL`` disables qlog. */ - ngtcp2_qlog_settings qlog; + ngtcp2_qlog_write qlog_write; /** * :member:`cc_algo` specifies congestion control algorithm. */ @@ -1789,24 +1722,37 @@ typedef struct ngtcp2_settings { */ ngtcp2_printf log_printf; /** - * :member:`max_udp_payload_size` is the maximum size of UDP - * datagram payload that this endpoint transmits. It is used by - * congestion controller to compute congestion window. + * :member:`max_tx_udp_payload_size` is the maximum size of UDP + * datagram payload that the local endpoint transmits. It is used + * by congestion controller to compute congestion window. */ - size_t max_udp_payload_size; + size_t max_tx_udp_payload_size; /** * :member:`token` is a token from Retry packet or NEW_TOKEN frame. * * Server sets this field if it received the token in Client Initial - * packet and successfully validated. + * packet and successfully validated. It should also set + * :member:`token_type` field. * * Client sets this field if it intends to send token in its Initial * packet. * * `ngtcp2_conn_server_new` and `ngtcp2_conn_client_new` make a copy * of token. + * + * Set NULL if there is no token. */ - ngtcp2_vec token; + const uint8_t *token; + /** + * :member:`tokenlen` is the length of :member:`token`. Set 0 if + * there is no token. + */ + size_t tokenlen; + /** + * :member:`token_type` is the type of token. Server application + * should set this field. + */ + ngtcp2_token_type token_type; /** * :member:`rand_ctx` is an optional random number generator to be * passed to :type:`ngtcp2_rand` callback. @@ -1834,20 +1780,20 @@ typedef struct ngtcp2_settings { */ uint64_t max_stream_window; /** - * :member:`ack_thresh` is the maximum number of unacknowledged - * packets before sending acknowledgement. It triggers the - * immediate acknowledgement. + * :member:`ack_thresh` is the minimum number of the received ACK + * eliciting packets that trigger the immediate acknowledgement from + * the local endpoint. 
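A compressed sketch of filling the reshaped ngtcp2_settings follows, showing the qlog_write callback that replaces the removed ngtcp2_qlog_settings struct and the token/tokenlen/token_type triple that replaces the ngtcp2_vec token. It assumes the ngtcp2_settings_default() initializer; the payload size and the choice of token type are illustrative.

#include <ngtcp2/ngtcp2.h>

static void my_qlog_write(void *user_data, uint32_t flags, const void *data,
                          size_t datalen) {
  /* Append qlog output to an application-owned sink. */
  (void)user_data; (void)flags; (void)data; (void)datalen;
}

/* Server-side settings; token is a Retry token already validated by the
 * application. */
static void fill_server_settings(ngtcp2_settings *settings,
                                 const uint8_t *token, size_t tokenlen) {
  ngtcp2_settings_default(settings);

  settings->qlog_write = my_qlog_write; /* formerly settings->qlog.write */
  settings->cc_algo = NGTCP2_CC_ALGO_CUBIC;
  settings->max_tx_udp_payload_size = 1452; /* formerly max_udp_payload_size */

  if (tokenlen) {
    settings->token = token;       /* plain pointer now, not ngtcp2_vec */
    settings->tokenlen = tokenlen;
    settings->token_type = NGTCP2_TOKEN_TYPE_RETRY;
  }
}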
*/ size_t ack_thresh; /** - * :member:`no_udp_payload_size_shaping`, if set to nonzero, + * :member:`no_tx_udp_payload_size_shaping`, if set to nonzero, * instructs the library not to limit the UDP payload size to * :macro:`NGTCP2_MAX_UDP_PAYLOAD_SIZE` (which can be extended by - * Path MTU Discovery) and instead use the mininum size among the - * given buffer size, :member:`max_udp_payload_size`, and the - * received max_udp_payload QUIC transport parameter. + * Path MTU Discovery), and instead use the minimum size among the + * given buffer size, :member:`max_tx_udp_payload_size`, and the + * received max_udp_payload_size QUIC transport parameter. */ - int no_udp_payload_size_shaping; + uint8_t no_tx_udp_payload_size_shaping; /** * :member:`handshake_timeout` is the period of time before giving * up QUIC connection establishment. If QUIC handshake is not @@ -1865,10 +1811,13 @@ typedef struct ngtcp2_settings { * of preference. * * On compatible version negotiation, server will negotiate one of - * those versions contained in this array if a client initially - * chooses a less preferred version. This version set corresponds - * to Offered Versions in QUIC Version Negotiation draft, and it should - * be sent in Version Negotiation packet. + * those versions contained in this array if there is some overlap + * between these versions and the versions offered by the client. + * If there is no overlap, but the client chosen version is + * supported by the library, the server chooses the client chosen + * version as the negotiated version. This version set corresponds + * to Offered Versions described in :rfc:`9368`, and it should be + * included in Version Negotiation packet. * * Client uses this field and :member:`original_version` to prevent * version downgrade attack if it reacted upon Version Negotiation @@ -1876,7 +1825,7 @@ typedef struct ngtcp2_settings { * |client_chosen_version| passed to `ngtcp2_conn_client_new` unless * |client_chosen_version| is a reserved version. */ - uint32_t *preferred_versions; + const uint32_t *preferred_versions; /** * :member:`preferred_versionslen` is the number of versions that * are contained in the array pointed by @@ -1884,13 +1833,14 @@ typedef struct ngtcp2_settings { */ size_t preferred_versionslen; /** - * :member:`other_versions` is the array of versions that are set in - * :member:`other_versions ` - * field of outgoing version_information QUIC transport parameter. + * :member:`available_versions` is the array of versions that are + * going to be set in :member:`available_versions + * ` field of outgoing + * version_information QUIC transport parameter. * - * For server, this corresponds to Fully-Deployed Versions in QUIC - * Version Negotiation draft. If this field is set not, it is set - * to :member:`preferred_versions` internally if + * For server, this corresponds to Fully-Deployed Versions described + * in :rfc:`9368`. If this field is not set, it is set to + * :member:`preferred_versions` internally if * :member:`preferred_versionslen` is not zero. If this field is * not set, and :member:`preferred_versionslen` is zero, this field * is set to :macro:`NGTCP2_PROTO_VER_V1` internally. @@ -1902,12 +1852,13 @@ typedef struct ngtcp2_settings { * `ngtcp2_conn_client_new` will be set in this field internally * unless |client_chosen_version| is a reserved version. 
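To make the compatible version negotiation fields above concrete, here is a small sketch of a deployment offering only QUIC v1. It relies only on members shown in this hunk, and leaves available_versions unset so that it falls back to preferred_versions as documented.

#include <ngtcp2/ngtcp2.h>

/* Most preferred first.  The array must stay valid at least until the
 * connection object is created; the pointer is now const-qualified. */
static const uint32_t s_preferred_versions[] = {
  NGTCP2_PROTO_VER_V1,
};

static void fill_version_settings(ngtcp2_settings *settings) {
  settings->preferred_versions = s_preferred_versions;
  settings->preferred_versionslen =
      sizeof(s_preferred_versions) / sizeof(s_preferred_versions[0]);

  /* available_versions (formerly other_versions) is left NULL; per the
   * documentation above it then defaults to preferred_versions. */
}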
*/ - uint32_t *other_versions; + const uint32_t *available_versions; /** - * :member:`other_versionslen` is the number of versions that are - * contained in the array pointed by :member:`other_versions`. + * :member:`available_versionslen` is the number of versions that + * are contained in the array pointed by + * :member:`available_versions`. */ - size_t other_versionslen; + size_t available_versionslen; /** * :member:`original_version` is the original version that client * initially used to make a connection attempt. If it is set, and @@ -1920,92 +1871,14 @@ typedef struct ngtcp2_settings { * :member:`no_pmtud`, if set to nonzero, disables Path MTU * Discovery. */ - int no_pmtud; + uint8_t no_pmtud; + /** + * :member:`pkt_num` is the initial packet number for each packet + * number space. It must be in range [0, INT32_MAX], inclusive. + */ + uint32_t initial_pkt_num; } ngtcp2_settings; -#ifdef NGTCP2_USE_GENERIC_SOCKADDR -typedef struct ngtcp2_sockaddr { - uint16_t sa_family; - uint8_t sa_data[14]; -} ngtcp2_sockaddr; - -typedef struct ngtcp2_in_addr { - uint32_t s_addr; -} ngtcp2_in_addr; - -typedef struct ngtcp2_sockaddr_in { - uint16_t sin_family; - uint16_t sin_port; - ngtcp2_in_addr sin_addr; - uint8_t sin_zero[8]; -} ngtcp2_sockaddr_in; - -# define NGTCP2_SS_MAXSIZE 128 -# define NGTCP2_SS_ALIGNSIZE (sizeof(uint64_t)) -# define NGTCP2_SS_PAD1SIZE (NGTCP2_SS_ALIGNSIZE - sizeof(uint16_t)) -# define NGTCP2_SS_PAD2SIZE \ - (NGTCP2_SS_MAXSIZE - \ - (sizeof(uint16_t) + NGTCP2_SS_PAD1SIZE + NGTCP2_SS_ALIGNSIZE)) - -typedef struct ngtcp2_sockaddr_storage { - uint16_t ss_family; - uint8_t _ss_pad1[NGTCP2_SS_PAD1SIZE]; - uint64_t _ss_align; - uint8_t _ss_pad2[NGTCP2_SS_PAD2SIZE]; -} ngtcp2_sockaddr_storage; - -# undef NGTCP2_SS_PAD2SIZE -# undef NGTCP2_SS_PAD1SIZE -# undef NGTCP2_SS_ALIGNSIZE -# undef NGTCP2_SS_MAXSIZE - -typedef uint32_t ngtcp2_socklen; -#else -/** - * @typedef - * - * :type:`ngtcp2_sockaddr` is typedefed to struct sockaddr. If - * :macro:`NGTCP2_USE_GENERIC_SOCKADDR` is defined, it is typedefed to - * the generic struct sockaddr defined in ngtcp2.h. - */ -typedef struct sockaddr ngtcp2_sockaddr; -/** - * @typedef - * - * :type:`ngtcp2_sockaddr_storage` is typedefed to struct - * sockaddr_storage. If :macro:`NGTCP2_USE_GENERIC_SOCKADDR` is - * defined, it is typedefed to the generic struct sockaddr_storage - * defined in ngtcp2.h. - */ -typedef struct sockaddr_storage ngtcp2_sockaddr_storage; -typedef struct sockaddr_in ngtcp2_sockaddr_in; -/** - * @typedef - * - * :type:`ngtcp2_socklen` is typedefed to socklen_t. If - * :macro:`NGTCP2_USE_GENERIC_SOCKADDR` is defined, it is typedefed to - * uint32_t. - */ -typedef socklen_t ngtcp2_socklen; -#endif - -#if defined(NGTCP2_USE_GENERIC_SOCKADDR) || \ - defined(NGTCP2_USE_GENERIC_IPV6_SOCKADDR) -typedef struct ngtcp2_in6_addr { - uint8_t in6_addr[16]; -} ngtcp2_in6_addr; - -typedef struct ngtcp2_sockaddr_in6 { - uint16_t sin6_family; - uint16_t sin6_port; - uint32_t sin6_flowinfo; - ngtcp2_in6_addr sin6_addr; - uint32_t sin6_scope_id; -} ngtcp2_sockaddr_in6; -#else -typedef struct sockaddr_in6 ngtcp2_sockaddr_in6; -#endif - /** * @struct * @@ -2018,7 +1891,8 @@ typedef struct ngtcp2_addr { */ ngtcp2_sockaddr *addr; /** - * :member:`addrlen` is the length of addr. + * :member:`addrlen` is the length of :member:`addr`. It must not + * be longer than sizeof(:type:`ngtcp2_sockaddr_union`). 
*/ ngtcp2_socklen addrlen; } ngtcp2_addr; @@ -2045,7 +1919,7 @@ typedef struct ngtcp2_path { * Note that :type:`ngtcp2_path` is generally passed to * :type:`ngtcp2_conn` by an application, and :type:`ngtcp2_conn` * stores their copies. Unfortunately, there is no way for the - * application to know when :type:`ngtcp2_conn` finishes using a + * application to know when :type:`ngtcp2_conn` finished using a * specific :type:`ngtcp2_path` object in mid connection, which * means that the application cannot free the data pointed by this * field. Therefore, it is advised to use this field only when the @@ -2069,11 +1943,11 @@ typedef struct ngtcp2_path_storage { /** * :member:`local_addrbuf` is a buffer to store local address. */ - ngtcp2_sockaddr_storage local_addrbuf; + ngtcp2_sockaddr_union local_addrbuf; /** * :member:`remote_addrbuf` is a buffer to store remote address. */ - ngtcp2_sockaddr_storage remote_addrbuf; + ngtcp2_sockaddr_union remote_addrbuf; } ngtcp2_path_storage; /** @@ -2160,8 +2034,9 @@ typedef struct ngtcp2_crypto_cipher_ctx { * :type:`ngtcp2_crypto_ctx` is a convenient structure to bind all * crypto related objects in one place. Use * `ngtcp2_crypto_ctx_initial` to initialize this struct for Initial - * packet encryption. For Handshake and 1RTT packets, use - * `ngtcp2_crypto_ctx_tls`. + * packet encryption. For Handshake and 1-RTT packets, use + * `ngtcp2_crypto_ctx_tls`. For 0-RTT packets, use + * `ngtcp2_crypto_ctx_tls_early`. */ typedef struct ngtcp2_crypto_ctx { /** @@ -2191,60 +2066,56 @@ typedef struct ngtcp2_crypto_ctx { /** * @function * - * `ngtcp2_encode_transport_params` encodes |params| in |dest| of + * `ngtcp2_transport_params_encode` encodes |params| in |dest| of * length |destlen|. * * If |dest| is NULL, and |destlen| is zero, this function just * returns the number of bytes required to store the encoded transport * parameters. * - * This function returns the number of written, or one of the + * This function returns the number of bytes written, or one of the * following negative error codes: * * :macro:`NGTCP2_ERR_NOBUF` * Buffer is too small. - * :macro:`NGTCP2_ERR_INVALID_ARGUMENT` - * |exttype| is invalid. */ -NGTCP2_EXTERN ngtcp2_ssize ngtcp2_encode_transport_params_versioned( - uint8_t *dest, size_t destlen, ngtcp2_transport_params_type exttype, - int transport_params_version, const ngtcp2_transport_params *params); +NGTCP2_EXTERN ngtcp2_ssize ngtcp2_transport_params_encode_versioned( + uint8_t *dest, size_t destlen, int transport_params_version, + const ngtcp2_transport_params *params); /** * @function * - * `ngtcp2_decode_transport_params` decodes transport parameters in + * `ngtcp2_transport_params_decode` decodes transport parameters in * |data| of length |datalen|, and stores the result in the object * pointed by |params|. * - * If the optional parameters are missing, the default value is - * assigned. + * If an optional parameter is missing, the default value is assigned. * * The following fields may point to somewhere inside the buffer * pointed by |data| of length |datalen|: * - * - :member:`ngtcp2_transport_params.version_info.other_versions - * ` + * - :member:`ngtcp2_transport_params.version_info.available_versions + * ` * * This function returns 0 if it succeeds, or one of the following * negative error codes: * - * :macro:`NGTCP2_ERR_REQUIRED_TRANSPORT_PARAM` - * The required parameter is missing. * :macro:`NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM` * The input is malformed. 
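The sizing convention described above (NULL dest with zero destlen returns the required length) suggests a simple two-pass encoder. The sketch assumes the unversioned ngtcp2_transport_params_encode() wrapper that pairs with the _versioned declaration shown in the hunk.

#include <stdlib.h>
#include <ngtcp2/ngtcp2.h>

/* Encode |params| into a freshly allocated buffer.  Returns NULL on
 * failure; on success the caller owns the buffer and *plen holds its
 * length. */
static uint8_t *encode_params(const ngtcp2_transport_params *params,
                              size_t *plen) {
  ngtcp2_ssize n = ngtcp2_transport_params_encode(NULL, 0, params);
  uint8_t *buf;

  if (n < 0) {
    return NULL;
  }

  buf = malloc((size_t)n);
  if (buf == NULL) {
    return NULL;
  }

  n = ngtcp2_transport_params_encode(buf, (size_t)n, params);
  if (n < 0) { /* e.g. NGTCP2_ERR_NOBUF */
    free(buf);
    return NULL;
  }

  *plen = (size_t)n;
  return buf;
}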
*/ -NGTCP2_EXTERN int ngtcp2_decode_transport_params_versioned( - int transport_params_version, ngtcp2_transport_params *params, - ngtcp2_transport_params_type exttype, const uint8_t *data, size_t datalen); +NGTCP2_EXTERN int +ngtcp2_transport_params_decode_versioned(int transport_params_version, + ngtcp2_transport_params *params, + const uint8_t *data, size_t datalen); /** * @function * - * `ngtcp2_decode_transport_params_new` decodes transport parameters + * `ngtcp2_transport_params_decode_new` decodes transport parameters * in |data| of length |datalen|, and stores the result in the object * allocated dynamically. The pointer to the allocated object is - * assigned to |*pparams|. Unlike `ngtcp2_decode_transport_params`, + * assigned to |*pparams|. Unlike `ngtcp2_transport_params_decode`, * all direct and indirect fields are also allocated dynamically if * needed. * @@ -2261,22 +2132,21 @@ NGTCP2_EXTERN int ngtcp2_decode_transport_params_versioned( * This function returns 0 if it succeeds, or one of the following * negative error codes: * - * :macro:`NGTCP2_ERR_REQUIRED_TRANSPORT_PARAM` - * The required parameter is missing. * :macro:`NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM` * The input is malformed. * :macro:`NGTCP2_ERR_NOMEM` * Out of memory. */ -NGTCP2_EXTERN int ngtcp2_decode_transport_params_new( - ngtcp2_transport_params **pparams, ngtcp2_transport_params_type exttype, - const uint8_t *data, size_t datalen, const ngtcp2_mem *mem); +NGTCP2_EXTERN int +ngtcp2_transport_params_decode_new(ngtcp2_transport_params **pparams, + const uint8_t *data, size_t datalen, + const ngtcp2_mem *mem); /** * @function * * `ngtcp2_transport_params_del` frees the |params| which must be - * dynamically allocated by `ngtcp2_decode_transport_params_new`. + * dynamically allocated by `ngtcp2_transport_params_decode_new`. * * |mem| is a memory allocator that allocated |params|. If |mem| is * ``NULL``, the memory allocator returned by `ngtcp2_mem_default()` @@ -2331,15 +2201,16 @@ typedef struct ngtcp2_version_cid { * QUIC version. * * If the given packet is Long header packet, this function extracts - * the version from the packet and assigns it to + * the version from the packet, and assigns it to * :member:`dest->version `. It also * extracts the pointer to the Destination Connection ID and its - * length and assigns them to :member:`dest->dcid + * length, and assigns them to :member:`dest->dcid * ` and :member:`dest->dcidlen * ` respectively. Similarly, it extracts - * the pointer to the Source Connection ID and its length and assigns + * the pointer to the Source Connection ID and its length, and assigns * them to :member:`dest->scid ` and * :member:`dest->scidlen ` respectively. + * |short_dcidlen| is ignored. * * If the given packet is Short header packet, :member:`dest->version * ` will be 0, :member:`dest->scid @@ -2348,7 +2219,7 @@ typedef struct ngtcp2_version_cid { * Because the Short header packet does not have the length of * Destination Connection ID, the caller has to pass the length in * |short_dcidlen|. This function extracts the pointer to the - * Destination Connection ID and assigns it to :member:`dest->dcid + * Destination Connection ID, and assigns it to :member:`dest->dcid * `. |short_dcidlen| is assigned to * :member:`dest->dcidlen `. * @@ -2408,12 +2279,12 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, /** * @function * - * `ngtcp2_pkt_decode_hd_short` decodes QUIC short header packet - * header in |pkt| of length |pktlen|. 
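Before a server has a connection object, ngtcp2_pkt_decode_version_cid() (documented above) is the usual way to pick the Destination Connection ID out of an incoming datagram. A routing sketch follows; SHORT_DCID_LEN stands for whatever fixed Connection ID length this hypothetical server issues, and the parameter order is assumed to follow the ngtcp2_version_cid description in the hunk.

#include <ngtcp2/ngtcp2.h>

/* Length of the Connection IDs this server hands out; Short header
 * packets do not carry the length, so the caller must know it. */
#define SHORT_DCID_LEN 16

static int route_datagram(const uint8_t *data, size_t datalen) {
  ngtcp2_version_cid vc;
  int rv = ngtcp2_pkt_decode_version_cid(&vc, data, datalen, SHORT_DCID_LEN);

  if (rv != 0) {
    /* Undecodable, or the library is asking us to respond, e.g. with a
     * Version Negotiation packet built by
     * ngtcp2_pkt_write_version_negotiation(). */
    return rv;
  }

  /* vc.dcid/vc.dcidlen select the connection; vc.version is 0 for Short
   * header packets, nonzero for Long header packets. */
  return 0;
}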
|dcidlen| is the length of - * DCID in packet header. Short header packet does not encode the - * length of connection ID, thus we need the input from the outside. - * This function only parses the input just before packet number - * field. This function can handle Connection ID up to + * `ngtcp2_pkt_decode_hd_short` decodes QUIC short header in |pkt| of + * length |pktlen|. Short header packet does not encode the length of + * Connection ID, thus we need the input from the outside. |dcidlen| + * is the length of Destination Connection ID in packet header. This + * function only parses the input just before packet number field. + * This function can handle Connection ID up to * :macro:`NGTCP2_MAX_CIDLEN`. Consider to use * `ngtcp2_pkt_decode_version_cid` to get longer Connection ID. It * stores the result in the object pointed by |dest|, and returns the @@ -2461,13 +2332,13 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_stateless_reset( * * `ngtcp2_pkt_write_version_negotiation` writes Version Negotiation * packet in the buffer pointed by |dest| whose length is |destlen|. - * |unused_random| should be generated randomly. |dcid| is the - * destination connection ID which appears in a packet as a source - * connection ID sent by client which caused version negotiation. - * Similarly, |scid| is the source connection ID which appears in a - * packet as a destination connection ID sent by client. |sv| is a - * list of supported versions, and |nsv| specifies the number of - * supported versions included in |sv|. + * |unused_random| should be generated randomly. |dcid| is a + * Connection ID which appeared in a packet as a Source Connection ID + * sent by client which caused version negotiation. Similarly, |scid| + * is a Connection ID which appeared in a packet as a Destination + * Connection ID sent by client. |sv| is a list of supported + * versions, and |nsv| specifies the number of supported versions + * included in |sv|. * * This function returns the number of bytes written to the buffer, or * one of the following negative error codes: @@ -2494,7 +2365,7 @@ typedef struct ngtcp2_conn ngtcp2_conn; * asks TLS stack to produce first TLS cryptographic handshake data. * * This implementation of this callback must get the first handshake - * data from TLS stack and pass it to ngtcp2 library using + * data from TLS stack, and pass it to ngtcp2 library using * `ngtcp2_conn_submit_crypto_data` function. Make sure that before * calling `ngtcp2_conn_submit_crypto_data` function, client * application must create initial packet protection keys and IVs, and @@ -2514,9 +2385,9 @@ typedef int (*ngtcp2_client_initial)(ngtcp2_conn *conn, void *user_data); * Initial packet from client. An server application must implement * this callback, and generate initial keys and IVs for both * transmission and reception. Install them using - * `ngtcp2_conn_install_initial_key`. |dcid| is the destination - * connection ID which client generated randomly. It is used to - * derive initial packet protection keys. + * `ngtcp2_conn_install_initial_key`. |dcid| is the Destination + * Connection ID in Initial packet received from client. It is used + * to derive initial packet protection keys. * * The callback function must return 0 if it succeeds. If an error * occurs, return :macro:`NGTCP2_ERR_CALLBACK_FAILURE` which makes the @@ -2529,45 +2400,42 @@ typedef int (*ngtcp2_recv_client_initial)(ngtcp2_conn *conn, /** * @enum * - * :type:`ngtcp2_crypto_level` is encryption level. 
+ * :type:`ngtcp2_encryption_level` is QUIC encryption level. */ -typedef enum ngtcp2_crypto_level { +typedef enum ngtcp2_encryption_level { /** - * :enum:`NGTCP2_CRYPTO_LEVEL_INITIAL` is Initial Keys encryption + * :enum:`NGTCP2_ENCRYPTION_LEVEL_INITIAL` is Initial encryption * level. */ - NGTCP2_CRYPTO_LEVEL_INITIAL, + NGTCP2_ENCRYPTION_LEVEL_INITIAL, /** - * :enum:`NGTCP2_CRYPTO_LEVEL_HANDSHAKE` is Handshake Keys - * encryption level. + * :enum:`NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE` is Handshake encryption + * level. */ - NGTCP2_CRYPTO_LEVEL_HANDSHAKE, + NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE, /** - * :enum:`NGTCP2_CRYPTO_LEVEL_APPLICATION` is Application Data - * (1-RTT) Keys encryption level. + * :enum:`NGTCP2_ENCRYPTION_LEVEL_1RTT` is 1-RTT encryption level. */ - NGTCP2_CRYPTO_LEVEL_APPLICATION, + NGTCP2_ENCRYPTION_LEVEL_1RTT, /** - * :enum:`NGTCP2_CRYPTO_LEVEL_EARLY` is Early Data (0-RTT) Keys - * encryption level. + * :enum:`NGTCP2_ENCRYPTION_LEVEL_0RTT` is 0-RTT encryption level. */ - NGTCP2_CRYPTO_LEVEL_EARLY -} ngtcp2_crypto_level; + NGTCP2_ENCRYPTION_LEVEL_0RTT +} ngtcp2_encryption_level; /** * @functypedef * * :type`ngtcp2_recv_crypto_data` is invoked when crypto data is - * received. The received data is pointed to by |data|, and its - * length is |datalen|. The |offset| specifies the offset where - * |data| is positioned. |user_data| is the arbitrary pointer passed - * to `ngtcp2_conn_client_new` or `ngtcp2_conn_server_new`. The - * ngtcp2 library ensures that the crypto data is passed to the - * application in the increasing order of |offset|. |datalen| is - * always strictly greater than 0. |crypto_level| indicates the - * encryption level where this data is received. Crypto data can - * never be received in - * :enum:`ngtcp2_crypto_level.NGTCP2_CRYPTO_LEVEL_EARLY`. + * received. The received data is pointed by |data|, and its length + * is |datalen|. The |offset| specifies the offset where |data| is + * positioned. |user_data| is the arbitrary pointer passed to + * `ngtcp2_conn_client_new` or `ngtcp2_conn_server_new`. The ngtcp2 + * library ensures that the crypto data is passed to the application + * in the increasing order of |offset|. |datalen| is always strictly + * greater than 0. |encryption_level| indicates the encryption level + * where this data is received. Crypto data can never be received in + * :enum:`ngtcp2_encryption_level.NGTCP2_ENCRYPTION_LEVEL_0RTT`. * * The application should provide the given data to TLS stack. * @@ -2591,7 +2459,7 @@ typedef enum ngtcp2_crypto_level { * return immediately. */ typedef int (*ngtcp2_recv_crypto_data)(ngtcp2_conn *conn, - ngtcp2_crypto_level crypto_level, + ngtcp2_encryption_level encryption_level, uint64_t offset, const uint8_t *data, size_t datalen, void *user_data); @@ -2645,9 +2513,9 @@ typedef int (*ngtcp2_recv_version_negotiation)(ngtcp2_conn *conn, * This callback is client use only. * * Application must regenerate packet protection key, IV, and header - * protection key for Initial packets using the destination connection - * ID obtained by :member:`hd->scid ` and install - * them by calling `ngtcp2_conn_install_initial_key()`. + * protection key for Initial packets using the Destination Connection + * ID obtained by :member:`hd->scid `, and install + * them by calling `ngtcp2_conn_install_initial_key`. * * 0-RTT data accepted by the ngtcp2 library will be automatically * retransmitted as 0-RTT data by the library. 
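A callback skeleton for the renamed ngtcp2_recv_crypto_data type above; the signature is taken verbatim from the hunk, while feed_tls_handshake_data() is a hypothetical application helper standing in for the TLS stack hand-off (projects commonly delegate this to the ngtcp2_crypto helper library instead).

#include <ngtcp2/ngtcp2.h>

/* Hypothetical helper: pass handshake bytes to the TLS stack at the given
 * encryption level.  Returns 0 on success. */
extern int feed_tls_handshake_data(void *tls, ngtcp2_encryption_level level,
                                   const uint8_t *data, size_t datalen);

static int on_recv_crypto_data(ngtcp2_conn *conn,
                               ngtcp2_encryption_level encryption_level,
                               uint64_t offset, const uint8_t *data,
                               size_t datalen, void *user_data) {
  (void)conn;
  (void)offset; /* the library delivers crypto data in increasing order */

  if (feed_tls_handshake_data(user_data, encryption_level, data,
                              datalen) != 0) {
    return NGTCP2_ERR_CALLBACK_FAILURE;
  }

  return 0;
}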
@@ -2666,12 +2534,12 @@ typedef int (*ngtcp2_recv_retry)(ngtcp2_conn *conn, const ngtcp2_pkt_hd *hd, * application to encrypt packet payload. The packet payload to * encrypt is passed as |plaintext| of length |plaintextlen|. The * AEAD cipher is |aead|. |aead_ctx| is the AEAD cipher context - * object which is initialized with encryption key. The nonce is - * passed as |nonce| of length |noncelen|. The Additional + * object which is initialized with the specific encryption key. The + * nonce is passed as |nonce| of length |noncelen|. The Additional * Authenticated Data is passed as |aad| of length |aadlen|. * * The implementation of this callback must encrypt |plaintext| using - * the negotiated cipher suite and write the ciphertext into the + * the negotiated cipher suite, and write the ciphertext into the * buffer pointed by |dest|. |dest| has enough capacity to store the * ciphertext and any additional AEAD tag data. * @@ -2694,12 +2562,12 @@ typedef int (*ngtcp2_encrypt)(uint8_t *dest, const ngtcp2_crypto_aead *aead, * application to decrypt packet payload. The packet payload to * decrypt is passed as |ciphertext| of length |ciphertextlen|. The * AEAD cipher is |aead|. |aead_ctx| is the AEAD cipher context - * object which is initialized with decryption key. The nonce is - * passed as |nonce| of length |noncelen|. The Additional + * object which is initialized with the specific decryption key. The + * nonce is passed as |nonce| of length |noncelen|. The Additional * Authenticated Data is passed as |aad| of length |aadlen|. * * The implementation of this callback must decrypt |ciphertext| using - * the negotiated cipher suite and write the ciphertext into the + * the negotiated cipher suite, and write the ciphertext into the * buffer pointed by |dest|. |dest| has enough capacity to store the * cleartext. * @@ -2722,12 +2590,12 @@ typedef int (*ngtcp2_decrypt)(uint8_t *dest, const ngtcp2_crypto_aead *aead, * :type:`ngtcp2_hp_mask` is invoked when the ngtcp2 library asks the * application to produce a mask to encrypt or decrypt packet header. * The encryption cipher is |hp|. |hp_ctx| is the cipher context - * object which is initialized with header protection key. The sample - * is passed as |sample| which is :macro:`NGTCP2_HP_SAMPLELEN` bytes - * long. + * object which is initialized with the specific header protection + * key. The sample is passed as |sample| which is + * :macro:`NGTCP2_HP_SAMPLELEN` bytes long. * * The implementation of this callback must produce a mask using the - * header protection cipher suite specified by QUIC specification and + * header protection cipher suite specified by QUIC specification, and * write the result into the buffer pointed by |dest|. The length of * the mask must be at least :macro:`NGTCP2_HP_MASKLEN`. The library * only uses the first :macro:`NGTCP2_HP_MASKLEN` bytes of the @@ -2746,7 +2614,7 @@ typedef int (*ngtcp2_hp_mask)(uint8_t *dest, const ngtcp2_crypto_cipher *hp, /** * @macrosection * - * Stream data flags + * STREAM frame data flags */ /** @@ -2767,11 +2635,11 @@ typedef int (*ngtcp2_hp_mask)(uint8_t *dest, const ngtcp2_crypto_cipher *hp, /** * @macro * - * :macro:`NGTCP2_STREAM_DATA_FLAG_EARLY` indicates that this chunk of - * data contains data received in 0RTT packet and the handshake has + * :macro:`NGTCP2_STREAM_DATA_FLAG_0RTT` indicates that this chunk of + * data contains data received in 0-RTT packet, and the handshake has * not completed yet, which means that the data might be replayed. 
*/ -#define NGTCP2_STREAM_DATA_FLAG_EARLY 0x02u +#define NGTCP2_STREAM_DATA_FLAG_0RTT 0x02u /** * @functypedef @@ -2787,9 +2655,9 @@ typedef int (*ngtcp2_hp_mask)(uint8_t *dest, const ngtcp2_crypto_cipher *hp, * overlap. The data is passed as |data| of length |datalen|. * |datalen| may be 0 if and only if |fin| is nonzero. * - * If :macro:`NGTCP2_STREAM_DATA_FLAG_EARLY` is set in |flags|, it - * indicates that a part of or whole data was received in 0RTT packet - * and a handshake has not completed yet. + * If :macro:`NGTCP2_STREAM_DATA_FLAG_0RTT` is set in |flags|, it + * indicates that a part of or whole data was received in 0-RTT + * packet, and a handshake has not completed yet. * * The callback function must return 0 if it succeeds, or * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` which makes the library return @@ -2804,9 +2672,9 @@ typedef int (*ngtcp2_recv_stream_data)(ngtcp2_conn *conn, uint32_t flags, * @functypedef * * :type:`ngtcp2_stream_open` is a callback function which is called - * when remote stream is opened by peer. This function is not called - * if stream is opened by implicitly (we might reconsider this - * behaviour). + * when remote stream is opened by a remote endpoint. This function + * is not called if stream is opened by implicitly (we might + * reconsider this behaviour later). * * The implementation of this callback should return 0 if it succeeds. * Returning :macro:`NGTCP2_ERR_CALLBACK_FAILURE` makes the library @@ -2883,16 +2751,19 @@ typedef int (*ngtcp2_stream_reset)(ngtcp2_conn *conn, int64_t stream_id, * @functypedef * * :type:`ngtcp2_acked_stream_data_offset` is a callback function - * which is called when stream data is acked, and application can free - * the data. The acked range of data is [offset, offset + datalen). - * For a given stream_id, this callback is called sequentially in - * increasing order of |offset| without any overlap. |datalen| is - * normally strictly greater than 0. One exception is that when a - * packet which includes STREAM frame which has fin flag set, and 0 - * length data, this callback is invoked with 0 passed as |datalen|. - * - * If a stream is closed prematurely and stream data is still + * which is called when stream data in range [|offset|, |offset| + + * |datalen|) is acknowledged, and application can free the portion of + * data. For a given |stream_id|, this callback is called + * sequentially in increasing order of |offset| without any overlap. + * |datalen| is normally strictly greater than 0. One exception is + * that when a STREAM frame has fin flag set and 0 length data, this + * callback is invoked with |datalen| == 0. + * + * If a stream is closed prematurely, and stream data is still * in-flight, this callback function is not called for those data. + * After :member:`ngtcp2_callbacks.stream_close` is called for a + * particular stream, |conn| does not touch data for the closed stream + * again, and application can free all unacknowledged stream data. * * The implementation of this callback should return 0 if it succeeds. * Returning :macro:`NGTCP2_ERR_CALLBACK_FAILURE` makes the library @@ -2938,7 +2809,7 @@ typedef int (*ngtcp2_extend_max_streams)(ngtcp2_conn *conn, * :type:`ngtcp2_extend_max_stream_data` is a callback function which * is invoked when max stream data is extended. |stream_id| * identifies the stream. |max_data| is a cumulative number of bytes - * the endpoint can send on this stream. + * an endpoint can send on this stream. * * The callback function must return 0 if it succeeds. 
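The renamed 0-RTT flag above is easiest to see in a recv_stream_data handler. The sketch assumes the remainder of the ngtcp2_recv_stream_data signature (stream_id, offset, data, datalen, user_data, stream_user_data), which is truncated in this hunk, and that NGTCP2_STREAM_DATA_FLAG_FIN is still the companion flag for the final chunk.

#include <ngtcp2/ngtcp2.h>

static int on_recv_stream_data(ngtcp2_conn *conn, uint32_t flags,
                               int64_t stream_id, uint64_t offset,
                               const uint8_t *data, size_t datalen,
                               void *user_data, void *stream_user_data) {
  (void)conn; (void)stream_id; (void)offset;
  (void)user_data; (void)stream_user_data;

  if (flags & NGTCP2_STREAM_DATA_FLAG_0RTT) {
    /* Delivered from a 0-RTT packet before handshake completion; the
     * application must tolerate replays. */
  }

  /* consume data[0 .. datalen) here */
  (void)data; (void)datalen;

  if (flags & NGTCP2_STREAM_DATA_FLAG_FIN) {
    /* The remote endpoint will send no more data on this stream. */
  }

  return 0;
}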
Returning * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` makes the library call return @@ -2952,10 +2823,10 @@ typedef int (*ngtcp2_extend_max_stream_data)(ngtcp2_conn *conn, /** * @functypedef * - * :type:`ngtcp2_rand` is a callback function to get randomized byte - * string from application. Application must fill random |destlen| - * bytes to the buffer pointed by |dest|. The generated bytes are - * used only in non-cryptographic context. + * :type:`ngtcp2_rand` is a callback function to get random data of + * length |destlen|. Application must fill random |destlen| bytes to + * the buffer pointed by |dest|. The generated data is used only in + * non-cryptographic context. */ typedef void (*ngtcp2_rand)(uint8_t *dest, size_t destlen, const ngtcp2_rand_ctx *rand_ctx); @@ -2965,11 +2836,11 @@ typedef void (*ngtcp2_rand)(uint8_t *dest, size_t destlen, * * :type:`ngtcp2_get_new_connection_id` is a callback function to ask * an application for new connection ID. Application must generate - * new unused connection ID with the exact |cidlen| bytes and store it - * in |cid|. It also has to generate stateless reset token into - * |token|. The length of stateless reset token is + * new unused connection ID with the exact |cidlen| bytes, and store + * it in |cid|. It also has to generate a stateless reset token, and + * store it in |token|. The length of stateless reset token is * :macro:`NGTCP2_STATELESS_RESET_TOKENLEN` and it is guaranteed that - * the buffer pointed by |cid| has the sufficient space to store the + * the buffer pointed by |token| has the sufficient space to store the * token. * * The callback function must return 0 if it succeeds. Returning @@ -2985,7 +2856,9 @@ typedef int (*ngtcp2_get_new_connection_id)(ngtcp2_conn *conn, ngtcp2_cid *cid, * * :type:`ngtcp2_remove_connection_id` is a callback function which * notifies the application that connection ID |cid| is no longer used - * by remote endpoint. + * by a remote endpoint. This Connection ID was previously offered by + * a local endpoint, and a remote endpoint could use it as Destination + * Connection ID when sending QUIC packet. * * The callback function must return 0 if it succeeds. Returning * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` makes the library call return @@ -3005,15 +2878,15 @@ typedef int (*ngtcp2_remove_connection_id)(ngtcp2_conn *conn, * |current_tx_secret| of length |secretlen|. They are decryption and * encryption secrets respectively. * - * The application has to generate new secrets and keys for both - * encryption and decryption, and write decryption secret and IV to - * the buffer pointed by |rx_secret| and |rx_iv| respectively. It - * also has to create new AEAD cipher context object with new - * decryption key and initialize |rx_aead_ctx| with it. Similarly, - * write encryption secret and IV to the buffer pointed by |tx_secret| - * and |tx_iv|. Create new AEAD cipher context object with new - * encryption key and initialize |tx_aead_ctx| with it. All given - * buffers have the enough capacity to store secret, key and IV. + * The application must generate new secrets and keys for both + * encryption and decryption. It must write decryption secret and IV + * to the buffer pointed by |rx_secret| and |rx_iv| respectively. It + * also must create new AEAD cipher context object with new decryption + * key and initialize |rx_aead_ctx| with it. Similarly, write + * encryption secret and IV to the buffer pointed by |tx_secret| and + * |tx_iv|. 
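The clarified wording above (the token buffer, not the CID buffer, is guaranteed to fit the stateless reset token) is worth pinning down in code. The sketch assumes the rest of the ngtcp2_get_new_connection_id signature (token, cidlen, user_data), which the hunk truncates, and app_random_bytes() is a hypothetical application RNG.

#include <ngtcp2/ngtcp2.h>

/* Hypothetical application RNG; must be unpredictable. */
extern void app_random_bytes(uint8_t *buf, size_t len);

static int on_get_new_connection_id(ngtcp2_conn *conn, ngtcp2_cid *cid,
                                    uint8_t *token, size_t cidlen,
                                    void *user_data) {
  (void)conn; (void)user_data;

  /* Exactly cidlen bytes, as requested by the library.  A real server
   * should also make sure the ID is not already in use. */
  app_random_bytes(cid->data, cidlen);
  cid->datalen = cidlen;

  /* The token buffer holds NGTCP2_STATELESS_RESET_TOKENLEN bytes. */
  app_random_bytes(token, NGTCP2_STATELESS_RESET_TOKENLEN);

  return 0;
}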
Create new AEAD cipher context object with new encryption + * key and initialize |tx_aead_ctx| with it. All given buffers have + * the enough capacity to store secret, key and IV. * * The callback function must return 0 if it succeeds. Returning * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` makes the library call return @@ -3048,14 +2921,25 @@ typedef int (*ngtcp2_update_key)( */ #define NGTCP2_PATH_VALIDATION_FLAG_PREFERRED_ADDR 0x01u +/** + * @macro + * + * :macro:`NGTCP2_PATH_VALIDATION_FLAG_NEW_TOKEN` indicates that + * server should send NEW_TOKEN frame for the new remote address. + * This flag is only set for server. + */ +#define NGTCP2_PATH_VALIDATION_FLAG_NEW_TOKEN 0x02u + /** * @functypedef * * :type:`ngtcp2_path_validation` is a callback function which tells - * the application the outcome of path validation. |flags| is zero or + * an application the outcome of path validation. |flags| is zero or * more of :macro:`NGTCP2_PATH_VALIDATION_FLAG_* * `. |path| is the path that was - * validated. If |res| is + * validated. |old_path| is the path that is previously used before a + * local endpoint has migrated to |path| if |old_path| is not NULL. + * If |res| is * :enum:`ngtcp2_path_validation_result.NGTCP2_PATH_VALIDATION_RESULT_SUCCESS`, * the path validation succeeded. If |res| is * :enum:`ngtcp2_path_validation_result.NGTCP2_PATH_VALIDATION_RESULT_FAILURE`, @@ -3067,6 +2951,7 @@ typedef int (*ngtcp2_update_key)( */ typedef int (*ngtcp2_path_validation)(ngtcp2_conn *conn, uint32_t flags, const ngtcp2_path *path, + const ngtcp2_path *old_path, ngtcp2_path_validation_result res, void *user_data); @@ -3087,10 +2972,10 @@ typedef int (*ngtcp2_path_validation)(ngtcp2_conn *conn, uint32_t flags, * from `ngtcp2_conn_get_path()`. Both :member:`dest->local.addr * ` and :member:`dest->remote.addr * ` point to buffers which are at least - * ``sizeof(struct sockaddr_storage)`` bytes long, respectively. If + * sizeof(:type:`ngtcp2_sockaddr_union`) bytes long, respectively. If * an application denies the preferred addresses, just leave |dest| * unmodified (or set :member:`dest->remote.addrlen - * ` to 0) and return 0. + * ` to 0), and return 0. * * The callback function must return 0 if it succeeds. Returning * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` makes the library call return @@ -3110,12 +2995,12 @@ typedef int (*ngtcp2_select_preferred_addr)(ngtcp2_conn *conn, typedef enum ngtcp2_connection_id_status_type { /** * :enum:`NGTCP2_CONNECTION_ID_STATUS_TYPE_ACTIVATE` indicates that - * a local endpoint starts using new destination Connection ID. + * a local endpoint starts using new Destination Connection ID. */ NGTCP2_CONNECTION_ID_STATUS_TYPE_ACTIVATE, /** * :enum:`NGTCP2_CONNECTION_ID_STATUS_TYPE_DEACTIVATE` indicates - * that a local endpoint stops using a given destination Connection + * that a local endpoint stops using a given Destination Connection * ID. */ NGTCP2_CONNECTION_ID_STATUS_TYPE_DEACTIVATE @@ -3125,9 +3010,9 @@ typedef enum ngtcp2_connection_id_status_type { * @functypedef * * :type:`ngtcp2_connection_id_status` is a callback function which is - * called when the status of Connection ID changes. + * called when the status of Destination Connection ID changes. * - * |token| is the associated stateless reset token and it is ``NULL`` + * |token| is the associated stateless reset token, and it is ``NULL`` * if no token is present. 
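Since the ngtcp2_path_validation callback above gained an |old_path| argument and the NEW_TOKEN flag, here is a sketch built directly from the signature in the hunk; what a server actually does when the flag is set is left as a comment because the NEW_TOKEN submission API is outside this hunk.

#include <ngtcp2/ngtcp2.h>

static int on_path_validation(ngtcp2_conn *conn, uint32_t flags,
                              const ngtcp2_path *path,
                              const ngtcp2_path *old_path,
                              ngtcp2_path_validation_result res,
                              void *user_data) {
  (void)conn; (void)path; (void)user_data;

  if (res != NGTCP2_PATH_VALIDATION_RESULT_SUCCESS) {
    return 0; /* validation failed; nothing to update */
  }

  if (old_path != NULL) {
    /* The local endpoint migrated away from old_path to path. */
  }

  if (flags & NGTCP2_PATH_VALIDATION_FLAG_NEW_TOKEN) {
    /* Server only: queue a NEW_TOKEN frame for the new remote address. */
  }

  return 0;
}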
* * |type| is the one of the value defined in @@ -3138,25 +3023,25 @@ typedef enum ngtcp2_connection_id_status_type { * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` makes the library call return * immediately. */ -typedef int (*ngtcp2_connection_id_status)(ngtcp2_conn *conn, int type, - uint64_t seq, const ngtcp2_cid *cid, - const uint8_t *token, - void *user_data); +typedef int (*ngtcp2_connection_id_status)( + ngtcp2_conn *conn, ngtcp2_connection_id_status_type type, uint64_t seq, + const ngtcp2_cid *cid, const uint8_t *token, void *user_data); /** * @functypedef * * :type:`ngtcp2_recv_new_token` is a callback function which is - * called when new token is received from server. + * called when new token is received from server. This callback is + * client use only. * - * |token| is the received token. + * |token| is the received token of length |tokenlen| bytes long. * * The callback function must return 0 if it succeeds. Returning * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` makes the library call return * immediately. */ -typedef int (*ngtcp2_recv_new_token)(ngtcp2_conn *conn, const ngtcp2_vec *token, - void *user_data); +typedef int (*ngtcp2_recv_new_token)(ngtcp2_conn *conn, const uint8_t *token, + size_t tokenlen, void *user_data); /** * @functypedef @@ -3184,7 +3069,7 @@ typedef void (*ngtcp2_delete_crypto_cipher_ctx)( /** * @macrosection * - * Datagram flags + * DATAGRAM frame flags */ /** @@ -3197,11 +3082,11 @@ typedef void (*ngtcp2_delete_crypto_cipher_ctx)( /** * @macro * - * :macro:`NGTCP2_DATAGRAM_FLAG_EARLY` indicates that DATAGRAM frame - * is received in 0RTT packet and the handshake has not completed yet, + * :macro:`NGTCP2_DATAGRAM_FLAG_0RTT` indicates that DATAGRAM frame is + * received in 0-RTT packet, and the handshake has not completed yet, * which means that the data might be replayed. */ -#define NGTCP2_DATAGRAM_FLAG_EARLY 0x01u +#define NGTCP2_DATAGRAM_FLAG_0RTT 0x01u /** * @functypedef @@ -3210,8 +3095,8 @@ typedef void (*ngtcp2_delete_crypto_cipher_ctx)( * received. |flags| is bitwise-OR of zero or more of * :macro:`NGTCP2_DATAGRAM_FLAG_* `. * - * If :macro:`NGTCP2_DATAGRAM_FLAG_EARLY` is set in |flags|, it - * indicates that DATAGRAM frame was received in 0RTT packet and a + * If :macro:`NGTCP2_DATAGRAM_FLAG_0RTT` is set in |flags|, it + * indicates that DATAGRAM frame was received in 0-RTT packet, and a * handshake has not completed yet. * * The callback function must return 0 if it succeeds, or @@ -3257,8 +3142,8 @@ typedef int (*ngtcp2_lost_datagram)(ngtcp2_conn *conn, uint64_t dgram_id, * * :type:`ngtcp2_get_path_challenge_data` is a callback function to * ask an application for new data that is sent in PATH_CHALLENGE - * frame. Application must generate new unpredictable exactly - * :macro:`NGTCP2_PATH_CHALLENGE_DATALEN` bytes of random data and + * frame. Application must generate new unpredictable, exactly + * :macro:`NGTCP2_PATH_CHALLENGE_DATALEN` bytes of random data, and * store them into the buffer pointed by |data|. * * The callback function must return 0 if it succeeds. Returning @@ -3296,8 +3181,9 @@ typedef int (*ngtcp2_stream_stop_sending)(ngtcp2_conn *conn, int64_t stream_id, * server, it is called once when the version is negotiated. * * The implementation of this callback must install new Initial keys - * for |version|. Use `ngtcp2_conn_install_vneg_initial_key` to - * install keys. + * for |version| and Destination Connection ID |client_dcid| from + * client. Use `ngtcp2_conn_install_vneg_initial_key` to install + * keys. 
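For PATH_CHALLENGE data, the contract above is simply "exactly NGTCP2_PATH_CHALLENGE_DATALEN unpredictable bytes". A sketch follows; the (conn, data, user_data) parameter list is an assumption, since the typedef itself is not visible in this hunk, and app_random_bytes() is the same hypothetical RNG as before.

#include <ngtcp2/ngtcp2.h>

extern void app_random_bytes(uint8_t *buf, size_t len); /* hypothetical */

static int on_get_path_challenge_data(ngtcp2_conn *conn, uint8_t *data,
                                      void *user_data) {
  (void)conn; (void)user_data;

  app_random_bytes(data, NGTCP2_PATH_CHALLENGE_DATALEN);

  return 0;
}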
* * The callback function must return 0 if it succeeds. Returning * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` makes the library call return @@ -3317,11 +3203,25 @@ typedef int (*ngtcp2_version_negotiation)(ngtcp2_conn *conn, uint32_t version, * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` makes the library call return * immediately. */ -typedef int (*ngtcp2_recv_key)(ngtcp2_conn *conn, ngtcp2_crypto_level level, +typedef int (*ngtcp2_recv_key)(ngtcp2_conn *conn, ngtcp2_encryption_level level, void *user_data); -#define NGTCP2_CALLBACKS_VERSION_V1 1 -#define NGTCP2_CALLBACKS_VERSION NGTCP2_CALLBACKS_VERSION_V1 +/** + * @functypedef + * + * :type:`ngtcp2_tls_early_data_rejected` is invoked when early data + * was rejected by server during TLS handshake, or client decided not + * to attempt early data. + * + * The callback function must return 0 if it succeeds. Returning + * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` makes the library call return + * immediately. + */ +typedef int (*ngtcp2_tls_early_data_rejected)(ngtcp2_conn *conn, + void *user_data); + +#define NGTCP2_CALLBACKS_V1 1 +#define NGTCP2_CALLBACKS_VERSION NGTCP2_CALLBACKS_V1 /** * @struct @@ -3338,8 +3238,9 @@ typedef struct ngtcp2_callbacks { ngtcp2_client_initial client_initial; /** * :member:`recv_client_initial` is a callback function which is - * invoked when a server receives the first packet from client. - * This callback function must be specified for a server application. + * invoked when a server receives the first Initial packet from + * client. This callback function must be specified for a server + * application. */ ngtcp2_recv_client_initial recv_client_initial; /** @@ -3373,23 +3274,23 @@ typedef struct ngtcp2_callbacks { ngtcp2_decrypt decrypt; /** * :member:`hp_mask` is a callback function which is invoked to get - * a mask to encrypt or decrypt packet header. This callback + * a mask to encrypt or decrypt QUIC packet header. This callback * function must be specified. */ ngtcp2_hp_mask hp_mask; /** * :member:`recv_stream_data` is a callback function which is - * invoked when STREAM data, which includes application data, is + * invoked when stream data, which includes application data, is * received. This callback function is optional. */ ngtcp2_recv_stream_data recv_stream_data; /** * :member:`acked_stream_data_offset` is a callback function which - * is invoked when STREAM data, which includes application data, is + * is invoked when stream data, which includes application data, is * acknowledged by a remote endpoint. It tells an application the - * largest offset of acknowledged STREAM data without a gap so that - * application can free memory for the data. This callback function - * is optional. + * largest offset of acknowledged stream data without a gap so that + * application can free memory for the data up to that offset. This + * callback function is optional. */ ngtcp2_acked_stream_data_offset acked_stream_data_offset; /** @@ -3431,8 +3332,8 @@ typedef struct ngtcp2_callbacks { ngtcp2_extend_max_streams extend_max_local_streams_uni; /** * :member:`rand` is a callback function which is invoked when the - * library needs sequence of random data. This callback function - * must be specified. + * library needs random data. This callback function must be + * specified. */ ngtcp2_rand rand; /** @@ -3450,7 +3351,7 @@ typedef struct ngtcp2_callbacks { /** * :member:`update_key` is a callback function which is invoked when * the library tells an application that it must update keying - * materials and install new keys. 
This callback function must be + * materials, and install new keys. This callback function must be * specified. */ ngtcp2_update_key update_key; @@ -3463,8 +3364,8 @@ typedef struct ngtcp2_callbacks { /** * :member:`select_preferred_addr` is a callback function which is * invoked when the library asks a client to select preferred - * address presented by a server. This callback function is - * optional. + * address presented by a server. If not set, client ignores + * preferred addresses. This callback function is optional. */ ngtcp2_select_preferred_addr select_preferred_addr; /** @@ -3489,24 +3390,24 @@ typedef struct ngtcp2_callbacks { ngtcp2_extend_max_streams extend_max_remote_streams_uni; /** * :member:`extend_max_stream_data` is callback function which is - * invoked when the maximum offset of STREAM data that a local + * invoked when the maximum offset of stream data that a local * endpoint can send is increased. This callback function is * optional. */ ngtcp2_extend_max_stream_data extend_max_stream_data; /** * :member:`dcid_status` is a callback function which is invoked - * when the new destination Connection ID is activated or the - * activated destination Connection ID is now deactivated. This + * when the new Destination Connection ID is activated, or the + * activated Destination Connection ID is now deactivated. This * callback function is optional. */ ngtcp2_connection_id_status dcid_status; /** * :member:`handshake_confirmed` is a callback function which is * invoked when both endpoints agree that handshake has finished. - * This field is ignored by server because handshake_completed - * indicates the handshake confirmation for server. This callback - * function is optional. + * This field is ignored by server because + * :member:`handshake_completed` also indicates the handshake + * confirmation for server. This callback function is optional. */ ngtcp2_handshake_confirmed handshake_confirmed; /** @@ -3535,20 +3436,20 @@ typedef struct ngtcp2_callbacks { ngtcp2_recv_datagram recv_datagram; /** * :member:`ack_datagram` is a callback function which is invoked - * when a packet containing DATAGRAM frame is acknowledged. This - * callback function is optional. + * when a QUIC packet containing DATAGRAM frame is acknowledged by a + * remote endpoint. This callback function is optional. */ ngtcp2_ack_datagram ack_datagram; /** * :member:`lost_datagram` is a callback function which is invoked - * when a packet containing DATAGRAM frame is declared lost. This - * callback function is optional. + * when a QUIC packet containing DATAGRAM frame is declared lost. + * This callback function is optional. */ ngtcp2_lost_datagram lost_datagram; /** * :member:`get_path_challenge_data` is a callback function which is - * invoked when the library needs new PATH_CHALLENGE data. This - * callback must be specified. + * invoked when the library needs new data sent along with + * PATH_CHALLENGE frame. This callback must be specified. */ ngtcp2_get_path_challenge_data get_path_challenge_data; /** @@ -3568,16 +3469,23 @@ typedef struct ngtcp2_callbacks { * :member:`recv_rx_key` is a callback function which is invoked * when a new key for decrypting packets is installed during QUIC * cryptographic handshake. It is not called for - * :enum:`ngtcp2_crypto_level.NGTCP2_CRYPTO_LEVEL_INITIAL`. + * :enum:`ngtcp2_encryption_level.NGTCP2_ENCRYPTION_LEVEL_INITIAL`. 
*/ ngtcp2_recv_key recv_rx_key; /** * :member:`recv_tx_key` is a callback function which is invoked * when a new key for encrypting packets is installed during QUIC * cryptographic handshake. It is not called for - * :enum:`ngtcp2_crypto_level.NGTCP2_CRYPTO_LEVEL_INITIAL`. + * :enum:`ngtcp2_encryption_level.NGTCP2_ENCRYPTION_LEVEL_INITIAL`. */ ngtcp2_recv_key recv_tx_key; + /** + * :member:`tls_early_data_rejected` is a callback function which is + * invoked when server rejected early data during TLS handshake, or + * client decided not to attempt early data. This callback function + * is only used by client. + */ + ngtcp2_tls_early_data_rejected tls_early_data_rejected; } ngtcp2_callbacks; /** @@ -3591,7 +3499,7 @@ typedef struct ngtcp2_callbacks { * * The primary use case of this function is for server to send * CONNECTION_CLOSE frame in Initial packet to close connection - * without committing the state when validating Retry token fails. + * without committing any state when validating Retry token fails. * * This function returns the number of bytes written if it succeeds, * or one of the following negative error codes: @@ -3613,11 +3521,11 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_connection_close( * @function * * `ngtcp2_pkt_write_retry` writes Retry packet in the buffer pointed - * by |dest| whose length is |destlen|. |dcid| is the destination - * connection ID which appeared in a packet as a source connection ID - * sent by client. |scid| is a server chosen source connection ID. - * |odcid| specifies Original Destination Connection ID which appeared - * in a packet as a destination connection ID sent by client. |token| + * by |dest| whose length is |destlen|. |dcid| is the Connection ID + * which appeared in a packet as a Source Connection ID sent by + * client. |scid| is a server chosen Source Connection ID. |odcid| + * specifies Original Destination Connection ID which appeared in a + * packet as a Destination Connection ID sent by client. |token| * specifies Retry Token, and |tokenlen| specifies its length. |aead| * must be AEAD_AES_128_GCM. |aead_ctx| must be initialized with * :macro:`NGTCP2_RETRY_KEY` as an encryption key. @@ -3644,17 +3552,14 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_pkt_write_retry( * * `ngtcp2_accept` is used by server implementation, and decides * whether packet |pkt| of length |pktlen| from client is acceptable - * for the very initial packet to a connection. + * for the very first packet to a connection. * - * If |dest| is not ``NULL`` and the function returns 0, or - * :macro:`NGTCP2_ERR_RETRY`, the decoded packet header is stored to - * the object pointed by |dest|. + * If |dest| is not ``NULL`` and the function returns 0, the decoded + * packet header is stored in the object pointed by |dest|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * - * :macro:`NGTCP2_ERR_RETRY` - * Retry packet should be sent. * :macro:`NGTCP2_ERR_INVALID_ARGUMENT` * The packet is not acceptable for the very first packet to a new * connection; or the function failed to parse the packet header. @@ -3666,17 +3571,21 @@ NGTCP2_EXTERN int ngtcp2_accept(ngtcp2_pkt_hd *dest, const uint8_t *pkt, * @function * * `ngtcp2_conn_client_new` creates new :type:`ngtcp2_conn`, and - * initializes it as client. |dcid| is randomized destination - * connection ID. |scid| is source connection ID. - * |client_chosen_version| is a QUIC version that a client chooses. 
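Pulling the callback changes together, a minimal sketch of populating ngtcp2_callbacks with the handlers sketched earlier in this section. Zero-initializing the struct and setting only what the application implements mirrors common ngtcp2 usage; the crypto-facing members (client_initial or recv_client_initial, recv_crypto_data, encrypt, decrypt, hp_mask, update_key, rand, the delete_crypto_* pair, and so on) are typically delegated to the ngtcp2_crypto helper library and are not repeated here.

#include <string.h>
#include <ngtcp2/ngtcp2.h>

/* Handlers sketched earlier in this section. */
extern int on_recv_stream_data(ngtcp2_conn *conn, uint32_t flags,
                               int64_t stream_id, uint64_t offset,
                               const uint8_t *data, size_t datalen,
                               void *user_data, void *stream_user_data);
extern int on_get_new_connection_id(ngtcp2_conn *conn, ngtcp2_cid *cid,
                                    uint8_t *token, size_t cidlen,
                                    void *user_data);
extern int on_path_validation(ngtcp2_conn *conn, uint32_t flags,
                              const ngtcp2_path *path,
                              const ngtcp2_path *old_path,
                              ngtcp2_path_validation_result res,
                              void *user_data);

static void fill_callbacks(ngtcp2_callbacks *cbs) {
  memset(cbs, 0, sizeof(*cbs)); /* unset optional callbacks stay NULL */

  cbs->recv_stream_data = on_recv_stream_data;           /* optional */
  cbs->get_new_connection_id = on_get_new_connection_id; /* must be set */
  cbs->path_validation = on_path_validation;             /* optional */
  /* cbs->tls_early_data_rejected is new, optional, and client only. */
}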
- * |path| is the network path where this QUIC connection is being - * established and must not be ``NULL``. |callbacks|, |settings|, and - * |params| must not be ``NULL``, and the function make a copy of each - * of them. |params| is local QUIC transport parameters and sent to a - * remote endpoint during handshake. |user_data| is the arbitrary - * pointer which is passed to the user-defined callback functions. If - * |mem| is ``NULL``, the memory allocator returned by - * `ngtcp2_mem_default()` is used. + * initializes it as client. On success, it stores the pointer to the + * newly allocated object in |*pconn|. |dcid| is a randomized + * Destination Connection ID which must be longer than or equal to + * :macro:`NGTCP2_MIN_INITIAL_DCIDLEN`. |scid| is a Source Connection + * ID chosen by client. |client_chosen_version| is a QUIC version + * that a client chooses. |path| is the network path where this QUIC + * connection is being established, and must not be ``NULL``. + * |callbacks|, |settings|, and |params| must not be ``NULL``, and the + * function makes a copy of each of them. |params| is a local QUIC + * transport parameters, and sent to a remote endpoint during + * handshake. |user_data| is the arbitrary pointer which is passed to + * the user-defined callback functions. If |mem| is ``NULL``, the + * memory allocator returned by `ngtcp2_mem_default()` is used. + * + * Call `ngtcp2_conn_del` to free memory allocated for |*pconn|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -3696,16 +3605,22 @@ NGTCP2_EXTERN int ngtcp2_conn_client_new_versioned( * @function * * `ngtcp2_conn_server_new` creates new :type:`ngtcp2_conn`, and - * initializes it as server. |dcid| is a destination connection ID. - * |scid| is a source connection ID. |path| is the network path where - * this QUIC connection is being established and must not be ``NULL``. - * |client_chosen_version| is a QUIC version that a client chooses. - * |callbacks|, |settings|, and |params| must not be ``NULL``, and the - * function make a copy of each of them. |params| is local QUIC - * transport parameters and sent to a remote endpoint during - * handshake. |user_data| is the arbitrary pointer which is passed to - * the user-defined callback functions. If |mem| is ``NULL``, the - * memory allocator returned by `ngtcp2_mem_default()` is used. + * initializes it as server. On success, it stores the pointer to the + * newly allocated object in |*pconn|. |dcid| is a Destination + * Connection ID, and is usually the Connection ID that appears in + * client Initial packet as Source Connection ID. |scid| is a Source + * Connection ID chosen by server. |path| is the network path where + * this QUIC connection is being established, and must not be + * ``NULL``. |client_chosen_version| is a QUIC version that a client + * chooses. |callbacks|, |settings|, and |params| must not be + * ``NULL``, and the function makes a copy of each of them. |params| + * is a local QUIC transport parameters, and sent to a remote endpoint + * during handshake. |user_data| is the arbitrary pointer which is + * passed to the user-defined callback functions. If |mem| is + * ``NULL``, the memory allocator returned by `ngtcp2_mem_default()` + * is used. + * + * Call `ngtcp2_conn_del` to free memory allocated for |*pconn|. 
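A client-side construction sketch matching the rewritten ngtcp2_conn_client_new documentation above. It assumes the unversioned wrapper macro keeps the documented argument order (pconn, dcid, scid, path, client_chosen_version, callbacks, settings, params, mem, user_data); passing NULL for mem selects the default allocator as described.

#include <stddef.h>
#include <ngtcp2/ngtcp2.h>

/* dcid: random, at least NGTCP2_MIN_INITIAL_DCIDLEN bytes.
 * scid: chosen by the client.
 * callbacks/settings/params: filled in as sketched above. */
static ngtcp2_conn *create_client(const ngtcp2_cid *dcid,
                                  const ngtcp2_cid *scid,
                                  const ngtcp2_path *path,
                                  const ngtcp2_callbacks *callbacks,
                                  const ngtcp2_settings *settings,
                                  const ngtcp2_transport_params *params,
                                  void *user_data) {
  ngtcp2_conn *conn = NULL;
  int rv = ngtcp2_conn_client_new(&conn, dcid, scid, path,
                                  NGTCP2_PROTO_VER_V1, callbacks, settings,
                                  params, NULL /* default allocator */,
                                  user_data);

  if (rv != 0) {
    return NULL; /* e.g. NGTCP2_ERR_NOMEM */
  }

  return conn; /* release with ngtcp2_conn_del() */
}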
* * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -3741,19 +3656,31 @@ NGTCP2_EXTERN void ngtcp2_conn_del(ngtcp2_conn *conn); * This function must not be called from inside the callback * functions. * - * This function returns 0 if it succeeds, or negative error codes. - * If :macro:`NGTCP2_ERR_RETRY` is returned, application must be a - * server and it must perform address validation by sending Retry - * packet and discard the connection state. If - * :macro:`NGTCP2_ERR_DROP_CONN` is returned, server application must - * drop the connection silently (without sending any CONNECTION_CLOSE - * frame) and discard connection state. If - * :macro:`NGTCP2_ERR_DRAINING` is returned, a connection has entered - * the draining state, and no further packet transmission is allowed. - * If :macro:`NGTCP2_ERR_CRYPTO` is returned, the error happened in - * TLS stack and `ngtcp2_conn_get_tls_alert` returns TLS alert if set. - * - * If any other negative errors are returned, call + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGTCP2_ERR_RETRY` + * Server must perform address validation by sending Retry packet + * (see `ngtcp2_crypto_write_retry` and `ngtcp2_pkt_write_retry`), + * and discard the connection state. Client application does not + * get this error code. + * :macro:`NGTCP2_ERR_DROP_CONN` + * Server application must drop the connection silently (without + * sending any CONNECTION_CLOSE frame), and discard connection + * state. Client application does not get this error code. + * :macro:`NGTCP2_ERR_DRAINING` + * A connection has entered the draining state, and no further + * packet transmission is allowed. + * :macro:`NGTCP2_ERR_CLOSING` + * A connection has entered the closing state, and no further + * packet transmission is allowed. Calling + * `ngtcp2_conn_write_connection_close` makes a connection enter + * this state. + * :macro:`NGTCP2_ERR_CRYPTO` + * An error happened in TLS stack. `ngtcp2_conn_get_tls_alert` + * returns TLS alert if set. + * + * If any other negative error is returned, call * `ngtcp2_conn_write_connection_close` to get terminal packet, and * sending it makes QUIC connection enter the closing state. */ @@ -3767,8 +3694,8 @@ ngtcp2_conn_read_pkt_versioned(ngtcp2_conn *conn, const ngtcp2_path *path, * @function * * `ngtcp2_conn_write_pkt` is equivalent to calling - * `ngtcp2_conn_writev_stream` with -1 as stream_id, no stream data, and - * :macro:`NGTCP2_WRITE_STREAM_FLAG_NONE` as flags. + * `ngtcp2_conn_writev_stream` with -1 as |stream_id|, no stream data, + * and :macro:`NGTCP2_WRITE_STREAM_FLAG_NONE` as flags. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_pkt_versioned( ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, @@ -3777,18 +3704,18 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_pkt_versioned( /** * @function * - * `ngtcp2_conn_handshake_completed` tells |conn| that the TLS stack - * declares TLS handshake completion. This does not mean QUIC + * `ngtcp2_conn_tls_handshake_completed` tells |conn| that the TLS + * stack declares TLS handshake completion. This does not mean QUIC * handshake has completed. The library needs extra conditions to be * met. */ -NGTCP2_EXTERN void ngtcp2_conn_handshake_completed(ngtcp2_conn *conn); +NGTCP2_EXTERN void ngtcp2_conn_tls_handshake_completed(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_get_handshake_completed` returns nonzero if QUIC handshake - * has completed. 
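A sketch of how a server-side event loop might act on the `ngtcp2_conn_read_pkt` return codes enumerated above; send_retry(), drop_connection(), log_tls_alert(), and start_closing() are hypothetical application helpers:

    /* Sketch: dispatch on ngtcp2_conn_read_pkt() results as documented above. */
    int rv = ngtcp2_conn_read_pkt(conn, path, pi, pkt, pktlen, ts);

    switch (rv) {
    case 0:
      break;                       /* packet consumed */
    case NGTCP2_ERR_RETRY:
      send_retry(conn);            /* server only: send Retry, discard state */
      break;
    case NGTCP2_ERR_DROP_CONN:
      drop_connection(conn);       /* server only: drop silently */
      break;
    case NGTCP2_ERR_DRAINING:
    case NGTCP2_ERR_CLOSING:
      break;                       /* no further transmission allowed */
    case NGTCP2_ERR_CRYPTO:
      log_tls_alert(ngtcp2_conn_get_tls_alert(conn));
      start_closing(conn);         /* ngtcp2_conn_write_connection_close() */
      break;
    default:
      start_closing(conn);         /* any other error enters the closing state */
      break;
    }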
+ * `ngtcp2_conn_get_handshake_completed` returns nonzero if QUIC + * handshake has completed. */ NGTCP2_EXTERN int ngtcp2_conn_get_handshake_completed(ngtcp2_conn *conn); @@ -3797,11 +3724,11 @@ NGTCP2_EXTERN int ngtcp2_conn_get_handshake_completed(ngtcp2_conn *conn); * * `ngtcp2_conn_install_initial_key` installs packet protection keying * materials for Initial packets. |rx_aead_ctx| is AEAD cipher - * context object and must be initialized with a decryption key. + * context object, and must be initialized with a decryption key. * |rx_iv| is IV of length |rx_ivlen| for decryption. |rx_hp_ctx| is * a packet header protection cipher context object for decryption. * Similarly, |tx_aead_ctx|, |tx_iv| and |tx_hp_ctx| are for - * encrypting outgoing packets and are the same length with the + * encrypting outgoing packets, and are the same length with the * decryption counterpart . If they have already been set, they are * overwritten. * @@ -3810,14 +3737,16 @@ NGTCP2_EXTERN int ngtcp2_conn_get_handshake_completed(ngtcp2_conn *conn); * * If this function succeeds, |conn| takes ownership of |rx_aead_ctx|, * |rx_hp_ctx|, |tx_aead_ctx|, and |tx_hp_ctx|. - * :type:`ngtcp2_delete_crypto_aead_ctx` and - * :type:`ngtcp2_delete_crypto_cipher_ctx` will be called to delete - * these objects when they are no longer used. If this function - * fails, the caller is responsible to delete them. - * - * After receiving Retry packet, the DCID most likely changes. In - * that case, client application must generate these keying materials - * again based on new DCID and install them again. + * :member:`ngtcp2_callbacks.delete_crypto_aead_ctx` and + * :member:`ngtcp2_callbacks.delete_crypto_cipher_ctx` will be called + * to delete these objects when they are no longer used. If this + * function fails, the caller is responsible to delete them. + * + * After receiving Retry packet, a Destination Connection ID that + * client sends in Initial packet most likely changes. In that case, + * client application must generate these keying materials again based + * on new Destination Connection ID, and install them again with this + * function. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -3837,11 +3766,11 @@ NGTCP2_EXTERN int ngtcp2_conn_install_initial_key( * `ngtcp2_conn_install_vneg_initial_key` installs packet protection * keying materials for Initial packets on compatible version * negotiation for |version|. |rx_aead_ctx| is AEAD cipher context - * object and must be initialized with a decryption key. |rx_iv| is + * object, and must be initialized with a decryption key. |rx_iv| is * IV of length |rx_ivlen| for decryption. |rx_hp_ctx| is a packet * header protection cipher context object for decryption. Similarly, * |tx_aead_ctx|, |tx_iv| and |tx_hp_ctx| are for encrypting outgoing - * packets and are the same length with the decryption counterpart . + * packets, and are the same length with the decryption counterpart. * If they have already been set, they are overwritten. * * |ivlen| must be the minimum length of AEAD nonce, or 8 bytes if @@ -3849,10 +3778,10 @@ NGTCP2_EXTERN int ngtcp2_conn_install_initial_key( * * If this function succeeds, |conn| takes ownership of |rx_aead_ctx|, * |rx_hp_ctx|, |tx_aead_ctx|, and |tx_hp_ctx|. - * :type:`ngtcp2_delete_crypto_aead_ctx` and - * :type:`ngtcp2_delete_crypto_cipher_ctx` will be called to delete - * these objects when they are no longer used. If this function - * fails, the caller is responsible to delete them. 
+ * :member:`ngtcp2_callbacks.delete_crypto_aead_ctx` and + * :member:`ngtcp2_callbacks.delete_crypto_cipher_ctx` will be called + * to delete these objects when they are no longer used. If this + * function fails, the caller is responsible to delete them. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -3880,10 +3809,10 @@ NGTCP2_EXTERN int ngtcp2_conn_install_vneg_initial_key( * that is larger. * * If this function succeeds, |conn| takes ownership of |aead_ctx|, - * and |hp_ctx|. :type:`ngtcp2_delete_crypto_aead_ctx` and - * :type:`ngtcp2_delete_crypto_cipher_ctx` will be called to delete - * these objects when they are no longer used. If this function - * fails, the caller is responsible to delete them. + * and |hp_ctx|. :member:`ngtcp2_callbacks.delete_crypto_aead_ctx` + * and :member:`ngtcp2_callbacks.delete_crypto_cipher_ctx` will be + * called to delete these objects when they are no longer used. If + * this function fails, the caller is responsible to delete them. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -3908,10 +3837,10 @@ NGTCP2_EXTERN int ngtcp2_conn_install_rx_handshake_key( * that is larger. * * If this function succeeds, |conn| takes ownership of |aead_ctx| and - * |hp_ctx|. :type:`ngtcp2_delete_crypto_aead_ctx` and - * :type:`ngtcp2_delete_crypto_cipher_ctx` will be called to delete - * these objects when they are no longer used. If this function - * fails, the caller is responsible to delete them. + * |hp_ctx|. :member:`ngtcp2_callbacks.delete_crypto_aead_ctx` and + * :member:`ngtcp2_callbacks.delete_crypto_cipher_ctx` will be called + * to delete these objects when they are no longer used. If this + * function fails, the caller is responsible to delete them. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -3926,19 +3855,19 @@ NGTCP2_EXTERN int ngtcp2_conn_install_tx_handshake_key( /** * @function * - * `ngtcp2_conn_install_early_key` installs packet protection AEAD + * `ngtcp2_conn_install_0rtt_key` installs packet protection AEAD * cipher context object |aead_ctx|, IV |iv| of length |ivlen|, and * packet header protection cipher context object |hp_ctx| to encrypt - * (for client) or decrypt (for server) 0RTT packets. + * (for client) or decrypt (for server) 0-RTT packets. * * |ivlen| must be the minimum length of AEAD nonce, or 8 bytes if * that is larger. * * If this function succeeds, |conn| takes ownership of |aead_ctx| and - * |hp_ctx|. :type:`ngtcp2_delete_crypto_aead_ctx` and - * :type:`ngtcp2_delete_crypto_cipher_ctx` will be called to delete - * these objects when they are no longer used. If this function - * fails, the caller is responsible to delete them. + * |hp_ctx|. :member:`ngtcp2_callbacks.delete_crypto_aead_ctx` and + * :member:`ngtcp2_callbacks.delete_crypto_cipher_ctx` will be called + * to delete these objects when they are no longer used. If this + * function fails, the caller is responsible to delete them. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -3946,7 +3875,7 @@ NGTCP2_EXTERN int ngtcp2_conn_install_tx_handshake_key( * :macro:`NGTCP2_ERR_NOMEM` * Out of memory. 
*/ -NGTCP2_EXTERN int ngtcp2_conn_install_early_key( +NGTCP2_EXTERN int ngtcp2_conn_install_0rtt_key( ngtcp2_conn *conn, const ngtcp2_crypto_aead_ctx *aead_ctx, const uint8_t *iv, size_t ivlen, const ngtcp2_crypto_cipher_ctx *hp_ctx); @@ -3954,7 +3883,7 @@ NGTCP2_EXTERN int ngtcp2_conn_install_early_key( * @function * * `ngtcp2_conn_install_rx_key` installs packet protection keying - * materials for decrypting Short header packets. |secret| of length + * materials for decrypting 1-RTT packets. |secret| of length * |secretlen| is the decryption secret which is used to derive keying * materials passed to this function. |aead_ctx| is AEAD cipher * context object which must be initialized with a decryption key. @@ -3965,10 +3894,10 @@ NGTCP2_EXTERN int ngtcp2_conn_install_early_key( * that is larger. * * If this function succeeds, |conn| takes ownership of |aead_ctx| and - * |hp_ctx|. :type:`ngtcp2_delete_crypto_aead_ctx` and - * :type:`ngtcp2_delete_crypto_cipher_ctx` will be called to delete - * these objects when they are no longer used. If this function - * fails, the caller is responsible to delete them. + * |hp_ctx|. :member:`ngtcp2_callbacks.delete_crypto_aead_ctx` and + * :member:`ngtcp2_callbacks.delete_crypto_cipher_ctx` will be called + * to delete these objects when they are no longer used. If this + * function fails, the caller is responsible to delete them. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -3985,7 +3914,7 @@ NGTCP2_EXTERN int ngtcp2_conn_install_rx_key( * @function * * `ngtcp2_conn_install_tx_key` installs packet protection keying - * materials for encrypting Short header packets. |secret| of length + * materials for encrypting 1-RTT packets. |secret| of length * |secretlen| is the encryption secret which is used to derive keying * materials passed to this function. |aead_ctx| is AEAD cipher * context object which must be initialized with an encryption key. @@ -3996,10 +3925,10 @@ NGTCP2_EXTERN int ngtcp2_conn_install_rx_key( * that is larger. * * If this function succeeds, |conn| takes ownership of |aead_ctx| and - * |hp_ctx|. :type:`ngtcp2_delete_crypto_aead_ctx` and - * :type:`ngtcp2_delete_crypto_cipher_ctx` will be called to delete - * these objects when they are no longer used. If this function - * fails, the caller is responsible to delete them. + * |hp_ctx|. :member:`ngtcp2_callbacks.delete_crypto_aead_ctx` and + * :member:`ngtcp2_callbacks.delete_crypto_cipher_ctx` will be called + * to delete these objects when they are no longer used. If this + * function fails, the caller is responsible to delete them. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -4035,10 +3964,12 @@ NGTCP2_EXTERN int ngtcp2_conn_initiate_key_update(ngtcp2_conn *conn, * is defined as NGTCP2_ERR_* macro, such as * :macro:`NGTCP2_ERR_DECRYPT`). In general, error code should be * propagated via return value, but sometimes ngtcp2 API is called - * inside callback function of TLS stack and it does not allow to + * inside callback function of TLS stack, and it does not allow to * return ngtcp2 error code directly. In this case, implementation * can set the error code (e.g., * :macro:`NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM`) using this function. + * + * See also `ngtcp2_conn_get_tls_error`. 
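For instance, an application callback invoked from inside the TLS stack, which can only report generic failure, might record the precise ngtcp2 error as sketched below (the callback shape is an assumption):

    /* Sketch: remember an ngtcp2 error raised while running inside the TLS
     * stack, where only a generic failure can be returned. */
    static int on_remote_transport_params(ngtcp2_conn *conn, const uint8_t *data,
                                          size_t datalen) {
      int rv = ngtcp2_conn_decode_and_set_remote_transport_params(conn, data,
                                                                  datalen);
      if (rv != 0) {
        /* retrievable later via ngtcp2_conn_get_tls_error() */
        ngtcp2_conn_set_tls_error(conn, rv);
        return -1; /* tell the TLS stack that the callback failed */
      }

      return 0;
    }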
*/ NGTCP2_EXTERN void ngtcp2_conn_set_tls_error(ngtcp2_conn *conn, int liberr); @@ -4055,7 +3986,9 @@ NGTCP2_EXTERN int ngtcp2_conn_get_tls_error(ngtcp2_conn *conn); * @function * * `ngtcp2_conn_set_tls_alert` sets a TLS alert |alert| generated by a - * local endpoint to |conn|. + * TLS stack of a local endpoint to |conn|. + * + * See also `ngtcp2_conn_get_tls_alert`. */ NGTCP2_EXTERN void ngtcp2_conn_set_tls_alert(ngtcp2_conn *conn, uint8_t alert); @@ -4073,8 +4006,10 @@ NGTCP2_EXTERN uint8_t ngtcp2_conn_get_tls_alert(ngtcp2_conn *conn); * * `ngtcp2_conn_set_keep_alive_timeout` sets keep-alive timeout. If * nonzero value is given, after a connection is idle at least in a - * given amount of time, a keep-alive packet is sent. If 0 is set, - * keep-alive functionality is disabled and this is the default. + * given amount of time, a keep-alive packet is sent. If UINT64_MAX + * is set, keep-alive functionality is disabled, and this is the + * default. Specifying 0 in |timeout| is reserved for a future + * extension, and for now it is treated as if UINT64_MAX is given. */ NGTCP2_EXTERN void ngtcp2_conn_set_keep_alive_timeout(ngtcp2_conn *conn, ngtcp2_duration timeout); @@ -4082,18 +4017,19 @@ NGTCP2_EXTERN void ngtcp2_conn_set_keep_alive_timeout(ngtcp2_conn *conn, /** * @function * - * `ngtcp2_conn_get_expiry` returns the next expiry time. + * `ngtcp2_conn_get_expiry` returns the next expiry time. It returns + * ``UINT64_MAX`` if there is no next expiry. * - * Call `ngtcp2_conn_handle_expiry()` and `ngtcp2_conn_write_pkt` (or - * `ngtcp2_conn_writev_stream`) if expiry time is passed. + * Call `ngtcp2_conn_handle_expiry` and then + * `ngtcp2_conn_writev_stream` (or `ngtcp2_conn_writev_datagram`) when + * the expiry time has passed. */ NGTCP2_EXTERN ngtcp2_tstamp ngtcp2_conn_get_expiry(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_handle_expiry` handles expired timer. It does nothing - * if timer is not expired. + * `ngtcp2_conn_handle_expiry` handles expired timer. */ NGTCP2_EXTERN int ngtcp2_conn_handle_expiry(ngtcp2_conn *conn, ngtcp2_tstamp ts); @@ -4108,9 +4044,9 @@ NGTCP2_EXTERN ngtcp2_duration ngtcp2_conn_get_pto(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_decode_remote_transport_params` decodes QUIC transport - * parameters from the buffer pointed by |data| of length |datalen|, - * and sets the result to |conn|. + * `ngtcp2_conn_decode_and_set_remote_transport_params` decodes QUIC + * transport parameters from the buffer pointed by |data| of length + * |datalen|, and sets the result to |conn|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -4126,9 +4062,8 @@ NGTCP2_EXTERN ngtcp2_duration ngtcp2_conn_get_pto(ngtcp2_conn *conn); * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` * User callback failed */ -NGTCP2_EXTERN int -ngtcp2_conn_decode_remote_transport_params(ngtcp2_conn *conn, - const uint8_t *data, size_t datalen); +NGTCP2_EXTERN int ngtcp2_conn_decode_and_set_remote_transport_params( + ngtcp2_conn *conn, const uint8_t *data, size_t datalen); /** * @function @@ -4143,36 +4078,77 @@ ngtcp2_conn_get_remote_transport_params(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_set_early_remote_transport_params` sets |params| as - * transport parameters previously received from a server. The - * parameters are used to send early data. QUIC requires that client - * application should remember transport parameters along with a - * session ticket. 
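The expiry handling described above typically drives the application timer. A minimal sketch, assuming a monotonic now_ns() clock in nanoseconds and a rearm_timer() helper:

    /* Sketch: run expired timers, then re-arm for the next deadline. */
    ngtcp2_tstamp now = now_ns();

    if (ngtcp2_conn_get_expiry(conn) <= now) {
      int rv = ngtcp2_conn_handle_expiry(conn, now);
      if (rv != 0) {
        /* treat as a connection error (see the ngtcp2_conn_read_pkt notes) */
      }
      /* drive output afterwards, e.g. with ngtcp2_conn_writev_stream() */
    }

    ngtcp2_tstamp expiry = ngtcp2_conn_get_expiry(conn);
    if (expiry != UINT64_MAX) {
      rearm_timer(expiry > now ? expiry - now : 0);
    }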
- * - * At least following fields should be set: - * - * - initial_max_stream_id_bidi - * - initial_max_stream_id_uni - * - initial_max_stream_data_bidi_local - * - initial_max_stream_data_bidi_remote - * - initial_max_stream_data_uni - * - initial_max_data - * - active_connection_id_limit - * - max_datagram_frame_size (if DATAGRAM extension was negotiated) - * - * The following fields are ignored: - * - * - ack_delay_exponent - * - max_ack_delay - * - initial_scid - * - original_dcid - * - preferred_address and preferred_address_present - * - retry_scid and retry_scid_present - * - stateless_reset_token and stateless_reset_token_present - */ -NGTCP2_EXTERN void ngtcp2_conn_set_early_remote_transport_params_versioned( - ngtcp2_conn *conn, int transport_params_version, - const ngtcp2_transport_params *params); + * `ngtcp2_conn_encode_0rtt_transport_params` encodes the QUIC + * transport parameters that are used for 0-RTT data in the buffer + * pointed by |dest| of length |destlen|. It includes at least the + * following fields: + * + * - :member:`ngtcp2_transport_params.initial_max_streams_bidi` + * - :member:`ngtcp2_transport_params.initial_max_streams_uni` + * - :member:`ngtcp2_transport_params.initial_max_stream_data_bidi_local` + * - :member:`ngtcp2_transport_params.initial_max_stream_data_bidi_remote` + * - :member:`ngtcp2_transport_params.initial_max_stream_data_uni` + * - :member:`ngtcp2_transport_params.initial_max_data` + * - :member:`ngtcp2_transport_params.active_connection_id_limit` + * - :member:`ngtcp2_transport_params.max_datagram_frame_size` + * + * If |conn| is initialized as server, the following additional fields + * are also included: + * + * - :member:`ngtcp2_transport_params.max_idle_timeout` + * - :member:`ngtcp2_transport_params.max_udp_payload_size` + * - :member:`ngtcp2_transport_params.disable_active_migration` + * + * If |conn| is initialized as client, these parameters are + * synthesized from the remote transport parameters received from + * server. Otherwise, it is the local transport parameters that are + * set by the local endpoint. + * + * This function returns the number of bytes written, or one of the + * following negative error codes: + * + * :macro:`NGTCP2_ERR_NOBUF` + * Buffer is too small. + */ +NGTCP2_EXTERN +ngtcp2_ssize ngtcp2_conn_encode_0rtt_transport_params(ngtcp2_conn *conn, + uint8_t *dest, + size_t destlen); + +/** + * @function + * + * `ngtcp2_conn_decode_and_set_0rtt_transport_params` decodes QUIC + * transport parameters from |data| of length |datalen|, which is + * assumed to be the parameters received from the server in the + * previous connection, and sets it to |conn|. These parameters are + * used to send 0-RTT data. QUIC requires that client application + * should remember transport parameters along with a session ticket. + * + * At least following fields should be included: + * + * - :member:`ngtcp2_transport_params.initial_max_streams_bidi` + * - :member:`ngtcp2_transport_params.initial_max_streams_uni` + * - :member:`ngtcp2_transport_params.initial_max_stream_data_bidi_local` + * - :member:`ngtcp2_transport_params.initial_max_stream_data_bidi_remote` + * - :member:`ngtcp2_transport_params.initial_max_stream_data_uni` + * - :member:`ngtcp2_transport_params.initial_max_data` + * - :member:`ngtcp2_transport_params.active_connection_id_limit` + * - :member:`ngtcp2_transport_params.max_datagram_frame_size` (if + * DATAGRAM extension was negotiated) + * + * This function must only be used by client. 
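A sketch of the client-side remember-and-restore flow for these 0-RTT transport parameters; save_with_ticket() and load_with_ticket() are hypothetical storage helpers tied to the TLS session ticket:

    /* Sketch: persist the server's transport parameters next to the TLS
     * session ticket. */
    uint8_t buf[256];
    ngtcp2_ssize n = ngtcp2_conn_encode_0rtt_transport_params(conn, buf,
                                                              sizeof(buf));
    if (n > 0) {
      save_with_ticket(buf, (size_t)n);
    }

    /* ... and restore them in a later connection before sending 0-RTT. */
    size_t len;
    const uint8_t *blob = load_with_ticket(&len);
    if (blob != NULL &&
        ngtcp2_conn_decode_and_set_0rtt_transport_params(conn, blob, len) == 0) {
      /* 0-RTT streams can now be opened before the handshake completes. */
    }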
+ * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGTCP2_ERR_NOMEM` + * Out of memory. + * :macro:`NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM` + * The input is malformed. + */ +NGTCP2_EXTERN int ngtcp2_conn_decode_and_set_0rtt_transport_params( + ngtcp2_conn *conn, const uint8_t *data, size_t datalen); /** * @function @@ -4182,7 +4158,7 @@ NGTCP2_EXTERN void ngtcp2_conn_set_early_remote_transport_params_versioned( * Although the local transport parameters are passed to * `ngtcp2_conn_server_new`, server might want to update them after * ALPN is chosen. In that case, server can update the transport - * parameter with this function. Server must call this function + * parameters with this function. Server must call this function * before calling `ngtcp2_conn_install_tx_handshake_key`. * * This function returns 0 if it succeeds, or one of the following @@ -4208,11 +4184,9 @@ ngtcp2_conn_get_local_transport_params(ngtcp2_conn *conn); * @function * * `ngtcp2_conn_encode_local_transport_params` encodes the local QUIC - * transport parameters in |dest| of length |destlen|. This is - * equivalent to calling `ngtcp2_conn_get_local_transport_params` and - * then `ngtcp2_encode_transport_params`. + * transport parameters in |dest| of length |destlen|. * - * This function returns the number of written, or one of the + * This function returns the number of bytes written, or one of the * following negative error codes: * * :macro:`NGTCP2_ERR_NOBUF` @@ -4226,16 +4200,16 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_encode_local_transport_params( * * `ngtcp2_conn_open_bidi_stream` opens new bidirectional stream. The * |stream_user_data| is the user data specific to the stream. The - * open stream ID is stored in |*pstream_id|. + * stream ID of the opened stream is stored in |*pstream_id|. * * Application can call this function before handshake completes. For - * 0RTT packet, application can call this function after calling - * `ngtcp2_conn_set_early_remote_transport_params`. For 1RTT packet, - * application can call this function after calling - * `ngtcp2_conn_decode_remote_transport_params` and + * 0-RTT packet, application can call this function after calling + * `ngtcp2_conn_decode_and_set_0rtt_transport_params`. For 1-RTT + * packet, application can call this function after calling + * `ngtcp2_conn_decode_and_set_remote_transport_params` and * `ngtcp2_conn_install_tx_key`. If ngtcp2 crypto support library is * used, application can call this function after calling - * `ngtcp2_crypto_derive_and_install_tx_key` for 1RTT packet. + * `ngtcp2_crypto_derive_and_install_tx_key` for 1-RTT packet. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -4243,7 +4217,7 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_encode_local_transport_params( * :macro:`NGTCP2_ERR_NOMEM` * Out of memory * :macro:`NGTCP2_ERR_STREAM_ID_BLOCKED` - * The remote peer does not allow |stream_id| yet. + * The remote endpoint does not allow |stream_id| yet. */ NGTCP2_EXTERN int ngtcp2_conn_open_bidi_stream(ngtcp2_conn *conn, int64_t *pstream_id, @@ -4254,16 +4228,16 @@ NGTCP2_EXTERN int ngtcp2_conn_open_bidi_stream(ngtcp2_conn *conn, * * `ngtcp2_conn_open_uni_stream` opens new unidirectional stream. The * |stream_user_data| is the user data specific to the stream. The - * open stream ID is stored in |*pstream_id|. + * stream ID of the opened stream is stored in |*pstream_id|. * * Application can call this function before handshake completes. 
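Once the conditions above are met, opening a stream looks roughly like this (stream_ctx is an application-defined object):

    /* Sketch: open a bidirectional stream and keep its ID. */
    int64_t stream_id;
    int rv = ngtcp2_conn_open_bidi_stream(conn, &stream_id, stream_ctx);

    if (rv == NGTCP2_ERR_STREAM_ID_BLOCKED) {
      /* peer's stream limit reached; retry after MAX_STREAMS is extended */
    } else if (rv != 0) {
      /* fatal, e.g. NGTCP2_ERR_NOMEM */
    }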
For - * 0RTT packet, application can call this function after calling - * `ngtcp2_conn_set_early_remote_transport_params`. For 1RTT packet, - * application can call this function after calling - * `ngtcp2_conn_decode_remote_transport_params` and + * 0-RTT packet, application can call this function after calling + * `ngtcp2_conn_decode_and_set_0rtt_transport_params`. For 1-RTT + * packet, application can call this function after calling + * `ngtcp2_conn_decode_and_set_remote_transport_params` and * `ngtcp2_conn_install_tx_key`. If ngtcp2 crypto support library is * used, application can call this function after calling - * `ngtcp2_crypto_derive_and_install_tx_key` for 1RTT packet. + * `ngtcp2_crypto_derive_and_install_tx_key` for 1-RTT packet. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -4271,7 +4245,7 @@ NGTCP2_EXTERN int ngtcp2_conn_open_bidi_stream(ngtcp2_conn *conn, * :macro:`NGTCP2_ERR_NOMEM` * Out of memory * :macro:`NGTCP2_ERR_STREAM_ID_BLOCKED` - * The remote peer does not allow |stream_id| yet. + * The remote endpoint does not allow |stream_id| yet. */ NGTCP2_EXTERN int ngtcp2_conn_open_uni_stream(ngtcp2_conn *conn, int64_t *pstream_id, @@ -4280,14 +4254,23 @@ NGTCP2_EXTERN int ngtcp2_conn_open_uni_stream(ngtcp2_conn *conn, /** * @function * - * `ngtcp2_conn_shutdown_stream` closes stream denoted by |stream_id| - * abruptly. |app_error_code| is one of application error codes, and - * indicates the reason of shutdown. Successful call of this function - * does not immediately erase the state of the stream. The actual - * deletion is done when the remote endpoint sends acknowledgement. - * Calling this function is equivalent to call + * `ngtcp2_conn_shutdown_stream` closes a stream denoted by + * |stream_id| abruptly. |app_error_code| is one of application error + * codes, and indicates the reason of shutdown. Successful call of + * this function does not immediately erase the state of the stream. + * The actual deletion is done when the remote endpoint sends + * acknowledgement. Calling this function is equivalent to call * `ngtcp2_conn_shutdown_stream_read`, and - * `ngtcp2_conn_shutdown_stream_write` sequentially. + * `ngtcp2_conn_shutdown_stream_write` sequentially with the following + * differences. If |stream_id| refers to a local unidirectional + * stream, this function only shutdowns write side of the stream. If + * |stream_id| refers to a remote unidirectional stream, this function + * only shutdowns read side of the stream. + * + * |flags| is currently unused, and should be set to 0. + * + * This function returns 0 if a stream denoted by |stream_id| is not + * found. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -4295,46 +4278,62 @@ NGTCP2_EXTERN int ngtcp2_conn_open_uni_stream(ngtcp2_conn *conn, * :macro:`NGTCP2_ERR_NOMEM` * Out of memory */ -NGTCP2_EXTERN int ngtcp2_conn_shutdown_stream(ngtcp2_conn *conn, +NGTCP2_EXTERN int ngtcp2_conn_shutdown_stream(ngtcp2_conn *conn, uint32_t flags, int64_t stream_id, uint64_t app_error_code); /** * @function * - * `ngtcp2_conn_shutdown_stream_write` closes write-side of stream + * `ngtcp2_conn_shutdown_stream_write` closes write-side of a stream * denoted by |stream_id| abruptly. |app_error_code| is one of * application error codes, and indicates the reason of shutdown. If - * this function succeeds, no application data is sent to the remote - * endpoint. It discards all data which has not been acknowledged - * yet. 
+ * this function succeeds, no further application data is sent to the + * remote endpoint. It discards all data which has not been + * acknowledged yet. + * + * |flags| is currently unused, and should be set to 0. + * + * This function returns 0 if a stream denoted by |stream_id| is not + * found. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * :macro:`NGTCP2_ERR_NOMEM` * Out of memory + * :macro:`NGTCP2_ERR_INVALID_ARGUMENT` + * |stream_id| refers to a remote unidirectional stream. */ NGTCP2_EXTERN int ngtcp2_conn_shutdown_stream_write(ngtcp2_conn *conn, + uint32_t flags, int64_t stream_id, uint64_t app_error_code); /** * @function * - * `ngtcp2_conn_shutdown_stream_read` closes read-side of stream + * `ngtcp2_conn_shutdown_stream_read` closes read-side of a stream * denoted by |stream_id| abruptly. |app_error_code| is one of * application error codes, and indicates the reason of shutdown. If - * this function succeeds, no application data is forwarded to an - * application layer. + * this function succeeds, no further application data is forwarded to + * an application layer. + * + * |flags| is currently unused, and should be set to 0. + * + * This function returns 0 if a stream denoted by |stream_id| is not + * found. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * :macro:`NGTCP2_ERR_NOMEM` * Out of memory + * :macro:`NGTCP2_ERR_INVALID_ARGUMENT` + * |stream_id| refers to a local unidirectional stream. */ NGTCP2_EXTERN int ngtcp2_conn_shutdown_stream_read(ngtcp2_conn *conn, + uint32_t flags, int64_t stream_id, uint64_t app_error_code); @@ -4355,15 +4354,15 @@ NGTCP2_EXTERN int ngtcp2_conn_shutdown_stream_read(ngtcp2_conn *conn, * @macro * * :macro:`NGTCP2_WRITE_STREAM_FLAG_MORE` indicates that more data may - * come and should be coalesced into the same packet if possible. + * come, and should be coalesced into the same packet if possible. */ #define NGTCP2_WRITE_STREAM_FLAG_MORE 0x01u /** * @macro * - * :macro:`NGTCP2_WRITE_STREAM_FLAG_FIN` indicates that the passed - * data is the final part of a stream. + * :macro:`NGTCP2_WRITE_STREAM_FLAG_FIN` indicates that a passed data + * is the final part of a stream. */ #define NGTCP2_WRITE_STREAM_FLAG_FIN 0x02u @@ -4384,20 +4383,21 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( * @function * * `ngtcp2_conn_writev_stream` writes a packet containing stream data - * of stream denoted by |stream_id|. The buffer of the packet is + * of a stream denoted by |stream_id|. The buffer of the packet is * pointed by |dest| of length |destlen|. This function performs QUIC * handshake as well. * * |destlen| should be at least - * :member:`ngtcp2_settings.max_udp_payload_size`. + * :member:`ngtcp2_settings.max_tx_udp_payload_size`. * * Specifying -1 to |stream_id| means no new stream data to send. * * If |path| is not ``NULL``, this function stores the network path - * with which the packet should be sent. Each addr field must point - * to the buffer which should be at least ``sizeof(struct - * sockaddr_storage)`` bytes long. The assignment might not be done - * if nothing is written to |dest|. + * with which the packet should be sent. Each addr field + * (:member:`ngtcp2_path.local` and :member:`ngtcp2_path.remote`) must + * point to the buffer which should be at least + * sizeof(:type:`sockaddr_union`) bytes long. The assignment might + * not be done if nothing is written to |dest|. 
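Note the new leading |flags| argument in all three shutdown functions; a minimal sketch of aborting a stream under the updated signatures:

    /* Sketch: abort both directions of a stream; |flags| must currently be 0. */
    int rv = ngtcp2_conn_shutdown_stream(conn, 0, stream_id, app_error_code);
    if (rv != 0) {
      /* NGTCP2_ERR_NOMEM; an unknown |stream_id| returns 0, not an error */
    }

    /* Or shut down a single direction only: */
    ngtcp2_conn_shutdown_stream_write(conn, 0, stream_id, app_error_code);
    ngtcp2_conn_shutdown_stream_read(conn, 0, stream_id, app_error_code);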
* * If |pi| is not ``NULL``, this function stores packet metadata in it * if it succeeds. The metadata includes ECN markings. When calling @@ -4405,7 +4405,10 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( * :macro:`NGTCP2_ERR_WRITE_MORE`, caller must pass the same |pi| to * this function. * - * If the all given data is encoded as STREAM frame in |dest|, and if + * Stream data is specified as vector of data |datav|. |datavcnt| + * specifies the number of :type:`ngtcp2_vec` that |datav| includes. + * + * If all given data is encoded as STREAM frame in |dest|, and if * |flags| & :macro:`NGTCP2_WRITE_STREAM_FLAG_FIN` is nonzero, fin * flag is set to outgoing STREAM frame. Otherwise, fin flag in * STREAM frame is not set. @@ -4422,49 +4425,67 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( * The number of data encoded in STREAM frame is stored in |*pdatalen| * if it is not ``NULL``. The caller must keep the portion of data * covered by |*pdatalen| bytes in tact until - * :type:`ngtcp2_acked_stream_data_offset` indicates that they are - * acknowledged by a remote endpoint or the stream is closed. - * - * If |flags| equals to :macro:`NGTCP2_WRITE_STREAM_FLAG_NONE`, this - * function produces a single payload of UDP packet. If the given - * stream data is small (e.g., few bytes), the packet might be - * severely under filled. Too many small packet might increase - * overall packet processing costs. Unless there are retransmissions, - * by default, application can only send 1 STREAM frame in one QUIC - * packet. In order to include more than 1 STREAM frame in one QUIC - * packet, specify :macro:`NGTCP2_WRITE_STREAM_FLAG_MORE` in |flags|. - * This is analogous to ``MSG_MORE`` flag in :manpage:`send(2)`. If - * the :macro:`NGTCP2_WRITE_STREAM_FLAG_MORE` is used, there are 4 + * :member:`ngtcp2_callbacks.acked_stream_data_offset` indicates that + * they are acknowledged by a remote endpoint or the stream is closed. + * + * If the given stream data is small (e.g., few bytes), the packet + * might be severely under filled. Too many small packet might + * increase overall packet processing costs. Unless there are + * retransmissions, by default, application can only send 1 STREAM + * frame in one QUIC packet. In order to include more than 1 STREAM + * frame in one QUIC packet, specify + * :macro:`NGTCP2_WRITE_STREAM_FLAG_MORE` in |flags|. This is + * analogous to ``MSG_MORE`` flag in :manpage:`send(2)`. If the + * :macro:`NGTCP2_WRITE_STREAM_FLAG_MORE` is used, there are 4 * outcomes: * * - The function returns the written length of packet just like * without :macro:`NGTCP2_WRITE_STREAM_FLAG_MORE`. This is because - * packet is nearly full and the library decided to make a complete - * packet. |*pdatalen| might be -1 or >= 0. + * packet is nearly full, and the library decided to make a complete + * packet. |*pdatalen| might be -1 or >= 0. It may return 0 which + * indicates that no packet transmission is possible at the moment + * for some reason. * * - The function returns :macro:`NGTCP2_ERR_WRITE_MORE`. In this - * case, |*pdatalen| >= 0 is asserted. This indicates that - * application can call this function with different stream data (or - * `ngtcp2_conn_writev_datagram` if it has data to send in + * case, |*pdatalen| >= 0 is asserted. It indicates that + * application can still call this function with different stream + * data (or `ngtcp2_conn_writev_datagram` if it has data to send in * unreliable datagram) to pack them into the same packet. 
* Application has to specify the same |conn|, |path|, |pi|, |dest|, * |destlen|, and |ts| parameters, otherwise the behaviour is * undefined. The application can change |flags|. * - * - The function returns :macro:`NGTCP2_ERR_STREAM_DATA_BLOCKED` which - * indicates that stream is blocked because of flow control. - * - * - The other error might be returned just like without - * :macro:`NGTCP2_WRITE_STREAM_FLAG_MORE`. + * - The function returns one of the following negative error codes: + * :macro:`NGTCP2_ERR_STREAM_DATA_BLOCKED`, + * :macro:`NGTCP2_ERR_STREAM_NOT_FOUND`, or + * :macro:`NGTCP2_ERR_STREAM_SHUT_WR`. In this case, |*pdatalen| == + * -1 is asserted. Application can still write the stream data of + * the other streams by calling this function (or + * `ngtcp2_conn_writev_datagram` if it has data to send in + * unreliable datagram) to pack them into the same packet. + * Application has to specify the same |conn|, |path|, |pi|, |dest|, + * |destlen|, and |ts| parameters, otherwise the behaviour is + * undefined. The application can change |flags|. * - * When application sees :macro:`NGTCP2_ERR_WRITE_MORE`, it must not - * call other ngtcp2 API functions (application can still call - * `ngtcp2_conn_write_connection_close` to handle error from this - * function). Just keep calling `ngtcp2_conn_writev_stream`, - * `ngtcp2_conn_write_pkt`, or `ngtcp2_conn_writev_datagram` until it - * returns a positive number (which indicates a complete packet is - * ready). If there is no stream data to include, call this function - * with |stream_id| as -1 to stop coalescing and write a packet. + * - The other negative error codes might be returned just like + * without :macro:`NGTCP2_WRITE_STREAM_FLAG_MORE`. These errors + * should be treated as a connection error. + * + * When application uses :macro:`NGTCP2_WRITE_STREAM_FLAG_MORE` at + * least once, it must not call other ngtcp2 API functions + * (application can still call `ngtcp2_conn_write_connection_close` to + * handle error from this function. It can also call + * `ngtcp2_conn_shutdown_stream_read`, + * `ngtcp2_conn_shutdown_stream_write`, and + * `ngtcp2_conn_shutdown_stream`), just keep calling this function (or + * `ngtcp2_conn_writev_datagram`) until it returns 0, a positive + * number (which indicates a complete packet is ready), or the error + * codes other than :macro:`NGTCP2_ERR_WRITE_MORE`, + * :macro:`NGTCP2_ERR_STREAM_DATA_BLOCKED`, + * :macro:`NGTCP2_ERR_STREAM_NOT_FOUND`, and + * :macro:`NGTCP2_ERR_STREAM_SHUT_WR`. If there is no stream data to + * include, call this function with |stream_id| as -1 to stop + * coalescing and write a packet. * * This function returns 0 if it cannot write any frame because buffer * is too small, or packet is congestion limited. Application should @@ -4473,11 +4494,9 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( * This function must not be called from inside the callback * functions. * - * If pacing is enabled, `ngtcp2_conn_update_pkt_tx_time` must be - * called after this function. Application may call this function - * multiple times before calling `ngtcp2_conn_update_pkt_tx_time`. - * Packet pacing is enabled if BBR congestion controller algorithm is - * used. + * `ngtcp2_conn_update_pkt_tx_time` must be called after this + * function. Application may call this function multiple times before + * calling `ngtcp2_conn_update_pkt_tx_time`. 
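A sketch of the coalescing write loop described above. The next_stream_chunk(), mark_consumed(), defer_stream(), and send_packet() helpers, and a |buf| sized from `ngtcp2_conn_get_max_tx_udp_payload_size`, are application-side assumptions:

    /* Sketch: coalesce stream data into packets with
     * NGTCP2_WRITE_STREAM_FLAG_MORE, as described above. */
    static int write_streams(ngtcp2_conn *conn, uint8_t *buf, size_t buflen,
                             ngtcp2_tstamp ts) {
      ngtcp2_path_storage ps;
      ngtcp2_pkt_info pi;
      ngtcp2_vec datav;
      ngtcp2_ssize nwrite, ndatalen;
      int64_t stream_id;
      int fin;

      ngtcp2_path_storage_zero(&ps);

      for (;;) {
        stream_id = next_stream_chunk(&datav, &fin); /* -1 if nothing queued */

        uint32_t flags = NGTCP2_WRITE_STREAM_FLAG_MORE;
        if (fin) {
          flags |= NGTCP2_WRITE_STREAM_FLAG_FIN;
        }

        nwrite = ngtcp2_conn_writev_stream(conn, &ps.path, &pi, buf, buflen,
                                           &ndatalen, flags, stream_id,
                                           &datav, 1, ts);
        if (nwrite < 0) {
          switch (nwrite) {
          case NGTCP2_ERR_WRITE_MORE:
            mark_consumed(stream_id, (size_t)ndatalen);
            continue;           /* pack more data into the same packet */
          case NGTCP2_ERR_STREAM_DATA_BLOCKED:
          case NGTCP2_ERR_STREAM_NOT_FOUND:
          case NGTCP2_ERR_STREAM_SHUT_WR:
            defer_stream(stream_id);
            continue;           /* try data from another stream */
          default:
            return (int)nwrite; /* connection error */
          }
        }

        if (ndatalen >= 0) {
          mark_consumed(stream_id, (size_t)ndatalen);
        }

        if (nwrite == 0) {
          break;                /* congestion limited or nothing left to send */
        }

        send_packet(&ps.path, &pi, buf, (size_t)nwrite);
      }

      ngtcp2_conn_update_pkt_tx_time(conn, ts);

      return 0;
    }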
* * This function returns the number of bytes written in |dest| if it * succeeds, or one of the following negative error codes: @@ -4501,12 +4520,9 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_stream_versioned( * Application can call this function to pack more stream data * into the same packet. See above to know how it works. * - * In general, if the error code which satisfies - * `ngtcp2_err_is_fatal(err) ` != 0 is returned, - * the application should just close the connection by calling - * `ngtcp2_conn_write_connection_close` or just delete the QUIC - * connection using `ngtcp2_conn_del`. It is undefined to call the - * other library functions. + * If any other negative error is returned, call + * `ngtcp2_conn_write_connection_close` to get terminal packet, and + * sending it makes QUIC connection enter the closing state. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, @@ -4531,10 +4547,23 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( * @macro * * :macro:`NGTCP2_WRITE_DATAGRAM_FLAG_MORE` indicates that more data - * may come and should be coalesced into the same packet if possible. + * may come, and should be coalesced into the same packet if possible. */ #define NGTCP2_WRITE_DATAGRAM_FLAG_MORE 0x01u +/** + * @function + * + * `ngtcp2_conn_write_datagram` is just like + * `ngtcp2_conn_writev_datagram`. The only difference is that it + * conveniently accepts a single buffer. + */ +NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, + uint32_t flags, uint64_t dgram_id, const uint8_t *data, size_t datalen, + ngtcp2_tstamp ts); + /** * @function * @@ -4544,25 +4573,28 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( * as well. * * |destlen| should be at least - * :member:`ngtcp2_settings.max_udp_payload_size`. + * :member:`ngtcp2_settings.max_tx_udp_payload_size`. * * For |path| and |pi| parameters, refer to * `ngtcp2_conn_writev_stream`. * + * Stream data is specified as vector of data |datav|. |datavcnt| + * specifies the number of :type:`ngtcp2_vec` that |datav| includes. + * * If the given data is written to the buffer, nonzero value is * assigned to |*paccepted| if it is not NULL. The data in DATAGRAM * frame cannot be fragmented; writing partial data is not possible. * * |dgram_id| is an opaque identifier which should uniquely identify - * the given DATAGRAM. It is passed to :type:`ngtcp2_ack_datagram` - * callback when a packet that contains DATAGRAM frame is - * acknowledged. It is passed to :type:`ngtcp2_lost_datagram` - * callback when a packet that contains DATAGRAM frame is declared - * lost. If an application uses neither of those callbacks, it can - * sets 0 to this parameter. + * the given DATAGRAM data. It is passed to + * :member:`ngtcp2_callbacks.ack_datagram` callback when a packet that + * contains DATAGRAM frame is acknowledged. It is also passed to + * :member:`ngtcp2_callbacks.lost_datagram` callback when a packet + * that contains DATAGRAM frame is declared lost. If an application + * uses neither of those callbacks, it can sets 0 to this parameter. * - * This function might write other frames other than DATAGRAM, just - * like `ngtcp2_conn_writev_stream`. + * This function might write other frames other than DATAGRAM frame, + * just like `ngtcp2_conn_writev_stream`. 
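A sketch of sending a single unreliable DATAGRAM along the lines described above; the destination buffer, |dgram_id| bookkeeping, and send_packet() are application-side assumptions:

    /* Sketch: write one DATAGRAM frame into an outgoing packet. */
    ngtcp2_vec datav = { (uint8_t *)payload, payloadlen };
    int accepted = 0;

    ngtcp2_ssize nwrite = ngtcp2_conn_writev_datagram(
        conn, &ps.path, &pi, buf, buflen, &accepted,
        NGTCP2_WRITE_DATAGRAM_FLAG_NONE, dgram_id, &datav, 1, ts);

    if (nwrite > 0) {
      send_packet(&ps.path, &pi, buf, (size_t)nwrite);
      if (!accepted) {
        /* The DATAGRAM did not fit into this packet and cannot be
         * fragmented; try again with a later packet. */
      }
    } else if (nwrite < 0) {
      /* handle errors as for ngtcp2_conn_writev_stream() */
    }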
* * If the function returns 0, it means that no more data cannot be * sent because of congestion control limit; or, data does not fit @@ -4593,10 +4625,11 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( * When application sees :macro:`NGTCP2_ERR_WRITE_MORE`, it must not * call other ngtcp2 API functions (application can still call * `ngtcp2_conn_write_connection_close` to handle error from this - * function). Just keep calling `ngtcp2_conn_writev_datagram`, - * `ngtcp2_conn_writev_stream` or `ngtcp2_conn_write_pkt` until it - * returns a positive number (which indicates a complete packet is - * ready). + * function. It can also call `ngtcp2_conn_shutdown_stream_read`, + * `ngtcp2_conn_shutdown_stream_write`, and + * `ngtcp2_conn_shutdown_stream`). Just keep calling this function + * (or `ngtcp2_conn_writev_stream`) until it returns a positive number + * (which indicates a complete packet is ready). * * This function returns the number of bytes written in |dest| if it * succeeds, or one of the following negative error codes: @@ -4617,12 +4650,9 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( * The provisional DATAGRAM frame size exceeds the maximum * DATAGRAM frame size that a remote endpoint can receive. * - * In general, if the error code which satisfies - * `ngtcp2_err_is_fatal(err) ` != 0 is returned, - * the application should just close the connection by calling - * `ngtcp2_conn_write_connection_close` or just delete the QUIC - * connection using `ngtcp2_conn_del`. It is undefined to call the - * other library functions. + * If any other negative error is returned, call + * `ngtcp2_conn_write_connection_close` to get terminal packet, and + * sending it makes QUIC connection enter the closing state. */ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_datagram_versioned( ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, @@ -4633,30 +4663,37 @@ NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_writev_datagram_versioned( /** * @function * - * `ngtcp2_conn_is_in_closing_period` returns nonzero if |conn| is in - * the closing period. + * `ngtcp2_conn_in_closing_period` returns nonzero if |conn| is in the + * closing period. */ -NGTCP2_EXTERN int ngtcp2_conn_is_in_closing_period(ngtcp2_conn *conn); +NGTCP2_EXTERN int ngtcp2_conn_in_closing_period(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_is_in_draining_period` returns nonzero if |conn| is in + * `ngtcp2_conn_in_draining_period` returns nonzero if |conn| is in * the draining period. */ -NGTCP2_EXTERN int ngtcp2_conn_is_in_draining_period(ngtcp2_conn *conn); +NGTCP2_EXTERN int ngtcp2_conn_in_draining_period(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_extend_max_stream_offset` extends stream's max stream - * data value by |datalen|. + * `ngtcp2_conn_extend_max_stream_offset` extends the maximum stream + * data that a remote endpoint can send by |datalen|. |stream_id| + * specifies the stream ID. This function only extends stream-level + * flow control window. + * + * This function returns 0 if a stream denoted by |stream_id| is not + * found. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * :macro:`NGTCP2_ERR_NOMEM` * Out of memory. + * :macro:`NGTCP2_ERR_INVALID_ARGUMENT` + * |stream_id| refers to a local unidirectional stream. 
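These flow-control extension calls are typically made after the application has finished consuming data delivered by the recv_stream_data callback; a minimal sketch, with |nconsumed| tracked by the application:

    /* Sketch: give back flow-control credit for |nconsumed| bytes that the
     * application has finished processing. */
    int rv = ngtcp2_conn_extend_max_stream_offset(conn, stream_id, nconsumed);
    if (rv != 0) {
      /* NGTCP2_ERR_NOMEM, or |stream_id| is a local unidirectional stream */
    }

    /* The connection-level window is extended separately (described below). */
    ngtcp2_conn_extend_max_offset(conn, nconsumed);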
*/ NGTCP2_EXTERN int ngtcp2_conn_extend_max_stream_offset(ngtcp2_conn *conn, int64_t stream_id, @@ -4666,7 +4703,8 @@ NGTCP2_EXTERN int ngtcp2_conn_extend_max_stream_offset(ngtcp2_conn *conn, * @function * * `ngtcp2_conn_extend_max_offset` extends max data offset by - * |datalen|. + * |datalen|. This function only extends connection-level flow + * control window. */ NGTCP2_EXTERN void ngtcp2_conn_extend_max_offset(ngtcp2_conn *conn, uint64_t datalen); @@ -4675,12 +4713,13 @@ NGTCP2_EXTERN void ngtcp2_conn_extend_max_offset(ngtcp2_conn *conn, * @function * * `ngtcp2_conn_extend_max_streams_bidi` extends the number of maximum - * local bidirectional streams that a remote endpoint can open by |n|. + * remote bidirectional streams that a remote endpoint can open by + * |n|. * * The library does not increase maximum stream limit automatically. * The exception is when a stream is closed without - * :type:`ngtcp2_stream_open` callback being called. In this case, - * stream limit is increased automatically. + * :member:`ngtcp2_callbacks.stream_open` callback being called. In + * this case, stream limit is increased automatically. */ NGTCP2_EXTERN void ngtcp2_conn_extend_max_streams_bidi(ngtcp2_conn *conn, size_t n); @@ -4689,13 +4728,13 @@ NGTCP2_EXTERN void ngtcp2_conn_extend_max_streams_bidi(ngtcp2_conn *conn, * @function * * `ngtcp2_conn_extend_max_streams_uni` extends the number of maximum - * local unidirectional streams that a remote endpoint can open by + * remote unidirectional streams that a remote endpoint can open by * |n|. * * The library does not increase maximum stream limit automatically. * The exception is when a stream is closed without - * :type:`ngtcp2_stream_open` callback being called. In this case, - * stream limit is increased automatically. + * :member:`ngtcp2_callbacks.stream_open` callback being called. In + * this case, stream limit is increased automatically. */ NGTCP2_EXTERN void ngtcp2_conn_extend_max_streams_uni(ngtcp2_conn *conn, size_t n); @@ -4703,9 +4742,10 @@ NGTCP2_EXTERN void ngtcp2_conn_extend_max_streams_uni(ngtcp2_conn *conn, /** * @function * - * `ngtcp2_conn_get_dcid` returns the non-NULL pointer to destination - * connection ID. If no destination connection ID is present, the - * return value is not ``NULL``, and its datalen field is 0. + * `ngtcp2_conn_get_dcid` returns the non-NULL pointer to the current + * Destination Connection ID. If no Destination Connection ID is + * present, the return value is not ``NULL``, and its :member:`datalen + * ` field is 0. */ NGTCP2_EXTERN const ngtcp2_cid *ngtcp2_conn_get_dcid(ngtcp2_conn *conn); @@ -4714,7 +4754,9 @@ NGTCP2_EXTERN const ngtcp2_cid *ngtcp2_conn_get_dcid(ngtcp2_conn *conn); * * `ngtcp2_conn_get_client_initial_dcid` returns the non-NULL pointer * to the Destination Connection ID that client sent in its Initial - * packet. + * packet. If the Destination Connection ID is not present, the + * return value is not ``NULL``, and its :member:`datalen + * ` field is 0. */ NGTCP2_EXTERN const ngtcp2_cid * ngtcp2_conn_get_client_initial_dcid(ngtcp2_conn *conn); @@ -4722,31 +4764,17 @@ ngtcp2_conn_get_client_initial_dcid(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_get_num_scid` returns the number of source connection - * IDs which the local endpoint has provided to the peer and have not - * retired. 
- */ -NGTCP2_EXTERN size_t ngtcp2_conn_get_num_scid(ngtcp2_conn *conn); - -/** - * @function - * - * `ngtcp2_conn_get_scid` writes the all source connection IDs which - * the local endpoint has provided to the peer and have not retired in - * |dest|. The buffer pointed by |dest| must have - * ``sizeof(ngtcp2_cid) * n`` bytes available, where n is the return - * value of `ngtcp2_conn_get_num_scid()`. + * `ngtcp2_conn_get_scid` writes the all Source Connection IDs which a + * local endpoint has provided to a remote endpoint, and are not + * retired in |dest|. If |dest| is NULL, this function does not write + * anything, and returns the number of Source Connection IDs that + * would otherwise be written to the provided buffer. The buffer + * pointed by |dest| must have sizeof(:type:`ngtcp2_cid`) * n bytes + * available, where n is the return value of `ngtcp2_conn_get_scid` + * with |dest| == NULL. */ NGTCP2_EXTERN size_t ngtcp2_conn_get_scid(ngtcp2_conn *conn, ngtcp2_cid *dest); -/** - * @function - * - * `ngtcp2_conn_get_num_active_dcid` returns the number of the active - * destination connection ID. - */ -NGTCP2_EXTERN size_t ngtcp2_conn_get_num_active_dcid(ngtcp2_conn *conn); - /** * @struct * @@ -4763,8 +4791,8 @@ typedef struct ngtcp2_cid_token { */ ngtcp2_cid cid; /** - * :member:`ps` is the path which is associated to this Connection - * ID. + * :member:`ps` is the path which this Connection ID is associated + * with. */ ngtcp2_path_storage ps; /** @@ -4782,10 +4810,15 @@ typedef struct ngtcp2_cid_token { /** * @function * - * `ngtcp2_conn_get_active_dcid` writes the all active destination - * connection IDs and tokens to |dest|. The buffer pointed by |dest| - * must have ``sizeof(ngtcp2_cid_token) * n`` bytes available, where n - * is the return value of `ngtcp2_conn_get_num_active_dcid()`. + * `ngtcp2_conn_get_active_dcid` writes the all active Destination + * Connection IDs and their tokens to |dest|. Before handshake + * completes, this function returns 0. If |dest| is NULL, this + * function does not write anything, and returns the number of + * Destination Connection IDs that would otherwise be written to the + * provided buffer. The buffer pointed by |dest| must have + * sizeof(:type:`ngtcp2_cid_token`) * n bytes available, where n is + * the return value of `ngtcp2_conn_get_active_dcid` with |dest| == + * NULL. */ NGTCP2_EXTERN size_t ngtcp2_conn_get_active_dcid(ngtcp2_conn *conn, ngtcp2_cid_token *dest); @@ -4801,7 +4834,8 @@ NGTCP2_EXTERN uint32_t ngtcp2_conn_get_client_chosen_version(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_get_negotiated_version` returns the negotiated version. + * `ngtcp2_conn_get_negotiated_version` returns the negotiated + * version. * * Until the version is negotiated, this function returns 0. */ @@ -4810,44 +4844,59 @@ NGTCP2_EXTERN uint32_t ngtcp2_conn_get_negotiated_version(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_early_data_rejected` tells |conn| that early data was - * rejected by a server. |conn| discards the following connection - * states: + * `ngtcp2_conn_tls_early_data_rejected` tells |conn| that early data + * was rejected by a server during TLS handshake, or client decided + * not to attempt early data for some reason. |conn| discards the + * following connection states: * - * - Any opended streams. + * - Any opened streams. * - Stream identifier allocations. * - Max data extended by `ngtcp2_conn_extend_max_offset`. * - Max bidi streams extended by `ngtcp2_conn_extend_max_streams_bidi`. 
* - Max uni streams extended by `ngtcp2_conn_extend_max_streams_uni`. * * Application which wishes to retransmit early data, it has to open - * streams and send stream data again. + * streams, and send stream data again. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` + * User callback failed + */ +NGTCP2_EXTERN int ngtcp2_conn_tls_early_data_rejected(ngtcp2_conn *conn); + +/** + * @function + * + * `ngtcp2_conn_get_tls_early_data_rejected` returns nonzero if + * `ngtcp2_conn_tls_early_data_rejected` has been called. */ -NGTCP2_EXTERN void ngtcp2_conn_early_data_rejected(ngtcp2_conn *conn); +NGTCP2_EXTERN int ngtcp2_conn_get_tls_early_data_rejected(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_get_conn_stat` assigns connection statistics data to - * |*cstat|. + * `ngtcp2_conn_get_conn_info` assigns connection statistics data to + * |*cinfo|. */ -NGTCP2_EXTERN void ngtcp2_conn_get_conn_stat_versioned(ngtcp2_conn *conn, - int conn_stat_version, - ngtcp2_conn_stat *cstat); +NGTCP2_EXTERN void ngtcp2_conn_get_conn_info_versioned(ngtcp2_conn *conn, + int conn_info_version, + ngtcp2_conn_info *cinfo); /** * @function * - * `ngtcp2_conn_submit_crypto_data` submits crypto stream data |data| - * of length |datalen| to the library for transmission. The - * encryption level is given in |crypto_level|. + * `ngtcp2_conn_submit_crypto_data` submits crypto data |data| of + * length |datalen| to the library for transmission. + * |encryption_level| specifies the encryption level of data. * * The library makes a copy of the buffer pointed by |data| of length * |datalen|. Application can discard |data|. */ NGTCP2_EXTERN int ngtcp2_conn_submit_crypto_data(ngtcp2_conn *conn, - ngtcp2_crypto_level crypto_level, + ngtcp2_encryption_level encryption_level, const uint8_t *data, const size_t datalen); /** @@ -4874,7 +4923,8 @@ NGTCP2_EXTERN int ngtcp2_conn_submit_new_token(ngtcp2_conn *conn, * @function * * `ngtcp2_conn_set_local_addr` sets local endpoint address |addr| to - * the current path of |conn|. + * the current path of |conn|. This function is provided for testing + * purpose only. */ NGTCP2_EXTERN void ngtcp2_conn_set_local_addr(ngtcp2_conn *conn, const ngtcp2_addr *addr); @@ -4898,34 +4948,34 @@ NGTCP2_EXTERN const ngtcp2_path *ngtcp2_conn_get_path(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_get_max_udp_payload_size` returns the maximum UDP + * `ngtcp2_conn_get_max_tx_udp_payload_size` returns the maximum UDP * payload size that this local endpoint would send. This is the - * value of :member:`ngtcp2_settings.max_udp_payload_size` that is + * value of :member:`ngtcp2_settings.max_tx_udp_payload_size` that is * passed to `ngtcp2_conn_client_new` or `ngtcp2_conn_server_new`. */ -NGTCP2_EXTERN size_t ngtcp2_conn_get_max_udp_payload_size(ngtcp2_conn *conn); +NGTCP2_EXTERN size_t ngtcp2_conn_get_max_tx_udp_payload_size(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_get_path_max_udp_payload_size` returns the maximum UDP - * payload size for the current path. If - * :member:`ngtcp2_settings.no_udp_payload_size_shaping` is set to + * `ngtcp2_conn_get_path_max_tx_udp_payload_size` returns the maximum + * UDP payload size for the current path. If + * :member:`ngtcp2_settings.no_tx_udp_payload_size_shaping` is set to * nonzero, this function is equivalent to - * `ngtcp2_conn_get_max_udp_payload_size`. Otherwise, it returns the - * maximum UDP payload size that is probed for the current path. 
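One common use of these getters is sizing the outgoing packet buffer, roughly as below (a single-packet buffer is assumed; GSO-style batching is out of scope here):

    /* Sketch: allocate the TX buffer from the configured maximum UDP payload
     * size, and consult the per-path probed value when filling packets.
     * Requires <stdlib.h> for malloc(). */
    size_t buflen = ngtcp2_conn_get_max_tx_udp_payload_size(conn);
    uint8_t *buf = malloc(buflen);

    size_t payload_limit = ngtcp2_conn_get_path_max_tx_udp_payload_size(conn);
    (void)payload_limit; /* e.g. the segment size when batching datagrams */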
+ * `ngtcp2_conn_get_max_tx_udp_payload_size`. Otherwise, it returns + * the maximum UDP payload size that is probed for the current path. */ NGTCP2_EXTERN size_t -ngtcp2_conn_get_path_max_udp_payload_size(ngtcp2_conn *conn); +ngtcp2_conn_get_path_max_tx_udp_payload_size(ngtcp2_conn *conn); /** * @function * * `ngtcp2_conn_initiate_immediate_migration` starts connection - * migration to the given |path|. - * Only client can initiate migration. This function does - * immediate migration; it does not probe peer reachability from a new - * local address. + * migration to the given |path|. Only client can initiate migration. + * This function does immediate migration; while the path validation + * is nonetheless performed, this function does not wait for it to + * succeed. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -4936,7 +4986,8 @@ ngtcp2_conn_get_path_max_udp_payload_size(ngtcp2_conn *conn); * :macro:`NGTCP2_ERR_CONN_ID_BLOCKED` * No unused connection ID is available. * :macro:`NGTCP2_ERR_INVALID_ARGUMENT` - * |local_addr| equals the current local address. + * :member:`local ` field of |path| equals the + * current local address. * :macro:`NGTCP2_ERR_NOMEM` * Out of memory */ @@ -4949,7 +5000,7 @@ NGTCP2_EXTERN int ngtcp2_conn_initiate_immediate_migration( * `ngtcp2_conn_initiate_migration` starts connection migration to the * given |path|. Only client can initiate migration. Unlike * `ngtcp2_conn_initiate_immediate_migration`, this function starts a - * path validation with a new path and migrate to the new path after + * path validation with a new path, and migrate to the new path after * successful path validation. * * This function returns 0 if it succeeds, or one of the following @@ -4961,7 +5012,8 @@ NGTCP2_EXTERN int ngtcp2_conn_initiate_immediate_migration( * :macro:`NGTCP2_ERR_CONN_ID_BLOCKED` * No unused connection ID is available. * :macro:`NGTCP2_ERR_INVALID_ARGUMENT` - * |local_addr| equals the current local address. + * :member:`local ` field of |path| equals the + * current local address. * :macro:`NGTCP2_ERR_NOMEM` * Out of memory */ @@ -4969,19 +5021,12 @@ NGTCP2_EXTERN int ngtcp2_conn_initiate_migration(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_tstamp ts); -/** - * @function - * - * `ngtcp2_conn_get_max_local_streams_uni` returns the cumulative - * number of streams which local endpoint can open. - */ -NGTCP2_EXTERN uint64_t ngtcp2_conn_get_max_local_streams_uni(ngtcp2_conn *conn); - /** * @function * * `ngtcp2_conn_get_max_data_left` returns the number of bytes that - * this local endpoint can send in this connection. + * this local endpoint can send in this connection without violating + * connection-level flow control. */ NGTCP2_EXTERN uint64_t ngtcp2_conn_get_max_data_left(ngtcp2_conn *conn); @@ -4990,7 +5035,8 @@ NGTCP2_EXTERN uint64_t ngtcp2_conn_get_max_data_left(ngtcp2_conn *conn); * * `ngtcp2_conn_get_max_stream_data_left` returns the number of bytes * that this local endpoint can send to a stream identified by - * |stream_id|. If no such stream is found, this function returns 0. + * |stream_id| without violating stream-level flow control. If no + * such stream is found, this function returns 0. 
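Together, these two getters give the number of bytes that can be written to a stream right now without hitting flow control; a small sketch:

    /* Sketch: compute how much can be sent on |stream_id| without violating
     * stream-level or connection-level flow control. */
    uint64_t conn_left = ngtcp2_conn_get_max_data_left(conn);
    uint64_t strm_left = ngtcp2_conn_get_max_stream_data_left(conn, stream_id);
    uint64_t budget = conn_left < strm_left ? conn_left : strm_left;

    if (budget == 0) {
      /* blocked by flow control; wait for MAX_DATA / MAX_STREAM_DATA */
    }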
*/ NGTCP2_EXTERN uint64_t ngtcp2_conn_get_max_stream_data_left(ngtcp2_conn *conn, int64_t stream_id); @@ -5046,7 +5092,7 @@ ngtcp2_conn_get_initial_crypto_ctx(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_set_crypto_ctx` sets |ctx| for Handshake/1RTT packet + * `ngtcp2_conn_set_crypto_ctx` sets |ctx| for Handshake/1-RTT packet * encryption. The passed data will be passed to * :type:`ngtcp2_encrypt`, :type:`ngtcp2_decrypt` and * :type:`ngtcp2_hp_mask` callbacks. @@ -5057,114 +5103,113 @@ NGTCP2_EXTERN void ngtcp2_conn_set_crypto_ctx(ngtcp2_conn *conn, /** * @function * - * `ngtcp2_conn_get_tls_native_handle` returns TLS native handle set by - * `ngtcp2_conn_set_tls_native_handle()`. + * `ngtcp2_conn_get_crypto_ctx` returns :type:`ngtcp2_crypto_ctx` + * object for Handshake/1-RTT packet encryption. */ -NGTCP2_EXTERN void *ngtcp2_conn_get_tls_native_handle(ngtcp2_conn *conn); +NGTCP2_EXTERN const ngtcp2_crypto_ctx * +ngtcp2_conn_get_crypto_ctx(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_set_tls_native_handle` sets TLS native handle - * |tls_native_handle| to |conn|. Internally, it is used as an opaque - * pointer. + * `ngtcp2_conn_set_0rtt_crypto_ctx` sets |ctx| for 0-RTT packet + * encryption. The passed data will be passed to + * :type:`ngtcp2_encrypt`, :type:`ngtcp2_decrypt` and + * :type:`ngtcp2_hp_mask` callbacks. */ -NGTCP2_EXTERN void ngtcp2_conn_set_tls_native_handle(ngtcp2_conn *conn, - void *tls_native_handle); +NGTCP2_EXTERN void +ngtcp2_conn_set_0rtt_crypto_ctx(ngtcp2_conn *conn, + const ngtcp2_crypto_ctx *ctx); /** * @function * - * `ngtcp2_conn_set_retry_aead` sets |aead| and |aead_ctx| for Retry - * integrity tag verification. |aead| must be AEAD_AES_128_GCM. - * |aead_ctx| must be initialized with :macro:`NGTCP2_RETRY_KEY` as - * encryption key. This function must be called if |conn| is - * initialized as client. Server does not verify the tag and has no - * need to call this function. - * - * If this function succeeds, |conn| takes ownership of |aead_ctx|. - * :type:`ngtcp2_delete_crypto_aead_ctx` will be called to delete this - * object when it is no longer used. If this function fails, the - * caller is responsible to delete it. + * `ngtcp2_conn_get_0rtt_crypto_ctx` returns :type:`ngtcp2_crypto_ctx` + * object for 0-RTT packet encryption. */ -NGTCP2_EXTERN void -ngtcp2_conn_set_retry_aead(ngtcp2_conn *conn, const ngtcp2_crypto_aead *aead, - const ngtcp2_crypto_aead_ctx *aead_ctx); +NGTCP2_EXTERN const ngtcp2_crypto_ctx * +ngtcp2_conn_get_0rtt_crypto_ctx(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_get_crypto_ctx` returns :type:`ngtcp2_crypto_ctx` - * object for Handshake/1RTT packet encryption. + * `ngtcp2_conn_get_tls_native_handle` returns TLS native handle set + * by `ngtcp2_conn_set_tls_native_handle`. */ -NGTCP2_EXTERN const ngtcp2_crypto_ctx * -ngtcp2_conn_get_crypto_ctx(ngtcp2_conn *conn); +NGTCP2_EXTERN void *ngtcp2_conn_get_tls_native_handle(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_set_early_crypto_ctx` sets |ctx| for 0RTT packet - * encryption. The passed data will be passed to - * :type:`ngtcp2_encrypt`, :type:`ngtcp2_decrypt` and - * :type:`ngtcp2_hp_mask` callbacks. + * `ngtcp2_conn_set_tls_native_handle` sets TLS native handle + * |tls_native_handle| to |conn|. Internally, it is used as an opaque + * pointer. 
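Since the TLS native handle is stored as an opaque pointer, a typical use is to stash the TLS backend's session object there and recover it inside callbacks. A minimal sketch, assuming an OpenSSL-style SSL object purely for illustration:

```c
#include <ngtcp2/ngtcp2.h>
#include <openssl/ssl.h>

/* ngtcp2 only stores the pointer; ownership of the SSL object stays with
 * the application. */
static void attach_tls(ngtcp2_conn *conn, SSL *ssl) {
  ngtcp2_conn_set_tls_native_handle(conn, ssl);
}

/* Recover the same pointer later, e.g. from inside a callback. */
static SSL *get_tls(ngtcp2_conn *conn) {
  return ngtcp2_conn_get_tls_native_handle(conn);
}
```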
*/ -NGTCP2_EXTERN void -ngtcp2_conn_set_early_crypto_ctx(ngtcp2_conn *conn, - const ngtcp2_crypto_ctx *ctx); +NGTCP2_EXTERN void ngtcp2_conn_set_tls_native_handle(ngtcp2_conn *conn, + void *tls_native_handle); /** * @function * - * `ngtcp2_conn_get_early_crypto_ctx` returns - * :type:`ngtcp2_crypto_ctx` object for 0RTT packet encryption. + * `ngtcp2_conn_set_retry_aead` sets |aead| and |aead_ctx| for Retry + * integrity tag verification. |aead| must be AEAD_AES_128_GCM. + * |aead_ctx| must be initialized with :macro:`NGTCP2_RETRY_KEY` as + * encryption key. This function must be called if |conn| is + * initialized as client. Server does not verify the tag, and has no + * need to call this function. + * + * |conn| takes ownership of |aead_ctx|. + * :member:`ngtcp2_callbacks.delete_crypto_aead_ctx` will be called to + * delete this object when it is no longer used. */ -NGTCP2_EXTERN const ngtcp2_crypto_ctx * -ngtcp2_conn_get_early_crypto_ctx(ngtcp2_conn *conn); +NGTCP2_EXTERN void +ngtcp2_conn_set_retry_aead(ngtcp2_conn *conn, const ngtcp2_crypto_aead *aead, + const ngtcp2_crypto_aead_ctx *aead_ctx); /** * @enum * - * :type:`ngtcp2_connection_close_error_code_type` defines connection - * error code type. + * :type:`ngtcp2_ccerr_type` defines connection error type. */ -typedef enum ngtcp2_connection_close_error_code_type { +typedef enum ngtcp2_ccerr_type { /** - * :enum:`NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT` - * indicates the error code is QUIC transport error code. + * :enum:`NGTCP2_CCERR_TYPE_TRANSPORT` indicates the QUIC transport + * error, and the error code is QUIC transport error code. */ - NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT, + NGTCP2_CCERR_TYPE_TRANSPORT, /** - * :enum:`NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_APPLICATION` - * indicates the error code is application error code. + * :enum:`NGTCP2_CCERR_TYPE_APPLICATION` indicates an application + * error, and the error code is application error code. */ - NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_APPLICATION, + NGTCP2_CCERR_TYPE_APPLICATION, /** - * :enum:`NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT_VERSION_NEGOTIATION` - * is a special case of QUIC transport error, and it indicates that - * client receives Version Negotiation packet. + * :enum:`NGTCP2_CCERR_TYPE_VERSION_NEGOTIATION` is a special case + * of QUIC transport error, and it indicates that client receives + * Version Negotiation packet. */ - NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT_VERSION_NEGOTIATION, + NGTCP2_CCERR_TYPE_VERSION_NEGOTIATION, /** - * :enum:`NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT_IDLE_CLOSE` - * is a special case of QUIC transport error, and it indicates that - * connection is closed because of idle timeout. + * :enum:`NGTCP2_CCERR_TYPE_IDLE_CLOSE` is a special case of QUIC + * transport error, and it indicates that connection is closed + * because of idle timeout. */ - NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT_IDLE_CLOSE -} ngtcp2_connection_close_error_code_type; + NGTCP2_CCERR_TYPE_IDLE_CLOSE +} ngtcp2_ccerr_type; /** * @struct * - * :type:`ngtcp2_connection_close_error` contains connection - * error code, its type, and the optional reason phrase. + * :type:`ngtcp2_ccerr` contains connection error code, its type, a + * frame type that caused this error, and the optional reason phrase. */ -typedef struct ngtcp2_connection_close_error { +typedef struct ngtcp2_ccerr { /** - * :member:`type` is the type of :member:`error_code`. + * :member:`type` is the type of this error. 
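The renamed ngtcp2_ccerr_type values lend themselves to a straightforward switch when reporting why a connection ended. A sketch using only the enum constants and struct fields introduced in this hunk (the log destination is arbitrary):

```c
#include <inttypes.h>
#include <stdio.h>
#include <ngtcp2/ngtcp2.h>

/* Turn the error classification into a human-readable log line. */
static void log_ccerr(const ngtcp2_ccerr *ccerr) {
  switch (ccerr->type) {
  case NGTCP2_CCERR_TYPE_TRANSPORT:
    fprintf(stderr, "transport error %" PRIu64 "\n", ccerr->error_code);
    break;
  case NGTCP2_CCERR_TYPE_APPLICATION:
    fprintf(stderr, "application error %" PRIu64 "\n", ccerr->error_code);
    break;
  case NGTCP2_CCERR_TYPE_VERSION_NEGOTIATION:
    fprintf(stderr, "received Version Negotiation\n");
    break;
  case NGTCP2_CCERR_TYPE_IDLE_CLOSE:
    fprintf(stderr, "closed on idle timeout\n");
    break;
  }
}
```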
*/ - ngtcp2_connection_close_error_code_type type; + ngtcp2_ccerr_type type; /** * :member:`error_code` is the error code for connection closure. + * Its interpretation depends on :member:`type`. */ uint64_t error_code; /** @@ -5179,109 +5224,106 @@ typedef struct ngtcp2_connection_close_error { * received from a remote endpoint, it is truncated to at most 1024 * bytes. */ - uint8_t *reason; + const uint8_t *reason; /** * :member:`reasonlen` is the length of data pointed by * :member:`reason`. */ size_t reasonlen; -} ngtcp2_connection_close_error; +} ngtcp2_ccerr; /** * @function * - * `ngtcp2_connection_close_error_default` initializes |ccerr| with - * the default values. It sets the following fields: + * `ngtcp2_ccerr_default` initializes |ccerr| with the default values. + * It sets the following fields: * - * - :member:`type ` = - * :enum:`ngtcp2_connection_close_error_code_type.NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT` - * - :member:`error_code ` = + * - :member:`type ` = + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_TRANSPORT` + * - :member:`error_code ` = * :macro:`NGTCP2_NO_ERROR`. - * - :member:`frame_type ` = - * 0 - * - :member:`reason ` = NULL - * - :member:`reasonlen ` = 0 + * - :member:`frame_type ` = 0 + * - :member:`reason ` = NULL + * - :member:`reasonlen ` = 0 */ -NGTCP2_EXTERN void -ngtcp2_connection_close_error_default(ngtcp2_connection_close_error *ccerr); +NGTCP2_EXTERN void ngtcp2_ccerr_default(ngtcp2_ccerr *ccerr); /** * @function * - * `ngtcp2_connection_close_error_set_transport_error` sets - * :member:`ccerr->type ` to - * :enum:`ngtcp2_connection_close_error_code_type.NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT`, - * and :member:`ccerr->error_code - * ` to |error_code|. - * |reason| is the reason phrase of length |reasonlen|. This function - * does not make a copy of the reason phrase. + * `ngtcp2_ccerr_set_transport_error` sets :member:`ccerr->type + * ` to + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_TRANSPORT`, and + * :member:`ccerr->error_code ` to + * |error_code|. |reason| is the reason phrase of length |reasonlen|. + * This function does not make a copy of the reason phrase. */ -NGTCP2_EXTERN void ngtcp2_connection_close_error_set_transport_error( - ngtcp2_connection_close_error *ccerr, uint64_t error_code, - const uint8_t *reason, size_t reasonlen); +NGTCP2_EXTERN void ngtcp2_ccerr_set_transport_error(ngtcp2_ccerr *ccerr, + uint64_t error_code, + const uint8_t *reason, + size_t reasonlen); /** * @function * - * `ngtcp2_connection_close_error_set_transport_error_liberr` sets - * type and error_code based on |liberr|. + * `ngtcp2_ccerr_set_liberr` sets type and error_code based on + * |liberr|. + * + * |reason| is the reason phrase of length |reasonlen|. This function + * does not make a copy of the reason phrase. * * If |liberr| is :macro:`NGTCP2_ERR_RECV_VERSION_NEGOTIATION`, - * :member:`ccerr->type ` is set - * to - * :enum:`ngtcp2_connection_close_error_code_type.NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT_VERSION_NEGOTIATION`, - * and :member:`ccerr->error_code - * ` to - * :macro:`NGTCP2_NO_ERROR`. If |liberr| is - * :macro:`NGTCP2_ERR_IDLE_CLOSE`, :member:`ccerr->type - * ` is set to - * :enum:`ngtcp2_connection_close_error_code_type.NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT_IDLE_CLOSE`, - * and :member:`ccerr->error_code - * ` to - * :macro:`NGTCP2_NO_ERROR`. 
Otherwise, :member:`ccerr->type - * ` is set to - * :enum:`ngtcp2_connection_close_error_code_type.NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT`, - * and :member:`ccerr->error_code - * ` is set to an error code - * inferred by |liberr| (see - * `ngtcp2_err_infer_quic_transport_error_code`). |reason| is the - * reason phrase of length |reasonlen|. This function does not make a - * copy of the reason phrase. + * :member:`ccerr->type ` is set to + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_VERSION_NEGOTIATION`, + * and :member:`ccerr->error_code ` to + * :macro:`NGTCP2_NO_ERROR`. + * + * If |liberr| is :macro:`NGTCP2_ERR_IDLE_CLOSE`, :member:`ccerr->type + * ` is set to + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_IDLE_CLOSE`, and + * :member:`ccerr->error_code ` to + * :macro:`NGTCP2_NO_ERROR`. + * + * Otherwise, :member:`ccerr->type ` is set to + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_TRANSPORT`, and + * :member:`ccerr->error_code ` is set to an + * error code inferred by |liberr| (see + * `ngtcp2_err_infer_quic_transport_error_code`). */ -NGTCP2_EXTERN void ngtcp2_connection_close_error_set_transport_error_liberr( - ngtcp2_connection_close_error *ccerr, int liberr, const uint8_t *reason, - size_t reasonlen); +NGTCP2_EXTERN void ngtcp2_ccerr_set_liberr(ngtcp2_ccerr *ccerr, int liberr, + const uint8_t *reason, + size_t reasonlen); /** * @function * - * `ngtcp2_connection_close_error_set_transport_error_tls_alert` sets - * :member:`ccerr->type ` to - * :enum:`ngtcp2_connection_close_error_code_type.NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT`, - * and :member:`ccerr->error_code - * ` to bitwise-OR of - * :macro:`NGTCP2_CRYPTO_ERROR` and |tls_alert|. |reason| is the + * `ngtcp2_ccerr_set_tls_alert` sets :member:`ccerr->type + * ` to + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_TRANSPORT`, and + * :member:`ccerr->error_code ` to bitwise-OR + * of :macro:`NGTCP2_CRYPTO_ERROR` and |tls_alert|. |reason| is the * reason phrase of length |reasonlen|. This function does not make a * copy of the reason phrase. */ -NGTCP2_EXTERN void ngtcp2_connection_close_error_set_transport_error_tls_alert( - ngtcp2_connection_close_error *ccerr, uint8_t tls_alert, - const uint8_t *reason, size_t reasonlen); +NGTCP2_EXTERN void ngtcp2_ccerr_set_tls_alert(ngtcp2_ccerr *ccerr, + uint8_t tls_alert, + const uint8_t *reason, + size_t reasonlen); /** * @function * - * `ngtcp2_connection_close_error_set_application_error` sets - * :member:`ccerr->type ` to - * :enum:`ngtcp2_connection_close_error_code_type.NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_APPLICATION`, - * and :member:`ccerr->error_code - * ` to |error_code|. - * |reason| is the reason phrase of length |reasonlen|. This function - * does not make a copy of the reason phrase. + * `ngtcp2_ccerr_set_application_error` sets :member:`ccerr->type + * ` to + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_APPLICATION`, and + * :member:`ccerr->error_code ` to + * |error_code|. |reason| is the reason phrase of length |reasonlen|. + * This function does not make a copy of the reason phrase. 
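A small sketch of the ngtcp2_ccerr setter flow described above: initialize with the defaults, then record an application error before writing CONNECTION_CLOSE. The reason phrase is not copied, so it must stay valid until the close packet has been produced; the function name prepare_app_close is illustrative.

```c
#include <ngtcp2/ngtcp2.h>

/* Reason phrase lives in static storage because ngtcp2 does not copy it. */
static const uint8_t shutdown_reason[] = "going away";

static void prepare_app_close(ngtcp2_ccerr *ccerr, uint64_t app_error_code) {
  ngtcp2_ccerr_default(ccerr);
  ngtcp2_ccerr_set_application_error(ccerr, app_error_code, shutdown_reason,
                                     sizeof(shutdown_reason) - 1);
}
```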
*/ -NGTCP2_EXTERN void ngtcp2_connection_close_error_set_application_error( - ngtcp2_connection_close_error *ccerr, uint64_t error_code, - const uint8_t *reason, size_t reasonlen); +NGTCP2_EXTERN void ngtcp2_ccerr_set_application_error(ngtcp2_ccerr *ccerr, + uint64_t error_code, + const uint8_t *reason, + size_t reasonlen); /** * @function @@ -5295,20 +5337,20 @@ NGTCP2_EXTERN void ngtcp2_connection_close_error_set_application_error( * * If |path| is not ``NULL``, this function stores the network path * with which the packet should be sent. Each addr field must point - * to the buffer which should be at least ``sizeof(struct - * sockaddr_storage)`` bytes long. The assignment might not be done - * if nothing is written to |dest|. + * to the buffer which should be at least + * sizeof(:type:`ngtcp2_sockaddr_union`) bytes long. The assignment + * might not be done if nothing is written to |dest|. * * If |pi| is not ``NULL``, this function stores packet metadata in it * if it succeeds. The metadata includes ECN markings. * - * If :member:`ccerr->type ` == - * :enum:`ngtcp2_connection_close_error_code_type.NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT`, - * this function sends CONNECTION_CLOSE (type 0x1c) frame. If - * :member:`ccerr->type ` == - * :enum:`ngtcp2_connection_close_error_code_type.NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_APPLICATION`, - * it sends CONNECTION_CLOSE (type 0x1d) frame. Otherwise, it does - * not produce any data, and returns 0. + * If :member:`ccerr->type ` == + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_TRANSPORT`, this + * function sends CONNECTION_CLOSE (type 0x1c) frame. If + * :member:`ccerr->type ` == + * :enum:`ngtcp2_ccerr_type.NGTCP2_CCERR_TYPE_APPLICATION`, it sends + * CONNECTION_CLOSE (type 0x1d) frame. Otherwise, it does not produce + * any data, and returns 0. * * This function must not be called from inside the callback * functions. @@ -5325,7 +5367,8 @@ NGTCP2_EXTERN void ngtcp2_connection_close_error_set_application_error( * :macro:`NGTCP2_ERR_NOBUF` * Buffer is too small * :macro:`NGTCP2_ERR_INVALID_STATE` - * The current state does not allow sending CONNECTION_CLOSE. + * The current state does not allow sending CONNECTION_CLOSE + * frame. * :macro:`NGTCP2_ERR_PKT_NUM_EXHAUSTED` * Packet number is exhausted, and cannot send any more packet. * :macro:`NGTCP2_ERR_CALLBACK_FAILURE` @@ -5334,23 +5377,22 @@ NGTCP2_EXTERN void ngtcp2_connection_close_error_set_application_error( NGTCP2_EXTERN ngtcp2_ssize ngtcp2_conn_write_connection_close_versioned( ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - const ngtcp2_connection_close_error *ccerr, ngtcp2_tstamp ts); + const ngtcp2_ccerr *ccerr, ngtcp2_tstamp ts); /** * @function * - * `ngtcp2_conn_get_connection_close_error` stores the received - * connection close error in |ccerr|. + * `ngtcp2_conn_get_ccerr` returns the received connection close + * error. If no connection error is received, it returns + * :type:`ngtcp2_ccerr` that is initialized by `ngtcp2_ccerr_default`. */ -NGTCP2_EXTERN void -ngtcp2_conn_get_connection_close_error(ngtcp2_conn *conn, - ngtcp2_connection_close_error *ccerr); +NGTCP2_EXTERN const ngtcp2_ccerr *ngtcp2_conn_get_ccerr(ngtcp2_conn *conn); /** * @function * - * `ngtcp2_conn_is_local_stream` returns nonzero if |stream_id| denotes the - * stream which a local endpoint issues. + * `ngtcp2_conn_is_local_stream` returns nonzero if |stream_id| + * denotes a locally initiated stream. 
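On the receiving side, the replacement getter returns a pointer instead of filling a caller-supplied struct. A minimal sketch, which could be combined with the log_ccerr sketch shown earlier; note that the defaults (transport error NGTCP2_NO_ERROR) are what you get when no close was actually received:

```c
#include <ngtcp2/ngtcp2.h>

/* Inspect why the peer closed the connection.  If no CONNECTION_CLOSE was
 * received, the struct holds the ngtcp2_ccerr_default() values. */
static uint64_t closed_error_code(ngtcp2_conn *conn) {
  const ngtcp2_ccerr *ccerr = ngtcp2_conn_get_ccerr(conn);

  return ccerr->error_code;
}
```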
*/ NGTCP2_EXTERN int ngtcp2_conn_is_local_stream(ngtcp2_conn *conn, int64_t stream_id); @@ -5367,7 +5409,7 @@ NGTCP2_EXTERN int ngtcp2_conn_is_server(ngtcp2_conn *conn); * @function * * `ngtcp2_conn_after_retry` returns nonzero if |conn| as a client has - * received Retry packet from server and successfully validated it. + * received Retry packet from server, and successfully validated it. */ NGTCP2_EXTERN int ngtcp2_conn_after_retry(ngtcp2_conn *conn); @@ -5391,12 +5433,11 @@ NGTCP2_EXTERN int ngtcp2_conn_set_stream_user_data(ngtcp2_conn *conn, * @function * * `ngtcp2_conn_update_pkt_tx_time` sets the time instant of the next - * packet transmission. This function is noop if packet pacing is - * disabled. If packet pacing is enabled, this function must be - * called after (multiple invocation of) `ngtcp2_conn_writev_stream`. - * If packet aggregation (e.g., packet batching, GSO) is used, call - * this function after all aggregated datagrams are sent, which - * indicates multiple invocation of `ngtcp2_conn_writev_stream`. + * packet transmission to pace packets. This function must be called + * after (multiple invocation of) `ngtcp2_conn_writev_stream`. If + * packet aggregation (e.g., packet batching, GSO) is used, call this + * function after all aggregated datagrams are sent, which indicates + * multiple invocation of `ngtcp2_conn_writev_stream`. */ NGTCP2_EXTERN void ngtcp2_conn_update_pkt_tx_time(ngtcp2_conn *conn, ngtcp2_tstamp ts); @@ -5405,8 +5446,7 @@ NGTCP2_EXTERN void ngtcp2_conn_update_pkt_tx_time(ngtcp2_conn *conn, * @function * * `ngtcp2_conn_get_send_quantum` returns the maximum number of bytes - * that can be sent in one go without packet spacing. If packet - * pacing is disabled, this function returns SIZE_MAX. + * that can be sent in one go without packet spacing. */ NGTCP2_EXTERN size_t ngtcp2_conn_get_send_quantum(ngtcp2_conn *conn); @@ -5427,8 +5467,8 @@ NGTCP2_EXTERN size_t ngtcp2_conn_get_stream_loss_count(ngtcp2_conn *conn, * * `ngtcp2_strerror` returns the text representation of |liberr|. * |liberr| must be one of ngtcp2 library error codes (which is - * defined as NGTCP2_ERR_* macro, such as - * :macro:`NGTCP2_ERR_DECRYPT`). + * defined as :macro:`NGTCP2_ERR_* ` + * macros). */ NGTCP2_EXTERN const char *ngtcp2_strerror(int liberr); @@ -5437,8 +5477,8 @@ NGTCP2_EXTERN const char *ngtcp2_strerror(int liberr); * * `ngtcp2_err_is_fatal` returns nonzero if |liberr| is a fatal error. * |liberr| must be one of ngtcp2 library error codes (which is - * defined as NGTCP2_ERR_* macro, such as - * :macro:`NGTCP2_ERR_DECRYPT`). + * defined as :macro:`NGTCP2_ERR_* ` + * macros). */ NGTCP2_EXTERN int ngtcp2_err_is_fatal(int liberr); @@ -5448,7 +5488,7 @@ NGTCP2_EXTERN int ngtcp2_err_is_fatal(int liberr); * `ngtcp2_err_infer_quic_transport_error_code` returns a QUIC * transport error code which corresponds to |liberr|. |liberr| must * be one of ngtcp2 library error codes (which is defined as - * NGTCP2_ERR_* macro, such as :macro:`NGTCP2_ERR_DECRYPT`). + * :macro:`NGTCP2_ERR_* ` macros). */ NGTCP2_EXTERN uint64_t ngtcp2_err_infer_quic_transport_error_code(int liberr); @@ -5501,7 +5541,7 @@ NGTCP2_EXTERN void ngtcp2_path_storage_zero(ngtcp2_path_storage *ps); * @function * * `ngtcp2_settings_default` initializes |settings| with the default - * values. First this function fills |settings| with 0 and set the + * values. 
First this function fills |settings| with 0, and set the * default value to the following fields: * * * :type:`cc_algo ` = @@ -5509,10 +5549,10 @@ NGTCP2_EXTERN void ngtcp2_path_storage_zero(ngtcp2_path_storage *ps); * * :type:`initial_rtt ` = * :macro:`NGTCP2_DEFAULT_INITIAL_RTT` * * :type:`ack_thresh ` = 2 - * * :type:`max_udp_payload_size - * ` = 1452 + * * :type:`max_tx_udp_payload_size + * ` = 1452 * * :type:`handshake_timeout ` = - * :macro:`NGTCP2_DEFAULT_HANDSHAKE_TIMEOUT`. + * ``UINT64_MAX`` */ NGTCP2_EXTERN void ngtcp2_settings_default_versioned(int settings_version, ngtcp2_settings *settings); @@ -5521,7 +5561,7 @@ NGTCP2_EXTERN void ngtcp2_settings_default_versioned(int settings_version, * @function * * `ngtcp2_transport_params_default` initializes |params| with the - * default values. First this function fills |params| with 0 and set + * default values. First this function fills |params| with 0, and set * the default value to the following fields: * * * :type:`max_udp_payload_size @@ -5564,7 +5604,7 @@ NGTCP2_EXTERN const ngtcp2_mem *ngtcp2_mem_default(void); /** * @struct * - * :type:`ngtcp2_info` is what `ngtcp2_version()` returns. It holds + * :type:`ngtcp2_info` is what `ngtcp2_version` returns. It holds * information about the particular ngtcp2 version. */ typedef struct ngtcp2_info { @@ -5576,12 +5616,12 @@ typedef struct ngtcp2_info { int age; /** * :member:`version_num` is the :macro:`NGTCP2_VERSION_NUM` number - * (since age ==1) + * (since :member:`age` ==1) */ int version_num; /** * :member:`version_str` points to the :macro:`NGTCP2_VERSION` - * string (since age ==1) + * string (since :member:`age` ==1) */ const char *version_str; /* -------- the above fields all exist when age == 1 */ @@ -5590,10 +5630,10 @@ typedef struct ngtcp2_info { /** * @function * - * `ngtcp2_version` returns a pointer to a ngtcp2_info struct with - * version information about the run-time library in use. The + * `ngtcp2_version` returns a pointer to a :type:`ngtcp2_info` struct + * with version information about the run-time library in use. The * |least_version| argument can be set to a 24 bit numerical value for - * the least accepted version number and if the condition is not met, + * the least accepted version number, and if the condition is not met, * this function will return a ``NULL``. Pass in 0 to skip the * version checking. */ @@ -5628,12 +5668,12 @@ NGTCP2_EXTERN int ngtcp2_path_eq(const ngtcp2_path *a, const ngtcp2_path *b); /** * @function * - * `ngtcp2_is_supported_version` returns nonzero if the library supports - * QUIC version |version|. + * `ngtcp2_is_supported_version` returns nonzero if the library + * supports QUIC version |version|. */ NGTCP2_EXTERN int ngtcp2_is_supported_version(uint32_t version); -/* +/** * @function * * `ngtcp2_is_reserved_version` returns nonzero if |version| is a @@ -5649,9 +5689,9 @@ NGTCP2_EXTERN int ngtcp2_is_reserved_version(uint32_t version); * |preferred_versions| of |preferred_versionslen| elements specifies * the preference of versions, which is sorted in the order of * preference. All versions included in |preferred_versions| must be - * supported by the library, that is, passing a version to - * `ngtcp2_is_supported_version` must return nonzero. This function - * is intended to be used by client when it receives Version + * supported by the library, that is, passing any version in the array + * to `ngtcp2_is_supported_version` must return nonzero. 
This + * function is intended to be used by client when it receives Version * Negotiation packet. If no version is selected, this function * returns 0. */ @@ -5703,6 +5743,17 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ (PDATALEN), (FLAGS), (STREAM_ID), (DATAV), (DATAVCNT), (TS)) +/* + * `ngtcp2_conn_write_datagram` is a wrapper around + * `ngtcp2_conn_write_datagram_versioned` to set the correct struct + * version. + */ +#define ngtcp2_conn_write_datagram(CONN, PATH, PI, DEST, DESTLEN, PACCEPTED, \ + FLAGS, DGRAM_ID, DATA, DATALEN, TS) \ + ngtcp2_conn_write_datagram_versioned( \ + (CONN), (PATH), NGTCP2_PKT_INFO_VERSION, (PI), (DEST), (DESTLEN), \ + (PACCEPTED), (FLAGS), (DGRAM_ID), (DATA), (DATALEN), (TS)) + /* * `ngtcp2_conn_writev_datagram` is a wrapper around * `ngtcp2_conn_writev_datagram_versioned` to set the correct struct @@ -5726,22 +5777,22 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, (CCERR), (TS)) /* - * `ngtcp2_encode_transport_params` is a wrapper around - * `ngtcp2_encode_transport_params_versioned` to set the correct + * `ngtcp2_transport_params_encode` is a wrapper around + * `ngtcp2_transport_params_encode_versioned` to set the correct * struct version. */ -#define ngtcp2_encode_transport_params(DEST, DESTLEN, EXTTYPE, PARAMS) \ - ngtcp2_encode_transport_params_versioned( \ - (DEST), (DESTLEN), (EXTTYPE), NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS)) +#define ngtcp2_transport_params_encode(DEST, DESTLEN, PARAMS) \ + ngtcp2_transport_params_encode_versioned( \ + (DEST), (DESTLEN), NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS)) /* - * `ngtcp2_decode_transport_params` is a wrapper around - * `ngtcp2_decode_transport_params_versioned` to set the correct + * `ngtcp2_transport_params_decode` is a wrapper around + * `ngtcp2_transport_params_decode_versioned` to set the correct * struct version. */ -#define ngtcp2_decode_transport_params(PARAMS, EXTTYPE, DATA, DATALEN) \ - ngtcp2_decode_transport_params_versioned( \ - NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS), (EXTTYPE), (DATA), (DATALEN)) +#define ngtcp2_transport_params_decode(PARAMS, DATA, DATALEN) \ + ngtcp2_transport_params_decode_versioned(NGTCP2_TRANSPORT_PARAMS_VERSION, \ + (PARAMS), (DATA), (DATALEN)) /* * `ngtcp2_conn_client_new` is a wrapper around @@ -5767,15 +5818,6 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, (CALLBACKS), NGTCP2_SETTINGS_VERSION, (SETTINGS), \ NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS), (MEM), (USER_DATA)) -/* - * `ngtcp2_conn_set_early_remote_transport_params` is a wrapper around - * `ngtcp2_conn_set_early_remote_transport_params_versioned` to set - * the correct struct version. - */ -#define ngtcp2_conn_set_early_remote_transport_params(CONN, PARAMS) \ - ngtcp2_conn_set_early_remote_transport_params_versioned( \ - (CONN), NGTCP2_TRANSPORT_PARAMS_VERSION, (PARAMS)) - /* * `ngtcp2_conn_set_local_transport_params` is a wrapper around * `ngtcp2_conn_set_local_transport_params_versioned` to set the @@ -5795,12 +5837,12 @@ NGTCP2_EXTERN uint32_t ngtcp2_select_version(const uint32_t *preferred_versions, (PARAMS)) /* - * `ngtcp2_conn_get_conn_stat` is a wrapper around - * `ngtcp2_conn_get_conn_stat_versioned` to set the correct struct + * `ngtcp2_conn_get_conn_info` is a wrapper around + * `ngtcp2_conn_get_conn_info_versioned` to set the correct struct * version. 
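As a usage note for the run-time version check documented above, a sketch that refuses to start against an older ngtcp2 runtime. It assumes ngtcp2_version() returns a pointer to a const ngtcp2_info as described, and uses the NGTCP2_VERSION_NUM macro mentioned in the ngtcp2_info documentation.

```c
#include <stdio.h>
#include <ngtcp2/ngtcp2.h>

/* Reject a runtime library older than the headers we compiled against.
 * Passing 0 instead of NGTCP2_VERSION_NUM would skip the check entirely. */
static int check_runtime_version(void) {
  const ngtcp2_info *info = ngtcp2_version(NGTCP2_VERSION_NUM);

  if (info == NULL) {
    fprintf(stderr, "ngtcp2 runtime is too old\n");
    return -1;
  }

  fprintf(stderr, "using ngtcp2 %s\n", info->version_str);
  return 0;
}
```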
*/ -#define ngtcp2_conn_get_conn_stat(CONN, CSTAT) \ - ngtcp2_conn_get_conn_stat_versioned((CONN), NGTCP2_CONN_STAT_VERSION, (CSTAT)) +#define ngtcp2_conn_get_conn_info(CONN, CINFO) \ + ngtcp2_conn_get_conn_info_versioned((CONN), NGTCP2_CONN_INFO_VERSION, (CINFO)) /* * `ngtcp2_settings_default` is a wrapper around diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c index 02c45de90d112a..d4778d66accf31 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.c @@ -27,6 +27,9 @@ #include #include "ngtcp2_macro.h" +#include "ngtcp2_tstamp.h" + +ngtcp2_objalloc_def(acktr_entry, ngtcp2_acktr_entry, oplent); static void acktr_entry_init(ngtcp2_acktr_entry *ent, int64_t pkt_num, ngtcp2_tstamp tstamp) { @@ -66,7 +69,7 @@ int ngtcp2_acktr_init(ngtcp2_acktr *acktr, ngtcp2_log *log, rv = ngtcp2_ringbuf_init(&acktr->acks, 32, sizeof(ngtcp2_acktr_ack_entry), mem); if (rv != 0) { - return rv; + goto fail_acks_init; } ngtcp2_ksl_init(&acktr->ents, greater, sizeof(int64_t), mem); @@ -78,6 +81,10 @@ int ngtcp2_acktr_init(ngtcp2_acktr *acktr, ngtcp2_log *log, acktr->rx_npkt = 0; return 0; + +fail_acks_init: + ngtcp2_objalloc_free(&acktr->objalloc); + return rv; } void ngtcp2_acktr_free(ngtcp2_acktr *acktr) { @@ -286,16 +293,16 @@ void ngtcp2_acktr_recv_ack(ngtcp2_acktr *acktr, const ngtcp2_ack *fr) { return; } - min_ack = largest_ack - (int64_t)fr->first_ack_blklen; + min_ack = largest_ack - (int64_t)fr->first_ack_range; if (min_ack <= ent->pkt_num && ent->pkt_num <= largest_ack) { acktr_on_ack(acktr, rb, j); return; } - for (i = 0; i < fr->num_blks && j < nacks; ++i) { - largest_ack = min_ack - (int64_t)fr->blks[i].gap - 2; - min_ack = largest_ack - (int64_t)fr->blks[i].blklen; + for (i = 0; i < fr->rangecnt && j < nacks; ++i) { + largest_ack = min_ack - (int64_t)fr->ranges[i].gap - 2; + min_ack = largest_ack - (int64_t)fr->ranges[i].len; for (;;) { if (ent->pkt_num > largest_ack) { @@ -326,8 +333,7 @@ void ngtcp2_acktr_commit_ack(ngtcp2_acktr *acktr) { int ngtcp2_acktr_require_active_ack(ngtcp2_acktr *acktr, ngtcp2_duration max_ack_delay, ngtcp2_tstamp ts) { - return acktr->first_unacked_ts != UINT64_MAX && - acktr->first_unacked_ts + max_ack_delay <= ts; + return ngtcp2_tstamp_elapsed(acktr->first_unacked_ts, max_ack_delay, ts); } void ngtcp2_acktr_immediate_ack(ngtcp2_acktr *acktr) { diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h index 1b00d64fe62cb6..809fb692adc3c8 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_acktr.h @@ -65,7 +65,7 @@ typedef struct ngtcp2_acktr_entry { }; } ngtcp2_acktr_entry; -ngtcp2_objalloc_def(acktr_entry, ngtcp2_acktr_entry, oplent); +ngtcp2_objalloc_decl(acktr_entry, ngtcp2_acktr_entry, oplent); /* * ngtcp2_acktr_entry_objalloc_new allocates memory for ent, and @@ -124,7 +124,8 @@ typedef struct ngtcp2_acktr { /* first_unacked_ts is timestamp when ngtcp2_acktr_entry is added first time after the last outgoing ACK frame. */ ngtcp2_tstamp first_unacked_ts; - /* rx_npkt is the number of packets received without sending ACK. */ + /* rx_npkt is the number of ACK eliciting packets received without + sending ACK. 
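The ngtcp2_acktr.c hunk above folds the hand-written timestamp check into a new ngtcp2_tstamp_elapsed() helper. Its definition is not part of this excerpt, but the expression it replaces suggests behaviour along these lines (a reconstruction, not the actual implementation):

```c
#include <stdint.h>

/* Presumed shape of ngtcp2_tstamp_elapsed(): a base timestamp of UINT64_MAX
 * means "not recorded yet"; otherwise report whether |interval| has passed
 * by time |ts|. */
static int tstamp_elapsed(uint64_t base, uint64_t interval, uint64_t ts) {
  return base != UINT64_MAX && base + interval <= ts;
}
```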
*/ size_t rx_npkt; } ngtcp2_acktr; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.c index daab5dd7ce664b..f389abe76d71c8 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.c @@ -27,6 +27,8 @@ #include #include +#include "ngtcp2_unreachable.h" + ngtcp2_addr *ngtcp2_addr_init(ngtcp2_addr *dest, const ngtcp2_sockaddr *addr, ngtcp2_socklen addrlen) { dest->addrlen = addrlen; @@ -66,8 +68,7 @@ static int sockaddr_eq(const ngtcp2_sockaddr *a, const ngtcp2_sockaddr *b) { memcmp(&ai->sin6_addr, &bi->sin6_addr, sizeof(ai->sin6_addr)) == 0; } default: - assert(0); - abort(); + ngtcp2_unreachable(); } } @@ -109,8 +110,7 @@ uint32_t ngtcp2_addr_compare(const ngtcp2_addr *aa, const ngtcp2_addr *bb) { return flags; } default: - assert(0); - abort(); + ngtcp2_unreachable(); } } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h index f1d7f7bdc70ea9..8e3a9f591d9977 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_addr.h @@ -38,8 +38,10 @@ */ void ngtcp2_addr_copy(ngtcp2_addr *dest, const ngtcp2_addr *src); -/* - * ngtcp2_addr_eq returns nonzero if |a| equals |b|. +/** + * @function + * + * `ngtcp2_addr_eq` returns nonzero if |a| equals |b|. */ int ngtcp2_addr_eq(const ngtcp2_addr *a, const ngtcp2_addr *b); diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c index 0816d69b816c52..27c4667c03924a 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.c @@ -9,7 +9,7 @@ * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to - * the following conditions + * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. 
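The ngtcp2_addr.c changes above replace the repeated assert(0); abort(); pairs with ngtcp2_unreachable(). That helper is defined in ngtcp2_unreachable.h, which is not shown in this patch; a hypothetical stand-in with the same effect, given only to illustrate the intent of marking impossible switch cases, could look like this:

```c
#include <assert.h>
#include <stdlib.h>

/* Hypothetical equivalent of ngtcp2_unreachable(): fail loudly in debug
 * builds and abort unconditionally, so impossible cases never fall through. */
#define my_unreachable()                                                      \
  do {                                                                        \
    assert(0);                                                                \
    abort();                                                                  \
  } while (0)
```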
@@ -25,668 +25,1426 @@ #include "ngtcp2_bbr.h" #include +#include #include "ngtcp2_log.h" #include "ngtcp2_macro.h" #include "ngtcp2_mem.h" #include "ngtcp2_rcvry.h" #include "ngtcp2_rst.h" +#include "ngtcp2_conn_stat.h" -static const double pacing_gain_cycle[] = {1.25, 0.75, 1, 1, 1, 1, 1, 1}; +#define NGTCP2_BBR_MAX_BW_FILTERLEN 2 -#define NGTCP2_BBR_GAIN_CYCLELEN \ - (sizeof(pacing_gain_cycle) / sizeof(pacing_gain_cycle[0])) +#define NGTCP2_BBR_EXTRA_ACKED_FILTERLEN 10 + +#define NGTCP2_BBR_STARTUP_PACING_GAIN_H 277 + +#define NGTCP2_BBR_STARTUP_CWND_GAIN_H 200 + +#define NGTCP2_BBR_PROBE_RTT_CWND_GAIN_H 50 + +#define NGTCP2_BBR_BETA_NUMER 7 +#define NGTCP2_BBR_BETA_DENOM 10 + +#define NGTCP2_BBR_LOSS_THRESH_NUMER 2 +#define NGTCP2_BBR_LOSS_THRESH_DENOM 100 + +#define NGTCP2_BBR_HEADROOM_NUMER 15 +#define NGTCP2_BBR_HEADROOM_DENOM 100 + +#define NGTCP2_BBR_PROBE_RTT_INTERVAL (5 * NGTCP2_SECONDS) +#define NGTCP2_BBR_MIN_RTT_FILTERLEN (10 * NGTCP2_SECONDS) -#define NGTCP2_BBR_HIGH_GAIN 2.89 #define NGTCP2_BBR_PROBE_RTT_DURATION (200 * NGTCP2_MILLISECONDS) -#define NGTCP2_RTPROP_FILTERLEN (10 * NGTCP2_SECONDS) -#define NGTCP2_BBR_BTL_BW_FILTERLEN 10 -static void bbr_update_on_ack(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, +#define NGTCP2_BBR_PACING_MARGIN_PERCENT 1 + +static void bbr_on_init(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp initial_ts); + +static void bbr_on_transmit(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts); + +static void bbr_reset_congestion_signals(ngtcp2_cc_bbr *bbr); + +static void bbr_reset_lower_bounds(ngtcp2_cc_bbr *bbr); + +static void bbr_init_round_counting(ngtcp2_cc_bbr *bbr); + +static void bbr_init_full_pipe(ngtcp2_cc_bbr *bbr); + +static void bbr_init_pacing_rate(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); + +static void bbr_set_pacing_rate_with_gain(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + uint64_t pacing_gain_h); + +static void bbr_set_pacing_rate(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); + +static void bbr_enter_startup(ngtcp2_cc_bbr *bbr); + +static void bbr_check_startup_done(ngtcp2_cc_bbr *bbr, + const ngtcp2_cc_ack *ack); + +static void bbr_update_on_ack(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); -static void bbr_update_model_and_state(ngtcp2_bbr_cc *cc, + +static void bbr_update_model_and_state(ngtcp2_cc_bbr *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); -static void bbr_update_control_parameters(ngtcp2_bbr_cc *cc, + +static void bbr_update_control_parameters(ngtcp2_cc_bbr *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack); -static void bbr_on_transmit(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat); -static void bbr_init_round_counting(ngtcp2_bbr_cc *cc); -static void bbr_update_round(ngtcp2_bbr_cc *cc, const ngtcp2_cc_ack *ack); -static void bbr_update_btl_bw(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); -static void bbr_update_rtprop(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); -static void bbr_init_pacing_rate(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat); -static void bbr_set_pacing_rate_with_gain(ngtcp2_bbr_cc *cc, + +static void bbr_update_on_loss(ngtcp2_cc_bbr *cc, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts); + +static void bbr_update_latest_delivery_signals(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static void bbr_advance_latest_delivery_signals(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static void 
bbr_update_congestion_signals(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, - double pacing_gain); -static void bbr_set_pacing_rate(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat); -static void bbr_set_send_quantum(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat); -static void bbr_update_target_cwnd(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat); -static void bbr_modulate_cwnd_for_recovery(ngtcp2_bbr_cc *cc, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); -static void bbr_save_cwnd(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat); -static void bbr_restore_cwnd(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat); -static void bbr_modulate_cwnd_for_probe_rtt(ngtcp2_bbr_cc *cc, - ngtcp2_conn_stat *cstat); -static void bbr_set_cwnd(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); -static void bbr_init(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp initial_ts); -static void bbr_enter_startup(ngtcp2_bbr_cc *cc); -static void bbr_init_full_pipe(ngtcp2_bbr_cc *cc); -static void bbr_check_full_pipe(ngtcp2_bbr_cc *cc); -static void bbr_enter_drain(ngtcp2_bbr_cc *cc); -static void bbr_check_drain(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack); + +static void bbr_adapt_lower_bounds_from_congestion(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static void bbr_init_lower_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); + +static void bbr_loss_lower_bounds(ngtcp2_cc_bbr *bbr); + +static void bbr_bound_bw_for_model(ngtcp2_cc_bbr *bbr); + +static void bbr_update_max_bw(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack); + +static void bbr_update_round(ngtcp2_cc_bbr *bbr, const ngtcp2_cc_ack *ack); + +static void bbr_start_round(ngtcp2_cc_bbr *bbr); + +static int bbr_is_in_probe_bw_state(ngtcp2_cc_bbr *bbr); + +static void bbr_update_ack_aggregation(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, + ngtcp2_tstamp ts); + +static void bbr_enter_drain(ngtcp2_cc_bbr *bbr); + +static void bbr_check_drain(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -static void bbr_enter_probe_bw(ngtcp2_bbr_cc *cc, ngtcp2_tstamp ts); -static void bbr_check_cycle_phase(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); -static void bbr_advance_cycle_phase(ngtcp2_bbr_cc *cc, ngtcp2_tstamp ts); -static int bbr_is_next_cycle_phase(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, + +static void bbr_enter_probe_bw(ngtcp2_cc_bbr *bbr, ngtcp2_tstamp ts); + +static void bbr_start_probe_bw_down(ngtcp2_cc_bbr *bbr, ngtcp2_tstamp ts); + +static void bbr_start_probe_bw_cruise(ngtcp2_cc_bbr *bbr); + +static void bbr_start_probe_bw_refill(ngtcp2_cc_bbr *bbr); + +static void bbr_start_probe_bw_up(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts); + +static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, + ngtcp2_tstamp ts); + +static int bbr_check_time_to_cruise(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts); + +static int bbr_has_elapsed_in_phase(ngtcp2_cc_bbr *bbr, + ngtcp2_duration interval, ngtcp2_tstamp ts); + +static uint64_t bbr_inflight_with_headroom(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static void bbr_raise_inflight_hi_slope(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static void bbr_probe_inflight_hi_upward(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack); + +static void bbr_adapt_upper_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, 
const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); -static void bbr_handle_restart_from_idle(ngtcp2_bbr_cc *cc, - ngtcp2_conn_stat *cstat); -static void bbr_check_probe_rtt(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, + +static int bbr_check_time_to_probe_bw(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts); + +static void bbr_pick_probe_wait(ngtcp2_cc_bbr *bbr); + +static int bbr_is_reno_coexistence_probe_time(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static uint64_t bbr_target_inflight(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static int bbr_check_inflight_too_high(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts); + +static int is_inflight_too_high(const ngtcp2_rs *rs); + +static void bbr_handle_inflight_too_high(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + const ngtcp2_rs *rs, ngtcp2_tstamp ts); + +static void bbr_handle_lost_packet(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts); + +static uint64_t bbr_inflight_hi_from_lost_packet(ngtcp2_cc_bbr *bbr, + const ngtcp2_rs *rs, + const ngtcp2_cc_pkt *pkt); + +static void bbr_update_min_rtt(ngtcp2_cc_bbr *bbr, const ngtcp2_cc_ack *ack, + ngtcp2_tstamp ts); + +static void bbr_check_probe_rtt(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -static void bbr_enter_probe_rtt(ngtcp2_bbr_cc *cc); -static void bbr_handle_probe_rtt(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, + +static void bbr_enter_probe_rtt(ngtcp2_cc_bbr *bbr); + +static void bbr_handle_probe_rtt(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -static void bbr_exit_probe_rtt(ngtcp2_bbr_cc *cc, ngtcp2_tstamp ts); - -void ngtcp2_bbr_cc_init(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_rst *rst, ngtcp2_tstamp initial_ts, - ngtcp2_rand rand, const ngtcp2_rand_ctx *rand_ctx, - ngtcp2_log *log) { - cc->ccb.log = log; - cc->rst = rst; - cc->rand = rand; - cc->rand_ctx = *rand_ctx; - cc->initial_cwnd = cstat->cwnd; - bbr_init(cc, cstat, initial_ts); -} - -void ngtcp2_bbr_cc_free(ngtcp2_bbr_cc *cc) { (void)cc; } - -int ngtcp2_cc_bbr_cc_init(ngtcp2_cc *cc, ngtcp2_log *log, - ngtcp2_conn_stat *cstat, ngtcp2_rst *rst, - ngtcp2_tstamp initial_ts, ngtcp2_rand rand, - const ngtcp2_rand_ctx *rand_ctx, - const ngtcp2_mem *mem) { - ngtcp2_bbr_cc *bbr_cc; - - bbr_cc = ngtcp2_mem_calloc(mem, 1, sizeof(ngtcp2_bbr_cc)); - if (bbr_cc == NULL) { - return NGTCP2_ERR_NOMEM; - } - - ngtcp2_bbr_cc_init(bbr_cc, cstat, rst, initial_ts, rand, rand_ctx, log); - - cc->ccb = &bbr_cc->ccb; - cc->on_pkt_acked = ngtcp2_cc_bbr_cc_on_pkt_acked; - cc->congestion_event = ngtcp2_cc_bbr_cc_congestion_event; - cc->on_spurious_congestion = ngtcp2_cc_bbr_cc_on_spurious_congestion; - cc->on_persistent_congestion = ngtcp2_cc_bbr_cc_on_persistent_congestion; - cc->on_ack_recv = ngtcp2_cc_bbr_cc_on_ack_recv; - cc->on_pkt_sent = ngtcp2_cc_bbr_cc_on_pkt_sent; - cc->new_rtt_sample = ngtcp2_cc_bbr_cc_new_rtt_sample; - cc->reset = ngtcp2_cc_bbr_cc_reset; - cc->event = ngtcp2_cc_bbr_cc_event; - return 0; -} +static void bbr_check_probe_rtt_done(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); + +static void bbr_mark_connection_app_limited(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static void bbr_exit_probe_rtt(ngtcp2_cc_bbr *bbr, ngtcp2_tstamp ts); + +static void bbr_handle_restart_from_idle(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts); + +static uint64_t bbr_bdp_multiple(ngtcp2_cc_bbr *bbr, uint64_t bw, + uint64_t gain_h); + +static uint64_t 
bbr_quantization_budget(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + uint64_t inflight); + +static uint64_t bbr_inflight(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + uint64_t bw, uint64_t gain_h); + +static void bbr_update_max_inflight(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static void bbr_update_offload_budget(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static uint64_t min_pipe_cwnd(size_t max_udp_payload_size); + +static void bbr_advance_max_bw_filter(ngtcp2_cc_bbr *bbr); + +static void bbr_modulate_cwnd_for_recovery(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack); + +static void bbr_save_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); + +static void bbr_restore_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); + +static uint64_t bbr_probe_rtt_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); + +static void bbr_bound_cwnd_for_probe_rtt(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static void bbr_set_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack); + +static void bbr_bound_cwnd_for_model(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat); + +static void bbr_set_send_quantum(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat); + +static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, + ngtcp2_tstamp sent_time); + +static void bbr_handle_recovery(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); + +static void bbr_on_init(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp initial_ts) { + ngtcp2_window_filter_init(&bbr->max_bw_filter, NGTCP2_BBR_MAX_BW_FILTERLEN); + ngtcp2_window_filter_init(&bbr->extra_acked_filter, + NGTCP2_BBR_EXTRA_ACKED_FILTERLEN); + + bbr->min_rtt = UINT64_MAX; + bbr->min_rtt_stamp = initial_ts; + /* remark: Use UINT64_MAX instead of 0 for consistency. 
*/ + bbr->probe_rtt_done_stamp = UINT64_MAX; + bbr->probe_rtt_round_done = 0; + bbr->prior_cwnd = 0; + bbr->idle_restart = 0; + bbr->extra_acked_interval_start = initial_ts; + bbr->extra_acked_delivered = 0; + + bbr_reset_congestion_signals(bbr); + bbr_reset_lower_bounds(bbr); + bbr_init_round_counting(bbr); + bbr_init_full_pipe(bbr); + bbr_init_pacing_rate(bbr, cstat); + bbr_enter_startup(bbr); + + cstat->send_quantum = cstat->max_tx_udp_payload_size * 10; + + /* Missing in documentation */ + bbr->loss_round_start = 0; + bbr->loss_round_delivered = UINT64_MAX; + + bbr->rounds_since_bw_probe = 0; + + bbr->max_bw = 0; + bbr->bw = 0; + + bbr->cycle_count = 0; + + bbr->extra_acked = 0; + + bbr->bytes_lost_in_round = 0; + bbr->loss_events_in_round = 0; -void ngtcp2_cc_bbr_cc_free(ngtcp2_cc *cc, const ngtcp2_mem *mem) { - ngtcp2_bbr_cc *bbr_cc = ngtcp2_struct_of(cc->ccb, ngtcp2_bbr_cc, ccb); + bbr->offload_budget = 0; - ngtcp2_bbr_cc_free(bbr_cc); - ngtcp2_mem_free(mem, bbr_cc); + bbr->probe_up_cnt = UINT64_MAX; + bbr->cycle_stamp = UINT64_MAX; + bbr->ack_phase = 0; + bbr->bw_probe_wait = 0; + bbr->bw_probe_samples = 0; + bbr->bw_probe_up_rounds = 0; + bbr->bw_probe_up_acks = 0; + + bbr->inflight_hi = UINT64_MAX; + bbr->bw_hi = UINT64_MAX; + + bbr->probe_rtt_expired = 0; + bbr->probe_rtt_min_delay = UINT64_MAX; + bbr->probe_rtt_min_stamp = initial_ts; + + bbr->in_loss_recovery = 0; + bbr->packet_conservation = 0; + + bbr->max_inflight = 0; + + bbr->congestion_recovery_start_ts = UINT64_MAX; + bbr->congestion_recovery_next_round_delivered = 0; + + bbr->prior_inflight_lo = 0; + bbr->prior_inflight_hi = 0; + bbr->prior_bw_lo = 0; } -void ngtcp2_cc_bbr_cc_on_pkt_acked(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { - (void)ccx; - (void)cstat; - (void)pkt; - (void)ts; +static void bbr_reset_congestion_signals(ngtcp2_cc_bbr *bbr) { + bbr->loss_in_round = 0; + bbr->bw_latest = 0; + bbr->inflight_latest = 0; } -static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, - ngtcp2_tstamp sent_time) { - return cstat->congestion_recovery_start_ts != UINT64_MAX && - sent_time <= cstat->congestion_recovery_start_ts; +static void bbr_reset_lower_bounds(ngtcp2_cc_bbr *bbr) { + bbr->bw_lo = UINT64_MAX; + bbr->inflight_lo = UINT64_MAX; } -void ngtcp2_cc_bbr_cc_congestion_event(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp sent_ts, - ngtcp2_tstamp ts) { - ngtcp2_bbr_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr_cc, ccb); +static void bbr_init_round_counting(ngtcp2_cc_bbr *bbr) { + bbr->next_round_delivered = 0; + bbr->round_start = 0; + bbr->round_count = 0; +} - if (cc->in_loss_recovery || cc->congestion_recovery_start_ts != UINT64_MAX || - in_congestion_recovery(cstat, sent_ts)) { +static void bbr_init_full_pipe(ngtcp2_cc_bbr *bbr) { + bbr->filled_pipe = 0; + bbr->full_bw = 0; + bbr->full_bw_count = 0; +} + +static void bbr_check_startup_full_bandwidth(ngtcp2_cc_bbr *bbr) { + if (bbr->filled_pipe || !bbr->round_start || bbr->rst->rs.is_app_limited) { return; } - cc->congestion_recovery_start_ts = ts; -} + if (bbr->max_bw * 100 >= bbr->full_bw * 125) { + bbr->full_bw = bbr->max_bw; + bbr->full_bw_count = 0; + } -void ngtcp2_cc_bbr_cc_on_spurious_congestion(ngtcp2_cc *ccx, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - ngtcp2_bbr_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr_cc, ccb); - (void)ts; + ++bbr->full_bw_count; - cc->congestion_recovery_start_ts = UINT64_MAX; - cstat->congestion_recovery_start_ts = UINT64_MAX; + if (bbr->full_bw_count >= 3) { 
+ bbr->filled_pipe = 1; - if (cc->in_loss_recovery) { - cc->in_loss_recovery = 0; - cc->packet_conservation = 0; - bbr_restore_cwnd(cc, cstat); + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, + "bbr filled pipe, full_bw=%" PRIu64, bbr->full_bw); } } -void ngtcp2_cc_bbr_cc_on_persistent_congestion(ngtcp2_cc *ccx, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - ngtcp2_bbr_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr_cc, ccb); - (void)ts; +static void bbr_check_startup_high_loss(ngtcp2_cc_bbr *bbr, + const ngtcp2_cc_ack *ack) { + if (bbr->filled_pipe || !bbr->round_start || bbr->rst->rs.is_app_limited) { + return; + } - cstat->congestion_recovery_start_ts = UINT64_MAX; - cc->congestion_recovery_start_ts = UINT64_MAX; - cc->in_loss_recovery = 0; - cc->packet_conservation = 0; + if (bbr->loss_events_in_round <= 3) { + return; + } - bbr_save_cwnd(cc, cstat); - cstat->cwnd = 2 * cstat->max_udp_payload_size; -} + /* loss_thresh = 2% */ + if (bbr->bytes_lost_in_round * 100 <= ack->prior_bytes_in_flight * 2) { + return; + } -void ngtcp2_cc_bbr_cc_on_ack_recv(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - ngtcp2_bbr_cc *bbr_cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr_cc, ccb); + bbr->filled_pipe = 1; +} - bbr_update_on_ack(bbr_cc, cstat, ack, ts); +static void bbr_init_pacing_rate(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { + cstat->pacing_interval = NGTCP2_MILLISECONDS * 100 / + NGTCP2_BBR_STARTUP_PACING_GAIN_H / bbr->initial_cwnd; } -void ngtcp2_cc_bbr_cc_on_pkt_sent(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt) { - ngtcp2_bbr_cc *bbr_cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr_cc, ccb); - (void)pkt; +static void bbr_set_pacing_rate_with_gain(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + uint64_t pacing_gain_h) { + ngtcp2_duration interval; + + if (bbr->bw == 0) { + return; + } + + interval = NGTCP2_SECONDS * 100 * 100 / pacing_gain_h / bbr->bw / + (100 - NGTCP2_BBR_PACING_MARGIN_PERCENT); - bbr_on_transmit(bbr_cc, cstat); + if (bbr->filled_pipe || interval < cstat->pacing_interval) { + cstat->pacing_interval = interval; + } } -void ngtcp2_cc_bbr_cc_new_rtt_sample(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - (void)ccx; - (void)cstat; - (void)ts; +static void bbr_set_pacing_rate(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { + bbr_set_pacing_rate_with_gain(bbr, cstat, bbr->pacing_gain_h); } -void ngtcp2_cc_bbr_cc_reset(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - ngtcp2_bbr_cc *bbr_cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr_cc, ccb); - bbr_init(bbr_cc, cstat, ts); +static void bbr_enter_startup(ngtcp2_cc_bbr *bbr) { + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, "bbr enter Startup"); + + bbr->state = NGTCP2_BBR_STATE_STARTUP; + bbr->pacing_gain_h = NGTCP2_BBR_STARTUP_PACING_GAIN_H; + bbr->cwnd_gain_h = NGTCP2_BBR_STARTUP_CWND_GAIN_H; } -void ngtcp2_cc_bbr_cc_event(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - ngtcp2_cc_event_type event, ngtcp2_tstamp ts) { - (void)ccx; - (void)cstat; - (void)event; - (void)ts; +static void bbr_check_startup_done(ngtcp2_cc_bbr *bbr, + const ngtcp2_cc_ack *ack) { + bbr_check_startup_full_bandwidth(bbr); + bbr_check_startup_high_loss(bbr, ack); + + if (bbr->state == NGTCP2_BBR_STATE_STARTUP && bbr->filled_pipe) { + bbr_enter_drain(bbr); + } +} + +static void bbr_on_transmit(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + bbr_handle_restart_from_idle(bbr, cstat, ts); } -static void bbr_update_on_ack(ngtcp2_bbr_cc *cc, 
ngtcp2_conn_stat *cstat, +static void bbr_update_on_ack(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - bbr_update_model_and_state(cc, cstat, ack, ts); - bbr_update_control_parameters(cc, cstat, ack); + bbr_update_model_and_state(bbr, cstat, ack, ts); + bbr_update_control_parameters(bbr, cstat, ack); } -static void bbr_update_model_and_state(ngtcp2_bbr_cc *cc, +static void bbr_update_model_and_state(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - bbr_update_btl_bw(cc, cstat, ack); - bbr_check_cycle_phase(cc, cstat, ack, ts); - bbr_check_full_pipe(cc); - bbr_check_drain(cc, cstat, ts); - bbr_update_rtprop(cc, cstat, ts); - bbr_check_probe_rtt(cc, cstat, ts); + bbr_update_latest_delivery_signals(bbr, cstat); + bbr_update_congestion_signals(bbr, cstat, ack); + bbr_update_ack_aggregation(bbr, cstat, ack, ts); + bbr_check_startup_done(bbr, ack); + bbr_check_drain(bbr, cstat, ts); + bbr_update_probe_bw_cycle_phase(bbr, cstat, ack, ts); + bbr_update_min_rtt(bbr, ack, ts); + bbr_check_probe_rtt(bbr, cstat, ts); + bbr_advance_latest_delivery_signals(bbr, cstat); + bbr_bound_bw_for_model(bbr); } -static void bbr_update_control_parameters(ngtcp2_bbr_cc *cc, +static void bbr_update_control_parameters(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack) { - bbr_set_pacing_rate(cc, cstat); - bbr_set_send_quantum(cc, cstat); - bbr_set_cwnd(cc, cstat, ack); + bbr_set_pacing_rate(bbr, cstat); + bbr_set_send_quantum(bbr, cstat); + bbr_set_cwnd(bbr, cstat, ack); } -static void bbr_on_transmit(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat) { - bbr_handle_restart_from_idle(cc, cstat); +static void bbr_update_on_loss(ngtcp2_cc_bbr *cc, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { + bbr_handle_lost_packet(cc, cstat, pkt, ts); } -static void bbr_init_round_counting(ngtcp2_bbr_cc *cc) { - cc->next_round_delivered = 0; - cc->round_start = 0; - cc->round_count = 0; -} - -static void bbr_update_round(ngtcp2_bbr_cc *cc, const ngtcp2_cc_ack *ack) { - if (ack->pkt_delivered >= cc->next_round_delivered) { - cc->next_round_delivered = cc->rst->delivered; - ++cc->round_count; - cc->round_start = 1; +static void bbr_update_latest_delivery_signals(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + bbr->loss_round_start = 0; + bbr->bw_latest = ngtcp2_max(bbr->bw_latest, cstat->delivery_rate_sec); + bbr->inflight_latest = + ngtcp2_max(bbr->inflight_latest, bbr->rst->rs.delivered); - return; + if (bbr->rst->rs.prior_delivered >= bbr->loss_round_delivered) { + bbr->loss_round_delivered = bbr->rst->delivered; + bbr->loss_round_start = 1; } +} - cc->round_start = 0; +static void bbr_advance_latest_delivery_signals(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + if (bbr->loss_round_start) { + bbr->bw_latest = cstat->delivery_rate_sec; + bbr->inflight_latest = bbr->rst->rs.delivered; + } } -static void bbr_handle_recovery(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - if (cc->in_loss_recovery) { - if (ack->pkt_delivered >= cc->congestion_recovery_next_round_delivered) { - cc->packet_conservation = 0; - } +static void bbr_update_congestion_signals(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack) { + bbr_update_max_bw(bbr, cstat, ack); + + if (ack->bytes_lost) { + bbr->bytes_lost_in_round += ack->bytes_lost; + ++bbr->loss_events_in_round; - if (!in_congestion_recovery(cstat, ack->largest_acked_sent_ts)) { - cc->in_loss_recovery = 0; - 
cc->packet_conservation = 0; - bbr_restore_cwnd(cc, cstat); + if (!bbr->loss_in_round) { + bbr->loss_in_round = 1; + bbr->loss_round_delivered = bbr->rst->delivered; } + } + if (!bbr->loss_round_start) { return; } - if (cc->congestion_recovery_start_ts != UINT64_MAX) { - cc->in_loss_recovery = 1; - bbr_save_cwnd(cc, cstat); - cstat->cwnd = cstat->bytes_in_flight + - ngtcp2_max(ack->bytes_delivered, cstat->max_udp_payload_size); + bbr_adapt_lower_bounds_from_congestion(bbr, cstat); - cstat->congestion_recovery_start_ts = cc->congestion_recovery_start_ts; - cc->congestion_recovery_start_ts = UINT64_MAX; - cc->packet_conservation = 1; - cc->congestion_recovery_next_round_delivered = cc->rst->delivered; + bbr->loss_in_round = 0; +} + +static void bbr_adapt_lower_bounds_from_congestion(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + if (bbr_is_in_probe_bw_state(bbr)) { + return; + } + + if (bbr->loss_in_round) { + bbr_init_lower_bounds(bbr, cstat); + bbr_loss_lower_bounds(bbr); } } -static void bbr_update_btl_bw(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - bbr_update_round(cc, ack); - bbr_handle_recovery(cc, cstat, ack); +static void bbr_init_lower_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { + if (bbr->bw_lo == UINT64_MAX) { + bbr->bw_lo = bbr->max_bw; + } - if (cstat->delivery_rate_sec < cc->btl_bw && cc->rst->rs.is_app_limited) { - return; + if (bbr->inflight_lo == UINT64_MAX) { + bbr->inflight_lo = cstat->cwnd; } +} - ngtcp2_window_filter_update(&cc->btl_bw_filter, cstat->delivery_rate_sec, - cc->round_count); +static void bbr_loss_lower_bounds(ngtcp2_cc_bbr *bbr) { + bbr->bw_lo = ngtcp2_max(bbr->bw_latest, bbr->bw_lo * NGTCP2_BBR_BETA_NUMER / + NGTCP2_BBR_BETA_DENOM); + bbr->inflight_lo = ngtcp2_max(bbr->inflight_latest, + bbr->inflight_lo * NGTCP2_BBR_BETA_NUMER / + NGTCP2_BBR_BETA_DENOM); +} - cc->btl_bw = ngtcp2_window_filter_get_best(&cc->btl_bw_filter); +static void bbr_bound_bw_for_model(ngtcp2_cc_bbr *bbr) { + bbr->bw = ngtcp2_min(bbr->max_bw, bbr->bw_lo); + bbr->bw = ngtcp2_min(bbr->bw, bbr->bw_hi); } -static void bbr_update_rtprop(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - cc->rtprop_expired = ts > cc->rtprop_stamp + NGTCP2_RTPROP_FILTERLEN; +static void bbr_update_max_bw(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack) { + bbr_update_round(bbr, ack); - /* Need valid RTT sample */ - if (cstat->latest_rtt && - (cstat->latest_rtt <= cc->rt_prop || cc->rtprop_expired)) { - cc->rt_prop = cstat->latest_rtt; - cc->rtprop_stamp = ts; + if (cstat->delivery_rate_sec >= bbr->max_bw || !bbr->rst->rs.is_app_limited) { + ngtcp2_window_filter_update(&bbr->max_bw_filter, cstat->delivery_rate_sec, + bbr->cycle_count); - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr update RTprop=%" PRIu64, cc->rt_prop); + bbr->max_bw = ngtcp2_window_filter_get_best(&bbr->max_bw_filter); } } -static void bbr_init_pacing_rate(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat) { - double nominal_bandwidth = - (double)cc->initial_cwnd / (double)NGTCP2_MILLISECONDS; +static void bbr_update_round(ngtcp2_cc_bbr *bbr, const ngtcp2_cc_ack *ack) { + if (ack->pkt_delivered >= bbr->next_round_delivered) { + bbr_start_round(bbr); - cstat->pacing_rate = cc->pacing_gain * nominal_bandwidth; -} + ++bbr->round_count; + ++bbr->rounds_since_bw_probe; + bbr->round_start = 1; -static void bbr_set_pacing_rate_with_gain(ngtcp2_bbr_cc *cc, - ngtcp2_conn_stat *cstat, - double pacing_gain) { - double rate = pacing_gain * (double)cc->btl_bw / 
NGTCP2_SECONDS; + bbr->bytes_lost_in_round = 0; + bbr->loss_events_in_round = 0; + + bbr->rst->is_cwnd_limited = 0; - if (cc->filled_pipe || rate > cstat->pacing_rate) { - cstat->pacing_rate = rate; + return; } + + bbr->round_start = 0; } -static void bbr_set_pacing_rate(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat) { - bbr_set_pacing_rate_with_gain(cc, cstat, cc->pacing_gain); +static void bbr_start_round(ngtcp2_cc_bbr *bbr) { + bbr->next_round_delivered = bbr->rst->delivered; } -static void bbr_set_send_quantum(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat) { - uint64_t send_quantum; - (void)cc; +static int bbr_is_in_probe_bw_state(ngtcp2_cc_bbr *bbr) { + switch (bbr->state) { + case NGTCP2_BBR_STATE_PROBE_BW_DOWN: + case NGTCP2_BBR_STATE_PROBE_BW_CRUISE: + case NGTCP2_BBR_STATE_PROBE_BW_REFILL: + case NGTCP2_BBR_STATE_PROBE_BW_UP: + return 1; + default: + return 0; + } +} - if (cstat->pacing_rate < 1.2 * 1024 * 1024 / 8 / NGTCP2_SECONDS) { - cstat->send_quantum = cstat->max_udp_payload_size; - } else if (cstat->pacing_rate < 24.0 * 1024 * 1024 / 8 / NGTCP2_SECONDS) { - cstat->send_quantum = cstat->max_udp_payload_size * 2; - } else { - send_quantum = - (uint64_t)(cstat->pacing_rate * (double)(cstat->min_rtt == UINT64_MAX - ? NGTCP2_MILLISECONDS - : cstat->min_rtt)); - cstat->send_quantum = (size_t)ngtcp2_min(send_quantum, 64 * 1024); +static void bbr_update_ack_aggregation(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, + ngtcp2_tstamp ts) { + ngtcp2_duration interval = ts - bbr->extra_acked_interval_start; + uint64_t expected_delivered = bbr->bw * interval / NGTCP2_SECONDS; + uint64_t extra; + + if (bbr->extra_acked_delivered <= expected_delivered) { + bbr->extra_acked_delivered = 0; + bbr->extra_acked_interval_start = ts; + expected_delivered = 0; } - cstat->send_quantum = - ngtcp2_max(cstat->send_quantum, cstat->max_udp_payload_size * 10); + bbr->extra_acked_delivered += ack->bytes_delivered; + extra = bbr->extra_acked_delivered - expected_delivered; + extra = ngtcp2_min(extra, cstat->cwnd); + + ngtcp2_window_filter_update(&bbr->extra_acked_filter, extra, + bbr->round_count); + + bbr->extra_acked = ngtcp2_window_filter_get_best(&bbr->extra_acked_filter); } -static uint64_t bbr_inflight(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - double gain) { - uint64_t quanta = 3 * cstat->send_quantum; - double estimated_bdp; +static void bbr_enter_drain(ngtcp2_cc_bbr *bbr) { + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, "bbr enter Drain"); + + bbr->state = NGTCP2_BBR_STATE_DRAIN; + bbr->pacing_gain_h = 100 * 100 / NGTCP2_BBR_STARTUP_CWND_GAIN_H; + bbr->cwnd_gain_h = NGTCP2_BBR_STARTUP_CWND_GAIN_H; +} - if (cc->rt_prop == UINT64_MAX) { - /* no valid RTT samples yet */ - return cc->initial_cwnd; +static void bbr_check_drain(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + if (bbr->state == NGTCP2_BBR_STATE_DRAIN && + cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, bbr->bw, 100)) { + bbr_enter_probe_bw(bbr, ts); } +} - estimated_bdp = (double)cc->btl_bw * (double)cc->rt_prop / NGTCP2_SECONDS; +static void bbr_enter_probe_bw(ngtcp2_cc_bbr *bbr, ngtcp2_tstamp ts) { + bbr_start_probe_bw_down(bbr, ts); +} - return (uint64_t)(gain * estimated_bdp) + quanta; +static void bbr_start_probe_bw_down(ngtcp2_cc_bbr *bbr, ngtcp2_tstamp ts) { + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, "bbr start ProbeBW_DOWN"); + + bbr_reset_congestion_signals(bbr); + + bbr->probe_up_cnt = UINT64_MAX; + + bbr_pick_probe_wait(bbr); + + bbr->cycle_stamp = ts; + 
bbr->ack_phase = NGTCP2_BBR_ACK_PHASE_ACKS_PROBE_STOPPING; + + bbr_start_round(bbr); + + bbr->state = NGTCP2_BBR_STATE_PROBE_BW_DOWN; + bbr->pacing_gain_h = 90; + bbr->cwnd_gain_h = 200; } -static void bbr_update_target_cwnd(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat) { - cc->target_cwnd = bbr_inflight(cc, cstat, cc->cwnd_gain); +static void bbr_start_probe_bw_cruise(ngtcp2_cc_bbr *bbr) { + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, + "bbr start ProbeBW_CRUISE"); + + bbr->state = NGTCP2_BBR_STATE_PROBE_BW_CRUISE; + bbr->pacing_gain_h = 100; + bbr->cwnd_gain_h = 200; } -static void bbr_modulate_cwnd_for_recovery(ngtcp2_bbr_cc *cc, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - if (ack->bytes_lost > 0) { - if (cstat->cwnd > ack->bytes_lost) { - cstat->cwnd -= ack->bytes_lost; - cstat->cwnd = ngtcp2_max(cstat->cwnd, 2 * cstat->max_udp_payload_size); - } else { - cstat->cwnd = cstat->max_udp_payload_size; - } +static void bbr_start_probe_bw_refill(ngtcp2_cc_bbr *bbr) { + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, + "bbr start ProbeBW_REFILL"); + + bbr_reset_lower_bounds(bbr); + + bbr->bw_probe_up_rounds = 0; + bbr->bw_probe_up_acks = 0; + bbr->ack_phase = NGTCP2_BBR_ACK_PHASE_ACKS_REFILLING; + + bbr_start_round(bbr); + + bbr->state = NGTCP2_BBR_STATE_PROBE_BW_REFILL; + bbr->pacing_gain_h = 100; + bbr->cwnd_gain_h = 200; +} + +static void bbr_start_probe_bw_up(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, "bbr start ProbeBW_UP"); + + bbr->ack_phase = NGTCP2_BBR_ACK_PHASE_ACKS_PROBE_STARTING; + + bbr_start_round(bbr); + + bbr->cycle_stamp = ts; + bbr->state = NGTCP2_BBR_STATE_PROBE_BW_UP; + bbr->pacing_gain_h = 125; + bbr->cwnd_gain_h = 225; + + bbr_raise_inflight_hi_slope(bbr, cstat); +} + +static void bbr_update_probe_bw_cycle_phase(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, + ngtcp2_tstamp ts) { + if (!bbr->filled_pipe) { + return; } - if (cc->packet_conservation) { - cstat->cwnd = - ngtcp2_max(cstat->cwnd, cstat->bytes_in_flight + ack->bytes_delivered); + bbr_adapt_upper_bounds(bbr, cstat, ack, ts); + + if (!bbr_is_in_probe_bw_state(bbr)) { + return; + } + + switch (bbr->state) { + case NGTCP2_BBR_STATE_PROBE_BW_DOWN: + if (bbr_check_time_to_probe_bw(bbr, cstat, ts)) { + return; + } + + if (bbr_check_time_to_cruise(bbr, cstat, ts)) { + bbr_start_probe_bw_cruise(bbr); + } + + break; + case NGTCP2_BBR_STATE_PROBE_BW_CRUISE: + if (bbr_check_time_to_probe_bw(bbr, cstat, ts)) { + return; + } + + break; + case NGTCP2_BBR_STATE_PROBE_BW_REFILL: + if (bbr->round_start) { + bbr->bw_probe_samples = 1; + bbr_start_probe_bw_up(bbr, cstat, ts); + } + + break; + case NGTCP2_BBR_STATE_PROBE_BW_UP: + if (bbr_has_elapsed_in_phase(bbr, bbr->min_rtt, ts) && + cstat->bytes_in_flight > bbr_inflight(bbr, cstat, bbr->max_bw, 125)) { + bbr_start_probe_bw_down(bbr, ts); + } + + break; + default: + break; } } -static void bbr_save_cwnd(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat) { - if (!cc->in_loss_recovery && cc->state != NGTCP2_BBR_STATE_PROBE_RTT) { - cc->prior_cwnd = cstat->cwnd; - return; +static int bbr_check_time_to_cruise(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + (void)ts; + + if (cstat->bytes_in_flight > bbr_inflight_with_headroom(bbr, cstat)) { + return 0; } - cc->prior_cwnd = ngtcp2_max(cc->prior_cwnd, cstat->cwnd); + if (cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, bbr->max_bw, 100)) { + return 1; + } + + return 0; } -static void 
bbr_restore_cwnd(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat) { - cstat->cwnd = ngtcp2_max(cstat->cwnd, cc->prior_cwnd); +static int bbr_has_elapsed_in_phase(ngtcp2_cc_bbr *bbr, + ngtcp2_duration interval, + ngtcp2_tstamp ts) { + return ts > bbr->cycle_stamp + interval; } -static uint64_t min_pipe_cwnd(size_t max_udp_payload_size) { - return max_udp_payload_size * 4; +static uint64_t bbr_inflight_with_headroom(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + uint64_t headroom; + uint64_t mpcwnd; + if (bbr->inflight_hi == UINT64_MAX) { + return UINT64_MAX; + } + + headroom = ngtcp2_max(cstat->max_tx_udp_payload_size, + bbr->inflight_hi * NGTCP2_BBR_HEADROOM_NUMER / + NGTCP2_BBR_HEADROOM_DENOM); + mpcwnd = min_pipe_cwnd(cstat->max_tx_udp_payload_size); + + if (bbr->inflight_hi > headroom) { + return ngtcp2_max(bbr->inflight_hi - headroom, mpcwnd); + } + + return mpcwnd; } -static void bbr_modulate_cwnd_for_probe_rtt(ngtcp2_bbr_cc *cc, - ngtcp2_conn_stat *cstat) { - if (cc->state == NGTCP2_BBR_STATE_PROBE_RTT) { - cstat->cwnd = - ngtcp2_min(cstat->cwnd, min_pipe_cwnd(cstat->max_udp_payload_size)); +static void bbr_raise_inflight_hi_slope(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + uint64_t growth_this_round = cstat->max_tx_udp_payload_size + << bbr->bw_probe_up_rounds; + + bbr->bw_probe_up_rounds = ngtcp2_min(bbr->bw_probe_up_rounds + 1, 30); + bbr->probe_up_cnt = ngtcp2_max(cstat->cwnd / growth_this_round, 1) * + cstat->max_tx_udp_payload_size; +} + +static void bbr_probe_inflight_hi_upward(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack) { + uint64_t delta; + + if (!bbr->rst->is_cwnd_limited || cstat->cwnd < bbr->inflight_hi) { + return; + } + + bbr->bw_probe_up_acks += ack->bytes_delivered; + + if (bbr->bw_probe_up_acks >= bbr->probe_up_cnt) { + delta = bbr->bw_probe_up_acks / bbr->probe_up_cnt; + bbr->bw_probe_up_acks -= delta * bbr->probe_up_cnt; + bbr->inflight_hi += delta * cstat->max_tx_udp_payload_size; + } + + if (bbr->round_start) { + bbr_raise_inflight_hi_slope(bbr, cstat); } } -static void bbr_set_cwnd(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - bbr_update_target_cwnd(cc, cstat); - bbr_modulate_cwnd_for_recovery(cc, cstat, ack); - - if (!cc->packet_conservation) { - if (cc->filled_pipe) { - cstat->cwnd = - ngtcp2_min(cstat->cwnd + ack->bytes_delivered, cc->target_cwnd); - } else if (cstat->cwnd < cc->target_cwnd || - cc->rst->delivered < cc->initial_cwnd) { - cstat->cwnd += ack->bytes_delivered; +static void bbr_adapt_upper_bounds(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { + if (bbr->ack_phase == NGTCP2_BBR_ACK_PHASE_ACKS_PROBE_STARTING && + bbr->round_start) { + bbr->ack_phase = NGTCP2_BBR_ACK_PHASE_ACKS_PROBE_FEEDBACK; + } + + if (bbr->ack_phase == NGTCP2_BBR_ACK_PHASE_ACKS_PROBE_STOPPING && + bbr->round_start) { + if (bbr_is_in_probe_bw_state(bbr) && !bbr->rst->rs.is_app_limited) { + bbr_advance_max_bw_filter(bbr); } + } - cstat->cwnd = - ngtcp2_max(cstat->cwnd, min_pipe_cwnd(cstat->max_udp_payload_size)); + if (!bbr_check_inflight_too_high(bbr, cstat, ts)) { + /* bbr->bw_hi never be updated */ + if (bbr->inflight_hi == UINT64_MAX /* || bbr->bw_hi == UINT64_MAX */) { + return; + } + + if (bbr->rst->rs.tx_in_flight > bbr->inflight_hi) { + bbr->inflight_hi = bbr->rst->rs.tx_in_flight; + } + + if (cstat->delivery_rate_sec > bbr->bw_hi) { + bbr->bw_hi = cstat->delivery_rate_sec; + } + + if (bbr->state == NGTCP2_BBR_STATE_PROBE_BW_UP) { + 
bbr_probe_inflight_hi_upward(bbr, cstat, ack); + } + } +} + +static int bbr_check_time_to_probe_bw(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + if (bbr_has_elapsed_in_phase(bbr, bbr->bw_probe_wait, ts) || + bbr_is_reno_coexistence_probe_time(bbr, cstat)) { + bbr_start_probe_bw_refill(bbr); + + return 1; } - bbr_modulate_cwnd_for_probe_rtt(cc, cstat); + return 0; } -static void bbr_init(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp initial_ts) { - cc->pacing_gain = NGTCP2_BBR_HIGH_GAIN; - cc->prior_cwnd = 0; - cc->target_cwnd = 0; - cc->btl_bw = 0; - cc->rt_prop = UINT64_MAX; - cc->rtprop_stamp = initial_ts; - cc->cycle_stamp = UINT64_MAX; - cc->probe_rtt_done_stamp = UINT64_MAX; - cc->cycle_index = 0; - cc->rtprop_expired = 0; - cc->idle_restart = 0; - cc->packet_conservation = 0; - cc->probe_rtt_round_done = 0; +static void bbr_pick_probe_wait(ngtcp2_cc_bbr *bbr) { + uint8_t rand; - cc->congestion_recovery_start_ts = UINT64_MAX; - cc->congestion_recovery_next_round_delivered = 0; - cc->in_loss_recovery = 0; + bbr->rand(&rand, 1, &bbr->rand_ctx); - cstat->send_quantum = cstat->max_udp_payload_size * 10; + bbr->rounds_since_bw_probe = (uint64_t)(rand * 2 / 256); - ngtcp2_window_filter_init(&cc->btl_bw_filter, NGTCP2_BBR_BTL_BW_FILTERLEN); + bbr->rand(&rand, 1, &bbr->rand_ctx); - bbr_init_round_counting(cc); - bbr_init_full_pipe(cc); - bbr_init_pacing_rate(cc, cstat); - bbr_enter_startup(cc); + bbr->bw_probe_wait = + 2 * NGTCP2_SECONDS + (ngtcp2_tstamp)(NGTCP2_SECONDS * rand / 255); } -static void bbr_enter_startup(ngtcp2_bbr_cc *cc) { - cc->state = NGTCP2_BBR_STATE_STARTUP; - cc->pacing_gain = NGTCP2_BBR_HIGH_GAIN; - cc->cwnd_gain = NGTCP2_BBR_HIGH_GAIN; +static int bbr_is_reno_coexistence_probe_time(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + uint64_t reno_rounds = + bbr_target_inflight(bbr, cstat) / cstat->max_tx_udp_payload_size; + + return bbr->rounds_since_bw_probe >= ngtcp2_min(reno_rounds, 63); } -static void bbr_init_full_pipe(ngtcp2_bbr_cc *cc) { - cc->filled_pipe = 0; - cc->full_bw = 0; - cc->full_bw_count = 0; +static uint64_t bbr_target_inflight(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + uint64_t bdp = bbr_inflight(bbr, cstat, bbr->bw, 100); + + return ngtcp2_min(bdp, cstat->cwnd); } -static void bbr_check_full_pipe(ngtcp2_bbr_cc *cc) { - if (cc->filled_pipe || !cc->round_start || cc->rst->rs.is_app_limited) { - /* no need to check for a full pipe now. */ - return; +static int bbr_check_inflight_too_high(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + if (is_inflight_too_high(&bbr->rst->rs)) { + if (bbr->bw_probe_samples) { + bbr_handle_inflight_too_high(bbr, cstat, &bbr->rst->rs, ts); + } + + return 1; } - /* cc->btl_bw still growing? 
*/ - if (cc->btl_bw * 100 >= cc->full_bw * 125) { - /* record new baseline level */ - cc->full_bw = cc->btl_bw; - cc->full_bw_count = 0; + return 0; +} + +static int is_inflight_too_high(const ngtcp2_rs *rs) { + return rs->lost * NGTCP2_BBR_LOSS_THRESH_DENOM > + rs->tx_in_flight * NGTCP2_BBR_LOSS_THRESH_NUMER; +} + +static void bbr_handle_inflight_too_high(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + const ngtcp2_rs *rs, + ngtcp2_tstamp ts) { + bbr->bw_probe_samples = 0; + + if (!rs->is_app_limited) { + bbr->inflight_hi = ngtcp2_max( + rs->tx_in_flight, bbr_target_inflight(bbr, cstat) * + NGTCP2_BBR_BETA_NUMER / NGTCP2_BBR_BETA_DENOM); + } + + if (bbr->state == NGTCP2_BBR_STATE_PROBE_BW_UP) { + bbr_start_probe_bw_down(bbr, ts); + } +} + +static void bbr_handle_lost_packet(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { + ngtcp2_rs rs = {0}; + + if (!bbr->bw_probe_samples) { return; } - /* another round w/o much growth */ - ++cc->full_bw_count; - if (cc->full_bw_count >= 3) { - cc->filled_pipe = 1; - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr filled pipe, btl_bw=%" PRIu64, cc->btl_bw); + + rs.tx_in_flight = pkt->tx_in_flight; + /* bbr->rst->lost is not incremented for pkt yet */ + rs.lost = bbr->rst->lost + pkt->pktlen - pkt->lost; + rs.is_app_limited = pkt->is_app_limited; + + if (is_inflight_too_high(&rs)) { + rs.tx_in_flight = bbr_inflight_hi_from_lost_packet(bbr, &rs, pkt); + + bbr_handle_inflight_too_high(bbr, cstat, &rs, ts); } } -static void bbr_enter_drain(ngtcp2_bbr_cc *cc) { - cc->state = NGTCP2_BBR_STATE_DRAIN; - /* pace slowly */ - cc->pacing_gain = 1.0 / NGTCP2_BBR_HIGH_GAIN; - /* maintain cwnd */ - cc->cwnd_gain = NGTCP2_BBR_HIGH_GAIN; +static uint64_t bbr_inflight_hi_from_lost_packet(ngtcp2_cc_bbr *bbr, + const ngtcp2_rs *rs, + const ngtcp2_cc_pkt *pkt) { + uint64_t inflight_prev, lost_prev, lost_prefix; + (void)bbr; + + assert(rs->tx_in_flight >= pkt->pktlen); + + inflight_prev = rs->tx_in_flight - pkt->pktlen; + + assert(rs->lost >= pkt->pktlen); + + lost_prev = rs->lost - pkt->pktlen; + + if (inflight_prev * NGTCP2_BBR_LOSS_THRESH_NUMER < + lost_prev * NGTCP2_BBR_LOSS_THRESH_DENOM) { + return inflight_prev; + } + + lost_prefix = (inflight_prev * NGTCP2_BBR_LOSS_THRESH_NUMER - + lost_prev * NGTCP2_BBR_LOSS_THRESH_DENOM) / + (NGTCP2_BBR_LOSS_THRESH_DENOM - NGTCP2_BBR_LOSS_THRESH_NUMER); + + return inflight_prev + lost_prefix; } -static void bbr_check_drain(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - if (cc->state == NGTCP2_BBR_STATE_STARTUP && cc->filled_pipe) { - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr exit Startup and enter Drain"); +static void bbr_update_min_rtt(ngtcp2_cc_bbr *bbr, const ngtcp2_cc_ack *ack, + ngtcp2_tstamp ts) { + int min_rtt_expired; + + bbr->probe_rtt_expired = + ts > bbr->probe_rtt_min_stamp + NGTCP2_BBR_PROBE_RTT_INTERVAL; - bbr_enter_drain(cc); + if (ack->rtt != UINT64_MAX && + (ack->rtt < bbr->probe_rtt_min_delay || bbr->probe_rtt_expired)) { + bbr->probe_rtt_min_delay = ack->rtt; + bbr->probe_rtt_min_stamp = ts; } - if (cc->state == NGTCP2_BBR_STATE_DRAIN && - cstat->bytes_in_flight <= bbr_inflight(cc, cstat, 1.0)) { - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr exit Drain and enter ProbeBW"); + min_rtt_expired = ts > bbr->min_rtt_stamp + NGTCP2_BBR_MIN_RTT_FILTERLEN; - /* we estimate queue is drained */ - bbr_enter_probe_bw(cc, ts); + if (bbr->probe_rtt_min_delay < bbr->min_rtt || min_rtt_expired) { + bbr->min_rtt = 
bbr->probe_rtt_min_delay; + bbr->min_rtt_stamp = bbr->probe_rtt_min_stamp; + + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, + "bbr update min_rtt=%" PRIu64, bbr->min_rtt); } } -static void bbr_enter_probe_bw(ngtcp2_bbr_cc *cc, ngtcp2_tstamp ts) { - uint8_t rand; +static void bbr_check_probe_rtt(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + if (bbr->state != NGTCP2_BBR_STATE_PROBE_RTT && bbr->probe_rtt_expired && + !bbr->idle_restart) { + bbr_enter_probe_rtt(bbr); + bbr_save_cwnd(bbr, cstat); + + bbr->probe_rtt_done_stamp = UINT64_MAX; + bbr->ack_phase = NGTCP2_BBR_ACK_PHASE_ACKS_PROBE_STOPPING; - cc->state = NGTCP2_BBR_STATE_PROBE_BW; - cc->pacing_gain = 1; - cc->cwnd_gain = 2; + bbr_start_round(bbr); + } + + if (bbr->state == NGTCP2_BBR_STATE_PROBE_RTT) { + bbr_handle_probe_rtt(bbr, cstat, ts); + } - assert(cc->rand); + if (bbr->rst->rs.delivered) { + bbr->idle_restart = 0; + } +} - cc->rand(&rand, 1, &cc->rand_ctx); +static void bbr_enter_probe_rtt(ngtcp2_cc_bbr *bbr) { + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, "bbr enter ProbeRTT"); - cc->cycle_index = NGTCP2_BBR_GAIN_CYCLELEN - 1 - (size_t)(rand * 7 / 256); - bbr_advance_cycle_phase(cc, ts); + bbr->state = NGTCP2_BBR_STATE_PROBE_RTT; + bbr->pacing_gain_h = 100; + bbr->cwnd_gain_h = NGTCP2_BBR_PROBE_RTT_CWND_GAIN_H; } -static void bbr_check_cycle_phase(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - if (cc->state == NGTCP2_BBR_STATE_PROBE_BW && - bbr_is_next_cycle_phase(cc, cstat, ack, ts)) { - bbr_advance_cycle_phase(cc, ts); +static void bbr_handle_probe_rtt(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + bbr_mark_connection_app_limited(bbr, cstat); + + if (bbr->probe_rtt_done_stamp == UINT64_MAX && + cstat->bytes_in_flight <= bbr_probe_rtt_cwnd(bbr, cstat)) { + bbr->probe_rtt_done_stamp = ts + NGTCP2_BBR_PROBE_RTT_DURATION; + bbr->probe_rtt_round_done = 0; + + bbr_start_round(bbr); + + return; + } + + if (bbr->probe_rtt_done_stamp != UINT64_MAX) { + if (bbr->round_start) { + bbr->probe_rtt_round_done = 1; + } + + if (bbr->probe_rtt_round_done) { + bbr_check_probe_rtt_done(bbr, cstat, ts); + } } } -static void bbr_advance_cycle_phase(ngtcp2_bbr_cc *cc, ngtcp2_tstamp ts) { - cc->cycle_stamp = ts; - cc->cycle_index = (cc->cycle_index + 1) & (NGTCP2_BBR_GAIN_CYCLELEN - 1); - cc->pacing_gain = pacing_gain_cycle[cc->cycle_index]; +static void bbr_check_probe_rtt_done(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + if (bbr->probe_rtt_done_stamp != UINT64_MAX && + ts > bbr->probe_rtt_done_stamp) { + bbr->probe_rtt_min_stamp = ts; + bbr_restore_cwnd(bbr, cstat); + bbr_exit_probe_rtt(bbr, ts); + } } -static int bbr_is_next_cycle_phase(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - int is_full_length = (ts - cc->cycle_stamp) > cc->rt_prop; +static void bbr_mark_connection_app_limited(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + uint64_t app_limited = bbr->rst->delivered + cstat->bytes_in_flight; - if (cc->pacing_gain > 1) { - return is_full_length && (ack->bytes_lost > 0 || - ack->prior_bytes_in_flight >= - bbr_inflight(cc, cstat, cc->pacing_gain)); + if (app_limited) { + bbr->rst->app_limited = app_limited; + } else { + bbr->rst->app_limited = cstat->max_tx_udp_payload_size; } +} - if (cc->pacing_gain < 1) { - return is_full_length || - ack->prior_bytes_in_flight <= bbr_inflight(cc, cstat, 1); - } +static void bbr_exit_probe_rtt(ngtcp2_cc_bbr *bbr, ngtcp2_tstamp 
ts) { + bbr_reset_lower_bounds(bbr); - return is_full_length; + if (bbr->filled_pipe) { + bbr_start_probe_bw_down(bbr, ts); + bbr_start_probe_bw_cruise(bbr); + } else { + bbr_enter_startup(bbr); + } } -static void bbr_handle_restart_from_idle(ngtcp2_bbr_cc *cc, - ngtcp2_conn_stat *cstat) { - if (cstat->bytes_in_flight == 0 && cc->rst->app_limited) { - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, "bbr restart from idle"); +static void bbr_handle_restart_from_idle(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + if (cstat->bytes_in_flight == 0 && bbr->rst->app_limited) { + ngtcp2_log_info(bbr->cc.log, NGTCP2_LOG_EVENT_CCA, "bbr restart from idle"); - cc->idle_restart = 1; + bbr->idle_restart = 1; + bbr->extra_acked_interval_start = ts; - if (cc->state == NGTCP2_BBR_STATE_PROBE_BW) { - bbr_set_pacing_rate_with_gain(cc, cstat, 1); + if (bbr_is_in_probe_bw_state(bbr)) { + bbr_set_pacing_rate_with_gain(bbr, cstat, 100); + } else if (bbr->state == NGTCP2_BBR_STATE_PROBE_RTT) { + bbr_check_probe_rtt_done(bbr, cstat, ts); } } } -static void bbr_check_probe_rtt(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - if (cc->state != NGTCP2_BBR_STATE_PROBE_RTT && cc->rtprop_expired && - !cc->idle_restart) { - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, "bbr enter ProbeRTT"); +static uint64_t bbr_bdp_multiple(ngtcp2_cc_bbr *bbr, uint64_t bw, + uint64_t gain_h) { + uint64_t bdp; - bbr_enter_probe_rtt(cc); - bbr_save_cwnd(cc, cstat); - cc->probe_rtt_done_stamp = UINT64_MAX; + if (bbr->min_rtt == UINT64_MAX) { + return bbr->initial_cwnd; } - if (cc->state == NGTCP2_BBR_STATE_PROBE_RTT) { - bbr_handle_probe_rtt(cc, cstat, ts); + bdp = bw * bbr->min_rtt / NGTCP2_SECONDS; + + return (uint64_t)(bdp * gain_h / 100); +} + +static uint64_t min_pipe_cwnd(size_t max_udp_payload_size) { + return max_udp_payload_size * 4; +} + +static uint64_t bbr_quantization_budget(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + uint64_t inflight) { + bbr_update_offload_budget(bbr, cstat); + + inflight = ngtcp2_max(inflight, bbr->offload_budget); + inflight = + ngtcp2_max(inflight, min_pipe_cwnd(cstat->max_tx_udp_payload_size)); + + if (bbr->state == NGTCP2_BBR_STATE_PROBE_BW_UP) { + inflight += 2 * cstat->max_tx_udp_payload_size; } - cc->idle_restart = 0; + return inflight; } -static void bbr_enter_probe_rtt(ngtcp2_bbr_cc *cc) { - cc->state = NGTCP2_BBR_STATE_PROBE_RTT; - cc->pacing_gain = 1; - cc->cwnd_gain = 1; +static uint64_t bbr_inflight(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + uint64_t bw, uint64_t gain_h) { + uint64_t inflight = bbr_bdp_multiple(bbr, bw, gain_h); + + return bbr_quantization_budget(bbr, cstat, inflight); } -static void bbr_handle_probe_rtt(ngtcp2_bbr_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - uint64_t app_limited = cc->rst->delivered + cstat->bytes_in_flight; +static void bbr_update_max_inflight(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + uint64_t inflight; + + /* Not documented */ + /* bbr_update_aggregation_budget(bbr); */ + + inflight = + bbr_bdp_multiple(bbr, bbr->bw, bbr->cwnd_gain_h) + bbr->extra_acked; + bbr->max_inflight = bbr_quantization_budget(bbr, cstat, inflight); +} - /* Ignore low rate samples during NGTCP2_BBR_STATE_PROBE_RTT. */ - cc->rst->app_limited = app_limited ? 
app_limited : 1; +static void bbr_update_offload_budget(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + bbr->offload_budget = 3 * cstat->send_quantum; +} - if (cc->probe_rtt_done_stamp == UINT64_MAX && - cstat->bytes_in_flight <= min_pipe_cwnd(cstat->max_udp_payload_size)) { - cc->probe_rtt_done_stamp = ts + NGTCP2_BBR_PROBE_RTT_DURATION; - cc->probe_rtt_round_done = 0; - cc->next_round_delivered = cc->rst->delivered; +static void bbr_advance_max_bw_filter(ngtcp2_cc_bbr *bbr) { + ++bbr->cycle_count; +} +static void bbr_modulate_cwnd_for_recovery(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack) { + if (ack->bytes_lost > 0) { + if (cstat->cwnd > ack->bytes_lost) { + cstat->cwnd -= ack->bytes_lost; + cstat->cwnd = ngtcp2_max(cstat->cwnd, 2 * cstat->max_tx_udp_payload_size); + } else { + cstat->cwnd = 2 * cstat->max_tx_udp_payload_size; + } + } + + if (bbr->packet_conservation) { + cstat->cwnd = + ngtcp2_max(cstat->cwnd, cstat->bytes_in_flight + ack->bytes_delivered); + } +} + +static void bbr_save_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { + if (!bbr->in_loss_recovery && bbr->state != NGTCP2_BBR_STATE_PROBE_RTT) { + bbr->prior_cwnd = cstat->cwnd; return; } - if (cc->probe_rtt_done_stamp != UINT64_MAX) { - if (cc->round_start) { - cc->probe_rtt_round_done = 1; + bbr->prior_cwnd = ngtcp2_max(bbr->prior_cwnd, cstat->cwnd); +} + +static void bbr_restore_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { + cstat->cwnd = ngtcp2_max(cstat->cwnd, bbr->prior_cwnd); +} + +static uint64_t bbr_probe_rtt_cwnd(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + uint64_t probe_rtt_cwnd = + bbr_bdp_multiple(bbr, bbr->bw, NGTCP2_BBR_PROBE_RTT_CWND_GAIN_H); + uint64_t mpcwnd = min_pipe_cwnd(cstat->max_tx_udp_payload_size); + + return ngtcp2_max(probe_rtt_cwnd, mpcwnd); +} + +static void bbr_bound_cwnd_for_probe_rtt(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + uint64_t probe_rtt_cwnd; + + if (bbr->state == NGTCP2_BBR_STATE_PROBE_RTT) { + probe_rtt_cwnd = bbr_probe_rtt_cwnd(bbr, cstat); + + cstat->cwnd = ngtcp2_min(cstat->cwnd, probe_rtt_cwnd); + } +} + +static void bbr_set_cwnd(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack) { + uint64_t mpcwnd; + + bbr_update_max_inflight(bbr, cstat); + bbr_modulate_cwnd_for_recovery(bbr, cstat, ack); + + if (!bbr->packet_conservation) { + if (bbr->filled_pipe) { + cstat->cwnd += ack->bytes_delivered; + cstat->cwnd = ngtcp2_min(cstat->cwnd, bbr->max_inflight); + } else if (cstat->cwnd < bbr->max_inflight || + bbr->rst->delivered < bbr->initial_cwnd) { + cstat->cwnd += ack->bytes_delivered; + } + + mpcwnd = min_pipe_cwnd(cstat->max_tx_udp_payload_size); + cstat->cwnd = ngtcp2_max(cstat->cwnd, mpcwnd); + } + + bbr_bound_cwnd_for_probe_rtt(bbr, cstat); + bbr_bound_cwnd_for_model(bbr, cstat); +} + +static void bbr_bound_cwnd_for_model(ngtcp2_cc_bbr *bbr, + ngtcp2_conn_stat *cstat) { + uint64_t cap = UINT64_MAX; + uint64_t mpcwnd = min_pipe_cwnd(cstat->max_tx_udp_payload_size); + + if (bbr_is_in_probe_bw_state(bbr) && + bbr->state != NGTCP2_BBR_STATE_PROBE_BW_CRUISE) { + cap = bbr->inflight_hi; + } else if (bbr->state == NGTCP2_BBR_STATE_PROBE_RTT || + bbr->state == NGTCP2_BBR_STATE_PROBE_BW_CRUISE) { + cap = bbr_inflight_with_headroom(bbr, cstat); + } + + cap = ngtcp2_min(cap, bbr->inflight_lo); + cap = ngtcp2_max(cap, mpcwnd); + + cstat->cwnd = ngtcp2_min(cstat->cwnd, cap); +} + +static void bbr_set_send_quantum(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat) { + size_t floor, send_quantum; + (void)bbr; + + if 
(cstat->pacing_interval > (NGTCP2_SECONDS * 8 * 10 / 12) >> 20) { + floor = cstat->max_tx_udp_payload_size; + } else { + floor = 2 * cstat->max_tx_udp_payload_size; + } + + if (cstat->pacing_interval) { + send_quantum = (size_t)(NGTCP2_MILLISECONDS / cstat->pacing_interval); + send_quantum = ngtcp2_min(send_quantum, 64 * 1024); + } else { + send_quantum = 64 * 1024; + } + + cstat->send_quantum = ngtcp2_max(send_quantum, floor); +} + +static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, + ngtcp2_tstamp sent_time) { + return cstat->congestion_recovery_start_ts != UINT64_MAX && + sent_time <= cstat->congestion_recovery_start_ts; +} + +static void bbr_handle_recovery(ngtcp2_cc_bbr *bbr, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { + if (bbr->in_loss_recovery) { + if (ts - cstat->congestion_recovery_start_ts >= cstat->smoothed_rtt) { + bbr->packet_conservation = 0; } - if (cc->probe_rtt_round_done && ts > cc->probe_rtt_done_stamp) { - cc->rtprop_stamp = ts; - bbr_restore_cwnd(cc, cstat); - bbr_exit_probe_rtt(cc, ts); + if (ack->largest_pkt_sent_ts != UINT64_MAX && + !in_congestion_recovery(cstat, ack->largest_pkt_sent_ts)) { + bbr->in_loss_recovery = 0; + bbr->packet_conservation = 0; + bbr_restore_cwnd(bbr, cstat); } + + return; + } + + if (bbr->congestion_recovery_start_ts != UINT64_MAX) { + bbr->in_loss_recovery = 1; + bbr_save_cwnd(bbr, cstat); + cstat->cwnd = + cstat->bytes_in_flight + + ngtcp2_max(ack->bytes_delivered, cstat->max_tx_udp_payload_size); + + cstat->congestion_recovery_start_ts = bbr->congestion_recovery_start_ts; + bbr->congestion_recovery_start_ts = UINT64_MAX; + bbr->packet_conservation = 1; + bbr->congestion_recovery_next_round_delivered = bbr->rst->delivered; + bbr->prior_inflight_hi = bbr->inflight_hi; + bbr->prior_inflight_lo = bbr->inflight_lo; + bbr->prior_bw_lo = bbr->bw_lo; } } -static void bbr_exit_probe_rtt(ngtcp2_bbr_cc *cc, ngtcp2_tstamp ts) { - if (cc->filled_pipe) { - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr exit ProbeRTT and enter ProbeBW"); +static void bbr_cc_on_pkt_lost(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { + ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); + + bbr_update_on_loss(bbr, cstat, pkt, ts); +} - bbr_enter_probe_bw(cc, ts); +static void bbr_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp sent_ts, ngtcp2_tstamp ts) { + ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); + if (bbr->in_loss_recovery || + bbr->congestion_recovery_start_ts != UINT64_MAX || + in_congestion_recovery(cstat, sent_ts)) { return; } - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr exit ProbeRTT and enter Startup"); + bbr->congestion_recovery_start_ts = ts; +} + +static void bbr_cc_on_spurious_congestion(ngtcp2_cc *cc, + ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); + (void)ts; + + bbr->congestion_recovery_start_ts = UINT64_MAX; + cstat->congestion_recovery_start_ts = UINT64_MAX; + + if (bbr->in_loss_recovery) { + bbr->in_loss_recovery = 0; + bbr->packet_conservation = 0; + bbr_restore_cwnd(bbr, cstat); + bbr->full_bw_count = 0; + bbr->loss_in_round = 0; + bbr->inflight_lo = ngtcp2_max(bbr->inflight_lo, bbr->prior_inflight_lo); + bbr->inflight_hi = ngtcp2_max(bbr->inflight_hi, bbr->prior_inflight_hi); + bbr->bw_lo = ngtcp2_max(bbr->bw_lo, bbr->prior_bw_lo); + } +} + +static void bbr_cc_on_persistent_congestion(ngtcp2_cc *cc, + ngtcp2_conn_stat 
*cstat, + ngtcp2_tstamp ts) { + ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); + (void)ts; + + cstat->congestion_recovery_start_ts = UINT64_MAX; + bbr->congestion_recovery_start_ts = UINT64_MAX; + bbr->in_loss_recovery = 0; + bbr->packet_conservation = 0; + + bbr_save_cwnd(bbr, cstat); + cstat->cwnd = cstat->bytes_in_flight + cstat->max_tx_udp_payload_size; + cstat->cwnd = + ngtcp2_max(cstat->cwnd, min_pipe_cwnd(cstat->max_tx_udp_payload_size)); +} + +static void bbr_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { + ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); + + bbr_handle_recovery(bbr, cstat, ack, ts); + bbr_update_on_ack(bbr, cstat, ack, ts); +} + +static void bbr_cc_on_pkt_sent(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, + const ngtcp2_cc_pkt *pkt) { + ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); + + bbr_on_transmit(bbr, cstat, pkt->sent_ts); +} + +static void bbr_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, + ngtcp2_tstamp ts) { + ngtcp2_cc_bbr *bbr = ngtcp2_struct_of(cc, ngtcp2_cc_bbr, cc); + + bbr_on_init(bbr, cstat, ts); +} - bbr_enter_startup(cc); +void ngtcp2_cc_bbr_init(ngtcp2_cc_bbr *bbr, ngtcp2_log *log, + ngtcp2_conn_stat *cstat, ngtcp2_rst *rst, + ngtcp2_tstamp initial_ts, ngtcp2_rand rand, + const ngtcp2_rand_ctx *rand_ctx) { + memset(bbr, 0, sizeof(*bbr)); + + bbr->cc.log = log; + bbr->cc.on_pkt_lost = bbr_cc_on_pkt_lost; + bbr->cc.congestion_event = bbr_cc_congestion_event; + bbr->cc.on_spurious_congestion = bbr_cc_on_spurious_congestion; + bbr->cc.on_persistent_congestion = bbr_cc_on_persistent_congestion; + bbr->cc.on_ack_recv = bbr_cc_on_ack_recv; + bbr->cc.on_pkt_sent = bbr_cc_on_pkt_sent; + bbr->cc.reset = bbr_cc_reset; + + bbr->rst = rst; + bbr->rand = rand; + bbr->rand_ctx = *rand_ctx; + bbr->initial_cwnd = cstat->cwnd; + + bbr_on_init(bbr, cstat, initial_ts); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h index 7311f051e187bc..0017be35010e66 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr.h @@ -39,118 +39,108 @@ typedef struct ngtcp2_rst ngtcp2_rst; typedef enum ngtcp2_bbr_state { NGTCP2_BBR_STATE_STARTUP, NGTCP2_BBR_STATE_DRAIN, - NGTCP2_BBR_STATE_PROBE_BW, + NGTCP2_BBR_STATE_PROBE_BW_DOWN, + NGTCP2_BBR_STATE_PROBE_BW_CRUISE, + NGTCP2_BBR_STATE_PROBE_BW_REFILL, + NGTCP2_BBR_STATE_PROBE_BW_UP, NGTCP2_BBR_STATE_PROBE_RTT, } ngtcp2_bbr_state; +typedef enum ngtcp2_bbr_ack_phase { + NGTCP2_BBR_ACK_PHASE_ACKS_PROBE_STARTING, + NGTCP2_BBR_ACK_PHASE_ACKS_PROBE_STOPPING, + NGTCP2_BBR_ACK_PHASE_ACKS_PROBE_FEEDBACK, + NGTCP2_BBR_ACK_PHASE_ACKS_REFILLING, +} ngtcp2_bbr_ack_phase; + /* - * ngtcp2_bbr_cc is BBR congestion controller, described in - * https://tools.ietf.org/html/draft-cardwell-iccrg-bbr-congestion-control-00 + * ngtcp2_cc_bbr is BBR v2 congestion controller, described in + * https://datatracker.ietf.org/doc/html/draft-cardwell-iccrg-bbr-congestion-control-01 */ -typedef struct ngtcp2_bbr_cc { - ngtcp2_cc_base ccb; +typedef struct ngtcp2_cc_bbr { + ngtcp2_cc cc; - /* The max filter used to estimate BBR.BtlBw. */ - ngtcp2_window_filter btl_bw_filter; uint64_t initial_cwnd; ngtcp2_rst *rst; ngtcp2_rand rand; ngtcp2_rand_ctx rand_ctx; - /* BBR variables */ + /* max_bw_filter for tracking the maximum recent delivery rate + samples for estimating max_bw. */ + ngtcp2_window_filter max_bw_filter; - /* The dynamic gain factor used to scale BBR.BtlBw to - produce BBR.pacing_rate. 
*/ - double pacing_gain; - /* The dynamic gain factor used to scale the estimated BDP to produce a - congestion window (cwnd). */ - double cwnd_gain; - uint64_t full_bw; - /* packet.delivered value denoting the end of a packet-timed round trip. */ - uint64_t next_round_delivered; - /* Count of packet-timed round trips. */ - uint64_t round_count; - uint64_t prior_cwnd; - /* target_cwnd is the upper bound on the volume of data BBR - allows in flight. */ - uint64_t target_cwnd; - /* BBR's estimated bottleneck bandwidth available to the - transport flow, estimated from the maximum delivery rate sample in a - sliding window. */ - uint64_t btl_bw; - /* BBR's estimated two-way round-trip propagation delay of - the path, estimated from the windowed minimum recent round-trip delay - sample. */ - ngtcp2_duration rt_prop; - /* The wall clock time at which the current BBR.RTProp - sample was obtained. */ - ngtcp2_tstamp rtprop_stamp; - ngtcp2_tstamp cycle_stamp; + ngtcp2_window_filter extra_acked_filter; + + ngtcp2_duration min_rtt; + ngtcp2_tstamp min_rtt_stamp; ngtcp2_tstamp probe_rtt_done_stamp; - /* congestion_recovery_start_ts is the time when congestion recovery - period started.*/ - ngtcp2_tstamp congestion_recovery_start_ts; - uint64_t congestion_recovery_next_round_delivered; - size_t full_bw_count; - size_t cycle_index; - ngtcp2_bbr_state state; - /* A boolean that records whether BBR estimates that it has ever fully - utilized its available bandwidth ("filled the pipe"). */ - int filled_pipe; - /* A boolean that BBR sets to true once per packet-timed round trip, - on ACKs that advance BBR.round_count. */ - int round_start; - int rtprop_expired; - int idle_restart; - int packet_conservation; int probe_rtt_round_done; - /* in_loss_recovery becomes nonzero when BBR enters loss recovery - period. 
*/ - int in_loss_recovery; -} ngtcp2_bbr_cc; - -int ngtcp2_cc_bbr_cc_init(ngtcp2_cc *cc, ngtcp2_log *log, - ngtcp2_conn_stat *cstat, ngtcp2_rst *rst, - ngtcp2_tstamp initial_ts, ngtcp2_rand rand, - const ngtcp2_rand_ctx *rand_ctx, - const ngtcp2_mem *mem); - -void ngtcp2_cc_bbr_cc_free(ngtcp2_cc *cc, const ngtcp2_mem *mem); - -void ngtcp2_bbr_cc_init(ngtcp2_bbr_cc *bbr_cc, ngtcp2_conn_stat *cstat, - ngtcp2_rst *rst, ngtcp2_tstamp initial_ts, - ngtcp2_rand rand, const ngtcp2_rand_ctx *rand_ctx, - ngtcp2_log *log); - -void ngtcp2_bbr_cc_free(ngtcp2_bbr_cc *cc); - -void ngtcp2_cc_bbr_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts); + uint64_t prior_cwnd; + int idle_restart; + ngtcp2_tstamp extra_acked_interval_start; + uint64_t extra_acked_delivered; -void ngtcp2_cc_bbr_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp sent_ts, ngtcp2_tstamp ts); + /* Congestion signals */ + int loss_in_round; + uint64_t bw_latest; + uint64_t inflight_latest; -void ngtcp2_cc_bbr_cc_on_spurious_congestion(ngtcp2_cc *ccx, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); + /* Lower bounds */ + uint64_t bw_lo; + uint64_t inflight_lo; -void ngtcp2_cc_bbr_cc_on_persistent_congestion(ngtcp2_cc *cc, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); + /* Round counting */ + uint64_t next_round_delivered; + int round_start; + uint64_t round_count; -void ngtcp2_cc_bbr_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); + /* Full pipe */ + int filled_pipe; + uint64_t full_bw; + size_t full_bw_count; -void ngtcp2_cc_bbr_cc_on_pkt_sent(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt); + /* Pacing rate */ + uint64_t pacing_gain_h; -void ngtcp2_cc_bbr_cc_new_rtt_sample(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); + ngtcp2_bbr_state state; + uint64_t cwnd_gain_h; + + int loss_round_start; + uint64_t loss_round_delivered; + uint64_t rounds_since_bw_probe; + uint64_t max_bw; + uint64_t bw; + uint64_t cycle_count; + uint64_t extra_acked; + uint64_t bytes_lost_in_round; + size_t loss_events_in_round; + uint64_t offload_budget; + uint64_t probe_up_cnt; + ngtcp2_tstamp cycle_stamp; + ngtcp2_bbr_ack_phase ack_phase; + ngtcp2_duration bw_probe_wait; + int bw_probe_samples; + size_t bw_probe_up_rounds; + uint64_t bw_probe_up_acks; + uint64_t inflight_hi; + uint64_t bw_hi; + int probe_rtt_expired; + ngtcp2_duration probe_rtt_min_delay; + ngtcp2_tstamp probe_rtt_min_stamp; + int in_loss_recovery; + int packet_conservation; + uint64_t max_inflight; + ngtcp2_tstamp congestion_recovery_start_ts; + uint64_t congestion_recovery_next_round_delivered; -void ngtcp2_cc_bbr_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); + uint64_t prior_inflight_lo; + uint64_t prior_inflight_hi; + uint64_t prior_bw_lo; +} ngtcp2_cc_bbr; -void ngtcp2_cc_bbr_cc_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, - ngtcp2_cc_event_type event, ngtcp2_tstamp ts); +void ngtcp2_cc_bbr_init(ngtcp2_cc_bbr *bbr, ngtcp2_log *log, + ngtcp2_conn_stat *cstat, ngtcp2_rst *rst, + ngtcp2_tstamp initial_ts, ngtcp2_rand rand, + const ngtcp2_rand_ctx *rand_ctx); #endif /* NGTCP2_BBR_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr2.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr2.c deleted file mode 100644 index 585ea11e8e29a5..00000000000000 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr2.c +++ /dev/null @@ -1,1486 +0,0 @@ -/* - * ngtcp2 - * - * Copyright (c) 2021 ngtcp2 contributors - * - * Permission is hereby 
granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -#include "ngtcp2_bbr2.h" - -#include - -#include "ngtcp2_log.h" -#include "ngtcp2_macro.h" -#include "ngtcp2_mem.h" -#include "ngtcp2_rcvry.h" -#include "ngtcp2_rst.h" - -#define NGTCP2_BBR_MAX_BW_FILTERLEN 2 - -#define NGTCP2_BBR_EXTRA_ACKED_FILTERLEN 10 - -#define NGTCP2_BBR_STARTUP_PACING_GAIN ((double)2.77) - -#define NGTCP2_BBR_STARTUP_CWND_GAIN 2 - -#define NGTCP2_BBR_PROBE_RTT_CWND_GAIN ((double)0.5) - -#define NGTCP2_BBR_BETA_NUMER 7 -#define NGTCP2_BBR_BETA_DENOM 10 - -#define NGTCP2_BBR_LOSS_THRESH_NUMER 2 -#define NGTCP2_BBR_LOSS_THRESH_DENOM 100 - -#define NGTCP2_BBR_HEADROOM_NUMER 15 -#define NGTCP2_BBR_HEADROOM_DENOM 100 - -#define NGTCP2_BBR_PROBE_RTT_INTERVAL (5 * NGTCP2_SECONDS) -#define NGTCP2_BBR_MIN_RTT_FILTERLEN (10 * NGTCP2_SECONDS) - -#define NGTCP2_BBR_PROBE_RTT_DURATION (200 * NGTCP2_MILLISECONDS) - -#define NGTCP2_BBR_PACING_MARGIN_PERCENT 1 - -static void bbr_on_init(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp initial_ts); - -static void bbr_on_transmit(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); - -static void bbr_reset_congestion_signals(ngtcp2_bbr2_cc *bbr); - -static void bbr_reset_lower_bounds(ngtcp2_bbr2_cc *bbr); - -static void bbr_init_round_counting(ngtcp2_bbr2_cc *bbr); - -static void bbr_init_full_pipe(ngtcp2_bbr2_cc *bbr); - -static void bbr_init_pacing_rate(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat); - -static void bbr_set_pacing_rate_with_gain(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - double pacing_gain); - -static void bbr_set_pacing_rate(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat); - -static void bbr_enter_startup(ngtcp2_bbr2_cc *bbr); - -static void bbr_check_startup_done(ngtcp2_bbr2_cc *bbr, - const ngtcp2_cc_ack *ack); - -static void bbr_update_on_ack(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); - -static void bbr_update_model_and_state(ngtcp2_bbr2_cc *cc, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, - ngtcp2_tstamp ts); - -static void bbr_update_control_parameters(ngtcp2_bbr2_cc *cc, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); - -static void bbr_update_on_loss(ngtcp2_bbr2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts); - -static void bbr_update_latest_delivery_signals(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static void bbr_advance_latest_delivery_signals(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static void 
bbr_update_congestion_signals(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); - -static void bbr_adapt_lower_bounds_from_congestion(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static void bbr_init_lower_bounds(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat); - -static void bbr_loss_lower_bounds(ngtcp2_bbr2_cc *bbr); - -static void bbr_bound_bw_for_model(ngtcp2_bbr2_cc *bbr); - -static void bbr_update_max_bw(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); - -static void bbr_update_round(ngtcp2_bbr2_cc *bbr, const ngtcp2_cc_ack *ack); - -static void bbr_start_round(ngtcp2_bbr2_cc *bbr); - -static int bbr_is_in_probe_bw_state(ngtcp2_bbr2_cc *bbr); - -static void bbr_update_ack_aggregation(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, - ngtcp2_tstamp ts); - -static void bbr_enter_drain(ngtcp2_bbr2_cc *bbr); - -static void bbr_check_drain(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); - -static void bbr_enter_probe_bw(ngtcp2_bbr2_cc *bbr, ngtcp2_tstamp ts); - -static void bbr_start_probe_bw_down(ngtcp2_bbr2_cc *bbr, ngtcp2_tstamp ts); - -static void bbr_start_probe_bw_cruise(ngtcp2_bbr2_cc *bbr); - -static void bbr_start_probe_bw_refill(ngtcp2_bbr2_cc *bbr); - -static void bbr_start_probe_bw_up(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); - -static void bbr_update_probe_bw_cycle_phase(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, - ngtcp2_tstamp ts); - -static int bbr_check_time_to_cruise(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); - -static int bbr_has_elapsed_in_phase(ngtcp2_bbr2_cc *bbr, - ngtcp2_duration interval, ngtcp2_tstamp ts); - -static uint64_t bbr_inflight_with_headroom(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static void bbr_raise_inflight_hi_slope(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static void bbr_probe_inflight_hi_upward(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); - -static void bbr_adapt_upper_bounds(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts); - -static int bbr_check_time_to_probe_bw(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); - -static void bbr_pick_probe_wait(ngtcp2_bbr2_cc *bbr); - -static int bbr_is_reno_coexistence_probe_time(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static uint64_t bbr_target_inflight(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static int bbr_check_inflight_too_high(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); - -static int is_inflight_too_high(const ngtcp2_rs *rs); - -static void bbr_handle_inflight_too_high(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_rs *rs, ngtcp2_tstamp ts); - -static void bbr_handle_lost_packet(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts); - -static uint64_t bbr_inflight_hi_from_lost_packet(ngtcp2_bbr2_cc *bbr, - const ngtcp2_rs *rs, - const ngtcp2_cc_pkt *pkt); - -static void bbr_update_min_rtt(ngtcp2_bbr2_cc *bbr, const ngtcp2_cc_ack *ack, - ngtcp2_tstamp ts); - -static void bbr_check_probe_rtt(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); - -static void bbr_enter_probe_rtt(ngtcp2_bbr2_cc *bbr); - -static void bbr_handle_probe_rtt(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); - -static void bbr_check_probe_rtt_done(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, 
ngtcp2_tstamp ts); - -static void bbr_mark_connection_app_limited(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static void bbr_exit_probe_rtt(ngtcp2_bbr2_cc *bbr, ngtcp2_tstamp ts); - -static void bbr_handle_restart_from_idle(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts); - -static uint64_t bbr_bdp_multiple(ngtcp2_bbr2_cc *bbr, uint64_t bw, double gain); - -static uint64_t bbr_quantization_budget(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - uint64_t inflight); - -static uint64_t bbr_inflight(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - uint64_t bw, double gain); - -static void bbr_update_max_inflight(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static void bbr_update_offload_budget(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static uint64_t min_pipe_cwnd(size_t max_udp_payload_size); - -static void bbr_advance_max_bw_filter(ngtcp2_bbr2_cc *bbr); - -static void bbr_modulate_cwnd_for_recovery(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); - -static void bbr_save_cwnd(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat); - -static void bbr_restore_cwnd(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat); - -static uint64_t bbr_probe_rtt_cwnd(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static void bbr_bound_cwnd_for_probe_rtt(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static void bbr_set_cwnd(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); - -static void bbr_bound_cwnd_for_model(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat); - -static void bbr_set_send_quantum(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat); - -static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, - ngtcp2_tstamp sent_time); - -static void bbr_handle_recovery(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack); - -static void bbr_on_init(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp initial_ts) { - ngtcp2_window_filter_init(&bbr->max_bw_filter, NGTCP2_BBR_MAX_BW_FILTERLEN); - ngtcp2_window_filter_init(&bbr->extra_acked_filter, - NGTCP2_BBR_EXTRA_ACKED_FILTERLEN); - - bbr->min_rtt = UINT64_MAX; - bbr->min_rtt_stamp = initial_ts; - /* remark: Use UINT64_MAX instead of 0 for consistency. 
*/ - bbr->probe_rtt_done_stamp = UINT64_MAX; - bbr->probe_rtt_round_done = 0; - bbr->prior_cwnd = 0; - bbr->idle_restart = 0; - bbr->extra_acked_interval_start = initial_ts; - bbr->extra_acked_delivered = 0; - - bbr_reset_congestion_signals(bbr); - bbr_reset_lower_bounds(bbr); - bbr_init_round_counting(bbr); - bbr_init_full_pipe(bbr); - bbr_init_pacing_rate(bbr, cstat); - bbr_enter_startup(bbr); - - cstat->send_quantum = cstat->max_udp_payload_size * 10; - - /* Missing in documentation */ - bbr->loss_round_start = 0; - bbr->loss_round_delivered = UINT64_MAX; - - bbr->rounds_since_bw_probe = 0; - - bbr->max_bw = 0; - bbr->bw = 0; - - bbr->cycle_count = 0; - - bbr->extra_acked = 0; - - bbr->bytes_lost_in_round = 0; - bbr->loss_events_in_round = 0; - - bbr->offload_budget = 0; - - bbr->probe_up_cnt = UINT64_MAX; - bbr->cycle_stamp = UINT64_MAX; - bbr->ack_phase = 0; - bbr->bw_probe_wait = 0; - bbr->bw_probe_samples = 0; - bbr->bw_probe_up_rounds = 0; - bbr->bw_probe_up_acks = 0; - - bbr->inflight_hi = UINT64_MAX; - bbr->bw_hi = UINT64_MAX; - - bbr->probe_rtt_expired = 0; - bbr->probe_rtt_min_delay = UINT64_MAX; - bbr->probe_rtt_min_stamp = initial_ts; - - bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; - - bbr->max_inflight = 0; - - bbr->congestion_recovery_start_ts = UINT64_MAX; - bbr->congestion_recovery_next_round_delivered = 0; - - bbr->prior_inflight_lo = 0; - bbr->prior_inflight_hi = 0; - bbr->prior_bw_lo = 0; -} - -static void bbr_reset_congestion_signals(ngtcp2_bbr2_cc *bbr) { - bbr->loss_in_round = 0; - bbr->bw_latest = 0; - bbr->inflight_latest = 0; -} - -static void bbr_reset_lower_bounds(ngtcp2_bbr2_cc *bbr) { - bbr->bw_lo = UINT64_MAX; - bbr->inflight_lo = UINT64_MAX; -} - -static void bbr_init_round_counting(ngtcp2_bbr2_cc *bbr) { - bbr->next_round_delivered = 0; - bbr->round_start = 0; - bbr->round_count = 0; -} - -static void bbr_init_full_pipe(ngtcp2_bbr2_cc *bbr) { - bbr->filled_pipe = 0; - bbr->full_bw = 0; - bbr->full_bw_count = 0; -} - -static void bbr_check_startup_full_bandwidth(ngtcp2_bbr2_cc *bbr) { - if (bbr->filled_pipe || !bbr->round_start || bbr->rst->rs.is_app_limited) { - return; - } - - if (bbr->max_bw * 100 >= bbr->full_bw * 125) { - bbr->full_bw = bbr->max_bw; - bbr->full_bw_count = 0; - } - - ++bbr->full_bw_count; - - if (bbr->full_bw_count >= 3) { - bbr->filled_pipe = 1; - - ngtcp2_log_info(bbr->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr2 filled pipe, full_bw=%" PRIu64, bbr->full_bw); - } -} - -static void bbr_check_startup_high_loss(ngtcp2_bbr2_cc *bbr, - const ngtcp2_cc_ack *ack) { - if (bbr->filled_pipe || !bbr->round_start || bbr->rst->rs.is_app_limited) { - return; - } - - if (bbr->loss_events_in_round <= 3) { - return; - } - - /* loss_thresh = 2% */ - if (bbr->bytes_lost_in_round * 100 <= ack->prior_bytes_in_flight * 2) { - return; - } - - bbr->filled_pipe = 1; -} - -static void bbr_init_pacing_rate(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat) { - double nominal_bandwidth = (double)bbr->initial_cwnd; - - cstat->pacing_rate = NGTCP2_BBR_STARTUP_PACING_GAIN * nominal_bandwidth / - (double)NGTCP2_MILLISECONDS; -} - -static void bbr_set_pacing_rate_with_gain(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - double pacing_gain) { - double rate = pacing_gain * (double)bbr->bw * - (100 - NGTCP2_BBR_PACING_MARGIN_PERCENT) / 100 / NGTCP2_SECONDS; - - if (bbr->filled_pipe || rate > cstat->pacing_rate) { - cstat->pacing_rate = rate; - } -} - -static void bbr_set_pacing_rate(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat) { - 
bbr_set_pacing_rate_with_gain(bbr, cstat, bbr->pacing_gain); -} - -static void bbr_enter_startup(ngtcp2_bbr2_cc *bbr) { - ngtcp2_log_info(bbr->ccb.log, NGTCP2_LOG_EVENT_RCV, "bbr2 enter Startup"); - - bbr->state = NGTCP2_BBR2_STATE_STARTUP; - bbr->pacing_gain = NGTCP2_BBR_STARTUP_PACING_GAIN; - bbr->cwnd_gain = NGTCP2_BBR_STARTUP_CWND_GAIN; -} - -static void bbr_check_startup_done(ngtcp2_bbr2_cc *bbr, - const ngtcp2_cc_ack *ack) { - bbr_check_startup_full_bandwidth(bbr); - bbr_check_startup_high_loss(bbr, ack); - - if (bbr->state == NGTCP2_BBR2_STATE_STARTUP && bbr->filled_pipe) { - bbr_enter_drain(bbr); - } -} - -static void bbr_on_transmit(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - bbr_handle_restart_from_idle(bbr, cstat, ts); -} - -static void bbr_update_on_ack(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - bbr_update_model_and_state(bbr, cstat, ack, ts); - bbr_update_control_parameters(bbr, cstat, ack); -} - -static void bbr_update_model_and_state(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, - ngtcp2_tstamp ts) { - bbr_update_latest_delivery_signals(bbr, cstat); - bbr_update_congestion_signals(bbr, cstat, ack); - bbr_update_ack_aggregation(bbr, cstat, ack, ts); - bbr_check_startup_done(bbr, ack); - bbr_check_drain(bbr, cstat, ts); - bbr_update_probe_bw_cycle_phase(bbr, cstat, ack, ts); - bbr_update_min_rtt(bbr, ack, ts); - bbr_check_probe_rtt(bbr, cstat, ts); - bbr_advance_latest_delivery_signals(bbr, cstat); - bbr_bound_bw_for_model(bbr); -} - -static void bbr_update_control_parameters(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - bbr_set_pacing_rate(bbr, cstat); - bbr_set_send_quantum(bbr, cstat); - bbr_set_cwnd(bbr, cstat, ack); -} - -static void bbr_update_on_loss(ngtcp2_bbr2_cc *cc, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { - bbr_handle_lost_packet(cc, cstat, pkt, ts); -} - -static void bbr_update_latest_delivery_signals(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - bbr->loss_round_start = 0; - bbr->bw_latest = ngtcp2_max(bbr->bw_latest, cstat->delivery_rate_sec); - bbr->inflight_latest = - ngtcp2_max(bbr->inflight_latest, bbr->rst->rs.delivered); - - if (bbr->rst->rs.prior_delivered >= bbr->loss_round_delivered) { - bbr->loss_round_delivered = bbr->rst->delivered; - bbr->loss_round_start = 1; - } -} - -static void bbr_advance_latest_delivery_signals(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - if (bbr->loss_round_start) { - bbr->bw_latest = cstat->delivery_rate_sec; - bbr->inflight_latest = bbr->rst->rs.delivered; - } -} - -static void bbr_update_congestion_signals(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - bbr_update_max_bw(bbr, cstat, ack); - - if (ack->bytes_lost) { - bbr->bytes_lost_in_round += ack->bytes_lost; - ++bbr->loss_events_in_round; - - if (!bbr->loss_in_round) { - bbr->loss_in_round = 1; - bbr->loss_round_delivered = bbr->rst->delivered; - } - } - - if (!bbr->loss_round_start) { - return; - } - - bbr_adapt_lower_bounds_from_congestion(bbr, cstat); - - bbr->loss_in_round = 0; -} - -static void bbr_adapt_lower_bounds_from_congestion(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - if (!bbr->filled_pipe || bbr_is_in_probe_bw_state(bbr)) { - return; - } - - if (bbr->loss_in_round) { - bbr_init_lower_bounds(bbr, cstat); - bbr_loss_lower_bounds(bbr); - } -} - -static void bbr_init_lower_bounds(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - if 
(bbr->bw_lo == UINT64_MAX) { - bbr->bw_lo = bbr->max_bw; - } - - if (bbr->inflight_lo == UINT64_MAX) { - bbr->inflight_lo = cstat->cwnd; - } -} - -static void bbr_loss_lower_bounds(ngtcp2_bbr2_cc *bbr) { - bbr->bw_lo = ngtcp2_max(bbr->bw_latest, bbr->bw_lo * NGTCP2_BBR_BETA_NUMER / - NGTCP2_BBR_BETA_DENOM); - bbr->inflight_lo = ngtcp2_max(bbr->inflight_latest, - bbr->inflight_lo * NGTCP2_BBR_BETA_NUMER / - NGTCP2_BBR_BETA_DENOM); -} - -static void bbr_bound_bw_for_model(ngtcp2_bbr2_cc *bbr) { - bbr->bw = ngtcp2_min(bbr->max_bw, bbr->bw_lo); - bbr->bw = ngtcp2_min(bbr->bw, bbr->bw_hi); -} - -static void bbr_update_max_bw(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - bbr_update_round(bbr, ack); - bbr_handle_recovery(bbr, cstat, ack); - - if (cstat->delivery_rate_sec >= bbr->max_bw || !bbr->rst->rs.is_app_limited) { - ngtcp2_window_filter_update(&bbr->max_bw_filter, cstat->delivery_rate_sec, - bbr->cycle_count); - - bbr->max_bw = ngtcp2_window_filter_get_best(&bbr->max_bw_filter); - } -} - -static void bbr_update_round(ngtcp2_bbr2_cc *bbr, const ngtcp2_cc_ack *ack) { - if (ack->pkt_delivered >= bbr->next_round_delivered) { - bbr_start_round(bbr); - - ++bbr->round_count; - ++bbr->rounds_since_bw_probe; - bbr->round_start = 1; - - bbr->bytes_lost_in_round = 0; - bbr->loss_events_in_round = 0; - - bbr->rst->is_cwnd_limited = 0; - - return; - } - - bbr->round_start = 0; -} - -static void bbr_start_round(ngtcp2_bbr2_cc *bbr) { - bbr->next_round_delivered = bbr->rst->delivered; -} - -static int bbr_is_in_probe_bw_state(ngtcp2_bbr2_cc *bbr) { - switch (bbr->state) { - case NGTCP2_BBR2_STATE_PROBE_BW_DOWN: - case NGTCP2_BBR2_STATE_PROBE_BW_CRUISE: - case NGTCP2_BBR2_STATE_PROBE_BW_REFILL: - case NGTCP2_BBR2_STATE_PROBE_BW_UP: - return 1; - default: - return 0; - } -} - -static void bbr_update_ack_aggregation(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, - ngtcp2_tstamp ts) { - ngtcp2_duration interval = ts - bbr->extra_acked_interval_start; - uint64_t expected_delivered = bbr->bw * interval / NGTCP2_SECONDS; - uint64_t extra; - - if (bbr->extra_acked_delivered <= expected_delivered) { - bbr->extra_acked_delivered = 0; - bbr->extra_acked_interval_start = ts; - expected_delivered = 0; - } - - bbr->extra_acked_delivered += ack->bytes_delivered; - extra = bbr->extra_acked_delivered - expected_delivered; - extra = ngtcp2_min(extra, cstat->cwnd); - - ngtcp2_window_filter_update(&bbr->extra_acked_filter, extra, - bbr->round_count); - - bbr->extra_acked = ngtcp2_window_filter_get_best(&bbr->extra_acked_filter); -} - -static void bbr_enter_drain(ngtcp2_bbr2_cc *bbr) { - ngtcp2_log_info(bbr->ccb.log, NGTCP2_LOG_EVENT_RCV, "bbr2 enter Drain"); - - bbr->state = NGTCP2_BBR2_STATE_DRAIN; - bbr->pacing_gain = 1. 
/ NGTCP2_BBR_STARTUP_CWND_GAIN; - bbr->cwnd_gain = NGTCP2_BBR_STARTUP_CWND_GAIN; -} - -static void bbr_check_drain(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - if (bbr->state == NGTCP2_BBR2_STATE_DRAIN && - cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, bbr->bw, 1.0)) { - bbr_enter_probe_bw(bbr, ts); - } -} - -static void bbr_enter_probe_bw(ngtcp2_bbr2_cc *bbr, ngtcp2_tstamp ts) { - bbr_start_probe_bw_down(bbr, ts); -} - -static void bbr_start_probe_bw_down(ngtcp2_bbr2_cc *bbr, ngtcp2_tstamp ts) { - ngtcp2_log_info(bbr->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr2 start ProbeBW_DOWN"); - - bbr_reset_congestion_signals(bbr); - - bbr->probe_up_cnt = UINT64_MAX; - - bbr_pick_probe_wait(bbr); - - bbr->cycle_stamp = ts; - bbr->ack_phase = NGTCP2_BBR2_ACK_PHASE_ACKS_PROBE_STOPPING; - - bbr_start_round(bbr); - - bbr->state = NGTCP2_BBR2_STATE_PROBE_BW_DOWN; - bbr->pacing_gain = 0.9; - bbr->cwnd_gain = 2; -} - -static void bbr_start_probe_bw_cruise(ngtcp2_bbr2_cc *bbr) { - ngtcp2_log_info(bbr->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr2 start ProbeBW_CRUISE"); - - bbr->state = NGTCP2_BBR2_STATE_PROBE_BW_CRUISE; - bbr->pacing_gain = 1.0; - bbr->cwnd_gain = 2; -} - -static void bbr_start_probe_bw_refill(ngtcp2_bbr2_cc *bbr) { - ngtcp2_log_info(bbr->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr2 start ProbeBW_REFILL"); - - bbr_reset_lower_bounds(bbr); - - bbr->bw_probe_up_rounds = 0; - bbr->bw_probe_up_acks = 0; - bbr->ack_phase = NGTCP2_BBR2_ACK_PHASE_ACKS_REFILLING; - - bbr_start_round(bbr); - - bbr->state = NGTCP2_BBR2_STATE_PROBE_BW_REFILL; - bbr->pacing_gain = 1.0; - bbr->cwnd_gain = 2; -} - -static void bbr_start_probe_bw_up(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - ngtcp2_log_info(bbr->ccb.log, NGTCP2_LOG_EVENT_RCV, "bbr2 start ProbeBW_UP"); - - bbr->ack_phase = NGTCP2_BBR2_ACK_PHASE_ACKS_PROBE_STARTING; - - bbr_start_round(bbr); - - bbr->cycle_stamp = ts; - bbr->state = NGTCP2_BBR2_STATE_PROBE_BW_UP; - bbr->pacing_gain = 1.25; - bbr->cwnd_gain = 2; - - bbr_raise_inflight_hi_slope(bbr, cstat); -} - -static void bbr_update_probe_bw_cycle_phase(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, - ngtcp2_tstamp ts) { - if (!bbr->filled_pipe) { - return; - } - - bbr_adapt_upper_bounds(bbr, cstat, ack, ts); - - if (!bbr_is_in_probe_bw_state(bbr)) { - return; - } - - switch (bbr->state) { - case NGTCP2_BBR2_STATE_PROBE_BW_DOWN: - if (bbr_check_time_to_probe_bw(bbr, cstat, ts)) { - return; - } - - if (bbr_check_time_to_cruise(bbr, cstat, ts)) { - bbr_start_probe_bw_cruise(bbr); - } - - break; - case NGTCP2_BBR2_STATE_PROBE_BW_CRUISE: - if (bbr_check_time_to_probe_bw(bbr, cstat, ts)) { - return; - } - - break; - case NGTCP2_BBR2_STATE_PROBE_BW_REFILL: - if (bbr->round_start) { - bbr->bw_probe_samples = 1; - bbr_start_probe_bw_up(bbr, cstat, ts); - } - - break; - case NGTCP2_BBR2_STATE_PROBE_BW_UP: - if (bbr_has_elapsed_in_phase(bbr, bbr->min_rtt, ts) && - cstat->bytes_in_flight > bbr_inflight(bbr, cstat, bbr->max_bw, 1.25)) { - bbr_start_probe_bw_down(bbr, ts); - } - - break; - default: - break; - } -} - -static int bbr_check_time_to_cruise(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { - (void)ts; - - if (cstat->bytes_in_flight > bbr_inflight_with_headroom(bbr, cstat)) { - return 0; - } - - if (cstat->bytes_in_flight <= bbr_inflight(bbr, cstat, bbr->max_bw, 1.0)) { - return 1; - } - - return 0; -} - -static int bbr_has_elapsed_in_phase(ngtcp2_bbr2_cc *bbr, - ngtcp2_duration interval, - ngtcp2_tstamp ts) { - return ts 
> bbr->cycle_stamp + interval; -} - -static uint64_t bbr_inflight_with_headroom(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - uint64_t headroom; - uint64_t mpcwnd; - if (bbr->inflight_hi == UINT64_MAX) { - return UINT64_MAX; - } - - headroom = ngtcp2_max(cstat->max_udp_payload_size, - bbr->inflight_hi * NGTCP2_BBR_HEADROOM_NUMER / - NGTCP2_BBR_HEADROOM_DENOM); - mpcwnd = min_pipe_cwnd(cstat->max_udp_payload_size); - - if (bbr->inflight_hi > headroom) { - return ngtcp2_max(bbr->inflight_hi - headroom, mpcwnd); - } - - return mpcwnd; -} - -static void bbr_raise_inflight_hi_slope(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - uint64_t growth_this_round = cstat->max_udp_payload_size - << bbr->bw_probe_up_rounds; - - bbr->bw_probe_up_rounds = ngtcp2_min(bbr->bw_probe_up_rounds + 1, 30); - bbr->probe_up_cnt = ngtcp2_max(cstat->cwnd / growth_this_round, 1) * - cstat->max_udp_payload_size; -} - -static void bbr_probe_inflight_hi_upward(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - uint64_t delta; - - if (!bbr->rst->is_cwnd_limited || cstat->cwnd < bbr->inflight_hi) { - return; - } - - bbr->bw_probe_up_acks += ack->bytes_delivered; - - if (bbr->bw_probe_up_acks >= bbr->probe_up_cnt) { - delta = bbr->bw_probe_up_acks / bbr->probe_up_cnt; - bbr->bw_probe_up_acks -= delta * bbr->probe_up_cnt; - bbr->inflight_hi += delta * cstat->max_udp_payload_size; - } - - if (bbr->round_start) { - bbr_raise_inflight_hi_slope(bbr, cstat); - } -} - -static void bbr_adapt_upper_bounds(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - if (bbr->ack_phase == NGTCP2_BBR2_ACK_PHASE_ACKS_PROBE_STARTING && - bbr->round_start) { - bbr->ack_phase = NGTCP2_BBR2_ACK_PHASE_ACKS_PROBE_FEEDBACK; - } - - if (bbr->ack_phase == NGTCP2_BBR2_ACK_PHASE_ACKS_PROBE_STOPPING && - bbr->round_start) { - if (bbr_is_in_probe_bw_state(bbr) && !bbr->rst->rs.is_app_limited) { - bbr_advance_max_bw_filter(bbr); - } - } - - if (!bbr_check_inflight_too_high(bbr, cstat, ts)) { - /* bbr->bw_hi never be updated */ - if (bbr->inflight_hi == UINT64_MAX /* || bbr->bw_hi == UINT64_MAX */) { - return; - } - - if (bbr->rst->rs.tx_in_flight > bbr->inflight_hi) { - bbr->inflight_hi = bbr->rst->rs.tx_in_flight; - } - - if (cstat->delivery_rate_sec > bbr->bw_hi) { - bbr->bw_hi = cstat->delivery_rate_sec; - } - - if (bbr->state == NGTCP2_BBR2_STATE_PROBE_BW_UP) { - bbr_probe_inflight_hi_upward(bbr, cstat, ack); - } - } -} - -static int bbr_check_time_to_probe_bw(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - if (bbr_has_elapsed_in_phase(bbr, bbr->bw_probe_wait, ts) || - bbr_is_reno_coexistence_probe_time(bbr, cstat)) { - bbr_start_probe_bw_refill(bbr); - - return 1; - } - - return 0; -} - -static void bbr_pick_probe_wait(ngtcp2_bbr2_cc *bbr) { - uint8_t rand; - - bbr->rand(&rand, 1, &bbr->rand_ctx); - - bbr->rounds_since_bw_probe = (uint64_t)(rand * 2 / 256); - - bbr->rand(&rand, 1, &bbr->rand_ctx); - - bbr->bw_probe_wait = 2 * NGTCP2_SECONDS + - (ngtcp2_tstamp)((double)rand / 255. 
* NGTCP2_SECONDS); -} - -static int bbr_is_reno_coexistence_probe_time(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - uint64_t reno_rounds = - bbr_target_inflight(bbr, cstat) / cstat->max_udp_payload_size; - - return bbr->rounds_since_bw_probe >= ngtcp2_min(reno_rounds, 63); -} - -static uint64_t bbr_target_inflight(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - uint64_t bdp = bbr_inflight(bbr, cstat, bbr->bw, 1.0); - - return ngtcp2_min(bdp, cstat->cwnd); -} - -static int bbr_check_inflight_too_high(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - if (is_inflight_too_high(&bbr->rst->rs)) { - if (bbr->bw_probe_samples) { - bbr_handle_inflight_too_high(bbr, cstat, &bbr->rst->rs, ts); - } - - return 1; - } - - return 0; -} - -static int is_inflight_too_high(const ngtcp2_rs *rs) { - return rs->lost * NGTCP2_BBR_LOSS_THRESH_DENOM > - rs->tx_in_flight * NGTCP2_BBR_LOSS_THRESH_NUMER; -} - -static void bbr_handle_inflight_too_high(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_rs *rs, - ngtcp2_tstamp ts) { - bbr->bw_probe_samples = 0; - - if (!rs->is_app_limited) { - bbr->prior_inflight_hi = bbr->inflight_hi; - - bbr->inflight_hi = ngtcp2_max( - rs->tx_in_flight, bbr_target_inflight(bbr, cstat) * - NGTCP2_BBR_BETA_NUMER / NGTCP2_BBR_BETA_DENOM); - } - - if (bbr->state == NGTCP2_BBR2_STATE_PROBE_BW_UP) { - bbr_start_probe_bw_down(bbr, ts); - } -} - -static void bbr_handle_lost_packet(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { - ngtcp2_rs rs = {0}; - - if (!bbr->bw_probe_samples) { - return; - } - - rs.tx_in_flight = pkt->tx_in_flight; - rs.lost = bbr->rst->lost - pkt->lost; - rs.is_app_limited = pkt->is_app_limited; - - if (is_inflight_too_high(&rs)) { - rs.tx_in_flight = bbr_inflight_hi_from_lost_packet(bbr, &rs, pkt); - - bbr_handle_inflight_too_high(bbr, cstat, &rs, ts); - } -} - -static uint64_t bbr_inflight_hi_from_lost_packet(ngtcp2_bbr2_cc *bbr, - const ngtcp2_rs *rs, - const ngtcp2_cc_pkt *pkt) { - uint64_t inflight_prev, lost_prefix; - (void)bbr; - - assert(rs->tx_in_flight >= pkt->pktlen); - - inflight_prev = rs->tx_in_flight - pkt->pktlen; - - assert(rs->lost >= pkt->pktlen); - - /* bbr->rst->lost is not incremented for pkt yet */ - - if (inflight_prev * NGTCP2_BBR_LOSS_THRESH_NUMER < - rs->lost * NGTCP2_BBR_LOSS_THRESH_DENOM) { - return inflight_prev; - } - - lost_prefix = (inflight_prev * NGTCP2_BBR_LOSS_THRESH_NUMER - - rs->lost * NGTCP2_BBR_LOSS_THRESH_DENOM) / - (NGTCP2_BBR_LOSS_THRESH_DENOM - NGTCP2_BBR_LOSS_THRESH_NUMER); - - return inflight_prev + lost_prefix; -} - -static void bbr_update_min_rtt(ngtcp2_bbr2_cc *bbr, const ngtcp2_cc_ack *ack, - ngtcp2_tstamp ts) { - int min_rtt_expired; - - bbr->probe_rtt_expired = - ts > bbr->probe_rtt_min_stamp + NGTCP2_BBR_PROBE_RTT_INTERVAL; - - if (ack->rtt != UINT64_MAX && - (ack->rtt < bbr->probe_rtt_min_delay || bbr->probe_rtt_expired)) { - bbr->probe_rtt_min_delay = ack->rtt; - bbr->probe_rtt_min_stamp = ts; - } - - min_rtt_expired = ts > bbr->min_rtt_stamp + NGTCP2_BBR_MIN_RTT_FILTERLEN; - - if (bbr->probe_rtt_min_delay < bbr->min_rtt || min_rtt_expired) { - bbr->min_rtt = bbr->probe_rtt_min_delay; - bbr->min_rtt_stamp = bbr->probe_rtt_min_stamp; - - ngtcp2_log_info(bbr->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr2 update min_rtt=%" PRIu64, bbr->min_rtt); - } -} - -static void bbr_check_probe_rtt(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - if (bbr->state != NGTCP2_BBR2_STATE_PROBE_RTT && bbr->probe_rtt_expired && - 
!bbr->idle_restart) { - bbr_enter_probe_rtt(bbr); - bbr_save_cwnd(bbr, cstat); - - bbr->probe_rtt_done_stamp = UINT64_MAX; - bbr->ack_phase = NGTCP2_BBR2_ACK_PHASE_ACKS_PROBE_STOPPING; - - bbr_start_round(bbr); - } - - if (bbr->state == NGTCP2_BBR2_STATE_PROBE_RTT) { - bbr_handle_probe_rtt(bbr, cstat, ts); - } - - if (bbr->rst->rs.delivered) { - bbr->idle_restart = 0; - } -} - -static void bbr_enter_probe_rtt(ngtcp2_bbr2_cc *bbr) { - ngtcp2_log_info(bbr->ccb.log, NGTCP2_LOG_EVENT_RCV, "bbr2 enter ProbeRTT"); - - bbr->state = NGTCP2_BBR2_STATE_PROBE_RTT; - bbr->pacing_gain = 1; - bbr->cwnd_gain = NGTCP2_BBR_PROBE_RTT_CWND_GAIN; -} - -static void bbr_handle_probe_rtt(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - bbr_mark_connection_app_limited(bbr, cstat); - - if (bbr->probe_rtt_done_stamp == UINT64_MAX && - cstat->bytes_in_flight <= bbr_probe_rtt_cwnd(bbr, cstat)) { - bbr->probe_rtt_done_stamp = ts + NGTCP2_BBR_PROBE_RTT_DURATION; - bbr->probe_rtt_round_done = 0; - - bbr_start_round(bbr); - - return; - } - - if (bbr->probe_rtt_done_stamp != UINT64_MAX) { - if (bbr->round_start) { - bbr->probe_rtt_round_done = 1; - } - - if (bbr->probe_rtt_round_done) { - bbr_check_probe_rtt_done(bbr, cstat, ts); - } - } -} - -static void bbr_check_probe_rtt_done(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - if (bbr->probe_rtt_done_stamp != UINT64_MAX && - ts > bbr->probe_rtt_done_stamp) { - bbr->probe_rtt_min_stamp = ts; - bbr_restore_cwnd(bbr, cstat); - bbr_exit_probe_rtt(bbr, ts); - } -} - -static void bbr_mark_connection_app_limited(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - uint64_t app_limited = bbr->rst->delivered + cstat->bytes_in_flight; - - if (app_limited) { - bbr->rst->app_limited = app_limited; - } else { - bbr->rst->app_limited = cstat->max_udp_payload_size; - } -} - -static void bbr_exit_probe_rtt(ngtcp2_bbr2_cc *bbr, ngtcp2_tstamp ts) { - bbr_reset_lower_bounds(bbr); - - if (bbr->filled_pipe) { - bbr_start_probe_bw_down(bbr, ts); - bbr_start_probe_bw_cruise(bbr); - } else { - bbr_enter_startup(bbr); - } -} - -static void bbr_handle_restart_from_idle(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - if (cstat->bytes_in_flight == 0 && bbr->rst->app_limited) { - ngtcp2_log_info(bbr->ccb.log, NGTCP2_LOG_EVENT_RCV, - "bbr2 restart from idle"); - - bbr->idle_restart = 1; - bbr->extra_acked_interval_start = ts; - - if (bbr_is_in_probe_bw_state(bbr)) { - bbr_set_pacing_rate_with_gain(bbr, cstat, 1); - } else if (bbr->state == NGTCP2_BBR2_STATE_PROBE_RTT) { - bbr_check_probe_rtt_done(bbr, cstat, ts); - } - } -} - -static uint64_t bbr_bdp_multiple(ngtcp2_bbr2_cc *bbr, uint64_t bw, - double gain) { - uint64_t bdp; - - if (bbr->min_rtt == UINT64_MAX) { - return bbr->initial_cwnd; - } - - bdp = bw * bbr->min_rtt / NGTCP2_SECONDS; - - return (uint64_t)(gain * (double)bdp); -} - -static uint64_t min_pipe_cwnd(size_t max_udp_payload_size) { - return max_udp_payload_size * 4; -} - -static uint64_t bbr_quantization_budget(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - uint64_t inflight) { - bbr_update_offload_budget(bbr, cstat); - - inflight = ngtcp2_max(inflight, bbr->offload_budget); - inflight = ngtcp2_max(inflight, min_pipe_cwnd(cstat->max_udp_payload_size)); - - if (bbr->state == NGTCP2_BBR2_STATE_PROBE_BW_UP) { - inflight += 2 * cstat->max_udp_payload_size; - } - - return inflight; -} - -static uint64_t bbr_inflight(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - uint64_t bw, double gain) { - uint64_t inflight = 
bbr_bdp_multiple(bbr, bw, gain); - - return bbr_quantization_budget(bbr, cstat, inflight); -} - -static void bbr_update_max_inflight(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - uint64_t inflight; - - /* Not documented */ - /* bbr_update_aggregation_budget(bbr); */ - - inflight = bbr_bdp_multiple(bbr, bbr->bw, bbr->cwnd_gain) + bbr->extra_acked; - bbr->max_inflight = bbr_quantization_budget(bbr, cstat, inflight); -} - -static void bbr_update_offload_budget(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - bbr->offload_budget = 3 * cstat->send_quantum; -} - -static void bbr_advance_max_bw_filter(ngtcp2_bbr2_cc *bbr) { - ++bbr->cycle_count; -} - -static void bbr_modulate_cwnd_for_recovery(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - if (ack->bytes_lost > 0) { - if (cstat->cwnd > ack->bytes_lost) { - cstat->cwnd -= ack->bytes_lost; - cstat->cwnd = ngtcp2_max(cstat->cwnd, 2 * cstat->max_udp_payload_size); - } else { - cstat->cwnd = cstat->max_udp_payload_size; - } - } - - if (bbr->packet_conservation) { - cstat->cwnd = - ngtcp2_max(cstat->cwnd, cstat->bytes_in_flight + ack->bytes_delivered); - } -} - -static void bbr_save_cwnd(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat) { - if (!bbr->in_loss_recovery && bbr->state != NGTCP2_BBR2_STATE_PROBE_RTT) { - bbr->prior_cwnd = cstat->cwnd; - return; - } - - bbr->prior_cwnd = ngtcp2_max(bbr->prior_cwnd, cstat->cwnd); -} - -static void bbr_restore_cwnd(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat) { - cstat->cwnd = ngtcp2_max(cstat->cwnd, bbr->prior_cwnd); -} - -static uint64_t bbr_probe_rtt_cwnd(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - uint64_t probe_rtt_cwnd = - bbr_bdp_multiple(bbr, bbr->bw, NGTCP2_BBR_PROBE_RTT_CWND_GAIN); - uint64_t mpcwnd = min_pipe_cwnd(cstat->max_udp_payload_size); - - return ngtcp2_max(probe_rtt_cwnd, mpcwnd); -} - -static void bbr_bound_cwnd_for_probe_rtt(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - uint64_t probe_rtt_cwnd; - - if (bbr->state == NGTCP2_BBR2_STATE_PROBE_RTT) { - probe_rtt_cwnd = bbr_probe_rtt_cwnd(bbr, cstat); - - cstat->cwnd = ngtcp2_min(cstat->cwnd, probe_rtt_cwnd); - } -} - -static void bbr_set_cwnd(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - uint64_t mpcwnd; - - bbr_update_max_inflight(bbr, cstat); - bbr_modulate_cwnd_for_recovery(bbr, cstat, ack); - - if (!bbr->packet_conservation) { - if (bbr->filled_pipe) { - cstat->cwnd += ack->bytes_delivered; - cstat->cwnd = ngtcp2_min(cstat->cwnd, bbr->max_inflight); - } else if (cstat->cwnd < bbr->max_inflight || - bbr->rst->delivered < bbr->initial_cwnd) { - cstat->cwnd += ack->bytes_delivered; - } - - mpcwnd = min_pipe_cwnd(cstat->max_udp_payload_size); - cstat->cwnd = ngtcp2_max(cstat->cwnd, mpcwnd); - } - - bbr_bound_cwnd_for_probe_rtt(bbr, cstat); - bbr_bound_cwnd_for_model(bbr, cstat); -} - -static void bbr_bound_cwnd_for_model(ngtcp2_bbr2_cc *bbr, - ngtcp2_conn_stat *cstat) { - uint64_t cap = UINT64_MAX; - uint64_t mpcwnd = min_pipe_cwnd(cstat->max_udp_payload_size); - - if (bbr_is_in_probe_bw_state(bbr) && - bbr->state != NGTCP2_BBR2_STATE_PROBE_BW_CRUISE) { - cap = bbr->inflight_hi; - } else if (bbr->state == NGTCP2_BBR2_STATE_PROBE_RTT || - bbr->state == NGTCP2_BBR2_STATE_PROBE_BW_CRUISE) { - cap = bbr_inflight_with_headroom(bbr, cstat); - } - - cap = ngtcp2_min(cap, bbr->inflight_lo); - cap = ngtcp2_max(cap, mpcwnd); - - cstat->cwnd = ngtcp2_min(cstat->cwnd, cap); -} - -static void bbr_set_send_quantum(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat) { - size_t 
send_quantum = - (size_t)(cstat->pacing_rate * (double)(bbr->min_rtt == UINT64_MAX - ? NGTCP2_MILLISECONDS - : bbr->min_rtt)); - (void)bbr; - - cstat->send_quantum = ngtcp2_min(send_quantum, 64 * 1024); - cstat->send_quantum = - ngtcp2_max(cstat->send_quantum, cstat->max_udp_payload_size * 10); -} - -static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, - ngtcp2_tstamp sent_time) { - return cstat->congestion_recovery_start_ts != UINT64_MAX && - sent_time <= cstat->congestion_recovery_start_ts; -} - -static void bbr_handle_recovery(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack) { - if (bbr->in_loss_recovery) { - if (ack->pkt_delivered >= bbr->congestion_recovery_next_round_delivered) { - bbr->packet_conservation = 0; - } - - if (!in_congestion_recovery(cstat, ack->largest_acked_sent_ts)) { - bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; - bbr_restore_cwnd(bbr, cstat); - } - - return; - } - - if (bbr->congestion_recovery_start_ts != UINT64_MAX) { - bbr->in_loss_recovery = 1; - bbr_save_cwnd(bbr, cstat); - cstat->cwnd = cstat->bytes_in_flight + - ngtcp2_max(ack->bytes_delivered, cstat->max_udp_payload_size); - - cstat->congestion_recovery_start_ts = bbr->congestion_recovery_start_ts; - bbr->congestion_recovery_start_ts = UINT64_MAX; - bbr->packet_conservation = 1; - bbr->congestion_recovery_next_round_delivered = bbr->rst->delivered; - bbr->prior_inflight_lo = bbr->inflight_lo; - bbr->prior_bw_lo = bbr->bw_lo; - } -} - -static void bbr2_cc_init(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, - ngtcp2_rst *rst, ngtcp2_tstamp initial_ts, - ngtcp2_rand rand, const ngtcp2_rand_ctx *rand_ctx, - ngtcp2_log *log) { - bbr->ccb.log = log; - bbr->rst = rst; - bbr->rand = rand; - bbr->rand_ctx = *rand_ctx; - bbr->initial_cwnd = cstat->cwnd; - - bbr_on_init(bbr, cstat, initial_ts); -} - -static void bbr2_cc_free(ngtcp2_bbr2_cc *bbr) { (void)bbr; } - -static void bbr2_cc_on_pkt_acked(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { - (void)ccx; - (void)cstat; - (void)pkt; - (void)ts; -} - -static void bbr2_cc_on_pkt_lost(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { - ngtcp2_bbr2_cc *bbr = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr2_cc, ccb); - - bbr_update_on_loss(bbr, cstat, pkt, ts); -} - -static void bbr2_cc_congestion_event(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp sent_ts, ngtcp2_tstamp ts) { - ngtcp2_bbr2_cc *bbr = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr2_cc, ccb); - - if (!bbr->filled_pipe || bbr->in_loss_recovery || - bbr->congestion_recovery_start_ts != UINT64_MAX || - in_congestion_recovery(cstat, sent_ts)) { - return; - } - - bbr->congestion_recovery_start_ts = ts; -} - -static void bbr2_cc_on_spurious_congestion(ngtcp2_cc *ccx, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - ngtcp2_bbr2_cc *bbr = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr2_cc, ccb); - (void)ts; - - bbr->congestion_recovery_start_ts = UINT64_MAX; - cstat->congestion_recovery_start_ts = UINT64_MAX; - - if (bbr->in_loss_recovery) { - bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; - bbr_restore_cwnd(bbr, cstat); - bbr->full_bw_count = 0; - bbr->loss_in_round = 0; - bbr->inflight_lo = ngtcp2_max(bbr->inflight_lo, bbr->prior_inflight_lo); - bbr->inflight_hi = ngtcp2_max(bbr->inflight_hi, bbr->prior_inflight_hi); - bbr->bw_lo = ngtcp2_max(bbr->bw_lo, bbr->prior_bw_lo); - } -} - -static void bbr2_cc_on_persistent_congestion(ngtcp2_cc *ccx, - ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) 
{ - ngtcp2_bbr2_cc *bbr = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr2_cc, ccb); - (void)ts; - - cstat->congestion_recovery_start_ts = UINT64_MAX; - bbr->congestion_recovery_start_ts = UINT64_MAX; - bbr->in_loss_recovery = 0; - bbr->packet_conservation = 0; - - bbr_save_cwnd(bbr, cstat); - cstat->cwnd = cstat->bytes_in_flight + cstat->max_udp_payload_size; - cstat->cwnd = - ngtcp2_max(cstat->cwnd, min_pipe_cwnd(cstat->max_udp_payload_size)); -} - -static void bbr2_cc_on_ack_recv(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - ngtcp2_bbr2_cc *bbr = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr2_cc, ccb); - - bbr_update_on_ack(bbr, cstat, ack, ts); -} - -static void bbr2_cc_on_pkt_sent(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - const ngtcp2_cc_pkt *pkt) { - ngtcp2_bbr2_cc *bbr = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr2_cc, ccb); - - bbr_on_transmit(bbr, cstat, pkt->sent_ts); -} - -static void bbr2_cc_new_rtt_sample(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - (void)ccx; - (void)cstat; - (void)ts; -} - -static void bbr2_cc_reset(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - ngtcp2_tstamp ts) { - ngtcp2_bbr2_cc *bbr = ngtcp2_struct_of(ccx->ccb, ngtcp2_bbr2_cc, ccb); - - bbr_on_init(bbr, cstat, ts); -} - -static void bbr2_cc_event(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, - ngtcp2_cc_event_type event, ngtcp2_tstamp ts) { - (void)ccx; - (void)cstat; - (void)event; - (void)ts; -} - -int ngtcp2_cc_bbr2_cc_init(ngtcp2_cc *cc, ngtcp2_log *log, - ngtcp2_conn_stat *cstat, ngtcp2_rst *rst, - ngtcp2_tstamp initial_ts, ngtcp2_rand rand, - const ngtcp2_rand_ctx *rand_ctx, - const ngtcp2_mem *mem) { - ngtcp2_bbr2_cc *bbr; - - bbr = ngtcp2_mem_calloc(mem, 1, sizeof(ngtcp2_bbr2_cc)); - if (bbr == NULL) { - return NGTCP2_ERR_NOMEM; - } - - bbr2_cc_init(bbr, cstat, rst, initial_ts, rand, rand_ctx, log); - - cc->ccb = &bbr->ccb; - cc->on_pkt_acked = bbr2_cc_on_pkt_acked; - cc->on_pkt_lost = bbr2_cc_on_pkt_lost; - cc->congestion_event = bbr2_cc_congestion_event; - cc->on_spurious_congestion = bbr2_cc_on_spurious_congestion; - cc->on_persistent_congestion = bbr2_cc_on_persistent_congestion; - cc->on_ack_recv = bbr2_cc_on_ack_recv; - cc->on_pkt_sent = bbr2_cc_on_pkt_sent; - cc->new_rtt_sample = bbr2_cc_new_rtt_sample; - cc->reset = bbr2_cc_reset; - cc->event = bbr2_cc_event; - - return 0; -} - -void ngtcp2_cc_bbr2_cc_free(ngtcp2_cc *cc, const ngtcp2_mem *mem) { - ngtcp2_bbr2_cc *bbr = ngtcp2_struct_of(cc->ccb, ngtcp2_bbr2_cc, ccb); - - bbr2_cc_free(bbr); - ngtcp2_mem_free(mem, bbr); -} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr2.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr2.h deleted file mode 100644 index 50dc05a5f26121..00000000000000 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_bbr2.h +++ /dev/null @@ -1,149 +0,0 @@ -/* - * ngtcp2 - * - * Copyright (c) 2021 ngtcp2 contributors - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -#ifndef NGTCP2_BBR2_H -#define NGTCP2_BBR2_H - -#ifdef HAVE_CONFIG_H -# include -#endif /* HAVE_CONFIG_H */ - -#include - -#include "ngtcp2_cc.h" -#include "ngtcp2_window_filter.h" - -typedef struct ngtcp2_rst ngtcp2_rst; - -typedef enum ngtcp2_bbr2_state { - NGTCP2_BBR2_STATE_STARTUP, - NGTCP2_BBR2_STATE_DRAIN, - NGTCP2_BBR2_STATE_PROBE_BW_DOWN, - NGTCP2_BBR2_STATE_PROBE_BW_CRUISE, - NGTCP2_BBR2_STATE_PROBE_BW_REFILL, - NGTCP2_BBR2_STATE_PROBE_BW_UP, - NGTCP2_BBR2_STATE_PROBE_RTT, -} ngtcp2_bbr2_state; - -typedef enum ngtcp2_bbr2_ack_phase { - NGTCP2_BBR2_ACK_PHASE_ACKS_PROBE_STARTING, - NGTCP2_BBR2_ACK_PHASE_ACKS_PROBE_STOPPING, - NGTCP2_BBR2_ACK_PHASE_ACKS_PROBE_FEEDBACK, - NGTCP2_BBR2_ACK_PHASE_ACKS_REFILLING, -} ngtcp2_bbr2_ack_phase; - -/* - * ngtcp2_bbr2_cc is BBR v2 congestion controller, described in - * https://datatracker.ietf.org/doc/html/draft-cardwell-iccrg-bbr-congestion-control-01 - */ -typedef struct ngtcp2_bbr2_cc { - ngtcp2_cc_base ccb; - - uint64_t initial_cwnd; - ngtcp2_rst *rst; - ngtcp2_rand rand; - ngtcp2_rand_ctx rand_ctx; - - /* max_bw_filter for tracking the maximum recent delivery rate - samples for estimating max_bw. */ - ngtcp2_window_filter max_bw_filter; - - ngtcp2_window_filter extra_acked_filter; - - ngtcp2_duration min_rtt; - ngtcp2_tstamp min_rtt_stamp; - ngtcp2_tstamp probe_rtt_done_stamp; - int probe_rtt_round_done; - uint64_t prior_cwnd; - int idle_restart; - ngtcp2_tstamp extra_acked_interval_start; - uint64_t extra_acked_delivered; - - /* Congestion signals */ - int loss_in_round; - uint64_t bw_latest; - uint64_t inflight_latest; - - /* Lower bounds */ - uint64_t bw_lo; - uint64_t inflight_lo; - - /* Round counting */ - uint64_t next_round_delivered; - int round_start; - uint64_t round_count; - - /* Full pipe */ - int filled_pipe; - uint64_t full_bw; - size_t full_bw_count; - - /* Pacing rate */ - double pacing_gain; - - ngtcp2_bbr2_state state; - double cwnd_gain; - - int loss_round_start; - uint64_t loss_round_delivered; - uint64_t rounds_since_bw_probe; - uint64_t max_bw; - uint64_t bw; - uint64_t cycle_count; - uint64_t extra_acked; - uint64_t bytes_lost_in_round; - size_t loss_events_in_round; - uint64_t offload_budget; - uint64_t probe_up_cnt; - ngtcp2_tstamp cycle_stamp; - ngtcp2_bbr2_ack_phase ack_phase; - ngtcp2_duration bw_probe_wait; - int bw_probe_samples; - size_t bw_probe_up_rounds; - uint64_t bw_probe_up_acks; - uint64_t inflight_hi; - uint64_t bw_hi; - int probe_rtt_expired; - ngtcp2_duration probe_rtt_min_delay; - ngtcp2_tstamp probe_rtt_min_stamp; - int in_loss_recovery; - int packet_conservation; - uint64_t max_inflight; - ngtcp2_tstamp congestion_recovery_start_ts; - uint64_t congestion_recovery_next_round_delivered; - - uint64_t prior_inflight_lo; - uint64_t prior_inflight_hi; - uint64_t prior_bw_lo; -} ngtcp2_bbr2_cc; - -int ngtcp2_cc_bbr2_cc_init(ngtcp2_cc *cc, ngtcp2_log *log, - ngtcp2_conn_stat *cstat, ngtcp2_rst *rst, - ngtcp2_tstamp initial_ts, ngtcp2_rand rand, - const ngtcp2_rand_ctx *rand_ctx, - const ngtcp2_mem *mem); - -void 
ngtcp2_cc_bbr2_cc_free(ngtcp2_cc *cc, const ngtcp2_mem *mem); - -#endif /* NGTCP2_BBR2_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c index 1ee7d96b04776e..6369887c28671b 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c @@ -25,6 +25,7 @@ #include "ngtcp2_cc.h" #include +#include #if defined(_MSC_VER) # include @@ -34,6 +35,11 @@ #include "ngtcp2_macro.h" #include "ngtcp2_mem.h" #include "ngtcp2_rcvry.h" +#include "ngtcp2_conn_stat.h" + +/* NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN is the window length of + delivery rate filter driven by ACK clocking. */ +#define NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN 10 uint64_t ngtcp2_cc_compute_initcwnd(size_t max_udp_payload_size) { uint64_t n = 2 * max_udp_payload_size; @@ -56,45 +62,26 @@ ngtcp2_cc_pkt *ngtcp2_cc_pkt_init(ngtcp2_cc_pkt *pkt, int64_t pkt_num, return pkt; } -static void reno_cc_reset(ngtcp2_reno_cc *cc) { - cc->max_delivery_rate_sec = 0; - cc->target_cwnd = 0; - cc->pending_add = 0; -} - -void ngtcp2_reno_cc_init(ngtcp2_reno_cc *cc, ngtcp2_log *log) { - cc->ccb.log = log; - reno_cc_reset(cc); +static void reno_cc_reset(ngtcp2_cc_reno *reno) { + ngtcp2_window_filter_init(&reno->delivery_rate_sec_filter, + NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN); + reno->ack_count = 0; + reno->target_cwnd = 0; + reno->pending_add = 0; } -void ngtcp2_reno_cc_free(ngtcp2_reno_cc *cc) { (void)cc; } - -int ngtcp2_cc_reno_cc_init(ngtcp2_cc *cc, ngtcp2_log *log, - const ngtcp2_mem *mem) { - ngtcp2_reno_cc *reno_cc; - - reno_cc = ngtcp2_mem_calloc(mem, 1, sizeof(ngtcp2_reno_cc)); - if (reno_cc == NULL) { - return NGTCP2_ERR_NOMEM; - } - - ngtcp2_reno_cc_init(reno_cc, log); - - cc->ccb = &reno_cc->ccb; - cc->on_pkt_acked = ngtcp2_cc_reno_cc_on_pkt_acked; - cc->congestion_event = ngtcp2_cc_reno_cc_congestion_event; - cc->on_persistent_congestion = ngtcp2_cc_reno_cc_on_persistent_congestion; - cc->on_ack_recv = ngtcp2_cc_reno_cc_on_ack_recv; - cc->reset = ngtcp2_cc_reno_cc_reset; - - return 0; -} +void ngtcp2_cc_reno_init(ngtcp2_cc_reno *reno, ngtcp2_log *log) { + memset(reno, 0, sizeof(*reno)); -void ngtcp2_cc_reno_cc_free(ngtcp2_cc *cc, const ngtcp2_mem *mem) { - ngtcp2_reno_cc *reno_cc = ngtcp2_struct_of(cc->ccb, ngtcp2_reno_cc, ccb); + reno->cc.log = log; + reno->cc.on_pkt_acked = ngtcp2_cc_reno_cc_on_pkt_acked; + reno->cc.congestion_event = ngtcp2_cc_reno_cc_congestion_event; + reno->cc.on_persistent_congestion = + ngtcp2_cc_reno_cc_on_persistent_congestion; + reno->cc.on_ack_recv = ngtcp2_cc_reno_cc_on_ack_recv; + reno->cc.reset = ngtcp2_cc_reno_cc_reset; - ngtcp2_reno_cc_free(reno_cc); - ngtcp2_mem_free(mem, reno_cc); + reno_cc_reset(reno); } static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, @@ -103,10 +90,10 @@ static int in_congestion_recovery(const ngtcp2_conn_stat *cstat, sent_time <= cstat->congestion_recovery_start_ts; } -void ngtcp2_cc_reno_cc_on_pkt_acked(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, +void ngtcp2_cc_reno_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { - ngtcp2_reno_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_reno_cc, ccb); + ngtcp2_cc_reno *reno = ngtcp2_struct_of(cc, ngtcp2_cc_reno, cc); uint64_t m; (void)ts; @@ -114,28 +101,28 @@ void ngtcp2_cc_reno_cc_on_pkt_acked(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, return; } - if (cc->target_cwnd && cc->target_cwnd < cstat->cwnd) { + if (reno->target_cwnd && reno->target_cwnd < cstat->cwnd) { return; } if (cstat->cwnd < cstat->ssthresh) { cstat->cwnd += 
pkt->pktlen; - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(reno->cc.log, NGTCP2_LOG_EVENT_CCA, "pkn=%" PRId64 " acked, slow start cwnd=%" PRIu64, pkt->pkt_num, cstat->cwnd); return; } - m = cstat->max_udp_payload_size * pkt->pktlen + cc->pending_add; - cc->pending_add = m % cstat->cwnd; + m = cstat->max_tx_udp_payload_size * pkt->pktlen + reno->pending_add; + reno->pending_add = m % cstat->cwnd; cstat->cwnd += m / cstat->cwnd; } -void ngtcp2_cc_reno_cc_congestion_event(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, +void ngtcp2_cc_reno_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, ngtcp2_tstamp ts) { - ngtcp2_reno_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_reno_cc, ccb); + ngtcp2_cc_reno *reno = ngtcp2_struct_of(cc, ngtcp2_cc_reno, cc); uint64_t min_cwnd; if (in_congestion_recovery(cstat, sent_ts)) { @@ -144,123 +131,109 @@ void ngtcp2_cc_reno_cc_congestion_event(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, cstat->congestion_recovery_start_ts = ts; cstat->cwnd >>= NGTCP2_LOSS_REDUCTION_FACTOR_BITS; - min_cwnd = 2 * cstat->max_udp_payload_size; + min_cwnd = 2 * cstat->max_tx_udp_payload_size; cstat->cwnd = ngtcp2_max(cstat->cwnd, min_cwnd); cstat->ssthresh = cstat->cwnd; - cc->pending_add = 0; + reno->pending_add = 0; - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(reno->cc.log, NGTCP2_LOG_EVENT_CCA, "reduce cwnd because of packet loss cwnd=%" PRIu64, cstat->cwnd); } -void ngtcp2_cc_reno_cc_on_persistent_congestion(ngtcp2_cc *ccx, +void ngtcp2_cc_reno_cc_on_persistent_congestion(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { - (void)ccx; + (void)cc; (void)ts; - cstat->cwnd = 2 * cstat->max_udp_payload_size; + cstat->cwnd = 2 * cstat->max_tx_udp_payload_size; cstat->congestion_recovery_start_ts = UINT64_MAX; } -void ngtcp2_cc_reno_cc_on_ack_recv(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, +void ngtcp2_cc_reno_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - ngtcp2_reno_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_reno_cc, ccb); + ngtcp2_cc_reno *reno = ngtcp2_struct_of(cc, ngtcp2_cc_reno, cc); uint64_t target_cwnd, initcwnd; + uint64_t max_delivery_rate_sec; (void)ack; (void)ts; - /* TODO Use sliding window for min rtt measurement */ - /* TODO Use sliding window */ - cc->max_delivery_rate_sec = - ngtcp2_max(cc->max_delivery_rate_sec, cstat->delivery_rate_sec); + ++reno->ack_count; - if (cstat->min_rtt != UINT64_MAX && cc->max_delivery_rate_sec) { - target_cwnd = cc->max_delivery_rate_sec * cstat->min_rtt / NGTCP2_SECONDS; - initcwnd = ngtcp2_cc_compute_initcwnd(cstat->max_udp_payload_size); - cc->target_cwnd = ngtcp2_max(initcwnd, target_cwnd) * 289 / 100; + ngtcp2_window_filter_update(&reno->delivery_rate_sec_filter, + cstat->delivery_rate_sec, reno->ack_count); - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, + max_delivery_rate_sec = + ngtcp2_window_filter_get_best(&reno->delivery_rate_sec_filter); + + if (cstat->min_rtt != UINT64_MAX && max_delivery_rate_sec) { + target_cwnd = max_delivery_rate_sec * cstat->smoothed_rtt / NGTCP2_SECONDS; + initcwnd = ngtcp2_cc_compute_initcwnd(cstat->max_tx_udp_payload_size); + reno->target_cwnd = ngtcp2_max(initcwnd, target_cwnd) * 289 / 100; + + ngtcp2_log_info(reno->cc.log, NGTCP2_LOG_EVENT_CCA, "target_cwnd=%" PRIu64 " max_delivery_rate_sec=%" PRIu64 - " min_rtt=%" PRIu64, - cc->target_cwnd, cc->max_delivery_rate_sec, cstat->min_rtt); + " smoothed_rtt=%" PRIu64, + reno->target_cwnd, max_delivery_rate_sec, + 
cstat->smoothed_rtt); } } -void ngtcp2_cc_reno_cc_reset(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, +void ngtcp2_cc_reno_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { - ngtcp2_reno_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_reno_cc, ccb); + ngtcp2_cc_reno *reno = ngtcp2_struct_of(cc, ngtcp2_cc_reno, cc); (void)cstat; (void)ts; - reno_cc_reset(cc); -} - -static void cubic_cc_reset(ngtcp2_cubic_cc *cc) { - cc->max_delivery_rate_sec = 0; - cc->target_cwnd = 0; - cc->w_last_max = 0; - cc->w_tcp = 0; - cc->origin_point = 0; - cc->epoch_start = UINT64_MAX; - cc->k = 0; - - cc->prior.cwnd = 0; - cc->prior.ssthresh = 0; - cc->prior.w_last_max = 0; - cc->prior.w_tcp = 0; - cc->prior.origin_point = 0; - cc->prior.epoch_start = UINT64_MAX; - cc->prior.k = 0; - - cc->rtt_sample_count = 0; - cc->current_round_min_rtt = UINT64_MAX; - cc->last_round_min_rtt = UINT64_MAX; - cc->window_end = -1; -} - -void ngtcp2_cubic_cc_init(ngtcp2_cubic_cc *cc, ngtcp2_log *log) { - cc->ccb.log = log; - cubic_cc_reset(cc); + reno_cc_reset(reno); } -void ngtcp2_cubic_cc_free(ngtcp2_cubic_cc *cc) { (void)cc; } - -int ngtcp2_cc_cubic_cc_init(ngtcp2_cc *cc, ngtcp2_log *log, - const ngtcp2_mem *mem) { - ngtcp2_cubic_cc *cubic_cc; - - cubic_cc = ngtcp2_mem_calloc(mem, 1, sizeof(ngtcp2_cubic_cc)); - if (cubic_cc == NULL) { - return NGTCP2_ERR_NOMEM; - } - - ngtcp2_cubic_cc_init(cubic_cc, log); - - cc->ccb = &cubic_cc->ccb; - cc->on_pkt_acked = ngtcp2_cc_cubic_cc_on_pkt_acked; - cc->congestion_event = ngtcp2_cc_cubic_cc_congestion_event; - cc->on_spurious_congestion = ngtcp2_cc_cubic_cc_on_spurious_congestion; - cc->on_persistent_congestion = ngtcp2_cc_cubic_cc_on_persistent_congestion; - cc->on_ack_recv = ngtcp2_cc_cubic_cc_on_ack_recv; - cc->on_pkt_sent = ngtcp2_cc_cubic_cc_on_pkt_sent; - cc->new_rtt_sample = ngtcp2_cc_cubic_cc_new_rtt_sample; - cc->reset = ngtcp2_cc_cubic_cc_reset; - cc->event = ngtcp2_cc_cubic_cc_event; - - return 0; +static void cubic_cc_reset(ngtcp2_cc_cubic *cubic) { + ngtcp2_window_filter_init(&cubic->delivery_rate_sec_filter, + NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN); + cubic->ack_count = 0; + cubic->target_cwnd = 0; + cubic->w_last_max = 0; + cubic->w_tcp = 0; + cubic->origin_point = 0; + cubic->epoch_start = UINT64_MAX; + cubic->k = 0; + + cubic->prior.cwnd = 0; + cubic->prior.ssthresh = 0; + cubic->prior.w_last_max = 0; + cubic->prior.w_tcp = 0; + cubic->prior.origin_point = 0; + cubic->prior.epoch_start = UINT64_MAX; + cubic->prior.k = 0; + + cubic->rtt_sample_count = 0; + cubic->current_round_min_rtt = UINT64_MAX; + cubic->last_round_min_rtt = UINT64_MAX; + cubic->window_end = -1; } -void ngtcp2_cc_cubic_cc_free(ngtcp2_cc *cc, const ngtcp2_mem *mem) { - ngtcp2_cubic_cc *cubic_cc = ngtcp2_struct_of(cc->ccb, ngtcp2_cubic_cc, ccb); - - ngtcp2_cubic_cc_free(cubic_cc); - ngtcp2_mem_free(mem, cubic_cc); +void ngtcp2_cc_cubic_init(ngtcp2_cc_cubic *cubic, ngtcp2_log *log) { + memset(cubic, 0, sizeof(*cubic)); + + cubic->cc.log = log; + cubic->cc.on_pkt_acked = ngtcp2_cc_cubic_cc_on_pkt_acked; + cubic->cc.congestion_event = ngtcp2_cc_cubic_cc_congestion_event; + cubic->cc.on_spurious_congestion = ngtcp2_cc_cubic_cc_on_spurious_congestion; + cubic->cc.on_persistent_congestion = + ngtcp2_cc_cubic_cc_on_persistent_congestion; + cubic->cc.on_ack_recv = ngtcp2_cc_cubic_cc_on_ack_recv; + cubic->cc.on_pkt_sent = ngtcp2_cc_cubic_cc_on_pkt_sent; + cubic->cc.new_rtt_sample = ngtcp2_cc_cubic_cc_new_rtt_sample; + cubic->cc.reset = ngtcp2_cc_cubic_cc_reset; + cubic->cc.event = 
ngtcp2_cc_cubic_cc_event; + + cubic_cc_reset(cubic); } -static uint64_t ngtcp2_cbrt(uint64_t n) { +uint64_t ngtcp2_cbrt(uint64_t n) { int d; uint64_t a; @@ -269,26 +242,23 @@ static uint64_t ngtcp2_cbrt(uint64_t n) { } #if defined(_MSC_VER) -# if defined(_M_X64) - d = (int)__lzcnt64(n); -# elif defined(_M_ARM64) { unsigned long index; - d = sizeof(uint64_t) * CHAR_BIT; +# if defined(_WIN64) if (_BitScanReverse64(&index, n)) { - d = d - 1 - index; + d = 61 - index; } +# else /* !defined(_WIN64) */ + if (_BitScanReverse(&index, (unsigned int)(n >> 32))) { + d = 31 - index; + } else { + d = 32 + 31 - _BitScanReverse(&index, (unsigned int)n); + } +# endif /* !defined(_WIN64) */ } -# else - if ((n >> 32) != 0) { - d = __lzcnt((unsigned int)(n >> 32)); - } else { - d = 32 + __lzcnt((unsigned int)n); - } -# endif -#else +#else /* !defined(_MSC_VER) */ d = __builtin_clzll(n); -#endif +#endif /* !defined(_MSC_VER) */ a = 1ULL << ((64 - d) / 3 + 1); for (; a * a * a > n;) { @@ -303,42 +273,41 @@ static uint64_t ngtcp2_cbrt(uint64_t n) { #define NGTCP2_HS_MIN_ETA (4 * NGTCP2_MILLISECONDS) #define NGTCP2_HS_MAX_ETA (16 * NGTCP2_MILLISECONDS) -void ngtcp2_cc_cubic_cc_on_pkt_acked(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, +void ngtcp2_cc_cubic_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts) { - ngtcp2_cubic_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_cubic_cc, ccb); - ngtcp2_duration t, min_rtt, eta; - uint64_t target; + ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); + ngtcp2_duration t, eta; + uint64_t target, cwnd_thres; uint64_t tx, kx, time_delta, delta; uint64_t add, tcp_add; uint64_t m; - if (pkt->pktns_id == NGTCP2_PKTNS_ID_APPLICATION && cc->window_end != -1 && - cc->window_end <= pkt->pkt_num) { - cc->window_end = -1; + if (pkt->pktns_id == NGTCP2_PKTNS_ID_APPLICATION && cubic->window_end != -1 && + cubic->window_end <= pkt->pkt_num) { + cubic->window_end = -1; } if (in_congestion_recovery(cstat, pkt->sent_ts)) { return; } - if (cc->target_cwnd && cc->target_cwnd < cstat->cwnd) { - return; - } - if (cstat->cwnd < cstat->ssthresh) { /* slow-start */ - cstat->cwnd += pkt->pktlen; + if (cubic->target_cwnd == 0 || cubic->target_cwnd > cstat->cwnd) { + cstat->cwnd += pkt->pktlen; + } - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, "pkn=%" PRId64 " acked, slow start cwnd=%" PRIu64, pkt->pkt_num, cstat->cwnd); - if (cc->last_round_min_rtt != UINT64_MAX && - cc->current_round_min_rtt != UINT64_MAX && - cstat->cwnd >= NGTCP2_HS_MIN_SSTHRESH * cstat->max_udp_payload_size && - cc->rtt_sample_count >= NGTCP2_HS_N_RTT_SAMPLE) { - eta = cc->last_round_min_rtt / 8; + if (cubic->last_round_min_rtt != UINT64_MAX && + cubic->current_round_min_rtt != UINT64_MAX && + cstat->cwnd >= + NGTCP2_HS_MIN_SSTHRESH * cstat->max_tx_udp_payload_size && + cubic->rtt_sample_count >= NGTCP2_HS_N_RTT_SAMPLE) { + eta = cubic->last_round_min_rtt / 8; if (eta < NGTCP2_HS_MIN_ETA) { eta = NGTCP2_HS_MIN_ETA; @@ -346,11 +315,11 @@ void ngtcp2_cc_cubic_cc_on_pkt_acked(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, eta = NGTCP2_HS_MAX_ETA; } - if (cc->current_round_min_rtt >= cc->last_round_min_rtt + eta) { - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, + if (cubic->current_round_min_rtt >= cubic->last_round_min_rtt + eta) { + ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, "HyStart++ exit slow start"); - cc->w_last_max = cstat->cwnd; + cubic->w_last_max = cstat->cwnd; cstat->ssthresh = cstat->cwnd; } } 
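
The slow-start branch in the hunk above is the HyStart++ exit check: once the round has enough RTT samples and cwnd is large enough, slow start ends when the current round's minimum RTT exceeds the previous round's by eta = last_round_min_rtt / 8, clamped between NGTCP2_HS_MIN_ETA and NGTCP2_HS_MAX_ETA. A minimal standalone sketch of just that comparison (not part of the patch; the constants and helper name are illustrative, assuming the nanosecond tick unit ngtcp2 timestamps use):

#include <stdint.h>

#define MS_TICKS 1000000ULL            /* 1 ms, assuming nanosecond ticks */
#define HS_MIN_ETA (4 * MS_TICKS)      /* mirrors NGTCP2_HS_MIN_ETA above */
#define HS_MAX_ETA (16 * MS_TICKS)     /* mirrors NGTCP2_HS_MAX_ETA above */

/* Returns nonzero when the HyStart++ condition says to leave slow start.
   Callers are expected to have verified both RTTs are valid (not
   UINT64_MAX), as the hunk above does. */
int hystart_should_exit(uint64_t last_round_min_rtt,
                        uint64_t current_round_min_rtt) {
  uint64_t eta = last_round_min_rtt / 8;

  if (eta < HS_MIN_ETA) {
    eta = HS_MIN_ETA;
  } else if (eta > HS_MAX_ETA) {
    eta = HS_MAX_ETA;
  }

  /* Example: last_round_min_rtt = 40 ms gives eta = 5 ms, so a
     current-round minimum RTT of 45 ms or more triggers the exit
     (w_last_max and ssthresh are then set to cwnd, as above). */
  return current_round_min_rtt >= last_round_min_rtt + eta;
}
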
@@ -360,34 +329,32 @@ void ngtcp2_cc_cubic_cc_on_pkt_acked(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, /* congestion avoidance */ - if (cc->epoch_start == UINT64_MAX) { - cc->epoch_start = ts; - if (cstat->cwnd < cc->w_last_max) { - cc->k = ngtcp2_cbrt((cc->w_last_max - cstat->cwnd) * 10 / 4 / - cstat->max_udp_payload_size); - cc->origin_point = cc->w_last_max; + if (cubic->epoch_start == UINT64_MAX) { + cubic->epoch_start = ts; + if (cstat->cwnd < cubic->w_last_max) { + cubic->k = ngtcp2_cbrt((cubic->w_last_max - cstat->cwnd) * 10 / 4 / + cstat->max_tx_udp_payload_size); + cubic->origin_point = cubic->w_last_max; } else { - cc->k = 0; - cc->origin_point = cstat->cwnd; + cubic->k = 0; + cubic->origin_point = cstat->cwnd; } - cc->w_tcp = cstat->cwnd; + cubic->w_tcp = cstat->cwnd; - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, "cubic-ca epoch_start=%" PRIu64 " k=%" PRIu64 " origin_point=%" PRIu64, - cc->epoch_start, cc->k, cc->origin_point); + cubic->epoch_start, cubic->k, cubic->origin_point); - cc->pending_add = 0; - cc->pending_w_add = 0; + cubic->pending_add = 0; + cubic->pending_w_add = 0; } - min_rtt = cstat->min_rtt == UINT64_MAX ? cstat->initial_rtt : cstat->min_rtt; + t = ts - cubic->epoch_start; - t = ts + min_rtt - cc->epoch_start; - - tx = (t << 4) / NGTCP2_SECONDS; - kx = (cc->k << 4); + tx = (t << 10) / NGTCP2_SECONDS; + kx = (cubic->k << 10); if (tx > kx) { time_delta = tx - kx; @@ -395,210 +362,229 @@ void ngtcp2_cc_cubic_cc_on_pkt_acked(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, time_delta = kx - tx; } - delta = cstat->max_udp_payload_size * - ((((time_delta * time_delta) >> 4) * time_delta) >> 8) * 4 / 10; + delta = cstat->max_tx_udp_payload_size * + ((((time_delta * time_delta) >> 10) * time_delta) >> 10) * 4 / 10; + delta >>= 10; if (tx > kx) { - target = cc->origin_point + delta; + target = cubic->origin_point + delta; + } else { + target = cubic->origin_point - delta; + } + + cwnd_thres = + (target * (((t + cstat->smoothed_rtt) << 10) / NGTCP2_SECONDS)) >> 10; + if (cwnd_thres < cstat->cwnd) { + target = cstat->cwnd; + } else if (2 * cwnd_thres > 3 * cstat->cwnd) { + target = cstat->cwnd * 3 / 2; } else { - target = cc->origin_point - delta; + target = cwnd_thres; } if (target > cstat->cwnd) { - m = cc->pending_add + cstat->max_udp_payload_size * (target - cstat->cwnd); + m = cubic->pending_add + + cstat->max_tx_udp_payload_size * (target - cstat->cwnd); add = m / cstat->cwnd; - cc->pending_add = m % cstat->cwnd; + cubic->pending_add = m % cstat->cwnd; } else { - m = cc->pending_add + cstat->max_udp_payload_size; + m = cubic->pending_add + cstat->max_tx_udp_payload_size; add = m / (100 * cstat->cwnd); - cc->pending_add = m % (100 * cstat->cwnd); + cubic->pending_add = m % (100 * cstat->cwnd); } - m = cc->pending_w_add + cstat->max_udp_payload_size * pkt->pktlen; + m = cubic->pending_w_add + cstat->max_tx_udp_payload_size * pkt->pktlen; - cc->w_tcp += m / cstat->cwnd; - cc->pending_w_add = m % cstat->cwnd; + cubic->w_tcp += m / cstat->cwnd; + cubic->pending_w_add = m % cstat->cwnd; - if (cc->w_tcp > cstat->cwnd) { - tcp_add = - cstat->max_udp_payload_size * (cc->w_tcp - cstat->cwnd) / cstat->cwnd; + if (cubic->w_tcp > cstat->cwnd) { + tcp_add = cstat->max_tx_udp_payload_size * (cubic->w_tcp - cstat->cwnd) / + cstat->cwnd; if (tcp_add > add) { add = tcp_add; } } - cstat->cwnd += add; + if (cubic->target_cwnd == 0 || cubic->target_cwnd > cstat->cwnd) { + cstat->cwnd += add; + } - ngtcp2_log_info(cc->ccb.log, 
NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, "pkn=%" PRId64 " acked, cubic-ca cwnd=%" PRIu64 " t=%" PRIu64 " k=%" PRIi64 " time_delta=%" PRIu64 " delta=%" PRIu64 " target=%" PRIu64 " w_tcp=%" PRIu64, - pkt->pkt_num, cstat->cwnd, t, cc->k, time_delta >> 4, delta, - target, cc->w_tcp); + pkt->pkt_num, cstat->cwnd, t, cubic->k, time_delta >> 4, + delta, target, cubic->w_tcp); } -void ngtcp2_cc_cubic_cc_congestion_event(ngtcp2_cc *ccx, - ngtcp2_conn_stat *cstat, +void ngtcp2_cc_cubic_cc_congestion_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp sent_ts, ngtcp2_tstamp ts) { - ngtcp2_cubic_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_cubic_cc, ccb); + ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); uint64_t min_cwnd; if (in_congestion_recovery(cstat, sent_ts)) { return; } - if (cc->prior.cwnd < cstat->cwnd) { - cc->prior.cwnd = cstat->cwnd; - cc->prior.ssthresh = cstat->ssthresh; - cc->prior.w_last_max = cc->w_last_max; - cc->prior.w_tcp = cc->w_tcp; - cc->prior.origin_point = cc->origin_point; - cc->prior.epoch_start = cc->epoch_start; - cc->prior.k = cc->k; + if (cubic->prior.cwnd < cstat->cwnd) { + cubic->prior.cwnd = cstat->cwnd; + cubic->prior.ssthresh = cstat->ssthresh; + cubic->prior.w_last_max = cubic->w_last_max; + cubic->prior.w_tcp = cubic->w_tcp; + cubic->prior.origin_point = cubic->origin_point; + cubic->prior.epoch_start = cubic->epoch_start; + cubic->prior.k = cubic->k; } cstat->congestion_recovery_start_ts = ts; - cc->epoch_start = UINT64_MAX; - if (cstat->cwnd < cc->w_last_max) { - cc->w_last_max = cstat->cwnd * 17 / 10 / 2; + cubic->epoch_start = UINT64_MAX; + if (cstat->cwnd < cubic->w_last_max) { + cubic->w_last_max = cstat->cwnd * 17 / 10 / 2; } else { - cc->w_last_max = cstat->cwnd; + cubic->w_last_max = cstat->cwnd; } - min_cwnd = 2 * cstat->max_udp_payload_size; + min_cwnd = 2 * cstat->max_tx_udp_payload_size; cstat->ssthresh = cstat->cwnd * 7 / 10; cstat->ssthresh = ngtcp2_max(cstat->ssthresh, min_cwnd); cstat->cwnd = cstat->ssthresh; - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, "reduce cwnd because of packet loss cwnd=%" PRIu64, cstat->cwnd); } -void ngtcp2_cc_cubic_cc_on_spurious_congestion(ngtcp2_cc *ccx, +void ngtcp2_cc_cubic_cc_on_spurious_congestion(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { - ngtcp2_cubic_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_cubic_cc, ccb); + ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); (void)ts; - if (cstat->cwnd >= cc->prior.cwnd) { + if (cstat->cwnd >= cubic->prior.cwnd) { return; } cstat->congestion_recovery_start_ts = UINT64_MAX; - cstat->cwnd = cc->prior.cwnd; - cstat->ssthresh = cc->prior.ssthresh; - cc->w_last_max = cc->prior.w_last_max; - cc->w_tcp = cc->prior.w_tcp; - cc->origin_point = cc->prior.origin_point; - cc->epoch_start = cc->prior.epoch_start; - cc->k = cc->prior.k; - - cc->prior.cwnd = 0; - cc->prior.ssthresh = 0; - cc->prior.w_last_max = 0; - cc->prior.w_tcp = 0; - cc->prior.origin_point = 0; - cc->prior.epoch_start = UINT64_MAX; - cc->prior.k = 0; - - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, + cstat->cwnd = cubic->prior.cwnd; + cstat->ssthresh = cubic->prior.ssthresh; + cubic->w_last_max = cubic->prior.w_last_max; + cubic->w_tcp = cubic->prior.w_tcp; + cubic->origin_point = cubic->prior.origin_point; + cubic->epoch_start = cubic->prior.epoch_start; + cubic->k = cubic->prior.k; + + cubic->prior.cwnd = 0; + cubic->prior.ssthresh = 0; + 
cubic->prior.w_last_max = 0; + cubic->prior.w_tcp = 0; + cubic->prior.origin_point = 0; + cubic->prior.epoch_start = UINT64_MAX; + cubic->prior.k = 0; + + ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, "spurious congestion is detected and congestion state is " "restored cwnd=%" PRIu64, cstat->cwnd); } -void ngtcp2_cc_cubic_cc_on_persistent_congestion(ngtcp2_cc *ccx, +void ngtcp2_cc_cubic_cc_on_persistent_congestion(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { - (void)ccx; + (void)cc; (void)ts; - cstat->cwnd = 2 * cstat->max_udp_payload_size; + cstat->cwnd = 2 * cstat->max_tx_udp_payload_size; cstat->congestion_recovery_start_ts = UINT64_MAX; } -void ngtcp2_cc_cubic_cc_on_ack_recv(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, +void ngtcp2_cc_cubic_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_ack *ack, ngtcp2_tstamp ts) { - ngtcp2_cubic_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_cubic_cc, ccb); + ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); uint64_t target_cwnd, initcwnd; + uint64_t max_delivery_rate_sec; (void)ack; (void)ts; - /* TODO Use sliding window for min rtt measurement */ - /* TODO Use sliding window */ - cc->max_delivery_rate_sec = - ngtcp2_max(cc->max_delivery_rate_sec, cstat->delivery_rate_sec); + ++cubic->ack_count; + + ngtcp2_window_filter_update(&cubic->delivery_rate_sec_filter, + cstat->delivery_rate_sec, cubic->ack_count); + + max_delivery_rate_sec = + ngtcp2_window_filter_get_best(&cubic->delivery_rate_sec_filter); - if (cstat->min_rtt != UINT64_MAX && cc->max_delivery_rate_sec) { - target_cwnd = cc->max_delivery_rate_sec * cstat->min_rtt / NGTCP2_SECONDS; - initcwnd = ngtcp2_cc_compute_initcwnd(cstat->max_udp_payload_size); - cc->target_cwnd = ngtcp2_max(initcwnd, target_cwnd) * 289 / 100; + if (cstat->min_rtt != UINT64_MAX && max_delivery_rate_sec) { + target_cwnd = max_delivery_rate_sec * cstat->smoothed_rtt / NGTCP2_SECONDS; + initcwnd = ngtcp2_cc_compute_initcwnd(cstat->max_tx_udp_payload_size); + cubic->target_cwnd = ngtcp2_max(initcwnd, target_cwnd) * 289 / 100; - ngtcp2_log_info(cc->ccb.log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(cubic->cc.log, NGTCP2_LOG_EVENT_CCA, "target_cwnd=%" PRIu64 " max_delivery_rate_sec=%" PRIu64 - " min_rtt=%" PRIu64, - cc->target_cwnd, cc->max_delivery_rate_sec, cstat->min_rtt); + " smoothed_rtt=%" PRIu64, + cubic->target_cwnd, max_delivery_rate_sec, + cstat->smoothed_rtt); } } -void ngtcp2_cc_cubic_cc_on_pkt_sent(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, +void ngtcp2_cc_cubic_cc_on_pkt_sent(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_pkt *pkt) { - ngtcp2_cubic_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_cubic_cc, ccb); + ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); (void)cstat; - if (pkt->pktns_id != NGTCP2_PKTNS_ID_APPLICATION || cc->window_end != -1) { + if (pkt->pktns_id != NGTCP2_PKTNS_ID_APPLICATION || cubic->window_end != -1) { return; } - cc->window_end = pkt->pkt_num; - cc->last_round_min_rtt = cc->current_round_min_rtt; - cc->current_round_min_rtt = UINT64_MAX; - cc->rtt_sample_count = 0; + cubic->window_end = pkt->pkt_num; + cubic->last_round_min_rtt = cubic->current_round_min_rtt; + cubic->current_round_min_rtt = UINT64_MAX; + cubic->rtt_sample_count = 0; } -void ngtcp2_cc_cubic_cc_new_rtt_sample(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, +void ngtcp2_cc_cubic_cc_new_rtt_sample(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { - ngtcp2_cubic_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_cubic_cc, ccb); + ngtcp2_cc_cubic 
*cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); (void)ts; - if (cc->window_end == -1) { + if (cubic->window_end == -1) { return; } - cc->current_round_min_rtt = - ngtcp2_min(cc->current_round_min_rtt, cstat->latest_rtt); - ++cc->rtt_sample_count; + cubic->current_round_min_rtt = + ngtcp2_min(cubic->current_round_min_rtt, cstat->latest_rtt); + ++cubic->rtt_sample_count; } -void ngtcp2_cc_cubic_cc_reset(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, +void ngtcp2_cc_cubic_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { - ngtcp2_cubic_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_cubic_cc, ccb); + ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); (void)cstat; (void)ts; - cubic_cc_reset(cc); + cubic_cc_reset(cubic); } -void ngtcp2_cc_cubic_cc_event(ngtcp2_cc *ccx, ngtcp2_conn_stat *cstat, +void ngtcp2_cc_cubic_cc_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_cc_event_type event, ngtcp2_tstamp ts) { - ngtcp2_cubic_cc *cc = ngtcp2_struct_of(ccx->ccb, ngtcp2_cubic_cc, ccb); + ngtcp2_cc_cubic *cubic = ngtcp2_struct_of(cc, ngtcp2_cc_cubic, cc); ngtcp2_tstamp last_ts; - if (event != NGTCP2_CC_EVENT_TYPE_TX_START || cc->epoch_start == UINT64_MAX) { + if (event != NGTCP2_CC_EVENT_TYPE_TX_START || + cubic->epoch_start == UINT64_MAX) { return; } last_ts = cstat->last_tx_pkt_ts[NGTCP2_PKTNS_ID_APPLICATION]; - if (last_ts == UINT64_MAX || last_ts <= cc->epoch_start) { + if (last_ts == UINT64_MAX || last_ts <= cubic->epoch_start) { return; } assert(ts >= last_ts); - cc->epoch_start += ts - last_ts; + cubic->epoch_start += ts - last_ts; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h index 6d9e0c2459ece4..524bcdb7e4bf86 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.h @@ -31,24 +31,14 @@ #include +#include "ngtcp2_pktns_id.h" +#include "ngtcp2_window_filter.h" + #define NGTCP2_LOSS_REDUCTION_FACTOR_BITS 1 #define NGTCP2_PERSISTENT_CONGESTION_THRESHOLD 3 typedef struct ngtcp2_log ngtcp2_log; - -/** - * @struct - * - * :type:`ngtcp2_cc_base` is the base structure of custom congestion - * control algorithm. It must be the first field of custom congestion - * controller. - */ -typedef struct ngtcp2_cc_base { - /** - * :member:`log` is ngtcp2 library internal logger. - */ - ngtcp2_log *log; -} ngtcp2_cc_base; +typedef struct ngtcp2_conn_stat ngtcp2_conn_stat; /** * @struct @@ -117,10 +107,10 @@ typedef struct ngtcp2_cc_ack { */ uint64_t pkt_delivered; /** - * :member:`largest_acked_sent_ts` is the time when the largest - * acknowledged packet was sent. + * :member:`largest_pkt_sent_ts` is the time when the largest + * acknowledged packet was sent. It is UINT64_MAX if it is unknown. */ - ngtcp2_tstamp largest_acked_sent_ts; + ngtcp2_tstamp largest_pkt_sent_ts; /** * :member:`rtt` is the RTT sample. It is UINT64_MAX if no RTT * sample is available. @@ -242,15 +232,14 @@ typedef void (*ngtcp2_cc_event)(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, /** * @struct * - * :type:`ngtcp2_cc` is congestion control algorithm interface to - * allow custom implementation. + * :type:`ngtcp2_cc` is congestion control algorithm interface shared + * by implementations. All callback functions are optional. */ typedef struct ngtcp2_cc { /** - * :member:`ccb` is a pointer to :type:`ngtcp2_cc_base` which - * usually contains a state. + * :member:`log` is ngtcp2 library internal logger. 
*/ - ngtcp2_cc_base *ccb; + ngtcp2_log *log; /** * :member:`on_pkt_acked` is a callback function which is called * when a packet is acknowledged. @@ -313,22 +302,16 @@ ngtcp2_cc_pkt *ngtcp2_cc_pkt_init(ngtcp2_cc_pkt *pkt, int64_t pkt_num, ngtcp2_tstamp sent_ts, uint64_t lost, uint64_t tx_in_flight, int is_app_limited); -/* ngtcp2_reno_cc is the RENO congestion controller. */ -typedef struct ngtcp2_reno_cc { - ngtcp2_cc_base ccb; - uint64_t max_delivery_rate_sec; +/* ngtcp2_cc_reno is the RENO congestion controller. */ +typedef struct ngtcp2_cc_reno { + ngtcp2_cc cc; + ngtcp2_window_filter delivery_rate_sec_filter; + uint64_t ack_count; uint64_t target_cwnd; uint64_t pending_add; -} ngtcp2_reno_cc; - -int ngtcp2_cc_reno_cc_init(ngtcp2_cc *cc, ngtcp2_log *log, - const ngtcp2_mem *mem); - -void ngtcp2_cc_reno_cc_free(ngtcp2_cc *cc, const ngtcp2_mem *mem); - -void ngtcp2_reno_cc_init(ngtcp2_reno_cc *cc, ngtcp2_log *log); +} ngtcp2_cc_reno; -void ngtcp2_reno_cc_free(ngtcp2_reno_cc *cc); +void ngtcp2_cc_reno_init(ngtcp2_cc_reno *reno, ngtcp2_log *log); void ngtcp2_cc_reno_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_pkt *pkt, ngtcp2_tstamp ts); @@ -347,10 +330,11 @@ void ngtcp2_cc_reno_cc_on_ack_recv(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, void ngtcp2_cc_reno_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts); -/* ngtcp2_cubic_cc is CUBIC congestion controller. */ -typedef struct ngtcp2_cubic_cc { - ngtcp2_cc_base ccb; - uint64_t max_delivery_rate_sec; +/* ngtcp2_cc_cubic is CUBIC congestion controller. */ +typedef struct ngtcp2_cc_cubic { + ngtcp2_cc cc; + ngtcp2_window_filter delivery_rate_sec_filter; + uint64_t ack_count; uint64_t target_cwnd; uint64_t w_last_max; uint64_t w_tcp; @@ -376,16 +360,9 @@ typedef struct ngtcp2_cubic_cc { int64_t window_end; uint64_t pending_add; uint64_t pending_w_add; -} ngtcp2_cubic_cc; +} ngtcp2_cc_cubic; -int ngtcp2_cc_cubic_cc_init(ngtcp2_cc *cc, ngtcp2_log *log, - const ngtcp2_mem *mem); - -void ngtcp2_cc_cubic_cc_free(ngtcp2_cc *cc, const ngtcp2_mem *mem); - -void ngtcp2_cubic_cc_init(ngtcp2_cubic_cc *cc, ngtcp2_log *log); - -void ngtcp2_cubic_cc_free(ngtcp2_cubic_cc *cc); +void ngtcp2_cc_cubic_init(ngtcp2_cc_cubic *cc, ngtcp2_log *log); void ngtcp2_cc_cubic_cc_on_pkt_acked(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_cc_pkt *pkt, @@ -418,4 +395,6 @@ void ngtcp2_cc_cubic_cc_reset(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, void ngtcp2_cc_cubic_cc_event(ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, ngtcp2_cc_event_type event, ngtcp2_tstamp ts); +uint64_t ngtcp2_cbrt(uint64_t n); + #endif /* NGTCP2_CC_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c index a3135680ca160f..f40ab5626109e9 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c @@ -35,6 +35,11 @@ #include "ngtcp2_addr.h" #include "ngtcp2_path.h" #include "ngtcp2_rcvry.h" +#include "ngtcp2_unreachable.h" +#include "ngtcp2_net.h" +#include "ngtcp2_conversion.h" +#include "ngtcp2_tstamp.h" +#include "ngtcp2_frame_chain.h" /* NGTCP2_FLOW_WINDOW_RTT_FACTOR is the factor of RTT when flow control window auto-tuning is triggered. */ @@ -46,6 +51,8 @@ packet payload that should be coalesced to a long packet. */ #define NGTCP2_MIN_COALESCED_PAYLOADLEN 128 +ngtcp2_objalloc_def(strm, ngtcp2_strm, oplent); + /* * conn_local_stream returns nonzero if |stream_id| indicates that it * is the stream initiated by local endpoint. 
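
The context above references conn_local_stream() and shows bidi_stream(), both of which test the low two bits of a QUIC stream ID. Per RFC 9000, bit 0x1 of a stream ID encodes the initiator (0 = client, 1 = server) and bit 0x2 the directionality (0 = bidirectional, 1 = unidirectional), which is why bidi_stream() simply masks with 0x2. A minimal sketch of those bit tests (not part of the patch; helper names are illustrative):

#include <stdint.h>

/* Bit 0x2 clear means the stream is bidirectional. */
int stream_is_bidi(int64_t stream_id) {
  return (stream_id & 0x2) == 0;
}

/* Bit 0x1 set means the stream was initiated by the server. */
int stream_is_server_initiated(int64_t stream_id) {
  return (stream_id & 0x1) != 0;
}

/* So a client owns stream IDs 0, 4, 8, ... (bidi) and 2, 6, 10, ...
   (uni), while a server owns 1, 5, 9, ... (bidi) and 3, 7, 11, ...
   (uni). */
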
@@ -60,12 +67,20 @@ static int conn_local_stream(ngtcp2_conn *conn, int64_t stream_id) { */ static int bidi_stream(int64_t stream_id) { return (stream_id & 0x2) == 0; } +static void conn_update_timestamp(ngtcp2_conn *conn, ngtcp2_tstamp ts) { + assert(conn->log.last_ts <= ts); + assert(conn->qlog.last_ts <= ts); + + conn->log.last_ts = ts; + conn->qlog.last_ts = ts; +} + /* - * conn_is_handshake_completed returns nonzero if QUIC handshake has - * completed. + * conn_is_tls_handshake_completed returns nonzero if TLS handshake + * has completed and 1 RTT keys are available. */ -static int conn_is_handshake_completed(ngtcp2_conn *conn) { - return (conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED) && +static int conn_is_tls_handshake_completed(ngtcp2_conn *conn) { + return (conn->flags & NGTCP2_CONN_FLAG_TLS_HANDSHAKE_COMPLETED) && conn->pktns.crypto.rx.ckm && conn->pktns.crypto.tx.ckm; } @@ -118,14 +133,14 @@ static int conn_call_recv_stream_data(ngtcp2_conn *conn, ngtcp2_strm *strm, } static int conn_call_recv_crypto_data(ngtcp2_conn *conn, - ngtcp2_crypto_level crypto_level, + ngtcp2_encryption_level encryption_level, uint64_t offset, const uint8_t *data, size_t datalen) { int rv; assert(conn->callbacks.recv_crypto_data); - rv = conn->callbacks.recv_crypto_data(conn, crypto_level, offset, data, + rv = conn->callbacks.recv_crypto_data(conn, encryption_level, offset, data, datalen, conn->user_data); switch (rv) { case 0: @@ -267,6 +282,7 @@ static int conn_call_path_validation(ngtcp2_conn *conn, const ngtcp2_pv *pv, ngtcp2_path_validation_result res) { int rv; uint32_t flags = NGTCP2_PATH_VALIDATION_FLAG_NONE; + const ngtcp2_path *old_path = NULL; if (!conn->callbacks.path_validation) { return 0; @@ -276,8 +292,18 @@ static int conn_call_path_validation(ngtcp2_conn *conn, const ngtcp2_pv *pv, flags |= NGTCP2_PATH_VALIDATION_FLAG_PREFERRED_ADDR; } - rv = conn->callbacks.path_validation(conn, flags, &pv->dcid.ps.path, res, - conn->user_data); + if (pv->flags & NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE) { + old_path = &pv->fallback_dcid.ps.path; + } + + if (conn->server && old_path && + (ngtcp2_addr_compare(&pv->dcid.ps.path.remote, &old_path->remote) & + (NGTCP2_ADDR_COMPARE_FLAG_ADDR | NGTCP2_ADDR_COMPARE_FLAG_FAMILY))) { + flags |= NGTCP2_PATH_VALIDATION_FLAG_NEW_TOKEN; + } + + rv = conn->callbacks.path_validation(conn, flags, &pv->dcid.ps.path, old_path, + res, conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -294,10 +320,10 @@ static int conn_call_select_preferred_addr(ngtcp2_conn *conn, } assert(conn->remote.transport_params); - assert(conn->remote.transport_params->preferred_address_present); + assert(conn->remote.transport_params->preferred_addr_present); rv = conn->callbacks.select_preferred_addr( - conn, dest, &conn->remote.transport_params->preferred_address, + conn, dest, &conn->remote.transport_params->preferred_addr, conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; @@ -369,7 +395,7 @@ static int conn_call_dcid_status(ngtcp2_conn *conn, } rv = conn->callbacks.dcid_status( - conn, (int)type, dcid->seq, &dcid->cid, + conn, type, dcid->seq, &dcid->cid, (dcid->flags & NGTCP2_DCID_FLAG_TOKEN_PRESENT) ? 
dcid->token : NULL, conn->user_data); if (rv != 0) { @@ -505,15 +531,15 @@ conn_call_recv_stateless_reset(ngtcp2_conn *conn, return 0; } -static int conn_call_recv_new_token(ngtcp2_conn *conn, - const ngtcp2_vec *token) { +static int conn_call_recv_new_token(ngtcp2_conn *conn, const uint8_t *token, + size_t tokenlen) { int rv; if (!conn->callbacks.recv_new_token) { return 0; } - rv = conn->callbacks.recv_new_token(conn, token, conn->user_data); + rv = conn->callbacks.recv_new_token(conn, token, tokenlen, conn->user_data); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -557,8 +583,8 @@ static int conn_call_recv_datagram(ngtcp2_conn *conn, datalen = 0; } - if (!conn_is_handshake_completed(conn)) { - flags |= NGTCP2_DATAGRAM_FLAG_EARLY; + if (!conn_is_tls_handshake_completed(conn)) { + flags |= NGTCP2_DATAGRAM_FLAG_0RTT; } rv = conn->callbacks.recv_datagram(conn, flags, data, datalen, @@ -605,7 +631,8 @@ static int conn_call_version_negotiation(ngtcp2_conn *conn, uint32_t version, return 0; } -static int conn_call_recv_rx_key(ngtcp2_conn *conn, ngtcp2_crypto_level level) { +static int conn_call_recv_rx_key(ngtcp2_conn *conn, + ngtcp2_encryption_level level) { int rv; if (!conn->callbacks.recv_rx_key) { @@ -620,7 +647,8 @@ static int conn_call_recv_rx_key(ngtcp2_conn *conn, ngtcp2_crypto_level level) { return 0; } -static int conn_call_recv_tx_key(ngtcp2_conn *conn, ngtcp2_crypto_level level) { +static int conn_call_recv_tx_key(ngtcp2_conn *conn, + ngtcp2_encryption_level level) { int rv; if (!conn->callbacks.recv_tx_key) { @@ -635,14 +663,10 @@ static int conn_call_recv_tx_key(ngtcp2_conn *conn, ngtcp2_crypto_level level) { return 0; } -static int crypto_offset_less(const ngtcp2_ksl_key *lhs, - const ngtcp2_ksl_key *rhs) { - return *(int64_t *)lhs < *(int64_t *)rhs; -} - static int pktns_init(ngtcp2_pktns *pktns, ngtcp2_pktns_id pktns_id, - ngtcp2_rst *rst, ngtcp2_cc *cc, ngtcp2_log *log, - ngtcp2_qlog *qlog, ngtcp2_objalloc *rtb_entry_objalloc, + ngtcp2_rst *rst, ngtcp2_cc *cc, int64_t initial_pkt_num, + ngtcp2_log *log, ngtcp2_qlog *qlog, + ngtcp2_objalloc *rtb_entry_objalloc, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { int rv; @@ -650,7 +674,8 @@ static int pktns_init(ngtcp2_pktns *pktns, ngtcp2_pktns_id pktns_id, ngtcp2_gaptr_init(&pktns->rx.pngap, mem); - pktns->tx.last_pkt_num = -1; + pktns->tx.last_pkt_num = initial_pkt_num - 1; + pktns->tx.non_ack_pkt_start_ts = UINT64_MAX; pktns->rx.max_pkt_num = -1; pktns->rx.max_ack_eliciting_pkt_num = -1; @@ -660,14 +685,12 @@ static int pktns_init(ngtcp2_pktns *pktns, ngtcp2_pktns_id pktns_id, } ngtcp2_strm_init(&pktns->crypto.strm, 0, NGTCP2_STRM_FLAG_NONE, 0, 0, NULL, - NULL, mem); + frc_objalloc, mem); - ngtcp2_ksl_init(&pktns->crypto.tx.frq, crypto_offset_less, sizeof(uint64_t), + ngtcp2_rtb_init(&pktns->rtb, pktns_id, &pktns->crypto.strm, rst, cc, + initial_pkt_num, log, qlog, rtb_entry_objalloc, frc_objalloc, mem); - ngtcp2_rtb_init(&pktns->rtb, pktns_id, &pktns->crypto.strm, rst, cc, log, - qlog, rtb_entry_objalloc, frc_objalloc, mem); - return 0; fail_acktr_init: @@ -677,8 +700,9 @@ static int pktns_init(ngtcp2_pktns *pktns, ngtcp2_pktns_id pktns_id, } static int pktns_new(ngtcp2_pktns **ppktns, ngtcp2_pktns_id pktns_id, - ngtcp2_rst *rst, ngtcp2_cc *cc, ngtcp2_log *log, - ngtcp2_qlog *qlog, ngtcp2_objalloc *rtb_entry_objalloc, + ngtcp2_rst *rst, ngtcp2_cc *cc, int64_t initial_pkt_num, + ngtcp2_log *log, ngtcp2_qlog *qlog, + ngtcp2_objalloc *rtb_entry_objalloc, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { 
int rv; @@ -687,8 +711,8 @@ static int pktns_new(ngtcp2_pktns **ppktns, ngtcp2_pktns_id pktns_id, return NGTCP2_ERR_NOMEM; } - rv = pktns_init(*ppktns, pktns_id, rst, cc, log, qlog, rtb_entry_objalloc, - frc_objalloc, mem); + rv = pktns_init(*ppktns, pktns_id, rst, cc, initial_pkt_num, log, qlog, + rtb_entry_objalloc, frc_objalloc, mem); if (rv != 0) { ngtcp2_mem_free(mem, *ppktns); } @@ -729,9 +753,6 @@ static void delete_buf_chain(ngtcp2_buf_chain *bufchain, } static void pktns_free(ngtcp2_pktns *pktns, const ngtcp2_mem *mem) { - ngtcp2_frame_chain *frc; - ngtcp2_ksl_it it; - delete_buf_chain(pktns->crypto.tx.data, mem); delete_buffed_pkts(pktns->rx.buffed_pkts, mem); @@ -742,13 +763,6 @@ static void pktns_free(ngtcp2_pktns *pktns, const ngtcp2_mem *mem) { ngtcp2_crypto_km_del(pktns->crypto.rx.ckm, mem); ngtcp2_crypto_km_del(pktns->crypto.tx.ckm, mem); - for (it = ngtcp2_ksl_begin(&pktns->crypto.tx.frq); !ngtcp2_ksl_it_end(&it); - ngtcp2_ksl_it_next(&it)) { - frc = ngtcp2_ksl_it_get(&it); - ngtcp2_frame_chain_objalloc_del(frc, pktns->rtb.frc_objalloc, mem); - } - - ngtcp2_ksl_free(&pktns->crypto.tx.frq); ngtcp2_rtb_free(&pktns->rtb); ngtcp2_strm_free(&pktns->crypto.strm); ngtcp2_acktr_free(&pktns->acktr); @@ -765,26 +779,6 @@ static void pktns_del(ngtcp2_pktns *pktns, const ngtcp2_mem *mem) { ngtcp2_mem_free(mem, pktns); } -static void cc_del(ngtcp2_cc *cc, ngtcp2_cc_algo cc_algo, - const ngtcp2_mem *mem) { - switch (cc_algo) { - case NGTCP2_CC_ALGO_RENO: - ngtcp2_cc_reno_cc_free(cc, mem); - break; - case NGTCP2_CC_ALGO_CUBIC: - ngtcp2_cc_cubic_cc_free(cc, mem); - break; - case NGTCP2_CC_ALGO_BBR: - ngtcp2_cc_bbr_cc_free(cc, mem); - break; - case NGTCP2_CC_ALGO_BBR2: - ngtcp2_cc_bbr2_cc_free(cc, mem); - break; - default: - break; - } -} - static int cid_less(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { return ngtcp2_cid_less(lhs, rhs); } @@ -810,13 +804,13 @@ static void conn_reset_conn_stat_cc(ngtcp2_conn *conn, cstat->pto_count = 0; cstat->loss_detection_timer = UINT64_MAX; cstat->cwnd = - ngtcp2_cc_compute_initcwnd(conn->local.settings.max_udp_payload_size); + ngtcp2_cc_compute_initcwnd(conn->local.settings.max_tx_udp_payload_size); cstat->ssthresh = UINT64_MAX; cstat->congestion_recovery_start_ts = UINT64_MAX; cstat->bytes_in_flight = 0; cstat->delivery_rate_sec = 0; - cstat->pacing_rate = 0.0; - cstat->send_quantum = SIZE_MAX; + cstat->pacing_interval = 0; + cstat->send_quantum = 64 * 1024; } /* @@ -896,6 +890,25 @@ ngtcp2_duration ngtcp2_conn_compute_pto(ngtcp2_conn *conn, return conn_compute_pto(conn, pktns); } +/* + * conn_compute_pv_timeout_pto returns path validation timeout using + * the given |pto|. + */ +static ngtcp2_duration conn_compute_pv_timeout_pto(ngtcp2_conn *conn, + ngtcp2_duration pto) { + ngtcp2_duration initial_pto = conn_compute_initial_pto(conn, &conn->pktns); + + return 3 * ngtcp2_max(pto, initial_pto); +} + +/* + * conn_compute_pv_timeout returns path validation timeout. 
+ */ +static ngtcp2_duration conn_compute_pv_timeout(ngtcp2_conn *conn) { + return conn_compute_pv_timeout_pto(conn, + conn_compute_pto(conn, &conn->pktns)); +} + static void conn_handle_tx_ecn(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint16_t *prtb_entry_flags, ngtcp2_pktns *pktns, const ngtcp2_pkt_hd *hd, ngtcp2_tstamp ts) { @@ -959,7 +972,7 @@ static void conn_handle_tx_ecn(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, case NGTCP2_ECN_STATE_FAILED: break; default: - assert(0); + ngtcp2_unreachable(); } } @@ -989,14 +1002,14 @@ static void conn_reset_ecn_validation_state(ngtcp2_conn *conn) { pktns->tx.ecn.validation_pkt_lost = 0; } -/* server_default_other_versions is the default other_versions field - sent by server. */ -static uint8_t server_default_other_versions[] = {0, 0, 0, 1}; +/* server_default_available_versions is the default available_versions + field sent by server. */ +static uint8_t server_default_available_versions[] = {0, 0, 0, 1}; /* - * other_versions_new allocates new buffer, and writes |versions| of - * length |versionslen| in network byte order, suitable for sending in - * other_versions field of version_information QUIC transport + * available_versions_new allocates new buffer, and writes |versions| + * of length |versionslen| in network byte order, suitable for sending + * in available_versions field of version_information QUIC transport * parameter. The pointer to the allocated buffer is assigned to * |*pbuf|. * @@ -1006,8 +1019,8 @@ static uint8_t server_default_other_versions[] = {0, 0, 0, 1}; * NGTCP2_ERR_NOMEM * Out of memory. */ -static int other_versions_new(uint8_t **pbuf, const uint32_t *versions, - size_t versionslen, const ngtcp2_mem *mem) { +static int available_versions_new(uint8_t **pbuf, const uint32_t *versions, + size_t versionslen, const ngtcp2_mem *mem) { size_t i; uint8_t *buf = ngtcp2_mem_malloc(mem, sizeof(uint32_t) * versionslen); @@ -1032,16 +1045,13 @@ conn_set_local_transport_params(ngtcp2_conn *conn, *p = *params; - /* grease_quic_bit is always enabled. 
*/ - p->grease_quic_bit = 1; - if (conn->server) { p->version_info.chosen_version = chosen_version; } else { p->version_info.chosen_version = conn->client_chosen_version; } - p->version_info.other_versions = conn->vneg.other_versions; - p->version_info.other_versionslen = conn->vneg.other_versionslen; + p->version_info.available_versions = conn->vneg.available_versions; + p->version_info.available_versionslen = conn->vneg.available_versionslen; p->version_info_present = 1; } @@ -1059,19 +1069,33 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, uint8_t fixed_bit_byte; size_t i; uint32_t *preferred_versions; + ngtcp2_transport_params paramsbuf; (void)callbacks_version; (void)settings_version; - (void)transport_params_version; + + params = ngtcp2_transport_params_convert_to_latest( + ¶msbuf, transport_params_version, params); assert(settings->max_window <= NGTCP2_MAX_VARINT); assert(settings->max_stream_window <= NGTCP2_MAX_VARINT); - assert(settings->max_udp_payload_size); - assert(settings->max_udp_payload_size <= NGTCP2_HARD_MAX_UDP_PAYLOAD_SIZE); + assert(settings->max_tx_udp_payload_size); + assert(settings->max_tx_udp_payload_size <= NGTCP2_HARD_MAX_UDP_PAYLOAD_SIZE); + assert(settings->initial_pkt_num <= INT32_MAX); + assert(params->active_connection_id_limit >= + NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT); assert(params->active_connection_id_limit <= NGTCP2_MAX_DCID_POOL_SIZE); assert(params->initial_max_data <= NGTCP2_MAX_VARINT); assert(params->initial_max_stream_data_bidi_local <= NGTCP2_MAX_VARINT); assert(params->initial_max_stream_data_bidi_remote <= NGTCP2_MAX_VARINT); assert(params->initial_max_stream_data_uni <= NGTCP2_MAX_VARINT); + assert((server && params->original_dcid_present) || + (!server && !params->original_dcid_present)); + assert(!params->initial_scid_present); + assert(server || !params->stateless_reset_token_present); + assert(server || !params->preferred_addr_present); + assert(server || !params->retry_scid_present); + assert(params->max_idle_timeout != UINT64_MAX); + assert(params->max_ack_delay < (1 << 14) * NGTCP2_MILLISECONDS); assert(server || callbacks->client_initial); assert(!server || callbacks->recv_client_initial); assert(callbacks->recv_crypto_data); @@ -1097,6 +1121,8 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, goto fail_conn; } + (*pconn)->server = server; + ngtcp2_objalloc_frame_chain_init(&(*pconn)->frc_objalloc, 64, mem); ngtcp2_objalloc_rtb_entry_init(&(*pconn)->rtb_entry_objalloc, 64, mem); ngtcp2_objalloc_strm_init(&(*pconn)->strm_objalloc, 64, mem); @@ -1125,7 +1151,7 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, ngtcp2_log_init(&(*pconn)->log, scid, settings->log_printf, settings->initial_ts, user_data); - ngtcp2_qlog_init(&(*pconn)->qlog, settings->qlog.write, settings->initial_ts, + ngtcp2_qlog_init(&(*pconn)->qlog, settings->qlog_write, settings->initial_ts, user_data); if ((*pconn)->qlog.write) { buf = ngtcp2_mem_malloc(mem, NGTCP2_QLOG_BUFLEN); @@ -1138,17 +1164,16 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, (*pconn)->local.settings = *settings; - if (settings->token.len) { - buf = ngtcp2_mem_malloc(mem, settings->token.len); + if (settings->tokenlen) { + buf = ngtcp2_mem_malloc(mem, settings->tokenlen); if (buf == NULL) { rv = NGTCP2_ERR_NOMEM; goto fail_token; } - memcpy(buf, settings->token.base, settings->token.len); - (*pconn)->local.settings.token.base = buf; + memcpy(buf, settings->token, settings->tokenlen); + (*pconn)->local.settings.token 
= buf; } else { - (*pconn)->local.settings.token.base = NULL; - (*pconn)->local.settings.token.len = 0; + (*pconn)->local.settings.token = NULL; } if (!(*pconn)->local.settings.original_version) { @@ -1157,8 +1182,8 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, conn_reset_conn_stat(*pconn, &(*pconn)->cstat); (*pconn)->cstat.initial_rtt = settings->initial_rtt; - (*pconn)->cstat.max_udp_payload_size = - (*pconn)->local.settings.max_udp_payload_size; + (*pconn)->cstat.max_tx_udp_payload_size = + (*pconn)->local.settings.max_tx_udp_payload_size; ngtcp2_rst_init(&(*pconn)->rst); @@ -1166,54 +1191,43 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, switch (settings->cc_algo) { case NGTCP2_CC_ALGO_RENO: - rv = ngtcp2_cc_reno_cc_init(&(*pconn)->cc, &(*pconn)->log, mem); - if (rv != 0) { - goto fail_cc_init; - } + ngtcp2_cc_reno_init(&(*pconn)->reno, &(*pconn)->log); + break; case NGTCP2_CC_ALGO_CUBIC: - rv = ngtcp2_cc_cubic_cc_init(&(*pconn)->cc, &(*pconn)->log, mem); - if (rv != 0) { - goto fail_cc_init; - } + ngtcp2_cc_cubic_init(&(*pconn)->cubic, &(*pconn)->log); + break; case NGTCP2_CC_ALGO_BBR: - rv = ngtcp2_cc_bbr_cc_init(&(*pconn)->cc, &(*pconn)->log, &(*pconn)->cstat, - &(*pconn)->rst, settings->initial_ts, - callbacks->rand, &settings->rand_ctx, mem); - if (rv != 0) { - goto fail_cc_init; - } - break; - case NGTCP2_CC_ALGO_BBR2: - rv = ngtcp2_cc_bbr2_cc_init(&(*pconn)->cc, &(*pconn)->log, &(*pconn)->cstat, - &(*pconn)->rst, settings->initial_ts, - callbacks->rand, &settings->rand_ctx, mem); - if (rv != 0) { - goto fail_cc_init; - } + ngtcp2_cc_bbr_init(&(*pconn)->bbr, &(*pconn)->log, &(*pconn)->cstat, + &(*pconn)->rst, settings->initial_ts, callbacks->rand, + &settings->rand_ctx); + break; default: - assert(0); + ngtcp2_unreachable(); } rv = pktns_new(&(*pconn)->in_pktns, NGTCP2_PKTNS_ID_INITIAL, &(*pconn)->rst, - &(*pconn)->cc, &(*pconn)->log, &(*pconn)->qlog, - &(*pconn)->rtb_entry_objalloc, &(*pconn)->frc_objalloc, mem); + &(*pconn)->cc, settings->initial_pkt_num, &(*pconn)->log, + &(*pconn)->qlog, &(*pconn)->rtb_entry_objalloc, + &(*pconn)->frc_objalloc, mem); if (rv != 0) { goto fail_in_pktns_init; } rv = pktns_new(&(*pconn)->hs_pktns, NGTCP2_PKTNS_ID_HANDSHAKE, &(*pconn)->rst, - &(*pconn)->cc, &(*pconn)->log, &(*pconn)->qlog, - &(*pconn)->rtb_entry_objalloc, &(*pconn)->frc_objalloc, mem); + &(*pconn)->cc, settings->initial_pkt_num, &(*pconn)->log, + &(*pconn)->qlog, &(*pconn)->rtb_entry_objalloc, + &(*pconn)->frc_objalloc, mem); if (rv != 0) { goto fail_hs_pktns_init; } rv = pktns_init(&(*pconn)->pktns, NGTCP2_PKTNS_ID_APPLICATION, &(*pconn)->rst, - &(*pconn)->cc, &(*pconn)->log, &(*pconn)->qlog, - &(*pconn)->rtb_entry_objalloc, &(*pconn)->frc_objalloc, mem); + &(*pconn)->cc, settings->initial_pkt_num, &(*pconn)->log, + &(*pconn)->qlog, &(*pconn)->rtb_entry_objalloc, + &(*pconn)->frc_objalloc, mem); if (rv != 0) { goto fail_pktns_init; } @@ -1271,56 +1285,63 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, (*pconn)->vneg.preferred_versionslen = settings->preferred_versionslen; } - if (settings->other_versionslen) { + (*pconn)->local.settings.preferred_versions = NULL; + (*pconn)->local.settings.preferred_versionslen = 0; + + if (settings->available_versionslen) { if (!server && !ngtcp2_is_reserved_version(client_chosen_version)) { - for (i = 0; i < settings->other_versionslen; ++i) { - if (settings->other_versions[i] == client_chosen_version) { + for (i = 0; i < settings->available_versionslen; ++i) { + if 
(settings->available_versions[i] == client_chosen_version) { break; } } - assert(i < settings->other_versionslen); + assert(i < settings->available_versionslen); } - for (i = 0; i < settings->other_versionslen; ++i) { - assert(ngtcp2_is_reserved_version(settings->other_versions[i]) || - ngtcp2_is_supported_version(settings->other_versions[i])); + for (i = 0; i < settings->available_versionslen; ++i) { + assert(ngtcp2_is_reserved_version(settings->available_versions[i]) || + ngtcp2_is_supported_version(settings->available_versions[i])); } - rv = other_versions_new(&buf, settings->other_versions, - settings->other_versionslen, mem); + rv = available_versions_new(&buf, settings->available_versions, + settings->available_versionslen, mem); if (rv != 0) { - goto fail_other_versions; + goto fail_available_versions; } - (*pconn)->vneg.other_versions = buf; - (*pconn)->vneg.other_versionslen = - sizeof(uint32_t) * settings->other_versionslen; + (*pconn)->vneg.available_versions = buf; + (*pconn)->vneg.available_versionslen = + sizeof(uint32_t) * settings->available_versionslen; } else if (server) { if (settings->preferred_versionslen) { - rv = other_versions_new(&buf, settings->preferred_versions, - settings->preferred_versionslen, mem); + rv = available_versions_new(&buf, settings->preferred_versions, + settings->preferred_versionslen, mem); if (rv != 0) { - goto fail_other_versions; + goto fail_available_versions; } - (*pconn)->vneg.other_versions = buf; - (*pconn)->vneg.other_versionslen = + (*pconn)->vneg.available_versions = buf; + (*pconn)->vneg.available_versionslen = sizeof(uint32_t) * settings->preferred_versionslen; } else { - (*pconn)->vneg.other_versions = server_default_other_versions; - (*pconn)->vneg.other_versionslen = sizeof(server_default_other_versions); + (*pconn)->vneg.available_versions = server_default_available_versions; + (*pconn)->vneg.available_versionslen = + sizeof(server_default_available_versions); } } else if (!server && !ngtcp2_is_reserved_version(client_chosen_version)) { - rv = other_versions_new(&buf, &client_chosen_version, 1, mem); + rv = available_versions_new(&buf, &client_chosen_version, 1, mem); if (rv != 0) { - goto fail_other_versions; + goto fail_available_versions; } - (*pconn)->vneg.other_versions = buf; - (*pconn)->vneg.other_versionslen = sizeof(uint32_t); + (*pconn)->vneg.available_versions = buf; + (*pconn)->vneg.available_versionslen = sizeof(uint32_t); } + (*pconn)->local.settings.available_versions = NULL; + (*pconn)->local.settings.available_versionslen = 0; + (*pconn)->client_chosen_version = client_chosen_version; conn_set_local_transport_params(*pconn, params); @@ -1331,8 +1352,8 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, } (*pconn)->keep_alive.last_ts = UINT64_MAX; + (*pconn)->keep_alive.timeout = UINT64_MAX; - (*pconn)->server = server; (*pconn)->oscid = *scid; (*pconn)->callbacks = *callbacks; (*pconn)->mem = mem; @@ -1341,16 +1362,22 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, (*pconn)->crypto.key_update.confirmed_ts = UINT64_MAX; (*pconn)->tx.last_max_data_ts = UINT64_MAX; (*pconn)->tx.pacing.next_ts = UINT64_MAX; + (*pconn)->tx.last_blocked_offset = UINT64_MAX; (*pconn)->early.discard_started_ts = UINT64_MAX; conn_reset_ecn_validation_state(*pconn); - ngtcp2_qlog_start(&(*pconn)->qlog, server ? &settings->qlog.odcid : dcid, - server); + ngtcp2_qlog_start( + &(*pconn)->qlog, + server ? ((*pconn)->local.transport_params.retry_scid_present + ? 
&(*pconn)->local.transport_params.retry_scid + : &(*pconn)->local.transport_params.original_dcid) + : dcid, + server); return 0; -fail_other_versions: +fail_available_versions: ngtcp2_mem_free(mem, (*pconn)->vneg.preferred_versions); fail_preferred_versions: fail_seqgap_push: @@ -1363,9 +1390,7 @@ static int conn_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, fail_hs_pktns_init: pktns_del((*pconn)->in_pktns, mem); fail_in_pktns_init: - cc_del(&(*pconn)->cc, settings->cc_algo, mem); -fail_cc_init: - ngtcp2_mem_free(mem, (*pconn)->local.settings.token.base); + ngtcp2_mem_free(mem, (uint8_t *)(*pconn)->local.settings.token); fail_token: ngtcp2_mem_free(mem, (*pconn)->qlog.buf.begin); fail_qlog_buf: @@ -1432,7 +1457,7 @@ int ngtcp2_conn_server_new_versioned( (*pconn)->local.bidi.next_stream_id = 1; (*pconn)->local.uni.next_stream_id = 3; - if ((*pconn)->local.settings.token.len) { + if ((*pconn)->local.settings.tokenlen) { /* Usage of token lifts amplification limit */ (*pconn)->dcid.current.flags |= NGTCP2_DCID_FLAG_PATH_VALIDATED; } @@ -1558,13 +1583,13 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { conn_vneg_crypto_free(conn); ngtcp2_mem_free(conn->mem, conn->vneg.preferred_versions); - if (conn->vneg.other_versions != server_default_other_versions) { - ngtcp2_mem_free(conn->mem, conn->vneg.other_versions); + if (conn->vneg.available_versions != server_default_available_versions) { + ngtcp2_mem_free(conn->mem, conn->vneg.available_versions); } ngtcp2_mem_free(conn->mem, conn->crypto.decrypt_buf.base); ngtcp2_mem_free(conn->mem, conn->crypto.decrypt_hp_buf.base); - ngtcp2_mem_free(conn->mem, conn->local.settings.token.base); + ngtcp2_mem_free(conn->mem, (uint8_t *)conn->local.settings.token); ngtcp2_crypto_km_del(conn->crypto.key_update.old_rx_ckm, conn->mem); ngtcp2_crypto_km_del(conn->crypto.key_update.new_rx_ckm, conn->mem); @@ -1575,14 +1600,12 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { pktns_del(conn->hs_pktns, conn->mem); pktns_del(conn->in_pktns, conn->mem); - cc_del(&conn->cc, conn->cc_algo, conn->mem); - ngtcp2_mem_free(conn->mem, conn->qlog.buf.begin); ngtcp2_pmtud_del(conn->pmtud); ngtcp2_pv_del(conn->pv); - ngtcp2_mem_free(conn->mem, conn->rx.ccerr.reason); + ngtcp2_mem_free(conn->mem, (uint8_t *)conn->rx.ccerr.reason); ngtcp2_idtr_free(&conn->remote.uni.idtr); ngtcp2_idtr_free(&conn->remote.bidi.idtr); @@ -1604,8 +1627,8 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { } /* - * conn_ensure_ack_blks makes sure that conn->tx.ack->ack.blks can - * contain at least |n| additional ngtcp2_ack_blk. + * conn_ensure_ack_ranges makes sure that conn->tx.ack->ack.ranges can + * contain at least |n| additional ngtcp2_ack_range. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -1613,9 +1636,9 @@ void ngtcp2_conn_del(ngtcp2_conn *conn) { * NGTCP2_ERR_NOMEM * Out of memory. 
*/ -static int conn_ensure_ack_blks(ngtcp2_conn *conn, size_t n) { +static int conn_ensure_ack_ranges(ngtcp2_conn *conn, size_t n) { ngtcp2_frame *fr; - size_t max = conn->tx.max_ack_blks; + size_t max = conn->tx.max_ack_ranges; if (n <= max) { return 0; @@ -1626,13 +1649,13 @@ static int conn_ensure_ack_blks(ngtcp2_conn *conn, size_t n) { assert(max >= n); fr = ngtcp2_mem_realloc(conn->mem, conn->tx.ack, - sizeof(ngtcp2_ack) + sizeof(ngtcp2_ack_blk) * max); + sizeof(ngtcp2_ack) + sizeof(ngtcp2_ack_range) * max); if (fr == NULL) { return NGTCP2_ERR_NOMEM; } conn->tx.ack = fr; - conn->tx.max_ack_blks = max; + conn->tx.max_ack_ranges = max; return 0; } @@ -1646,40 +1669,20 @@ static ngtcp2_duration conn_compute_ack_delay(ngtcp2_conn *conn) { conn->cstat.smoothed_rtt / 8); } -/* - * conn_create_ack_frame creates ACK frame, and assigns its pointer to - * |*pfr| if there are any received packets to acknowledge. If there - * are no packets to acknowledge, this function returns 0, and |*pfr| - * is untouched. The caller is advised to set |*pfr| to NULL before - * calling this function, and check it after this function returns. - * If |nodelay| is nonzero, delayed ACK timer is ignored. - * - * The memory for ACK frame is dynamically allocated by this function. - * A caller is responsible to free it. - * - * Call ngtcp2_acktr_commit_ack after a created ACK frame is - * successfully serialized into a packet. - * - * This function returns 0 if it succeeds, or one of the following - * negative error codes: - * - * NGTCP2_ERR_NOMEM - * Out of memory. - */ -static int conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, +int ngtcp2_conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, ngtcp2_pktns *pktns, uint8_t type, ngtcp2_tstamp ts, ngtcp2_duration ack_delay, uint64_t ack_delay_exponent) { /* TODO Measure an actual size of ACK blocks to find the best default value. 
*/ - const size_t initial_max_ack_blks = 8; + const size_t initial_max_ack_ranges = 8; int64_t last_pkt_num; ngtcp2_acktr *acktr = &pktns->acktr; - ngtcp2_ack_blk *blk; + ngtcp2_ack_range *range; ngtcp2_ksl_it it; ngtcp2_acktr_entry *rpkt; ngtcp2_ack *ack; - size_t blk_idx; + size_t range_idx; ngtcp2_tstamp largest_ack_ts; int rv; @@ -1700,11 +1703,11 @@ static int conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, if (conn->tx.ack == NULL) { conn->tx.ack = ngtcp2_mem_malloc( conn->mem, - sizeof(ngtcp2_ack) + sizeof(ngtcp2_ack_blk) * initial_max_ack_blks); + sizeof(ngtcp2_ack) + sizeof(ngtcp2_ack_range) * initial_max_ack_ranges); if (conn->tx.ack == NULL) { return NGTCP2_ERR_NOMEM; } - conn->tx.max_ack_blks = initial_max_ack_blks; + conn->tx.max_ack_ranges = initial_max_ack_ranges; } ack = &conn->tx.ack->ack; @@ -1717,7 +1720,7 @@ static int conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, } else { ack->type = NGTCP2_FRAME_ACK; } - ack->num_blks = 0; + ack->rangecnt = 0; rpkt = ngtcp2_ksl_it_get(&it); @@ -1725,7 +1728,14 @@ static int conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, last_pkt_num = rpkt->pkt_num - (int64_t)(rpkt->len - 1); largest_ack_ts = rpkt->tstamp; ack->largest_ack = rpkt->pkt_num; - ack->first_ack_blklen = rpkt->len - 1; + ack->first_ack_range = rpkt->len - 1; + + ngtcp2_ksl_it_next(&it); + } else if (rpkt->pkt_num + 1 == pktns->rx.max_pkt_num) { + last_pkt_num = rpkt->pkt_num - (int64_t)(rpkt->len - 1); + largest_ack_ts = pktns->rx.max_pkt_ts; + ack->largest_ack = pktns->rx.max_pkt_num; + ack->first_ack_range = rpkt->len; ngtcp2_ksl_it_next(&it); } else { @@ -1734,7 +1744,7 @@ static int conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, last_pkt_num = pktns->rx.max_pkt_num; largest_ack_ts = pktns->rx.max_pkt_ts; ack->largest_ack = pktns->rx.max_pkt_num; - ack->first_ack_blklen = 0; + ack->first_ack_range = 0; } if (type == NGTCP2_PKT_1RTT) { @@ -1747,21 +1757,21 @@ static int conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, } for (; !ngtcp2_ksl_it_end(&it); ngtcp2_ksl_it_next(&it)) { - if (ack->num_blks == NGTCP2_MAX_ACK_BLKS) { + if (ack->rangecnt == NGTCP2_MAX_ACK_RANGES) { break; } rpkt = ngtcp2_ksl_it_get(&it); - blk_idx = ack->num_blks++; - rv = conn_ensure_ack_blks(conn, ack->num_blks); + range_idx = ack->rangecnt++; + rv = conn_ensure_ack_ranges(conn, ack->rangecnt); if (rv != 0) { return rv; } ack = &conn->tx.ack->ack; - blk = &ack->blks[blk_idx]; - blk->gap = (uint64_t)(last_pkt_num - rpkt->pkt_num - 2); - blk->blklen = rpkt->len - 1; + range = &ack->ranges[range_idx]; + range->gap = (uint64_t)(last_pkt_num - rpkt->pkt_num - 2); + range->len = rpkt->len - 1; last_pkt_num = rpkt->pkt_num - (int64_t)(rpkt->len - 1); } @@ -1888,7 +1898,7 @@ static size_t pktns_select_pkt_numlen(ngtcp2_pktns *pktns) { */ static uint64_t conn_get_cwnd(ngtcp2_conn *conn) { return conn->pv && (conn->pv->flags & NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE) - ? ngtcp2_cc_compute_initcwnd(conn->cstat.max_udp_payload_size) + ? ngtcp2_cc_compute_initcwnd(conn->cstat.max_tx_udp_payload_size) : conn->cstat.cwnd; } @@ -1896,12 +1906,12 @@ static uint64_t conn_get_cwnd(ngtcp2_conn *conn) { * conn_cwnd_is_zero returns nonzero if the number of bytes the local * endpoint can sent at this time is zero. 
*/ -static uint64_t conn_cwnd_is_zero(ngtcp2_conn *conn) { +static int conn_cwnd_is_zero(ngtcp2_conn *conn) { uint64_t bytes_in_flight = conn->cstat.bytes_in_flight; uint64_t cwnd = conn_get_cwnd(conn); if (bytes_in_flight >= cwnd) { - ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_LDC, "cwnd limited bytes_in_flight=%lu cwnd=%lu", bytes_in_flight, cwnd); } @@ -1943,349 +1953,6 @@ static uint64_t conn_retry_early_payloadlen(ngtcp2_conn *conn) { return 0; } -static void conn_cryptofrq_clear(ngtcp2_conn *conn, ngtcp2_pktns *pktns) { - ngtcp2_frame_chain *frc; - ngtcp2_ksl_it it; - - for (it = ngtcp2_ksl_begin(&pktns->crypto.tx.frq); !ngtcp2_ksl_it_end(&it); - ngtcp2_ksl_it_next(&it)) { - frc = ngtcp2_ksl_it_get(&it); - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - } - ngtcp2_ksl_clear(&pktns->crypto.tx.frq); -} - -/* - * conn_cryptofrq_unacked_offset returns the CRYPTO frame offset by - * taking into account acknowledged offset. If there is no data to - * send, this function returns (uint64_t)-1. - */ -static uint64_t conn_cryptofrq_unacked_offset(ngtcp2_conn *conn, - ngtcp2_pktns *pktns) { - ngtcp2_frame_chain *frc; - ngtcp2_crypto *fr; - ngtcp2_range gap; - ngtcp2_rtb *rtb = &pktns->rtb; - ngtcp2_ksl_it it; - uint64_t datalen; - - (void)conn; - - for (it = ngtcp2_ksl_begin(&pktns->crypto.tx.frq); !ngtcp2_ksl_it_end(&it); - ngtcp2_ksl_it_next(&it)) { - frc = ngtcp2_ksl_it_get(&it); - fr = &frc->fr.crypto; - - gap = ngtcp2_strm_get_unacked_range_after(rtb->crypto, fr->offset); - - datalen = ngtcp2_vec_len(fr->data, fr->datacnt); - - if (gap.begin <= fr->offset) { - return fr->offset; - } - if (gap.begin < fr->offset + datalen) { - return gap.begin; - } - } - - return (uint64_t)-1; -} - -static int conn_cryptofrq_unacked_pop(ngtcp2_conn *conn, ngtcp2_pktns *pktns, - ngtcp2_frame_chain **pfrc) { - ngtcp2_frame_chain *frc, *nfrc; - ngtcp2_crypto *fr, *nfr; - uint64_t offset, end_offset; - size_t idx, end_idx; - uint64_t base_offset, end_base_offset; - ngtcp2_range gap; - ngtcp2_rtb *rtb = &pktns->rtb; - ngtcp2_vec *v; - int rv; - ngtcp2_ksl_it it; - - *pfrc = NULL; - - for (it = ngtcp2_ksl_begin(&pktns->crypto.tx.frq); !ngtcp2_ksl_it_end(&it);) { - frc = ngtcp2_ksl_it_get(&it); - fr = &frc->fr.crypto; - - ngtcp2_ksl_remove_hint(&pktns->crypto.tx.frq, &it, &it, &fr->offset); - - idx = 0; - offset = fr->offset; - base_offset = 0; - - gap = ngtcp2_strm_get_unacked_range_after(rtb->crypto, offset); - if (gap.begin < offset) { - gap.begin = offset; - } - - for (; idx < fr->datacnt && offset < gap.begin; ++idx) { - v = &fr->data[idx]; - if (offset + v->len > gap.begin) { - base_offset = gap.begin - offset; - break; - } - - offset += v->len; - } - - if (idx == fr->datacnt) { - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - continue; - } - - assert(gap.begin == offset + base_offset); - - end_idx = idx; - end_offset = offset; - end_base_offset = 0; - - for (; end_idx < fr->datacnt; ++end_idx) { - v = &fr->data[end_idx]; - if (end_offset + v->len > gap.end) { - end_base_offset = gap.end - end_offset; - break; - } - - end_offset += v->len; - } - - if (fr->offset == offset && base_offset == 0 && fr->datacnt == end_idx) { - *pfrc = frc; - return 0; - } - - if (fr->datacnt == end_idx) { - memmove(fr->data, fr->data + idx, sizeof(fr->data[0]) * (end_idx - idx)); - - assert(fr->data[0].len > base_offset); - - fr->offset = offset + base_offset; - fr->datacnt = end_idx - idx; - fr->data[0].base += base_offset; - 
fr->data[0].len -= (size_t)base_offset; - - *pfrc = frc; - return 0; - } - - rv = ngtcp2_frame_chain_crypto_datacnt_objalloc_new( - &nfrc, fr->datacnt - end_idx, &conn->frc_objalloc, conn->mem); - if (rv != 0) { - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - return rv; - } - - nfr = &nfrc->fr.crypto; - nfr->type = NGTCP2_FRAME_CRYPTO; - memcpy(nfr->data, fr->data + end_idx, - sizeof(nfr->data[0]) * (fr->datacnt - end_idx)); - - assert(nfr->data[0].len > end_base_offset); - - nfr->offset = end_offset + end_base_offset; - nfr->datacnt = fr->datacnt - end_idx; - nfr->data[0].base += end_base_offset; - nfr->data[0].len -= (size_t)end_base_offset; - - rv = ngtcp2_ksl_insert(&pktns->crypto.tx.frq, NULL, &nfr->offset, nfrc); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - ngtcp2_frame_chain_objalloc_del(nfrc, &conn->frc_objalloc, conn->mem); - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - return rv; - } - - if (end_base_offset) { - ++end_idx; - } - - memmove(fr->data, fr->data + idx, sizeof(fr->data[0]) * (end_idx - idx)); - - assert(fr->data[0].len > base_offset); - - fr->offset = offset + base_offset; - fr->datacnt = end_idx - idx; - if (end_base_offset) { - assert(fr->data[fr->datacnt - 1].len > end_base_offset); - fr->data[fr->datacnt - 1].len = (size_t)end_base_offset; - } - fr->data[0].base += base_offset; - fr->data[0].len -= (size_t)base_offset; - - *pfrc = frc; - return 0; - } - - return 0; -} -static int conn_cryptofrq_pop(ngtcp2_conn *conn, ngtcp2_frame_chain **pfrc, - ngtcp2_pktns *pktns, size_t left) { - ngtcp2_crypto *fr, *nfr; - ngtcp2_frame_chain *frc, *nfrc; - int rv; - size_t nmerged; - uint64_t datalen; - ngtcp2_vec a[NGTCP2_MAX_CRYPTO_DATACNT]; - ngtcp2_vec b[NGTCP2_MAX_CRYPTO_DATACNT]; - size_t acnt, bcnt; - ngtcp2_ksl_it it; - - rv = conn_cryptofrq_unacked_pop(conn, pktns, &frc); - if (rv != 0) { - return rv; - } - if (frc == NULL) { - *pfrc = NULL; - return 0; - } - - fr = &frc->fr.crypto; - datalen = ngtcp2_vec_len(fr->data, fr->datacnt); - - if (datalen > left) { - ngtcp2_vec_copy(a, fr->data, fr->datacnt); - acnt = fr->datacnt; - - bcnt = 0; - ngtcp2_vec_split(a, &acnt, b, &bcnt, left, NGTCP2_MAX_CRYPTO_DATACNT); - - assert(acnt > 0); - assert(bcnt > 0); - - rv = ngtcp2_frame_chain_crypto_datacnt_objalloc_new( - &nfrc, bcnt, &conn->frc_objalloc, conn->mem); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - return rv; - } - - nfr = &nfrc->fr.crypto; - nfr->type = NGTCP2_FRAME_CRYPTO; - nfr->offset = fr->offset + left; - nfr->datacnt = bcnt; - ngtcp2_vec_copy(nfr->data, b, bcnt); - - rv = ngtcp2_ksl_insert(&pktns->crypto.tx.frq, NULL, &nfr->offset, nfrc); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - ngtcp2_frame_chain_objalloc_del(nfrc, &conn->frc_objalloc, conn->mem); - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - return rv; - } - - rv = ngtcp2_frame_chain_crypto_datacnt_objalloc_new( - &nfrc, acnt, &conn->frc_objalloc, conn->mem); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - return rv; - } - - nfr = &nfrc->fr.crypto; - *nfr = *fr; - nfr->datacnt = acnt; - ngtcp2_vec_copy(nfr->data, a, acnt); - - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - - *pfrc = nfrc; - - return 0; - } - - left -= (size_t)datalen; - - ngtcp2_vec_copy(a, fr->data, fr->datacnt); - acnt = fr->datacnt; - - for (; left && 
ngtcp2_ksl_len(&pktns->crypto.tx.frq);) { - it = ngtcp2_ksl_begin(&pktns->crypto.tx.frq); - nfrc = ngtcp2_ksl_it_get(&it); - nfr = &nfrc->fr.crypto; - - if (nfr->offset != fr->offset + datalen) { - assert(fr->offset + datalen < nfr->offset); - break; - } - - rv = conn_cryptofrq_unacked_pop(conn, pktns, &nfrc); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - return rv; - } - if (nfrc == NULL) { - break; - } - - nfr = &nfrc->fr.crypto; - - nmerged = ngtcp2_vec_merge(a, &acnt, nfr->data, &nfr->datacnt, left, - NGTCP2_MAX_CRYPTO_DATACNT); - if (nmerged == 0) { - rv = ngtcp2_ksl_insert(&pktns->crypto.tx.frq, NULL, &nfr->offset, nfrc); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - ngtcp2_frame_chain_objalloc_del(nfrc, &conn->frc_objalloc, conn->mem); - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - return rv; - } - break; - } - - datalen += nmerged; - left -= nmerged; - - if (nfr->datacnt == 0) { - ngtcp2_frame_chain_objalloc_del(nfrc, &conn->frc_objalloc, conn->mem); - continue; - } - - nfr->offset += nmerged; - - rv = ngtcp2_ksl_insert(&pktns->crypto.tx.frq, NULL, &nfr->offset, nfrc); - if (rv != 0) { - ngtcp2_frame_chain_objalloc_del(nfrc, &conn->frc_objalloc, conn->mem); - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - return rv; - } - - break; - } - - if (acnt == fr->datacnt) { - assert(acnt > 0); - fr->data[acnt - 1] = a[acnt - 1]; - - *pfrc = frc; - return 0; - } - - assert(acnt > fr->datacnt); - - rv = ngtcp2_frame_chain_crypto_datacnt_objalloc_new( - &nfrc, acnt, &conn->frc_objalloc, conn->mem); - if (rv != 0) { - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - return rv; - } - - nfr = &nfrc->fr.crypto; - *nfr = *fr; - nfr->datacnt = acnt; - ngtcp2_vec_copy(nfr->data, a, acnt); - - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - - *pfrc = nfrc; - - return 0; -} - /* * conn_verify_dcid verifies that destination connection ID in |hd| is * valid for the connection. If it is successfully verified and the @@ -2341,7 +2008,7 @@ static int conn_verify_dcid(ngtcp2_conn *conn, int *pnew_cid_used, * conn_should_pad_pkt returns nonzero if the packet should be padded. * |type| is the type of packet. |left| is the space left in packet * buffer. |write_datalen| is the number of bytes which will be sent - * in the next, coalesced 0-RTT or 1RTT packet. + * in the next, coalesced 0-RTT packet. */ static int conn_should_pad_pkt(ngtcp2_conn *conn, uint8_t type, size_t left, uint64_t write_datalen, int ack_eliciting, @@ -2356,7 +2023,7 @@ static int conn_should_pad_pkt(ngtcp2_conn *conn, uint8_t type, size_t left, if (conn->hs_pktns->crypto.tx.ckm && (conn->hs_pktns->rtb.probe_pkt_left || - ngtcp2_ksl_len(&conn->hs_pktns->crypto.tx.frq) || + !ngtcp2_strm_streamfrq_empty(&conn->hs_pktns->crypto.strm) || !ngtcp2_acktr_empty(&conn->hs_pktns->acktr))) { /* If we have something to send in Handshake packet, then add PADDING in Handshake packet. */ @@ -2367,20 +2034,19 @@ static int conn_should_pad_pkt(ngtcp2_conn *conn, uint8_t type, size_t left, } else { if (conn->hs_pktns->crypto.tx.ckm && (conn->hs_pktns->rtb.probe_pkt_left || - ngtcp2_ksl_len(&conn->hs_pktns->crypto.tx.frq) || + !ngtcp2_strm_streamfrq_empty(&conn->hs_pktns->crypto.strm) || !ngtcp2_acktr_empty(&conn->hs_pktns->acktr))) { /* If we have something to send in Handshake packet, then add PADDING in Handshake packet. 
*/ min_payloadlen = NGTCP2_MIN_COALESCED_PAYLOADLEN; - } else if ((!conn->early.ckm && !conn->pktns.crypto.tx.ckm) || - write_datalen == 0) { - return 1; - } else { - /* If we have something to send in 0RTT or 1RTT packet, then - add PADDING in that packet. Take maximum in case that + } else if (conn->early.ckm && write_datalen > 0) { + /* If we have something to send in 0RTT packet, then add + PADDING in that packet. Take maximum in case that write_datalen includes DATAGRAM which cannot be split. */ min_payloadlen = ngtcp2_max(write_datalen, NGTCP2_MIN_COALESCED_PAYLOADLEN); + } else { + return 1; } } } else { @@ -2390,11 +2056,11 @@ static int conn_should_pad_pkt(ngtcp2_conn *conn, uint8_t type, size_t left, return 0; } - if (!conn->pktns.crypto.tx.ckm || write_datalen == 0) { + if (!conn->pktns.crypto.tx.ckm) { return 1; } - min_payloadlen = ngtcp2_max(write_datalen, NGTCP2_MIN_COALESCED_PAYLOADLEN); + min_payloadlen = NGTCP2_MIN_COALESCED_PAYLOADLEN; } /* TODO the next packet type should be taken into account */ @@ -2419,7 +2085,8 @@ static void conn_restart_timer_on_read(ngtcp2_conn *conn, ngtcp2_tstamp ts) { * conn_keep_alive_enabled returns nonzero if keep-alive is enabled. */ static int conn_keep_alive_enabled(ngtcp2_conn *conn) { - return conn->keep_alive.last_ts != UINT64_MAX && conn->keep_alive.timeout; + return conn->keep_alive.last_ts != UINT64_MAX && + conn->keep_alive.timeout != UINT64_MAX; } /* @@ -2427,8 +2094,8 @@ static int conn_keep_alive_enabled(ngtcp2_conn *conn) { * expired. */ static int conn_keep_alive_expired(ngtcp2_conn *conn, ngtcp2_tstamp ts) { - return conn_keep_alive_enabled(conn) && - conn->keep_alive.last_ts + conn->keep_alive.timeout <= ts; + return ngtcp2_tstamp_elapsed(conn->keep_alive.last_ts, + conn->keep_alive.timeout, ts); } /* @@ -2436,7 +2103,9 @@ static int conn_keep_alive_expired(ngtcp2_conn *conn, ngtcp2_tstamp ts) { */ static ngtcp2_tstamp conn_keep_alive_expiry(ngtcp2_conn *conn) { if ((conn->flags & NGTCP2_CONN_FLAG_KEEP_ALIVE_CANCELLED) || - !conn_keep_alive_enabled(conn)) { + !(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED) || + !conn_keep_alive_enabled(conn) || + conn->keep_alive.last_ts >= UINT64_MAX - conn->keep_alive.timeout) { return UINT64_MAX; } @@ -2467,6 +2136,10 @@ static void conn_update_keep_alive_last_ts(ngtcp2_conn *conn, void ngtcp2_conn_set_keep_alive_timeout(ngtcp2_conn *conn, ngtcp2_duration timeout) { + if (timeout == 0) { + timeout = UINT64_MAX; + } + conn->keep_alive.timeout = timeout; } @@ -2527,7 +2200,7 @@ static uint8_t conn_pkt_flags_short(ngtcp2_conn *conn) { * NGTCP2_PKT_HANDSHAKE_PKT. * * |write_datalen| is the minimum length of application data ready to - * send in subsequent 0RTT or 1RTT packet. + * send in subsequent 0RTT packet. 
* * This function returns the number of bytes written in |dest| if it * succeeds, or one of the following negative error codes: @@ -2590,8 +2263,7 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, cc.hp_ctx = pktns->crypto.tx.hp_ctx; break; default: - assert(0); - abort(); + ngtcp2_unreachable(); } cc.aead = pktns->crypto.ctx.aead; @@ -2605,8 +2277,9 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, version, 0); if (!conn->server && type == NGTCP2_PKT_INITIAL && - conn->local.settings.token.len) { + conn->local.settings.tokenlen) { hd.token = conn->local.settings.token; + hd.tokenlen = conn->local.settings.tokenlen; } ngtcp2_ppe_init(&ppe, dest, destlen, &cc); @@ -2621,9 +2294,9 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, return 0; } - rv = conn_create_ack_frame(conn, &ackfr, pktns, type, ts, - /* ack_delay = */ 0, - NGTCP2_DEFAULT_ACK_DELAY_EXPONENT); + rv = ngtcp2_conn_create_ack_frame(conn, &ackfr, pktns, type, ts, + /* ack_delay = */ 0, + NGTCP2_DEFAULT_ACK_DELAY_EXPONENT); if (rv != 0) { ngtcp2_frame_chain_list_objalloc_del(frq, &conn->frc_objalloc, conn->mem); return rv; @@ -2645,12 +2318,12 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, if (!conn->server || type != NGTCP2_PKT_INITIAL || destlen >= NGTCP2_MAX_UDP_PAYLOAD_SIZE) { build_pkt: - for (; ngtcp2_ksl_len(&pktns->crypto.tx.frq);) { + for (; !ngtcp2_strm_streamfrq_empty(&pktns->crypto.strm);) { left = ngtcp2_ppe_left(&ppe); - crypto_offset = conn_cryptofrq_unacked_offset(conn, pktns); - if (crypto_offset == (size_t)-1) { - conn_cryptofrq_clear(conn, pktns); + crypto_offset = ngtcp2_strm_streamfrq_unacked_offset(&pktns->crypto.strm); + if (crypto_offset == (uint64_t)-1) { + ngtcp2_strm_streamfrq_clear(&pktns->crypto.strm); break; } @@ -2659,7 +2332,7 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, break; } - rv = conn_cryptofrq_pop(conn, &nfrc, pktns, left); + rv = ngtcp2_strm_streamfrq_pop(&pktns->crypto.strm, &nfrc, left); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_list_objalloc_del(frq, &conn->frc_objalloc, @@ -2673,7 +2346,7 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, rv = conn_ppe_write_frame_hd_log(conn, &ppe, &hd_logged, &hd, &nfrc->fr); if (rv != 0) { - assert(0); + ngtcp2_unreachable(); } *pfrc = nfrc; @@ -2729,9 +2402,8 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, if (!pkt_empty) { if (!(rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING)) { - /* The intention of smaller limit is get more chance to measure - RTT samples in early phase. 
*/ - if (pktns->tx.num_non_ack_pkt >= 1) { + if (ngtcp2_tstamp_elapsed(pktns->tx.non_ack_pkt_start_ts, + conn->cstat.smoothed_rtt, ts)) { lfr.type = NGTCP2_FRAME_PING; rv = conn_ppe_write_frame_hd_log(conn, &ppe, &hd_logged, &hd, &lfr); @@ -2739,18 +2411,18 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, assert(rv == NGTCP2_ERR_NOBUF); } else { rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING; - pktns->tx.num_non_ack_pkt = 0; + pktns->tx.non_ack_pkt_start_ts = UINT64_MAX; } - } else { - ++pktns->tx.num_non_ack_pkt; + } else if (pktns->tx.non_ack_pkt_start_ts == UINT64_MAX) { + pktns->tx.non_ack_pkt_start_ts = ts; } } else { - pktns->tx.num_non_ack_pkt = 0; + pktns->tx.non_ack_pkt_start_ts = UINT64_MAX; } } } - if (pkt_empty) { + if (pkt_empty && !require_padding) { return 0; } @@ -2762,6 +2434,8 @@ conn_write_handshake_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, uint8_t *dest, require_padding)) { lfr.type = NGTCP2_FRAME_PADDING; lfr.padding.len = ngtcp2_ppe_padding(&ppe); + } else if (pkt_empty) { + return 0; } else { lfr.type = NGTCP2_FRAME_PADDING; lfr.padding.len = ngtcp2_ppe_padding_hp_sample(&ppe); @@ -2872,8 +2546,7 @@ static ngtcp2_ssize conn_write_ack_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, ack_delay_exponent = conn->local.transport_params.ack_delay_exponent; break; default: - assert(0); - abort(); + ngtcp2_unreachable(); } if (!pktns->crypto.tx.ckm) { @@ -2881,8 +2554,8 @@ static ngtcp2_ssize conn_write_ack_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } ackfr = NULL; - rv = conn_create_ack_frame(conn, &ackfr, pktns, type, ts, ack_delay, - ack_delay_exponent); + rv = ngtcp2_conn_create_ack_frame(conn, &ackfr, pktns, type, ts, ack_delay, + ack_delay_exponent); if (rv != 0) { return rv; } @@ -2992,11 +2665,11 @@ static ngtcp2_ssize conn_write_handshake_ack_pkts(ngtcp2_conn *conn, ngtcp2_ssize res = 0, nwrite = 0; /* In the most cases, client sends ACK in conn_write_handshake_pkt. - This function is only called when it is CWND limited. It is not - required for client to send ACK for server Initial. This is - because once it gets server Initial, it gets Handshake tx key and - discards Initial key. The only good reason to send ACK is give - server RTT measurement early. */ + This function is only called when it is CWND limited or pacing + limited. It is not required for client to send ACK for server + Initial. This is because once it gets server Initial, it gets + Handshake tx key and discards Initial key. The only good reason + to send ACK is give server RTT measurement early. 
*/ if (conn->server && conn->in_pktns) { nwrite = conn_write_ack_pkt(conn, pi, dest, destlen, NGTCP2_PKT_INITIAL, ts); @@ -3145,13 +2818,14 @@ static ngtcp2_ssize conn_write_handshake_pkts(ngtcp2_conn *conn, } if (nwrite == 0) { - if (conn->server && (conn->in_pktns->rtb.probe_pkt_left || - ngtcp2_ksl_len(&conn->in_pktns->crypto.tx.frq))) { + if (conn->server && + (conn->in_pktns->rtb.probe_pkt_left || + !ngtcp2_strm_streamfrq_empty(&conn->in_pktns->crypto.strm))) { if (cstat->loss_detection_timer != UINT64_MAX && conn_server_tx_left(conn, &conn->dcid.current) < NGTCP2_MAX_UDP_PAYLOAD_SIZE) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_RCV, + &conn->log, NGTCP2_LOG_EVENT_LDC, "loss detection timer canceled due to amplification limit"); cstat->loss_detection_timer = UINT64_MAX; } @@ -3252,11 +2926,19 @@ static int conn_should_send_max_data(ngtcp2_conn *conn) { static size_t conn_required_num_new_connection_id(ngtcp2_conn *conn) { uint64_t n; size_t len = ngtcp2_ksl_len(&conn->scid.set); + size_t lim; if (len >= NGTCP2_MAX_SCID_POOL_SIZE) { return 0; } + assert(NGTCP2_MAX_SCID_POOL_SIZE >= conn->scid.num_in_flight); + + lim = NGTCP2_MAX_SCID_POOL_SIZE - conn->scid.num_in_flight; + if (lim == 0) { + return 0; + } + assert(conn->remote.transport_params); assert(conn->remote.transport_params->active_connection_id_limit); @@ -3266,7 +2948,9 @@ static size_t conn_required_num_new_connection_id(ngtcp2_conn *conn) { n = conn->remote.transport_params->active_connection_id_limit + conn->scid.num_retired; - return (size_t)ngtcp2_min(NGTCP2_MAX_SCID_POOL_SIZE, n) - len; + n = ngtcp2_min(NGTCP2_MAX_SCID_POOL_SIZE, n) - len; + + return (size_t)ngtcp2_min(lim, n); } /* @@ -3338,6 +3022,10 @@ static int conn_enqueue_new_connection_id(ngtcp2_conn *conn) { sizeof(token)); nfrc->next = pktns->tx.frq; pktns->tx.frq = nfrc; + + assert(NGTCP2_MAX_SCID_POOL_SIZE > conn->scid.num_in_flight); + + ++conn->scid.num_in_flight; } return 0; @@ -3368,7 +3056,7 @@ static int conn_remove_retired_connection_id(ngtcp2_conn *conn, for (; !ngtcp2_pq_empty(&conn->scid.used);) { scid = ngtcp2_struct_of(ngtcp2_pq_top(&conn->scid.used), ngtcp2_scid, pe); - if (scid->retired_ts == UINT64_MAX || scid->retired_ts + timeout >= ts) { + if (!ngtcp2_tstamp_elapsed(scid->retired_ts, timeout, ts)) { break; } @@ -3435,6 +3123,36 @@ static void conn_handle_unconfirmed_key_update_from_remote(ngtcp2_conn *conn, ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CRY, "key update confirmed"); } +static uint64_t conn_tx_strmq_first_cycle(ngtcp2_conn *conn); + +/* + * strm_should_send_stream_data_blocked returns nonzero if + * STREAM_DATA_BLOCKED frame should be sent to |strm|. + */ +static int strm_should_send_stream_data_blocked(ngtcp2_strm *strm) { + return strm->tx.offset == strm->tx.max_offset && + strm->tx.last_blocked_offset != strm->tx.max_offset; +} + +/* + * conn_should_send_data_blocked returns nonzero if DATA_BLOCKED frame + * should be sent. + */ +static int conn_should_send_data_blocked(ngtcp2_conn *conn) { + return conn->tx.offset == conn->tx.max_offset && + conn->tx.last_blocked_offset != conn->tx.max_offset; +} + +/* + * conn_reset_ppe_pending clears NGTCP2_CONN_FLAG_PPE_PENDING flag and + * nullifies conn->pkt. + */ +static void conn_reset_ppe_pending(ngtcp2_conn *conn) { + conn->flags &= (uint32_t)~NGTCP2_CONN_FLAG_PPE_PENDING; + + memset(&conn->pkt, 0, sizeof(conn->pkt)); +} + /* * conn_write_pkt writes a protected packet in the buffer pointed by * |dest| whose length if |destlen|. 
|type| specifies the type of @@ -3550,8 +3268,12 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, /* transport parameter is only valid after handshake completion which means we don't know how many connection ID that remote - peer can accept before handshake completion. */ - if (conn->oscid.datalen && conn_is_handshake_completed(conn)) { + peer can accept before handshake completion. Because server + can use remote transport parameters sending stream data in + 0.5 RTT, it is also allowed to use remote transport + parameters here. */ + if (conn->oscid.datalen && + (conn->server || conn_is_tls_handshake_completed(conn))) { rv = conn_enqueue_new_connection_id(conn); if (rv != 0) { return rv; @@ -3574,7 +3296,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, break; default: /* Unreachable */ - assert(0); + ngtcp2_unreachable(); } cc->encrypt = conn->callbacks.encrypt; @@ -3617,6 +3339,32 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, nfrc->fr.max_data.max_data; } + if (stream_blocked && conn_should_send_max_data(conn)) { + rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); + if (rv != 0) { + return rv; + } + + nfrc->fr.type = NGTCP2_FRAME_DATA_BLOCKED; + nfrc->fr.data_blocked.offset = conn->tx.max_offset; + nfrc->next = pktns->tx.frq; + pktns->tx.frq = nfrc; + + conn->tx.last_blocked_offset = conn->tx.max_offset; + } + + if (stream_blocked && !ngtcp2_strm_is_tx_queued(vmsg->stream.strm) && + strm_should_send_stream_data_blocked(vmsg->stream.strm)) { + assert(vmsg); + assert(vmsg->type == NGTCP2_VMSG_TYPE_STREAM); + + vmsg->stream.strm->cycle = conn_tx_strmq_first_cycle(conn); + rv = ngtcp2_conn_tx_strmq_push(conn, vmsg->stream.strm); + if (rv != 0) { + return rv; + } + } + ngtcp2_pkt_hd_init(hd, hd_flags, type, &conn->dcid.current.cid, scid, pktns->tx.last_pkt_num + 1, pktns_select_pkt_numlen(pktns), version, 0); @@ -3651,16 +3399,16 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, pkt_empty = 0; rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING; - require_padding = - !conn->server || destlen >= NGTCP2_MAX_UDP_PAYLOAD_SIZE; + require_padding = require_padding || !conn->server || + destlen >= NGTCP2_MAX_UDP_PAYLOAD_SIZE; /* We don't retransmit PATH_RESPONSE. 
*/ } } } - rv = conn_create_ack_frame(conn, &ackfr, pktns, type, ts, - conn_compute_ack_delay(conn), - conn->local.transport_params.ack_delay_exponent); + rv = ngtcp2_conn_create_ack_frame( + conn, &ackfr, pktns, type, ts, conn_compute_ack_delay(conn), + conn->local.transport_params.ack_delay_exponent); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; @@ -3696,29 +3444,17 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, case NGTCP2_FRAME_STOP_SENDING: strm = ngtcp2_conn_find_stream(conn, (*pfrc)->fr.stop_sending.stream_id); - if (strm == NULL || (strm->flags & NGTCP2_STRM_FLAG_SHUT_RD)) { + if (strm == NULL || + ((strm->flags & NGTCP2_STRM_FLAG_SHUT_RD) && + ngtcp2_strm_rx_offset(strm) == strm->rx.last_offset)) { frc = *pfrc; *pfrc = (*pfrc)->next; ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); continue; } - - if (!(strm->flags & NGTCP2_STRM_FLAG_STREAM_STOP_SENDING_CALLED)) { - strm->flags |= NGTCP2_STRM_FLAG_STREAM_STOP_SENDING_CALLED; - - rv = conn_call_stream_stop_sending( - conn, (*pfrc)->fr.stop_sending.stream_id, - (*pfrc)->fr.stop_sending.app_error_code, strm->stream_user_data); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - return rv; - } - } - break; case NGTCP2_FRAME_STREAM: - assert(0); - break; + ngtcp2_unreachable(); case NGTCP2_FRAME_MAX_STREAMS_BIDI: if ((*pfrc)->fr.max_streams.max_streams < conn->remote.bidi.max_streams) { @@ -3740,7 +3476,9 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, case NGTCP2_FRAME_MAX_STREAM_DATA: strm = ngtcp2_conn_find_stream(conn, (*pfrc)->fr.max_stream_data.stream_id); - if (strm == NULL || (strm->flags & NGTCP2_STRM_FLAG_SHUT_RD) || + if (strm == NULL || + (strm->flags & + (NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_STOP_SENDING)) || (*pfrc)->fr.max_stream_data.max_stream_data < strm->rx.max_offset) { frc = *pfrc; *pfrc = (*pfrc)->next; @@ -3756,9 +3494,27 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, continue; } break; - case NGTCP2_FRAME_CRYPTO: - assert(0); + case NGTCP2_FRAME_STREAM_DATA_BLOCKED: + strm = ngtcp2_conn_find_stream( + conn, (*pfrc)->fr.stream_data_blocked.stream_id); + if (strm == NULL || (strm->flags & NGTCP2_STRM_FLAG_SHUT_WR) || + (*pfrc)->fr.stream_data_blocked.offset != strm->tx.max_offset) { + frc = *pfrc; + *pfrc = (*pfrc)->next; + ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); + continue; + } + break; + case NGTCP2_FRAME_DATA_BLOCKED: + if ((*pfrc)->fr.data_blocked.offset != conn->tx.max_offset) { + frc = *pfrc; + *pfrc = (*pfrc)->next; + ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); + continue; + } break; + case NGTCP2_FRAME_CRYPTO: + ngtcp2_unreachable(); } rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &(*pfrc)->fr); @@ -3774,13 +3530,14 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, pfrc = &(*pfrc)->next; } - if (rv != NGTCP2_ERR_NOBUF) { - for (; ngtcp2_ksl_len(&pktns->crypto.tx.frq);) { + if (*pfrc == NULL) { + for (; !ngtcp2_strm_streamfrq_empty(&pktns->crypto.strm);) { left = ngtcp2_ppe_left(ppe); - crypto_offset = conn_cryptofrq_unacked_offset(conn, pktns); - if (crypto_offset == (size_t)-1) { - conn_cryptofrq_clear(conn, pktns); + crypto_offset = + ngtcp2_strm_streamfrq_unacked_offset(&pktns->crypto.strm); + if (crypto_offset == (uint64_t)-1) { + ngtcp2_strm_streamfrq_clear(&pktns->crypto.strm); break; } @@ -3790,7 +3547,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, 
ngtcp2_pkt_info *pi, break; } - rv = conn_cryptofrq_pop(conn, &nfrc, pktns, left); + rv = ngtcp2_strm_streamfrq_pop(&pktns->crypto.strm, &nfrc, left); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); return rv; @@ -3802,7 +3559,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); if (rv != 0) { - assert(0); + ngtcp2_unreachable(); } *pfrc = nfrc; @@ -3815,79 +3572,114 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } } - /* Write MAX_STREAM_ID after RESET_STREAM so that we can extend stream - ID space in one packet. */ - if (rv != NGTCP2_ERR_NOBUF && *pfrc == NULL && - conn->remote.bidi.unsent_max_streams > conn->remote.bidi.max_streams) { - rv = conn_call_extend_max_remote_streams_bidi( - conn, conn->remote.bidi.unsent_max_streams); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - return rv; - } + if (*pfrc == NULL) { + for (; !ngtcp2_pq_empty(&conn->tx.strmq);) { + strm = ngtcp2_conn_tx_strmq_top(conn); - rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - return rv; - } - nfrc->fr.type = NGTCP2_FRAME_MAX_STREAMS_BIDI; - nfrc->fr.max_streams.max_streams = conn->remote.bidi.unsent_max_streams; - *pfrc = nfrc; + if (strm->flags & NGTCP2_STRM_FLAG_SEND_RESET_STREAM) { + rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); + if (rv != 0) { + return rv; + } - conn->remote.bidi.max_streams = conn->remote.bidi.unsent_max_streams; + nfrc->fr.type = NGTCP2_FRAME_RESET_STREAM; + nfrc->fr.reset_stream.stream_id = strm->stream_id; + nfrc->fr.reset_stream.app_error_code = + strm->tx.reset_stream_app_error_code; + nfrc->fr.reset_stream.final_size = strm->tx.offset; + *pfrc = nfrc; - rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &(*pfrc)->fr); - if (rv != 0) { - assert(NGTCP2_ERR_NOBUF == rv); - } else { - pkt_empty = 0; - rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | - NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | - NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE; - pfrc = &(*pfrc)->next; - } - } + strm->flags &= ~NGTCP2_STRM_FLAG_SEND_RESET_STREAM; - if (rv != NGTCP2_ERR_NOBUF && *pfrc == NULL) { - if (conn->remote.uni.unsent_max_streams > conn->remote.uni.max_streams) { - rv = conn_call_extend_max_remote_streams_uni( - conn, conn->remote.uni.unsent_max_streams); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - return rv; + rv = + conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); + if (rv != 0) { + assert(NGTCP2_ERR_NOBUF == rv); + + break; + } + + pkt_empty = 0; + rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE; + pfrc = &(*pfrc)->next; } - rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - return rv; + if (strm->flags & NGTCP2_STRM_FLAG_SEND_STOP_SENDING) { + if ((strm->flags & NGTCP2_STRM_FLAG_SHUT_RD) && + ngtcp2_strm_rx_offset(strm) == strm->rx.last_offset) { + strm->flags &= ~NGTCP2_STRM_FLAG_SEND_STOP_SENDING; + } else { + rv = conn_call_stream_stop_sending( + conn, strm->stream_id, strm->tx.stop_sending_app_error_code, + strm->stream_user_data); + if (rv != 0) { + assert(ngtcp2_err_is_fatal(rv)); + + return rv; + } + + rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); + if (rv != 0) { + return rv; + } + + nfrc->fr.type = NGTCP2_FRAME_STOP_SENDING; + nfrc->fr.stop_sending.stream_id = 
strm->stream_id; + nfrc->fr.stop_sending.app_error_code = + strm->tx.stop_sending_app_error_code; + *pfrc = nfrc; + + strm->flags &= ~NGTCP2_STRM_FLAG_SEND_STOP_SENDING; + + rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, + &nfrc->fr); + if (rv != 0) { + assert(NGTCP2_ERR_NOBUF == rv); + + break; + } + + pkt_empty = 0; + rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE; + pfrc = &(*pfrc)->next; + } } - nfrc->fr.type = NGTCP2_FRAME_MAX_STREAMS_UNI; - nfrc->fr.max_streams.max_streams = conn->remote.uni.unsent_max_streams; - *pfrc = nfrc; - conn->remote.uni.max_streams = conn->remote.uni.unsent_max_streams; + if (!(strm->flags & NGTCP2_STRM_FLAG_SHUT_WR) && + strm_should_send_stream_data_blocked(strm)) { + rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); + if (rv != 0) { + return rv; + } + + nfrc->fr.type = NGTCP2_FRAME_STREAM_DATA_BLOCKED; + nfrc->fr.stream_data_blocked.stream_id = strm->stream_id; + nfrc->fr.stream_data_blocked.offset = strm->tx.max_offset; + *pfrc = nfrc; + + strm->tx.last_blocked_offset = strm->tx.max_offset; + + rv = + conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); + if (rv != 0) { + assert(NGTCP2_ERR_NOBUF == rv); + + break; + } - rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, - &(*pfrc)->fr); - if (rv != 0) { - assert(NGTCP2_ERR_NOBUF == rv); - } else { pkt_empty = 0; rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE; pfrc = &(*pfrc)->next; } - } - } - - if (rv != NGTCP2_ERR_NOBUF) { - for (; !ngtcp2_pq_empty(&conn->tx.strmq);) { - strm = ngtcp2_conn_tx_strmq_top(conn); - if (!(strm->flags & NGTCP2_STRM_FLAG_SHUT_RD) && + if (!(strm->flags & + (NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_STOP_SENDING)) && conn_should_send_max_stream_data(conn, strm)) { rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); if (rv != 0) { @@ -3922,7 +3714,10 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, nfrc->fr.max_stream_data.stream_id = strm->stream_id; nfrc->fr.max_stream_data.max_stream_data = strm->rx.unsent_max_offset + delta; - ngtcp2_list_insert(nfrc, pfrc); + *pfrc = nfrc; + + strm->rx.max_offset = strm->rx.unsent_max_offset = + nfrc->fr.max_stream_data.max_stream_data; rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); @@ -3936,8 +3731,6 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE; pfrc = &(*pfrc)->next; - strm->rx.max_offset = strm->rx.unsent_max_offset = - nfrc->fr.max_stream_data.max_stream_data; } if (ngtcp2_strm_streamfrq_empty(strm)) { @@ -3949,8 +3742,6 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (stream_offset == (uint64_t)-1) { ngtcp2_strm_streamfrq_clear(strm); ngtcp2_conn_tx_strmq_pop(conn); - assert(conn->tx.strmq_nretrans); - --conn->tx.strmq_nretrans; continue; } @@ -3976,7 +3767,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); if (rv != 0) { - assert(0); + ngtcp2_unreachable(); } *pfrc = nfrc; @@ -3989,8 +3780,6 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (ngtcp2_strm_streamfrq_empty(strm)) { ngtcp2_conn_tx_strmq_pop(conn); - assert(conn->tx.strmq_nretrans); - --conn->tx.strmq_nretrans; 
continue; } @@ -4004,10 +3793,75 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } } - if (rv != NGTCP2_ERR_NOBUF && !send_stream && !send_datagram && + /* Write MAX_STREAMS after RESET_STREAM so that we can extend + stream ID space in one packet. */ + if (*pfrc == NULL && + conn->remote.bidi.unsent_max_streams > conn->remote.bidi.max_streams) { + rv = conn_call_extend_max_remote_streams_bidi( + conn, conn->remote.bidi.unsent_max_streams); + if (rv != 0) { + assert(ngtcp2_err_is_fatal(rv)); + return rv; + } + + rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); + if (rv != 0) { + assert(ngtcp2_err_is_fatal(rv)); + return rv; + } + nfrc->fr.type = NGTCP2_FRAME_MAX_STREAMS_BIDI; + nfrc->fr.max_streams.max_streams = conn->remote.bidi.unsent_max_streams; + *pfrc = nfrc; + + conn->remote.bidi.max_streams = conn->remote.bidi.unsent_max_streams; + + rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &(*pfrc)->fr); + if (rv != 0) { + assert(NGTCP2_ERR_NOBUF == rv); + } else { + pkt_empty = 0; + rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE; + pfrc = &(*pfrc)->next; + } + } + + if (*pfrc == NULL && + conn->remote.uni.unsent_max_streams > conn->remote.uni.max_streams) { + rv = conn_call_extend_max_remote_streams_uni( + conn, conn->remote.uni.unsent_max_streams); + if (rv != 0) { + assert(ngtcp2_err_is_fatal(rv)); + return rv; + } + + rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); + if (rv != 0) { + assert(ngtcp2_err_is_fatal(rv)); + return rv; + } + nfrc->fr.type = NGTCP2_FRAME_MAX_STREAMS_UNI; + nfrc->fr.max_streams.max_streams = conn->remote.uni.unsent_max_streams; + *pfrc = nfrc; + + conn->remote.uni.max_streams = conn->remote.uni.unsent_max_streams; + + rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &(*pfrc)->fr); + if (rv != 0) { + assert(NGTCP2_ERR_NOBUF == rv); + } else { + pkt_empty = 0; + rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE; + pfrc = &(*pfrc)->next; + } + } + + if (pktns->tx.frq == NULL && !send_stream && !send_datagram && !(rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) && - pktns->rtb.num_retransmittable && pktns->tx.frq == NULL && - pktns->rtb.probe_pkt_left) { + pktns->rtb.num_retransmittable && pktns->rtb.probe_pkt_left) { num_reclaimed = ngtcp2_rtb_reclaim_on_pto(&pktns->rtb, conn, pktns, 1); if (num_reclaimed < 0) { return rv; @@ -4016,15 +3870,16 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, goto build_pkt; } - /* We had pktns->rtb.num_retransmittable > 0 but the contents of - those packets have been acknowledged (i.e., retransmission in - another packet). In this case, we don't have to send any - probe packet. */ + /* We had pktns->rtb.num_retransmittable > 0 but we were unable + to reclaim any frame. In this case, we do not have to send + any probe packet. */ if (pktns->rtb.num_pto_eliciting == 0) { pktns->rtb.probe_pkt_left = 0; ngtcp2_conn_set_loss_detection_timer(conn, ts); - /* TODO If packet is empty, we should return now if cwnd is - zero. 
*/ + + if (pkt_empty && conn_cwnd_is_zero(conn) && !require_padding) { + return 0; + } } } } else { @@ -4036,7 +3891,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, left = ngtcp2_ppe_left(ppe); - if (rv != NGTCP2_ERR_NOBUF && send_stream && *pfrc == NULL && + if (*pfrc == NULL && send_stream && (ndatalen = ngtcp2_pkt_stream_max_datalen( vmsg->stream.strm->stream_id, vmsg->stream.strm->tx.offset, ndatalen, left)) != (size_t)-1 && @@ -4068,7 +3923,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); if (rv != 0) { - assert(0); + ngtcp2_unreachable(); } *pfrc = nfrc; @@ -4093,7 +3948,83 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, send_stream = 0; } - if (rv != NGTCP2_ERR_NOBUF && send_datagram && + if (vmsg && vmsg->type == NGTCP2_VMSG_TYPE_STREAM && + ((stream_blocked && *pfrc == NULL) || + (send_stream && + !(vmsg->stream.strm->flags & NGTCP2_STRM_FLAG_SHUT_WR)))) { + if (conn_should_send_data_blocked(conn)) { + rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); + if (rv != 0) { + assert(ngtcp2_err_is_fatal(rv)); + + return rv; + } + + nfrc->fr.type = NGTCP2_FRAME_DATA_BLOCKED; + nfrc->fr.data_blocked.offset = conn->tx.offset; + + rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); + if (rv != 0) { + assert(NGTCP2_ERR_NOBUF == rv); + + /* We cannot add nfrc to pktns->tx.frq here. */ + ngtcp2_frame_chain_objalloc_del(nfrc, &conn->frc_objalloc, conn->mem); + } else { + *pfrc = nfrc; + pfrc = &(*pfrc)->next; + + pkt_empty = 0; + rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE; + + conn->tx.last_blocked_offset = conn->tx.max_offset; + } + } + + strm = vmsg->stream.strm; + + if (strm_should_send_stream_data_blocked(strm)) { + rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); + if (rv != 0) { + assert(ngtcp2_err_is_fatal(rv)); + + return rv; + } + + nfrc->fr.type = NGTCP2_FRAME_STREAM_DATA_BLOCKED; + nfrc->fr.stream_data_blocked.stream_id = strm->stream_id; + nfrc->fr.stream_data_blocked.offset = strm->tx.max_offset; + + rv = conn_ppe_write_frame_hd_log(conn, ppe, &hd_logged, hd, &nfrc->fr); + if (rv != 0) { + assert(NGTCP2_ERR_NOBUF == rv); + + /* We cannot add nfrc to pktns->tx.frq here. 
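The DATA_BLOCKED/STREAM_DATA_BLOCKED additions above follow one rule: report a blocked condition at most once per flow-control limit, and remember which limit was reported. A standalone sketch of that predicate (plain C, illustrative names; the real checks live in conn_should_send_data_blocked and strm_should_send_stream_data_blocked):

  #include <stdbool.h>
  #include <stdint.h>

  /* Illustrative sender-side flow-control state for one level
     (connection or stream). */
  struct fc_sender {
    uint64_t offset;              /* bytes sent so far */
    uint64_t max_offset;          /* peer-advertised limit */
    uint64_t last_blocked_offset; /* limit already reported as blocking;
                                     UINT64_MAX if never reported */
  };

  /* Send a (STREAM_)DATA_BLOCKED frame only when the sender is actually
     at the limit and has not yet reported being blocked at this limit. */
  bool should_send_blocked(const struct fc_sender *fc) {
    return fc->offset == fc->max_offset &&
           fc->last_blocked_offset != fc->max_offset;
  }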
*/ + ngtcp2_frame_chain_objalloc_del(nfrc, &conn->frc_objalloc, conn->mem); + + if (!ngtcp2_strm_is_tx_queued(strm)) { + strm->cycle = conn_tx_strmq_first_cycle(conn); + rv = ngtcp2_conn_tx_strmq_push(conn, strm); + if (rv != 0) { + return rv; + } + } + } else { + *pfrc = nfrc; + pfrc = &(*pfrc)->next; + + pkt_empty = 0; + rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE; + + strm->tx.last_blocked_offset = strm->tx.max_offset; + } + } + } + + if (*pfrc == NULL && send_datagram && left >= ngtcp2_pkt_datagram_framelen((size_t)datalen)) { if (conn->callbacks.ack_datagram || conn->callbacks.lost_datagram) { rv = ngtcp2_frame_chain_objalloc_new(&nfrc, &conn->frc_objalloc); @@ -4129,8 +4060,9 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } pkt_empty = 0; - rtb_entry_flags |= - NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | NGTCP2_RTB_ENTRY_FLAG_DATAGRAM; + rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_PTO_ELICITING | + NGTCP2_RTB_ENTRY_FLAG_DATAGRAM; if (vmsg->datagram.paccepted) { *vmsg->datagram.paccepted = 1; @@ -4140,14 +4072,27 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } if (pkt_empty) { - assert(rv == 0 || NGTCP2_ERR_NOBUF == rv); - if (rv == 0 && stream_blocked && ngtcp2_conn_get_max_data_left(conn)) { + if (*pfrc == NULL && rv == 0 && stream_blocked && + (write_more || !require_padding) && + ngtcp2_conn_get_max_data_left(conn)) { + if (write_more) { + conn->pkt.pfrc = pfrc; + conn->pkt.pkt_empty = pkt_empty; + conn->pkt.rtb_entry_flags = rtb_entry_flags; + conn->pkt.hd_logged = hd_logged; + conn->flags |= NGTCP2_CONN_FLAG_PPE_PENDING; + } + return NGTCP2_ERR_STREAM_DATA_BLOCKED; } - keep_alive_expired = conn_keep_alive_expired(conn, ts); + keep_alive_expired = + type == NGTCP2_PKT_1RTT && conn_keep_alive_expired(conn, ts); + + if (conn->pktns.rtb.probe_pkt_left == 0 && !keep_alive_expired && + !require_padding) { + conn_reset_ppe_pending(conn); - if (conn->pktns.rtb.probe_pkt_left == 0 && !keep_alive_expired) { return 0; } } else if (write_more) { @@ -4165,7 +4110,11 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (ngtcp2_ppe_left(ppe)) { return NGTCP2_ERR_WRITE_MORE; } - } else if (ngtcp2_conn_get_max_data_left(conn) && stream_blocked) { + break; + } + + if (*pfrc == NULL && ngtcp2_conn_get_max_data_left(conn) && + stream_blocked) { return NGTCP2_ERR_STREAM_DATA_BLOCKED; } break; @@ -4178,12 +4127,13 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, calls ngtcp2_conn_writev_datagram again. 
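For the DATAGRAM branch above, the frame only fits when the space left in the packet covers the type byte, the varint-encoded Length field, and the payload. A standalone sketch of that size check (plain C; not the ngtcp2_pkt_datagram_framelen helper itself):

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  /* Bytes needed to encode |n| as a QUIC variable-length integer. */
  size_t varint_len(uint64_t n) {
    if (n < 64) return 1;
    if (n < 16384) return 2;
    if (n < 1073741824) return 4;
    return 8;
  }

  /* Wire size of a DATAGRAM frame that carries an explicit Length field:
     one type byte + varint(Length) + Length bytes of payload. */
  size_t datagram_framelen(size_t datalen) {
    return 1 + varint_len((uint64_t)datalen) + datalen;
  }

  bool datagram_fits(size_t left, size_t datalen) {
    return left >= datagram_framelen(datalen);
  }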
*/ break; default: - assert(0); + ngtcp2_unreachable(); } } if (!(rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING)) { - if (pktns->tx.num_non_ack_pkt >= NGTCP2_MAX_NON_ACK_TX_PKT || + if (ngtcp2_tstamp_elapsed(pktns->tx.non_ack_pkt_start_ts, + cstat->smoothed_rtt, ts) || keep_alive_expired || conn->pktns.rtb.probe_pkt_left) { lfr.type = NGTCP2_FRAME_PING; @@ -4197,13 +4147,13 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (conn->pktns.rtb.probe_pkt_left) { rtb_entry_flags |= NGTCP2_RTB_ENTRY_FLAG_PROBE; } - pktns->tx.num_non_ack_pkt = 0; + pktns->tx.non_ack_pkt_start_ts = UINT64_MAX; } - } else { - ++pktns->tx.num_non_ack_pkt; + } else if (pktns->tx.non_ack_pkt_start_ts == UINT64_MAX) { + pktns->tx.non_ack_pkt_start_ts = ts; } } else { - pktns->tx.num_non_ack_pkt = 0; + pktns->tx.non_ack_pkt_start_ts = UINT64_MAX; } /* TODO Push STREAM frame back to ngtcp2_strm if there is an error @@ -4212,8 +4162,10 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, /* Making full sized packet will help GSO a bit */ ngtcp2_ppe_left(ppe) < 10) { lfr.padding.len = ngtcp2_ppe_padding(ppe); - } else { + } else if (type == NGTCP2_PKT_1RTT) { lfr.padding.len = ngtcp2_ppe_padding_size(ppe, min_pktlen); + } else { + lfr.padding.len = ngtcp2_ppe_padding_hp_sample(ppe); } if (lfr.padding.len) { @@ -4285,7 +4237,7 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, conn_handle_tx_ecn(conn, pi, NULL, pktns, hd, ts); } - conn->flags &= (uint32_t)~NGTCP2_CONN_FLAG_PPE_PENDING; + conn_reset_ppe_pending(conn); if (pktns->rtb.probe_pkt_left && (rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING)) { @@ -4360,8 +4312,7 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( break; default: /* We don't support 0-RTT packet in this function. 
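The PING change above replaces the old packet counter (num_non_ack_pkt) with a timestamp: once only non-ACK-eliciting packets have been sent for roughly one smoothed RTT, the next packet gets a PING so the peer has something to acknowledge. A standalone sketch of that timer (plain C; UINT64_MAX mirrors the "no run in progress" sentinel used in the patch):

  #include <stdbool.h>
  #include <stdint.h>

  #define TS_UNSET UINT64_MAX /* no non-ack-eliciting run in progress */

  /* Returns true when a PING should be bundled: a run of
     non-ACK-eliciting packets started at |start_ts| and at least one
     smoothed RTT has elapsed since then. */
  bool should_bundle_ping(uint64_t start_ts, uint64_t srtt, uint64_t now) {
    return start_ts != TS_UNSET && now >= start_ts && now - start_ts >= srtt;
  }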
*/ - assert(0); - abort(); + ngtcp2_unreachable(); } cc.aead = pktns->crypto.ctx.aead; @@ -4448,7 +4399,8 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( if (((rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING) || padded) && (!path || ngtcp2_path_eq(&conn->dcid.current.ps.path, path))) { - if (pi && !(rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_PMTUD_PROBE)) { + if (pi && (conn->tx.ecn.state == NGTCP2_ECN_STATE_CAPABLE || + !(rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_PMTUD_PROBE))) { conn_handle_tx_ecn(conn, pi, &rtb_entry_flags, pktns, &hd, ts); } @@ -4479,8 +4431,7 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( nwrite); } } - } else if (pi && !(rtb_entry_flags & NGTCP2_RTB_ENTRY_FLAG_PMTUD_PROBE) && - conn->tx.ecn.state == NGTCP2_ECN_STATE_CAPABLE) { + } else if (pi && conn->tx.ecn.state == NGTCP2_ECN_STATE_CAPABLE) { conn_handle_tx_ecn(conn, pi, NULL, pktns, &hd, ts); } @@ -4488,7 +4439,17 @@ ngtcp2_ssize ngtcp2_conn_write_single_frame_pkt( conn_update_keep_alive_last_ts(conn, ts); } - conn->tx.pacing.pktlen += (size_t)nwrite; + if (!padded) { + switch (fr->type) { + case NGTCP2_FRAME_ACK: + case NGTCP2_FRAME_ACK_ECN: + break; + default: + conn->tx.pacing.pktlen += (size_t)nwrite; + } + } else { + conn->tx.pacing.pktlen += (size_t)nwrite; + } ngtcp2_qlog_metrics_updated(&conn->qlog, &conn->cstat); @@ -4529,11 +4490,11 @@ static int conn_handshake_remnants_left(ngtcp2_conn *conn) { ngtcp2_pktns *in_pktns = conn->in_pktns; ngtcp2_pktns *hs_pktns = conn->hs_pktns; - return !conn_is_handshake_completed(conn) || + return !conn_is_tls_handshake_completed(conn) || (in_pktns && (in_pktns->rtb.num_pto_eliciting || - ngtcp2_ksl_len(&in_pktns->crypto.tx.frq))) || + !ngtcp2_strm_streamfrq_empty(&in_pktns->crypto.strm))) || (hs_pktns && (hs_pktns->rtb.num_pto_eliciting || - ngtcp2_ksl_len(&hs_pktns->crypto.tx.frq))); + !ngtcp2_strm_streamfrq_empty(&hs_pktns->crypto.strm))); } /* @@ -4684,14 +4645,14 @@ static int conn_start_pmtud(ngtcp2_conn *conn) { assert(!conn->local.settings.no_pmtud); assert(!conn->pmtud); - assert(conn_is_handshake_completed(conn)); + assert(conn_is_tls_handshake_completed(conn)); assert(conn->remote.transport_params); assert(conn->remote.transport_params->max_udp_payload_size >= NGTCP2_MAX_UDP_PAYLOAD_SIZE); - hard_max_udp_payload_size = - (size_t)ngtcp2_min(conn->remote.transport_params->max_udp_payload_size, - (uint64_t)conn->local.settings.max_udp_payload_size); + hard_max_udp_payload_size = (size_t)ngtcp2_min( + conn->remote.transport_params->max_udp_payload_size, + (uint64_t)conn->local.settings.max_tx_udp_payload_size); rv = ngtcp2_pmtud_new(&conn->pmtud, conn->dcid.current.max_udp_payload_size, hard_max_udp_payload_size, @@ -4850,9 +4811,9 @@ static size_t conn_shape_udp_payload(ngtcp2_conn *conn, const ngtcp2_dcid *dcid, } payloadlen = - ngtcp2_min(payloadlen, conn->local.settings.max_udp_payload_size); + ngtcp2_min(payloadlen, conn->local.settings.max_tx_udp_payload_size); - if (conn->local.settings.no_udp_payload_size_shaping) { + if (conn->local.settings.no_tx_udp_payload_size_shaping) { return payloadlen; } @@ -4885,10 +4846,6 @@ static int conn_on_path_validation_failed(ngtcp2_conn *conn, ngtcp2_pv *pv, } } - if (pv->flags & NGTCP2_PV_FLAG_MTU_PROBE) { - return NGTCP2_ERR_NO_VIABLE_PATH; - } - if (pv->flags & NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE) { ngtcp2_dcid_copy(&conn->dcid.current, &pv->fallback_dcid); conn_reset_congestion_state(conn, ts); @@ -4918,7 +4875,7 @@ static ngtcp2_ssize conn_write_path_challenge(ngtcp2_conn *conn, ngtcp2_tstamp expiry; 
ngtcp2_pv *pv = conn->pv; ngtcp2_frame lfr; - ngtcp2_duration timeout; + ngtcp2_duration timeout, initial_pto; uint8_t flags; uint64_t tx_left; int rv; @@ -4953,8 +4910,9 @@ static ngtcp2_ssize conn_write_path_challenge(ngtcp2_conn *conn, lfr.type = NGTCP2_FRAME_PATH_CHALLENGE; + initial_pto = conn_compute_initial_pto(conn, &conn->pktns); timeout = conn_compute_pto(conn, &conn->pktns); - timeout = ngtcp2_max(timeout, 3 * conn->cstat.initial_rtt); + timeout = ngtcp2_max(timeout, initial_pto); expiry = ts + timeout * (1ULL << pv->round); destlen = ngtcp2_min(destlen, NGTCP2_MAX_UDP_PAYLOAD_SIZE); @@ -5216,75 +5174,6 @@ uint64_t ngtcp2_conn_tx_strmq_first_cycle(ngtcp2_conn *conn) { return strm->cycle; } -int ngtcp2_conn_resched_frames(ngtcp2_conn *conn, ngtcp2_pktns *pktns, - ngtcp2_frame_chain **pfrc) { - ngtcp2_frame_chain **first = pfrc; - ngtcp2_frame_chain *frc; - ngtcp2_stream *sfr; - ngtcp2_strm *strm; - int rv; - int streamfrq_empty; - - if (*pfrc == NULL) { - return 0; - } - - for (; *pfrc;) { - switch ((*pfrc)->fr.type) { - case NGTCP2_FRAME_STREAM: - frc = *pfrc; - - *pfrc = frc->next; - frc->next = NULL; - sfr = &frc->fr.stream; - - strm = ngtcp2_conn_find_stream(conn, sfr->stream_id); - if (!strm) { - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - break; - } - streamfrq_empty = ngtcp2_strm_streamfrq_empty(strm); - rv = ngtcp2_strm_streamfrq_push(strm, frc); - if (rv != 0) { - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - return rv; - } - if (!ngtcp2_strm_is_tx_queued(strm)) { - strm->cycle = conn_tx_strmq_first_cycle(conn); - rv = ngtcp2_conn_tx_strmq_push(conn, strm); - if (rv != 0) { - return rv; - } - } - if (streamfrq_empty) { - ++conn->tx.strmq_nretrans; - } - break; - case NGTCP2_FRAME_CRYPTO: - frc = *pfrc; - - *pfrc = frc->next; - frc->next = NULL; - - rv = ngtcp2_ksl_insert(&pktns->crypto.tx.frq, NULL, - &frc->fr.crypto.offset, frc); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); - return rv; - } - break; - default: - pfrc = &(*pfrc)->next; - } - } - - *pfrc = pktns->tx.frq; - pktns->tx.frq = *first; - - return 0; -} - /* * conn_on_retry is called when Retry packet is received. 
The * function decodes the data in the buffer pointed by |pkt| whose @@ -5312,7 +5201,7 @@ static int conn_on_retry(ngtcp2_conn *conn, const ngtcp2_pkt_hd *hd, ngtcp2_rtb *rtb = &conn->pktns.rtb; ngtcp2_rtb *in_rtb; uint8_t cidbuf[sizeof(retry.odcid.data) * 2 + 1]; - ngtcp2_vec *token; + uint8_t *token; if (!in_pktns || conn->flags & NGTCP2_CONN_FLAG_RECV_RETRY) { return 0; @@ -5340,7 +5229,7 @@ static int conn_on_retry(ngtcp2_conn *conn, const ngtcp2_pkt_hd *hd, (const char *)ngtcp2_encode_hex(cidbuf, retry.odcid.data, retry.odcid.datalen)); - if (retry.token.len == 0) { + if (retry.tokenlen == 0) { return NGTCP2_ERR_PROTO; } @@ -5376,19 +5265,19 @@ static int conn_on_retry(ngtcp2_conn *conn, const ngtcp2_pkt_hd *hd, return rv; } - token = &conn->local.settings.token; - - ngtcp2_mem_free(conn->mem, token->base); - token->base = NULL; - token->len = 0; + ngtcp2_mem_free(conn->mem, (uint8_t *)conn->local.settings.token); + conn->local.settings.token = NULL; + conn->local.settings.tokenlen = 0; - token->base = ngtcp2_mem_malloc(conn->mem, retry.token.len); - if (token->base == NULL) { + token = ngtcp2_mem_malloc(conn->mem, retry.tokenlen); + if (token == NULL) { return NGTCP2_ERR_NOMEM; } - token->len = retry.token.len; - ngtcp2_cpymem(token->base, retry.token.base, retry.token.len); + ngtcp2_cpymem(token, retry.token, retry.tokenlen); + + conn->local.settings.token = token; + conn->local.settings.tokenlen = retry.tokenlen; reset_conn_stat_recovery(&conn->cstat); conn_reset_congestion_state(conn, ts); @@ -5423,7 +5312,6 @@ int ngtcp2_conn_detect_lost_pkt(ngtcp2_conn *conn, ngtcp2_pktns *pktns, static int conn_recv_ack(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_ack *fr, ngtcp2_tstamp pkt_ts, ngtcp2_tstamp ts) { int rv; - ngtcp2_frame_chain *frc = NULL; ngtcp2_ssize num_acked; ngtcp2_conn_stat *cstat = &conn->cstat; @@ -5431,7 +5319,7 @@ static int conn_recv_ack(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_ack *fr, return NGTCP2_ERR_PROTO; } - rv = ngtcp2_pkt_validate_ack(fr); + rv = ngtcp2_pkt_validate_ack(fr, conn->local.settings.initial_pkt_num); if (rv != 0) { return rv; } @@ -5443,7 +5331,6 @@ static int conn_recv_ack(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_ack *fr, if (num_acked < 0) { /* TODO assert this */ assert(ngtcp2_err_is_fatal((int)num_acked)); - ngtcp2_frame_chain_list_objalloc_del(frc, &conn->frc_objalloc, conn->mem); return (int)num_acked; } @@ -5778,10 +5665,10 @@ decrypt_hp(ngtcp2_pkt_hd *hd, uint8_t *dest, const ngtcp2_crypto_cipher *hp, * NGTCP2_ERR_CRYPTO * TLS backend reported error */ -static int conn_emit_pending_crypto_data(ngtcp2_conn *conn, - ngtcp2_crypto_level crypto_level, - ngtcp2_strm *strm, - uint64_t rx_offset) { +static int +conn_emit_pending_crypto_data(ngtcp2_conn *conn, + ngtcp2_encryption_level encryption_level, + ngtcp2_strm *strm, uint64_t rx_offset) { size_t datalen; const uint8_t *data; int rv; @@ -5801,7 +5688,8 @@ static int conn_emit_pending_crypto_data(ngtcp2_conn *conn, offset = rx_offset; rx_offset += datalen; - rv = conn_call_recv_crypto_data(conn, crypto_level, offset, data, datalen); + rv = conn_call_recv_crypto_data(conn, encryption_level, offset, data, + datalen); if (rv != 0) { return rv; } @@ -5816,13 +5704,13 @@ static int conn_emit_pending_crypto_data(ngtcp2_conn *conn, */ static int conn_recv_connection_close(ngtcp2_conn *conn, ngtcp2_connection_close *fr) { - ngtcp2_connection_close_error *ccerr = &conn->rx.ccerr; + ngtcp2_ccerr *ccerr = &conn->rx.ccerr; conn->state = NGTCP2_CS_DRAINING; if (fr->type == 
NGTCP2_FRAME_CONNECTION_CLOSE) { - ccerr->type = NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT; + ccerr->type = NGTCP2_CCERR_TYPE_TRANSPORT; } else { - ccerr->type = NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_APPLICATION; + ccerr->type = NGTCP2_CCERR_TYPE_APPLICATION; } ccerr->error_code = fr->error_code; ccerr->frame_type = fr->frame_type; @@ -5834,16 +5722,14 @@ static int conn_recv_connection_close(ngtcp2_conn *conn, } if (ccerr->reason == NULL) { - ccerr->reason = ngtcp2_mem_malloc( - conn->mem, NGTCP2_CONNECTION_CLOSE_ERROR_MAX_REASONLEN); + ccerr->reason = ngtcp2_mem_malloc(conn->mem, NGTCP2_CCERR_MAX_REASONLEN); if (ccerr->reason == NULL) { return NGTCP2_ERR_NOMEM; } } - ccerr->reasonlen = - ngtcp2_min(fr->reasonlen, NGTCP2_CONNECTION_CLOSE_ERROR_MAX_REASONLEN); - ngtcp2_cpymem(ccerr->reason, fr->reason, ccerr->reasonlen); + ccerr->reasonlen = ngtcp2_min(fr->reasonlen, NGTCP2_CCERR_MAX_REASONLEN); + ngtcp2_cpymem((uint8_t *)ccerr->reason, fr->reason, ccerr->reasonlen); return 0; } @@ -5872,7 +5758,9 @@ static void conn_recv_path_challenge(ngtcp2_conn *conn, const ngtcp2_path *path, static void conn_reset_congestion_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { conn_reset_conn_stat_cc(conn, &conn->cstat); - conn->cc.reset(&conn->cc, &conn->cstat, ts); + if (conn->cc.reset) { + conn->cc.reset(&conn->cc, &conn->cstat, ts); + } if (conn->hs_pktns) { ngtcp2_rtb_reset_cc_state(&conn->hs_pktns->rtb, @@ -5887,7 +5775,6 @@ static void conn_reset_congestion_state(ngtcp2_conn *conn, ngtcp2_tstamp ts) { static int conn_recv_path_response(ngtcp2_conn *conn, ngtcp2_path_response *fr, ngtcp2_tstamp ts) { int rv; - ngtcp2_duration pto, timeout; ngtcp2_pv *pv = conn->pv, *npv; uint8_t ent_flags; @@ -5904,53 +5791,52 @@ static int conn_recv_path_response(ngtcp2_conn *conn, ngtcp2_path_response *fr, if (!(pv->flags & NGTCP2_PV_FLAG_DONT_CARE)) { if (!(pv->flags & NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE)) { - if (pv->dcid.seq != conn->dcid.current.seq) { - assert(conn->dcid.current.cid.datalen); + assert(!conn->server); + assert(pv->dcid.seq != conn->dcid.current.seq); + assert(conn->dcid.current.cid.datalen); - rv = conn_retire_dcid(conn, &conn->dcid.current, ts); - if (rv != 0) { - return rv; - } - ngtcp2_dcid_copy(&conn->dcid.current, &pv->dcid); + rv = conn_retire_dcid(conn, &conn->dcid.current, ts); + if (rv != 0) { + return rv; } + ngtcp2_dcid_copy(&conn->dcid.current, &pv->dcid); + conn_reset_congestion_state(conn, ts); conn_reset_ecn_validation_state(conn); } - if (ngtcp2_path_eq(&pv->dcid.ps.path, &conn->dcid.current.ps.path)) { - conn->dcid.current.flags |= NGTCP2_DCID_FLAG_PATH_VALIDATED; - } + assert(ngtcp2_path_eq(&pv->dcid.ps.path, &conn->dcid.current.ps.path)); - rv = conn_call_path_validation(conn, pv, - NGTCP2_PATH_VALIDATION_RESULT_SUCCESS); - if (rv != 0) { - return rv; - } + conn->dcid.current.flags |= NGTCP2_DCID_FLAG_PATH_VALIDATED; if (!conn->local.settings.no_pmtud) { ngtcp2_conn_stop_pmtud(conn); - if (!(pv->flags & NGTCP2_PV_ENTRY_FLAG_UNDERSIZED)) { + if (!(ent_flags & NGTCP2_PV_ENTRY_FLAG_UNDERSIZED)) { rv = conn_start_pmtud(conn); if (rv != 0) { return rv; } } } + + if (!(ent_flags & NGTCP2_PV_ENTRY_FLAG_UNDERSIZED)) { + rv = conn_call_path_validation(conn, pv, + NGTCP2_PATH_VALIDATION_RESULT_SUCCESS); + if (rv != 0) { + return rv; + } + } } if (pv->flags & NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE) { - pto = conn_compute_pto(conn, &conn->pktns); - timeout = 3 * ngtcp2_max(pto, pv->fallback_pto); - if (ent_flags & NGTCP2_PV_ENTRY_FLAG_UNDERSIZED) { assert(conn->server); /* Validate 
path again */ - rv = ngtcp2_pv_new(&npv, &pv->dcid, timeout, - NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE | - NGTCP2_PV_FLAG_MTU_PROBE, - &conn->log, conn->mem); + rv = ngtcp2_pv_new(&npv, &pv->dcid, conn_compute_pv_timeout(conn), + NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE, &conn->log, + conn->mem); if (rv != 0) { return rv; } @@ -5959,7 +5845,8 @@ static int conn_recv_path_response(ngtcp2_conn *conn, ngtcp2_path_response *fr, ngtcp2_dcid_copy(&npv->fallback_dcid, &pv->fallback_dcid); npv->fallback_pto = pv->fallback_pto; } else { - rv = ngtcp2_pv_new(&npv, &pv->fallback_dcid, timeout, + rv = ngtcp2_pv_new(&npv, &pv->fallback_dcid, + conn_compute_pv_timeout_pto(conn, pv->fallback_pto), NGTCP2_PV_FLAG_DONT_CARE, &conn->log, conn->mem); if (rv != 0) { return rv; @@ -5999,8 +5886,7 @@ static size_t pkt_num_bits(size_t pkt_numlen) { case 4: return 32; default: - assert(0); - abort(); + ngtcp2_unreachable(); } } @@ -6019,17 +5905,7 @@ static int pktns_pkt_num_is_duplicate(ngtcp2_pktns *pktns, int64_t pkt_num) { static int pktns_commit_recv_pkt_num(ngtcp2_pktns *pktns, int64_t pkt_num, int ack_eliciting, ngtcp2_tstamp ts) { int rv; - - if (ack_eliciting && pktns->rx.max_ack_eliciting_pkt_num + 1 != pkt_num) { - ngtcp2_acktr_immediate_ack(&pktns->acktr); - } - if (pktns->rx.max_pkt_num < pkt_num) { - pktns->rx.max_pkt_num = pkt_num; - pktns->rx.max_pkt_ts = ts; - } - if (ack_eliciting && pktns->rx.max_ack_eliciting_pkt_num < pkt_num) { - pktns->rx.max_ack_eliciting_pkt_num = pkt_num; - } + ngtcp2_range r; rv = ngtcp2_gaptr_push(&pktns->rx.pngap, (uint64_t)pkt_num, 1); if (rv != 0) { @@ -6040,6 +5916,30 @@ static int pktns_commit_recv_pkt_num(ngtcp2_pktns *pktns, int64_t pkt_num, ngtcp2_gaptr_drop_first_gap(&pktns->rx.pngap); } + if (ack_eliciting) { + if (pktns->rx.max_ack_eliciting_pkt_num != -1) { + if (pkt_num < pktns->rx.max_ack_eliciting_pkt_num) { + ngtcp2_acktr_immediate_ack(&pktns->acktr); + } else if (pkt_num > pktns->rx.max_ack_eliciting_pkt_num) { + r = ngtcp2_gaptr_get_first_gap_after( + &pktns->rx.pngap, (uint64_t)pktns->rx.max_ack_eliciting_pkt_num); + + if (r.begin < (uint64_t)pkt_num) { + ngtcp2_acktr_immediate_ack(&pktns->acktr); + } + } + } + + if (pktns->rx.max_ack_eliciting_pkt_num < pkt_num) { + pktns->rx.max_ack_eliciting_pkt_num = pkt_num; + } + } + + if (pktns->rx.max_pkt_num < pkt_num) { + pktns->rx.max_pkt_num = pkt_num; + pktns->rx.max_pkt_ts = ts; + } + return 0; } @@ -6047,9 +5947,9 @@ static int pktns_commit_recv_pkt_num(ngtcp2_pktns *pktns, int64_t pkt_num, * verify_token verifies |hd| contains |token| in its token field. It * returns 0 if it succeeds, or NGTCP2_ERR_PROTO. */ -static int verify_token(const ngtcp2_vec *token, const ngtcp2_pkt_hd *hd) { - if (token->len == hd->token.len && - ngtcp2_cmemeq(token->base, hd->token.base, token->len)) { +static int verify_token(const uint8_t *token, size_t tokenlen, + const ngtcp2_pkt_hd *hd) { + if (tokenlen == hd->tokenlen && ngtcp2_cmemeq(token, hd->token, tokenlen)) { return 0; } return NGTCP2_ERR_PROTO; @@ -6071,34 +5971,80 @@ static void pktns_increase_ecn_counts(ngtcp2_pktns *pktns, } /* - * vneg_other_versions_includes returns nonzero if |other_versions| of - * length |other_versionslen| includes |version|. |other_versions| is - * the wire image of other_versions field of version_information - * transport parameter, and each version is encoded in network byte - * order. + * vneg_available_versions_includes returns nonzero if + * |available_versions| of length |available_versionslen| includes + * |version|. 
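The pktns_commit_recv_pkt_num rework in this hunk makes the immediate-ACK decision explicit: acknowledge right away when an ack-eliciting packet arrives out of order, or when it leaves unreceived packet numbers behind the previous highest ack-eliciting one. A standalone sketch of that decision (plain C; the gap search the patch performs with ngtcp2_gaptr_get_first_gap_after is reduced to a caller-supplied flag):

  #include <stdbool.h>
  #include <stdint.h>

  /* |max_ack_eliciting| is the highest ack-eliciting packet number seen
     so far, or -1 if none.  |gap_behind| says whether unreceived packet
     numbers remain between that maximum and |pkt_num|. */
  bool needs_immediate_ack(int64_t max_ack_eliciting, int64_t pkt_num,
                           bool gap_behind) {
    if (max_ack_eliciting == -1) {
      return false;                      /* first ack-eliciting packet */
    }
    if (pkt_num < max_ack_eliciting) {
      return true;                       /* reordered packet */
    }
    return pkt_num > max_ack_eliciting && gap_behind; /* hole behind it */
  }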
|available_versions| is the wire image of + * available_versions field of version_information transport + * parameter, and each version is encoded in network byte order. */ -static int vneg_other_versions_includes(const uint8_t *other_versions, - size_t other_versionslen, - uint32_t version) { +static int vneg_available_versions_includes(const uint8_t *available_versions, + size_t available_versionslen, + uint32_t version) { size_t i; + uint32_t v; + + assert(!(available_versionslen & 0x3)); + + if (available_versionslen == 0) { + return 0; + } + + for (i = 0; i < available_versionslen; i += sizeof(uint32_t)) { + available_versions = ngtcp2_get_uint32(&v, available_versions); + + if (version == v) { + return 1; + } + } + + return 0; +} + +/* + * conn_verify_fixed_bit verifies that fixed bit in |hd| is + * acceptable. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * NGTCP2_ERR_INVALID_ARGUMENT + * Clearing fixed bit is not permitted. + */ +static int conn_verify_fixed_bit(ngtcp2_conn *conn, ngtcp2_pkt_hd *hd) { + if (!(hd->flags & NGTCP2_PKT_FLAG_FIXED_BIT_CLEAR)) { + return 0; + } - assert(!(other_versionslen & 0x3)); + if (conn->server) { + switch (hd->type) { + case NGTCP2_PKT_INITIAL: + case NGTCP2_PKT_0RTT: + case NGTCP2_PKT_HANDSHAKE: + /* TODO we cannot determine whether a token comes from NEW_TOKEN + frame or Retry packet. RFC 9287 requires that a token from + NEW_TOKEN. */ + if (!(conn->flags & NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED) && + (conn->local.settings.token_type != NGTCP2_TOKEN_TYPE_NEW_TOKEN || + !conn->local.settings.tokenlen)) { + return NGTCP2_ERR_INVALID_ARGUMENT; + } - if (other_versionslen == 0) { - return 0; + break; + } } - for (i = 0; i < other_versionslen; i += sizeof(uint32_t)) { - if (version == ngtcp2_get_uint32(&other_versions[i])) { - return 1; - } + /* TODO we have no information that we enabled grease_quic_bit in + the previous connection. */ + if (!conn->local.transport_params.grease_quic_bit) { + return NGTCP2_ERR_INVALID_ARGUMENT; } return 0; } -static int conn_recv_crypto(ngtcp2_conn *conn, ngtcp2_crypto_level crypto_level, - ngtcp2_strm *strm, const ngtcp2_crypto *fr); +static int conn_recv_crypto(ngtcp2_conn *conn, + ngtcp2_encryption_level encryption_level, + ngtcp2_strm *strm, const ngtcp2_stream *fr); static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, const ngtcp2_pkt_info *pi, const uint8_t *pkt, @@ -6164,7 +6110,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_decrypt decrypt; ngtcp2_pktns *pktns; ngtcp2_strm *crypto; - ngtcp2_crypto_level crypto_level; + ngtcp2_encryption_level encryption_level; int invalid_reserved_bits = 0; if (pktlen == 0) { @@ -6210,7 +6156,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, /* Receiving Version Negotiation packet after getting Handshake packet from server is invalid. */ - if (conn->flags & NGTCP2_CONN_FLAG_CONN_ID_NEGOTIATED) { + if (conn->flags & NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED) { return NGTCP2_ERR_DISCARD_PKT; } @@ -6244,9 +6190,13 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, return NGTCP2_ERR_DISCARD_PKT; } + if (conn_verify_fixed_bit(conn, &hd) != 0) { + return NGTCP2_ERR_DISCARD_PKT; + } + /* Receiving Retry packet after getting Initial packet from server is invalid. 
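conn_verify_fixed_bit above implements the RFC 9287 rule: a packet with the fixed bit cleared is only acceptable if this endpoint offered the grease_quic_bit transport parameter (plus the server-side token caveat noted in the TODO). A standalone sketch of the core check (plain C; it deliberately omits that server-side NEW_TOKEN subtlety):

  #include <stdbool.h>
  #include <stdint.h>

  #define HDR_FIXED_BIT 0x40 /* second-most-significant bit of byte 0 */

  /* RFC 9287: accept a cleared fixed bit only if we advertised
     grease_quic_bit; otherwise the packet must be dropped. */
  bool fixed_bit_acceptable(uint8_t first_byte, bool grease_quic_bit_sent) {
    if (first_byte & HDR_FIXED_BIT) {
      return true; /* fixed bit set: always fine */
    }
    return grease_quic_bit_sent;
  }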
*/ - if (conn->flags & NGTCP2_CONN_FLAG_CONN_ID_NEGOTIATED) { + if (conn->flags & NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED) { return NGTCP2_ERR_DISCARD_PKT; } @@ -6285,9 +6235,13 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, return NGTCP2_ERR_DISCARD_PKT; } + if (conn_verify_fixed_bit(conn, &hd) != 0) { + return NGTCP2_ERR_DISCARD_PKT; + } + /* Quoted from spec: if subsequent packets of those types include a different Source Connection ID, they MUST be discarded. */ - if ((conn->flags & NGTCP2_CONN_FLAG_CONN_ID_NEGOTIATED) && + if ((conn->flags & NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED) && !ngtcp2_cid_eq(&conn->dcid.current.cid, &hd.scid)) { ngtcp2_log_rx_pkt_hd(&conn->log, &hd); ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, @@ -6305,7 +6259,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, return NGTCP2_ERR_DISCARD_PKT; } - if (conn->flags & NGTCP2_CONN_FLAG_CONN_ID_NEGOTIATED) { + if (conn->flags & NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED) { if (conn->early.ckm) { ngtcp2_ssize nread2; /* TODO Avoid to parse header twice. */ @@ -6351,15 +6305,16 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, NGTCP2_MAX_UDP_PAYLOAD_SIZE, dgramlen); return NGTCP2_ERR_DISCARD_PKT; } - if (conn->local.settings.token.len) { - rv = verify_token(&conn->local.settings.token, &hd); + if (conn->local.settings.tokenlen) { + rv = verify_token(conn->local.settings.token, + conn->local.settings.tokenlen, &hd); if (rv != 0) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, "packet was ignored because token is invalid"); return NGTCP2_ERR_DISCARD_PKT; } } - if ((conn->flags & NGTCP2_CONN_FLAG_CONN_ID_NEGOTIATED) == 0) { + if ((conn->flags & NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED) == 0) { /* Set rcid here so that it is available to callback. 
If this packet is discarded later in this function and no packet is processed in this connection attempt so far, connection @@ -6372,7 +6327,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, } } } else { - if (hd.token.len != 0) { + if (hd.tokenlen != 0) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, "packet was ignored because token is not empty"); return NGTCP2_ERR_DISCARD_PKT; @@ -6380,9 +6335,9 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, if (hd.version != conn->client_chosen_version && !conn->negotiated_version && conn->vneg.version != hd.version) { - if (!vneg_other_versions_includes(conn->vneg.other_versions, - conn->vneg.other_versionslen, - hd.version)) { + if (!vneg_available_versions_includes(conn->vneg.available_versions, + conn->vneg.available_versionslen, + hd.version)) { return NGTCP2_ERR_DISCARD_PKT; } @@ -6402,7 +6357,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, pktns = conn->in_pktns; crypto = &pktns->crypto.strm; - crypto_level = NGTCP2_CRYPTO_LEVEL_INITIAL; + encryption_level = NGTCP2_ENCRYPTION_LEVEL_INITIAL; if (hd.version == conn->client_chosen_version) { ckm = pktns->crypto.rx.ckm; @@ -6441,7 +6396,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, pktns = conn->hs_pktns; crypto = &pktns->crypto.strm; - crypto_level = NGTCP2_CRYPTO_LEVEL_HANDSHAKE; + encryption_level = NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE; ckm = pktns->crypto.rx.ckm; hp_ctx = &pktns->crypto.rx.hp_ctx; @@ -6543,8 +6498,9 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, switch (hd.type) { case NGTCP2_PKT_INITIAL: - if (!conn->server || ((conn->flags & NGTCP2_CONN_FLAG_CONN_ID_NEGOTIATED) && - !ngtcp2_cid_eq(&conn->rcid, &hd.dcid))) { + if (!conn->server || + ((conn->flags & NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED) && + !ngtcp2_cid_eq(&conn->rcid, &hd.dcid))) { rv = conn_verify_dcid(conn, NULL, &hd); if (rv != 0) { if (ngtcp2_err_is_fatal(rv)) { @@ -6568,7 +6524,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, } break; default: - assert(0); + ngtcp2_unreachable(); } if (payloadlen == 0) { @@ -6580,8 +6536,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, } if (hd.type == NGTCP2_PKT_INITIAL && - !(conn->flags & NGTCP2_CONN_FLAG_CONN_ID_NEGOTIATED)) { - conn->flags |= NGTCP2_CONN_FLAG_CONN_ID_NEGOTIATED; + !(conn->flags & NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED)) { + conn->flags |= NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED; if (!conn->server) { conn->dcid.current.cid = hd.scid; } @@ -6623,7 +6579,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, break; case NGTCP2_FRAME_CRYPTO: if (!conn->server && !conn->negotiated_version && - ngtcp2_vec_len(fr->crypto.data, fr->crypto.datacnt)) { + ngtcp2_vec_len(fr->stream.data, fr->stream.datacnt)) { conn->negotiated_version = hd.version; ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, @@ -6631,7 +6587,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, conn->negotiated_version); } - rv = conn_recv_crypto(conn, crypto_level, crypto, &fr->crypto); + rv = conn_recv_crypto(conn, encryption_level, crypto, &fr->stream); if (rv != 0) { return rv; } @@ -6653,9 +6609,9 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_qlog_write_frame(&conn->qlog, fr); } - if (conn->server && hd.type == NGTCP2_PKT_HANDSHAKE) { - /* Successful processing of Handshake packet from client verifies - source address. 
*/ + if (hd.type == NGTCP2_PKT_HANDSHAKE) { + /* Successful processing of Handshake packet from a remote + endpoint validates its source address. */ conn->dcid.current.flags |= NGTCP2_DCID_FLAG_PATH_VALIDATED; } @@ -6740,7 +6696,7 @@ static ngtcp2_ssize conn_recv_handshake_cpkt(ngtcp2_conn *conn, if ((pkt[0] & NGTCP2_HEADER_FORM_BIT) && pktlen > 4) { /* Not a Version Negotiation packet */ - version = ngtcp2_get_uint32(&pkt[1]); + ngtcp2_get_uint32(&version, &pkt[1]); if (ngtcp2_pkt_get_type_long(version, pkt[0]) == NGTCP2_PKT_INITIAL) { if (conn->server) { if (is_unrecoverable_error((int)nread)) { @@ -6863,7 +6819,7 @@ static int conn_emit_pending_stream_data(ngtcp2_conn *conn, ngtcp2_strm *strm, int rv; uint64_t offset; uint32_t sdflags; - int handshake_completed = conn_is_handshake_completed(conn); + int handshake_completed = conn_is_tls_handshake_completed(conn); if (!strm->rx.rob) { return 0; @@ -6892,7 +6848,7 @@ static int conn_emit_pending_stream_data(ngtcp2_conn *conn, ngtcp2_strm *strm, sdflags |= NGTCP2_STREAM_DATA_FLAG_FIN; } if (!handshake_completed) { - sdflags |= NGTCP2_STREAM_DATA_FLAG_EARLY; + sdflags |= NGTCP2_STREAM_DATA_FLAG_0RTT; } rv = conn_call_recv_stream_data(conn, strm, sdflags, offset, data, datalen); @@ -6909,8 +6865,9 @@ static int conn_emit_pending_stream_data(ngtcp2_conn *conn, ngtcp2_strm *strm, * |rx_offset_base| is the offset in the entire TLS handshake stream. * fr->offset specifies the offset in each encryption level. * |max_rx_offset| is, if it is nonzero, the maximum offset in the - * entire TLS handshake stream that |fr| can carry. |crypto_level| is - * the encryption level where this data is received. + * entire TLS handshake stream that |fr| can carry. + * |encryption_level| is the encryption level where this data is + * received. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -6926,8 +6883,9 @@ static int conn_emit_pending_stream_data(ngtcp2_conn *conn, ngtcp2_strm *strm, * NGTCP2_ERR_CALLBACK_FAILURE * User-defined callback function failed. 
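conn_recv_handshake_cpkt above now reads the version through the two-argument ngtcp2_get_uint32; the underlying operation is just pulling a network-byte-order 32-bit value out of bytes 1..4 of a long-header packet. A standalone sketch (plain C, POSIX ntohl assumed):

  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>
  #include <arpa/inet.h> /* ntohl */

  /* Long-header QUIC packets carry the version in bytes 1..4 in network
     byte order.  Returns 0 when the buffer is too short to contain it. */
  uint32_t read_long_header_version(const uint8_t *pkt, size_t pktlen) {
    uint32_t v;
    if (pktlen < 5) {
      return 0;
    }
    memcpy(&v, &pkt[1], sizeof(v));
    return ntohl(v);
  }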
*/ -static int conn_recv_crypto(ngtcp2_conn *conn, ngtcp2_crypto_level crypto_level, - ngtcp2_strm *crypto, const ngtcp2_crypto *fr) { +static int conn_recv_crypto(ngtcp2_conn *conn, + ngtcp2_encryption_level encryption_level, + ngtcp2_strm *crypto, const ngtcp2_stream *fr) { uint64_t fr_end_offset; uint64_t rx_offset; int rv; @@ -6947,8 +6905,9 @@ static int conn_recv_crypto(ngtcp2_conn *conn, ngtcp2_crypto_level crypto_level, if (fr_end_offset <= rx_offset) { if (conn->server && !(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_EARLY_RETRANSMIT) && - crypto_level == NGTCP2_CRYPTO_LEVEL_INITIAL) { - /* recovery draft: Speeding Up Handshake Completion + encryption_level == NGTCP2_ENCRYPTION_LEVEL_INITIAL) { + /* https://datatracker.ietf.org/doc/html/rfc9002#section-6.2.3: + Speeding Up Handshake Completion When a server receives an Initial packet containing duplicate CRYPTO data, it can assume the client did not receive all of @@ -6979,17 +6938,16 @@ static int conn_recv_crypto(ngtcp2_conn *conn, ngtcp2_crypto_level crypto_level, uint64_t offset = rx_offset; rx_offset += datalen; - rv = ngtcp2_strm_update_rx_offset(crypto, rx_offset); - if (rv != 0) { - return rv; - } + ngtcp2_strm_update_rx_offset(crypto, rx_offset); - rv = conn_call_recv_crypto_data(conn, crypto_level, offset, data, datalen); + rv = conn_call_recv_crypto_data(conn, encryption_level, offset, data, + datalen); if (rv != 0) { return rv; } - rv = conn_emit_pending_crypto_data(conn, crypto_level, crypto, rx_offset); + rv = conn_emit_pending_crypto_data(conn, encryption_level, crypto, + rx_offset); if (rv != 0) { return rv; } @@ -7106,14 +7064,15 @@ static int conn_recv_stream(ngtcp2_conn *conn, const ngtcp2_stream *fr) { return rv; } + if (!bidi) { + ngtcp2_strm_shutdown(strm, NGTCP2_STRM_FLAG_SHUT_WR); + strm->flags |= NGTCP2_STRM_FLAG_FIN_ACKED; + } + rv = conn_call_stream_open(conn, strm); if (rv != 0) { return rv; } - - if (!bidi) { - ngtcp2_strm_shutdown(strm, NGTCP2_STRM_FLAG_SHUT_WR); - } } fr_end_offset = fr->offset + datalen; @@ -7144,7 +7103,7 @@ static int conn_recv_stream(ngtcp2_conn *conn, const ngtcp2_stream *fr) { return NGTCP2_ERR_FINAL_SIZE; } - if (strm->flags & NGTCP2_STRM_FLAG_RECV_RST) { + if (strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM_RECVED) { return 0; } @@ -7170,7 +7129,7 @@ static int conn_recv_stream(ngtcp2_conn *conn, const ngtcp2_stream *fr) { return 0; } - if (strm->flags & NGTCP2_STRM_FLAG_RECV_RST) { + if (strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM_RECVED) { return 0; } } @@ -7186,10 +7145,7 @@ static int conn_recv_stream(ngtcp2_conn *conn, const ngtcp2_stream *fr) { datalen -= ncut; rx_offset += datalen; - rv = ngtcp2_strm_update_rx_offset(strm, rx_offset); - if (rv != 0) { - return rv; - } + ngtcp2_strm_update_rx_offset(strm, rx_offset); } else { data = NULL; datalen = 0; @@ -7206,8 +7162,8 @@ static int conn_recv_stream(ngtcp2_conn *conn, const ngtcp2_stream *fr) { if (fin) { sdflags |= NGTCP2_STREAM_DATA_FLAG_FIN; } - if (!conn_is_handshake_completed(conn)) { - sdflags |= NGTCP2_STREAM_DATA_FLAG_EARLY; + if (!conn_is_tls_handshake_completed(conn)) { + sdflags |= NGTCP2_STREAM_DATA_FLAG_0RTT; } rv = conn_call_recv_stream_data(conn, strm, sdflags, offset, data, (size_t)datalen); @@ -7242,25 +7198,16 @@ static int conn_recv_stream(ngtcp2_conn *conn, const ngtcp2_stream *fr) { */ static int conn_reset_stream(ngtcp2_conn *conn, ngtcp2_strm *strm, uint64_t app_error_code) { - int rv; - ngtcp2_frame_chain *frc; - ngtcp2_pktns *pktns = &conn->pktns; + strm->flags |= NGTCP2_STRM_FLAG_SEND_RESET_STREAM; + 
strm->tx.reset_stream_app_error_code = app_error_code; - rv = ngtcp2_frame_chain_objalloc_new(&frc, &conn->frc_objalloc); - if (rv != 0) { - return rv; + if (ngtcp2_strm_is_tx_queued(strm)) { + return 0; } - frc->fr.type = NGTCP2_FRAME_RESET_STREAM; - frc->fr.reset_stream.stream_id = strm->stream_id; - frc->fr.reset_stream.app_error_code = app_error_code; - frc->fr.reset_stream.final_size = strm->tx.offset; - - /* TODO This prepends RESET_STREAM to pktns->tx.frq. */ - frc->next = pktns->tx.frq; - pktns->tx.frq = frc; + strm->cycle = conn_tx_strmq_first_cycle(conn); - return 0; + return ngtcp2_conn_tx_strmq_push(conn, strm); } /* @@ -7275,24 +7222,16 @@ static int conn_reset_stream(ngtcp2_conn *conn, ngtcp2_strm *strm, */ static int conn_stop_sending(ngtcp2_conn *conn, ngtcp2_strm *strm, uint64_t app_error_code) { - int rv; - ngtcp2_frame_chain *frc; - ngtcp2_pktns *pktns = &conn->pktns; + strm->flags |= NGTCP2_STRM_FLAG_SEND_STOP_SENDING; + strm->tx.stop_sending_app_error_code = app_error_code; - rv = ngtcp2_frame_chain_objalloc_new(&frc, &conn->frc_objalloc); - if (rv != 0) { - return rv; + if (ngtcp2_strm_is_tx_queued(strm)) { + return 0; } - frc->fr.type = NGTCP2_FRAME_STOP_SENDING; - frc->fr.stop_sending.stream_id = strm->stream_id; - frc->fr.stop_sending.app_error_code = app_error_code; - - /* TODO This prepends STOP_SENDING to pktns->tx.frq. */ - frc->next = pktns->tx.frq; - pktns->tx.frq = frc; + strm->cycle = conn_tx_strmq_first_cycle(conn); - return 0; + return ngtcp2_conn_tx_strmq_push(conn, strm); } /* @@ -7427,7 +7366,7 @@ static int conn_recv_reset_stream(ngtcp2_conn *conn, return NGTCP2_ERR_FINAL_SIZE; } - if (strm->flags & NGTCP2_STRM_FLAG_RECV_RST) { + if (strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM_RECVED) { return 0; } @@ -7458,7 +7397,8 @@ static int conn_recv_reset_stream(ngtcp2_conn *conn, ngtcp2_conn_extend_max_offset(conn, datalen); strm->rx.last_offset = fr->final_size; - strm->flags |= NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_RECV_RST; + strm->flags |= + NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_RESET_STREAM_RECVED; ngtcp2_strm_set_app_error_code(strm, fr->app_error_code); @@ -7547,19 +7487,14 @@ static int conn_recv_stop_sending(ngtcp2_conn *conn, /* No RESET_STREAM is required if we have sent FIN and all data have been acknowledged. */ if (!ngtcp2_strm_is_all_tx_data_fin_acked(strm) && - !(strm->flags & NGTCP2_STRM_FLAG_SENT_RST)) { + !(strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM)) { rv = conn_reset_stream(conn, strm, fr->app_error_code); if (rv != 0) { return rv; } } - strm->flags |= NGTCP2_STRM_FLAG_SHUT_WR | NGTCP2_STRM_FLAG_SENT_RST; - - if (ngtcp2_strm_is_tx_queued(strm) && !ngtcp2_strm_streamfrq_empty(strm)) { - assert(conn->tx.strmq_nretrans); - --conn->tx.strmq_nretrans; - } + strm->flags |= NGTCP2_STRM_FLAG_SHUT_WR | NGTCP2_STRM_FLAG_RESET_STREAM; ngtcp2_strm_streamfrq_clear(strm); @@ -8042,11 +7977,11 @@ static int conn_recv_new_token(ngtcp2_conn *conn, const ngtcp2_new_token *fr) { return NGTCP2_ERR_PROTO; } - if (fr->token.len == 0) { + if (fr->tokenlen == 0) { return NGTCP2_ERR_FRAME_ENCODING; } - return conn_call_recv_new_token(conn, &fr->token); + return conn_call_recv_new_token(conn, fr->token, fr->tokenlen); } /* @@ -8087,6 +8022,144 @@ static int conn_recv_streams_blocked_uni(ngtcp2_conn *conn, return 0; } +/* + * conn_recv_stream_data_blocked processes the incoming + * STREAM_DATA_BLOCKED frame |fr|. 
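The rewritten conn_reset_stream and conn_stop_sending above no longer build frames eagerly; they only mark the stream with a SEND_* flag, record the application error code, and make sure the stream is queued for transmission, leaving frame construction to the packet writer. A standalone sketch of that deferral pattern (plain C, illustrative types; enqueue stands in for the strmq push):

  #include <stdbool.h>
  #include <stdint.h>

  #define STRM_SEND_RESET_STREAM 0x01u

  /* Illustrative per-stream state. */
  struct strm {
    uint32_t flags;
    bool tx_queued;
    uint64_t reset_app_error_code;
  };

  /* Record the intent to send RESET_STREAM; the frame itself is built
     later when the packet is written.  |enqueue| models pushing the
     stream onto the connection's transmit queue and returns 0 or a
     negative error code. */
  int mark_reset_stream(struct strm *s, uint64_t app_error_code,
                        int (*enqueue)(struct strm *)) {
    s->flags |= STRM_SEND_RESET_STREAM;
    s->reset_app_error_code = app_error_code;
    if (s->tx_queued) {
      return 0; /* already scheduled */
    }
    return enqueue(s);
  }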
+ * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * NGTCP2_ERR_STREAM_STATE + * STREAM_DATA_BLOCKED is received for a local stream which is not + * initiated; or it is received for a local unidirectional stream. + * NGTCP2_ERR_STREAM_LIMIT + * STREAM_DATA_BLOCKED has remote stream ID which is strictly + * greater than the allowed limit. + * NGTCP2_ERR_FLOW_CONTROL + * STREAM_DATA_BLOCKED frame violates flow control limit. + * NGTCP2_ERR_FINAL_SIZE + * The offset is strictly larger than it is permitted. + * NGTCP2_ERR_NOMEM + * Out of memory. + * NGTCP2_ERR_CALLBACK_FAILURE + * User-defined callback function failed. + */ +static int conn_recv_stream_data_blocked(ngtcp2_conn *conn, + ngtcp2_stream_data_blocked *fr) { + int rv; + ngtcp2_strm *strm; + ngtcp2_idtr *idtr; + int local_stream = conn_local_stream(conn, fr->stream_id); + int bidi = bidi_stream(fr->stream_id); + uint64_t datalen; + + if (bidi) { + if (local_stream) { + if (conn->local.bidi.next_stream_id <= fr->stream_id) { + return NGTCP2_ERR_STREAM_STATE; + } + } else if (conn->remote.bidi.max_streams < + ngtcp2_ord_stream_id(fr->stream_id)) { + return NGTCP2_ERR_STREAM_LIMIT; + } + + idtr = &conn->remote.bidi.idtr; + } else { + if (local_stream) { + return NGTCP2_ERR_STREAM_STATE; + } + if (conn->remote.uni.max_streams < ngtcp2_ord_stream_id(fr->stream_id)) { + return NGTCP2_ERR_STREAM_LIMIT; + } + + idtr = &conn->remote.uni.idtr; + } + + strm = ngtcp2_conn_find_stream(conn, fr->stream_id); + if (strm == NULL) { + if (local_stream) { + return 0; + } + + rv = ngtcp2_idtr_open(idtr, fr->stream_id); + if (rv != 0) { + if (ngtcp2_err_is_fatal(rv)) { + return rv; + } + assert(rv == NGTCP2_ERR_STREAM_IN_USE); + return 0; + } + + /* Frame is received before we create ngtcp2_strm object. */ + strm = ngtcp2_objalloc_strm_get(&conn->strm_objalloc); + if (strm == NULL) { + return NGTCP2_ERR_NOMEM; + } + rv = ngtcp2_conn_init_stream(conn, strm, fr->stream_id, NULL); + if (rv != 0) { + ngtcp2_objalloc_strm_release(&conn->strm_objalloc, strm); + return rv; + } + + if (!bidi) { + ngtcp2_strm_shutdown(strm, NGTCP2_STRM_FLAG_SHUT_WR); + strm->flags |= NGTCP2_STRM_FLAG_FIN_ACKED; + } + + rv = conn_call_stream_open(conn, strm); + if (rv != 0) { + return rv; + } + } + + if (strm->rx.max_offset < fr->offset) { + return NGTCP2_ERR_FLOW_CONTROL; + } + + if (fr->offset <= strm->rx.last_offset) { + return 0; + } + + if (strm->flags & NGTCP2_STRM_FLAG_SHUT_RD) { + return NGTCP2_ERR_FINAL_SIZE; + } + + datalen = fr->offset - strm->rx.last_offset; + if (datalen) { + if (conn_max_data_violated(conn, datalen)) { + return NGTCP2_ERR_FLOW_CONTROL; + } + + conn->rx.offset += datalen; + + if (strm->flags & NGTCP2_STRM_FLAG_STOP_SENDING) { + ngtcp2_conn_extend_max_offset(conn, datalen); + } + } + + strm->rx.last_offset = fr->offset; + + return 0; +} + +/* + * conn_recv_data_blocked processes the incoming DATA_BLOCKED frame + * |fr|. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * NGTCP2_ERR_FLOW_CONTROL + * It violates connection-level flow control limit. + */ +static int conn_recv_data_blocked(ngtcp2_conn *conn, ngtcp2_data_blocked *fr) { + if (conn->rx.max_offset < fr->offset) { + return NGTCP2_ERR_FLOW_CONTROL; + } + + return 0; +} + /* * conn_select_preferred_addr asks a client application to select a * server address from preferred addresses received from server. 
If a @@ -8103,7 +8176,6 @@ static int conn_recv_streams_blocked_uni(ngtcp2_conn *conn, static int conn_select_preferred_addr(ngtcp2_conn *conn) { ngtcp2_path_storage ps; int rv; - ngtcp2_duration pto, initial_pto, timeout; ngtcp2_pv *pv; ngtcp2_dcid *dcid; @@ -8129,12 +8201,8 @@ static int conn_select_preferred_addr(ngtcp2_conn *conn) { dcid = ngtcp2_ringbuf_get(&conn->dcid.unused.rb, 0); ngtcp2_dcid_set_path(dcid, &ps.path); - pto = conn_compute_pto(conn, &conn->pktns); - initial_pto = conn_compute_initial_pto(conn, &conn->pktns); - timeout = 3 * ngtcp2_max(pto, initial_pto); - - rv = ngtcp2_pv_new(&pv, dcid, timeout, NGTCP2_PV_FLAG_PREFERRED_ADDR, - &conn->log, conn->mem); + rv = ngtcp2_pv_new(&pv, dcid, conn_compute_pv_timeout(conn), + NGTCP2_PV_FLAG_PREFERRED_ADDR, &conn->log, conn->mem); if (rv != 0) { /* TODO Call ngtcp2_dcid_free here if it is introduced */ return rv; @@ -8180,7 +8248,7 @@ static int conn_recv_handshake_done(ngtcp2_conn *conn, ngtcp2_tstamp ts) { assert(conn->remote.transport_params); - if (conn->remote.transport_params->preferred_address_present) { + if (conn->remote.transport_params->preferred_addr_present) { rv = conn_select_preferred_addr(conn); if (rv != 0) { return rv; @@ -8225,6 +8293,8 @@ static int conn_key_phase_changed(ngtcp2_conn *conn, const ngtcp2_pkt_hd *hd) { !(hd->flags & NGTCP2_PKT_FLAG_KEY_PHASE); } +static int conn_initiate_key_update(ngtcp2_conn *conn, ngtcp2_tstamp ts); + /* * conn_prepare_key_update installs new updated keys. */ @@ -8241,12 +8311,12 @@ static int conn_prepare_key_update(ngtcp2_conn *conn, ngtcp2_tstamp ts) { if ((conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_CONFIRMED) && tx_ckm->use_count >= pktns->crypto.ctx.max_encryption && - ngtcp2_conn_initiate_key_update(conn, ts) != 0) { + conn_initiate_key_update(conn, ts) != 0) { return NGTCP2_ERR_AEAD_LIMIT_REACHED; } if ((conn->flags & NGTCP2_CONN_FLAG_KEY_UPDATE_NOT_CONFIRMED) || - (confirmed_ts != UINT64_MAX && confirmed_ts + pto > ts)) { + ngtcp2_tstamp_not_elapsed(confirmed_ts, pto, ts)) { return 0; } @@ -8369,7 +8439,7 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, ngtcp2_dcid dcid, *bound_dcid, *last; ngtcp2_pv *pv; int rv; - ngtcp2_duration pto, initial_pto, timeout; + ngtcp2_duration pto; int require_new_cid; int local_addr_eq; uint32_t remote_addr_cmp; @@ -8427,10 +8497,6 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, "non-probing packet was received from new remote address"); - pto = conn_compute_pto(conn, &conn->pktns); - initial_pto = conn_compute_initial_pto(conn, &conn->pktns); - timeout = 3 * ngtcp2_max(pto, initial_pto); - len = ngtcp2_ringbuf_len(&conn->dcid.bound.rb); for (i = 0; i < len; ++i) { @@ -8482,13 +8548,16 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, dcid.bytes_recv = 0; dcid.flags &= (uint8_t)~NGTCP2_DCID_FLAG_PATH_VALIDATED; } + + ngtcp2_dcid_set_path(&dcid, path); } - ngtcp2_dcid_set_path(&dcid, path); dcid.bytes_recv += dgramlen; - rv = ngtcp2_pv_new(&pv, &dcid, timeout, NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE, - &conn->log, conn->mem); + pto = conn_compute_pto(conn, &conn->pktns); + + rv = ngtcp2_pv_new(&pv, &dcid, conn_compute_pv_timeout_pto(conn, pto), + NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE, &conn->log, conn->mem); if (rv != 0) { return rv; } @@ -8507,11 +8576,6 @@ static int conn_recv_non_probing_pkt_on_new_path(ngtcp2_conn *conn, if (!local_addr_eq || (remote_addr_cmp & (NGTCP2_ADDR_COMPARE_FLAG_ADDR | 
NGTCP2_ADDR_COMPARE_FLAG_FAMILY))) { conn_reset_congestion_state(conn, ts); - } else { - /* For NAT rebinding, keep max_udp_payload_size since client most - likely does not send a padded PATH_CHALLENGE. */ - dcid.max_udp_payload_size = ngtcp2_max( - dcid.max_udp_payload_size, conn->dcid.current.max_udp_payload_size); } ngtcp2_dcid_copy(&conn->dcid.current, &dcid); @@ -8706,6 +8770,61 @@ conn_recv_delayed_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_pkt_info *pi, return 0; } +/* + * conn_allow_path_change_under_disable_active_migration returns + * nonzero if a packet from |path| is acceptable under + * disable_active_migration is on. + */ +static int +conn_allow_path_change_under_disable_active_migration(ngtcp2_conn *conn, + const ngtcp2_path *path) { + uint32_t remote_addr_cmp; + const ngtcp2_preferred_addr *paddr; + ngtcp2_addr addr; + + assert(conn->server); + assert(conn->local.transport_params.disable_active_migration); + + /* If local address does not change, it must be passive migration + (NAT rebinding). */ + if (ngtcp2_addr_eq(&conn->dcid.current.ps.path.local, &path->local)) { + remote_addr_cmp = + ngtcp2_addr_compare(&conn->dcid.current.ps.path.remote, &path->remote); + + return (remote_addr_cmp | NGTCP2_ADDR_COMPARE_FLAG_PORT) == + NGTCP2_ADDR_COMPARE_FLAG_PORT; + } + + /* If local address changes, it must be one of the preferred + addresses. */ + + if (!conn->local.transport_params.preferred_addr_present) { + return 0; + } + + paddr = &conn->local.transport_params.preferred_addr; + + if (paddr->ipv4_present) { + ngtcp2_addr_init(&addr, (const ngtcp2_sockaddr *)&paddr->ipv4, + sizeof(paddr->ipv4)); + + if (ngtcp2_addr_eq(&addr, &path->local)) { + return 1; + } + } + + if (paddr->ipv6_present) { + ngtcp2_addr_init(&addr, (const ngtcp2_sockaddr *)&paddr->ipv6, + sizeof(paddr->ipv6)); + + if (ngtcp2_addr_eq(&addr, &path->local)) { + return 1; + } + } + + return 0; +} + /* * conn_recv_pkt processes a packet contained in the buffer pointed by * |pkt| of length |pktlen|. |pkt| may contain multiple QUIC packets. 
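The subset test (remote_addr_cmp | NGTCP2_ADDR_COMPARE_FLAG_PORT) == NGTCP2_ADDR_COMPARE_FLAG_PORT in the function above is what confines the permitted change to the remote port: OR-ing the PORT bit in and comparing against PORT alone succeeds only when no other compare flag (address or family) is set. Below is a standalone sketch of that mask trick with illustrative flag values, not ngtcp2's definitions.

#include <stdint.h>

#define CMP_FLAG_NONE   0x0u
#define CMP_FLAG_ADDR   0x1u  /* IP address differs */
#define CMP_FLAG_PORT   0x2u  /* port differs */
#define CMP_FLAG_FAMILY 0x4u  /* address family differs */

/* Returns nonzero only when cmp_flags is a subset of {PORT}, i.e. at most
   the remote port changed. */
static int only_port_may_differ(uint32_t cmp_flags) {
  return (cmp_flags | CMP_FLAG_PORT) == CMP_FLAG_PORT;
}

only_port_may_differ() returns 1 for an identical address or a port-only change (the NAT-rebinding case) and 0 as soon as the IP address or family differs, which is when the incoming path is instead matched against a configured preferred address or the packet is rejected.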
@@ -8769,6 +8888,15 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, int new_cid_used = 0; int path_challenge_recved = 0; + if (conn->server && conn->local.transport_params.disable_active_migration && + !ngtcp2_path_eq(&conn->dcid.current.ps.path, path) && + !conn_allow_path_change_under_disable_active_migration(conn, path)) { + ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, + "packet is discarded because active migration is disabled"); + + return NGTCP2_ERR_DISCARD_PKT; + } + if (pkt[0] & NGTCP2_HEADER_FORM_BIT) { nread = ngtcp2_pkt_decode_hd_long(&hd, pkt, pktlen); if (nread < 0) { @@ -8788,6 +8916,10 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, return NGTCP2_ERR_DISCARD_PKT; } + if (conn_verify_fixed_bit(conn, &hd) != 0) { + return NGTCP2_ERR_DISCARD_PKT; + } + pktlen = (size_t)nread + hd.len; /* Quoted from spec: if subsequent packets of those types include @@ -8854,6 +8986,10 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, return NGTCP2_ERR_DISCARD_PKT; } + if (conn_verify_fixed_bit(conn, &hd) != 0) { + return NGTCP2_ERR_DISCARD_PKT; + } + pktns = &conn->pktns; aead = &pktns->crypto.ctx.aead; hp = &pktns->crypto.ctx.hp; @@ -9020,7 +9156,7 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, break; default: /* Unreachable */ - assert(0); + ngtcp2_unreachable(); } } else { rv = conn_verify_dcid(conn, &new_cid_used, &hd); @@ -9118,8 +9254,8 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, non_probing_pkt = 1; break; case NGTCP2_FRAME_CRYPTO: - rv = conn_recv_crypto(conn, NGTCP2_CRYPTO_LEVEL_APPLICATION, - &pktns->crypto.strm, &fr->crypto); + rv = conn_recv_crypto(conn, NGTCP2_ENCRYPTION_LEVEL_1RTT, + &pktns->crypto.strm, &fr->stream); if (rv != 0) { return rv; } @@ -9221,8 +9357,18 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, } non_probing_pkt = 1; break; + case NGTCP2_FRAME_STREAM_DATA_BLOCKED: + rv = conn_recv_stream_data_blocked(conn, &fr->stream_data_blocked); + if (rv != 0) { + return rv; + } + non_probing_pkt = 1; + break; case NGTCP2_FRAME_DATA_BLOCKED: - /* TODO Not implemented yet */ + rv = conn_recv_data_blocked(conn, &fr->data_blocked); + if (rv != 0) { + return rv; + } non_probing_pkt = 1; break; case NGTCP2_FRAME_DATAGRAM: @@ -9468,7 +9614,7 @@ static int conn_sync_stream_data_limit(ngtcp2_conn *conn) { static int conn_handshake_completed(ngtcp2_conn *conn) { int rv; - conn->flags |= NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED_HANDLED; + conn->flags |= NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED; rv = conn_call_handshake_completed(conn); if (rv != 0) { @@ -9637,8 +9783,8 @@ static ngtcp2_ssize conn_read_handshake(ngtcp2_conn *conn, } } - if (conn_is_handshake_completed(conn) && - !(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED_HANDLED)) { + if (conn_is_tls_handshake_completed(conn) && + !(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED)) { rv = conn_handshake_completed(conn); if (rv != 0) { return rv; @@ -9658,27 +9804,22 @@ static ngtcp2_ssize conn_read_handshake(ngtcp2_conn *conn, } /* - * Client ServerHello might not fit into single Initial packet - * (e.g., resuming session with client authentication). If we get - * Client Initial which does not increase offset or it is 0RTT - * packet buffered, perform address validation in order to buffer + * Client Hello might not fit into single Initial packet (e.g., + * resuming session with client authentication). 
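Among the conn_recv_pkt changes above, conn_verify_fixed_bit() is now applied to both long and short header packets before any further processing. The sketch below shows the kind of check involved, with an illustrative helper rather than the ngtcp2 implementation: RFC 9000 requires the Fixed Bit (0x40 in the first byte) to be set, and RFC 9287 relaxes that only when the peers negotiated the grease_quic_bit extension.

#include <stddef.h>
#include <stdint.h>

/* Illustrative check only: accept a cleared fixed bit when greasing was
   negotiated, otherwise require it to be set. */
static int fixed_bit_ok(const uint8_t *pkt, size_t pktlen,
                        int grease_quic_bit) {
  if (pktlen == 0) return 0;
  if (grease_quic_bit) return 1;   /* peer allowed a greased fixed bit */
  return (pkt[0] & 0x40) != 0;     /* QUIC Fixed Bit must be 1 otherwise */
}

A packet failing the check is silently discarded (NGTCP2_ERR_DISCARD_PKT above) rather than treated as a connection error, since it may simply be garbage or an injected datagram.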
If we get Client + * Initial which does not increase offset or it is 0RTT packet + * buffered, perform address validation in order to buffer * validated data only. */ if (ngtcp2_strm_rx_offset(&conn->in_pktns->crypto.strm) == 0) { if (conn->in_pktns->crypto.strm.rx.rob && ngtcp2_rob_data_buffered(conn->in_pktns->crypto.strm.rx.rob)) { /* Address has been validated with token */ - if (conn->local.settings.token.len) { + if (conn->local.settings.tokenlen) { return nread; } return NGTCP2_ERR_RETRY; } - if (conn->in_pktns->rx.buffed_pkts) { - /* 0RTT is buffered, force retry */ - return NGTCP2_ERR_RETRY; - } - /* If neither CRYPTO frame nor 0RTT packet is processed, just - drop connection. */ + /* If CRYPTO frame is not processed, just drop connection. */ return NGTCP2_ERR_DROP_CONN; } @@ -9711,7 +9852,7 @@ static ngtcp2_ssize conn_read_handshake(ngtcp2_conn *conn, conn_discard_initial_state(conn, ts); } - if (!conn_is_handshake_completed(conn)) { + if (!conn_is_tls_handshake_completed(conn)) { /* If server hits amplification limit, it cancels loss detection timer. If server receives a packet from client, the limit is increased and server can send more. If server has @@ -9814,8 +9955,9 @@ int ngtcp2_conn_read_pkt_versioned(ngtcp2_conn *conn, const ngtcp2_path *path, const ngtcp2_pkt_info zero_pi = {0}; (void)pkt_info_version; - conn->log.last_ts = ts; - conn->qlog.last_ts = ts; + assert(!(conn->flags & NGTCP2_CONN_FLAG_PPE_PENDING)); + + conn_update_timestamp(conn, ts); ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, "recv packet len=%zu", pktlen); @@ -9840,7 +9982,6 @@ int ngtcp2_conn_read_pkt_versioned(ngtcp2_conn *conn, const ngtcp2_path *path, switch (conn->state) { case NGTCP2_CS_CLIENT_INITIAL: case NGTCP2_CS_CLIENT_WAIT_HANDSHAKE: - case NGTCP2_CS_CLIENT_TLS_HANDSHAKE_FAILED: nread = conn_read_handshake(conn, path, pi, pkt, pktlen, ts); if (nread < 0) { return (int)nread; @@ -9858,7 +9999,6 @@ int ngtcp2_conn_read_pkt_versioned(ngtcp2_conn *conn, const ngtcp2_path *path, break; case NGTCP2_CS_SERVER_INITIAL: case NGTCP2_CS_SERVER_WAIT_HANDSHAKE: - case NGTCP2_CS_SERVER_TLS_HANDSHAKE_FAILED: if (!ngtcp2_path_eq(&conn->dcid.current.ps.path, path)) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_CON, "ignore packet from unknown path during handshake"); @@ -9899,8 +10039,7 @@ int ngtcp2_conn_read_pkt_versioned(ngtcp2_conn *conn, const ngtcp2_path *path, } break; default: - assert(0); - abort(); + ngtcp2_unreachable(); } return conn_recv_cpkt(conn, path, pi, pkt, pktlen, ts); @@ -9979,7 +10118,7 @@ static int conn_validate_early_transport_params_limits(ngtcp2_conn *conn) { /* * conn_write_handshake writes QUIC handshake packets to the buffer * pointed by |dest| of length |destlen|. |write_datalen| specifies - * the expected length of 0RTT or 1RTT packet payload. Specify 0 to + * the expected length of 0RTT packet payload. Specify 0 to * |write_datalen| if there is no such data. 
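The reworked branch in conn_read_handshake above decides what a server does when the very first Initial leaves the CRYPTO receive offset at zero: out-of-order CRYPTO data that arrived under a validated token is kept, an unvalidated client is asked to Retry, and a packet that produced no CRYPTO data at all now simply drops the connection (the old case that forced a Retry for buffered 0RTT packets is gone). A compact decision sketch with illustrative outcome codes follows; the real code returns nread, NGTCP2_ERR_RETRY, or NGTCP2_ERR_DROP_CONN.

/* Illustrative outcomes for the first Initial that does not advance the
   CRYPTO offset. */
enum first_initial_outcome {
  KEEP_BUFFERED,   /* address already validated by token; keep what was read */
  SEND_RETRY,      /* ask the client to prove its address first */
  DROP_CONNECTION  /* no CRYPTO data was processed at all */
};

static enum first_initial_outcome
first_initial_decision(int crypto_data_buffered, int token_validated) {
  if (crypto_data_buffered)
    return token_validated ? KEEP_BUFFERED : SEND_RETRY;
  return DROP_CONNECTION;
}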
* * This function returns the number of bytes written to the buffer, or @@ -10054,7 +10193,7 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, if (!conn_handshake_probe_left(conn) && conn_cwnd_is_zero(conn)) { destlen = 0; } else { - if (!(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED_HANDLED)) { + if (!(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED)) { pending_early_datalen = conn_retry_early_payloadlen(conn); if (pending_early_datalen) { write_datalen = pending_early_datalen; @@ -10072,7 +10211,7 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, destlen -= (size_t)nwrite; } - if (!conn_is_handshake_completed(conn)) { + if (!conn_is_tls_handshake_completed(conn)) { if (!(conn->flags & NGTCP2_CONN_FLAG_EARLY_DATA_REJECTED)) { nwrite = conn_retransmit_retry_early(conn, pi, dest, destlen, NGTCP2_WRITE_PKT_FLAG_NONE, ts); @@ -10094,6 +10233,10 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, return res; } + if (!(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED)) { + return res; + } + if (!(conn->flags & NGTCP2_CONN_FLAG_TRANSPORT_PARAM_RECVED)) { return NGTCP2_ERR_REQUIRED_TRANSPORT_PARAM; } @@ -10117,10 +10260,10 @@ static ngtcp2_ssize conn_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, assert(conn->remote.transport_params); - if (conn->remote.transport_params->preferred_address_present) { + if (conn->remote.transport_params->preferred_addr_present) { assert(!ngtcp2_ringbuf_full(&conn->dcid.unused.rb)); - paddr = &conn->remote.transport_params->preferred_address; + paddr = &conn->remote.transport_params->preferred_addr; dcid = ngtcp2_ringbuf_push_back(&conn->dcid.unused.rb); ngtcp2_dcid_init(dcid, 1, &paddr->cid, paddr->stateless_reset_token); @@ -10245,19 +10388,10 @@ static ngtcp2_ssize conn_client_write_handshake(ngtcp2_conn *conn, switch (vmsg->type) { case NGTCP2_VMSG_TYPE_STREAM: datalen = ngtcp2_vec_len(vmsg->stream.data, vmsg->stream.datacnt); - send_stream = - conn_retry_early_payloadlen(conn) == 0 && - /* 0 length STREAM frame is allowed */ - (datalen == 0 || - (datalen > 0 && - (vmsg->stream.strm->tx.max_offset - vmsg->stream.strm->tx.offset) && - (conn->tx.max_offset - conn->tx.offset))); + send_stream = conn_retry_early_payloadlen(conn) == 0; if (send_stream) { - write_datalen = - conn_enforce_flow_control(conn, vmsg->stream.strm, datalen); - write_datalen = - ngtcp2_min(write_datalen, NGTCP2_MIN_COALESCED_PAYLOADLEN); - write_datalen += NGTCP2_STREAM_OVERHEAD; + write_datalen = ngtcp2_min(datalen + NGTCP2_STREAM_OVERHEAD, + NGTCP2_MIN_COALESCED_PAYLOADLEN); if (vmsg->stream.flags & NGTCP2_WRITE_STREAM_FLAG_MORE) { wflags |= NGTCP2_WRITE_PKT_FLAG_MORE; @@ -10302,8 +10436,6 @@ static ngtcp2_ssize conn_client_write_handshake(ngtcp2_conn *conn, ngtcp2_pkt_get_type_long(version, dest[0]) == NGTCP2_PKT_INITIAL) { wflags |= NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING; conn->pkt.require_padding = 1; - } else { - conn->pkt.require_padding = 0; } } else { assert(!conn->pktns.crypto.rx.ckm); @@ -10325,11 +10457,17 @@ static ngtcp2_ssize conn_client_write_handshake(ngtcp2_conn *conn, early_spktlen = conn_write_pkt(conn, pi, dest, destlen, vmsg, NGTCP2_PKT_0RTT, wflags, ts); - if (early_spktlen < 0) { switch (early_spktlen) { case NGTCP2_ERR_STREAM_DATA_BLOCKED: - return spktlen; + if (!(wflags & NGTCP2_WRITE_PKT_FLAG_MORE)) { + if (spktlen) { + return spktlen; + } + + break; + } + /* fall through */ case NGTCP2_ERR_WRITE_MORE: conn->pkt.hs_spktlen = spktlen; break; @@ 
-10340,16 +10478,16 @@ static ngtcp2_ssize conn_client_write_handshake(ngtcp2_conn *conn, return spktlen + early_spktlen; } -void ngtcp2_conn_handshake_completed(ngtcp2_conn *conn) { - conn->flags |= NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED; +void ngtcp2_conn_tls_handshake_completed(ngtcp2_conn *conn) { + conn->flags |= NGTCP2_CONN_FLAG_TLS_HANDSHAKE_COMPLETED; if (conn->server) { conn->flags |= NGTCP2_CONN_FLAG_HANDSHAKE_CONFIRMED; } } int ngtcp2_conn_get_handshake_completed(ngtcp2_conn *conn) { - return conn_is_handshake_completed(conn) && - (conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED_HANDLED); + return conn_is_tls_handshake_completed(conn) && + (conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED); } int ngtcp2_conn_sched_ack(ngtcp2_conn *conn, ngtcp2_acktr *acktr, @@ -10391,14 +10529,17 @@ int ngtcp2_accept(ngtcp2_pkt_hd *dest, const uint8_t *pkt, size_t pktlen) { case NGTCP2_PKT_0RTT: /* 0-RTT packet may arrive before Initial packet due to re-ordering. ngtcp2 does not buffer 0RTT packet unless the - very first Initial packet is received or token is received. */ - return NGTCP2_ERR_RETRY; + very first Initial packet is received or token is received. + Previously, we returned NGTCP2_ERR_RETRY here, so that client + can resend 0RTT data. But it incurs 1RTT already and + diminishes the value of 0RTT. Therefore, we just discard the + packet here for now. */ default: return NGTCP2_ERR_INVALID_ARGUMENT; } if (pktlen < NGTCP2_MAX_UDP_PAYLOAD_SIZE || - (p->token.len == 0 && p->dcid.datalen < NGTCP2_MIN_INITIAL_DCIDLEN)) { + (p->tokenlen == 0 && p->dcid.datalen < NGTCP2_MIN_INITIAL_DCIDLEN)) { return NGTCP2_ERR_INVALID_ARGUMENT; } @@ -10526,7 +10667,7 @@ int ngtcp2_conn_install_rx_handshake_key( pktns->crypto.rx.hp_ctx = *hp_ctx; - rv = conn_call_recv_rx_key(conn, NGTCP2_CRYPTO_LEVEL_HANDSHAKE); + rv = conn_call_recv_rx_key(conn, NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE); if (rv != 0) { ngtcp2_crypto_km_del(pktns->crypto.rx.ckm, conn->mem); pktns->crypto.rx.ckm = NULL; @@ -10565,7 +10706,7 @@ int ngtcp2_conn_install_tx_handshake_key( } } - rv = conn_call_recv_tx_key(conn, NGTCP2_CRYPTO_LEVEL_HANDSHAKE); + rv = conn_call_recv_tx_key(conn, NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE); if (rv != 0) { ngtcp2_crypto_km_del(pktns->crypto.tx.ckm, conn->mem); pktns->crypto.tx.ckm = NULL; @@ -10578,10 +10719,10 @@ int ngtcp2_conn_install_tx_handshake_key( return 0; } -int ngtcp2_conn_install_early_key(ngtcp2_conn *conn, - const ngtcp2_crypto_aead_ctx *aead_ctx, - const uint8_t *iv, size_t ivlen, - const ngtcp2_crypto_cipher_ctx *hp_ctx) { +int ngtcp2_conn_install_0rtt_key(ngtcp2_conn *conn, + const ngtcp2_crypto_aead_ctx *aead_ctx, + const uint8_t *iv, size_t ivlen, + const ngtcp2_crypto_cipher_ctx *hp_ctx) { int rv; assert(ivlen >= 8); @@ -10599,9 +10740,9 @@ int ngtcp2_conn_install_early_key(ngtcp2_conn *conn, conn->flags |= NGTCP2_CONN_FLAG_EARLY_KEY_INSTALLED; if (conn->server) { - rv = conn_call_recv_rx_key(conn, NGTCP2_CRYPTO_LEVEL_EARLY); + rv = conn_call_recv_rx_key(conn, NGTCP2_ENCRYPTION_LEVEL_0RTT); } else { - rv = conn_call_recv_tx_key(conn, NGTCP2_CRYPTO_LEVEL_EARLY); + rv = conn_call_recv_tx_key(conn, NGTCP2_ENCRYPTION_LEVEL_0RTT); } if (rv != 0) { ngtcp2_crypto_km_del(conn->early.ckm, conn->mem); @@ -10650,7 +10791,7 @@ int ngtcp2_conn_install_rx_key(ngtcp2_conn *conn, const uint8_t *secret, } } - rv = conn_call_recv_rx_key(conn, NGTCP2_CRYPTO_LEVEL_APPLICATION); + rv = conn_call_recv_rx_key(conn, NGTCP2_ENCRYPTION_LEVEL_1RTT); if (rv != 0) { ngtcp2_crypto_km_del(pktns->crypto.rx.ckm, conn->mem); 
pktns->crypto.rx.ckm = NULL; @@ -10696,7 +10837,7 @@ int ngtcp2_conn_install_tx_key(ngtcp2_conn *conn, const uint8_t *secret, conn_discard_early_key(conn); } - rv = conn_call_recv_tx_key(conn, NGTCP2_CRYPTO_LEVEL_APPLICATION); + rv = conn_call_recv_tx_key(conn, NGTCP2_ENCRYPTION_LEVEL_1RTT); if (rv != 0) { ngtcp2_crypto_km_del(pktns->crypto.tx.ckm, conn->mem); pktns->crypto.tx.ckm = NULL; @@ -10709,7 +10850,7 @@ int ngtcp2_conn_install_tx_key(ngtcp2_conn *conn, const uint8_t *secret, return 0; } -int ngtcp2_conn_initiate_key_update(ngtcp2_conn *conn, ngtcp2_tstamp ts) { +static int conn_initiate_key_update(ngtcp2_conn *conn, ngtcp2_tstamp ts) { ngtcp2_tstamp confirmed_ts = conn->crypto.key_update.confirmed_ts; ngtcp2_duration pto = conn_compute_pto(conn, &conn->pktns); @@ -10719,7 +10860,7 @@ int ngtcp2_conn_initiate_key_update(ngtcp2_conn *conn, ngtcp2_tstamp ts) { (conn->flags & NGTCP2_CONN_FLAG_KEY_UPDATE_NOT_CONFIRMED) || !conn->crypto.key_update.new_tx_ckm || !conn->crypto.key_update.new_rx_ckm || - (confirmed_ts != UINT64_MAX && confirmed_ts + 3 * pto > ts)) { + ngtcp2_tstamp_not_elapsed(confirmed_ts, 3 * pto, ts)) { return NGTCP2_ERR_INVALID_STATE; } @@ -10728,6 +10869,12 @@ int ngtcp2_conn_initiate_key_update(ngtcp2_conn *conn, ngtcp2_tstamp ts) { return 0; } +int ngtcp2_conn_initiate_key_update(ngtcp2_conn *conn, ngtcp2_tstamp ts) { + conn_update_timestamp(conn, ts); + + return conn_initiate_key_update(conn, ts); +} + /* * conn_retire_stale_bound_dcid retires stale destination connection * ID in conn->dcid.bound to keep some unused destination connection @@ -10751,7 +10898,7 @@ static int conn_retire_stale_bound_dcid(ngtcp2_conn *conn, assert(dcid->cid.datalen); - if (dcid->bound_ts + timeout > ts) { + if (ngtcp2_tstamp_not_elapsed(dcid->bound_ts, timeout, ts)) { ++i; continue; } @@ -10846,8 +10993,10 @@ ngtcp2_tstamp ngtcp2_conn_ack_delay_expiry(ngtcp2_conn *conn) { } static ngtcp2_tstamp conn_handshake_expiry(ngtcp2_conn *conn) { - if (conn_is_handshake_completed(conn) || - conn->local.settings.handshake_timeout == UINT64_MAX) { + if (conn_is_tls_handshake_completed(conn) || + conn->local.settings.handshake_timeout == UINT64_MAX || + conn->local.settings.initial_ts >= + UINT64_MAX - conn->local.settings.handshake_timeout) { return UINT64_MAX; } @@ -10874,7 +11023,13 @@ ngtcp2_tstamp ngtcp2_conn_get_expiry(ngtcp2_conn *conn) { int ngtcp2_conn_handle_expiry(ngtcp2_conn *conn, ngtcp2_tstamp ts) { int rv; - ngtcp2_duration pto = conn_compute_pto(conn, &conn->pktns); + ngtcp2_duration pto; + + conn_update_timestamp(conn, ts); + + pto = conn_compute_pto(conn, &conn->pktns); + + assert(!(conn->flags & NGTCP2_CONN_FLAG_PPE_PENDING)); if (ngtcp2_conn_get_idle_expiry(conn) <= ts) { return NGTCP2_ERR_IDLE_CLOSE; @@ -10919,17 +11074,13 @@ int ngtcp2_conn_handle_expiry(ngtcp2_conn *conn, ngtcp2_tstamp ts) { } if (conn->server && conn->early.ckm && - conn->early.discard_started_ts != UINT64_MAX) { - if (conn->early.discard_started_ts + 3 * pto <= ts) { - conn_discard_early_key(conn); - } + ngtcp2_tstamp_elapsed(conn->early.discard_started_ts, 3 * pto, ts)) { + conn_discard_early_key(conn); } - if (!conn_is_handshake_completed(conn) && - conn->local.settings.handshake_timeout != UINT64_MAX && - conn->local.settings.initial_ts + - conn->local.settings.handshake_timeout <= - ts) { + if (!conn_is_tls_handshake_completed(conn) && + ngtcp2_tstamp_elapsed(conn->local.settings.initial_ts, + conn->local.settings.handshake_timeout, ts)) { return NGTCP2_ERR_HANDSHAKE_TIMEOUT; } @@ -10940,8 +11091,7 @@ 
static void acktr_cancel_expired_ack_delay_timer(ngtcp2_acktr *acktr, ngtcp2_duration max_ack_delay, ngtcp2_tstamp ts) { if (!(acktr->flags & NGTCP2_ACKTR_FLAG_CANCEL_TIMER) && - acktr->first_unacked_ts != UINT64_MAX && - acktr->first_unacked_ts + max_ack_delay <= ts) { + ngtcp2_tstamp_elapsed(acktr->first_unacked_ts, max_ack_delay, ts)) { acktr->flags |= NGTCP2_ACKTR_FLAG_CANCEL_TIMER; } } @@ -11006,19 +11156,21 @@ void ngtcp2_conn_remove_lost_pkt(ngtcp2_conn *conn, ngtcp2_tstamp ts) { * select_preferred_version selects the most preferred version. * |fallback_version| is chosen if no preference is made, or * |preferred_versions| does not include any of |chosen_version| or - * |other_versions|. |chosen_version| is treated as an extra other - * version. + * |available_versions|. |chosen_version| is treated as an extra + * other version. */ static uint32_t select_preferred_version(const uint32_t *preferred_versions, size_t preferred_versionslen, uint32_t chosen_version, - const uint8_t *other_versions, - size_t other_versionslen, + const uint8_t *available_versions, + size_t available_versionslen, uint32_t fallback_version) { size_t i, j; + const uint8_t *p; + uint32_t v; if (!preferred_versionslen || - (!other_versionslen && chosen_version == fallback_version)) { + (!available_versionslen && chosen_version == fallback_version)) { return fallback_version; } @@ -11026,12 +11178,13 @@ static uint32_t select_preferred_version(const uint32_t *preferred_versions, if (preferred_versions[i] == chosen_version) { return chosen_version; } - for (j = 0; j < other_versionslen; j += sizeof(uint32_t)) { - if (preferred_versions[i] != ngtcp2_get_uint32(&other_versions[j])) { - continue; - } + for (j = 0, p = available_versions; j < available_versionslen; + j += sizeof(uint32_t)) { + p = ngtcp2_get_uint32(&v, p); - return preferred_versions[i]; + if (preferred_versions[i] == v) { + return v; + } } } @@ -11054,6 +11207,10 @@ static uint32_t select_preferred_version(const uint32_t *preferred_versions, static int conn_client_validate_transport_params(ngtcp2_conn *conn, const ngtcp2_transport_params *params) { + if (!params->original_dcid_present) { + return NGTCP2_ERR_REQUIRED_TRANSPORT_PARAM; + } + if (!ngtcp2_cid_eq(&conn->rcid, ¶ms->original_dcid)) { return NGTCP2_ERR_TRANSPORT_PARAM; } @@ -11069,8 +11226,7 @@ conn_client_validate_transport_params(ngtcp2_conn *conn, return NGTCP2_ERR_TRANSPORT_PARAM; } - if (params->preferred_address_present && - conn->dcid.current.cid.datalen == 0) { + if (params->preferred_addr_present && conn->dcid.current.cid.datalen == 0) { return NGTCP2_ERR_TRANSPORT_PARAM; } @@ -11079,21 +11235,31 @@ conn_client_validate_transport_params(ngtcp2_conn *conn, return NGTCP2_ERR_VERSION_NEGOTIATION_FAILURE; } - assert(vneg_other_versions_includes(conn->vneg.other_versions, - conn->vneg.other_versionslen, - conn->negotiated_version)); - } else if (conn->client_chosen_version != conn->negotiated_version || - conn->client_chosen_version != - conn->local.settings.original_version) { + assert(vneg_available_versions_includes(conn->vneg.available_versions, + conn->vneg.available_versionslen, + conn->negotiated_version)); + } else if (conn->client_chosen_version != conn->negotiated_version) { return NGTCP2_ERR_VERSION_NEGOTIATION_FAILURE; } /* When client reacted upon Version Negotiation */ if (conn->local.settings.original_version != conn->client_chosen_version) { - assert(params->version_info_present); + if (!params->version_info_present) { + assert(conn->client_chosen_version == 
conn->negotiated_version); + + /* QUIC v1 is treated specially. If version_info is missing, no + further validation is necessary. See + https://datatracker.ietf.org/doc/html/rfc9368#section-8 + */ + if (conn->client_chosen_version == NGTCP2_PROTO_VER_V1) { + return 0; + } + + return NGTCP2_ERR_VERSION_NEGOTIATION_FAILURE; + } - /* Server choose original version after Version Negotiation. - Draft does not say this particular case, but this smells like + /* Server choose original version after Version Negotiation. RFC + 9368 does not say this particular case, but this smells like misbehaved server because server should accept original_version in the original connection. */ if (conn->local.settings.original_version == @@ -11102,7 +11268,7 @@ conn_client_validate_transport_params(ngtcp2_conn *conn, } /* Check version downgrade on incompatible version negotiation. */ - if (params->version_info.other_versionslen == 0) { + if (params->version_info.available_versionslen == 0) { return NGTCP2_ERR_VERSION_NEGOTIATION_FAILURE; } @@ -11110,8 +11276,8 @@ conn_client_validate_transport_params(ngtcp2_conn *conn, select_preferred_version(conn->vneg.preferred_versions, conn->vneg.preferred_versionslen, params->version_info.chosen_version, - params->version_info.other_versions, - params->version_info.other_versionslen, + params->version_info.available_versions, + params->version_info.available_versionslen, /* fallback_version = */ 0)) { return NGTCP2_ERR_VERSION_NEGOTIATION_FAILURE; } @@ -11128,8 +11294,8 @@ ngtcp2_conn_server_negotiate_version(ngtcp2_conn *conn, return select_preferred_version( conn->vneg.preferred_versions, conn->vneg.preferred_versionslen, - version_info->chosen_version, version_info->other_versions, - version_info->other_versionslen, version_info->chosen_version); + version_info->chosen_version, version_info->available_versions, + version_info->available_versionslen, version_info->chosen_version); } int ngtcp2_conn_set_remote_transport_params( @@ -11145,7 +11311,11 @@ int ngtcp2_conn_set_remote_transport_params( return 0; } - /* Assume that ngtcp2_decode_transport_params sets default value if + if (!params->initial_scid_present) { + return NGTCP2_ERR_REQUIRED_TRANSPORT_PARAM; + } + + /* Assume that ngtcp2_transport_params_decode sets default value if active_connection_id_limit is omitted. */ if (params->active_connection_id_limit < NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT) { @@ -11164,7 +11334,20 @@ int ngtcp2_conn_set_remote_transport_params( } if (conn->server) { + if (params->original_dcid_present || + params->stateless_reset_token_present || + params->preferred_addr_present || params->retry_scid_present) { + return NGTCP2_ERR_TRANSPORT_PARAM; + } + if (params->version_info_present) { + if (!vneg_available_versions_includes( + params->version_info.available_versions, + params->version_info.available_versionslen, + params->version_info.chosen_version)) { + return NGTCP2_ERR_TRANSPORT_PARAM; + } + if (params->version_info.chosen_version != conn->client_chosen_version) { return NGTCP2_ERR_VERSION_NEGOTIATION_FAILURE; } @@ -11195,11 +11378,7 @@ int ngtcp2_conn_set_remote_transport_params( } } - ngtcp2_log_remote_tp(&conn->log, - conn->server - ? 
NGTCP2_TRANSPORT_PARAMS_TYPE_CLIENT_HELLO - : NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS, - params); + ngtcp2_log_remote_tp(&conn->log, params); ngtcp2_qlog_parameters_set_transport_params(&conn->qlog, params, conn->server, NGTCP2_QLOG_SIDE_REMOTE); @@ -11231,17 +11410,13 @@ int ngtcp2_conn_set_remote_transport_params( return 0; } -int ngtcp2_conn_decode_remote_transport_params(ngtcp2_conn *conn, - const uint8_t *data, - size_t datalen) { +int ngtcp2_conn_decode_and_set_remote_transport_params(ngtcp2_conn *conn, + const uint8_t *data, + size_t datalen) { ngtcp2_transport_params params; int rv; - rv = ngtcp2_decode_transport_params( - ¶ms, - conn->server ? NGTCP2_TRANSPORT_PARAMS_TYPE_CLIENT_HELLO - : NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS, - data, datalen); + rv = ngtcp2_transport_params_decode(¶ms, data, datalen); if (rv != 0) { return rv; } @@ -11258,20 +11433,72 @@ ngtcp2_conn_get_remote_transport_params(ngtcp2_conn *conn) { return conn->remote.transport_params; } -void ngtcp2_conn_set_early_remote_transport_params_versioned( - ngtcp2_conn *conn, int transport_params_version, - const ngtcp2_transport_params *params) { +ngtcp2_ssize ngtcp2_conn_encode_0rtt_transport_params(ngtcp2_conn *conn, + uint8_t *dest, + size_t destlen) { + ngtcp2_transport_params params, *src; + + if (conn->server) { + src = &conn->local.transport_params; + } else { + assert(conn->remote.transport_params); + + src = conn->remote.transport_params; + } + + ngtcp2_transport_params_default(¶ms); + + params.initial_max_streams_bidi = src->initial_max_streams_bidi; + params.initial_max_streams_uni = src->initial_max_streams_uni; + params.initial_max_stream_data_bidi_local = + src->initial_max_stream_data_bidi_local; + params.initial_max_stream_data_bidi_remote = + src->initial_max_stream_data_bidi_remote; + params.initial_max_stream_data_uni = src->initial_max_stream_data_uni; + params.initial_max_data = src->initial_max_data; + params.active_connection_id_limit = src->active_connection_id_limit; + params.max_datagram_frame_size = src->max_datagram_frame_size; + + if (conn->server) { + params.max_idle_timeout = src->max_idle_timeout; + params.max_udp_payload_size = src->max_udp_payload_size; + params.disable_active_migration = src->disable_active_migration; + } + + return ngtcp2_transport_params_encode(dest, destlen, ¶ms); +} + +int ngtcp2_conn_decode_and_set_0rtt_transport_params(ngtcp2_conn *conn, + const uint8_t *data, + size_t datalen) { + ngtcp2_transport_params params; + int rv; + + rv = ngtcp2_transport_params_decode(¶ms, data, datalen); + if (rv != 0) { + return rv; + } + + return ngtcp2_conn_set_0rtt_remote_transport_params(conn, ¶ms); +} + +int ngtcp2_conn_set_0rtt_remote_transport_params( + ngtcp2_conn *conn, const ngtcp2_transport_params *params) { ngtcp2_transport_params *p; - (void)transport_params_version; assert(!conn->server); assert(!conn->remote.transport_params); /* Assume that all pointer fields in p are NULL */ p = ngtcp2_mem_calloc(conn->mem, 1, sizeof(*p)); + if (p == NULL) { + return NGTCP2_ERR_NOMEM; + } conn->remote.transport_params = p; + ngtcp2_transport_params_default(conn->remote.transport_params); + p->initial_max_streams_bidi = params->initial_max_streams_bidi; p->initial_max_streams_uni = params->initial_max_streams_uni; p->initial_max_stream_data_bidi_local = @@ -11280,18 +11507,17 @@ void ngtcp2_conn_set_early_remote_transport_params_versioned( params->initial_max_stream_data_bidi_remote; p->initial_max_stream_data_uni = params->initial_max_stream_data_uni; 
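ngtcp2_conn_encode_0rtt_transport_params and ngtcp2_conn_set_0rtt_remote_transport_params above persist and restore only the parameters that matter before the handshake completes: the flow-control and stream-count limits, active_connection_id_limit, and max_datagram_frame_size, plus a few server-side extras such as max_idle_timeout and disable_active_migration. The following is a minimal sketch of that remembered set using illustrative names, assuming the library default for active_connection_id_limit equals the RFC 9000 minimum of 2, as the ngtcp2_max() clamp applied just below suggests.

#include <stdint.h>

/* Illustrative container for the values a client reuses when sending 0RTT. */
struct zero_rtt_params {
  uint64_t initial_max_data;
  uint64_t initial_max_stream_data_bidi_local;
  uint64_t initial_max_stream_data_bidi_remote;
  uint64_t initial_max_stream_data_uni;
  uint64_t initial_max_streams_bidi;
  uint64_t initial_max_streams_uni;
  uint64_t active_connection_id_limit;
  uint64_t max_datagram_frame_size;
};

/* Copy the remembered limits and clamp active_connection_id_limit up to the
   minimum of 2 before reuse. */
static void remember_for_0rtt(struct zero_rtt_params *dst,
                              const struct zero_rtt_params *src) {
  *dst = *src;
  if (dst->active_connection_id_limit < 2)
    dst->active_connection_id_limit = 2;
}

Reloading these limits before the TLS handshake completes is what lets a client open streams and send 0RTT data without exceeding what the server granted in the previous connection.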
p->initial_max_data = params->initial_max_data; + /* we might hit garbage, then set the sane default. */ p->active_connection_id_limit = ngtcp2_max(NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT, params->active_connection_id_limit); - p->max_idle_timeout = params->max_idle_timeout; - if (!params->max_udp_payload_size) { - p->max_udp_payload_size = NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE; - } else { + p->max_datagram_frame_size = params->max_datagram_frame_size; + + /* we might hit garbage, then set the sane default. */ + if (params->max_udp_payload_size) { p->max_udp_payload_size = ngtcp2_max(NGTCP2_MAX_UDP_PAYLOAD_SIZE, params->max_udp_payload_size); } - p->disable_active_migration = params->disable_active_migration; - p->max_datagram_frame_size = params->max_datagram_frame_size; /* These parameters are treated specially. If server accepts early data, it must not set values for these parameters that are @@ -11318,14 +11544,21 @@ void ngtcp2_conn_set_early_remote_transport_params_versioned( ngtcp2_qlog_parameters_set_transport_params(&conn->qlog, p, conn->server, NGTCP2_QLOG_SIDE_REMOTE); + + return 0; } int ngtcp2_conn_set_local_transport_params_versioned( ngtcp2_conn *conn, int transport_params_version, const ngtcp2_transport_params *params) { - (void)transport_params_version; + ngtcp2_transport_params paramsbuf; + + params = ngtcp2_transport_params_convert_to_latest( + ¶msbuf, transport_params_version, params); assert(conn->server); + assert(params->active_connection_id_limit >= + NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT); assert(params->active_connection_id_limit <= NGTCP2_MAX_DCID_POOL_SIZE); if (conn->hs_pktns == NULL || conn->hs_pktns->crypto.tx.ckm) { @@ -11345,24 +11578,20 @@ int ngtcp2_conn_commit_local_transport_params(ngtcp2_conn *conn) { assert(1 == ngtcp2_ksl_len(&conn->scid.set)); - if (params->active_connection_id_limit == 0) { - params->active_connection_id_limit = - NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT; - } - params->initial_scid = conn->oscid; + params->initial_scid_present = 1; if (conn->oscid.datalen == 0) { - params->preferred_address_present = 0; + params->preferred_addr_present = 0; } - if (conn->server && params->preferred_address_present) { + if (conn->server && params->preferred_addr_present) { scident = ngtcp2_mem_malloc(mem, sizeof(*scident)); if (scident == NULL) { return NGTCP2_ERR_NOMEM; } - ngtcp2_scid_init(scident, 1, ¶ms->preferred_address.cid); + ngtcp2_scid_init(scident, 1, ¶ms->preferred_addr.cid); rv = ngtcp2_ksl_insert(&conn->scid.set, NULL, &scident->cid, scident); if (rv != 0) { @@ -11396,11 +11625,8 @@ ngtcp2_conn_get_local_transport_params(ngtcp2_conn *conn) { ngtcp2_ssize ngtcp2_conn_encode_local_transport_params(ngtcp2_conn *conn, uint8_t *dest, size_t destlen) { - return ngtcp2_encode_transport_params( - dest, destlen, - conn->server ? 
NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS - : NGTCP2_TRANSPORT_PARAMS_TYPE_CLIENT_HELLO, - &conn->local.transport_params); + return ngtcp2_transport_params_encode(dest, destlen, + &conn->local.transport_params); } int ngtcp2_conn_open_bidi_stream(ngtcp2_conn *conn, int64_t *pstream_id, @@ -11485,7 +11711,6 @@ static ngtcp2_ssize conn_write_vmsg_wrapper(ngtcp2_conn *conn, ngtcp2_tstamp ts) { ngtcp2_conn_stat *cstat = &conn->cstat; ngtcp2_ssize nwrite; - int undersized; nwrite = ngtcp2_conn_write_vmsg(conn, path, pkt_info_version, pi, dest, destlen, vmsg, ts); @@ -11497,20 +11722,11 @@ static ngtcp2_ssize conn_write_vmsg_wrapper(ngtcp2_conn *conn, conn->rst.is_cwnd_limited = 1; } - if (vmsg == NULL && cstat->bytes_in_flight < cstat->cwnd && - conn->tx.strmq_nretrans == 0) { - if (conn->local.settings.no_udp_payload_size_shaping) { - undersized = (size_t)nwrite < conn->local.settings.max_udp_payload_size; - } else { - undersized = (size_t)nwrite < conn->dcid.current.max_udp_payload_size; - } - - if (undersized) { - conn->rst.app_limited = conn->rst.delivered + cstat->bytes_in_flight; + if (nwrite == 0 && cstat->bytes_in_flight < cstat->cwnd) { + conn->rst.app_limited = conn->rst.delivered + cstat->bytes_in_flight; - if (conn->rst.app_limited == 0) { - conn->rst.app_limited = cstat->max_udp_payload_size; - } + if (conn->rst.app_limited == 0) { + conn->rst.app_limited = cstat->max_tx_udp_payload_size; } } @@ -11566,6 +11782,21 @@ ngtcp2_ssize ngtcp2_conn_writev_stream_versioned( destlen, pvmsg, ts); } +ngtcp2_ssize ngtcp2_conn_write_datagram_versioned( + ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, + ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, + uint32_t flags, uint64_t dgram_id, const uint8_t *data, size_t datalen, + ngtcp2_tstamp ts) { + ngtcp2_vec datav; + + datav.len = datalen; + datav.base = (uint8_t *)data; + + return ngtcp2_conn_writev_datagram_versioned(conn, path, pkt_info_version, pi, + dest, destlen, paccepted, flags, + dgram_id, &datav, 1, ts); +} + ngtcp2_ssize ngtcp2_conn_writev_datagram_versioned( ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, int *paccepted, @@ -11584,8 +11815,12 @@ ngtcp2_ssize ngtcp2_conn_writev_datagram_versioned( } datalen = ngtcp2_vec_len_varint(datav, datavcnt); - if (datalen == -1 || (uint64_t)datalen > SIZE_MAX) { - return NGTCP2_ERR_INVALID_STATE; + if (datalen == -1 +#if UINT64_MAX > SIZE_MAX + || (uint64_t)datalen > SIZE_MAX +#endif /* UINT64_MAX > SIZE_MAX */ + ) { + return NGTCP2_ERR_INVALID_ARGUMENT; } if (conn->remote.transport_params->max_datagram_frame_size < @@ -11617,15 +11852,12 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, ngtcp2_conn_stat *cstat = &conn->cstat; ngtcp2_ssize res = 0; uint64_t server_tx_left; - uint64_t datalen; - uint64_t write_datalen = 0; int64_t prev_in_pkt_num = -1; ngtcp2_ksl_it it; ngtcp2_rtb_entry *rtbent; (void)pkt_info_version; - conn->log.last_ts = ts; - conn->qlog.last_ts = ts; + conn_update_timestamp(conn, ts); if (path) { ngtcp2_path_copy(path, &conn->dcid.current.ps.path); @@ -11638,14 +11870,15 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, pi->ecn = NGTCP2_ECN_NOT_ECT; } - if (!conn_pacing_pkt_tx_allowed(conn, ts)) { - return 0; - } - switch (conn->state) { case NGTCP2_CS_CLIENT_INITIAL: case NGTCP2_CS_CLIENT_WAIT_HANDSHAKE: - case NGTCP2_CS_CLIENT_TLS_HANDSHAKE_FAILED: + if (!conn_pacing_pkt_tx_allowed(conn, ts)) { + assert(!ppe_pending); 
+ + return conn_write_handshake_ack_pkts(conn, pi, dest, origlen, ts); + } + nwrite = conn_client_write_handshake(conn, pi, dest, destlen, vmsg, ts); /* We might be unable to write a packet because of depletion of congestion window budget, perhaps due to packet loss that @@ -11675,14 +11908,28 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, break; case NGTCP2_CS_SERVER_INITIAL: case NGTCP2_CS_SERVER_WAIT_HANDSHAKE: - case NGTCP2_CS_SERVER_TLS_HANDSHAKE_FAILED: + if (!conn_pacing_pkt_tx_allowed(conn, ts)) { + assert(!ppe_pending); + + if (!(conn->dcid.current.flags & NGTCP2_DCID_FLAG_PATH_VALIDATED)) { + server_tx_left = conn_server_tx_left(conn, &conn->dcid.current); + if (server_tx_left == 0) { + return 0; + } + + origlen = (size_t)ngtcp2_min((uint64_t)origlen, server_tx_left); + } + + return conn_write_handshake_ack_pkts(conn, pi, dest, origlen, ts); + } + if (!ppe_pending) { if (!(conn->dcid.current.flags & NGTCP2_DCID_FLAG_PATH_VALIDATED)) { server_tx_left = conn_server_tx_left(conn, &conn->dcid.current); if (server_tx_left == 0) { if (cstat->loss_detection_timer != UINT64_MAX) { ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_RCV, + &conn->log, NGTCP2_LOG_EVENT_LDC, "loss detection timer canceled due to amplification limit"); cstat->loss_detection_timer = UINT64_MAX; } @@ -11693,40 +11940,16 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, destlen = (size_t)ngtcp2_min((uint64_t)destlen, server_tx_left); } - if (vmsg) { - switch (vmsg->type) { - case NGTCP2_VMSG_TYPE_STREAM: - datalen = ngtcp2_vec_len(vmsg->stream.data, vmsg->stream.datacnt); - if (datalen == 0 || (datalen > 0 && - (vmsg->stream.strm->tx.max_offset - - vmsg->stream.strm->tx.offset) && - (conn->tx.max_offset - conn->tx.offset))) { - write_datalen = - conn_enforce_flow_control(conn, vmsg->stream.strm, datalen); - write_datalen = - ngtcp2_min(write_datalen, NGTCP2_MIN_COALESCED_PAYLOADLEN); - write_datalen += NGTCP2_STREAM_OVERHEAD; - } - break; - case NGTCP2_VMSG_TYPE_DATAGRAM: - write_datalen = - ngtcp2_vec_len(vmsg->datagram.data, vmsg->datagram.datacnt) + - NGTCP2_DATAGRAM_OVERHEAD; - break; - default: - assert(0); - } - - if (conn->in_pktns && write_datalen > 0) { - it = ngtcp2_rtb_head(&conn->in_pktns->rtb); - if (!ngtcp2_ksl_it_end(&it)) { - rtbent = ngtcp2_ksl_it_get(&it); - prev_in_pkt_num = rtbent->hd.pkt_num; - } + if (conn->in_pktns) { + it = ngtcp2_rtb_head(&conn->in_pktns->rtb); + if (!ngtcp2_ksl_it_end(&it)) { + rtbent = ngtcp2_ksl_it_get(&it); + prev_in_pkt_num = rtbent->hd.pkt_num; } } - nwrite = conn_write_handshake(conn, pi, dest, destlen, write_datalen, ts); + nwrite = conn_write_handshake(conn, pi, dest, destlen, + /* write_datalen = */ 0, ts); if (nwrite < 0) { return nwrite; } @@ -11735,7 +11958,7 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, dest += nwrite; destlen -= (size_t)nwrite; - if (conn->in_pktns && write_datalen > 0) { + if (conn->in_pktns && nwrite > 0) { it = ngtcp2_rtb_head(&conn->in_pktns->rtb); if (!ngtcp2_ksl_it_end(&it)) { rtbent = ngtcp2_ksl_it_get(&it); @@ -11748,12 +11971,27 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, } } } - if (conn->state != NGTCP2_CS_POST_HANDSHAKE && - conn->pktns.crypto.tx.ckm == NULL) { + if (conn->pktns.crypto.tx.ckm == NULL) { return res; } break; case NGTCP2_CS_POST_HANDSHAKE: + if (!conn_pacing_pkt_tx_allowed(conn, ts)) { + assert(!ppe_pending); + + if (conn->server && + !(conn->dcid.current.flags & NGTCP2_DCID_FLAG_PATH_VALIDATED)) 
{ + server_tx_left = conn_server_tx_left(conn, &conn->dcid.current); + if (server_tx_left == 0) { + return 0; + } + + origlen = (size_t)ngtcp2_min((uint64_t)origlen, server_tx_left); + } + + return conn_write_ack_pkt(conn, pi, dest, origlen, NGTCP2_PKT_1RTT, ts); + } + break; case NGTCP2_CS_CLOSING: return NGTCP2_ERR_CLOSING; @@ -11788,7 +12026,6 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, if (ppe_pending) { res = conn->pkt.hs_spktlen; - conn->pkt.hs_spktlen = 0; if (conn->pkt.require_padding) { wflags |= NGTCP2_WRITE_PKT_FLAG_REQUIRE_PADDING; } @@ -11827,35 +12064,44 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, } if (conn->pmtud && + (conn->dcid.current.flags & NGTCP2_DCID_FLAG_PATH_VALIDATED) && (!conn->hs_pktns || - ngtcp2_ksl_len(&conn->hs_pktns->crypto.tx.frq) == 0)) { + ngtcp2_strm_streamfrq_empty(&conn->hs_pktns->crypto.strm))) { nwrite = conn_write_pmtud_probe(conn, pi, dest, origdestlen, ts); if (nwrite) { goto fin; } } } + } - if (conn->server && - !(conn->dcid.current.flags & NGTCP2_DCID_FLAG_PATH_VALIDATED)) { - server_tx_left = conn_server_tx_left(conn, &conn->dcid.current); - origlen = (size_t)ngtcp2_min((uint64_t)origlen, server_tx_left); - destlen = (size_t)ngtcp2_min((uint64_t)destlen, server_tx_left); + if (conn->server && + !(conn->dcid.current.flags & NGTCP2_DCID_FLAG_PATH_VALIDATED)) { + server_tx_left = conn_server_tx_left(conn, &conn->dcid.current); + origlen = (size_t)ngtcp2_min((uint64_t)origlen, server_tx_left); + destlen = (size_t)ngtcp2_min((uint64_t)destlen, server_tx_left); - if (server_tx_left == 0 && - conn->cstat.loss_detection_timer != UINT64_MAX) { - ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_RCV, - "loss detection timer canceled due to amplification limit"); - conn->cstat.loss_detection_timer = UINT64_MAX; - } + if (server_tx_left == 0 && + conn->cstat.loss_detection_timer != UINT64_MAX) { + ngtcp2_log_info( + &conn->log, NGTCP2_LOG_EVENT_LDC, + "loss detection timer canceled due to amplification limit"); + conn->cstat.loss_detection_timer = UINT64_MAX; } } } if (res == 0) { if (conn_handshake_remnants_left(conn)) { - if (conn_handshake_probe_left(conn)) { + if (conn_handshake_probe_left(conn) || + /* Allow exceeding CWND if an Handshake packet needs to be + sent in order to avoid dead lock. In some situation, + typically for client, 1 RTT packets may occupy in-flight + bytes (e.g., some large requests and PMTUD), and + Handshake packet loss shrinks CWND, and we may get in the + situation that we are unable to send Handshake packet. */ + (conn->hs_pktns->rtb.num_pto_eliciting == 0 && + !ngtcp2_strm_streamfrq_empty(&conn->hs_pktns->crypto.strm))) { destlen = origlen; } nwrite = conn_write_handshake_pkts(conn, pi, dest, destlen, @@ -11867,6 +12113,11 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, res = nwrite; dest += nwrite; destlen -= (size_t)nwrite; + } else if (destlen == 0) { + res = conn_write_handshake_ack_pkts(conn, pi, dest, origlen, ts); + if (res) { + return res; + } } } } @@ -11894,16 +12145,24 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, } fin: - conn->pkt.hs_spktlen = 0; - if (nwrite >= 0) { res += nwrite; return res; } - /* NGTCP2_CONN_FLAG_PPE_PENDING is set in conn_write_pkt above. - ppe_pending cannot be used here. 
*/ - if (conn->flags & NGTCP2_CONN_FLAG_PPE_PENDING) { + + switch (nwrite) { + case NGTCP2_ERR_STREAM_DATA_BLOCKED: + if (!(wflags & NGTCP2_WRITE_PKT_FLAG_MORE)) { + if (res) { + return res; + } + + break; + } + /* fall through */ + case NGTCP2_ERR_WRITE_MORE: conn->pkt.hs_spktlen = res; + break; } return nwrite; @@ -11989,9 +12248,6 @@ ngtcp2_ssize ngtcp2_conn_write_connection_close_pkt( ngtcp2_ssize nwrite; uint64_t server_tx_left; - conn->log.last_ts = ts; - conn->qlog.last_ts = ts; - if (conn_check_pkt_num_exhausted(conn)) { return NGTCP2_ERR_PKT_NUM_EXHAUSTED; } @@ -12054,9 +12310,6 @@ ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( ngtcp2_frame fr; uint64_t server_tx_left; - conn->log.last_ts = ts; - conn->qlog.last_ts = ts; - if (conn_check_pkt_num_exhausted(conn)) { return NGTCP2_ERR_PKT_NUM_EXHAUSTED; } @@ -12135,11 +12388,9 @@ ngtcp2_ssize ngtcp2_conn_write_application_close_pkt( return res; } -static void -connection_close_error_init(ngtcp2_connection_close_error *ccerr, - ngtcp2_connection_close_error_code_type type, - uint64_t error_code, const uint8_t *reason, - size_t reasonlen) { +static void ccerr_init(ngtcp2_ccerr *ccerr, ngtcp2_ccerr_type type, + uint64_t error_code, const uint8_t *reason, + size_t reasonlen) { ccerr->type = type; ccerr->error_code = error_code; ccerr->frame_type = 0; @@ -12147,72 +12398,63 @@ connection_close_error_init(ngtcp2_connection_close_error *ccerr, ccerr->reasonlen = reasonlen; } -void ngtcp2_connection_close_error_default( - ngtcp2_connection_close_error *ccerr) { - connection_close_error_init(ccerr, - NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT, - NGTCP2_NO_ERROR, NULL, 0); +void ngtcp2_ccerr_default(ngtcp2_ccerr *ccerr) { + ccerr_init(ccerr, NGTCP2_CCERR_TYPE_TRANSPORT, NGTCP2_NO_ERROR, NULL, 0); } -void ngtcp2_connection_close_error_set_transport_error( - ngtcp2_connection_close_error *ccerr, uint64_t error_code, - const uint8_t *reason, size_t reasonlen) { - connection_close_error_init(ccerr, - NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT, - error_code, reason, reasonlen); +void ngtcp2_ccerr_set_transport_error(ngtcp2_ccerr *ccerr, uint64_t error_code, + const uint8_t *reason, size_t reasonlen) { + ccerr_init(ccerr, NGTCP2_CCERR_TYPE_TRANSPORT, error_code, reason, reasonlen); } -void ngtcp2_connection_close_error_set_transport_error_liberr( - ngtcp2_connection_close_error *ccerr, int liberr, const uint8_t *reason, - size_t reasonlen) { +void ngtcp2_ccerr_set_liberr(ngtcp2_ccerr *ccerr, int liberr, + const uint8_t *reason, size_t reasonlen) { switch (liberr) { case NGTCP2_ERR_RECV_VERSION_NEGOTIATION: - connection_close_error_init( - ccerr, - NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT_VERSION_NEGOTIATION, - NGTCP2_NO_ERROR, reason, reasonlen); + ccerr_init(ccerr, NGTCP2_CCERR_TYPE_VERSION_NEGOTIATION, NGTCP2_NO_ERROR, + reason, reasonlen); return; case NGTCP2_ERR_IDLE_CLOSE: - connection_close_error_init( - ccerr, NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT_IDLE_CLOSE, - NGTCP2_NO_ERROR, reason, reasonlen); + ccerr_init(ccerr, NGTCP2_CCERR_TYPE_IDLE_CLOSE, NGTCP2_NO_ERROR, reason, + reasonlen); return; }; - ngtcp2_connection_close_error_set_transport_error( + ngtcp2_ccerr_set_transport_error( ccerr, ngtcp2_err_infer_quic_transport_error_code(liberr), reason, reasonlen); } -void ngtcp2_connection_close_error_set_transport_error_tls_alert( - ngtcp2_connection_close_error *ccerr, uint8_t tls_alert, - const uint8_t *reason, size_t reasonlen) { - ngtcp2_connection_close_error_set_transport_error( - ccerr, 
NGTCP2_CRYPTO_ERROR | tls_alert, reason, reasonlen); +void ngtcp2_ccerr_set_tls_alert(ngtcp2_ccerr *ccerr, uint8_t tls_alert, + const uint8_t *reason, size_t reasonlen) { + ngtcp2_ccerr_set_transport_error(ccerr, NGTCP2_CRYPTO_ERROR | tls_alert, + reason, reasonlen); } -void ngtcp2_connection_close_error_set_application_error( - ngtcp2_connection_close_error *ccerr, uint64_t error_code, - const uint8_t *reason, size_t reasonlen) { - connection_close_error_init( - ccerr, NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_APPLICATION, error_code, - reason, reasonlen); +void ngtcp2_ccerr_set_application_error(ngtcp2_ccerr *ccerr, + uint64_t error_code, + const uint8_t *reason, + size_t reasonlen) { + ccerr_init(ccerr, NGTCP2_CCERR_TYPE_APPLICATION, error_code, reason, + reasonlen); } ngtcp2_ssize ngtcp2_conn_write_connection_close_versioned( ngtcp2_conn *conn, ngtcp2_path *path, int pkt_info_version, ngtcp2_pkt_info *pi, uint8_t *dest, size_t destlen, - const ngtcp2_connection_close_error *ccerr, ngtcp2_tstamp ts) { + const ngtcp2_ccerr *ccerr, ngtcp2_tstamp ts) { (void)pkt_info_version; + conn_update_timestamp(conn, ts); + switch (ccerr->type) { - case NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_TRANSPORT: + case NGTCP2_CCERR_TYPE_TRANSPORT: return ngtcp2_conn_write_connection_close_pkt( conn, path, pi, dest, destlen, ccerr->error_code, ccerr->reason, ccerr->reasonlen, ts); - case NGTCP2_CONNECTION_CLOSE_ERROR_CODE_TYPE_APPLICATION: + case NGTCP2_CCERR_TYPE_APPLICATION: return ngtcp2_conn_write_application_close_pkt( conn, path, pi, dest, destlen, ccerr->error_code, ccerr->reason, ccerr->reasonlen, ts); @@ -12221,51 +12463,46 @@ ngtcp2_ssize ngtcp2_conn_write_connection_close_versioned( } } -int ngtcp2_conn_is_in_closing_period(ngtcp2_conn *conn) { +int ngtcp2_conn_in_closing_period(ngtcp2_conn *conn) { return conn->state == NGTCP2_CS_CLOSING; } -int ngtcp2_conn_is_in_draining_period(ngtcp2_conn *conn) { +int ngtcp2_conn_in_draining_period(ngtcp2_conn *conn) { return conn->state == NGTCP2_CS_DRAINING; } int ngtcp2_conn_close_stream(ngtcp2_conn *conn, ngtcp2_strm *strm) { int rv; - rv = ngtcp2_map_remove(&conn->strms, (ngtcp2_map_key_type)strm->stream_id); + rv = conn_call_stream_close(conn, strm); if (rv != 0) { - assert(rv != NGTCP2_ERR_INVALID_ARGUMENT); return rv; } - rv = conn_call_stream_close(conn, strm); + rv = ngtcp2_map_remove(&conn->strms, (ngtcp2_map_key_type)strm->stream_id); if (rv != 0) { - goto fin; + assert(rv != NGTCP2_ERR_INVALID_ARGUMENT); + return rv; } if (ngtcp2_strm_is_tx_queued(strm)) { ngtcp2_pq_remove(&conn->tx.strmq, &strm->pe); - if (!ngtcp2_strm_streamfrq_empty(strm)) { - assert(conn->tx.strmq_nretrans); - --conn->tx.strmq_nretrans; - } } -fin: ngtcp2_strm_free(strm); ngtcp2_objalloc_strm_release(&conn->strm_objalloc, strm); - return rv; + return 0; } int ngtcp2_conn_close_stream_if_shut_rdwr(ngtcp2_conn *conn, ngtcp2_strm *strm) { if ((strm->flags & NGTCP2_STRM_FLAG_SHUT_RDWR) == NGTCP2_STRM_FLAG_SHUT_RDWR && - ((strm->flags & NGTCP2_STRM_FLAG_RECV_RST) || + ((strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM_RECVED) || ngtcp2_strm_rx_offset(strm) == strm->rx.last_offset) && - (((strm->flags & NGTCP2_STRM_FLAG_SENT_RST) && - (strm->flags & NGTCP2_STRM_FLAG_RST_ACKED)) || + (((strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM) && + (strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM_ACKED)) || ngtcp2_strm_is_all_tx_data_fin_acked(strm))) { return ngtcp2_conn_close_stream(conn, strm); } @@ -12286,14 +12523,14 @@ static int conn_shutdown_stream_write(ngtcp2_conn *conn, ngtcp2_strm *strm, uint64_t 
app_error_code) { ngtcp2_strm_set_app_error_code(strm, app_error_code); - if ((strm->flags & NGTCP2_STRM_FLAG_SENT_RST) || + if ((strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM) || ngtcp2_strm_is_all_tx_data_fin_acked(strm)) { return 0; } /* Set this flag so that we don't accidentally send DATA to this stream. */ - strm->flags |= NGTCP2_STRM_FLAG_SHUT_WR | NGTCP2_STRM_FLAG_SENT_RST; + strm->flags |= NGTCP2_STRM_FLAG_SHUT_WR | NGTCP2_STRM_FLAG_RESET_STREAM; ngtcp2_strm_streamfrq_clear(strm); @@ -12312,6 +12549,8 @@ static int conn_shutdown_stream_write(ngtcp2_conn *conn, ngtcp2_strm *strm, */ static int conn_shutdown_stream_read(ngtcp2_conn *conn, ngtcp2_strm *strm, uint64_t app_error_code) { + ngtcp2_strm_set_app_error_code(strm, app_error_code); + if (strm->flags & NGTCP2_STRM_FLAG_STOP_SENDING) { return 0; } @@ -12322,44 +12561,54 @@ static int conn_shutdown_stream_read(ngtcp2_conn *conn, ngtcp2_strm *strm, /* Extend connection flow control window for the amount of data which are not passed to application. */ - if (!(strm->flags & - (NGTCP2_STRM_FLAG_STOP_SENDING | NGTCP2_STRM_FLAG_RECV_RST))) { + if (!(strm->flags & (NGTCP2_STRM_FLAG_STOP_SENDING | + NGTCP2_STRM_FLAG_RESET_STREAM_RECVED))) { ngtcp2_conn_extend_max_offset(conn, strm->rx.last_offset - ngtcp2_strm_rx_offset(strm)); } strm->flags |= NGTCP2_STRM_FLAG_STOP_SENDING; - ngtcp2_strm_set_app_error_code(strm, app_error_code); return conn_stop_sending(conn, strm, app_error_code); } -int ngtcp2_conn_shutdown_stream(ngtcp2_conn *conn, int64_t stream_id, - uint64_t app_error_code) { +int ngtcp2_conn_shutdown_stream(ngtcp2_conn *conn, uint32_t flags, + int64_t stream_id, uint64_t app_error_code) { int rv; ngtcp2_strm *strm; + (void)flags; strm = ngtcp2_conn_find_stream(conn, stream_id); if (strm == NULL) { return 0; } - rv = conn_shutdown_stream_read(conn, strm, app_error_code); - if (rv != 0) { - return rv; + if (bidi_stream(stream_id) || !conn_local_stream(conn, stream_id)) { + rv = conn_shutdown_stream_read(conn, strm, app_error_code); + if (rv != 0) { + return rv; + } } - rv = conn_shutdown_stream_write(conn, strm, app_error_code); - if (rv != 0) { - return rv; + if (bidi_stream(stream_id) || conn_local_stream(conn, stream_id)) { + rv = conn_shutdown_stream_write(conn, strm, app_error_code); + if (rv != 0) { + return rv; + } } return 0; } -int ngtcp2_conn_shutdown_stream_write(ngtcp2_conn *conn, int64_t stream_id, +int ngtcp2_conn_shutdown_stream_write(ngtcp2_conn *conn, uint32_t flags, + int64_t stream_id, uint64_t app_error_code) { ngtcp2_strm *strm; + (void)flags; + + if (!bidi_stream(stream_id) && !conn_local_stream(conn, stream_id)) { + return NGTCP2_ERR_INVALID_ARGUMENT; + } strm = ngtcp2_conn_find_stream(conn, stream_id); if (strm == NULL) { @@ -12369,9 +12618,15 @@ int ngtcp2_conn_shutdown_stream_write(ngtcp2_conn *conn, int64_t stream_id, return conn_shutdown_stream_write(conn, strm, app_error_code); } -int ngtcp2_conn_shutdown_stream_read(ngtcp2_conn *conn, int64_t stream_id, +int ngtcp2_conn_shutdown_stream_read(ngtcp2_conn *conn, uint32_t flags, + int64_t stream_id, uint64_t app_error_code) { ngtcp2_strm *strm; + (void)flags; + + if (!bidi_stream(stream_id) && conn_local_stream(conn, stream_id)) { + return NGTCP2_ERR_INVALID_ARGUMENT; + } strm = ngtcp2_conn_find_stream(conn, stream_id); if (strm == NULL) { @@ -12421,6 +12676,10 @@ int ngtcp2_conn_extend_max_stream_offset(ngtcp2_conn *conn, int64_t stream_id, uint64_t datalen) { ngtcp2_strm *strm; + if (!bidi_stream(stream_id) && conn_local_stream(conn, stream_id)) { + 
return NGTCP2_ERR_INVALID_ARGUMENT; + } + strm = ngtcp2_conn_find_stream(conn, stream_id); if (strm == NULL) { return 0; @@ -12469,10 +12728,6 @@ static int delete_strms_pq_each(void *data, void *ptr) { if (ngtcp2_strm_is_tx_queued(s)) { ngtcp2_pq_remove(&conn->tx.strmq, &s->pe); - if (!ngtcp2_strm_streamfrq_empty(s)) { - assert(conn->tx.strmq_nretrans); - --conn->tx.strmq_nretrans; - } } ngtcp2_strm_free(s); @@ -12494,6 +12749,7 @@ static void conn_discard_early_data_state(ngtcp2_conn *conn) { ngtcp2_map_clear(&conn->strms); conn->tx.offset = 0; + conn->tx.last_blocked_offset = UINT64_MAX; conn->rx.unsent_max_offset = conn->rx.max_offset = conn->local.transport_params.initial_max_data; @@ -12519,14 +12775,28 @@ static void conn_discard_early_data_state(ngtcp2_conn *conn) { } } -void ngtcp2_conn_early_data_rejected(ngtcp2_conn *conn) { +int ngtcp2_conn_tls_early_data_rejected(ngtcp2_conn *conn) { if (conn->flags & NGTCP2_CONN_FLAG_EARLY_DATA_REJECTED) { - return; + return 0; } conn->flags |= NGTCP2_CONN_FLAG_EARLY_DATA_REJECTED; conn_discard_early_data_state(conn); + + if (conn->callbacks.tls_early_data_rejected) { + return conn->callbacks.tls_early_data_rejected(conn, conn->user_data); + } + + if (conn->early.ckm) { + conn_discard_early_key(conn); + } + + return 0; +} + +int ngtcp2_conn_get_tls_early_data_rejected(ngtcp2_conn *conn) { + return (conn->flags & NGTCP2_CONN_FLAG_EARLY_DATA_REJECTED) != 0; } int ngtcp2_conn_update_rtt(ngtcp2_conn *conn, ngtcp2_duration rtt, @@ -12550,7 +12820,7 @@ int ngtcp2_conn_update_rtt(ngtcp2_conn *conn, ngtcp2_duration rtt, /* Ignore RTT sample if adjusting ack_delay causes the sample less than min_rtt before handshake confirmation. */ ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_RCV, + &conn->log, NGTCP2_LOG_EVENT_LDC, "ignore rtt sample because ack_delay is too large latest_rtt=%" PRIu64 " min_rtt=%" PRIu64 " ack_delay=%" PRIu64, rtt / NGTCP2_MILLISECONDS, cstat->min_rtt / NGTCP2_MILLISECONDS, @@ -12573,7 +12843,7 @@ int ngtcp2_conn_update_rtt(ngtcp2_conn *conn, ngtcp2_duration rtt, } ngtcp2_log_info( - &conn->log, NGTCP2_LOG_EVENT_RCV, + &conn->log, NGTCP2_LOG_EVENT_LDC, "latest_rtt=%" PRIu64 " min_rtt=%" PRIu64 " smoothed_rtt=%" PRIu64 " rttvar=%" PRIu64 " ack_delay=%" PRIu64, cstat->latest_rtt / NGTCP2_MILLISECONDS, @@ -12584,12 +12854,19 @@ int ngtcp2_conn_update_rtt(ngtcp2_conn *conn, ngtcp2_duration rtt, return 0; } -void ngtcp2_conn_get_conn_stat_versioned(ngtcp2_conn *conn, - int conn_stat_version, - ngtcp2_conn_stat *cstat) { - (void)conn_stat_version; +void ngtcp2_conn_get_conn_info_versioned(ngtcp2_conn *conn, + int conn_info_version, + ngtcp2_conn_info *cinfo) { + const ngtcp2_conn_stat *cstat = &conn->cstat; + (void)conn_info_version; - *cstat = conn->cstat; + cinfo->latest_rtt = cstat->latest_rtt; + cinfo->min_rtt = cstat->min_rtt; + cinfo->smoothed_rtt = cstat->smoothed_rtt; + cinfo->rttvar = cstat->rttvar; + cinfo->cwnd = cstat->cwnd; + cinfo->ssthresh = cstat->ssthresh; + cinfo->bytes_in_flight = cstat->bytes_in_flight; } static void conn_get_loss_time_and_pktns(ngtcp2_conn *conn, @@ -12597,14 +12874,13 @@ static void conn_get_loss_time_and_pktns(ngtcp2_conn *conn, ngtcp2_pktns **ppktns) { ngtcp2_pktns *const ns[] = {conn->hs_pktns, &conn->pktns}; ngtcp2_conn_stat *cstat = &conn->cstat; - ngtcp2_duration *loss_time = cstat->loss_time; - ngtcp2_tstamp earliest_loss_time = loss_time[NGTCP2_PKTNS_ID_INITIAL]; + ngtcp2_duration *loss_time = cstat->loss_time + 1; + ngtcp2_tstamp earliest_loss_time = 
cstat->loss_time[NGTCP2_PKTNS_ID_INITIAL]; ngtcp2_pktns *pktns = conn->in_pktns; size_t i; - for (i = 0; i < sizeof(ns) / sizeof(ns[0]); ++i) { - if (ns[i] == NULL || ns[i]->rtb.num_pto_eliciting == 0 || - loss_time[i] >= earliest_loss_time) { + for (i = 0; i < ngtcp2_arraylen(ns); ++i) { + if (ns[i] == NULL || loss_time[i] >= earliest_loss_time) { continue; } @@ -12672,7 +12948,7 @@ void ngtcp2_conn_set_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { if (earliest_loss_time != UINT64_MAX) { cstat->loss_detection_timer = earliest_loss_time; - ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_LDC, "loss_detection_timer=%" PRIu64 " nonzero crypto loss time", cstat->loss_detection_timer); return; @@ -12686,7 +12962,7 @@ void ngtcp2_conn_set_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { (conn->flags & (NGTCP2_CONN_FLAG_SERVER_ADDR_VERIFIED | NGTCP2_CONN_FLAG_HANDSHAKE_CONFIRMED)))) { if (cstat->loss_detection_timer != UINT64_MAX) { - ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_LDC, "loss detection timer canceled"); cstat->loss_detection_timer = UINT64_MAX; cstat->pto_count = 0; @@ -12699,7 +12975,7 @@ void ngtcp2_conn_set_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { timeout = cstat->loss_detection_timer > ts ? cstat->loss_detection_timer - ts : 0; - ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_LDC, "loss_detection_timer=%" PRIu64 " timeout=%" PRIu64, cstat->loss_detection_timer, timeout / NGTCP2_MILLISECONDS); } @@ -12712,9 +12988,6 @@ int ngtcp2_conn_on_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { ngtcp2_tstamp earliest_loss_time; ngtcp2_pktns *loss_pktns = NULL; - conn->log.last_ts = ts; - conn->qlog.last_ts = ts; - switch (conn->state) { case NGTCP2_CS_CLOSING: case NGTCP2_CS_DRAINING: @@ -12731,7 +13004,7 @@ int ngtcp2_conn_on_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { conn_get_loss_time_and_pktns(conn, &earliest_loss_time, &loss_pktns); - ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_LDC, "loss detection timer fired"); if (earliest_loss_time != UINT64_MAX) { @@ -12745,7 +13018,7 @@ int ngtcp2_conn_on_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { return 0; } - if (!conn->server && !conn_is_handshake_completed(conn)) { + if (!conn->server && !conn_is_tls_handshake_completed(conn)) { if (hs_pktns->crypto.tx.ckm) { hs_pktns->rtb.probe_pkt_left = 1; } else { @@ -12762,7 +13035,7 @@ int ngtcp2_conn_on_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { hs_pktns->rtb.probe_pkt_left = 1; } } else if (hs_pktns && hs_pktns->rtb.num_pto_eliciting) { - hs_pktns->rtb.probe_pkt_left = 1; + hs_pktns->rtb.probe_pkt_left = 2; } else { conn->pktns.rtb.probe_pkt_left = 2; } @@ -12770,7 +13043,7 @@ int ngtcp2_conn_on_loss_detection_timer(ngtcp2_conn *conn, ngtcp2_tstamp ts) { ++cstat->pto_count; - ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_RCV, "pto_count=%zu", + ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_LDC, "pto_count=%zu", cstat->pto_count); ngtcp2_conn_set_loss_detection_timer(conn, ts); @@ -12807,27 +13080,27 @@ static int conn_buffer_crypto_data(ngtcp2_conn *conn, const uint8_t **pdata, } int ngtcp2_conn_submit_crypto_data(ngtcp2_conn *conn, - ngtcp2_crypto_level crypto_level, + ngtcp2_encryption_level encryption_level, const uint8_t *data, const size_t datalen) { ngtcp2_pktns *pktns; ngtcp2_frame_chain 
*frc; - ngtcp2_crypto *fr; + ngtcp2_stream *fr; int rv; if (datalen == 0) { return 0; } - switch (crypto_level) { - case NGTCP2_CRYPTO_LEVEL_INITIAL: + switch (encryption_level) { + case NGTCP2_ENCRYPTION_LEVEL_INITIAL: assert(conn->in_pktns); pktns = conn->in_pktns; break; - case NGTCP2_CRYPTO_LEVEL_HANDSHAKE: + case NGTCP2_ENCRYPTION_LEVEL_HANDSHAKE: assert(conn->hs_pktns); pktns = conn->hs_pktns; break; - case NGTCP2_CRYPTO_LEVEL_APPLICATION: + case NGTCP2_ENCRYPTION_LEVEL_1RTT: pktns = &conn->pktns; break; default: @@ -12844,15 +13117,18 @@ int ngtcp2_conn_submit_crypto_data(ngtcp2_conn *conn, return rv; } - fr = &frc->fr.crypto; + fr = &frc->fr.stream; fr->type = NGTCP2_FRAME_CRYPTO; + fr->flags = 0; + fr->fin = 0; + fr->stream_id = 0; fr->offset = pktns->crypto.tx.offset; fr->datacnt = 1; fr->data[0].len = datalen; fr->data[0].base = (uint8_t *)data; - rv = ngtcp2_ksl_insert(&pktns->crypto.tx.frq, NULL, &fr->offset, frc); + rv = ngtcp2_strm_streamfrq_push(&pktns->crypto.strm, frc); if (rv != 0) { ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); return rv; @@ -12868,14 +13144,13 @@ int ngtcp2_conn_submit_new_token(ngtcp2_conn *conn, const uint8_t *token, size_t tokenlen) { int rv; ngtcp2_frame_chain *nfrc; - ngtcp2_vec tokenv = {(uint8_t *)token, tokenlen}; assert(conn->server); assert(token); assert(tokenlen); rv = ngtcp2_frame_chain_new_token_objalloc_new( - &nfrc, &tokenv, &conn->frc_objalloc, conn->mem); + &nfrc, token, tokenlen, &conn->frc_objalloc, conn->mem); if (rv != 0) { return rv; } @@ -12902,16 +13177,11 @@ int ngtcp2_conn_tx_strmq_push(ngtcp2_conn *conn, ngtcp2_strm *strm) { return ngtcp2_pq_push(&conn->tx.strmq, &strm->pe); } -static int conn_has_uncommited_preferred_address_cid(ngtcp2_conn *conn) { +static int conn_has_uncommitted_preferred_addr_cid(ngtcp2_conn *conn) { return conn->server && !(conn->flags & NGTCP2_CONN_FLAG_LOCAL_TRANSPORT_PARAMS_COMMITTED) && conn->oscid.datalen && - conn->local.transport_params.preferred_address_present; -} - -size_t ngtcp2_conn_get_num_scid(ngtcp2_conn *conn) { - return ngtcp2_ksl_len(&conn->scid.set) + - (size_t)conn_has_uncommited_preferred_address_cid(conn); + conn->local.transport_params.preferred_addr_present; } size_t ngtcp2_conn_get_scid(ngtcp2_conn *conn, ngtcp2_cid *dest) { @@ -12919,27 +13189,28 @@ size_t ngtcp2_conn_get_scid(ngtcp2_conn *conn, ngtcp2_cid *dest) { ngtcp2_ksl_it it; ngtcp2_scid *scid; + if (dest == NULL) { + return ngtcp2_ksl_len(&conn->scid.set) + + (size_t)conn_has_uncommitted_preferred_addr_cid(conn); + } + for (it = ngtcp2_ksl_begin(&conn->scid.set); !ngtcp2_ksl_it_end(&it); ngtcp2_ksl_it_next(&it)) { scid = ngtcp2_ksl_it_get(&it); *dest++ = scid->cid; } - if (conn_has_uncommited_preferred_address_cid(conn)) { - *dest++ = conn->local.transport_params.preferred_address.cid; + if (conn_has_uncommitted_preferred_addr_cid(conn)) { + *dest++ = conn->local.transport_params.preferred_addr.cid; } return (size_t)(dest - origdest); } -size_t ngtcp2_conn_get_num_active_dcid(ngtcp2_conn *conn) { +static size_t conn_get_num_active_dcid(ngtcp2_conn *conn) { size_t n = 1; /* for conn->dcid.current */ ngtcp2_pv *pv = conn->pv; - if (!(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED_HANDLED)) { - return 0; - } - if (pv) { if (pv->dcid.seq != conn->dcid.current.seq) { ++n; @@ -12973,10 +13244,14 @@ size_t ngtcp2_conn_get_active_dcid(ngtcp2_conn *conn, ngtcp2_cid_token *dest) { ngtcp2_dcid *dcid; size_t len, i; - if (!(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED_HANDLED)) { + if 
(!(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED)) { return 0; } + if (dest == NULL) { + return conn_get_num_active_dcid(conn); + } + copy_dcid_to_cid_token(dest, &conn->dcid.current); ++dest; @@ -13019,13 +13294,13 @@ const ngtcp2_path *ngtcp2_conn_get_path(ngtcp2_conn *conn) { return &conn->dcid.current.ps.path; } -size_t ngtcp2_conn_get_max_udp_payload_size(ngtcp2_conn *conn) { - return conn->local.settings.max_udp_payload_size; +size_t ngtcp2_conn_get_max_tx_udp_payload_size(ngtcp2_conn *conn) { + return conn->local.settings.max_tx_udp_payload_size; } -size_t ngtcp2_conn_get_path_max_udp_payload_size(ngtcp2_conn *conn) { - if (conn->local.settings.no_udp_payload_size_shaping) { - return ngtcp2_conn_get_max_udp_payload_size(conn); +size_t ngtcp2_conn_get_path_max_tx_udp_payload_size(ngtcp2_conn *conn) { + if (conn->local.settings.no_tx_udp_payload_size_shaping) { + return ngtcp2_conn_get_max_tx_udp_payload_size(conn); } return conn->dcid.current.max_udp_payload_size; @@ -13056,11 +13331,11 @@ int ngtcp2_conn_initiate_immediate_migration(ngtcp2_conn *conn, ngtcp2_tstamp ts) { int rv; ngtcp2_dcid *dcid; + ngtcp2_pv *pv; assert(!conn->server); - conn->log.last_ts = ts; - conn->qlog.last_ts = ts; + conn_update_timestamp(conn, ts); rv = conn_initiate_migration_precheck(conn, &path->local); if (rv != 0) { @@ -13087,35 +13362,33 @@ int ngtcp2_conn_initiate_immediate_migration(ngtcp2_conn *conn, ngtcp2_dcid_copy(&conn->dcid.current, dcid); ngtcp2_ringbuf_pop_front(&conn->dcid.unused.rb); - rv = conn_call_activate_dcid(conn, &conn->dcid.current); - if (rv != 0) { - return rv; - } - conn_reset_congestion_state(conn, ts); conn_reset_ecn_validation_state(conn); - if (!conn->local.settings.no_pmtud) { - rv = conn_start_pmtud(conn); - if (rv != 0) { - return rv; - } + /* TODO It might be better to add a new flag which indicates that a + connection should be closed if this path validation failed. The + current design allows an application to continue, by migrating + into yet another path. 
*/ + rv = ngtcp2_pv_new(&pv, dcid, conn_compute_pv_timeout(conn), + NGTCP2_PV_FLAG_NONE, &conn->log, conn->mem); + if (rv != 0) { + return rv; } - return 0; + conn->pv = pv; + + return conn_call_activate_dcid(conn, &conn->dcid.current); } int ngtcp2_conn_initiate_migration(ngtcp2_conn *conn, const ngtcp2_path *path, ngtcp2_tstamp ts) { int rv; ngtcp2_dcid *dcid; - ngtcp2_duration pto, initial_pto, timeout; ngtcp2_pv *pv; assert(!conn->server); - conn->log.last_ts = ts; - conn->qlog.last_ts = ts; + conn_update_timestamp(conn, ts); rv = conn_initiate_migration_precheck(conn, &path->local); if (rv != 0) { @@ -13132,12 +13405,8 @@ int ngtcp2_conn_initiate_migration(ngtcp2_conn *conn, const ngtcp2_path *path, dcid = ngtcp2_ringbuf_get(&conn->dcid.unused.rb, 0); ngtcp2_dcid_set_path(dcid, path); - pto = conn_compute_pto(conn, &conn->pktns); - initial_pto = conn_compute_initial_pto(conn, &conn->pktns); - timeout = 3 * ngtcp2_max(pto, initial_pto); - - rv = ngtcp2_pv_new(&pv, dcid, timeout, NGTCP2_PV_FLAG_NONE, &conn->log, - conn->mem); + rv = ngtcp2_pv_new(&pv, dcid, conn_compute_pv_timeout(conn), + NGTCP2_PV_FLAG_NONE, &conn->log, conn->mem); if (rv != 0) { return rv; } @@ -13148,10 +13417,6 @@ int ngtcp2_conn_initiate_migration(ngtcp2_conn *conn, const ngtcp2_path *path, return conn_call_activate_dcid(conn, &pv->dcid); } -uint64_t ngtcp2_conn_get_max_local_streams_uni(ngtcp2_conn *conn) { - return conn->local.uni.max_streams; -} - uint64_t ngtcp2_conn_get_max_data_left(ngtcp2_conn *conn) { return conn->tx.max_offset - conn->tx.offset; } @@ -13200,7 +13465,7 @@ ngtcp2_tstamp ngtcp2_conn_get_idle_expiry(ngtcp2_conn *conn) { /* TODO Remote max_idle_timeout becomes effective after handshake completion. */ - if (!conn_is_handshake_completed(conn) || + if (!conn_is_tls_handshake_completed(conn) || conn->remote.transport_params->max_idle_timeout == 0 || (conn->local.transport_params.max_idle_timeout && conn->local.transport_params.max_idle_timeout < @@ -13214,16 +13479,23 @@ ngtcp2_tstamp ngtcp2_conn_get_idle_expiry(ngtcp2_conn *conn) { return UINT64_MAX; } - trpto = 3 * conn_compute_pto(conn, conn_is_handshake_completed(conn) + trpto = 3 * conn_compute_pto(conn, conn_is_tls_handshake_completed(conn) ? &conn->pktns : conn->hs_pktns); - return conn->idle_ts + ngtcp2_max(idle_timeout, trpto); + idle_timeout = ngtcp2_max(idle_timeout, trpto); + + if (conn->idle_ts >= UINT64_MAX - idle_timeout) { + return UINT64_MAX; + } + + return conn->idle_ts + idle_timeout; } ngtcp2_duration ngtcp2_conn_get_pto(ngtcp2_conn *conn) { - return conn_compute_pto( - conn, conn_is_handshake_completed(conn) ? &conn->pktns : conn->hs_pktns); + return conn_compute_pto(conn, conn_is_tls_handshake_completed(conn) + ? 
&conn->pktns + : conn->hs_pktns); } void ngtcp2_conn_set_initial_crypto_ctx(ngtcp2_conn *conn, @@ -13257,12 +13529,12 @@ const ngtcp2_crypto_ctx *ngtcp2_conn_get_crypto_ctx(ngtcp2_conn *conn) { return &conn->pktns.crypto.ctx; } -void ngtcp2_conn_set_early_crypto_ctx(ngtcp2_conn *conn, - const ngtcp2_crypto_ctx *ctx) { +void ngtcp2_conn_set_0rtt_crypto_ctx(ngtcp2_conn *conn, + const ngtcp2_crypto_ctx *ctx) { conn->early.ctx = *ctx; } -const ngtcp2_crypto_ctx *ngtcp2_conn_get_early_crypto_ctx(ngtcp2_conn *conn) { +const ngtcp2_crypto_ctx *ngtcp2_conn_get_0rtt_crypto_ctx(ngtcp2_conn *conn) { return &conn->early.ctx; } @@ -13275,9 +13547,8 @@ void ngtcp2_conn_set_tls_native_handle(ngtcp2_conn *conn, conn->crypto.tls_native_handle = tls_native_handle; } -void ngtcp2_conn_get_connection_close_error( - ngtcp2_conn *conn, ngtcp2_connection_close_error *ccerr) { - *ccerr = conn->rx.ccerr; +const ngtcp2_ccerr *ngtcp2_conn_get_ccerr(ngtcp2_conn *conn) { + return &conn->rx.ccerr; } void ngtcp2_conn_set_tls_error(ngtcp2_conn *conn, int liberr) { @@ -13320,13 +13591,29 @@ int ngtcp2_conn_set_stream_user_data(ngtcp2_conn *conn, int64_t stream_id, } void ngtcp2_conn_update_pkt_tx_time(ngtcp2_conn *conn, ngtcp2_tstamp ts) { - if (!(conn->cstat.pacing_rate > 0) || conn->tx.pacing.pktlen == 0) { + ngtcp2_duration pacing_interval; + ngtcp2_duration wait; + + conn_update_timestamp(conn, ts); + + if (conn->tx.pacing.pktlen == 0) { return; } - conn->tx.pacing.next_ts = - ts + (ngtcp2_duration)((double)conn->tx.pacing.pktlen / - conn->cstat.pacing_rate); + if (conn->cstat.pacing_interval) { + pacing_interval = conn->cstat.pacing_interval; + } else { + /* 1.25 is the under-utilization avoidance factor described in + https://datatracker.ietf.org/doc/html/rfc9002#section-7.7 */ + pacing_interval = (conn->cstat.first_rtt_sample_ts == UINT64_MAX + ? 
NGTCP2_MILLISECONDS + : conn->cstat.smoothed_rtt) * + 100 / 125 / conn->cstat.cwnd; + } + + wait = (ngtcp2_duration)(conn->tx.pacing.pktlen * pacing_interval); + + conn->tx.pacing.next_ts = ts + wait; conn->tx.pacing.pktlen = 0; } @@ -13338,15 +13625,14 @@ int ngtcp2_conn_track_retired_dcid_seq(ngtcp2_conn *conn, uint64_t seq) { size_t i; if (conn->dcid.retire_unacked.len >= - sizeof(conn->dcid.retire_unacked.seqs) / - sizeof(conn->dcid.retire_unacked.seqs[0])) { + ngtcp2_arraylen(conn->dcid.retire_unacked.seqs)) { return NGTCP2_ERR_CONNECTION_ID_LIMIT; } /* Make sure that we do not have a duplicate */ for (i = 0; i < conn->dcid.retire_unacked.len; ++i) { if (conn->dcid.retire_unacked.seqs[i] == seq) { - assert(0); + ngtcp2_unreachable(); } } @@ -13399,20 +13685,35 @@ void ngtcp2_settings_default_versioned(int settings_version, settings->cc_algo = NGTCP2_CC_ALGO_CUBIC; settings->initial_rtt = NGTCP2_DEFAULT_INITIAL_RTT; settings->ack_thresh = 2; - settings->max_udp_payload_size = 1500 - 48; - settings->handshake_timeout = NGTCP2_DEFAULT_HANDSHAKE_TIMEOUT; + settings->max_tx_udp_payload_size = 1500 - 48; + settings->handshake_timeout = UINT64_MAX; } void ngtcp2_transport_params_default_versioned( int transport_params_version, ngtcp2_transport_params *params) { - (void)transport_params_version; - - memset(params, 0, sizeof(*params)); - params->max_udp_payload_size = NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE; - params->ack_delay_exponent = NGTCP2_DEFAULT_ACK_DELAY_EXPONENT; - params->max_ack_delay = NGTCP2_DEFAULT_MAX_ACK_DELAY; - params->active_connection_id_limit = - NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT; + size_t len; + + switch (transport_params_version) { + case NGTCP2_TRANSPORT_PARAMS_VERSION: + len = sizeof(*params); + + break; + default: + ngtcp2_unreachable(); + } + + memset(params, 0, len); + + switch (transport_params_version) { + case NGTCP2_TRANSPORT_PARAMS_VERSION: + params->max_udp_payload_size = NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE; + params->active_connection_id_limit = + NGTCP2_DEFAULT_ACTIVE_CONNECTION_ID_LIMIT; + params->ack_delay_exponent = NGTCP2_DEFAULT_ACK_DELAY_EXPONENT; + params->max_ack_delay = NGTCP2_DEFAULT_MAX_ACK_DELAY; + + break; + } } /* The functions prefixed with ngtcp2_pkt_ are usually put inside diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h index b1c6564175d482..4ed67876bc3749 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.h @@ -43,7 +43,6 @@ #include "ngtcp2_pq.h" #include "ngtcp2_cc.h" #include "ngtcp2_bbr.h" -#include "ngtcp2_bbr2.h" #include "ngtcp2_pv.h" #include "ngtcp2_pmtud.h" #include "ngtcp2_cid.h" @@ -51,16 +50,15 @@ #include "ngtcp2_ppe.h" #include "ngtcp2_qlog.h" #include "ngtcp2_rst.h" +#include "ngtcp2_conn_stat.h" typedef enum { /* Client specific handshake states */ NGTCP2_CS_CLIENT_INITIAL, NGTCP2_CS_CLIENT_WAIT_HANDSHAKE, - NGTCP2_CS_CLIENT_TLS_HANDSHAKE_FAILED, /* Server specific handshake states */ NGTCP2_CS_SERVER_INITIAL, NGTCP2_CS_SERVER_WAIT_HANDSHAKE, - NGTCP2_CS_SERVER_TLS_HANDSHAKE_FAILED, /* Shared by both client and server */ NGTCP2_CS_POST_HANDSHAKE, NGTCP2_CS_CLOSING, @@ -111,18 +109,14 @@ typedef enum { to put the sane limit.*/ #define NGTCP2_MAX_SCID_POOL_SIZE 8 -/* NGTCP2_MAX_NON_ACK_TX_PKT is the maximum number of continuous non - ACK-eliciting packets. */ -#define NGTCP2_MAX_NON_ACK_TX_PKT 3 - /* NGTCP2_ECN_MAX_NUM_VALIDATION_PKTS is the maximum number of ECN marked packets sent in NGTCP2_ECN_STATE_TESTING period. 
*/ #define NGTCP2_ECN_MAX_NUM_VALIDATION_PKTS 10 -/* NGTCP2_CONNECTION_CLOSE_ERROR_MAX_REASONLEN is the maximum length - of reason phrase to remember. If the received reason phrase is - longer than this value, it is truncated. */ -#define NGTCP2_CONNECTION_CLOSE_ERROR_MAX_REASONLEN 1024 +/* NGTCP2_CCERR_MAX_REASONLEN is the maximum length of reason phrase + to remember. If the received reason phrase is longer than this + value, it is truncated. */ +#define NGTCP2_CCERR_MAX_REASONLEN 1024 /* NGTCP2_WRITE_PKT_FLAG_NONE indicates that no flag is set. */ #define NGTCP2_WRITE_PKT_FLAG_NONE 0x00u @@ -142,8 +136,8 @@ typedef union ngtcp2_max_frame { ngtcp2_frame fr; struct { ngtcp2_ack ack; - /* ack includes 1 ngtcp2_ack_blk. */ - ngtcp2_ack_blk blks[NGTCP2_MAX_ACK_BLKS - 1]; + /* ack includes 1 ngtcp2_ack_range. */ + ngtcp2_ack_range ranges[NGTCP2_MAX_ACK_RANGES - 1]; } ackfr; } ngtcp2_max_frame; @@ -158,17 +152,17 @@ void ngtcp2_path_challenge_entry_init(ngtcp2_path_challenge_entry *pcent, /* NGTCP2_CONN_FLAG_NONE indicates that no flag is set. */ #define NGTCP2_CONN_FLAG_NONE 0x00u -/* NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED is set when TLS stack declares - that TLS handshake has completed. The condition of this +/* NGTCP2_CONN_FLAG_TLS_HANDSHAKE_COMPLETED is set when TLS stack + declares that TLS handshake has completed. The condition of this declaration varies between TLS implementations and this flag does not indicate the completion of QUIC handshake. Some implementations declare TLS handshake completion as server when they write off Server Finished and before deriving application rx secret. */ -#define NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED 0x01u -/* NGTCP2_CONN_FLAG_CONN_ID_NEGOTIATED is set if connection ID is - negotiated. This is only used for client. */ -#define NGTCP2_CONN_FLAG_CONN_ID_NEGOTIATED 0x02u +#define NGTCP2_CONN_FLAG_TLS_HANDSHAKE_COMPLETED 0x01u +/* NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED is set when the first + Initial packet has successfully been processed. */ +#define NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED 0x02u /* NGTCP2_CONN_FLAG_TRANSPORT_PARAM_RECVED is set if transport parameters are received. */ #define NGTCP2_CONN_FLAG_TRANSPORT_PARAM_RECVED 0x04u @@ -187,9 +181,9 @@ void ngtcp2_path_challenge_entry_init(ngtcp2_path_challenge_entry *pcent, /* NGTCP2_CONN_FLAG_HANDSHAKE_CONFIRMED is set when an endpoint confirmed completion of handshake. */ #define NGTCP2_CONN_FLAG_HANDSHAKE_CONFIRMED 0x80u -/* NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED_HANDLED is set when the - library transitions its state to "post handshake". */ -#define NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED_HANDLED 0x0100u +/* NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED is set when the library + transitions its state to "post handshake". */ +#define NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED 0x0100u /* NGTCP2_CONN_FLAG_HANDSHAKE_EARLY_RETRANSMIT is set when the early handshake retransmission has done when server receives overlapping Initial crypto data. */ @@ -221,23 +215,15 @@ void ngtcp2_path_challenge_entry_init(ngtcp2_path_challenge_entry *pcent, endpoint has initiated key update. */ #define NGTCP2_CONN_FLAG_KEY_UPDATE_INITIATOR 0x10000u -typedef struct ngtcp2_crypto_data { - ngtcp2_buf buf; - /* pkt_type is the type of packet to send data in buf. If it is 0, - it must be sent in Short packet. Otherwise, it is sent the long - packet type denoted by pkt_type. 
*/ - uint8_t pkt_type; -} ngtcp2_crypto_data; - typedef struct ngtcp2_pktns { struct { /* last_pkt_num is the packet number which the local endpoint sent last time.*/ int64_t last_pkt_num; ngtcp2_frame_chain *frq; - /* num_non_ack_pkt is the number of continuous non ACK-eliciting - packets. */ - size_t num_non_ack_pkt; + /* non_ack_pkt_start_ts is the timestamp since the local endpoint + starts sending continuous non ACK-eliciting packets. */ + ngtcp2_tstamp non_ack_pkt_start_ts; struct { /* ect0 is the number of QUIC packets, not UDP datagram, which @@ -307,8 +293,6 @@ typedef struct ngtcp2_pktns { struct { struct { - /* frq contains crypto data sorted by their offset. */ - ngtcp2_ksl frq; /* offset is the offset of crypto stream in this packet number space. */ uint64_t offset; @@ -344,6 +328,19 @@ typedef enum ngtcp2_ecn_state { NGTCP2_ECN_STATE_CAPABLE, } ngtcp2_ecn_state; +/* ngtcp2_early_transport_params is the values remembered by client + from the previous session. */ +typedef struct ngtcp2_early_transport_params { + uint64_t initial_max_streams_bidi; + uint64_t initial_max_streams_uni; + uint64_t initial_max_stream_data_bidi_local; + uint64_t initial_max_stream_data_bidi_remote; + uint64_t initial_max_stream_data_uni; + uint64_t initial_max_data; + uint64_t active_connection_id_limit; + uint64_t max_datagram_frame_size; +} ngtcp2_early_transport_params; + ngtcp2_static_ringbuf_def(dcid_bound, NGTCP2_MAX_BOUND_DCID_POOL_SIZE, sizeof(ngtcp2_dcid)); ngtcp2_static_ringbuf_def(dcid_unused, NGTCP2_MAX_DCID_POOL_SIZE, @@ -353,7 +350,7 @@ ngtcp2_static_ringbuf_def(dcid_retired, NGTCP2_MAX_DCID_RETIRED_SIZE, ngtcp2_static_ringbuf_def(path_challenge, 4, sizeof(ngtcp2_path_challenge_entry)); -ngtcp2_objalloc_def(strm, ngtcp2_strm, oplent); +ngtcp2_objalloc_decl(strm, ngtcp2_strm, oplent); struct ngtcp2_conn { ngtcp2_objalloc frc_objalloc; @@ -421,25 +418,28 @@ struct ngtcp2_conn { /* num_retired is the number of retired Connection ID still included in set. */ size_t num_retired; + /* num_in_flight is the number of NEW_CONNECTION_ID frames that + are in-flight and not acknowledged yet. */ + size_t num_in_flight; } scid; struct { /* strmq contains ngtcp2_strm which has frames to send. */ ngtcp2_pq strmq; - /* strmq_nretrans is the number of entries in strmq which has - stream data to resent. */ - size_t strmq_nretrans; /* ack is ACK frame. The underlying buffer is reused. */ ngtcp2_frame *ack; - /* max_ack_blks is the number of additional ngtcp2_ack_blk which - ack can contain. */ - size_t max_ack_blks; + /* max_ack_ranges is the number of additional ngtcp2_ack_range + which ack can contain. */ + size_t max_ack_ranges; /* offset is the offset the local endpoint has sent to the remote endpoint. */ uint64_t offset; /* max_offset is the maximum offset that local endpoint can send. */ uint64_t max_offset; + /* last_blocked_offset is the largest offset where the + transmission of stream data is blocked. */ + uint64_t last_blocked_offset; /* last_max_data_ts is the timestamp when last MAX_DATA frame is sent. */ ngtcp2_tstamp last_max_data_ts; @@ -482,7 +482,7 @@ struct ngtcp2_conn { /* path_challenge stores received PATH_CHALLENGE data. */ ngtcp2_static_ringbuf_path_challenge path_challenge; /* ccerr is the received connection close error. */ - ngtcp2_connection_close_error ccerr; + ngtcp2_ccerr ccerr; } rx; struct { @@ -497,16 +497,7 @@ struct ngtcp2_conn { ngtcp2_conn_set_early_remote_transport_params(). Server does not use this field. 
Server must not set values for these parameters that are smaller than the remembered values. */ - struct { - uint64_t initial_max_streams_bidi; - uint64_t initial_max_streams_uni; - uint64_t initial_max_stream_data_bidi_local; - uint64_t initial_max_stream_data_bidi_remote; - uint64_t initial_max_stream_data_uni; - uint64_t initial_max_data; - uint64_t active_connection_id_limit; - uint64_t max_datagram_frame_size; - } transport_params; + ngtcp2_early_transport_params transport_params; } early; struct { @@ -658,14 +649,14 @@ struct ngtcp2_conn { array pointed by preferred_versions. This field is only used by server. */ size_t preferred_versionslen; - /* other_versions is the versions that the local endpoint sends in - version_information transport parameter. This is the wire - image of other_versions field of version_information transport - parameter. */ - uint8_t *other_versions; - /* other_versionslen is the length of data pointed by - other_versions field. */ - size_t other_versionslen; + /* available_versions is the versions that the local endpoint + sends in version_information transport parameter. This is the + wire image of available_versions field of version_information + transport parameter. */ + uint8_t *available_versions; + /* available_versionslen is the length of data pointed by + available_versions field. */ + size_t available_versionslen; } vneg; ngtcp2_map strms; @@ -676,7 +667,12 @@ struct ngtcp2_conn { ngtcp2_qlog qlog; ngtcp2_rst rst; ngtcp2_cc_algo cc_algo; - ngtcp2_cc cc; + union { + ngtcp2_cc cc; + ngtcp2_cc_reno reno; + ngtcp2_cc_cubic cubic; + ngtcp2_cc_bbr bbr; + }; const ngtcp2_mem *mem; /* idle_ts is the time instant when idle timer started. */ ngtcp2_tstamp idle_ts; @@ -913,19 +909,6 @@ ngtcp2_tstamp ngtcp2_conn_lost_pkt_expiry(ngtcp2_conn *conn); */ void ngtcp2_conn_remove_lost_pkt(ngtcp2_conn *conn, ngtcp2_tstamp ts); -/* - * ngtcp2_conn_resched_frames reschedules frames linked from |*pfrc| - * for retransmission. - * - * This function returns 0 if it succeeds, or one of the following - * negative error codes: - * - * NGTCP2_ERR_NOMEM - * Out of memory. - */ -int ngtcp2_conn_resched_frames(ngtcp2_conn *conn, ngtcp2_pktns *pktns, - ngtcp2_frame_chain **pfrc); - uint64_t ngtcp2_conn_tx_strmq_first_cycle(ngtcp2_conn *conn); /** @@ -1112,4 +1095,65 @@ void ngtcp2_conn_stop_pmtud(ngtcp2_conn *conn); int ngtcp2_conn_set_remote_transport_params( ngtcp2_conn *conn, const ngtcp2_transport_params *params); +/** + * @function + * + * `ngtcp2_conn_set_0rtt_remote_transport_params` sets |params| as + * transport parameters previously received from a server. The + * parameters are used to send early data. QUIC requires that client + * application should remember transport parameters along with a + * session ticket. 
+ * + * At least following fields should be set: + * + * - initial_max_stream_id_bidi + * - initial_max_stream_id_uni + * - initial_max_stream_data_bidi_local + * - initial_max_stream_data_bidi_remote + * - initial_max_stream_data_uni + * - initial_max_data + * - active_connection_id_limit + * - max_datagram_frame_size (if DATAGRAM extension was negotiated) + * + * The following fields are ignored: + * + * - ack_delay_exponent + * - max_ack_delay + * - initial_scid + * - original_dcid + * - preferred_address and preferred_address_present + * - retry_scid and retry_scid_present + * - stateless_reset_token and stateless_reset_token_present + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * :macro:`NGTCP2_ERR_NOMEM` + * Out of memory. + */ +int ngtcp2_conn_set_0rtt_remote_transport_params( + ngtcp2_conn *conn, const ngtcp2_transport_params *params); + +/* + * ngtcp2_conn_create_ack_frame creates ACK frame, and assigns its + * pointer to |*pfr| if there are any received packets to acknowledge. + * If there are no packets to acknowledge, this function returns 0, + * and |*pfr| is untouched. The caller is advised to set |*pfr| to + * NULL before calling this function, and check it after this function + * returns. + * + * Call ngtcp2_acktr_commit_ack after a created ACK frame is + * successfully serialized into a packet. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * NGTCP2_ERR_NOMEM + * Out of memory. + */ +int ngtcp2_conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr, + ngtcp2_pktns *pktns, uint8_t type, + ngtcp2_tstamp ts, ngtcp2_duration ack_delay, + uint64_t ack_delay_exponent); + #endif /* NGTCP2_CONN_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h new file mode 100644 index 00000000000000..1a93867aab3cae --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn_stat.h @@ -0,0 +1,132 @@ +/* + * ngtcp2 + * + * Copyright (c) 2023 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef NGTCP2_CONN_STAT_H +#define NGTCP2_CONN_STAT_H + +#ifdef HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include + +/** + * @struct + * + * :type:`ngtcp2_conn_stat` holds various connection statistics, and + * computed data for recovery and congestion controller. 
+ */ +typedef struct ngtcp2_conn_stat { + /** + * :member:`latest_rtt` is the latest RTT sample which is not + * adjusted by acknowledgement delay. + */ + ngtcp2_duration latest_rtt; + /** + * :member:`min_rtt` is the minimum RTT seen so far. It is not + * adjusted by acknowledgement delay. + */ + ngtcp2_duration min_rtt; + /** + * :member:`smoothed_rtt` is the smoothed RTT. + */ + ngtcp2_duration smoothed_rtt; + /** + * :member:`rttvar` is a mean deviation of observed RTT. + */ + ngtcp2_duration rttvar; + /** + * :member:`initial_rtt` is the initial RTT which is used when no + * RTT sample is available. + */ + ngtcp2_duration initial_rtt; + /** + * :member:`first_rtt_sample_ts` is the timestamp when the first RTT + * sample is obtained. + */ + ngtcp2_tstamp first_rtt_sample_ts; + /** + * :member:`pto_count` is the count of successive PTO timer + * expiration. + */ + size_t pto_count; + /** + * :member:`loss_detection_timer` is the deadline of the current + * loss detection timer. + */ + ngtcp2_tstamp loss_detection_timer; + /** + * :member:`last_tx_pkt_ts` corresponds to + * time_of_last_ack_eliciting_packet in :rfc:`9002`. + */ + ngtcp2_tstamp last_tx_pkt_ts[NGTCP2_PKTNS_ID_MAX]; + /** + * :member:`loss_time` corresponds to loss_time in :rfc:`9002`. + */ + ngtcp2_tstamp loss_time[NGTCP2_PKTNS_ID_MAX]; + /** + * :member:`cwnd` is the size of congestion window. + */ + uint64_t cwnd; + /** + * :member:`ssthresh` is slow start threshold. + */ + uint64_t ssthresh; + /** + * :member:`congestion_recovery_start_ts` is the timestamp when + * congestion recovery started. + */ + ngtcp2_tstamp congestion_recovery_start_ts; + /** + * :member:`bytes_in_flight` is the number in bytes of all sent + * packets which have not been acknowledged. + */ + uint64_t bytes_in_flight; + /** + * :member:`max_tx_udp_payload_size` is the maximum size of UDP + * datagram payload that this endpoint transmits. It is used by + * congestion controller to compute congestion window. + */ + size_t max_tx_udp_payload_size; + /** + * :member:`delivery_rate_sec` is the current sending rate measured + * in byte per second. + */ + uint64_t delivery_rate_sec; + /** + * :member:`pacing_interval` is the inverse of pacing rate, which is + * the current packet sending rate computed by a congestion + * controller. 0 if a congestion controller does not set pacing + * interval. Even if this value is set to 0, the library paces + * packets. + */ + ngtcp2_duration pacing_interval; + /** + * :member:`send_quantum` is the maximum size of a data aggregate + * scheduled and transmitted together. 
+ */ + size_t send_quantum; +} ngtcp2_conn_stat; + +#endif /* NGTCP2_CONN_STAT_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c index dcf72e4d0ec980..336721772b4e4c 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.c @@ -30,40 +30,51 @@ #include "ngtcp2_str.h" #include "ngtcp2_pkt.h" #include "ngtcp2_net.h" +#include "ngtcp2_unreachable.h" -uint64_t ngtcp2_get_uint64(const uint8_t *p) { +const uint8_t *ngtcp2_get_uint64(uint64_t *dest, const uint8_t *p) { uint64_t n; - memcpy(&n, p, 8); - return ngtcp2_ntohl64(n); + memcpy(&n, p, sizeof(n)); + *dest = ngtcp2_ntohl64(n); + return p + sizeof(n); } -uint64_t ngtcp2_get_uint48(const uint8_t *p) { +const uint8_t *ngtcp2_get_uint48(uint64_t *dest, const uint8_t *p) { uint64_t n = 0; memcpy(((uint8_t *)&n) + 2, p, 6); - return ngtcp2_ntohl64(n); + *dest = ngtcp2_ntohl64(n); + return p + 6; } -uint32_t ngtcp2_get_uint32(const uint8_t *p) { +const uint8_t *ngtcp2_get_uint32(uint32_t *dest, const uint8_t *p) { uint32_t n; - memcpy(&n, p, 4); - return ngtcp2_ntohl(n); + memcpy(&n, p, sizeof(n)); + *dest = ngtcp2_ntohl(n); + return p + sizeof(n); } -uint32_t ngtcp2_get_uint24(const uint8_t *p) { +const uint8_t *ngtcp2_get_uint24(uint32_t *dest, const uint8_t *p) { uint32_t n = 0; memcpy(((uint8_t *)&n) + 1, p, 3); - return ngtcp2_ntohl(n); + *dest = ngtcp2_ntohl(n); + return p + 3; } -uint16_t ngtcp2_get_uint16(const uint8_t *p) { +const uint8_t *ngtcp2_get_uint16(uint16_t *dest, const uint8_t *p) { uint16_t n; - memcpy(&n, p, 2); - return ngtcp2_ntohs(n); + memcpy(&n, p, sizeof(n)); + *dest = ngtcp2_ntohs(n); + return p + sizeof(n); } -uint64_t ngtcp2_get_varint(size_t *plen, const uint8_t *p) { +const uint8_t *ngtcp2_get_uint16be(uint16_t *dest, const uint8_t *p) { + memcpy(dest, p, sizeof(*dest)); + return p + sizeof(*dest); +} + +static uint64_t get_uvarint(size_t *plen, const uint8_t *p) { union { - char b[8]; + uint8_t n8; uint16_t n16; uint32_t n32; uint64_t n64; @@ -76,36 +87,55 @@ uint64_t ngtcp2_get_varint(size_t *plen, const uint8_t *p) { return *p; case 2: memcpy(&n, p, 2); - n.b[0] &= 0x3f; + n.n8 &= 0x3f; return ngtcp2_ntohs(n.n16); case 4: memcpy(&n, p, 4); - n.b[0] &= 0x3f; + n.n8 &= 0x3f; return ngtcp2_ntohl(n.n32); case 8: memcpy(&n, p, 8); - n.b[0] &= 0x3f; + n.n8 &= 0x3f; return ngtcp2_ntohl64(n.n64); default: - assert(0); + ngtcp2_unreachable(); } +} + +const uint8_t *ngtcp2_get_uvarint(uint64_t *dest, const uint8_t *p) { + size_t len; - return 0; + *dest = get_uvarint(&len, p); + + return p + len; +} + +const uint8_t *ngtcp2_get_varint(int64_t *dest, const uint8_t *p) { + size_t len; + + *dest = (int64_t)get_uvarint(&len, p); + + return p + len; } int64_t ngtcp2_get_pkt_num(const uint8_t *p, size_t pkt_numlen) { + uint32_t l; + uint16_t s; + switch (pkt_numlen) { case 1: return *p; case 2: - return (int64_t)ngtcp2_get_uint16(p); + ngtcp2_get_uint16(&s, p); + return (int64_t)s; case 3: - return (int64_t)ngtcp2_get_uint24(p); + ngtcp2_get_uint24(&l, p); + return (int64_t)l; case 4: - return (int64_t)ngtcp2_get_uint32(p); + ngtcp2_get_uint32(&l, p); + return (int64_t)l; default: - assert(0); - abort(); + ngtcp2_unreachable(); } } @@ -134,7 +164,11 @@ uint8_t *ngtcp2_put_uint16be(uint8_t *p, uint16_t n) { return ngtcp2_cpymem(p, (const uint8_t *)&n, sizeof(n)); } -uint8_t *ngtcp2_put_varint(uint8_t *p, uint64_t n) { +uint8_t *ngtcp2_put_uint16(uint8_t *p, uint16_t n) { + return ngtcp2_cpymem(p, (const uint8_t *)&n, sizeof(n)); +} + +uint8_t 
*ngtcp2_put_uvarint(uint8_t *p, uint64_t n) { uint8_t *rv; if (n < 64) { *p++ = (uint8_t)n; @@ -156,7 +190,7 @@ uint8_t *ngtcp2_put_varint(uint8_t *p, uint64_t n) { return rv; } -uint8_t *ngtcp2_put_varint30(uint8_t *p, uint32_t n) { +uint8_t *ngtcp2_put_uvarint30(uint8_t *p, uint32_t n) { uint8_t *rv; assert(n < 1073741824); @@ -182,16 +216,15 @@ uint8_t *ngtcp2_put_pkt_num(uint8_t *p, int64_t pkt_num, size_t len) { ngtcp2_put_uint32be(p, (uint32_t)pkt_num); return p + 4; default: - assert(0); - abort(); + ngtcp2_unreachable(); } } -size_t ngtcp2_get_varint_len(const uint8_t *p) { +size_t ngtcp2_get_uvarintlen(const uint8_t *p) { return (size_t)(1u << (*p >> 6)); } -size_t ngtcp2_put_varint_len(uint64_t n) { +size_t ngtcp2_put_uvarintlen(uint64_t n) { if (n < 64) { return 1; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h index 99746fdb4cefb7..ef089a971a37f1 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conv.h @@ -33,45 +33,61 @@ /* * ngtcp2_get_uint64 reads 8 bytes from |p| as 64 bits unsigned - * integer encoded as network byte order, and returns it in host byte - * order. + * integer encoded as network byte order, and stores it in the buffer + * pointed by |dest| in host byte order. It returns |p| + 8. */ -uint64_t ngtcp2_get_uint64(const uint8_t *p); +const uint8_t *ngtcp2_get_uint64(uint64_t *dest, const uint8_t *p); /* * ngtcp2_get_uint48 reads 6 bytes from |p| as 48 bits unsigned - * integer encoded as network byte order, and returns it in host byte - * order. + * integer encoded as network byte order, and stores it in the buffer + * pointed by |dest| in host byte order. It returns |p| + 6. */ -uint64_t ngtcp2_get_uint48(const uint8_t *p); +const uint8_t *ngtcp2_get_uint48(uint64_t *dest, const uint8_t *p); /* * ngtcp2_get_uint32 reads 4 bytes from |p| as 32 bits unsigned - * integer encoded as network byte order, and returns it in host byte - * order. + * integer encoded as network byte order, and stores it in the buffer + * pointed by |dest| in host byte order. It returns |p| + 4. */ -uint32_t ngtcp2_get_uint32(const uint8_t *p); +const uint8_t *ngtcp2_get_uint32(uint32_t *dest, const uint8_t *p); /* * ngtcp2_get_uint24 reads 3 bytes from |p| as 24 bits unsigned - * integer encoded as network byte order, and returns it in host byte - * order. + * integer encoded as network byte order, and stores it in the buffer + * pointed by |dest| in host byte order. It returns |p| + 3. */ -uint32_t ngtcp2_get_uint24(const uint8_t *p); +const uint8_t *ngtcp2_get_uint24(uint32_t *dest, const uint8_t *p); /* * ngtcp2_get_uint16 reads 2 bytes from |p| as 16 bits unsigned - * integer encoded as network byte order, and returns it in host byte - * order. + * integer encoded as network byte order, and stores it in the buffer + * pointed by |dest| in host byte order. It returns |p| + 2. */ -uint16_t ngtcp2_get_uint16(const uint8_t *p); +const uint8_t *ngtcp2_get_uint16(uint16_t *dest, const uint8_t *p); /* - * ngtcp2_get_varint reads variable-length integer from |p|, and - * returns it in host byte order. The number of bytes read is stored - * in |*plen|. + * ngtcp2_get_uint16be reads 2 bytes from |p| as 16 bits unsigned + * integer encoded as network byte order, and stores it in the buffer + * pointed by |dest| as is. It returns |p| + 2. 
*/ -uint64_t ngtcp2_get_varint(size_t *plen, const uint8_t *p); +const uint8_t *ngtcp2_get_uint16be(uint16_t *dest, const uint8_t *p); + +/* + * ngtcp2_get_uvarint reads variable-length unsigned integer from |p|, + * and stores it in the buffer pointed by |dest| in host byte order. + * It returns |p| plus the number of bytes read from |p|. + */ +const uint8_t *ngtcp2_get_uvarint(uint64_t *dest, const uint8_t *p); + +/* + * ngtcp2_get_varint reads variable-length unsigned integer from |p|, + * and casts it to the signed integer, and stores it in the buffer + * pointed by |dest| in host byte order. No information should be + * lost in this cast, because the variable-length integer is 62 + * bits. It returns |p| plus the number of bytes read from |p|. + */ +const uint8_t *ngtcp2_get_varint(int64_t *dest, const uint8_t *p); /* * ngtcp2_get_pkt_num reads encoded packet number from |p|. The @@ -115,18 +131,24 @@ uint8_t *ngtcp2_put_uint24be(uint8_t *p, uint32_t n); uint8_t *ngtcp2_put_uint16be(uint8_t *p, uint16_t n); /* - * ngtcp2_put_varint writes |n| in |p| using variable-length integer + * ngtcp2_put_uint16 writes |n| as is in |p|. It returns the one + * beyond of the last written position. + */ +uint8_t *ngtcp2_put_uint16(uint8_t *p, uint16_t n); + +/* + * ngtcp2_put_uvarint writes |n| in |p| using variable-length integer * encoding. It returns the one beyond of the last written position. */ -uint8_t *ngtcp2_put_varint(uint8_t *p, uint64_t n); +uint8_t *ngtcp2_put_uvarint(uint8_t *p, uint64_t n); /* - * ngtcp2_put_varint30 writes |n| in |p| using variable-length integer - * encoding. |n| must be strictly less than 1073741824. The function - * always encodes |n| in 4 bytes. It returns the one beyond of the - * last written position. + * ngtcp2_put_uvarint30 writes |n| in |p| using variable-length + * integer encoding. |n| must be strictly less than 1073741824. The + * function always encodes |n| in 4 bytes. It returns the one beyond + * of the last written position. */ -uint8_t *ngtcp2_put_varint30(uint8_t *p, uint32_t n); +uint8_t *ngtcp2_put_uvarint30(uint8_t *p, uint32_t n); /* * ngtcp2_put_pkt_num encodes |pkt_num| using |len| bytes. It @@ -135,16 +157,16 @@ uint8_t *ngtcp2_put_varint30(uint8_t *p, uint32_t n); uint8_t *ngtcp2_put_pkt_num(uint8_t *p, int64_t pkt_num, size_t len); /* - * ngtcp2_get_varint_len returns the required number of bytes to read + * ngtcp2_get_uvarintlen returns the required number of bytes to read * variable-length integer starting at |p|. */ -size_t ngtcp2_get_varint_len(const uint8_t *p); +size_t ngtcp2_get_uvarintlen(const uint8_t *p); /* - * ngtcp2_put_varint_len returns the required number of bytes to + * ngtcp2_put_uvarintlen returns the required number of bytes to * encode |n|. 
*/ -size_t ngtcp2_put_varint_len(uint64_t n); +size_t ngtcp2_put_uvarintlen(uint64_t n); /* * ngtcp2_nth_server_bidi_id returns |n|-th server bidirectional diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c new file mode 100644 index 00000000000000..eb85687a068449 --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.c @@ -0,0 +1,66 @@ +/* + * ngtcp2 + * + * Copyright (c) 2023 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#include "ngtcp2_conversion.h" + +#include +#include + +static void transport_params_copy(int transport_params_version, + ngtcp2_transport_params *dest, + const ngtcp2_transport_params *src) { + assert(transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION); + + switch (transport_params_version) { + case NGTCP2_TRANSPORT_PARAMS_V1: + memcpy(dest, src, + offsetof(ngtcp2_transport_params, version_info_present) + + sizeof(src->version_info_present)); + + break; + } +} + +const ngtcp2_transport_params * +ngtcp2_transport_params_convert_to_latest(ngtcp2_transport_params *dest, + int transport_params_version, + const ngtcp2_transport_params *src) { + if (transport_params_version == NGTCP2_TRANSPORT_PARAMS_VERSION) { + return src; + } + + ngtcp2_transport_params_default(dest); + + transport_params_copy(transport_params_version, dest, src); + + return dest; +} + +void ngtcp2_transport_params_convert_to_old( + int transport_params_version, ngtcp2_transport_params *dest, + const ngtcp2_transport_params *src) { + assert(transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION); + + transport_params_copy(transport_params_version, dest, src); +} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.h new file mode 100644 index 00000000000000..3457a8f2053aba --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conversion.h @@ -0,0 +1,71 @@ +/* + * ngtcp2 + * + * Copyright (c) 2023 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall 
be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef NGTCP2_CONVERSION_H +#define NGTCP2_CONVERSION_H + +#ifdef HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include + +/* + * ngtcp2_transport_params_convert_to_latest converts |src| of version + * |transport_params_version| to the latest version + * NGTCP2_TRANSPORT_PARAMS_VERSION. + * + * |dest| must point to the latest version. |src| may be the older + * version, and if so, it may have fewer fields. Accessing those + * fields causes undefined behavior. + * + * If |transport_params_version| == NGTCP2_TRANSPORT_PARAMS_VERSION, + * no conversion is made, and |src| is returned. Otherwise, first + * |dest| is initialized via ngtcp2_transport_params_default, and then + * all valid fields in |src| are copied into |dest|. Finally, |dest| + * is returned. + */ +const ngtcp2_transport_params * +ngtcp2_transport_params_convert_to_latest(ngtcp2_transport_params *dest, + int transport_params_version, + const ngtcp2_transport_params *src); + +/* + * ngtcp2_transport_params_convert_to_old converts |src| of the latest + * version to |dest| of version |transport_params_version|. + * + * |transport_params_version| must not be the latest version + * NGTCP2_TRANSPORT_PARAMS_VERSION. + * + * |dest| points to the older version, and it may have fewer fields. + * Accessing those fields causes undefined behavior. + * + * This function copies all valid fields in version + * |transport_params_version| from |src| to |dest|. + */ +void ngtcp2_transport_params_convert_to_old(int transport_params_version, + ngtcp2_transport_params *dest, + const ngtcp2_transport_params *src); + +#endif /* NGTCP2_CONVERSION_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c index f7592f885b4cb2..2c00af5ea53d99 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c @@ -31,6 +31,7 @@ #include "ngtcp2_conv.h" #include "ngtcp2_conn.h" #include "ngtcp2_net.h" +#include "ngtcp2_conversion.h" int ngtcp2_crypto_km_new(ngtcp2_crypto_km **pckm, const uint8_t *secret, size_t secretlen, @@ -107,8 +108,8 @@ void ngtcp2_crypto_create_nonce(uint8_t *dest, const uint8_t *iv, size_t ivlen, * which has variable integer in its parameter. 
*/ static size_t varint_paramlen(ngtcp2_transport_param_id id, uint64_t param) { - size_t valuelen = ngtcp2_put_varint_len(param); - return ngtcp2_put_varint_len(id) + ngtcp2_put_varint_len(valuelen) + valuelen; + size_t valuelen = ngtcp2_put_uvarintlen(param); + return ngtcp2_put_uvarintlen(id) + ngtcp2_put_uvarintlen(valuelen) + valuelen; } /* @@ -117,9 +118,9 @@ static size_t varint_paramlen(ngtcp2_transport_param_id id, uint64_t param) { */ static uint8_t *write_varint_param(uint8_t *p, ngtcp2_transport_param_id id, uint64_t value) { - p = ngtcp2_put_varint(p, id); - p = ngtcp2_put_varint(p, ngtcp2_put_varint_len(value)); - return ngtcp2_put_varint(p, value); + p = ngtcp2_put_uvarint(p, id); + p = ngtcp2_put_uvarint(p, ngtcp2_put_uvarintlen(value)); + return ngtcp2_put_uvarint(p, value); } /* @@ -128,7 +129,7 @@ static uint8_t *write_varint_param(uint8_t *p, ngtcp2_transport_param_id id, */ static size_t cid_paramlen(ngtcp2_transport_param_id id, const ngtcp2_cid *cid) { - return ngtcp2_put_varint_len(id) + ngtcp2_put_varint_len(cid->datalen) + + return ngtcp2_put_uvarintlen(id) + ngtcp2_put_uvarintlen(cid->datalen) + cid->datalen; } @@ -141,8 +142,8 @@ static uint8_t *write_cid_param(uint8_t *p, ngtcp2_transport_param_id id, assert(cid->datalen == 0 || cid->datalen >= NGTCP2_MIN_CIDLEN); assert(cid->datalen <= NGTCP2_MAX_CIDLEN); - p = ngtcp2_put_varint(p, id); - p = ngtcp2_put_varint(p, cid->datalen); + p = ngtcp2_put_uvarint(p, id); + p = ngtcp2_put_uvarint(p, cid->datalen); if (cid->datalen) { p = ngtcp2_cpymem(p, cid->data, cid->datalen); } @@ -151,52 +152,52 @@ static uint8_t *write_cid_param(uint8_t *p, ngtcp2_transport_param_id id, static const uint8_t empty_address[16]; -ngtcp2_ssize ngtcp2_encode_transport_params_versioned( - uint8_t *dest, size_t destlen, ngtcp2_transport_params_type exttype, - int transport_params_version, const ngtcp2_transport_params *params) { +ngtcp2_ssize ngtcp2_transport_params_encode_versioned( + uint8_t *dest, size_t destlen, int transport_params_version, + const ngtcp2_transport_params *params) { uint8_t *p; size_t len = 0; /* For some reason, gcc 7.3.0 requires this initialization. 
*/ size_t preferred_addrlen = 0; size_t version_infolen = 0; - (void)transport_params_version; + const ngtcp2_sockaddr_in *sa_in; + const ngtcp2_sockaddr_in6 *sa_in6; + ngtcp2_transport_params paramsbuf; - switch (exttype) { - case NGTCP2_TRANSPORT_PARAMS_TYPE_CLIENT_HELLO: - break; - case NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS: + params = ngtcp2_transport_params_convert_to_latest( + ¶msbuf, transport_params_version, params); + + if (params->original_dcid_present) { len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID, ¶ms->original_dcid); + } - if (params->stateless_reset_token_present) { - len += - ngtcp2_put_varint_len(NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN) + - ngtcp2_put_varint_len(NGTCP2_STATELESS_RESET_TOKENLEN) + - NGTCP2_STATELESS_RESET_TOKENLEN; - } - if (params->preferred_address_present) { - assert(params->preferred_address.cid.datalen >= NGTCP2_MIN_CIDLEN); - assert(params->preferred_address.cid.datalen <= NGTCP2_MAX_CIDLEN); - preferred_addrlen = 4 /* ipv4Address */ + 2 /* ipv4Port */ + - 16 /* ipv6Address */ + 2 /* ipv6Port */ - + 1 + - params->preferred_address.cid.datalen /* CID */ + - NGTCP2_STATELESS_RESET_TOKENLEN; - len += ngtcp2_put_varint_len(NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS) + - ngtcp2_put_varint_len(preferred_addrlen) + preferred_addrlen; - } - if (params->retry_scid_present) { - len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, - ¶ms->retry_scid); - } - break; - default: - return NGTCP2_ERR_INVALID_ARGUMENT; + if (params->stateless_reset_token_present) { + len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN) + + ngtcp2_put_uvarintlen(NGTCP2_STATELESS_RESET_TOKENLEN) + + NGTCP2_STATELESS_RESET_TOKENLEN; + } + + if (params->preferred_addr_present) { + assert(params->preferred_addr.cid.datalen >= NGTCP2_MIN_CIDLEN); + assert(params->preferred_addr.cid.datalen <= NGTCP2_MAX_CIDLEN); + preferred_addrlen = 4 /* ipv4Address */ + 2 /* ipv4Port */ + + 16 /* ipv6Address */ + 2 /* ipv6Port */ + + 1 + params->preferred_addr.cid.datalen /* CID */ + + NGTCP2_STATELESS_RESET_TOKENLEN; + len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS) + + ngtcp2_put_uvarintlen(preferred_addrlen) + preferred_addrlen; + } + if (params->retry_scid_present) { + len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, + ¶ms->retry_scid); } - len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, - ¶ms->initial_scid); + if (params->initial_scid_present) { + len += cid_paramlen(NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, + ¶ms->initial_scid); + } if (params->initial_max_stream_data_bidi_local) { len += varint_paramlen( @@ -235,8 +236,8 @@ ngtcp2_ssize ngtcp2_encode_transport_params_versioned( } if (params->disable_active_migration) { len += - ngtcp2_put_varint_len(NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION) + - ngtcp2_put_varint_len(0); + ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION) + + ngtcp2_put_uvarintlen(0); } if (params->max_ack_delay != NGTCP2_DEFAULT_MAX_ACK_DELAY) { len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY, @@ -257,14 +258,14 @@ ngtcp2_ssize ngtcp2_encode_transport_params_versioned( params->max_datagram_frame_size); } if (params->grease_quic_bit) { - len += ngtcp2_put_varint_len(NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT) + - ngtcp2_put_varint_len(0); + len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT) + + ngtcp2_put_uvarintlen(0); } if (params->version_info_present) { - 
version_infolen = sizeof(uint32_t) + params->version_info.other_versionslen; - len += ngtcp2_put_varint_len( - NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION_DRAFT) + - ngtcp2_put_varint_len(version_infolen) + version_infolen; + version_infolen = + sizeof(uint32_t) + params->version_info.available_versionslen; + len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION) + + ngtcp2_put_uvarintlen(version_infolen) + version_infolen; } if (dest == NULL && destlen == 0) { @@ -277,58 +278,59 @@ ngtcp2_ssize ngtcp2_encode_transport_params_versioned( p = dest; - if (exttype == NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS) { + if (params->original_dcid_present) { p = write_cid_param( p, NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID, ¶ms->original_dcid); + } + + if (params->stateless_reset_token_present) { + p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN); + p = ngtcp2_put_uvarint(p, sizeof(params->stateless_reset_token)); + p = ngtcp2_cpymem(p, params->stateless_reset_token, + sizeof(params->stateless_reset_token)); + } - if (params->stateless_reset_token_present) { - p = ngtcp2_put_varint(p, NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN); - p = ngtcp2_put_varint(p, sizeof(params->stateless_reset_token)); - p = ngtcp2_cpymem(p, params->stateless_reset_token, - sizeof(params->stateless_reset_token)); + if (params->preferred_addr_present) { + p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS); + p = ngtcp2_put_uvarint(p, preferred_addrlen); + + if (params->preferred_addr.ipv4_present) { + sa_in = ¶ms->preferred_addr.ipv4; + p = ngtcp2_cpymem(p, &sa_in->sin_addr, sizeof(sa_in->sin_addr)); + p = ngtcp2_put_uint16(p, sa_in->sin_port); + } else { + p = ngtcp2_cpymem(p, empty_address, sizeof(sa_in->sin_addr)); + p = ngtcp2_put_uint16(p, 0); } - if (params->preferred_address_present) { - p = ngtcp2_put_varint(p, NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS); - p = ngtcp2_put_varint(p, preferred_addrlen); - - if (params->preferred_address.ipv4_present) { - p = ngtcp2_cpymem(p, params->preferred_address.ipv4_addr, - sizeof(params->preferred_address.ipv4_addr)); - p = ngtcp2_put_uint16be(p, params->preferred_address.ipv4_port); - } else { - p = ngtcp2_cpymem(p, empty_address, - sizeof(params->preferred_address.ipv4_addr)); - p = ngtcp2_put_uint16be(p, 0); - } - - if (params->preferred_address.ipv6_present) { - p = ngtcp2_cpymem(p, params->preferred_address.ipv6_addr, - sizeof(params->preferred_address.ipv6_addr)); - p = ngtcp2_put_uint16be(p, params->preferred_address.ipv6_port); - } else { - p = ngtcp2_cpymem(p, empty_address, - sizeof(params->preferred_address.ipv6_addr)); - p = ngtcp2_put_uint16be(p, 0); - } - - *p++ = (uint8_t)params->preferred_address.cid.datalen; - if (params->preferred_address.cid.datalen) { - p = ngtcp2_cpymem(p, params->preferred_address.cid.data, - params->preferred_address.cid.datalen); - } - p = ngtcp2_cpymem( - p, params->preferred_address.stateless_reset_token, - sizeof(params->preferred_address.stateless_reset_token)); + + if (params->preferred_addr.ipv6_present) { + sa_in6 = ¶ms->preferred_addr.ipv6; + p = ngtcp2_cpymem(p, &sa_in6->sin6_addr, sizeof(sa_in6->sin6_addr)); + p = ngtcp2_put_uint16(p, sa_in6->sin6_port); + } else { + p = ngtcp2_cpymem(p, empty_address, sizeof(sa_in6->sin6_addr)); + p = ngtcp2_put_uint16(p, 0); } - if (params->retry_scid_present) { - p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, - ¶ms->retry_scid); + + *p++ = (uint8_t)params->preferred_addr.cid.datalen; + if 
(params->preferred_addr.cid.datalen) { + p = ngtcp2_cpymem(p, params->preferred_addr.cid.data, + params->preferred_addr.cid.datalen); } + p = ngtcp2_cpymem(p, params->preferred_addr.stateless_reset_token, + sizeof(params->preferred_addr.stateless_reset_token)); } - p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, - ¶ms->initial_scid); + if (params->retry_scid_present) { + p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID, + ¶ms->retry_scid); + } + + if (params->initial_scid_present) { + p = write_cid_param(p, NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID, + ¶ms->initial_scid); + } if (params->initial_max_stream_data_bidi_local) { p = write_varint_param( @@ -375,8 +377,8 @@ ngtcp2_ssize ngtcp2_encode_transport_params_versioned( } if (params->disable_active_migration) { - p = ngtcp2_put_varint(p, NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); - p = ngtcp2_put_varint(p, 0); + p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); + p = ngtcp2_put_uvarint(p, 0); } if (params->max_ack_delay != NGTCP2_DEFAULT_MAX_ACK_DELAY) { @@ -402,17 +404,17 @@ ngtcp2_ssize ngtcp2_encode_transport_params_versioned( } if (params->grease_quic_bit) { - p = ngtcp2_put_varint(p, NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); - p = ngtcp2_put_varint(p, 0); + p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); + p = ngtcp2_put_uvarint(p, 0); } if (params->version_info_present) { - p = ngtcp2_put_varint(p, NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION_DRAFT); - p = ngtcp2_put_varint(p, version_infolen); + p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION); + p = ngtcp2_put_uvarint(p, version_infolen); p = ngtcp2_put_uint32be(p, params->version_info.chosen_version); - if (params->version_info.other_versionslen) { - p = ngtcp2_cpymem(p, params->version_info.other_versions, - params->version_info.other_versionslen); + if (params->version_info.available_versionslen) { + p = ngtcp2_cpymem(p, params->version_info.available_versions, + params->version_info.available_versionslen); } } @@ -423,49 +425,46 @@ ngtcp2_ssize ngtcp2_encode_transport_params_versioned( /* * decode_varint decodes a single varint from the buffer pointed by - * |p| of length |end - p|. If it decodes an integer successfully, it - * stores the integer in |*pdest| and returns 0. Otherwise it returns - * -1. + * |*pp| of length |end - *pp|. If it decodes an integer + * successfully, it stores the integer in |*pdest|, increment |*pp| by + * the number of bytes read from |*pp|, and returns 0. Otherwise it + * returns -1. */ -static ngtcp2_ssize decode_varint(uint64_t *pdest, const uint8_t *p, - const uint8_t *end) { +static int decode_varint(uint64_t *pdest, const uint8_t **pp, + const uint8_t *end) { + const uint8_t *p = *pp; size_t len; if (p == end) { return -1; } - len = ngtcp2_get_varint_len(p); + len = ngtcp2_get_uvarintlen(p); if ((uint64_t)(end - p) < len) { return -1; } - *pdest = ngtcp2_get_varint(&len, p); + *pp = ngtcp2_get_uvarint(pdest, p); - return (ngtcp2_ssize)len; + return 0; } /* * decode_varint_param decodes length prefixed value from the buffer - * pointed by |p| of length |end - p|. The length and value are + * pointed by |*pp| of length |end - *pp|. The length and value are * encoded in varint form. If it decodes a value successfully, it - * stores the value in |*pdest| and returns 0. Otherwise it returns - * -1. + * stores the value in |*pdest|, increment |*pp| by the number of + * bytes read from |*pp|, and returns 0. 
Otherwise it returns -1. */ -static ngtcp2_ssize decode_varint_param(uint64_t *pdest, const uint8_t *p, - const uint8_t *end) { - const uint8_t *begin = p; - ngtcp2_ssize nread; +static int decode_varint_param(uint64_t *pdest, const uint8_t **pp, + const uint8_t *end) { + const uint8_t *p = *pp; uint64_t valuelen; - size_t n; - nread = decode_varint(&valuelen, p, end); - if (nread < 0) { + if (decode_varint(&valuelen, &p, end) != 0) { return -1; } - p += nread; - if (p == end) { return -1; } @@ -474,36 +473,35 @@ static ngtcp2_ssize decode_varint_param(uint64_t *pdest, const uint8_t *p, return -1; } - if (ngtcp2_get_varint_len(p) != valuelen) { + if (ngtcp2_get_uvarintlen(p) != valuelen) { return -1; } - *pdest = ngtcp2_get_varint(&n, p); + *pp = ngtcp2_get_uvarint(pdest, p); - p += valuelen; - - return (ngtcp2_ssize)(p - begin); + return 0; } /* * decode_cid_param decodes length prefixed ngtcp2_cid from the buffer - * pointed by |p| of length |end - p|. The length is encoded in + * pointed by |*pp| of length |end - *pp|. The length is encoded in * varint form. If it decodes a value successfully, it stores the - * value in |*pdest| and returns the number of bytes read. Otherwise - * it returns -1. + * value in |*pdest|, increment |*pp| by the number of read from + * |*pp|, and returns the number of bytes read. Otherwise it returns + * the one of the negative error code: + * + * NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM + * Could not decode Connection ID. */ -static ngtcp2_ssize decode_cid_param(ngtcp2_cid *pdest, const uint8_t *p, - const uint8_t *end) { - const uint8_t *begin = p; +static int decode_cid_param(ngtcp2_cid *pdest, const uint8_t **pp, + const uint8_t *end) { + const uint8_t *p = *pp; uint64_t valuelen; - ngtcp2_ssize nread = decode_varint(&valuelen, p, end); - if (nread < 0) { + if (decode_varint(&valuelen, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; - if ((valuelen != 0 && valuelen < NGTCP2_MIN_CIDLEN) || valuelen > NGTCP2_MAX_CIDLEN || (size_t)(end - p) < valuelen) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; @@ -513,28 +511,35 @@ static ngtcp2_ssize decode_cid_param(ngtcp2_cid *pdest, const uint8_t *p, p += valuelen; - return (ngtcp2_ssize)(p - begin); + *pp = p; + + return 0; } -int ngtcp2_decode_transport_params_versioned( - int transport_params_version, ngtcp2_transport_params *params, - ngtcp2_transport_params_type exttype, const uint8_t *data, size_t datalen) { - const uint8_t *p, *end; +int ngtcp2_transport_params_decode_versioned(int transport_params_version, + ngtcp2_transport_params *dest, + const uint8_t *data, + size_t datalen) { + const uint8_t *p, *end, *lend; size_t len; uint64_t param_type; uint64_t valuelen; - ngtcp2_ssize nread; - int initial_scid_present = 0; - int original_dcid_present = 0; - size_t i; - (void)transport_params_version; + int rv; + ngtcp2_sockaddr_in *sa_in; + ngtcp2_sockaddr_in6 *sa_in6; + uint32_t version; + ngtcp2_transport_params *params, paramsbuf; - if (datalen == 0) { - return NGTCP2_ERR_REQUIRED_TRANSPORT_PARAM; + if (transport_params_version == NGTCP2_TRANSPORT_PARAMS_VERSION) { + params = dest; + } else { + params = ¶msbuf; } /* Set default values */ memset(params, 0, sizeof(*params)); + params->original_dcid_present = 0; + params->initial_scid_present = 0; params->initial_max_streams_bidi = 0; params->initial_max_streams_uni = 0; params->initial_max_stream_data_bidi_local = 0; @@ -543,7 +548,7 @@ int ngtcp2_decode_transport_params_versioned( params->max_udp_payload_size = 
NGTCP2_DEFAULT_MAX_RECV_UDP_PAYLOAD_SIZE; params->ack_delay_exponent = NGTCP2_DEFAULT_ACK_DELAY_EXPONENT; params->stateless_reset_token_present = 0; - params->preferred_address_present = 0; + params->preferred_addr_present = 0; params->disable_active_migration = 0; params->max_ack_delay = NGTCP2_DEFAULT_MAX_ACK_DELAY; params->max_idle_timeout = 0; @@ -560,87 +565,66 @@ int ngtcp2_decode_transport_params_versioned( end = data + datalen; for (; (size_t)(end - p) >= 2;) { - nread = decode_varint(¶m_type, p, end); - if (nread < 0) { + if (decode_varint(¶m_type, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; switch (param_type) { case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL: - nread = decode_varint_param(¶ms->initial_max_stream_data_bidi_local, - p, end); - if (nread < 0) { + if (decode_varint_param(¶ms->initial_max_stream_data_bidi_local, &p, + end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; break; case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE: - nread = decode_varint_param(¶ms->initial_max_stream_data_bidi_remote, - p, end); - if (nread < 0) { + if (decode_varint_param(¶ms->initial_max_stream_data_bidi_remote, &p, + end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; break; case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI: - nread = decode_varint_param(¶ms->initial_max_stream_data_uni, p, end); - if (nread < 0) { + if (decode_varint_param(¶ms->initial_max_stream_data_uni, &p, end) != + 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; break; case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA: - nread = decode_varint_param(¶ms->initial_max_data, p, end); - if (nread < 0) { + if (decode_varint_param(¶ms->initial_max_data, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; break; case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI: - nread = decode_varint_param(¶ms->initial_max_streams_bidi, p, end); - if (nread < 0) { + if (decode_varint_param(¶ms->initial_max_streams_bidi, &p, end) != + 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } if (params->initial_max_streams_bidi > NGTCP2_MAX_STREAMS) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; break; case NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI: - nread = decode_varint_param(¶ms->initial_max_streams_uni, p, end); - if (nread < 0) { + if (decode_varint_param(¶ms->initial_max_streams_uni, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } if (params->initial_max_streams_uni > NGTCP2_MAX_STREAMS) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; break; case NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT: - nread = decode_varint_param(¶ms->max_idle_timeout, p, end); - if (nread < 0) { + if (decode_varint_param(¶ms->max_idle_timeout, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } params->max_idle_timeout *= NGTCP2_MILLISECONDS; - p += nread; break; case NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE: - nread = decode_varint_param(¶ms->max_udp_payload_size, p, end); - if (nread < 0) { + if (decode_varint_param(¶ms->max_udp_payload_size, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; break; case NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN: - if (exttype != NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS) { + if (decode_varint(&valuelen, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - nread = decode_varint(&valuelen, p, end); - if (nread < 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; 
- } - p += nread; if ((size_t)valuelen != sizeof(params->stateless_reset_token)) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } @@ -648,28 +632,23 @@ int ngtcp2_decode_transport_params_versioned( return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - memcpy(params->stateless_reset_token, p, - sizeof(params->stateless_reset_token)); + p = ngtcp2_get_bytes(params->stateless_reset_token, p, + sizeof(params->stateless_reset_token)); params->stateless_reset_token_present = 1; - p += sizeof(params->stateless_reset_token); break; case NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT: - nread = decode_varint_param(¶ms->ack_delay_exponent, p, end); - if (nread < 0 || params->ack_delay_exponent > 20) { + if (decode_varint_param(¶ms->ack_delay_exponent, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; - break; - case NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS: - if (exttype != NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS) { + if (params->ack_delay_exponent > 20) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - nread = decode_varint(&valuelen, p, end); - if (nread < 0) { + break; + case NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS: + if (decode_varint(&valuelen, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; if ((size_t)(end - p) < valuelen) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } @@ -680,143 +659,128 @@ int ngtcp2_decode_transport_params_versioned( return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - memcpy(params->preferred_address.ipv4_addr, p, - sizeof(params->preferred_address.ipv4_addr)); - p += sizeof(params->preferred_address.ipv4_addr); - params->preferred_address.ipv4_port = ngtcp2_get_uint16(p); - p += sizeof(uint16_t); + sa_in = ¶ms->preferred_addr.ipv4; - if (params->preferred_address.ipv4_port || - memcmp(empty_address, params->preferred_address.ipv4_addr, - sizeof(params->preferred_address.ipv4_addr)) != 0) { - params->preferred_address.ipv4_present = 1; + p = ngtcp2_get_bytes(&sa_in->sin_addr, p, sizeof(sa_in->sin_addr)); + p = ngtcp2_get_uint16be(&sa_in->sin_port, p); + + if (sa_in->sin_port || memcmp(empty_address, &sa_in->sin_addr, + sizeof(sa_in->sin_addr)) != 0) { + sa_in->sin_family = NGTCP2_AF_INET; + params->preferred_addr.ipv4_present = 1; } - memcpy(params->preferred_address.ipv6_addr, p, - sizeof(params->preferred_address.ipv6_addr)); - p += sizeof(params->preferred_address.ipv6_addr); - params->preferred_address.ipv6_port = ngtcp2_get_uint16(p); - p += sizeof(uint16_t); + sa_in6 = ¶ms->preferred_addr.ipv6; + + p = ngtcp2_get_bytes(&sa_in6->sin6_addr, p, sizeof(sa_in6->sin6_addr)); + p = ngtcp2_get_uint16be(&sa_in6->sin6_port, p); - if (params->preferred_address.ipv6_port || - memcmp(empty_address, params->preferred_address.ipv6_addr, - sizeof(params->preferred_address.ipv6_addr)) != 0) { - params->preferred_address.ipv6_present = 1; + if (sa_in6->sin6_port || memcmp(empty_address, &sa_in6->sin6_addr, + sizeof(sa_in6->sin6_addr)) != 0) { + sa_in6->sin6_family = NGTCP2_AF_INET6; + params->preferred_addr.ipv6_present = 1; } /* cid */ - params->preferred_address.cid.datalen = *p++; - len += params->preferred_address.cid.datalen; + params->preferred_addr.cid.datalen = *p++; + len += params->preferred_addr.cid.datalen; if (valuelen != len || - params->preferred_address.cid.datalen > NGTCP2_MAX_CIDLEN || - params->preferred_address.cid.datalen < NGTCP2_MIN_CIDLEN) { + params->preferred_addr.cid.datalen > NGTCP2_MAX_CIDLEN || + params->preferred_addr.cid.datalen < NGTCP2_MIN_CIDLEN) { return 
NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - if (params->preferred_address.cid.datalen) { - memcpy(params->preferred_address.cid.data, p, - params->preferred_address.cid.datalen); - p += params->preferred_address.cid.datalen; + if (params->preferred_addr.cid.datalen) { + p = ngtcp2_get_bytes(params->preferred_addr.cid.data, p, + params->preferred_addr.cid.datalen); } /* stateless reset token */ - memcpy(params->preferred_address.stateless_reset_token, p, - sizeof(params->preferred_address.stateless_reset_token)); - p += sizeof(params->preferred_address.stateless_reset_token); - params->preferred_address_present = 1; + p = ngtcp2_get_bytes( + params->preferred_addr.stateless_reset_token, p, + sizeof(params->preferred_addr.stateless_reset_token)); + params->preferred_addr_present = 1; break; case NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION: - nread = decode_varint(&valuelen, p, end); - if (nread < 0 || valuelen != 0) { + if (decode_varint(&valuelen, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if (valuelen != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; params->disable_active_migration = 1; break; case NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID: - if (exttype != NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - nread = decode_cid_param(¶ms->original_dcid, p, end); - if (nread < 0) { - return (int)nread; + rv = decode_cid_param(¶ms->original_dcid, &p, end); + if (rv != 0) { + return rv; } - original_dcid_present = 1; - p += nread; + params->original_dcid_present = 1; break; case NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID: - if (exttype != NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - nread = decode_cid_param(¶ms->retry_scid, p, end); - if (nread < 0) { - return (int)nread; + rv = decode_cid_param(¶ms->retry_scid, &p, end); + if (rv != 0) { + return rv; } params->retry_scid_present = 1; - p += nread; break; case NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID: - nread = decode_cid_param(¶ms->initial_scid, p, end); - if (nread < 0) { - return (int)nread; + rv = decode_cid_param(¶ms->initial_scid, &p, end); + if (rv != 0) { + return rv; } - initial_scid_present = 1; - p += nread; + params->initial_scid_present = 1; break; case NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY: - nread = decode_varint_param(¶ms->max_ack_delay, p, end); - if (nread < 0 || params->max_ack_delay >= 16384) { + if (decode_varint_param(¶ms->max_ack_delay, &p, end) != 0) { + return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if (params->max_ack_delay >= 16384) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } params->max_ack_delay *= NGTCP2_MILLISECONDS; - p += nread; break; case NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT: - nread = decode_varint_param(¶ms->active_connection_id_limit, p, end); - if (nread < 0) { + if (decode_varint_param(¶ms->active_connection_id_limit, &p, end) != + 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; break; case NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE: - nread = decode_varint_param(¶ms->max_datagram_frame_size, p, end); - if (nread < 0) { + if (decode_varint_param(¶ms->max_datagram_frame_size, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; break; case NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT: - nread = decode_varint(&valuelen, p, end); - if (nread < 0 || valuelen != 0) { + if (decode_varint(&valuelen, &p, end) != 0) { + return 
NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; + } + if (valuelen != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; params->grease_quic_bit = 1; break; - case NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION_DRAFT: - nread = decode_varint(&valuelen, p, end); - if (nread < 0) { + case NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION: + if (decode_varint(&valuelen, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; if ((size_t)(end - p) < valuelen) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } if (valuelen < sizeof(uint32_t) || (valuelen & 0x3)) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - params->version_info.chosen_version = ngtcp2_get_uint32(p); + p = ngtcp2_get_uint32(¶ms->version_info.chosen_version, p); if (params->version_info.chosen_version == 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += sizeof(uint32_t); if (valuelen > sizeof(uint32_t)) { - params->version_info.other_versions = (uint8_t *)p; - params->version_info.other_versionslen = + params->version_info.available_versions = (uint8_t *)p; + params->version_info.available_versionslen = (size_t)valuelen - sizeof(uint32_t); - for (i = sizeof(uint32_t); i < valuelen; - i += sizeof(uint32_t), p += sizeof(uint32_t)) { - if (ngtcp2_get_uint32(p) == 0) { + for (lend = p + (valuelen - sizeof(uint32_t)); p != lend;) { + p = ngtcp2_get_uint32(&version, p); + if (version == 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } } @@ -825,11 +789,9 @@ int ngtcp2_decode_transport_params_versioned( break; default: /* Ignore unknown parameter */ - nread = decode_varint(&valuelen, p, end); - if (nread < 0) { + if (decode_varint(&valuelen, &p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - p += nread; if ((size_t)(end - p) < valuelen) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } @@ -842,10 +804,9 @@ int ngtcp2_decode_transport_params_versioned( return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } - if (!initial_scid_present || - (exttype == NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS && - !original_dcid_present)) { - return NGTCP2_ERR_REQUIRED_TRANSPORT_PARAM; + if (transport_params_version != NGTCP2_TRANSPORT_PARAMS_VERSION) { + ngtcp2_transport_params_convert_to_old(transport_params_version, dest, + params); } return 0; @@ -859,7 +820,7 @@ static int transport_params_copy_new(ngtcp2_transport_params **pdest, uint8_t *p; if (src->version_info_present) { - len += src->version_info.other_versionslen; + len += src->version_info.available_versionslen; } dest = ngtcp2_mem_malloc(mem, len); @@ -869,11 +830,11 @@ static int transport_params_copy_new(ngtcp2_transport_params **pdest, *dest = *src; - if (src->version_info_present && src->version_info.other_versionslen) { + if (src->version_info_present && src->version_info.available_versionslen) { p = (uint8_t *)dest + sizeof(*dest); - memcpy(p, src->version_info.other_versions, - src->version_info.other_versionslen); - dest->version_info.other_versions = p; + memcpy(p, src->version_info.available_versions, + src->version_info.available_versionslen); + dest->version_info.available_versions = p; } *pdest = dest; @@ -881,14 +842,13 @@ static int transport_params_copy_new(ngtcp2_transport_params **pdest, return 0; } -int ngtcp2_decode_transport_params_new(ngtcp2_transport_params **pparams, - ngtcp2_transport_params_type exttype, +int ngtcp2_transport_params_decode_new(ngtcp2_transport_params **pparams, const uint8_t *data, size_t datalen, const ngtcp2_mem *mem) { int rv; ngtcp2_transport_params params; - rv = ngtcp2_decode_transport_params(¶ms, 
exttype, data, datalen); + rv = ngtcp2_transport_params_decode(¶ms, data, datalen); if (rv < 0) { return rv; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h index 9a9d95f5b9fe82..b78429bb38f582 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.h @@ -43,30 +43,30 @@ /* ngtcp2_transport_param_id is the registry of QUIC transport parameter ID. */ -typedef enum ngtcp2_transport_param_id { - NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID = 0x0000, - NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT = 0x0001, - NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN = 0x0002, - NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE = 0x0003, - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA = 0x0004, - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL = 0x0005, - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE = 0x0006, - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI = 0x0007, - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI = 0x0008, - NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI = 0x0009, - NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT = 0x000a, - NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY = 0x000b, - NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION = 0x000c, - NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS = 0x000d, - NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT = 0x000e, - NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID = 0x000f, - NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID = 0x0010, - /* https://datatracker.ietf.org/doc/html/rfc9221 */ - NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE = 0x0020, - NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT = 0x2ab2, - /* https://quicwg.org/quic-v2/draft-ietf-quic-v2.html */ - NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION_DRAFT = 0xff73db, -} ngtcp2_transport_param_id; +typedef uint64_t ngtcp2_transport_param_id; + +#define NGTCP2_TRANSPORT_PARAM_ORIGINAL_DESTINATION_CONNECTION_ID 0x00 +#define NGTCP2_TRANSPORT_PARAM_MAX_IDLE_TIMEOUT 0x01 +#define NGTCP2_TRANSPORT_PARAM_STATELESS_RESET_TOKEN 0x02 +#define NGTCP2_TRANSPORT_PARAM_MAX_UDP_PAYLOAD_SIZE 0x03 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_DATA 0x04 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL 0x05 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE 0x06 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAM_DATA_UNI 0x07 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_BIDI 0x08 +#define NGTCP2_TRANSPORT_PARAM_INITIAL_MAX_STREAMS_UNI 0x09 +#define NGTCP2_TRANSPORT_PARAM_ACK_DELAY_EXPONENT 0x0a +#define NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY 0x0b +#define NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION 0x0c +#define NGTCP2_TRANSPORT_PARAM_PREFERRED_ADDRESS 0x0d +#define NGTCP2_TRANSPORT_PARAM_ACTIVE_CONNECTION_ID_LIMIT 0x0e +#define NGTCP2_TRANSPORT_PARAM_INITIAL_SOURCE_CONNECTION_ID 0x0f +#define NGTCP2_TRANSPORT_PARAM_RETRY_SOURCE_CONNECTION_ID 0x10 +/* https://datatracker.ietf.org/doc/html/rfc9221 */ +#define NGTCP2_TRANSPORT_PARAM_MAX_DATAGRAM_FRAME_SIZE 0x20 +#define NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT 0x2ab2 +/* https://datatracker.ietf.org/doc/html/rfc9368 */ +#define NGTCP2_TRANSPORT_PARAM_VERSION_INFORMATION 0x11 /* NGTCP2_CRYPTO_KM_FLAG_NONE indicates that no flag is set. 
*/ #define NGTCP2_CRYPTO_KM_FLAG_NONE 0x00u diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.c index 8f676da3ef0a13..5e4794cd72e7d4 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_err.c @@ -137,6 +137,9 @@ uint64_t ngtcp2_err_infer_quic_transport_error_code(int liberr) { case NGTCP2_ERR_INVALID_ARGUMENT: case NGTCP2_ERR_NOMEM: case NGTCP2_ERR_CALLBACK_FAILURE: + case NGTCP2_ERR_HANDSHAKE_TIMEOUT: + case NGTCP2_ERR_PKT_NUM_EXHAUSTED: + case NGTCP2_ERR_INTERNAL: return NGTCP2_INTERNAL_ERROR; case NGTCP2_ERR_STREAM_STATE: return NGTCP2_STREAM_STATE_ERROR; @@ -147,7 +150,7 @@ uint64_t ngtcp2_err_infer_quic_transport_error_code(int liberr) { case NGTCP2_ERR_NO_VIABLE_PATH: return NGTCP2_NO_VIABLE_PATH; case NGTCP2_ERR_VERSION_NEGOTIATION_FAILURE: - return NGTCP2_VERSION_NEGOTIATION_ERROR_DRAFT; + return NGTCP2_VERSION_NEGOTIATION_ERROR; default: return NGTCP2_PROTOCOL_VIOLATION; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c new file mode 100644 index 00000000000000..41c2a6a755cc8a --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.c @@ -0,0 +1,220 @@ +/* + * ngtcp2 + * + * Copyright (c) 2023 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "ngtcp2_frame_chain.h" + +#include +#include + +ngtcp2_objalloc_def(frame_chain, ngtcp2_frame_chain, oplent); + +int ngtcp2_frame_chain_new(ngtcp2_frame_chain **pfrc, const ngtcp2_mem *mem) { + *pfrc = ngtcp2_mem_malloc(mem, sizeof(ngtcp2_frame_chain)); + if (*pfrc == NULL) { + return NGTCP2_ERR_NOMEM; + } + + ngtcp2_frame_chain_init(*pfrc); + + return 0; +} + +int ngtcp2_frame_chain_objalloc_new(ngtcp2_frame_chain **pfrc, + ngtcp2_objalloc *objalloc) { + *pfrc = ngtcp2_objalloc_frame_chain_get(objalloc); + if (*pfrc == NULL) { + return NGTCP2_ERR_NOMEM; + } + + ngtcp2_frame_chain_init(*pfrc); + + return 0; +} + +int ngtcp2_frame_chain_extralen_new(ngtcp2_frame_chain **pfrc, size_t extralen, + const ngtcp2_mem *mem) { + *pfrc = ngtcp2_mem_malloc(mem, sizeof(ngtcp2_frame_chain) + extralen); + if (*pfrc == NULL) { + return NGTCP2_ERR_NOMEM; + } + + ngtcp2_frame_chain_init(*pfrc); + + return 0; +} + +int ngtcp2_frame_chain_stream_datacnt_objalloc_new(ngtcp2_frame_chain **pfrc, + size_t datacnt, + ngtcp2_objalloc *objalloc, + const ngtcp2_mem *mem) { + size_t need, avail = sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream); + + if (datacnt > 1) { + need = sizeof(ngtcp2_vec) * (datacnt - 1); + + if (need > avail) { + return ngtcp2_frame_chain_extralen_new(pfrc, need - avail, mem); + } + } + + return ngtcp2_frame_chain_objalloc_new(pfrc, objalloc); +} + +int ngtcp2_frame_chain_new_token_objalloc_new(ngtcp2_frame_chain **pfrc, + const uint8_t *token, + size_t tokenlen, + ngtcp2_objalloc *objalloc, + const ngtcp2_mem *mem) { + size_t avail = sizeof(ngtcp2_frame) - sizeof(ngtcp2_new_token); + int rv; + uint8_t *p; + ngtcp2_frame *fr; + + if (tokenlen > avail) { + rv = ngtcp2_frame_chain_extralen_new(pfrc, tokenlen - avail, mem); + } else { + rv = ngtcp2_frame_chain_objalloc_new(pfrc, objalloc); + } + if (rv != 0) { + return rv; + } + + fr = &(*pfrc)->fr; + fr->type = NGTCP2_FRAME_NEW_TOKEN; + + p = (uint8_t *)fr + sizeof(ngtcp2_new_token); + memcpy(p, token, tokenlen); + + fr->new_token.token = p; + fr->new_token.tokenlen = tokenlen; + + return 0; +} + +void ngtcp2_frame_chain_del(ngtcp2_frame_chain *frc, const ngtcp2_mem *mem) { + ngtcp2_frame_chain_binder *binder; + + if (frc == NULL) { + return; + } + + binder = frc->binder; + if (binder && --binder->refcount == 0) { + ngtcp2_mem_free(mem, binder); + } + + ngtcp2_mem_free(mem, frc); +} + +void ngtcp2_frame_chain_objalloc_del(ngtcp2_frame_chain *frc, + ngtcp2_objalloc *objalloc, + const ngtcp2_mem *mem) { + ngtcp2_frame_chain_binder *binder; + + if (frc == NULL) { + return; + } + + switch (frc->fr.type) { + case NGTCP2_FRAME_CRYPTO: + case NGTCP2_FRAME_STREAM: + if (frc->fr.stream.datacnt && + sizeof(ngtcp2_vec) * (frc->fr.stream.datacnt - 1) > + sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream)) { + ngtcp2_frame_chain_del(frc, mem); + + return; + } + + break; + case NGTCP2_FRAME_NEW_TOKEN: + if (frc->fr.new_token.tokenlen > + sizeof(ngtcp2_frame) - sizeof(ngtcp2_new_token)) { + ngtcp2_frame_chain_del(frc, mem); + + return; + } + + break; + } + + binder = frc->binder; + if (binder && --binder->refcount == 0) { + ngtcp2_mem_free(mem, binder); + } + + frc->binder = NULL; + + ngtcp2_objalloc_frame_chain_release(objalloc, frc); +} + +void ngtcp2_frame_chain_init(ngtcp2_frame_chain *frc) { + frc->next = NULL; + frc->binder = NULL; +} + +void ngtcp2_frame_chain_list_objalloc_del(ngtcp2_frame_chain *frc, + ngtcp2_objalloc *objalloc, + const ngtcp2_mem *mem) { + ngtcp2_frame_chain *next; + + for (; frc; frc = next) { + next = frc->next; + + 
ngtcp2_frame_chain_objalloc_del(frc, objalloc, mem); + } +} + +int ngtcp2_frame_chain_binder_new(ngtcp2_frame_chain_binder **pbinder, + const ngtcp2_mem *mem) { + *pbinder = ngtcp2_mem_calloc(mem, 1, sizeof(ngtcp2_frame_chain_binder)); + if (*pbinder == NULL) { + return NGTCP2_ERR_NOMEM; + } + + return 0; +} + +int ngtcp2_bind_frame_chains(ngtcp2_frame_chain *a, ngtcp2_frame_chain *b, + const ngtcp2_mem *mem) { + ngtcp2_frame_chain_binder *binder; + int rv; + + assert(b->binder == NULL); + + if (a->binder == NULL) { + rv = ngtcp2_frame_chain_binder_new(&binder, mem); + if (rv != 0) { + return rv; + } + + a->binder = binder; + ++a->binder->refcount; + } + + b->binder = a->binder; + ++b->binder->refcount; + + return 0; +} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h new file mode 100644 index 00000000000000..656fa5b799450e --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_frame_chain.h @@ -0,0 +1,171 @@ +/* + * ngtcp2 + * + * Copyright (c) 2023 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef NGTCP2_FRAME_CHAIN_H +#define NGTCP2_FRAME_CHAIN_H + +#ifdef HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include + +#include "ngtcp2_pkt.h" +#include "ngtcp2_objalloc.h" + +/* NGTCP2_FRAME_CHAIN_BINDER_FLAG_NONE indicates that no flag is + set. */ +#define NGTCP2_FRAME_CHAIN_BINDER_FLAG_NONE 0x00u +/* NGTCP2_FRAME_CHAIN_BINDER_FLAG_ACK indicates that an information + which a frame carries has been acknowledged. */ +#define NGTCP2_FRAME_CHAIN_BINDER_FLAG_ACK 0x01u + +/* + * ngtcp2_frame_chain_binder binds 2 or more of ngtcp2_frame_chain to + * share the acknowledgement state. In general, all + * ngtcp2_frame_chains bound to the same binder must have the same + * information. + */ +typedef struct ngtcp2_frame_chain_binder { + size_t refcount; + /* flags is bitwise OR of zero or more of + NGTCP2_FRAME_CHAIN_BINDER_FLAG_*. */ + uint32_t flags; +} ngtcp2_frame_chain_binder; + +int ngtcp2_frame_chain_binder_new(ngtcp2_frame_chain_binder **pbinder, + const ngtcp2_mem *mem); + +typedef struct ngtcp2_frame_chain ngtcp2_frame_chain; + +/* + * ngtcp2_frame_chain chains frames in a single packet. 
+ */ +struct ngtcp2_frame_chain { + union { + struct { + ngtcp2_frame_chain *next; + ngtcp2_frame_chain_binder *binder; + ngtcp2_frame fr; + }; + + ngtcp2_opl_entry oplent; + }; +}; + +ngtcp2_objalloc_decl(frame_chain, ngtcp2_frame_chain, oplent); + +/* + * ngtcp2_bind_frame_chains binds two frame chains |a| and |b| using + * new or existing ngtcp2_frame_chain_binder. |a| might have non-NULL + * a->binder. |b| must not have non-NULL b->binder. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * NGTCP2_ERR_NOMEM + * Out of memory + */ +int ngtcp2_bind_frame_chains(ngtcp2_frame_chain *a, ngtcp2_frame_chain *b, + const ngtcp2_mem *mem); + +/* NGTCP2_MAX_STREAM_DATACNT is the maximum number of ngtcp2_vec that + a ngtcp2_stream can include. */ +#define NGTCP2_MAX_STREAM_DATACNT 256 + +/* + * ngtcp2_frame_chain_new allocates ngtcp2_frame_chain object and + * assigns its pointer to |*pfrc|. + * + * This function returns 0 if it succeeds, or one of the following + * negative error codes: + * + * NGTCP2_ERR_NOMEM + * Out of memory. + */ +int ngtcp2_frame_chain_new(ngtcp2_frame_chain **pfrc, const ngtcp2_mem *mem); + +/* + * ngtcp2_frame_chain_objalloc_new behaves like + * ngtcp2_frame_chain_new, but it uses |objalloc| to allocate the object. + */ +int ngtcp2_frame_chain_objalloc_new(ngtcp2_frame_chain **pfrc, + ngtcp2_objalloc *objalloc); + +/* + * ngtcp2_frame_chain_extralen_new works like ngtcp2_frame_chain_new, + * but it allocates extra memory |extralen| in order to extend + * ngtcp2_frame. + */ +int ngtcp2_frame_chain_extralen_new(ngtcp2_frame_chain **pfrc, size_t extralen, + const ngtcp2_mem *mem); + +/* + * ngtcp2_frame_chain_stream_datacnt_objalloc_new works like + * ngtcp2_frame_chain_new, but it allocates enough data to store + * additional |datacnt| - 1 ngtcp2_vec object after ngtcp2_stream + * object. If no additional space is required, + * ngtcp2_frame_chain_objalloc_new is called internally. + */ +int ngtcp2_frame_chain_stream_datacnt_objalloc_new(ngtcp2_frame_chain **pfrc, + size_t datacnt, + ngtcp2_objalloc *objalloc, + const ngtcp2_mem *mem); + +int ngtcp2_frame_chain_new_token_objalloc_new(ngtcp2_frame_chain **pfrc, + const uint8_t *token, + size_t tokenlen, + ngtcp2_objalloc *objalloc, + const ngtcp2_mem *mem); + +/* + * ngtcp2_frame_chain_del deallocates |frc|. It also deallocates the + * memory pointed by |frc|. + */ +void ngtcp2_frame_chain_del(ngtcp2_frame_chain *frc, const ngtcp2_mem *mem); + +/* + * ngtcp2_frame_chain_objalloc_del adds |frc| to |objalloc| for reuse. + * It might just delete |frc| depending on the frame type and the size + * of |frc|. + */ +void ngtcp2_frame_chain_objalloc_del(ngtcp2_frame_chain *frc, + ngtcp2_objalloc *objalloc, + const ngtcp2_mem *mem); + +/* + * ngtcp2_frame_chain_init initializes |frc|. + */ +void ngtcp2_frame_chain_init(ngtcp2_frame_chain *frc); + +/* + * ngtcp2_frame_chain_list_objalloc_del adds all ngtcp2_frame_chain + * linked from |frc| to |objalloc| for reuse. Depending on the frame type + * and its size, ngtcp2_frame_chain might be deleted instead. 
+ */ +void ngtcp2_frame_chain_list_objalloc_del(ngtcp2_frame_chain *frc, + ngtcp2_objalloc *objalloc, + const ngtcp2_mem *mem); + +#endif /* NGTCP2_FRAME_CHAIN_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c index 0bd424cb0bc1f1..0ccc048b5b16b1 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.c @@ -35,6 +35,8 @@ static ngtcp2_ksl_blk null_blk = {{{NULL, NULL, 0, 0, {0}}}}; +ngtcp2_objalloc_def(ksl_blk, ngtcp2_ksl_blk, oplent); + static size_t ksl_nodelen(size_t keylen) { return (sizeof(ngtcp2_ksl_node) + keylen - sizeof(uint64_t) + 0xfu) & ~(uintptr_t)0xfu; @@ -714,6 +716,24 @@ void ngtcp2_ksl_update_key(ngtcp2_ksl *ksl, const ngtcp2_ksl_key *old_key, } } +size_t ngtcp2_ksl_len(ngtcp2_ksl *ksl) { return ksl->n; } + +void ngtcp2_ksl_clear(ngtcp2_ksl *ksl) { + if (!ksl->head) { + return; + } + +#ifdef NOMEMPOOL + ksl_free_blk(ksl, ksl->head); +#endif /* NOMEMPOOL */ + + ksl->front = ksl->back = ksl->head = NULL; + ksl->n = 0; + + ngtcp2_objalloc_clear(&ksl->blkalloc); +} + +#ifndef WIN32 static void ksl_print(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t level) { size_t i; ngtcp2_ksl_node *node; @@ -734,23 +754,6 @@ static void ksl_print(ngtcp2_ksl *ksl, ngtcp2_ksl_blk *blk, size_t level) { } } -size_t ngtcp2_ksl_len(ngtcp2_ksl *ksl) { return ksl->n; } - -void ngtcp2_ksl_clear(ngtcp2_ksl *ksl) { - if (!ksl->head) { - return; - } - -#ifdef NOMEMPOOL - ksl_free_blk(ksl, ksl->head); -#endif /* NOMEMPOOL */ - - ksl->front = ksl->back = ksl->head = NULL; - ksl->n = 0; - - ngtcp2_objalloc_clear(&ksl->blkalloc); -} - void ngtcp2_ksl_print(ngtcp2_ksl *ksl) { if (!ksl->head) { return; @@ -758,6 +761,7 @@ void ngtcp2_ksl_print(ngtcp2_ksl *ksl) { ksl_print(ksl, ksl->head, 0); } +#endif /* !WIN32 */ ngtcp2_ksl_it ngtcp2_ksl_begin(const ngtcp2_ksl *ksl) { ngtcp2_ksl_it it; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h index 312a151d4aa9ec..7e08f15cdae6e8 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ksl.h @@ -106,7 +106,7 @@ struct ngtcp2_ksl_blk { }; }; -ngtcp2_objalloc_def(ksl_blk, ngtcp2_ksl_blk, oplent); +ngtcp2_objalloc_decl(ksl_blk, ngtcp2_ksl_blk, oplent); /* * ngtcp2_ksl_compar is a function type which returns nonzero if key @@ -263,12 +263,14 @@ void ngtcp2_ksl_clear(ngtcp2_ksl *ksl); #define ngtcp2_ksl_nth_node(KSL, BLK, N) \ ((ngtcp2_ksl_node *)(void *)((BLK)->nodes + (KSL)->nodelen * (N))) +#ifndef WIN32 /* * ngtcp2_ksl_print prints its internal state in stderr. It assumes * that the key is of type int64_t. This function should be used for * the debugging purpose only. */ void ngtcp2_ksl_print(ngtcp2_ksl *ksl); +#endif /* !WIN32 */ /* * ngtcp2_ksl_it_init initializes |it|. 
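[Editor's note] The ngtcp2_ksl.c/ngtcp2_ksl.h hunks above move ngtcp2_objalloc_def() out of the header and leave ngtcp2_objalloc_decl() in its place, so the generated allocator helpers are declared in every includer but defined in exactly one translation unit. The sketch below illustrates that declaration/definition split in a minimal, hypothetical form; the POOL_DECL/POOL_DEF macros and the my_pool/my_obj names are invented for illustration and are not part of the ngtcp2 API, whose real macros also recycle objects from a free list rather than calling malloc/free directly.

    /* Minimal sketch of a decl/def macro pair, assuming the same intent as
     * ngtcp2_objalloc_decl/ngtcp2_objalloc_def: prototypes in the header,
     * definitions emitted once in a .c file. Not the real implementation. */
    #include <stdlib.h>

    #define POOL_DECL(NAME, TYPE)                                            \
      TYPE *NAME##_get(void);                                                \
      void NAME##_release(TYPE *obj);

    #define POOL_DEF(NAME, TYPE)                                             \
      TYPE *NAME##_get(void) { return malloc(sizeof(TYPE)); }                \
      void NAME##_release(TYPE *obj) { free(obj); }

    struct my_obj { int v; };

    /* In real code, POOL_DECL(...) would live in my_pool.h and
     * POOL_DEF(...) only in my_pool.c, mirroring the header/source split
     * applied to ksl_blk in this patch. */
    POOL_DECL(my_pool, struct my_obj)
    POOL_DEF(my_pool, struct my_obj)

    int main(void) {
      struct my_obj *o = my_pool_get();
      if (o == NULL) return 1;
      o->v = 1;
      my_pool_release(o);
      return 0;
    }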
diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c index ee37ff3517b2bc..760bd60a9aff76 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c @@ -35,6 +35,8 @@ #include "ngtcp2_vec.h" #include "ngtcp2_macro.h" #include "ngtcp2_conv.h" +#include "ngtcp2_unreachable.h" +#include "ngtcp2_net.h" void ngtcp2_log_init(ngtcp2_log *log, const ngtcp2_cid *scid, ngtcp2_printf log_printf, ngtcp2_tstamp ts, @@ -45,6 +47,7 @@ void ngtcp2_log_init(ngtcp2_log *log, const ngtcp2_cid *scid, log->scid[0] = '\0'; } log->log_printf = log_printf; + log->events = 0xff; log->ts = log->last_ts = ts; log->user_data = user_data; } @@ -65,7 +68,7 @@ void ngtcp2_log_init(ngtcp2_log *log, const ngtcp2_cid *scid, * Source Connection ID in hex string. * * : - * Event. pkt=packet, frm=frame, rcv=recovery, cry=crypto, + * Event. pkt=packet, frm=frame, ldc=loss-detection, cry=crypto, * con=connection(catch all) * * # Frame event @@ -138,7 +141,7 @@ static const char *strerrorcode(uint64_t error_code) { return "CRYPTO_BUFFER_EXCEEDED"; case NGTCP2_KEY_UPDATE_ERROR: return "KEY_UPDATE_ERROR"; - case NGTCP2_VERSION_NEGOTIATION_ERROR_DRAFT: + case NGTCP2_VERSION_NEGOTIATION_ERROR: return "VERSION_NEGOTIATION_ERROR"; default: if (0x100u <= error_code && error_code <= 0x1ffu) { @@ -202,12 +205,14 @@ static const char *strevent(ngtcp2_log_event ev) { return "pkt"; case NGTCP2_LOG_EVENT_FRM: return "frm"; - case NGTCP2_LOG_EVENT_RCV: - return "rcv"; + case NGTCP2_LOG_EVENT_LDC: + return "ldc"; case NGTCP2_LOG_EVENT_CRY: return "cry"; case NGTCP2_LOG_EVENT_PTV: return "ptv"; + case NGTCP2_LOG_EVENT_CCA: + return "cca"; case NGTCP2_LOG_EVENT_NONE: default: return "non"; @@ -220,8 +225,8 @@ static void log_fr_stream(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_stream *fr, const char *dir) { log->log_printf( log->user_data, - (NGTCP2_LOG_PKT " STREAM(0x%02x) id=0x%" PRIx64 " fin=%d offset=%" PRIu64 - " len=%" PRIu64 " uni=%d"), + (NGTCP2_LOG_PKT " STREAM(0x%02" PRIx64 ") id=0x%" PRIx64 + " fin=%d offset=%" PRIu64 " len=%" PRIu64 " uni=%d"), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type | fr->flags, fr->stream_id, fr->fin, fr->offset, ngtcp2_vec_len(fr->data, fr->datacnt), (fr->stream_id & 0x2) != 0); @@ -233,36 +238,37 @@ static void log_fr_ack(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, size_t i; log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " ACK(0x%02x) largest_ack=%" PRId64 + (NGTCP2_LOG_PKT " ACK(0x%02" PRIx64 ") largest_ack=%" PRId64 " ack_delay=%" PRIu64 "(%" PRIu64 - ") ack_block_count=%zu"), + ") ack_range_count=%zu"), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->largest_ack, fr->ack_delay_unscaled / NGTCP2_MILLISECONDS, fr->ack_delay, - fr->num_blks); + fr->rangecnt); largest_ack = fr->largest_ack; - min_ack = fr->largest_ack - (int64_t)fr->first_ack_blklen; + min_ack = fr->largest_ack - (int64_t)fr->first_ack_range; log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " ACK(0x%02x) block=[%" PRId64 "..%" PRId64 - "] block_count=%" PRIu64), + (NGTCP2_LOG_PKT " ACK(0x%02" PRIx64 ") range=[%" PRId64 + "..%" PRId64 "] len=%" PRIu64), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, largest_ack, min_ack, - fr->first_ack_blklen); + fr->first_ack_range); - for (i = 0; i < fr->num_blks; ++i) { - const ngtcp2_ack_blk *blk = &fr->blks[i]; - largest_ack = min_ack - (int64_t)blk->gap - 2; - min_ack = largest_ack - (int64_t)blk->blklen; + for (i = 0; i < fr->rangecnt; ++i) { + const ngtcp2_ack_range *range = &fr->ranges[i]; + largest_ack = min_ack - (int64_t)range->gap 
- 2; + min_ack = largest_ack - (int64_t)range->len; log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " ACK(0x%02x) block=[%" PRId64 "..%" PRId64 - "] gap=%" PRIu64 " block_count=%" PRIu64), + (NGTCP2_LOG_PKT " ACK(0x%02" PRIx64 ") range=[%" PRId64 + "..%" PRId64 "] gap=%" PRIu64 + " len=%" PRIu64), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, largest_ack, - min_ack, blk->gap, blk->blklen); + min_ack, range->gap, range->len); } if (fr->type == NGTCP2_FRAME_ACK_ECN) { log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " ACK(0x%02x) ect0=%" PRIu64 + (NGTCP2_LOG_PKT " ACK(0x%02" PRIx64 ") ect0=%" PRIu64 " ect1=%" PRIu64 " ce=%" PRIu64), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->ecn.ect0, fr->ecn.ect1, fr->ecn.ce); @@ -271,7 +277,8 @@ static void log_fr_ack(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, static void log_fr_padding(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_padding *fr, const char *dir) { - log->log_printf(log->user_data, (NGTCP2_LOG_PKT " PADDING(0x%02x) len=%zu"), + log->log_printf(log->user_data, + (NGTCP2_LOG_PKT " PADDING(0x%02" PRIx64 ") len=%zu"), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->len); } @@ -280,7 +287,7 @@ static void log_fr_reset_stream(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const char *dir) { log->log_printf( log->user_data, - (NGTCP2_LOG_PKT " RESET_STREAM(0x%02x) id=0x%" PRIx64 + (NGTCP2_LOG_PKT " RESET_STREAM(0x%02" PRIx64 ") id=0x%" PRIx64 " app_error_code=%s(0x%" PRIx64 ") final_size=%" PRIu64), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->stream_id, strapperrorcode(fr->app_error_code), fr->app_error_code, fr->final_size); @@ -293,9 +300,10 @@ static void log_fr_connection_close(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, size_t reasonlen = ngtcp2_min(sizeof(reason) - 1, fr->reasonlen); log->log_printf(log->user_data, - (NGTCP2_LOG_PKT - " CONNECTION_CLOSE(0x%02x) error_code=%s(0x%" PRIx64 ") " - "frame_type=%" PRIx64 " reason_len=%zu reason=[%s]"), + (NGTCP2_LOG_PKT " CONNECTION_CLOSE(0x%02" PRIx64 + ") error_code=%s(0x%" PRIx64 ") " + "frame_type=%" PRIx64 + " reason_len=%zu reason=[%s]"), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->type == NGTCP2_FRAME_CONNECTION_CLOSE ? 
strerrorcode(fr->error_code) @@ -306,16 +314,18 @@ static void log_fr_connection_close(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, static void log_fr_max_data(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_max_data *fr, const char *dir) { - log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " MAX_DATA(0x%02x) max_data=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_data); + log->log_printf( + log->user_data, + (NGTCP2_LOG_PKT " MAX_DATA(0x%02" PRIx64 ") max_data=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_data); } static void log_fr_max_stream_data(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_max_stream_data *fr, const char *dir) { log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " MAX_STREAM_DATA(0x%02x) id=0x%" PRIx64 + (NGTCP2_LOG_PKT " MAX_STREAM_DATA(0x%02" PRIx64 + ") id=0x%" PRIx64 " max_stream_data=%" PRIu64), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->stream_id, fr->max_stream_data); @@ -323,31 +333,33 @@ static void log_fr_max_stream_data(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, static void log_fr_max_streams(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_max_streams *fr, const char *dir) { - log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " MAX_STREAMS(0x%02x) max_streams=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_streams); + log->log_printf( + log->user_data, + (NGTCP2_LOG_PKT " MAX_STREAMS(0x%02" PRIx64 ") max_streams=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_streams); } static void log_fr_ping(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_ping *fr, const char *dir) { - log->log_printf(log->user_data, (NGTCP2_LOG_PKT " PING(0x%02x)"), + log->log_printf(log->user_data, (NGTCP2_LOG_PKT " PING(0x%02" PRIx64 ")"), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type); } static void log_fr_data_blocked(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_data_blocked *fr, const char *dir) { - log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " DATA_BLOCKED(0x%02x) offset=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset); + log->log_printf( + log->user_data, + (NGTCP2_LOG_PKT " DATA_BLOCKED(0x%02" PRIx64 ") offset=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset); } static void log_fr_stream_data_blocked(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_stream_data_blocked *fr, const char *dir) { log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " STREAM_DATA_BLOCKED(0x%02x) id=0x%" PRIx64 - " offset=%" PRIu64), + (NGTCP2_LOG_PKT " STREAM_DATA_BLOCKED(0x%02" PRIx64 + ") id=0x%" PRIx64 " offset=%" PRIu64), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->stream_id, fr->offset); } @@ -357,7 +369,7 @@ static void log_fr_streams_blocked(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const char *dir) { log->log_printf( log->user_data, - (NGTCP2_LOG_PKT " STREAMS_BLOCKED(0x%02x) max_streams=%" PRIu64), + (NGTCP2_LOG_PKT " STREAMS_BLOCKED(0x%02" PRIx64 ") max_streams=%" PRIu64), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->max_streams); } @@ -369,7 +381,7 @@ static void log_fr_new_connection_id(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, log->log_printf( log->user_data, - (NGTCP2_LOG_PKT " NEW_CONNECTION_ID(0x%02x) seq=%" PRIu64 + (NGTCP2_LOG_PKT " NEW_CONNECTION_ID(0x%02" PRIx64 ") seq=%" PRIu64 " cid=0x%s retire_prior_to=%" PRIu64 " stateless_reset_token=0x%s"), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->seq, @@ -383,7 +395,7 @@ static void log_fr_stop_sending(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_stop_sending *fr, const char *dir) { 
log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " STOP_SENDING(0x%02x) id=0x%" PRIx64 + (NGTCP2_LOG_PKT " STOP_SENDING(0x%02" PRIx64 ") id=0x%" PRIx64 " app_error_code=%s(0x%" PRIx64 ")"), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->stream_id, strapperrorcode(fr->app_error_code), fr->app_error_code); @@ -395,7 +407,8 @@ static void log_fr_path_challenge(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, uint8_t buf[sizeof(fr->data) * 2 + 1]; log->log_printf( - log->user_data, (NGTCP2_LOG_PKT " PATH_CHALLENGE(0x%02x) data=0x%s"), + log->user_data, + (NGTCP2_LOG_PKT " PATH_CHALLENGE(0x%02" PRIx64 ") data=0x%s"), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, (const char *)ngtcp2_encode_hex(buf, fr->data, sizeof(fr->data))); } @@ -406,18 +419,19 @@ static void log_fr_path_response(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, uint8_t buf[sizeof(fr->data) * 2 + 1]; log->log_printf( - log->user_data, (NGTCP2_LOG_PKT " PATH_RESPONSE(0x%02x) data=0x%s"), + log->user_data, + (NGTCP2_LOG_PKT " PATH_RESPONSE(0x%02" PRIx64 ") data=0x%s"), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, (const char *)ngtcp2_encode_hex(buf, fr->data, sizeof(fr->data))); } static void log_fr_crypto(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, - const ngtcp2_crypto *fr, const char *dir) { - log->log_printf( - log->user_data, - (NGTCP2_LOG_PKT " CRYPTO(0x%02x) offset=%" PRIu64 " len=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset, - ngtcp2_vec_len(fr->data, fr->datacnt)); + const ngtcp2_stream *fr, const char *dir) { + log->log_printf(log->user_data, + (NGTCP2_LOG_PKT " CRYPTO(0x%02" PRIx64 ") offset=%" PRIu64 + " len=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->offset, + ngtcp2_vec_len(fr->data, fr->datacnt)); } static void log_fr_new_token(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, @@ -427,38 +441,41 @@ static void log_fr_new_token(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, uint8_t buf[128 + 1 + 1]; uint8_t *p; - if (fr->token.len > 64) { - p = ngtcp2_encode_hex(buf, fr->token.base, 64); + if (fr->tokenlen > 64) { + p = ngtcp2_encode_hex(buf, fr->token, 64); p[128] = '*'; p[129] = '\0'; } else { - p = ngtcp2_encode_hex(buf, fr->token.base, fr->token.len); + p = ngtcp2_encode_hex(buf, fr->token, fr->tokenlen); } log->log_printf( - log->user_data, (NGTCP2_LOG_PKT " NEW_TOKEN(0x%02x) token=0x%s len=%zu"), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, (const char *)p, fr->token.len); + log->user_data, + (NGTCP2_LOG_PKT " NEW_TOKEN(0x%02" PRIx64 ") token=0x%s len=%zu"), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, (const char *)p, fr->tokenlen); } static void log_fr_retire_connection_id(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_retire_connection_id *fr, const char *dir) { - log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " RETIRE_CONNECTION_ID(0x%02x) seq=%" PRIu64), - NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->seq); + log->log_printf( + log->user_data, + (NGTCP2_LOG_PKT " RETIRE_CONNECTION_ID(0x%02" PRIx64 ") seq=%" PRIu64), + NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, fr->seq); } static void log_fr_handshake_done(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_handshake_done *fr, const char *dir) { - log->log_printf(log->user_data, (NGTCP2_LOG_PKT " HANDSHAKE_DONE(0x%02x)"), + log->log_printf(log->user_data, + (NGTCP2_LOG_PKT " HANDSHAKE_DONE(0x%02" PRIx64 ")"), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type); } static void log_fr_datagram(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_datagram *fr, const char *dir) { log->log_printf(log->user_data, - (NGTCP2_LOG_PKT " DATAGRAM(0x%02x) len=%" PRIu64), 
+ (NGTCP2_LOG_PKT " DATAGRAM(0x%02" PRIx64 ") len=%" PRIu64), NGTCP2_LOG_FRM_HD_FIELDS(dir), fr->type, ngtcp2_vec_len(fr->data, fr->datacnt)); } @@ -519,7 +536,7 @@ static void log_fr(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, log_fr_path_response(log, hd, &fr->path_response, dir); break; case NGTCP2_FRAME_CRYPTO: - log_fr_crypto(log, hd, &fr->crypto, dir); + log_fr_crypto(log, hd, &fr->stream, dir); break; case NGTCP2_FRAME_NEW_TOKEN: log_fr_new_token(log, hd, &fr->new_token, dir); @@ -535,13 +552,13 @@ static void log_fr(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, log_fr_datagram(log, hd, &fr->datagram, dir); break; default: - assert(0); + ngtcp2_unreachable(); } } void ngtcp2_log_rx_fr(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_frame *fr) { - if (!log->log_printf) { + if (!log->log_printf || !(log->events & NGTCP2_LOG_EVENT_FRM)) { return; } @@ -550,7 +567,7 @@ void ngtcp2_log_rx_fr(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, void ngtcp2_log_tx_fr(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const ngtcp2_frame *fr) { - if (!log->log_printf) { + if (!log->log_printf || !(log->events & NGTCP2_LOG_EVENT_FRM)) { return; } @@ -561,7 +578,7 @@ void ngtcp2_log_rx_vn(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, const uint32_t *sv, size_t nsv) { size_t i; - if (!log->log_printf) { + if (!log->log_printf || !(log->events & NGTCP2_LOG_EVENT_PKT)) { return; } @@ -576,7 +593,7 @@ void ngtcp2_log_rx_sr(ngtcp2_log *log, const ngtcp2_pkt_stateless_reset *sr) { ngtcp2_pkt_hd shd; ngtcp2_pkt_hd *hd = &shd; - if (!log->log_printf) { + if (!log->log_printf || !(log->events & NGTCP2_LOG_EVENT_PKT)) { return; } @@ -592,82 +609,94 @@ void ngtcp2_log_rx_sr(ngtcp2_log *log, const ngtcp2_pkt_stateless_reset *sr) { sr->randlen); } -void ngtcp2_log_remote_tp(ngtcp2_log *log, uint8_t exttype, +void ngtcp2_log_remote_tp(ngtcp2_log *log, const ngtcp2_transport_params *params) { uint8_t token[NGTCP2_STATELESS_RESET_TOKENLEN * 2 + 1]; uint8_t addr[16 * 2 + 7 + 1]; uint8_t cid[NGTCP2_MAX_CIDLEN * 2 + 1]; size_t i; + const ngtcp2_sockaddr_in *sa_in; + const ngtcp2_sockaddr_in6 *sa_in6; + const uint8_t *p; + uint32_t version; - if (!log->log_printf) { + if (!log->log_printf || !(log->events & NGTCP2_LOG_EVENT_CRY)) { return; } - if (exttype == NGTCP2_TRANSPORT_PARAMS_TYPE_ENCRYPTED_EXTENSIONS) { - if (params->stateless_reset_token_present) { - log->log_printf(log->user_data, - (NGTCP2_LOG_TP " stateless_reset_token=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex( - token, params->stateless_reset_token, - sizeof(params->stateless_reset_token))); - } + if (params->stateless_reset_token_present) { + log->log_printf( + log->user_data, (NGTCP2_LOG_TP " stateless_reset_token=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(token, params->stateless_reset_token, + sizeof(params->stateless_reset_token))); + } + + if (params->preferred_addr_present) { + if (params->preferred_addr.ipv4_present) { + sa_in = ¶ms->preferred_addr.ipv4; - if (params->preferred_address_present) { log->log_printf(log->user_data, (NGTCP2_LOG_TP " preferred_address.ipv4_addr=%s"), NGTCP2_LOG_TP_HD_FIELDS, (const char *)ngtcp2_encode_ipv4( - addr, params->preferred_address.ipv4_addr)); - log->log_printf( - log->user_data, (NGTCP2_LOG_TP " preferred_address.ipv4_port=%u"), - NGTCP2_LOG_TP_HD_FIELDS, params->preferred_address.ipv4_port); + addr, (const uint8_t *)&sa_in->sin_addr)); + log->log_printf(log->user_data, + (NGTCP2_LOG_TP " preferred_address.ipv4_port=%u"), + NGTCP2_LOG_TP_HD_FIELDS, 
ngtcp2_ntohs(sa_in->sin_port)); + } + + if (params->preferred_addr.ipv6_present) { + sa_in6 = ¶ms->preferred_addr.ipv6; log->log_printf(log->user_data, (NGTCP2_LOG_TP " preferred_address.ipv6_addr=%s"), NGTCP2_LOG_TP_HD_FIELDS, (const char *)ngtcp2_encode_ipv6( - addr, params->preferred_address.ipv6_addr)); - log->log_printf( - log->user_data, (NGTCP2_LOG_TP " preferred_address.ipv6_port=%u"), - NGTCP2_LOG_TP_HD_FIELDS, params->preferred_address.ipv6_port); - + addr, (const uint8_t *)&sa_in6->sin6_addr)); log->log_printf(log->user_data, - (NGTCP2_LOG_TP " preferred_address.cid=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex( - cid, params->preferred_address.cid.data, - params->preferred_address.cid.datalen)); - log->log_printf( - log->user_data, - (NGTCP2_LOG_TP " preferred_address.stateless_reset_token=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex( - token, params->preferred_address.stateless_reset_token, - sizeof(params->preferred_address.stateless_reset_token))); + (NGTCP2_LOG_TP " preferred_address.ipv6_port=%u"), + NGTCP2_LOG_TP_HD_FIELDS, ngtcp2_ntohs(sa_in6->sin6_port)); } + log->log_printf( + log->user_data, (NGTCP2_LOG_TP " preferred_address.cid=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(cid, params->preferred_addr.cid.data, + params->preferred_addr.cid.datalen)); + log->log_printf( + log->user_data, + (NGTCP2_LOG_TP " preferred_address.stateless_reset_token=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex( + token, params->preferred_addr.stateless_reset_token, + sizeof(params->preferred_addr.stateless_reset_token))); + } + + if (params->original_dcid_present) { log->log_printf( log->user_data, (NGTCP2_LOG_TP " original_destination_connection_id=0x%s"), NGTCP2_LOG_TP_HD_FIELDS, (const char *)ngtcp2_encode_hex(cid, params->original_dcid.data, params->original_dcid.datalen)); + } - if (params->retry_scid_present) { - log->log_printf( - log->user_data, (NGTCP2_LOG_TP " retry_source_connection_id=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(cid, params->retry_scid.data, - params->retry_scid.datalen)); - } + if (params->retry_scid_present) { + log->log_printf( + log->user_data, (NGTCP2_LOG_TP " retry_source_connection_id=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(cid, params->retry_scid.data, + params->retry_scid.datalen)); } - log->log_printf( - log->user_data, (NGTCP2_LOG_TP " initial_source_connection_id=0x%s"), - NGTCP2_LOG_TP_HD_FIELDS, - (const char *)ngtcp2_encode_hex(cid, params->initial_scid.data, - params->initial_scid.datalen)); + if (params->initial_scid_present) { + log->log_printf( + log->user_data, (NGTCP2_LOG_TP " initial_source_connection_id=0x%s"), + NGTCP2_LOG_TP_HD_FIELDS, + (const char *)ngtcp2_encode_hex(cid, params->initial_scid.data, + params->initial_scid.datalen)); + } log->log_printf( log->user_data, @@ -718,26 +747,28 @@ void ngtcp2_log_remote_tp(ngtcp2_log *log, uint8_t exttype, (NGTCP2_LOG_TP " version_information.chosen_version=0x%08x"), NGTCP2_LOG_TP_HD_FIELDS, params->version_info.chosen_version); - assert(!(params->version_info.other_versionslen & 0x3)); + assert(!(params->version_info.available_versionslen & 0x3)); - for (i = 0; i < params->version_info.other_versionslen; + for (i = 0, p = params->version_info.available_versions; + i < params->version_info.available_versionslen; i += sizeof(uint32_t)) { + p = ngtcp2_get_uint32(&version, p); + log->log_printf( log->user_data, - (NGTCP2_LOG_TP " 
version_information.other_versions[%zu]=0x%08x"), - NGTCP2_LOG_TP_HD_FIELDS, i >> 2, - ngtcp2_get_uint32(¶ms->version_info.other_versions[i])); + (NGTCP2_LOG_TP " version_information.available_versions[%zu]=0x%08x"), + NGTCP2_LOG_TP_HD_FIELDS, i >> 2, version); } } } void ngtcp2_log_pkt_lost(ngtcp2_log *log, int64_t pkt_num, uint8_t type, uint8_t flags, ngtcp2_tstamp sent_ts) { - if (!log->log_printf) { + if (!log->log_printf || !(log->events & NGTCP2_LOG_EVENT_LDC)) { return; } - ngtcp2_log_info(log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(log, NGTCP2_LOG_EVENT_LDC, "pkn=%" PRId64 " lost type=%s sent_ts=%" PRIu64, pkt_num, strpkttype_type_flags(type, flags), sent_ts); } @@ -747,7 +778,7 @@ static void log_pkt_hd(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, uint8_t dcid[sizeof(hd->dcid.data) * 2 + 1]; uint8_t scid[sizeof(hd->scid.data) * 2 + 1]; - if (!log->log_printf) { + if (!log->log_printf || !(log->events & NGTCP2_LOG_EVENT_PKT)) { return; } @@ -782,7 +813,7 @@ void ngtcp2_log_info(ngtcp2_log *log, ngtcp2_log_event ev, const char *fmt, int n; char buf[NGTCP2_LOG_BUFLEN]; - if (!log->log_printf) { + if (!log->log_printf || !(log->events & ev)) { return; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h index 029ef1b757ab09..1280ce04d6385a 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.h @@ -37,6 +37,9 @@ typedef struct ngtcp2_log { /* log_printf is a sink to write log. NULL means no logging output. */ ngtcp2_printf log_printf; + /* events is an event filter. Only events set in this field are + emitted. */ + uint8_t events; /* ts is the time point used to write time delta in the log. */ ngtcp2_tstamp ts; /* last_ts is the most recent time point that this object is @@ -63,27 +66,33 @@ typedef enum ngtcp2_log_event { /** * :enum:`NGTCP2_LOG_EVENT_CON` is a connection (catch-all) event */ - NGTCP2_LOG_EVENT_CON, + NGTCP2_LOG_EVENT_CON = 0x1, /** * :enum:`NGTCP2_LOG_EVENT_PKT` is a packet event. */ - NGTCP2_LOG_EVENT_PKT, + NGTCP2_LOG_EVENT_PKT = 0x2, /** * :enum:`NGTCP2_LOG_EVENT_FRM` is a QUIC frame event. */ - NGTCP2_LOG_EVENT_FRM, + NGTCP2_LOG_EVENT_FRM = 0x4, /** - * :enum:`NGTCP2_LOG_EVENT_RCV` is a congestion and recovery event. + * :enum:`NGTCP2_LOG_EVENT_LDC` is a loss detection and congestion + * control event. */ - NGTCP2_LOG_EVENT_RCV, + NGTCP2_LOG_EVENT_LDC = 0x8, /** * :enum:`NGTCP2_LOG_EVENT_CRY` is a crypto event. */ - NGTCP2_LOG_EVENT_CRY, + NGTCP2_LOG_EVENT_CRY = 0x10, /** * :enum:`NGTCP2_LOG_EVENT_PTV` is a path validation event. */ - NGTCP2_LOG_EVENT_PTV, + NGTCP2_LOG_EVENT_PTV = 0x20, + /** + * :enum:`NGTCP2_LOG_EVENT_CCA` is a congestion controller algorithm + * event. + */ + NGTCP2_LOG_EVENT_CCA = 0x40, } ngtcp2_log_event; void ngtcp2_log_init(ngtcp2_log *log, const ngtcp2_cid *scid, @@ -100,7 +109,7 @@ void ngtcp2_log_rx_vn(ngtcp2_log *log, const ngtcp2_pkt_hd *hd, void ngtcp2_log_rx_sr(ngtcp2_log *log, const ngtcp2_pkt_stateless_reset *sr); -void ngtcp2_log_remote_tp(ngtcp2_log *log, uint8_t exttype, +void ngtcp2_log_remote_tp(ngtcp2_log *log, const ngtcp2_transport_params *params); void ngtcp2_log_pkt_lost(ngtcp2_log *log, int64_t pkt_num, uint8_t type, diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h index e2603aae15dd04..28d3461bef9238 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_macro.h @@ -50,4 +50,9 @@ *(PD) = (T); \ } while (0) +/* + * ngtcp2_arraylen returns the number of elements in array |A|. 
+ */ +#define ngtcp2_arraylen(A) (sizeof(A) / sizeof(A[0])) + #endif /* NGTCP2_MACRO_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c index 12bc6e84bd4f0c..33e9fcc018b5db 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.c @@ -126,6 +126,7 @@ static void map_bucket_set_data(ngtcp2_map_bucket *bkt, uint32_t hash, bkt->data = data; } +#ifndef WIN32 void ngtcp2_map_print_distance(ngtcp2_map *map) { uint32_t i; size_t idx; @@ -145,6 +146,7 @@ void ngtcp2_map_print_distance(ngtcp2_map *map) { distance(map->tablelen, map->tablelenbits, bkt, idx)); } } +#endif /* !WIN32 */ static int insert(ngtcp2_map_bucket *table, uint32_t tablelen, uint32_t tablelenbits, uint32_t hash, ngtcp2_map_key_type key, diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h index a64344a9a301a3..d05b1657489e45 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_map.h @@ -131,6 +131,8 @@ size_t ngtcp2_map_size(ngtcp2_map *map); int ngtcp2_map_each(ngtcp2_map *map, int (*func)(void *data, void *ptr), void *ptr); +#ifndef WIN32 void ngtcp2_map_print_distance(ngtcp2_map *map); +#endif /* !WIN32 */ #endif /* NGTCP2_MAP_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h index b1f28096174605..bf697927351851 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h @@ -52,16 +52,11 @@ # include #endif /* HAVE_SYS_ENDIAN_H */ -#include +#if defined(__APPLE__) +# include +#endif // __APPLE__ -#if defined(HAVE_BSWAP_64) || \ - (defined(HAVE_DECL_BSWAP_64) && HAVE_DECL_BSWAP_64 > 0) -# define ngtcp2_bswap64 bswap_64 -#else /* !HAVE_BSWAP_64 */ -# define ngtcp2_bswap64(N) \ - ((uint64_t)(ngtcp2_ntohl((uint32_t)(N))) << 32 | \ - ngtcp2_ntohl((uint32_t)((N) >> 32))) -#endif /* !HAVE_BSWAP_64 */ +#include #if defined(HAVE_BE64TOH) || \ (defined(HAVE_DECL_BE64TOH) && HAVE_DECL_BE64TOH > 0) @@ -72,6 +67,18 @@ # define ngtcp2_ntohl64(N) (N) # define ngtcp2_htonl64(N) (N) # else /* !WORDS_BIGENDIAN */ +# if defined(HAVE_BSWAP_64) || \ + (defined(HAVE_DECL_BSWAP_64) && HAVE_DECL_BSWAP_64 > 0) +# define ngtcp2_bswap64 bswap_64 +# elif defined(WIN32) +# define ngtcp2_bswap64 _byteswap_uint64 +# elif defined(__APPLE__) +# define ngtcp2_bswap64 OSSwapInt64 +# else /* !HAVE_BSWAP_64 && !WIN32 && !__APPLE__ */ +# define ngtcp2_bswap64(N) \ + ((uint64_t)(ngtcp2_ntohl((uint32_t)(N))) << 32 | \ + ngtcp2_ntohl((uint32_t)((N) >> 32))) +# endif /* !HAVE_BSWAP_64 && !WIN32 && !__APPLE__ */ # define ngtcp2_ntohl64(N) ngtcp2_bswap64(N) # define ngtcp2_htonl64(N) ngtcp2_bswap64(N) # endif /* !WORDS_BIGENDIAN */ @@ -109,9 +116,9 @@ STIN uint16_t ngtcp2_htons(uint16_t hostshort) { STIN uint32_t ngtcp2_ntohl(uint32_t netlong) { uint32_t res; unsigned char *p = (unsigned char *)&netlong; - res = *p++ << 24; - res += *p++ << 16; - res += *p++ << 8; + res = (uint32_t)(*p++ << 24); + res += (uint32_t)(*p++ << 16); + res += (uint32_t)(*p++ << 8); res += *p; return res; } @@ -119,7 +126,7 @@ STIN uint32_t ngtcp2_ntohl(uint32_t netlong) { STIN uint16_t ngtcp2_ntohs(uint16_t netshort) { uint16_t res; unsigned char *p = (unsigned char *)&netshort; - res = *p++ << 8; + res = (uint16_t)(*p++ << 8); res += *p; return res; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h index f1bbd3a5c9405c..ea73e788317681 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_objalloc.h 
@@ -65,15 +65,25 @@ void ngtcp2_objalloc_free(ngtcp2_objalloc *objalloc); void ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); #ifndef NOMEMPOOL -# define ngtcp2_objalloc_def(NAME, TYPE, OPLENTFIELD) \ +# define ngtcp2_objalloc_decl(NAME, TYPE, OPLENTFIELD) \ inline static void ngtcp2_objalloc_##NAME##_init( \ ngtcp2_objalloc *objalloc, size_t nmemb, const ngtcp2_mem *mem) { \ ngtcp2_objalloc_init( \ objalloc, ((sizeof(TYPE) + 0xfu) & ~(uintptr_t)0xfu) * nmemb, mem); \ } \ \ - inline static TYPE *ngtcp2_objalloc_##NAME##_get( \ - ngtcp2_objalloc *objalloc) { \ + TYPE *ngtcp2_objalloc_##NAME##_get(ngtcp2_objalloc *objalloc); \ + \ + TYPE *ngtcp2_objalloc_##NAME##_len_get(ngtcp2_objalloc *objalloc, \ + size_t len); \ + \ + inline static void ngtcp2_objalloc_##NAME##_release( \ + ngtcp2_objalloc *objalloc, TYPE *obj) { \ + ngtcp2_opl_push(&objalloc->opl, &obj->OPLENTFIELD); \ + } + +# define ngtcp2_objalloc_def(NAME, TYPE, OPLENTFIELD) \ + TYPE *ngtcp2_objalloc_##NAME##_get(ngtcp2_objalloc *objalloc) { \ ngtcp2_opl_entry *oplent = ngtcp2_opl_pop(&objalloc->opl); \ TYPE *obj; \ int rv; \ @@ -91,8 +101,8 @@ void ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); return ngtcp2_struct_of(oplent, TYPE, OPLENTFIELD); \ } \ \ - inline static TYPE *ngtcp2_objalloc_##NAME##_len_get( \ - ngtcp2_objalloc *objalloc, size_t len) { \ + TYPE *ngtcp2_objalloc_##NAME##_len_get(ngtcp2_objalloc *objalloc, \ + size_t len) { \ ngtcp2_opl_entry *oplent = ngtcp2_opl_pop(&objalloc->opl); \ TYPE *obj; \ int rv; \ @@ -107,14 +117,9 @@ void ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); } \ \ return ngtcp2_struct_of(oplent, TYPE, OPLENTFIELD); \ - } \ - \ - inline static void ngtcp2_objalloc_##NAME##_release( \ - ngtcp2_objalloc *objalloc, TYPE *obj) { \ - ngtcp2_opl_push(&objalloc->opl, &obj->OPLENTFIELD); \ } #else /* NOMEMPOOL */ -# define ngtcp2_objalloc_def(NAME, TYPE, OPLENTFIELD) \ +# define ngtcp2_objalloc_decl(NAME, TYPE, OPLENTFIELD) \ inline static void ngtcp2_objalloc_##NAME##_init( \ ngtcp2_objalloc *objalloc, size_t nmemb, const ngtcp2_mem *mem) { \ ngtcp2_objalloc_init( \ @@ -135,6 +140,8 @@ void ngtcp2_objalloc_clear(ngtcp2_objalloc *objalloc); ngtcp2_objalloc *objalloc, TYPE *obj) { \ ngtcp2_mem_free(objalloc->balloc.mem, obj); \ } + +# define ngtcp2_objalloc_def(NAME, TYPE, OPLENTFIELD) #endif /* NOMEMPOOL */ #endif /* NGTCP2_OBJALLOC_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c index 62fef7d6005ed6..12f7daeaf242a9 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c @@ -33,6 +33,7 @@ #include "ngtcp2_cid.h" #include "ngtcp2_mem.h" #include "ngtcp2_vec.h" +#include "ngtcp2_unreachable.h" int ngtcp2_pkt_chain_new(ngtcp2_pkt_chain **ppc, const ngtcp2_path *path, const ngtcp2_pkt_info *pi, const uint8_t *pkt, @@ -91,7 +92,7 @@ int ngtcp2_pkt_decode_version_cid(ngtcp2_version_cid *dest, const uint8_t *data, return NGTCP2_ERR_INVALID_ARGUMENT; } - version = ngtcp2_get_uint32(&data[1]); + ngtcp2_get_uint32(&version, &data[1]); supported_version = ngtcp2_is_supported_version(version); @@ -155,15 +156,13 @@ void ngtcp2_pkt_hd_init(ngtcp2_pkt_hd *hd, uint8_t flags, uint8_t type, ngtcp2_cid_zero(&hd->scid); } hd->pkt_num = pkt_num; - hd->token.base = NULL; - hd->token.len = 0; + hd->token = NULL; + hd->tokenlen = 0; hd->pkt_numlen = pkt_numlen; hd->version = version; hd->len = len; } -static int has_mask(uint8_t b, uint8_t mask) { return (b & mask) == mask; } - ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, 
const uint8_t *pkt, size_t pktlen) { uint8_t type; @@ -186,7 +185,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, return NGTCP2_ERR_INVALID_ARGUMENT; } - version = ngtcp2_get_uint32(&pkt[1]); + ngtcp2_get_uint32(&version, &pkt[1]); if (version == 0) { type = NGTCP2_PKT_VERSION_NEGOTIATION; @@ -218,7 +217,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, break; default: /* Unreachable */ - assert(0); + ngtcp2_unreachable(); } } @@ -254,22 +253,20 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, if (type == NGTCP2_PKT_INITIAL) { /* Token Length */ - ntokenlen = ngtcp2_get_varint_len(p); + ntokenlen = ngtcp2_get_uvarintlen(p); len += ntokenlen - 1; if (pktlen < len) { return NGTCP2_ERR_INVALID_ARGUMENT; } - vi = ngtcp2_get_varint(&ntokenlen, p); + p = ngtcp2_get_uvarint(&vi, p); if (pktlen - len < vi) { return NGTCP2_ERR_INVALID_ARGUMENT; } tokenlen = (size_t)vi; len += tokenlen; - p += ntokenlen; - if (tokenlen) { token = p; } @@ -288,7 +285,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, } /* Length */ - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (pktlen < len) { @@ -308,8 +305,8 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, ngtcp2_cid_init(&dest->scid, p, scil); p += scil; - dest->token.base = (uint8_t *)token; - dest->token.len = tokenlen; + dest->token = token; + dest->tokenlen = tokenlen; p += ntokenlen + tokenlen; switch (type) { @@ -324,12 +321,11 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_long(ngtcp2_pkt_hd *dest, const uint8_t *pkt, break; } - vi = ngtcp2_get_varint(&n, p); + p = ngtcp2_get_uvarint(&vi, p); if (vi > SIZE_MAX) { return NGTCP2_ERR_INVALID_ARGUMENT; } dest->len = (size_t)vi; - p += n; } assert((size_t)(p - pkt) == len); @@ -373,8 +369,8 @@ ngtcp2_ssize ngtcp2_pkt_decode_hd_short(ngtcp2_pkt_hd *dest, const uint8_t *pkt, dest->len = 0; dest->pkt_num = 0; dest->pkt_numlen = 0; - dest->token.base = NULL; - dest->token.len = 0; + dest->token = NULL; + dest->tokenlen = 0; assert((size_t)(p - pkt) == len); @@ -393,7 +389,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_hd_long(uint8_t *out, size_t outlen, } if (hd->type == NGTCP2_PKT_INITIAL) { - len += ngtcp2_put_varint_len(hd->token.len) + hd->token.len; + len += ngtcp2_put_uvarintlen(hd->tokenlen) + hd->tokenlen; } if (outlen < len) { @@ -422,14 +418,14 @@ ngtcp2_ssize ngtcp2_pkt_encode_hd_long(uint8_t *out, size_t outlen, } if (hd->type == NGTCP2_PKT_INITIAL) { - p = ngtcp2_put_varint(p, hd->token.len); - if (hd->token.len) { - p = ngtcp2_cpymem(p, hd->token.base, hd->token.len); + p = ngtcp2_put_uvarint(p, hd->tokenlen); + if (hd->tokenlen) { + p = ngtcp2_cpymem(p, hd->token, hd->tokenlen); } } if (hd->type != NGTCP2_PKT_RETRY) { - p = ngtcp2_put_varint30(p, (uint32_t)hd->len); + p = ngtcp2_put_uvarint30(p, (uint32_t)hd->len); p = ngtcp2_put_pkt_num(p, hd->pkt_num, hd->pkt_numlen); } @@ -475,15 +471,14 @@ ngtcp2_ssize ngtcp2_pkt_decode_frame(ngtcp2_frame *dest, const uint8_t *payload, uint8_t type; if (payloadlen == 0) { - return 0; + return NGTCP2_ERR_FRAME_ENCODING; } type = payload[0]; switch (type) { case NGTCP2_FRAME_PADDING: - return (ngtcp2_ssize)ngtcp2_pkt_decode_padding_frame(&dest->padding, - payload, payloadlen); + return ngtcp2_pkt_decode_padding_frame(&dest->padding, payload, payloadlen); case NGTCP2_FRAME_RESET_STREAM: return ngtcp2_pkt_decode_reset_stream_frame(&dest->reset_stream, payload, payloadlen); @@ -529,7 
+524,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_frame(ngtcp2_frame *dest, const uint8_t *payload, return ngtcp2_pkt_decode_path_response_frame(&dest->path_response, payload, payloadlen); case NGTCP2_FRAME_CRYPTO: - return ngtcp2_pkt_decode_crypto_frame(&dest->crypto, payload, payloadlen); + return ngtcp2_pkt_decode_crypto_frame(&dest->stream, payload, payloadlen); case NGTCP2_FRAME_NEW_TOKEN: return ngtcp2_pkt_decode_new_token_frame(&dest->new_token, payload, payloadlen); @@ -544,9 +539,15 @@ ngtcp2_ssize ngtcp2_pkt_decode_frame(ngtcp2_frame *dest, const uint8_t *payload, return ngtcp2_pkt_decode_datagram_frame(&dest->datagram, payload, payloadlen); default: - if (has_mask(type, NGTCP2_FRAME_STREAM)) { + if ((type & ~(NGTCP2_FRAME_STREAM - 1)) == NGTCP2_FRAME_STREAM) { return ngtcp2_pkt_decode_stream_frame(&dest->stream, payload, payloadlen); } + + /* For frame types > 0xff, use ngtcp2_get_uvarintlen and + ngtcp2_get_uvarint to get a frame type, and then switch over + it. Verify that payloadlen >= ngtcp2_get_uvarintlen(payload) + before calling ngtcp2_get_uvarint(payload). */ + return NGTCP2_ERR_FRAME_ENCODING; } } @@ -570,7 +571,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_stream_frame(ngtcp2_stream *dest, p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -585,7 +586,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_stream_frame(ngtcp2_stream *dest, return NGTCP2_ERR_FRAME_ENCODING; } - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -601,14 +602,14 @@ ngtcp2_ssize ngtcp2_pkt_decode_stream_frame(ngtcp2_stream *dest, return NGTCP2_ERR_FRAME_ENCODING; } - ndatalen = ngtcp2_get_varint_len(p); + ndatalen = ngtcp2_get_uvarintlen(p); len += ndatalen - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } - vi = ngtcp2_get_varint(&ndatalen, p); + /* p = */ ngtcp2_get_uvarint(&vi, p); if (payloadlen - len < vi) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -623,12 +624,10 @@ ngtcp2_ssize ngtcp2_pkt_decode_stream_frame(ngtcp2_stream *dest, dest->type = NGTCP2_FRAME_STREAM; dest->flags = (uint8_t)(type & ~NGTCP2_FRAME_STREAM); dest->fin = (type & NGTCP2_STREAM_FIN_BIT) != 0; - dest->stream_id = (int64_t)ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_varint(&dest->stream_id, p); if (type & NGTCP2_STREAM_OFF_BIT) { - dest->offset = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_uvarint(&dest->offset, p); } else { dest->offset = 0; } @@ -656,12 +655,12 @@ ngtcp2_ssize ngtcp2_pkt_decode_stream_frame(ngtcp2_stream *dest, ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, const uint8_t *payload, size_t payloadlen) { - size_t num_blks, max_num_blks; - size_t nnum_blks; + size_t rangecnt, max_rangecnt; + size_t nrangecnt; size_t len = 1 + 1 + 1 + 1 + 1; const uint8_t *p; size_t i, j; - ngtcp2_ack_blk *blk; + ngtcp2_ack_range *range; size_t n; uint8_t type; uint64_t vi; @@ -675,7 +674,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, p = payload + 1; /* Largest Acknowledged */ - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -685,7 +684,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, p += n; /* ACK Delay */ - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -694,26 +693,24 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, p += n; - /* ACK Block Count */ - nnum_blks = ngtcp2_get_varint_len(p); - len += nnum_blks - 1; + /* ACK Range Count */ 
+ nrangecnt = ngtcp2_get_uvarintlen(p); + len += nrangecnt - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } - vi = ngtcp2_get_varint(&nnum_blks, p); + p = ngtcp2_get_uvarint(&vi, p); if (vi > SIZE_MAX / (1 + 1) || payloadlen - len < vi * (1 + 1)) { return NGTCP2_ERR_FRAME_ENCODING; } - num_blks = (size_t)vi; - len += num_blks * (1 + 1); - - p += nnum_blks; + rangecnt = (size_t)vi; + len += rangecnt * (1 + 1); - /* First ACK Block */ - n = ngtcp2_get_varint_len(p); + /* First ACK Range */ + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -722,10 +719,10 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, p += n; - for (i = 0; i < num_blks; ++i) { - /* Gap, and Additional ACK Block */ + for (i = 0; i < rangecnt; ++i) { + /* Gap, and Additional ACK Range */ for (j = 0; j < 2; ++j) { - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -743,7 +740,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, } for (i = 0; i < 3; ++i) { - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -754,44 +751,34 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, } } - /* TODO We might not decode all blocks. It could be very large. */ - max_num_blks = ngtcp2_min(NGTCP2_MAX_ACK_BLKS, num_blks); + /* TODO We might not decode all ranges. It could be very large. */ + max_rangecnt = ngtcp2_min(NGTCP2_MAX_ACK_RANGES, rangecnt); p = payload + 1; dest->type = type; - dest->largest_ack = (int64_t)ngtcp2_get_varint(&n, p); - p += n; - dest->ack_delay = ngtcp2_get_varint(&n, p); + p = ngtcp2_get_varint(&dest->largest_ack, p); + p = ngtcp2_get_uvarint(&dest->ack_delay, p); /* This value will be assigned in the upper layer. 
*/ dest->ack_delay_unscaled = 0; - p += n; - dest->num_blks = max_num_blks; - p += nnum_blks; - dest->first_ack_blklen = ngtcp2_get_varint(&n, p); - p += n; + dest->rangecnt = max_rangecnt; + p += nrangecnt; + p = ngtcp2_get_uvarint(&dest->first_ack_range, p); - for (i = 0; i < max_num_blks; ++i) { - blk = &dest->blks[i]; - blk->gap = ngtcp2_get_varint(&n, p); - p += n; - blk->blklen = ngtcp2_get_varint(&n, p); - p += n; + for (i = 0; i < max_rangecnt; ++i) { + range = &dest->ranges[i]; + p = ngtcp2_get_uvarint(&range->gap, p); + p = ngtcp2_get_uvarint(&range->len, p); } - for (i = max_num_blks; i < num_blks; ++i) { - p += ngtcp2_get_varint_len(p); - p += ngtcp2_get_varint_len(p); + for (i = max_rangecnt; i < rangecnt; ++i) { + p += ngtcp2_get_uvarintlen(p); + p += ngtcp2_get_uvarintlen(p); } if (type == NGTCP2_FRAME_ACK_ECN) { - dest->ecn.ect0 = ngtcp2_get_varint(&n, p); - p += n; - - dest->ecn.ect1 = ngtcp2_get_varint(&n, p); - p += n; - - dest->ecn.ce = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_uvarint(&dest->ecn.ect0, p); + p = ngtcp2_get_uvarint(&dest->ecn.ect1, p); + p = ngtcp2_get_uvarint(&dest->ecn.ce, p); } assert((size_t)(p - payload) == len); @@ -799,9 +786,9 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, return (ngtcp2_ssize)len; } -size_t ngtcp2_pkt_decode_padding_frame(ngtcp2_padding *dest, - const uint8_t *payload, - size_t payloadlen) { +ngtcp2_ssize ngtcp2_pkt_decode_padding_frame(ngtcp2_padding *dest, + const uint8_t *payload, + size_t payloadlen) { const uint8_t *p, *ep; assert(payloadlen > 0); @@ -815,7 +802,7 @@ size_t ngtcp2_pkt_decode_padding_frame(ngtcp2_padding *dest, dest->type = NGTCP2_FRAME_PADDING; dest->len = (size_t)(p - payload); - return dest->len; + return (ngtcp2_ssize)dest->len; } ngtcp2_ssize ngtcp2_pkt_decode_reset_stream_frame(ngtcp2_reset_stream *dest, @@ -831,19 +818,19 @@ ngtcp2_ssize ngtcp2_pkt_decode_reset_stream_frame(ngtcp2_reset_stream *dest, p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } p += n; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } p += n; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; @@ -852,12 +839,9 @@ ngtcp2_ssize ngtcp2_pkt_decode_reset_stream_frame(ngtcp2_reset_stream *dest, p = payload + 1; dest->type = NGTCP2_FRAME_RESET_STREAM; - dest->stream_id = (int64_t)ngtcp2_get_varint(&n, p); - p += n; - dest->app_error_code = ngtcp2_get_varint(&n, p); - p += n; - dest->final_size = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_varint(&dest->stream_id, p); + p = ngtcp2_get_uvarint(&dest->app_error_code, p); + p = ngtcp2_get_uvarint(&dest->final_size, p); assert((size_t)(p - payload) == len); @@ -882,7 +866,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; @@ -893,7 +877,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( if (type == NGTCP2_FRAME_CONNECTION_CLOSE) { ++len; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; @@ -902,13 +886,13 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( p += n; } - nreasonlen = ngtcp2_get_varint_len(p); + nreasonlen = 
ngtcp2_get_uvarintlen(p); len += nreasonlen - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } - vi = ngtcp2_get_varint(&nreasonlen, p); + p = ngtcp2_get_uvarint(&vi, p); if (payloadlen - len < vi) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -918,11 +902,9 @@ ngtcp2_ssize ngtcp2_pkt_decode_connection_close_frame( p = payload + 1; dest->type = type; - dest->error_code = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_uvarint(&dest->error_code, p); if (type == NGTCP2_FRAME_CONNECTION_CLOSE) { - dest->frame_type = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_uvarint(&dest->frame_type, p); } else { dest->frame_type = 0; } @@ -953,7 +935,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_data_frame(ngtcp2_max_data *dest, p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -961,8 +943,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_data_frame(ngtcp2_max_data *dest, } dest->type = NGTCP2_FRAME_MAX_DATA; - dest->max_data = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_uvarint(&dest->max_data, p); assert((size_t)(p - payload) == len); @@ -981,7 +962,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_stream_data_frame( p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -990,7 +971,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_stream_data_frame( p += n; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -1000,10 +981,8 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_stream_data_frame( p = payload + 1; dest->type = NGTCP2_FRAME_MAX_STREAM_DATA; - dest->stream_id = (int64_t)ngtcp2_get_varint(&n, p); - p += n; - dest->max_stream_data = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_varint(&dest->stream_id, p); + p = ngtcp2_get_uvarint(&dest->max_stream_data, p); assert((size_t)(p - payload) == len); @@ -1023,7 +1002,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_streams_frame(ngtcp2_max_streams *dest, p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -1031,8 +1010,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_streams_frame(ngtcp2_max_streams *dest, } dest->type = payload[0]; - dest->max_streams = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_uvarint(&dest->max_streams, p); assert((size_t)(p - payload) == len); @@ -1062,7 +1040,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_data_blocked_frame(ngtcp2_data_blocked *dest, p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -1070,8 +1048,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_data_blocked_frame(ngtcp2_data_blocked *dest, } dest->type = NGTCP2_FRAME_DATA_BLOCKED; - dest->offset = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_uvarint(&dest->offset, p); assert((size_t)(p - payload) == len); @@ -1092,7 +1069,7 @@ ngtcp2_pkt_decode_stream_data_blocked_frame(ngtcp2_stream_data_blocked *dest, p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -1101,7 +1078,7 @@ ngtcp2_pkt_decode_stream_data_blocked_frame(ngtcp2_stream_data_blocked *dest, p += n; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -1111,10 +1088,8 @@ ngtcp2_pkt_decode_stream_data_blocked_frame(ngtcp2_stream_data_blocked *dest, p = payload + 1; dest->type = NGTCP2_FRAME_STREAM_DATA_BLOCKED; - dest->stream_id = (int64_t)ngtcp2_get_varint(&n, p); - p += n; - dest->offset 
= ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_varint(&dest->stream_id, p); + p = ngtcp2_get_uvarint(&dest->offset, p); assert((size_t)(p - payload) == len); @@ -1133,7 +1108,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_streams_blocked_frame( p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -1141,8 +1116,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_streams_blocked_frame( } dest->type = payload[0]; - dest->max_streams = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_uvarint(&dest->max_streams, p); assert((size_t)(p - payload) == len); @@ -1162,7 +1136,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_connection_id_frame( p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; @@ -1170,7 +1144,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_connection_id_frame( p += n; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; @@ -1191,14 +1165,13 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_connection_id_frame( p = payload + 1; dest->type = NGTCP2_FRAME_NEW_CONNECTION_ID; - dest->seq = ngtcp2_get_varint(&n, p); - p += n; - dest->retire_prior_to = ngtcp2_get_varint(&n, p); - p += n + 1; + p = ngtcp2_get_uvarint(&dest->seq, p); + p = ngtcp2_get_uvarint(&dest->retire_prior_to, p); + ++p; ngtcp2_cid_init(&dest->cid, p, cil); p += cil; - memcpy(dest->stateless_reset_token, p, NGTCP2_STATELESS_RESET_TOKENLEN); - p += NGTCP2_STATELESS_RESET_TOKENLEN; + p = ngtcp2_get_bytes(dest->stateless_reset_token, p, + NGTCP2_STATELESS_RESET_TOKENLEN); assert((size_t)(p - payload) == len); @@ -1218,14 +1191,14 @@ ngtcp2_ssize ngtcp2_pkt_decode_stop_sending_frame(ngtcp2_stop_sending *dest, p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } p += n; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -1235,10 +1208,8 @@ ngtcp2_ssize ngtcp2_pkt_decode_stop_sending_frame(ngtcp2_stop_sending *dest, p = payload + 1; dest->type = NGTCP2_FRAME_STOP_SENDING; - dest->stream_id = (int64_t)ngtcp2_get_varint(&n, p); - p += n; - dest->app_error_code = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_varint(&dest->stream_id, p); + p = ngtcp2_get_uvarint(&dest->app_error_code, p); assert((size_t)(p - payload) == len); @@ -1287,7 +1258,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_path_response_frame(ngtcp2_path_response *dest, return (ngtcp2_ssize)len; } -ngtcp2_ssize ngtcp2_pkt_decode_crypto_frame(ngtcp2_crypto *dest, +ngtcp2_ssize ngtcp2_pkt_decode_crypto_frame(ngtcp2_stream *dest, const uint8_t *payload, size_t payloadlen) { size_t len = 1 + 1 + 1; @@ -1303,7 +1274,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_crypto_frame(ngtcp2_crypto *dest, p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -1312,14 +1283,14 @@ ngtcp2_ssize ngtcp2_pkt_decode_crypto_frame(ngtcp2_crypto *dest, p += n; - ndatalen = ngtcp2_get_varint_len(p); + ndatalen = ngtcp2_get_uvarintlen(p); len += ndatalen - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } - vi = ngtcp2_get_varint(&ndatalen, p); + p = ngtcp2_get_uvarint(&vi, p); if (payloadlen - len < vi) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -1330,8 +1301,10 @@ ngtcp2_ssize ngtcp2_pkt_decode_crypto_frame(ngtcp2_crypto *dest, p = payload + 1; dest->type = 
NGTCP2_FRAME_CRYPTO; - dest->offset = ngtcp2_get_varint(&n, p); - p += n; + dest->flags = 0; + dest->fin = 0; + dest->stream_id = 0; + p = ngtcp2_get_uvarint(&dest->offset, p); dest->data[0].len = datalen; p += ndatalen; if (dest->data[0].len) { @@ -1363,14 +1336,14 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_token_frame(ngtcp2_new_token *dest, p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } - vi = ngtcp2_get_varint(&n, p); + p = ngtcp2_get_uvarint(&vi, p); if (payloadlen - len < vi) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -1378,10 +1351,9 @@ ngtcp2_ssize ngtcp2_pkt_decode_new_token_frame(ngtcp2_new_token *dest, len += datalen; dest->type = NGTCP2_FRAME_NEW_TOKEN; - dest->token.len = datalen; - p += n; - dest->token.base = (uint8_t *)p; - p += dest->token.len; + dest->tokenlen = datalen; + dest->token = (uint8_t *)p; + p += dest->tokenlen; assert((size_t)(p - payload) == len); @@ -1402,7 +1374,7 @@ ngtcp2_pkt_decode_retire_connection_id_frame(ngtcp2_retire_connection_id *dest, p = payload + 1; - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { @@ -1410,8 +1382,7 @@ ngtcp2_pkt_decode_retire_connection_id_frame(ngtcp2_retire_connection_id *dest, } dest->type = NGTCP2_FRAME_RETIRE_CONNECTION_ID; - dest->seq = ngtcp2_get_varint(&n, p); - p += n; + p = ngtcp2_get_uvarint(&dest->seq, p); assert((size_t)(p - payload) == len); @@ -1457,14 +1428,14 @@ ngtcp2_ssize ngtcp2_pkt_decode_datagram_frame(ngtcp2_datagram *dest, return NGTCP2_ERR_FRAME_ENCODING; } - n = ngtcp2_get_varint_len(p); + n = ngtcp2_get_uvarintlen(p); len += n - 1; if (payloadlen < len) { return NGTCP2_ERR_FRAME_ENCODING; } - vi = ngtcp2_get_varint(&n, p); + p = ngtcp2_get_uvarint(&vi, p); if (payloadlen - len < vi) { return NGTCP2_ERR_FRAME_ENCODING; } @@ -1473,8 +1444,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_datagram_frame(ngtcp2_datagram *dest, len += datalen; break; default: - assert(0); - abort(); + ngtcp2_unreachable(); } dest->type = type; @@ -1482,19 +1452,11 @@ ngtcp2_ssize ngtcp2_pkt_decode_datagram_frame(ngtcp2_datagram *dest, if (datalen == 0) { dest->datacnt = 0; dest->data = NULL; - - if (type == NGTCP2_FRAME_DATAGRAM_LEN) { - p += n; - } } else { dest->datacnt = 1; dest->data = dest->rdata; dest->rdata[0].len = datalen; - if (type == NGTCP2_FRAME_DATAGRAM_LEN) { - p += n; - } - dest->rdata[0].base = (uint8_t *)p; p += datalen; } @@ -1551,7 +1513,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_frame(uint8_t *out, size_t outlen, return ngtcp2_pkt_encode_path_response_frame(out, outlen, &fr->path_response); case NGTCP2_FRAME_CRYPTO: - return ngtcp2_pkt_encode_crypto_frame(out, outlen, &fr->crypto); + return ngtcp2_pkt_encode_crypto_frame(out, outlen, &fr->stream); case NGTCP2_FRAME_NEW_TOKEN: return ngtcp2_pkt_encode_new_token_frame(out, outlen, &fr->new_token); case NGTCP2_FRAME_RETIRE_CONNECTION_ID: @@ -1582,16 +1544,16 @@ ngtcp2_ssize ngtcp2_pkt_encode_stream_frame(uint8_t *out, size_t outlen, if (fr->offset) { flags |= NGTCP2_STREAM_OFF_BIT; - len += ngtcp2_put_varint_len(fr->offset); + len += ngtcp2_put_uvarintlen(fr->offset); } - len += ngtcp2_put_varint_len((uint64_t)fr->stream_id); + len += ngtcp2_put_uvarintlen((uint64_t)fr->stream_id); for (i = 0; i < fr->datacnt; ++i) { datalen += fr->data[i].len; } - len += ngtcp2_put_varint_len(datalen); + len += ngtcp2_put_uvarintlen(datalen); len += datalen; if (outlen < len) { @@ -1604,13 +1566,13 @@ ngtcp2_ssize 
ngtcp2_pkt_encode_stream_frame(uint8_t *out, size_t outlen, fr->flags = flags; - p = ngtcp2_put_varint(p, (uint64_t)fr->stream_id); + p = ngtcp2_put_uvarint(p, (uint64_t)fr->stream_id); if (fr->offset) { - p = ngtcp2_put_varint(p, fr->offset); + p = ngtcp2_put_uvarint(p, fr->offset); } - p = ngtcp2_put_varint(p, datalen); + p = ngtcp2_put_uvarint(p, datalen); for (i = 0; i < fr->datacnt; ++i) { assert(fr->data[i].len); @@ -1625,24 +1587,24 @@ ngtcp2_ssize ngtcp2_pkt_encode_stream_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_ack_frame(uint8_t *out, size_t outlen, ngtcp2_ack *fr) { - size_t len = 1 + ngtcp2_put_varint_len((uint64_t)fr->largest_ack) + - ngtcp2_put_varint_len(fr->ack_delay) + - ngtcp2_put_varint_len(fr->num_blks) + - ngtcp2_put_varint_len(fr->first_ack_blklen); + size_t len = 1 + ngtcp2_put_uvarintlen((uint64_t)fr->largest_ack) + + ngtcp2_put_uvarintlen(fr->ack_delay) + + ngtcp2_put_uvarintlen(fr->rangecnt) + + ngtcp2_put_uvarintlen(fr->first_ack_range); uint8_t *p; size_t i; - const ngtcp2_ack_blk *blk; + const ngtcp2_ack_range *range; - for (i = 0; i < fr->num_blks; ++i) { - blk = &fr->blks[i]; - len += ngtcp2_put_varint_len(blk->gap); - len += ngtcp2_put_varint_len(blk->blklen); + for (i = 0; i < fr->rangecnt; ++i) { + range = &fr->ranges[i]; + len += ngtcp2_put_uvarintlen(range->gap); + len += ngtcp2_put_uvarintlen(range->len); } if (fr->type == NGTCP2_FRAME_ACK_ECN) { - len += ngtcp2_put_varint_len(fr->ecn.ect0) + - ngtcp2_put_varint_len(fr->ecn.ect1) + - ngtcp2_put_varint_len(fr->ecn.ce); + len += ngtcp2_put_uvarintlen(fr->ecn.ect0) + + ngtcp2_put_uvarintlen(fr->ecn.ect1) + + ngtcp2_put_uvarintlen(fr->ecn.ce); } if (outlen < len) { @@ -1651,22 +1613,22 @@ ngtcp2_ssize ngtcp2_pkt_encode_ack_frame(uint8_t *out, size_t outlen, p = out; - *p++ = fr->type; - p = ngtcp2_put_varint(p, (uint64_t)fr->largest_ack); - p = ngtcp2_put_varint(p, fr->ack_delay); - p = ngtcp2_put_varint(p, fr->num_blks); - p = ngtcp2_put_varint(p, fr->first_ack_blklen); + *p++ = (uint8_t)fr->type; + p = ngtcp2_put_uvarint(p, (uint64_t)fr->largest_ack); + p = ngtcp2_put_uvarint(p, fr->ack_delay); + p = ngtcp2_put_uvarint(p, fr->rangecnt); + p = ngtcp2_put_uvarint(p, fr->first_ack_range); - for (i = 0; i < fr->num_blks; ++i) { - blk = &fr->blks[i]; - p = ngtcp2_put_varint(p, blk->gap); - p = ngtcp2_put_varint(p, blk->blklen); + for (i = 0; i < fr->rangecnt; ++i) { + range = &fr->ranges[i]; + p = ngtcp2_put_uvarint(p, range->gap); + p = ngtcp2_put_uvarint(p, range->len); } if (fr->type == NGTCP2_FRAME_ACK_ECN) { - p = ngtcp2_put_varint(p, fr->ecn.ect0); - p = ngtcp2_put_varint(p, fr->ecn.ect1); - p = ngtcp2_put_varint(p, fr->ecn.ce); + p = ngtcp2_put_uvarint(p, fr->ecn.ect0); + p = ngtcp2_put_uvarint(p, fr->ecn.ect1); + p = ngtcp2_put_uvarint(p, fr->ecn.ce); } assert((size_t)(p - out) == len); @@ -1688,9 +1650,9 @@ ngtcp2_ssize ngtcp2_pkt_encode_padding_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_reset_stream_frame(uint8_t *out, size_t outlen, const ngtcp2_reset_stream *fr) { - size_t len = 1 + ngtcp2_put_varint_len((uint64_t)fr->stream_id) + - ngtcp2_put_varint_len(fr->app_error_code) + - ngtcp2_put_varint_len(fr->final_size); + size_t len = 1 + ngtcp2_put_uvarintlen((uint64_t)fr->stream_id) + + ngtcp2_put_uvarintlen(fr->app_error_code) + + ngtcp2_put_uvarintlen(fr->final_size); uint8_t *p; if (outlen < len) { @@ -1700,9 +1662,9 @@ ngtcp2_pkt_encode_reset_stream_frame(uint8_t *out, size_t outlen, p = out; *p++ = NGTCP2_FRAME_RESET_STREAM; - p = 
ngtcp2_put_varint(p, (uint64_t)fr->stream_id); - p = ngtcp2_put_varint(p, fr->app_error_code); - p = ngtcp2_put_varint(p, fr->final_size); + p = ngtcp2_put_uvarint(p, (uint64_t)fr->stream_id); + p = ngtcp2_put_uvarint(p, fr->app_error_code); + p = ngtcp2_put_uvarint(p, fr->final_size); assert((size_t)(p - out) == len); @@ -1712,11 +1674,11 @@ ngtcp2_pkt_encode_reset_stream_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_connection_close_frame(uint8_t *out, size_t outlen, const ngtcp2_connection_close *fr) { - size_t len = 1 + ngtcp2_put_varint_len(fr->error_code) + + size_t len = 1 + ngtcp2_put_uvarintlen(fr->error_code) + (fr->type == NGTCP2_FRAME_CONNECTION_CLOSE - ? ngtcp2_put_varint_len(fr->frame_type) + ? ngtcp2_put_uvarintlen(fr->frame_type) : 0) + - ngtcp2_put_varint_len(fr->reasonlen) + fr->reasonlen; + ngtcp2_put_uvarintlen(fr->reasonlen) + fr->reasonlen; uint8_t *p; if (outlen < len) { @@ -1725,12 +1687,12 @@ ngtcp2_pkt_encode_connection_close_frame(uint8_t *out, size_t outlen, p = out; - *p++ = fr->type; - p = ngtcp2_put_varint(p, fr->error_code); + *p++ = (uint8_t)fr->type; + p = ngtcp2_put_uvarint(p, fr->error_code); if (fr->type == NGTCP2_FRAME_CONNECTION_CLOSE) { - p = ngtcp2_put_varint(p, fr->frame_type); + p = ngtcp2_put_uvarint(p, fr->frame_type); } - p = ngtcp2_put_varint(p, fr->reasonlen); + p = ngtcp2_put_uvarint(p, fr->reasonlen); if (fr->reasonlen) { p = ngtcp2_cpymem(p, fr->reason, fr->reasonlen); } @@ -1742,7 +1704,7 @@ ngtcp2_pkt_encode_connection_close_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_max_data_frame(uint8_t *out, size_t outlen, const ngtcp2_max_data *fr) { - size_t len = 1 + ngtcp2_put_varint_len(fr->max_data); + size_t len = 1 + ngtcp2_put_uvarintlen(fr->max_data); uint8_t *p; if (outlen < len) { @@ -1752,7 +1714,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_max_data_frame(uint8_t *out, size_t outlen, p = out; *p++ = NGTCP2_FRAME_MAX_DATA; - p = ngtcp2_put_varint(p, fr->max_data); + p = ngtcp2_put_uvarint(p, fr->max_data); assert((size_t)(p - out) == len); @@ -1762,8 +1724,8 @@ ngtcp2_ssize ngtcp2_pkt_encode_max_data_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_max_stream_data_frame(uint8_t *out, size_t outlen, const ngtcp2_max_stream_data *fr) { - size_t len = 1 + ngtcp2_put_varint_len((uint64_t)fr->stream_id) + - ngtcp2_put_varint_len(fr->max_stream_data); + size_t len = 1 + ngtcp2_put_uvarintlen((uint64_t)fr->stream_id) + + ngtcp2_put_uvarintlen(fr->max_stream_data); uint8_t *p; if (outlen < len) { @@ -1773,8 +1735,8 @@ ngtcp2_pkt_encode_max_stream_data_frame(uint8_t *out, size_t outlen, p = out; *p++ = NGTCP2_FRAME_MAX_STREAM_DATA; - p = ngtcp2_put_varint(p, (uint64_t)fr->stream_id); - p = ngtcp2_put_varint(p, fr->max_stream_data); + p = ngtcp2_put_uvarint(p, (uint64_t)fr->stream_id); + p = ngtcp2_put_uvarint(p, fr->max_stream_data); assert((size_t)(p - out) == len); @@ -1783,7 +1745,7 @@ ngtcp2_pkt_encode_max_stream_data_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_max_streams_frame(uint8_t *out, size_t outlen, const ngtcp2_max_streams *fr) { - size_t len = 1 + ngtcp2_put_varint_len(fr->max_streams); + size_t len = 1 + ngtcp2_put_uvarintlen(fr->max_streams); uint8_t *p; if (outlen < len) { @@ -1792,8 +1754,8 @@ ngtcp2_ssize ngtcp2_pkt_encode_max_streams_frame(uint8_t *out, size_t outlen, p = out; - *p++ = fr->type; - p = ngtcp2_put_varint(p, fr->max_streams); + *p++ = (uint8_t)fr->type; + p = ngtcp2_put_uvarint(p, fr->max_streams); assert((size_t)(p - out) == len); @@ -1816,7 
+1778,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_ping_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_data_blocked_frame(uint8_t *out, size_t outlen, const ngtcp2_data_blocked *fr) { - size_t len = 1 + ngtcp2_put_varint_len(fr->offset); + size_t len = 1 + ngtcp2_put_uvarintlen(fr->offset); uint8_t *p; if (outlen < len) { @@ -1826,7 +1788,7 @@ ngtcp2_pkt_encode_data_blocked_frame(uint8_t *out, size_t outlen, p = out; *p++ = NGTCP2_FRAME_DATA_BLOCKED; - p = ngtcp2_put_varint(p, fr->offset); + p = ngtcp2_put_uvarint(p, fr->offset); assert((size_t)(p - out) == len); @@ -1835,8 +1797,8 @@ ngtcp2_pkt_encode_data_blocked_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_stream_data_blocked_frame( uint8_t *out, size_t outlen, const ngtcp2_stream_data_blocked *fr) { - size_t len = 1 + ngtcp2_put_varint_len((uint64_t)fr->stream_id) + - ngtcp2_put_varint_len(fr->offset); + size_t len = 1 + ngtcp2_put_uvarintlen((uint64_t)fr->stream_id) + + ngtcp2_put_uvarintlen(fr->offset); uint8_t *p; if (outlen < len) { @@ -1846,8 +1808,8 @@ ngtcp2_ssize ngtcp2_pkt_encode_stream_data_blocked_frame( p = out; *p++ = NGTCP2_FRAME_STREAM_DATA_BLOCKED; - p = ngtcp2_put_varint(p, (uint64_t)fr->stream_id); - p = ngtcp2_put_varint(p, fr->offset); + p = ngtcp2_put_uvarint(p, (uint64_t)fr->stream_id); + p = ngtcp2_put_uvarint(p, fr->offset); assert((size_t)(p - out) == len); @@ -1857,7 +1819,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_stream_data_blocked_frame( ngtcp2_ssize ngtcp2_pkt_encode_streams_blocked_frame(uint8_t *out, size_t outlen, const ngtcp2_streams_blocked *fr) { - size_t len = 1 + ngtcp2_put_varint_len(fr->max_streams); + size_t len = 1 + ngtcp2_put_uvarintlen(fr->max_streams); uint8_t *p; if (outlen < len) { @@ -1866,8 +1828,8 @@ ngtcp2_pkt_encode_streams_blocked_frame(uint8_t *out, size_t outlen, p = out; - *p++ = fr->type; - p = ngtcp2_put_varint(p, fr->max_streams); + *p++ = (uint8_t)fr->type; + p = ngtcp2_put_uvarint(p, fr->max_streams); assert((size_t)(p - out) == len); @@ -1877,8 +1839,8 @@ ngtcp2_pkt_encode_streams_blocked_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_new_connection_id_frame(uint8_t *out, size_t outlen, const ngtcp2_new_connection_id *fr) { - size_t len = 1 + ngtcp2_put_varint_len(fr->seq) + - ngtcp2_put_varint_len(fr->retire_prior_to) + 1 + + size_t len = 1 + ngtcp2_put_uvarintlen(fr->seq) + + ngtcp2_put_uvarintlen(fr->retire_prior_to) + 1 + fr->cid.datalen + NGTCP2_STATELESS_RESET_TOKENLEN; uint8_t *p; @@ -1889,8 +1851,8 @@ ngtcp2_pkt_encode_new_connection_id_frame(uint8_t *out, size_t outlen, p = out; *p++ = NGTCP2_FRAME_NEW_CONNECTION_ID; - p = ngtcp2_put_varint(p, fr->seq); - p = ngtcp2_put_varint(p, fr->retire_prior_to); + p = ngtcp2_put_uvarint(p, fr->seq); + p = ngtcp2_put_uvarint(p, fr->retire_prior_to); *p++ = (uint8_t)fr->cid.datalen; p = ngtcp2_cpymem(p, fr->cid.data, fr->cid.datalen); p = ngtcp2_cpymem(p, fr->stateless_reset_token, @@ -1904,8 +1866,8 @@ ngtcp2_pkt_encode_new_connection_id_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_stop_sending_frame(uint8_t *out, size_t outlen, const ngtcp2_stop_sending *fr) { - size_t len = 1 + ngtcp2_put_varint_len((uint64_t)fr->stream_id) + - ngtcp2_put_varint_len(fr->app_error_code); + size_t len = 1 + ngtcp2_put_uvarintlen((uint64_t)fr->stream_id) + + ngtcp2_put_uvarintlen(fr->app_error_code); uint8_t *p; if (outlen < len) { @@ -1915,8 +1877,8 @@ ngtcp2_pkt_encode_stop_sending_frame(uint8_t *out, size_t outlen, p = out; *p++ = NGTCP2_FRAME_STOP_SENDING; - p = 
ngtcp2_put_varint(p, (uint64_t)fr->stream_id); - p = ngtcp2_put_varint(p, fr->app_error_code); + p = ngtcp2_put_uvarint(p, (uint64_t)fr->stream_id); + p = ngtcp2_put_uvarint(p, fr->app_error_code); assert((size_t)(p - out) == len); @@ -1964,19 +1926,19 @@ ngtcp2_pkt_encode_path_response_frame(uint8_t *out, size_t outlen, } ngtcp2_ssize ngtcp2_pkt_encode_crypto_frame(uint8_t *out, size_t outlen, - const ngtcp2_crypto *fr) { + const ngtcp2_stream *fr) { size_t len = 1; uint8_t *p; size_t i; size_t datalen = 0; - len += ngtcp2_put_varint_len(fr->offset); + len += ngtcp2_put_uvarintlen(fr->offset); for (i = 0; i < fr->datacnt; ++i) { datalen += fr->data[i].len; } - len += ngtcp2_put_varint_len(datalen); + len += ngtcp2_put_uvarintlen(datalen); len += datalen; if (outlen < len) { @@ -1987,8 +1949,8 @@ ngtcp2_ssize ngtcp2_pkt_encode_crypto_frame(uint8_t *out, size_t outlen, *p++ = NGTCP2_FRAME_CRYPTO; - p = ngtcp2_put_varint(p, fr->offset); - p = ngtcp2_put_varint(p, datalen); + p = ngtcp2_put_uvarint(p, fr->offset); + p = ngtcp2_put_uvarint(p, datalen); for (i = 0; i < fr->datacnt; ++i) { assert(fr->data[i].base); @@ -2002,10 +1964,10 @@ ngtcp2_ssize ngtcp2_pkt_encode_crypto_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_new_token_frame(uint8_t *out, size_t outlen, const ngtcp2_new_token *fr) { - size_t len = 1 + ngtcp2_put_varint_len(fr->token.len) + fr->token.len; + size_t len = 1 + ngtcp2_put_uvarintlen(fr->tokenlen) + fr->tokenlen; uint8_t *p; - assert(fr->token.len); + assert(fr->tokenlen); if (outlen < len) { return NGTCP2_ERR_NOBUF; @@ -2015,8 +1977,8 @@ ngtcp2_ssize ngtcp2_pkt_encode_new_token_frame(uint8_t *out, size_t outlen, *p++ = NGTCP2_FRAME_NEW_TOKEN; - p = ngtcp2_put_varint(p, fr->token.len); - p = ngtcp2_cpymem(p, fr->token.base, fr->token.len); + p = ngtcp2_put_uvarint(p, fr->tokenlen); + p = ngtcp2_cpymem(p, fr->token, fr->tokenlen); assert((size_t)(p - out) == len); @@ -2025,7 +1987,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_new_token_frame(uint8_t *out, size_t outlen, ngtcp2_ssize ngtcp2_pkt_encode_retire_connection_id_frame( uint8_t *out, size_t outlen, const ngtcp2_retire_connection_id *fr) { - size_t len = 1 + ngtcp2_put_varint_len(fr->seq); + size_t len = 1 + ngtcp2_put_uvarintlen(fr->seq); uint8_t *p; if (outlen < len) { @@ -2036,7 +1998,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_retire_connection_id_frame( *p++ = NGTCP2_FRAME_RETIRE_CONNECTION_ID; - p = ngtcp2_put_varint(p, fr->seq); + p = ngtcp2_put_uvarint(p, fr->seq); assert((size_t)(p - out) == len); @@ -2062,7 +2024,7 @@ ngtcp2_ssize ngtcp2_pkt_encode_datagram_frame(uint8_t *out, size_t outlen, uint64_t datalen = ngtcp2_vec_len(fr->data, fr->datacnt); uint64_t len = 1 + - (fr->type == NGTCP2_FRAME_DATAGRAM ? 0 : ngtcp2_put_varint_len(datalen)) + + (fr->type == NGTCP2_FRAME_DATAGRAM ? 
0 : ngtcp2_put_uvarintlen(datalen)) + datalen; uint8_t *p; size_t i; @@ -2076,9 +2038,9 @@ ngtcp2_ssize ngtcp2_pkt_encode_datagram_frame(uint8_t *out, size_t outlen, p = out; - *p++ = fr->type; + *p++ = (uint8_t)fr->type; if (fr->type == NGTCP2_FRAME_DATAGRAM_LEN) { - p = ngtcp2_put_varint(p, datalen); + p = ngtcp2_put_uvarint(p, datalen); } for (i = 0; i < fr->datacnt; ++i) { @@ -2109,7 +2071,7 @@ ngtcp2_ssize ngtcp2_pkt_write_version_negotiation( p = dest; - *p++ = 0x80 | unused_random; + *p++ = 0xc0 | unused_random; p = ngtcp2_put_uint32be(p, 0); *p++ = (uint8_t)dcidlen; if (dcidlen) { @@ -2136,8 +2098,8 @@ size_t ngtcp2_pkt_decode_version_negotiation(uint32_t *dest, assert((payloadlen % sizeof(uint32_t)) == 0); - for (; payload != end; payload += sizeof(uint32_t)) { - *dest++ = ngtcp2_get_uint32(payload); + for (; payload != end;) { + payload = ngtcp2_get_uint32(dest++, payload); } return payloadlen / sizeof(uint32_t); @@ -2169,9 +2131,9 @@ int ngtcp2_pkt_decode_retry(ngtcp2_pkt_retry *dest, const uint8_t *payload, return NGTCP2_ERR_INVALID_ARGUMENT; } - dest->token.base = (uint8_t *)payload; - dest->token.len = (size_t)(payloadlen - NGTCP2_RETRY_TAGLEN); - ngtcp2_cpymem(dest->tag, payload + dest->token.len, NGTCP2_RETRY_TAGLEN); + dest->token = (uint8_t *)payload; + dest->tokenlen = (size_t)(payloadlen - NGTCP2_RETRY_TAGLEN); + ngtcp2_cpymem(dest->tag, payload + dest->tokenlen, NGTCP2_RETRY_TAGLEN); return 0; } @@ -2194,28 +2156,36 @@ int64_t ngtcp2_pkt_adjust_pkt_num(int64_t max_pkt_num, int64_t pkt_num, return cand; } -int ngtcp2_pkt_validate_ack(ngtcp2_ack *fr) { +int ngtcp2_pkt_validate_ack(ngtcp2_ack *fr, int64_t min_pkt_num) { int64_t largest_ack = fr->largest_ack; size_t i; - if (largest_ack < (int64_t)fr->first_ack_blklen) { + if (largest_ack < (int64_t)fr->first_ack_range) { return NGTCP2_ERR_ACK_FRAME; } - largest_ack -= (int64_t)fr->first_ack_blklen; + largest_ack -= (int64_t)fr->first_ack_range; + + if (largest_ack < min_pkt_num) { + return NGTCP2_ERR_PROTO; + } - for (i = 0; i < fr->num_blks; ++i) { - if (largest_ack < (int64_t)fr->blks[i].gap + 2) { + for (i = 0; i < fr->rangecnt; ++i) { + if (largest_ack < (int64_t)fr->ranges[i].gap + 2) { return NGTCP2_ERR_ACK_FRAME; } - largest_ack -= (int64_t)fr->blks[i].gap + 2; + largest_ack -= (int64_t)fr->ranges[i].gap + 2; - if (largest_ack < (int64_t)fr->blks[i].blklen) { + if (largest_ack < (int64_t)fr->ranges[i].len) { return NGTCP2_ERR_ACK_FRAME; } - largest_ack -= (int64_t)fr->blks[i].blklen; + largest_ack -= (int64_t)fr->ranges[i].len; + + if (largest_ack < min_pkt_num) { + return NGTCP2_ERR_PROTO; + } } return 0; @@ -2285,16 +2255,14 @@ ngtcp2_ssize ngtcp2_pkt_write_retry( switch (version) { case NGTCP2_PROTO_VER_V1: + default: nonce = (const uint8_t *)NGTCP2_RETRY_NONCE_V1; noncelen = sizeof(NGTCP2_RETRY_NONCE_V1) - 1; break; - case NGTCP2_PROTO_VER_V2_DRAFT: - nonce = (const uint8_t *)NGTCP2_RETRY_NONCE_V2_DRAFT; - noncelen = sizeof(NGTCP2_RETRY_NONCE_V2_DRAFT) - 1; + case NGTCP2_PROTO_VER_V2: + nonce = (const uint8_t *)NGTCP2_RETRY_NONCE_V2; + noncelen = sizeof(NGTCP2_RETRY_NONCE_V2) - 1; break; - default: - nonce = (const uint8_t *)NGTCP2_RETRY_NONCE_DRAFT; - noncelen = sizeof(NGTCP2_RETRY_NONCE_DRAFT) - 1; } /* OpenSSL does not like NULL plaintext. 
*/ @@ -2377,16 +2345,14 @@ int ngtcp2_pkt_verify_retry_tag(uint32_t version, const ngtcp2_pkt_retry *retry, switch (version) { case NGTCP2_PROTO_VER_V1: + default: nonce = (const uint8_t *)NGTCP2_RETRY_NONCE_V1; noncelen = sizeof(NGTCP2_RETRY_NONCE_V1) - 1; break; - case NGTCP2_PROTO_VER_V2_DRAFT: - nonce = (const uint8_t *)NGTCP2_RETRY_NONCE_V2_DRAFT; - noncelen = sizeof(NGTCP2_RETRY_NONCE_V2_DRAFT) - 1; + case NGTCP2_PROTO_VER_V2: + nonce = (const uint8_t *)NGTCP2_RETRY_NONCE_V2; + noncelen = sizeof(NGTCP2_RETRY_NONCE_V2) - 1; break; - default: - nonce = (const uint8_t *)NGTCP2_RETRY_NONCE_DRAFT; - noncelen = sizeof(NGTCP2_RETRY_NONCE_DRAFT) - 1; } /* OpenSSL does not like NULL plaintext. */ @@ -2405,8 +2371,8 @@ int ngtcp2_pkt_verify_retry_tag(uint32_t version, const ngtcp2_pkt_retry *retry, size_t ngtcp2_pkt_stream_max_datalen(int64_t stream_id, uint64_t offset, uint64_t len, size_t left) { - size_t n = 1 /* type */ + ngtcp2_put_varint_len((uint64_t)stream_id) + - (offset ? ngtcp2_put_varint_len(offset) : 0); + size_t n = 1 /* type */ + ngtcp2_put_uvarintlen((uint64_t)stream_id) + + (offset ? ngtcp2_put_uvarintlen(offset) : 0); if (left <= n) { return (size_t)-1; @@ -2436,7 +2402,7 @@ size_t ngtcp2_pkt_stream_max_datalen(int64_t stream_id, uint64_t offset, } size_t ngtcp2_pkt_crypto_max_datalen(uint64_t offset, size_t len, size_t left) { - size_t n = 1 /* type */ + ngtcp2_put_varint_len(offset); + size_t n = 1 /* type */ + ngtcp2_put_uvarintlen(offset); /* CRYPTO frame must contain nonzero length data. Return -1 if there is no space to write crypto data. */ @@ -2468,17 +2434,16 @@ size_t ngtcp2_pkt_crypto_max_datalen(uint64_t offset, size_t len, size_t left) { } size_t ngtcp2_pkt_datagram_framelen(size_t len) { - return 1 /* type */ + ngtcp2_put_varint_len(len) + len; + return 1 /* type */ + ngtcp2_put_uvarintlen(len) + len; } int ngtcp2_is_supported_version(uint32_t version) { switch (version) { case NGTCP2_PROTO_VER_V1: - case NGTCP2_PROTO_VER_V2_DRAFT: + case NGTCP2_PROTO_VER_V2: return 1; default: - return NGTCP2_PROTO_VER_DRAFT_MIN <= version && - version <= NGTCP2_PROTO_VER_DRAFT_MAX; + return 0; } } @@ -2491,15 +2456,15 @@ uint8_t ngtcp2_pkt_get_type_long(uint32_t version, uint8_t c) { uint8_t pkt_type = (uint8_t)((c & NGTCP2_LONG_TYPE_MASK) >> 4); switch (version) { - case NGTCP2_PROTO_VER_V2_DRAFT: + case NGTCP2_PROTO_VER_V2: switch (pkt_type) { - case NGTCP2_PKT_TYPE_INITIAL_V2_DRAFT: + case NGTCP2_PKT_TYPE_INITIAL_V2: return NGTCP2_PKT_INITIAL; - case NGTCP2_PKT_TYPE_0RTT_V2_DRAFT: + case NGTCP2_PKT_TYPE_0RTT_V2: return NGTCP2_PKT_0RTT; - case NGTCP2_PKT_TYPE_HANDSHAKE_V2_DRAFT: + case NGTCP2_PKT_TYPE_HANDSHAKE_V2: return NGTCP2_PKT_HANDSHAKE; - case NGTCP2_PKT_TYPE_RETRY_V2_DRAFT: + case NGTCP2_PKT_TYPE_RETRY_V2: return NGTCP2_PKT_RETRY; default: return 0; @@ -2509,8 +2474,6 @@ uint8_t ngtcp2_pkt_get_type_long(uint32_t version, uint8_t c) { return 0; } - /* QUIC v1 and draft versions share the same numeric packet - types. 
*/ switch (pkt_type) { case NGTCP2_PKT_TYPE_INITIAL_V1: return NGTCP2_PKT_INITIAL; @@ -2528,27 +2491,24 @@ uint8_t ngtcp2_pkt_get_type_long(uint32_t version, uint8_t c) { uint8_t ngtcp2_pkt_versioned_type(uint32_t version, uint32_t pkt_type) { switch (version) { - case NGTCP2_PROTO_VER_V2_DRAFT: + case NGTCP2_PROTO_VER_V2: switch (pkt_type) { case NGTCP2_PKT_INITIAL: - return NGTCP2_PKT_TYPE_INITIAL_V2_DRAFT; + return NGTCP2_PKT_TYPE_INITIAL_V2; case NGTCP2_PKT_0RTT: - return NGTCP2_PKT_TYPE_0RTT_V2_DRAFT; + return NGTCP2_PKT_TYPE_0RTT_V2; case NGTCP2_PKT_HANDSHAKE: - return NGTCP2_PKT_TYPE_HANDSHAKE_V2_DRAFT; + return NGTCP2_PKT_TYPE_HANDSHAKE_V2; case NGTCP2_PKT_RETRY: - return NGTCP2_PKT_TYPE_RETRY_V2_DRAFT; + return NGTCP2_PKT_TYPE_RETRY_V2; default: - assert(0); - abort(); + ngtcp2_unreachable(); } default: /* Assume that unsupported versions share the numeric long packet types with QUIC v1 in order to send a packet to elicit Version Negotiation packet. */ - /* QUIC v1 and draft versions share the same numeric packet - types. */ switch (pkt_type) { case NGTCP2_PKT_INITIAL: return NGTCP2_PKT_TYPE_INITIAL_V1; @@ -2559,8 +2519,7 @@ uint8_t ngtcp2_pkt_versioned_type(uint32_t version, uint32_t pkt_type) { case NGTCP2_PKT_RETRY: return NGTCP2_PKT_TYPE_RETRY_V1; default: - assert(0); - abort(); + ngtcp2_unreachable(); } } } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h index 2f7838a08a5625..b1bec97c31a08c 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h @@ -88,9 +88,9 @@ client stream ID. */ #define NGTCP2_MAX_CLIENT_STREAM_ID_UNI ((int64_t)0x3ffffffffffffffell) -/* NGTCP2_MAX_NUM_ACK_BLK is the maximum number of Additional ACK - blocks which this library can create, or decode. */ -#define NGTCP2_MAX_ACK_BLKS 32 +/* NGTCP2_MAX_NUM_ACK_RANGES is the maximum number of Additional ACK + ranges which this library can create, or decode. */ +#define NGTCP2_MAX_ACK_RANGES 32 /* NGTCP2_MAX_PKT_NUM is the maximum packet number. */ #define NGTCP2_MAX_PKT_NUM ((int64_t)((1ll << 62) - 1)) @@ -124,62 +124,67 @@ v1. */ #define NGTCP2_PKT_TYPE_RETRY_V1 0x3 -/* NGTCP2_PKT_TYPE_INITIAL_V2_DRAFT is Initial long header packet type - for QUIC v2 draft. */ -#define NGTCP2_PKT_TYPE_INITIAL_V2_DRAFT 0x1 -/* NGTCP2_PKT_TYPE_0RTT_V2_DRAFT is 0RTT long header packet type for - QUIC v2 draft. */ -#define NGTCP2_PKT_TYPE_0RTT_V2_DRAFT 0x2 -/* NGTCP2_PKT_TYPE_HANDSHAKE_V2_DRAFT is Handshake long header packet - type for QUIC v2 draft. */ -#define NGTCP2_PKT_TYPE_HANDSHAKE_V2_DRAFT 0x3 -/* NGTCP2_PKT_TYPE_RETRY_V2_DRAFT is Retry long header packet type for - QUIC v2 draft. */ -#define NGTCP2_PKT_TYPE_RETRY_V2_DRAFT 0x0 +/* NGTCP2_PKT_TYPE_INITIAL_V2 is Initial long header packet type for + QUIC v2. */ +#define NGTCP2_PKT_TYPE_INITIAL_V2 0x1 +/* NGTCP2_PKT_TYPE_0RTT_V2 is 0RTT long header packet type for QUIC + v2. */ +#define NGTCP2_PKT_TYPE_0RTT_V2 0x2 +/* NGTCP2_PKT_TYPE_HANDSHAKE_V2 is Handshake long header packet type + for QUIC v2. */ +#define NGTCP2_PKT_TYPE_HANDSHAKE_V2 0x3 +/* NGTCP2_PKT_TYPE_RETRY_V2 is Retry long header packet type for QUIC + v2. 
*/ +#define NGTCP2_PKT_TYPE_RETRY_V2 0x0 typedef struct ngtcp2_pkt_retry { ngtcp2_cid odcid; - ngtcp2_vec token; + uint8_t *token; + size_t tokenlen; uint8_t tag[NGTCP2_RETRY_TAGLEN]; } ngtcp2_pkt_retry; -typedef enum { - NGTCP2_FRAME_PADDING = 0x00, - NGTCP2_FRAME_PING = 0x01, - NGTCP2_FRAME_ACK = 0x02, - NGTCP2_FRAME_ACK_ECN = 0x03, - NGTCP2_FRAME_RESET_STREAM = 0x04, - NGTCP2_FRAME_STOP_SENDING = 0x05, - NGTCP2_FRAME_CRYPTO = 0x06, - NGTCP2_FRAME_NEW_TOKEN = 0x07, - NGTCP2_FRAME_STREAM = 0x08, - NGTCP2_FRAME_MAX_DATA = 0x10, - NGTCP2_FRAME_MAX_STREAM_DATA = 0x11, - NGTCP2_FRAME_MAX_STREAMS_BIDI = 0x12, - NGTCP2_FRAME_MAX_STREAMS_UNI = 0x13, - NGTCP2_FRAME_DATA_BLOCKED = 0x14, - NGTCP2_FRAME_STREAM_DATA_BLOCKED = 0x15, - NGTCP2_FRAME_STREAMS_BLOCKED_BIDI = 0x16, - NGTCP2_FRAME_STREAMS_BLOCKED_UNI = 0x17, - NGTCP2_FRAME_NEW_CONNECTION_ID = 0x18, - NGTCP2_FRAME_RETIRE_CONNECTION_ID = 0x19, - NGTCP2_FRAME_PATH_CHALLENGE = 0x1a, - NGTCP2_FRAME_PATH_RESPONSE = 0x1b, - NGTCP2_FRAME_CONNECTION_CLOSE = 0x1c, - NGTCP2_FRAME_CONNECTION_CLOSE_APP = 0x1d, - NGTCP2_FRAME_HANDSHAKE_DONE = 0x1e, - NGTCP2_FRAME_DATAGRAM = 0x30, - NGTCP2_FRAME_DATAGRAM_LEN = 0x31, -} ngtcp2_frame_type; - +#define NGTCP2_FRAME_PADDING 0x00 +#define NGTCP2_FRAME_PING 0x01 +#define NGTCP2_FRAME_ACK 0x02 +#define NGTCP2_FRAME_ACK_ECN 0x03 +#define NGTCP2_FRAME_RESET_STREAM 0x04 +#define NGTCP2_FRAME_STOP_SENDING 0x05 +#define NGTCP2_FRAME_CRYPTO 0x06 +#define NGTCP2_FRAME_NEW_TOKEN 0x07 +#define NGTCP2_FRAME_STREAM 0x08 +#define NGTCP2_FRAME_MAX_DATA 0x10 +#define NGTCP2_FRAME_MAX_STREAM_DATA 0x11 +#define NGTCP2_FRAME_MAX_STREAMS_BIDI 0x12 +#define NGTCP2_FRAME_MAX_STREAMS_UNI 0x13 +#define NGTCP2_FRAME_DATA_BLOCKED 0x14 +#define NGTCP2_FRAME_STREAM_DATA_BLOCKED 0x15 +#define NGTCP2_FRAME_STREAMS_BLOCKED_BIDI 0x16 +#define NGTCP2_FRAME_STREAMS_BLOCKED_UNI 0x17 +#define NGTCP2_FRAME_NEW_CONNECTION_ID 0x18 +#define NGTCP2_FRAME_RETIRE_CONNECTION_ID 0x19 +#define NGTCP2_FRAME_PATH_CHALLENGE 0x1a +#define NGTCP2_FRAME_PATH_RESPONSE 0x1b +#define NGTCP2_FRAME_CONNECTION_CLOSE 0x1c +#define NGTCP2_FRAME_CONNECTION_CLOSE_APP 0x1d +#define NGTCP2_FRAME_HANDSHAKE_DONE 0x1e +#define NGTCP2_FRAME_DATAGRAM 0x30 +#define NGTCP2_FRAME_DATAGRAM_LEN 0x31 + +/* ngtcp2_stream represents STREAM and CRYPTO frames. */ typedef struct ngtcp2_stream { - uint8_t type; + uint64_t type; /** * flags of decoded STREAM frame. This gets ignored when encoding - * STREAM frame. + * STREAM frame. CRYPTO frame does not include this field, and must + * set it to 0. */ uint8_t flags; + /* CRYPTO frame does not include this field, and must set it to + 0. */ uint8_t fin; + /* CRYPTO frame does not include this field, and must set it to + 0. */ int64_t stream_id; uint64_t offset; /* datacnt is the number of elements that data contains. Although @@ -190,13 +195,13 @@ typedef struct ngtcp2_stream { ngtcp2_vec data[1]; } ngtcp2_stream; -typedef struct ngtcp2_ack_blk { +typedef struct ngtcp2_ack_range { uint64_t gap; - uint64_t blklen; -} ngtcp2_ack_blk; + uint64_t len; +} ngtcp2_ack_range; typedef struct ngtcp2_ack { - uint8_t type; + uint64_t type; int64_t largest_ack; uint64_t ack_delay; /** @@ -209,13 +214,13 @@ typedef struct ngtcp2_ack { uint64_t ect1; uint64_t ce; } ecn; - uint64_t first_ack_blklen; - size_t num_blks; - ngtcp2_ack_blk blks[1]; + uint64_t first_ack_range; + size_t rangecnt; + ngtcp2_ack_range ranges[1]; } ngtcp2_ack; typedef struct ngtcp2_padding { - uint8_t type; + uint64_t type; /** * The length of contiguous PADDING frames. 
*/ @@ -223,14 +228,14 @@ typedef struct ngtcp2_padding { } ngtcp2_padding; typedef struct ngtcp2_reset_stream { - uint8_t type; + uint64_t type; int64_t stream_id; uint64_t app_error_code; uint64_t final_size; } ngtcp2_reset_stream; typedef struct ngtcp2_connection_close { - uint8_t type; + uint64_t type; uint64_t error_code; uint64_t frame_type; size_t reasonlen; @@ -238,7 +243,7 @@ typedef struct ngtcp2_connection_close { } ngtcp2_connection_close; typedef struct ngtcp2_max_data { - uint8_t type; + uint64_t type; /** * max_data is Maximum Data. */ @@ -246,38 +251,38 @@ typedef struct ngtcp2_max_data { } ngtcp2_max_data; typedef struct ngtcp2_max_stream_data { - uint8_t type; + uint64_t type; int64_t stream_id; uint64_t max_stream_data; } ngtcp2_max_stream_data; typedef struct ngtcp2_max_streams { - uint8_t type; + uint64_t type; uint64_t max_streams; } ngtcp2_max_streams; typedef struct ngtcp2_ping { - uint8_t type; + uint64_t type; } ngtcp2_ping; typedef struct ngtcp2_data_blocked { - uint8_t type; + uint64_t type; uint64_t offset; } ngtcp2_data_blocked; typedef struct ngtcp2_stream_data_blocked { - uint8_t type; + uint64_t type; int64_t stream_id; uint64_t offset; } ngtcp2_stream_data_blocked; typedef struct ngtcp2_streams_blocked { - uint8_t type; + uint64_t type; uint64_t max_streams; } ngtcp2_streams_blocked; typedef struct ngtcp2_new_connection_id { - uint8_t type; + uint64_t type; uint64_t seq; uint64_t retire_prior_to; ngtcp2_cid cid; @@ -285,48 +290,38 @@ typedef struct ngtcp2_new_connection_id { } ngtcp2_new_connection_id; typedef struct ngtcp2_stop_sending { - uint8_t type; + uint64_t type; int64_t stream_id; uint64_t app_error_code; } ngtcp2_stop_sending; typedef struct ngtcp2_path_challenge { - uint8_t type; + uint64_t type; uint8_t data[NGTCP2_PATH_CHALLENGE_DATALEN]; } ngtcp2_path_challenge; typedef struct ngtcp2_path_response { - uint8_t type; + uint64_t type; uint8_t data[NGTCP2_PATH_CHALLENGE_DATALEN]; } ngtcp2_path_response; -typedef struct ngtcp2_crypto { - uint8_t type; - uint64_t offset; - /* datacnt is the number of elements that data contains. Although - the length of data is 1 in this definition, the library may - allocate extra bytes to hold more elements. */ - size_t datacnt; - /* data is the array of ngtcp2_vec which references data. */ - ngtcp2_vec data[1]; -} ngtcp2_crypto; - typedef struct ngtcp2_new_token { - uint8_t type; - ngtcp2_vec token; + uint64_t type; + uint8_t *token; + size_t tokenlen; } ngtcp2_new_token; typedef struct ngtcp2_retire_connection_id { - uint8_t type; + uint64_t type; uint64_t seq; } ngtcp2_retire_connection_id; typedef struct ngtcp2_handshake_done { - uint8_t type; + uint64_t type; } ngtcp2_handshake_done; typedef struct ngtcp2_datagram { - uint8_t type; + uint64_t type; /* dgram_id is an opaque identifier chosen by an application. */ uint64_t dgram_id; /* datacnt is the number of elements that data contains. 
*/ @@ -341,7 +336,7 @@ typedef struct ngtcp2_datagram { } ngtcp2_datagram; typedef union ngtcp2_frame { - uint8_t type; + uint64_t type; ngtcp2_stream stream; ngtcp2_ack ack; ngtcp2_padding padding; @@ -358,7 +353,6 @@ typedef union ngtcp2_frame { ngtcp2_stop_sending stop_sending; ngtcp2_path_challenge path_challenge; ngtcp2_path_response path_response; - ngtcp2_crypto crypto; ngtcp2_new_token new_token; ngtcp2_retire_connection_id retire_connection_id; ngtcp2_handshake_done handshake_done; @@ -454,7 +448,8 @@ ngtcp2_ssize ngtcp2_pkt_encode_hd_short(uint8_t *out, size_t outlen, * frame if it succeeds, or one of the following negative error codes: * * :enum:`NGTCP2_ERR_FRAME_ENCODING` - * Frame is badly formatted; or frame type is unknown. + * Frame is badly formatted; or frame type is unknown; or + * |payloadlen| is 0. */ ngtcp2_ssize ngtcp2_pkt_decode_frame(ngtcp2_frame *dest, const uint8_t *payload, size_t payloadlen); @@ -554,9 +549,9 @@ ngtcp2_ssize ngtcp2_pkt_decode_ack_frame(ngtcp2_ack *dest, * This function returns the exact number of bytes read to decode * PADDING frames. */ -size_t ngtcp2_pkt_decode_padding_frame(ngtcp2_padding *dest, - const uint8_t *payload, - size_t payloadlen); +ngtcp2_ssize ngtcp2_pkt_decode_padding_frame(ngtcp2_padding *dest, + const uint8_t *payload, + size_t payloadlen); /* * ngtcp2_pkt_decode_reset_stream_frame decodes RESET_STREAM frame @@ -639,11 +634,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_max_streams_frame(ngtcp2_max_streams *dest, * length |payloadlen|. The result is stored in the object pointed by * |dest|. PING frame must start at payload[0]. This function * finishes when it decodes one PING frame, and returns the exact - * number of bytes read to decode a frame if it succeeds, or one of - * the following negative error codes: - * - * NGTCP2_ERR_FRAME_ENCODING - * Payload is too short to include PING frame. + * number of bytes read to decode a frame. */ ngtcp2_ssize ngtcp2_pkt_decode_ping_frame(ngtcp2_ping *dest, const uint8_t *payload, @@ -773,7 +764,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_path_response_frame(ngtcp2_path_response *dest, * NGTCP2_ERR_FRAME_ENCODING * Payload is too short to include CRYPTO frame. */ -ngtcp2_ssize ngtcp2_pkt_decode_crypto_frame(ngtcp2_crypto *dest, +ngtcp2_ssize ngtcp2_pkt_decode_crypto_frame(ngtcp2_stream *dest, const uint8_t *payload, size_t payloadlen); @@ -814,11 +805,7 @@ ngtcp2_pkt_decode_retire_connection_id_frame(ngtcp2_retire_connection_id *dest, * object pointed by |dest|. HANDSHAKE_DONE frame must start at * payload[0]. This function finishes when it decodes one * HANDSHAKE_DONE frame, and returns the exact number of bytes read to - * decode a frame if it succeeds, or one of the following negative - * error codes: - * - * NGTCP2_ERR_FRAME_ENCODING - * Payload is too short to include HANDSHAKE_DONE frame. + * decode a frame. */ ngtcp2_ssize ngtcp2_pkt_decode_handshake_done_frame(ngtcp2_handshake_done *dest, const uint8_t *payload, @@ -1076,7 +1063,7 @@ ngtcp2_pkt_encode_path_response_frame(uint8_t *out, size_t outlen, * Buffer does not have enough capacity to write a frame. */ ngtcp2_ssize ngtcp2_pkt_encode_crypto_frame(uint8_t *out, size_t outlen, - const ngtcp2_crypto *fr); + const ngtcp2_stream *fr); /* * ngtcp2_pkt_encode_new_token_frame encodes NEW_TOKEN frame |fr| into @@ -1142,14 +1129,19 @@ int64_t ngtcp2_pkt_adjust_pkt_num(int64_t max_pkt_num, int64_t pkt_num, /* * ngtcp2_pkt_validate_ack checks that ack is malformed or not. + * |min_pkt_num| is the minimum packet number that an endpoint sends. 
+ * It is an error to receive acknowledgements for a packet less than + * |min_pkt_num|. * * This function returns 0 if it succeeds, or one of the following * negative error codes: * * NGTCP2_ERR_ACK_FRAME * ACK frame is malformed + * NGTCP2_ERR_PROTO + * |fr| contains a packet number less than |min_pkt_num|. */ -int ngtcp2_pkt_validate_ack(ngtcp2_ack *fr); +int ngtcp2_pkt_validate_ack(ngtcp2_ack *fr, int64_t min_pkt_num); /* * ngtcp2_pkt_stream_max_datalen returns the maximum number of bytes diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h new file mode 100644 index 00000000000000..66b0ee9e6c13cf --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pktns_id.h @@ -0,0 +1,62 @@ +/* + * ngtcp2 + * + * Copyright (c) 2023 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef NGTCP2_PKTNS_ID_H +#define NGTCP2_PKTNS_ID_H + +#ifdef HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include + +/** + * @enum + * + * :type:`ngtcp2_pktns_id` defines packet number space identifier. + */ +typedef enum ngtcp2_pktns_id { + /** + * :enum:`NGTCP2_PKTNS_ID_INITIAL` is the Initial packet number + * space. + */ + NGTCP2_PKTNS_ID_INITIAL, + /** + * :enum:`NGTCP2_PKTNS_ID_HANDSHAKE` is the Handshake packet number + * space. + */ + NGTCP2_PKTNS_ID_HANDSHAKE, + /** + * :enum:`NGTCP2_PKTNS_ID_APPLICATION` is the Application data + * packet number space. + */ + NGTCP2_PKTNS_ID_APPLICATION, + /** + * :enum:`NGTCP2_PKTNS_ID_MAX` is defined to get the number of + * packet number spaces. 
+ */ + NGTCP2_PKTNS_ID_MAX +} ngtcp2_pktns_id; + +#endif /* NGTCP2_PKTNS_ID_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c index 26318bb1c8e38c..771ef5e026d12d 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pmtud.c @@ -41,7 +41,7 @@ static size_t mtu_probes[] = { 1492 - 48, /* PPPoE */ }; -static size_t mtu_probeslen = sizeof(mtu_probes) / sizeof(mtu_probes[0]); +#define NGTCP2_MTU_PROBESLEN ngtcp2_arraylen(mtu_probes) int ngtcp2_pmtud_new(ngtcp2_pmtud **ppmtud, size_t max_udp_payload_size, size_t hard_max_udp_payload_size, int64_t tx_pkt_num, @@ -61,7 +61,7 @@ int ngtcp2_pmtud_new(ngtcp2_pmtud **ppmtud, size_t max_udp_payload_size, pmtud->hard_max_udp_payload_size = hard_max_udp_payload_size; pmtud->min_fail_udp_payload_size = SIZE_MAX; - for (; pmtud->mtu_idx < mtu_probeslen; ++pmtud->mtu_idx) { + for (; pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN; ++pmtud->mtu_idx) { if (mtu_probes[pmtud->mtu_idx] > pmtud->hard_max_udp_payload_size) { continue; } @@ -84,7 +84,7 @@ void ngtcp2_pmtud_del(ngtcp2_pmtud *pmtud) { } size_t ngtcp2_pmtud_probelen(ngtcp2_pmtud *pmtud) { - assert(pmtud->mtu_idx < mtu_probeslen); + assert(pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN); return mtu_probes[pmtud->mtu_idx]; } @@ -107,13 +107,13 @@ int ngtcp2_pmtud_require_probe(ngtcp2_pmtud *pmtud) { } static void pmtud_next_probe(ngtcp2_pmtud *pmtud) { - assert(pmtud->mtu_idx < mtu_probeslen); + assert(pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN); ++pmtud->mtu_idx; pmtud->num_pkts_sent = 0; pmtud->expiry = UINT64_MAX; - for (; pmtud->mtu_idx < mtu_probeslen; ++pmtud->mtu_idx) { + for (; pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN; ++pmtud->mtu_idx) { if (mtu_probes[pmtud->mtu_idx] <= pmtud->max_udp_payload_size || mtu_probes[pmtud->mtu_idx] > pmtud->hard_max_udp_payload_size) { continue; @@ -129,7 +129,7 @@ void ngtcp2_pmtud_probe_success(ngtcp2_pmtud *pmtud, size_t payloadlen) { pmtud->max_udp_payload_size = ngtcp2_max(pmtud->max_udp_payload_size, payloadlen); - assert(pmtud->mtu_idx < mtu_probeslen); + assert(pmtud->mtu_idx < NGTCP2_MTU_PROBESLEN); if (mtu_probes[pmtud->mtu_idx] > pmtud->max_udp_payload_size) { return; @@ -156,5 +156,5 @@ void ngtcp2_pmtud_handle_expiry(ngtcp2_pmtud *pmtud, ngtcp2_tstamp ts) { } int ngtcp2_pmtud_finished(ngtcp2_pmtud *pmtud) { - return pmtud->mtu_idx >= mtu_probeslen; + return pmtud->mtu_idx >= NGTCP2_MTU_PROBESLEN; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c index 5376246bd4caa9..ffba131e02b9a5 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c @@ -55,7 +55,7 @@ int ngtcp2_ppe_encode_hd(ngtcp2_ppe *ppe, const ngtcp2_pkt_hd *hd) { if (hd->flags & NGTCP2_PKT_FLAG_LONG_FORM) { ppe->len_offset = 1 + 4 + 1 + hd->dcid.datalen + 1 + hd->scid.datalen; if (hd->type == NGTCP2_PKT_INITIAL) { - ppe->len_offset += ngtcp2_put_varint_len(hd->token.len) + hd->token.len; + ppe->len_offset += ngtcp2_put_uvarintlen(hd->tokenlen) + hd->tokenlen; } ppe->pkt_num_offset = ppe->len_offset + NGTCP2_PKT_LENGTHLEN; rv = ngtcp2_pkt_encode_hd_long( @@ -115,7 +115,7 @@ ngtcp2_ssize ngtcp2_ppe_final(ngtcp2_ppe *ppe, const uint8_t **ppkt) { assert(cc->hp_mask); if (ppe->len_offset) { - ngtcp2_put_varint30( + ngtcp2_put_uvarint30( buf->begin + ppe->len_offset, (uint16_t)(payloadlen + ppe->pkt_numlen + cc->aead.max_overhead)); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h index 293cbcaaf6e881..c9da15248a3e2b 100644 --- 
a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pv.h @@ -76,10 +76,6 @@ void ngtcp2_pv_entry_init(ngtcp2_pv_entry *pvent, const uint8_t *data, fallback DCID. If path validation succeeds, fallback DCID is retired if it does not equal to the current DCID. */ #define NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE 0x04u -/* NGTCP2_PV_FLAG_MTU_PROBE indicates that a validation must probe - least MTU that QUIC requires, which is 1200 bytes. If it fails, a - path is not viable. */ -#define NGTCP2_PV_FLAG_MTU_PROBE 0x08u /* NGTCP2_PV_FLAG_PREFERRED_ADDR indicates that client is migrating to server's preferred address. This flag is only used by client. */ #define NGTCP2_PV_FLAG_PREFERRED_ADDR 0x10u diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c index 69eaeb7367438d..27675347794b2a 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_qlog.c @@ -30,6 +30,8 @@ #include "ngtcp2_vec.h" #include "ngtcp2_conv.h" #include "ngtcp2_net.h" +#include "ngtcp2_unreachable.h" +#include "ngtcp2_conn_stat.h" void ngtcp2_qlog_init(ngtcp2_qlog *qlog, ngtcp2_qlog_write write, ngtcp2_tstamp ts, void *user_data) { @@ -284,9 +286,9 @@ static uint8_t *write_pkt_hd(uint8_t *p, const ngtcp2_pkt_hd *hd) { p = write_pair(p, "packet_type", qlog_pkt_type(hd)); *p++ = ','; p = write_pair_number(p, "packet_number", (uint64_t)hd->pkt_num); - if (hd->type == NGTCP2_PKT_INITIAL && hd->token.len) { + if (hd->type == NGTCP2_PKT_INITIAL && hd->tokenlen) { p = write_verbatim(p, ",\"token\":{"); - p = write_pair_hex(p, "data", hd->token.base, hd->token.len); + p = write_pair_hex(p, "data", hd->token, hd->tokenlen); *p++ = '}'; } /* TODO Write DCIL and DCID */ @@ -316,7 +318,7 @@ static uint8_t *write_ping_frame(uint8_t *p, const ngtcp2_ping *fr) { static uint8_t *write_ack_frame(uint8_t *p, const ngtcp2_ack *fr) { int64_t largest_ack, min_ack; size_t i; - const ngtcp2_ack_blk *blk; + const ngtcp2_ack_range *range; /* * {"frame_type":"ack","ack_delay":0000000000000000000,"acked_ranges":[]} @@ -336,7 +338,7 @@ static uint8_t *write_ack_frame(uint8_t *p, const ngtcp2_ack *fr) { p = write_verbatim(p, ",\"acked_ranges\":["); largest_ack = fr->largest_ack; - min_ack = fr->largest_ack - (int64_t)fr->first_ack_blklen; + min_ack = fr->largest_ack - (int64_t)fr->first_ack_range; *p++ = '['; p = write_number(p, (uint64_t)min_ack); @@ -346,10 +348,10 @@ static uint8_t *write_ack_frame(uint8_t *p, const ngtcp2_ack *fr) { } *p++ = ']'; - for (i = 0; i < fr->num_blks; ++i) { - blk = &fr->blks[i]; - largest_ack = min_ack - (int64_t)blk->gap - 2; - min_ack = largest_ack - (int64_t)blk->blklen; + for (i = 0; i < fr->rangecnt; ++i) { + range = &fr->ranges[i]; + largest_ack = min_ack - (int64_t)range->gap - 2; + min_ack = largest_ack - (int64_t)range->len; *p++ = ','; *p++ = '['; p = write_number(p, (uint64_t)min_ack); @@ -410,7 +412,7 @@ static uint8_t *write_stop_sending_frame(uint8_t *p, return p; } -static uint8_t *write_crypto_frame(uint8_t *p, const ngtcp2_crypto *fr) { +static uint8_t *write_crypto_frame(uint8_t *p, const ngtcp2_stream *fr) { /* * {"frame_type":"crypto","offset":0000000000000000000,"length":0000000000000000000} */ @@ -432,9 +434,9 @@ static uint8_t *write_new_token_frame(uint8_t *p, const ngtcp2_new_token *fr) { #define NGTCP2_QLOG_NEW_TOKEN_FRAME_OVERHEAD 75 p = write_verbatim(p, "{\"frame_type\":\"new_token\","); - p = write_pair_number(p, "length", fr->token.len); + p = write_pair_number(p, "length", fr->tokenlen); p = write_verbatim(p, 
",\"token\":{"); - p = write_pair_hex(p, "data", fr->token.base, fr->token.len); + p = write_pair_hex(p, "data", fr->token, fr->tokenlen); *p++ = '}'; *p++ = '}'; @@ -513,45 +515,53 @@ static uint8_t *write_max_streams_frame(uint8_t *p, static uint8_t *write_data_blocked_frame(uint8_t *p, const ngtcp2_data_blocked *fr) { - (void)fr; - /* - * {"frame_type":"data_blocked"} + * {"frame_type":"data_blocked","limit":0000000000000000000} */ -#define NGTCP2_QLOG_DATA_BLOCKED_FRAME_OVERHEAD 29 +#define NGTCP2_QLOG_DATA_BLOCKED_FRAME_OVERHEAD 57 - /* TODO log limit */ + p = write_verbatim(p, "{\"frame_type\":\"data_blocked\","); + p = write_pair_number(p, "limit", fr->offset); + *p++ = '}'; - return write_verbatim(p, "{\"frame_type\":\"data_blocked\"}"); + return p; } static uint8_t * write_stream_data_blocked_frame(uint8_t *p, const ngtcp2_stream_data_blocked *fr) { - (void)fr; - /* - * {"frame_type":"stream_data_blocked"} + * {"frame_type":"stream_data_blocked","stream_id":0000000000000000000,"limit":0000000000000000000} */ -#define NGTCP2_QLOG_STREAM_DATA_BLOCKED_FRAME_OVERHEAD 36 +#define NGTCP2_QLOG_STREAM_DATA_BLOCKED_FRAME_OVERHEAD 96 - /* TODO log limit */ + p = write_verbatim(p, "{\"frame_type\":\"stream_data_blocked\","); + p = write_pair_number(p, "stream_id", (uint64_t)fr->stream_id); + *p++ = ','; + p = write_pair_number(p, "limit", fr->offset); + *p++ = '}'; - return write_verbatim(p, "{\"frame_type\":\"stream_data_blocked\"}"); + return p; } static uint8_t *write_streams_blocked_frame(uint8_t *p, const ngtcp2_streams_blocked *fr) { - (void)fr; - /* - * {"frame_type":"streams_blocked"} + * {"frame_type":"streams_blocked","stream_type":"unidirectional","limit":0000000000000000000} */ -#define NGTCP2_QLOG_STREAMS_BLOCKED_FRAME_OVERHEAD 32 +#define NGTCP2_QLOG_STREAMS_BLOCKED_FRAME_OVERHEAD 91 - /* TODO Log stream_type and limit */ + p = write_verbatim(p, "{\"frame_type\":\"streams_blocked\",\"stream_type\":"); + if (fr->type == NGTCP2_FRAME_STREAMS_BLOCKED_BIDI) { + p = write_string(p, "bidirectional"); + } else { + p = write_string(p, "unidirectional"); + } + *p++ = ','; + p = write_pair_number(p, "limit", fr->max_streams); + *p++ = '}'; - return write_verbatim(p, "{\"frame_type\":\"streams_blocked\"}"); + return p; } static uint8_t * @@ -715,7 +725,7 @@ static void qlog_pkt_write_end(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, (1 + 50 + NGTCP2_QLOG_PKT_HD_OVERHEAD) if (ngtcp2_buf_left(&qlog->buf) < - NGTCP2_QLOG_PKT_WRITE_END_OVERHEAD + hd->token.len * 2) { + NGTCP2_QLOG_PKT_WRITE_END_OVERHEAD + hd->tokenlen * 2) { return; } @@ -765,7 +775,7 @@ void ngtcp2_qlog_write_frame(ngtcp2_qlog *qlog, const ngtcp2_frame *fr) { (size_t)(fr->type == NGTCP2_FRAME_ACK_ECN ? 
NGTCP2_QLOG_ACK_FRAME_ECN_OVERHEAD : 0) + - NGTCP2_QLOG_ACK_FRAME_RANGE_OVERHEAD * (1 + fr->ack.num_blks) + 1) { + NGTCP2_QLOG_ACK_FRAME_RANGE_OVERHEAD * (1 + fr->ack.rangecnt) + 1) { return; } p = write_ack_frame(p, &fr->ack); @@ -788,11 +798,11 @@ void ngtcp2_qlog_write_frame(ngtcp2_qlog *qlog, const ngtcp2_frame *fr) { if (ngtcp2_buf_left(&qlog->buf) < NGTCP2_QLOG_CRYPTO_FRAME_OVERHEAD + 1) { return; } - p = write_crypto_frame(p, &fr->crypto); + p = write_crypto_frame(p, &fr->stream); break; case NGTCP2_FRAME_NEW_TOKEN: - if (ngtcp2_buf_left(&qlog->buf) < NGTCP2_QLOG_NEW_TOKEN_FRAME_OVERHEAD + - fr->new_token.token.len * 2 + 1) { + if (ngtcp2_buf_left(&qlog->buf) < + NGTCP2_QLOG_NEW_TOKEN_FRAME_OVERHEAD + fr->new_token.tokenlen * 2 + 1) { return; } p = write_new_token_frame(p, &fr->new_token); @@ -897,7 +907,7 @@ void ngtcp2_qlog_write_frame(ngtcp2_qlog *qlog, const ngtcp2_frame *fr) { p = write_datagram_frame(p, &fr->datagram); break; default: - assert(0); + ngtcp2_unreachable(); } *p++ = ','; @@ -929,6 +939,8 @@ void ngtcp2_qlog_parameters_set_transport_params( uint8_t buf[1024]; uint8_t *p = buf; const ngtcp2_preferred_addr *paddr; + const ngtcp2_sockaddr_in *sa_in; + const ngtcp2_sockaddr_in6 *sa_in6; if (!qlog->write) { return; @@ -996,20 +1008,33 @@ void ngtcp2_qlog_parameters_set_transport_params( *p++ = ','; p = write_pair_number(p, "initial_max_streams_uni", params->initial_max_streams_uni); - if (params->preferred_address_present) { + if (params->preferred_addr_present) { *p++ = ','; - paddr = ¶ms->preferred_address; + paddr = ¶ms->preferred_addr; p = write_string(p, "preferred_address"); *p++ = ':'; *p++ = '{'; - p = write_pair_hex(p, "ip_v4", paddr->ipv4_addr, sizeof(paddr->ipv4_addr)); - *p++ = ','; - p = write_pair_number(p, "port_v4", paddr->ipv4_port); - *p++ = ','; - p = write_pair_hex(p, "ip_v6", paddr->ipv6_addr, sizeof(paddr->ipv6_addr)); - *p++ = ','; - p = write_pair_number(p, "port_v6", paddr->ipv6_port); - *p++ = ','; + + if (paddr->ipv4_present) { + sa_in = &paddr->ipv4; + + p = write_pair_hex(p, "ip_v4", (const uint8_t *)&sa_in->sin_addr, + sizeof(sa_in->sin_addr)); + *p++ = ','; + p = write_pair_number(p, "port_v4", ngtcp2_ntohs(sa_in->sin_port)); + *p++ = ','; + } + + if (paddr->ipv6_present) { + sa_in6 = &paddr->ipv6; + + p = write_pair_hex(p, "ip_v6", (const uint8_t *)&sa_in6->sin6_addr, + sizeof(sa_in6->sin6_addr)); + *p++ = ','; + p = write_pair_number(p, "port_v6", ngtcp2_ntohs(sa_in6->sin6_port)); + *p++ = ','; + } + p = write_pair_cid(p, "connection_id", &paddr->cid); p = write_verbatim(p, ",\"stateless_reset_token\":{"); p = write_pair_hex(p, "data", paddr->stateless_reset_token, @@ -1113,16 +1138,15 @@ void ngtcp2_qlog_retry_pkt_received(ngtcp2_qlog *qlog, const ngtcp2_pkt_hd *hd, ",\"name\":\"transport:packet_received\",\"data\":{\"header\":"); if (ngtcp2_buf_left(&buf) < - NGTCP2_QLOG_PKT_HD_OVERHEAD + hd->token.len * 2 + + NGTCP2_QLOG_PKT_HD_OVERHEAD + hd->tokenlen * 2 + sizeof(",\"retry_token\":{\"data\":\"\"}}}\n") - 1 + - retry->token.len * 2) { + retry->tokenlen * 2) { return; } buf.last = write_pkt_hd(buf.last, hd); buf.last = write_verbatim(buf.last, ",\"retry_token\":{"); - buf.last = - write_pair_hex(buf.last, "data", retry->token.base, retry->token.len); + buf.last = write_pair_hex(buf.last, "data", retry->token, retry->tokenlen); buf.last = write_verbatim(buf.last, "}}}\n"); qlog->write(qlog->user_data, NGTCP2_QLOG_WRITE_FLAG_NONE, buf.pos, diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c 
index 74e488bce76f24..c381c231276d34 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.c @@ -31,8 +31,9 @@ #include "ngtcp2_macro.h" -#if defined(_MSC_VER) && !defined(__clang__) && (defined(_M_ARM) || defined(_M_ARM64)) -unsigned int __popcnt(unsigned int x) { +#if defined(_MSC_VER) && !defined(__clang__) && \ + (defined(_M_ARM) || defined(_M_ARM64)) +static unsigned int __popcnt(unsigned int x) { unsigned int c = 0; for (; x; ++c) { x &= x - 1; @@ -63,7 +64,7 @@ void ngtcp2_ringbuf_buf_init(ngtcp2_ringbuf *rb, size_t nmemb, size_t size, rb->buf = buf; rb->mem = mem; - rb->nmemb = nmemb; + rb->mask = nmemb - 1; rb->size = size; rb->first = 0; rb->len = 0; @@ -78,17 +79,19 @@ void ngtcp2_ringbuf_free(ngtcp2_ringbuf *rb) { } void *ngtcp2_ringbuf_push_front(ngtcp2_ringbuf *rb) { - rb->first = (rb->first - 1) & (rb->nmemb - 1); - rb->len = ngtcp2_min(rb->nmemb, rb->len + 1); + rb->first = (rb->first - 1) & rb->mask; + if (rb->len < rb->mask + 1) { + ++rb->len; + } return (void *)&rb->buf[rb->first * rb->size]; } void *ngtcp2_ringbuf_push_back(ngtcp2_ringbuf *rb) { - size_t offset = (rb->first + rb->len) & (rb->nmemb - 1); + size_t offset = (rb->first + rb->len) & rb->mask; - if (rb->len == rb->nmemb) { - rb->first = (rb->first + 1) & (rb->nmemb - 1); + if (rb->len == rb->mask + 1) { + rb->first = (rb->first + 1) & rb->mask; } else { ++rb->len; } @@ -97,7 +100,7 @@ void *ngtcp2_ringbuf_push_back(ngtcp2_ringbuf *rb) { } void ngtcp2_ringbuf_pop_front(ngtcp2_ringbuf *rb) { - rb->first = (rb->first + 1) & (rb->nmemb - 1); + rb->first = (rb->first + 1) & rb->mask; --rb->len; } @@ -107,14 +110,14 @@ void ngtcp2_ringbuf_pop_back(ngtcp2_ringbuf *rb) { } void ngtcp2_ringbuf_resize(ngtcp2_ringbuf *rb, size_t len) { - assert(len <= rb->nmemb); + assert(len <= rb->mask + 1); rb->len = len; } void *ngtcp2_ringbuf_get(ngtcp2_ringbuf *rb, size_t offset) { assert(offset < rb->len); - offset = (rb->first + offset) & (rb->nmemb - 1); + offset = (rb->first + offset) & rb->mask; return &rb->buf[offset * rb->size]; } -int ngtcp2_ringbuf_full(ngtcp2_ringbuf *rb) { return rb->len == rb->nmemb; } +int ngtcp2_ringbuf_full(ngtcp2_ringbuf *rb) { return rb->len == rb->mask + 1; } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h index 16635c941032c7..b28a882c4bae84 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ringbuf.h @@ -37,9 +37,9 @@ typedef struct ngtcp2_ringbuf { /* buf points to the underlying buffer. */ uint8_t *buf; const ngtcp2_mem *mem; - /* nmemb is the number of elements that can be stored in this ring - buffer. */ - size_t nmemb; + /* mask is the bit mask to cover all bits for the maximum number of + elements. The maximum number of elements is mask + 1. */ + size_t mask; /* size is the size of each element. */ size_t size; /* first is the offset to the first element. 
*/ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c index 9c3d75dc33ae0c..5cac383f7bb166 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.c @@ -218,7 +218,7 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, return 0; } -int ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset) { +void ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset) { ngtcp2_rob_gap *g; ngtcp2_rob_data *d; ngtcp2_ksl_it it; @@ -245,13 +245,11 @@ int ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset) { for (; !ngtcp2_ksl_it_end(&it);) { d = ngtcp2_ksl_it_get(&it); if (offset < d->range.begin + rob->chunk) { - return 0; + return; } ngtcp2_ksl_remove_hint(&rob->dataksl, &it, &it, &d->range); ngtcp2_rob_data_del(d, rob->mem); } - - return 0; } size_t ngtcp2_rob_data_at(ngtcp2_rob *rob, const uint8_t **pdest, diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h index c7688df4542956..6518d56c539185 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rob.h @@ -152,14 +152,8 @@ int ngtcp2_rob_push(ngtcp2_rob *rob, uint64_t offset, const uint8_t *data, /* * ngtcp2_rob_remove_prefix removes gap up to |offset|, exclusive. It * also removes data buffer if it is completely included in |offset|. - * - * This function returns 0 if it succeeds, or one of the following - * negative error codes: - * - * NGTCP2_ERR_NOMEM - * Out of memory */ -int ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset); +void ngtcp2_rob_remove_prefix(ngtcp2_rob *rob, uint64_t offset); /* * ngtcp2_rob_data_at stores the pointer to the buffer of stream diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c index 7b50f98d41ec7b..b8587e3e9dbac8 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.c @@ -29,6 +29,7 @@ #include "ngtcp2_rtb.h" #include "ngtcp2_cc.h" #include "ngtcp2_macro.h" +#include "ngtcp2_conn_stat.h" void ngtcp2_rs_init(ngtcp2_rs *rs) { rs->interval = UINT64_MAX; @@ -69,8 +70,8 @@ void ngtcp2_rst_on_pkt_sent(ngtcp2_rst *rst, ngtcp2_rtb_entry *ent, ent->rst.lost = rst->lost; } -int ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, - uint64_t pkt_delivered) { +void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, + uint64_t pkt_delivered) { ngtcp2_rs *rs = &rst->rs; uint64_t rate; @@ -84,7 +85,7 @@ int ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, } if (rs->prior_ts == 0) { - return 0; + return; } rs->interval = ngtcp2_max(rs->send_elapsed, rs->ack_elapsed); @@ -94,11 +95,11 @@ int ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, if (rs->interval < cstat->min_rtt) { rs->interval = UINT64_MAX; - return 0; + return; } if (!rs->interval) { - return 0; + return; } rate = rs->delivered * NGTCP2_SECONDS / rs->interval; @@ -107,8 +108,6 @@ int ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, ngtcp2_window_filter_update(&rst->wf, rate, rst->round_count); cstat->delivery_rate_sec = ngtcp2_window_filter_get_best(&rst->wf); } - - return 0; } void ngtcp2_rst_update_rate_sample(ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h index 488c65575a5589..c9e1e161b7766f 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rst.h @@ -34,6 +34,7 @@ #include "ngtcp2_window_filter.h" typedef struct ngtcp2_rtb_entry 
ngtcp2_rtb_entry; +typedef struct ngtcp2_conn_stat ngtcp2_conn_stat; /** * @struct @@ -76,8 +77,8 @@ void ngtcp2_rst_init(ngtcp2_rst *rst); void ngtcp2_rst_on_pkt_sent(ngtcp2_rst *rst, ngtcp2_rtb_entry *ent, const ngtcp2_conn_stat *cstat); -int ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, - uint64_t pkt_delivered); +void ngtcp2_rst_on_ack_recv(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat, + uint64_t pkt_delivered); void ngtcp2_rst_update_rate_sample(ngtcp2_rst *rst, const ngtcp2_rtb_entry *ent, ngtcp2_tstamp ts); void ngtcp2_rst_update_app_limited(ngtcp2_rst *rst, ngtcp2_conn_stat *cstat); diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c index 644071400a6eb7..b9e0139bddfcac 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c @@ -34,220 +34,11 @@ #include "ngtcp2_cc.h" #include "ngtcp2_rcvry.h" #include "ngtcp2_rst.h" +#include "ngtcp2_unreachable.h" +#include "ngtcp2_tstamp.h" +#include "ngtcp2_frame_chain.h" -int ngtcp2_frame_chain_new(ngtcp2_frame_chain **pfrc, const ngtcp2_mem *mem) { - *pfrc = ngtcp2_mem_malloc(mem, sizeof(ngtcp2_frame_chain)); - if (*pfrc == NULL) { - return NGTCP2_ERR_NOMEM; - } - - ngtcp2_frame_chain_init(*pfrc); - - return 0; -} - -int ngtcp2_frame_chain_objalloc_new(ngtcp2_frame_chain **pfrc, - ngtcp2_objalloc *objalloc) { - *pfrc = ngtcp2_objalloc_frame_chain_get(objalloc); - if (*pfrc == NULL) { - return NGTCP2_ERR_NOMEM; - } - - ngtcp2_frame_chain_init(*pfrc); - - return 0; -} - -int ngtcp2_frame_chain_extralen_new(ngtcp2_frame_chain **pfrc, size_t extralen, - const ngtcp2_mem *mem) { - *pfrc = ngtcp2_mem_malloc(mem, sizeof(ngtcp2_frame_chain) + extralen); - if (*pfrc == NULL) { - return NGTCP2_ERR_NOMEM; - } - - ngtcp2_frame_chain_init(*pfrc); - - return 0; -} - -int ngtcp2_frame_chain_stream_datacnt_objalloc_new(ngtcp2_frame_chain **pfrc, - size_t datacnt, - ngtcp2_objalloc *objalloc, - const ngtcp2_mem *mem) { - size_t need, avail = sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream); - - if (datacnt > 1) { - need = sizeof(ngtcp2_vec) * (datacnt - 1); - - if (need > avail) { - return ngtcp2_frame_chain_extralen_new(pfrc, need - avail, mem); - } - } - - return ngtcp2_frame_chain_objalloc_new(pfrc, objalloc); -} - -int ngtcp2_frame_chain_crypto_datacnt_objalloc_new(ngtcp2_frame_chain **pfrc, - size_t datacnt, - ngtcp2_objalloc *objalloc, - const ngtcp2_mem *mem) { - size_t need, avail = sizeof(ngtcp2_frame) - sizeof(ngtcp2_crypto); - - if (datacnt > 1) { - need = sizeof(ngtcp2_vec) * (datacnt - 1); - - if (need > avail) { - return ngtcp2_frame_chain_extralen_new(pfrc, need - avail, mem); - } - } - - return ngtcp2_frame_chain_objalloc_new(pfrc, objalloc); -} - -int ngtcp2_frame_chain_new_token_objalloc_new(ngtcp2_frame_chain **pfrc, - const ngtcp2_vec *token, - ngtcp2_objalloc *objalloc, - const ngtcp2_mem *mem) { - size_t avail = sizeof(ngtcp2_frame) - sizeof(ngtcp2_new_token); - int rv; - uint8_t *p; - ngtcp2_frame *fr; - - if (token->len > avail) { - rv = ngtcp2_frame_chain_extralen_new(pfrc, token->len - avail, mem); - } else { - rv = ngtcp2_frame_chain_objalloc_new(pfrc, objalloc); - } - if (rv != 0) { - return rv; - } - - fr = &(*pfrc)->fr; - fr->type = NGTCP2_FRAME_NEW_TOKEN; - - p = (uint8_t *)fr + sizeof(ngtcp2_new_token); - memcpy(p, token->base, token->len); - - ngtcp2_vec_init(&fr->new_token.token, p, token->len); - - return 0; -} - -void ngtcp2_frame_chain_del(ngtcp2_frame_chain *frc, const ngtcp2_mem *mem) { - ngtcp2_frame_chain_binder *binder; - - if (frc == 
NULL) { - return; - } - - binder = frc->binder; - if (binder && --binder->refcount == 0) { - ngtcp2_mem_free(mem, binder); - } - - ngtcp2_mem_free(mem, frc); -} - -void ngtcp2_frame_chain_objalloc_del(ngtcp2_frame_chain *frc, - ngtcp2_objalloc *objalloc, - const ngtcp2_mem *mem) { - ngtcp2_frame_chain_binder *binder; - - if (frc == NULL) { - return; - } - - switch (frc->fr.type) { - case NGTCP2_FRAME_STREAM: - if (frc->fr.stream.datacnt && - sizeof(ngtcp2_vec) * (frc->fr.stream.datacnt - 1) > - sizeof(ngtcp2_frame) - sizeof(ngtcp2_stream)) { - ngtcp2_frame_chain_del(frc, mem); - - return; - } - - break; - case NGTCP2_FRAME_CRYPTO: - if (frc->fr.crypto.datacnt && - sizeof(ngtcp2_vec) * (frc->fr.crypto.datacnt - 1) > - sizeof(ngtcp2_frame) - sizeof(ngtcp2_crypto)) { - ngtcp2_frame_chain_del(frc, mem); - - return; - } - - break; - case NGTCP2_FRAME_NEW_TOKEN: - if (frc->fr.new_token.token.len > - sizeof(ngtcp2_frame) - sizeof(ngtcp2_new_token)) { - ngtcp2_frame_chain_del(frc, mem); - - return; - } - - break; - } - - binder = frc->binder; - if (binder && --binder->refcount == 0) { - ngtcp2_mem_free(mem, binder); - } - - frc->binder = NULL; - - ngtcp2_objalloc_frame_chain_release(objalloc, frc); -} - -void ngtcp2_frame_chain_init(ngtcp2_frame_chain *frc) { - frc->next = NULL; - frc->binder = NULL; -} - -void ngtcp2_frame_chain_list_objalloc_del(ngtcp2_frame_chain *frc, - ngtcp2_objalloc *objalloc, - const ngtcp2_mem *mem) { - ngtcp2_frame_chain *next; - - for (; frc; frc = next) { - next = frc->next; - - ngtcp2_frame_chain_objalloc_del(frc, objalloc, mem); - } -} - -int ngtcp2_frame_chain_binder_new(ngtcp2_frame_chain_binder **pbinder, - const ngtcp2_mem *mem) { - *pbinder = ngtcp2_mem_calloc(mem, 1, sizeof(ngtcp2_frame_chain_binder)); - if (*pbinder == NULL) { - return NGTCP2_ERR_NOMEM; - } - - return 0; -} - -int ngtcp2_bind_frame_chains(ngtcp2_frame_chain *a, ngtcp2_frame_chain *b, - const ngtcp2_mem *mem) { - ngtcp2_frame_chain_binder *binder; - int rv; - - assert(b->binder == NULL); - - if (a->binder == NULL) { - rv = ngtcp2_frame_chain_binder_new(&binder, mem); - if (rv != 0) { - return rv; - } - - a->binder = binder; - ++a->binder->refcount; - } - - b->binder = a->binder; - ++b->binder->refcount; - - return 0; -} +ngtcp2_objalloc_def(rtb_entry, ngtcp2_rtb_entry, oplent); static void rtb_entry_init(ngtcp2_rtb_entry *ent, const ngtcp2_pkt_hd *hd, ngtcp2_frame_chain *frc, ngtcp2_tstamp ts, @@ -297,7 +88,7 @@ static int greater(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_pktns_id pktns_id, ngtcp2_strm *crypto, ngtcp2_rst *rst, ngtcp2_cc *cc, - ngtcp2_log *log, ngtcp2_qlog *qlog, + int64_t cc_pkt_num, ngtcp2_log *log, ngtcp2_qlog *qlog, ngtcp2_objalloc *rtb_entry_objalloc, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem) { rtb->rtb_entry_objalloc = rtb_entry_objalloc; @@ -315,7 +106,7 @@ void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_pktns_id pktns_id, rtb->num_pto_eliciting = 0; rtb->probe_pkt_left = 0; rtb->pktns_id = pktns_id; - rtb->cc_pkt_num = 0; + rtb->cc_pkt_num = cc_pkt_num; rtb->cc_bytes_in_flight = 0; rtb->persistent_congestion_start_ts = UINT64_MAX; rtb->num_lost_pkts = 0; @@ -432,7 +223,6 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, ngtcp2_range gap, range; size_t num_reclaimed = 0; int rv; - int streamfrq_empty; assert(ent->flags & NGTCP2_RTB_ENTRY_FLAG_RETRANSMITTABLE); @@ -487,7 +277,6 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, 
ngtcp2_vec_copy(nfrc->fr.stream.data, fr->stream.data, fr->stream.datacnt); - streamfrq_empty = ngtcp2_strm_streamfrq_empty(strm); rv = ngtcp2_strm_streamfrq_push(strm, nfrc); if (rv != 0) { ngtcp2_frame_chain_objalloc_del(nfrc, rtb->frc_objalloc, rtb->mem); @@ -500,9 +289,6 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, return rv; } } - if (streamfrq_empty) { - ++conn->tx.strmq_nretrans; - } ++num_reclaimed; @@ -510,28 +296,27 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, case NGTCP2_FRAME_CRYPTO: /* Don't resend CRYPTO frame if the whole region it contains has been acknowledged */ - gap = ngtcp2_strm_get_unacked_range_after(rtb->crypto, fr->crypto.offset); + gap = ngtcp2_strm_get_unacked_range_after(rtb->crypto, fr->stream.offset); - range.begin = fr->crypto.offset; - range.end = fr->crypto.offset + - ngtcp2_vec_len(fr->crypto.data, fr->crypto.datacnt); + range.begin = fr->stream.offset; + range.end = fr->stream.offset + + ngtcp2_vec_len(fr->stream.data, fr->stream.datacnt); range = ngtcp2_range_intersect(&range, &gap); if (ngtcp2_range_len(&range) == 0) { continue; } - rv = ngtcp2_frame_chain_crypto_datacnt_objalloc_new( - &nfrc, fr->crypto.datacnt, rtb->frc_objalloc, rtb->mem); + rv = ngtcp2_frame_chain_stream_datacnt_objalloc_new( + &nfrc, fr->stream.datacnt, rtb->frc_objalloc, rtb->mem); if (rv != 0) { return rv; } nfrc->fr = *fr; - ngtcp2_vec_copy(nfrc->fr.crypto.data, fr->crypto.data, - fr->crypto.datacnt); + ngtcp2_vec_copy(nfrc->fr.stream.data, fr->stream.data, + fr->stream.datacnt); - rv = ngtcp2_ksl_insert(&pktns->crypto.tx.frq, NULL, - &nfrc->fr.crypto.offset, nfrc); + rv = ngtcp2_strm_streamfrq_push(&pktns->crypto.strm, nfrc); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(nfrc, rtb->frc_objalloc, rtb->mem); @@ -543,7 +328,8 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, continue; case NGTCP2_FRAME_NEW_TOKEN: rv = ngtcp2_frame_chain_new_token_objalloc_new( - &nfrc, &fr->new_token.token, rtb->frc_objalloc, rtb->mem); + &nfrc, fr->new_token.token, fr->new_token.tokenlen, rtb->frc_objalloc, + rtb->mem); if (rv != 0) { return rv; } @@ -638,7 +424,7 @@ static int rtb_on_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_ksl_it *it, } if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_PTO_RECLAIMED) { - ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, "pkn=%" PRId64 " has already been reclaimed on PTO", ent->hd.pkt_num); assert(!(ent->flags & NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED)); @@ -770,6 +556,10 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, for (frc = ent->frc; frc; frc = frc->next) { if (frc->binder) { + if (frc->binder->flags & NGTCP2_FRAME_CHAIN_BINDER_FLAG_ACK) { + continue; + } + frc->binder->flags |= NGTCP2_FRAME_CHAIN_BINDER_FLAG_ACK; } @@ -817,8 +607,8 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, case NGTCP2_FRAME_CRYPTO: prev_stream_offset = ngtcp2_strm_get_acked_offset(crypto); rv = ngtcp2_strm_ack_data( - crypto, frc->fr.crypto.offset, - ngtcp2_vec_len(frc->fr.crypto.data, frc->fr.crypto.datacnt)); + crypto, frc->fr.stream.offset, + ngtcp2_vec_len(frc->fr.stream.data, frc->fr.stream.datacnt)); if (rv != 0) { return rv; } @@ -840,7 +630,7 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, pktns = &conn->pktns; break; default: - assert(0); + ngtcp2_unreachable(); } conn_ack_crypto_data(conn, pktns, datalen); @@ -851,7 +641,7 @@ static int 
rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, if (strm == NULL) { break; } - strm->flags |= NGTCP2_STRM_FLAG_RST_ACKED; + strm->flags |= NGTCP2_STRM_FLAG_RESET_STREAM_ACKED; rv = ngtcp2_conn_close_stream_if_shut_rdwr(conn, strm); if (rv != 0) { return rv; @@ -861,6 +651,12 @@ static int rtb_process_acked_pkt(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, ngtcp2_conn_untrack_retired_dcid_seq(conn, frc->fr.retire_connection_id.seq); break; + case NGTCP2_FRAME_NEW_CONNECTION_ID: + assert(conn->scid.num_in_flight); + + --conn->scid.num_in_flight; + + break; case NGTCP2_FRAME_DATAGRAM: case NGTCP2_FRAME_DATAGRAM_LEN: if (!conn->callbacks.ack_datagram) { @@ -885,12 +681,14 @@ static void rtb_on_pkt_acked(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, ngtcp2_rst_update_rate_sample(rtb->rst, ent, ts); - cc->on_pkt_acked(cc, cstat, - ngtcp2_cc_pkt_init(&pkt, ent->hd.pkt_num, ent->pktlen, - rtb->pktns_id, ent->ts, ent->rst.lost, - ent->rst.tx_in_flight, - ent->rst.is_app_limited), - ts); + if (cc->on_pkt_acked) { + cc->on_pkt_acked(cc, cstat, + ngtcp2_cc_pkt_init(&pkt, ent->hd.pkt_num, ent->pktlen, + rtb->pktns_id, ent->ts, ent->rst.lost, + ent->rst.tx_in_flight, + ent->rst.is_app_limited), + ts); + } if (!(ent->flags & NGTCP2_RTB_ENTRY_FLAG_PROBE) && (ent->flags & NGTCP2_RTB_ENTRY_FLAG_ACK_ELICITING)) { @@ -901,7 +699,7 @@ static void rtb_on_pkt_acked(ngtcp2_rtb *rtb, ngtcp2_rtb_entry *ent, static void conn_verify_ecn(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_cc *cc, ngtcp2_conn_stat *cstat, const ngtcp2_ack *fr, size_t ecn_acked, - ngtcp2_tstamp largest_acked_sent_ts, + ngtcp2_tstamp largest_pkt_sent_ts, ngtcp2_tstamp ts) { if (conn->tx.ecn.state == NGTCP2_ECN_STATE_FAILED) { return; @@ -928,9 +726,9 @@ static void conn_verify_ecn(ngtcp2_conn *conn, ngtcp2_pktns *pktns, } if (fr->type == NGTCP2_FRAME_ACK_ECN) { - if (largest_acked_sent_ts != UINT64_MAX && + if (cc->congestion_event && largest_pkt_sent_ts != UINT64_MAX && fr->ecn.ce > pktns->rx.ecn.ack.ce) { - cc->congestion_event(cc, cstat, largest_acked_sent_ts, ts); + cc->congestion_event(cc, cstat, largest_pkt_sent_ts, ts); } pktns->rx.ecn.ack.ect0 = fr->ecn.ect0; @@ -954,7 +752,6 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, ngtcp2_ksl_it it; ngtcp2_ssize num_acked = 0; ngtcp2_tstamp largest_pkt_sent_ts = UINT64_MAX; - ngtcp2_tstamp largest_acked_sent_ts = UINT64_MAX; int64_t pkt_num; ngtcp2_cc *cc = rtb->cc; ngtcp2_rtb_entry *acked_ent = NULL; @@ -987,12 +784,12 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, if (ngtcp2_ksl_it_end(&it)) { if (conn && verify_ecn) { conn_verify_ecn(conn, pktns, rtb->cc, cstat, fr, ecn_acked, - largest_acked_sent_ts, ts); + largest_pkt_sent_ts, ts); } return 0; } - min_ack = largest_ack - (int64_t)fr->first_ack_blklen; + min_ack = largest_ack - (int64_t)fr->first_ack_range; for (; !ngtcp2_ksl_it_end(&it);) { pkt_num = *(int64_t *)ngtcp2_ksl_it_key(&it); @@ -1017,9 +814,9 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, ++num_acked; } - for (i = 0; i < fr->num_blks;) { - largest_ack = min_ack - (int64_t)fr->blks[i].gap - 2; - min_ack = largest_ack - (int64_t)fr->blks[i].blklen; + for (i = 0; i < fr->rangecnt;) { + largest_ack = min_ack - (int64_t)fr->ranges[i].gap - 2; + min_ack = largest_ack - (int64_t)fr->ranges[i].len; it = ngtcp2_ksl_lower_bound(&rtb->ents, &largest_ack); if (ngtcp2_ksl_it_end(&it)) { @@ -1060,11 +857,6 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, ++ecn_acked; } - 
assert(largest_acked_sent_ts == UINT64_MAX || - largest_acked_sent_ts <= ent->ts); - - largest_acked_sent_ts = ent->ts; - rv = rtb_process_acked_pkt(rtb, ent, conn); if (rv != 0) { goto fail; @@ -1085,7 +877,7 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, if (verify_ecn) { conn_verify_ecn(conn, pktns, rtb->cc, cstat, fr, ecn_acked, - largest_acked_sent_ts, ts); + largest_pkt_sent_ts, ts); } } else { /* For unit tests */ @@ -1113,8 +905,10 @@ ngtcp2_ssize ngtcp2_rtb_recv_ack(ngtcp2_rtb *rtb, const ngtcp2_ack *fr, rtb->rst->lost += cc_ack.bytes_lost; - cc_ack.largest_acked_sent_ts = largest_acked_sent_ts; - cc->on_ack_recv(cc, cstat, &cc_ack, ts); + cc_ack.largest_pkt_sent_ts = largest_pkt_sent_ts; + if (cc->on_ack_recv) { + cc->on_ack_recv(cc, cstat, &cc_ack, ts); + } return num_acked; @@ -1133,7 +927,7 @@ static int rtb_pkt_lost(ngtcp2_rtb *rtb, ngtcp2_conn_stat *cstat, size_t pkt_thres, ngtcp2_tstamp ts) { ngtcp2_tstamp loss_time; - if (ent->ts + loss_delay <= ts || + if (ngtcp2_tstamp_elapsed(ent->ts, loss_delay, ts) || rtb->largest_acked_tx_pkt_num >= ent->hd.pkt_num + (int64_t)pkt_thres) { return 1; } @@ -1189,7 +983,7 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, ngtcp2_cc *cc = rtb->cc; int rv; uint64_t pkt_thres = - rtb->cc_bytes_in_flight / cstat->max_udp_payload_size / 2; + rtb->cc_bytes_in_flight / cstat->max_tx_udp_payload_size / 2; size_t ecn_pkt_lost = 0; ngtcp2_tstamp start_ts; ngtcp2_duration pto = ngtcp2_conn_compute_pto(conn, pktns); @@ -1288,7 +1082,9 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, break; } - cc->congestion_event(cc, cstat, latest_ts, ts); + if (cc->congestion_event) { + cc->congestion_event(cc, cstat, latest_ts, ts); + } loss_window = latest_ts - oldest_ts; /* Persistent congestion situation is only evaluated for app @@ -1300,7 +1096,7 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, */ if (rtb->pktns_id == NGTCP2_PKTNS_ID_APPLICATION && loss_window > 0) { if (loss_window >= congestion_period) { - ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, "persistent congestion loss_window=%" PRIu64 " congestion_period=%" PRIu64, loss_window, congestion_period); @@ -1312,7 +1108,9 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, cstat->rttvar = conn->local.settings.initial_rtt / 2; cstat->first_rtt_sample_ts = UINT64_MAX; - cc->on_persistent_congestion(cc, cstat, ts); + if (cc->on_persistent_congestion) { + cc->on_persistent_congestion(cc, cstat, ts); + } } } @@ -1349,7 +1147,7 @@ void ngtcp2_rtb_remove_excessive_lost_pkt(ngtcp2_rtb *rtb, size_t n) { assert(ent->flags & NGTCP2_RTB_ENTRY_FLAG_LOST_RETRANSMITTED); - ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, "removing stale lost pkn=%" PRId64, ent->hd.pkt_num); --rtb->num_lost_pkts; @@ -1389,7 +1187,7 @@ void ngtcp2_rtb_remove_expired_lost_pkt(ngtcp2_rtb *rtb, ngtcp2_duration pto, return; } - ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, "removing stale lost pkn=%" PRId64, ent->hd.pkt_num); --rtb->num_lost_pkts; @@ -1435,7 +1233,6 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, ngtcp2_stream *sfr; ngtcp2_strm *strm; int rv; - int streamfrq_empty; ngtcp2_log_pkt_lost(rtb->log, ent->hd.pkt_num, ent->hd.type, ent->hd.flags, ent->ts); @@ -1445,7 +1242,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, 
ngtcp2_conn *conn, } if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_PROBE) { - ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, "pkn=%" PRId64 " is a probe packet, no retransmission is necessary", ent->hd.pkt_num); @@ -1453,7 +1250,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, } if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_PMTUD_PROBE) { - ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, "pkn=%" PRId64 " is a PMTUD probe packet, no retransmission is necessary", ent->hd.pkt_num); @@ -1467,7 +1264,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, --rtb->num_lost_pmtud_pkts; } - ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, "pkn=%" PRId64 " was declared lost and has already been retransmitted", ent->hd.pkt_num); @@ -1475,7 +1272,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, } if (ent->flags & NGTCP2_RTB_ENTRY_FLAG_PTO_RECLAIMED) { - ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_RCV, + ngtcp2_log_info(rtb->log, NGTCP2_LOG_EVENT_LDC, "pkn=%" PRId64 " has already been reclaimed on PTO", ent->hd.pkt_num); return 0; @@ -1505,7 +1302,6 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); break; } - streamfrq_empty = ngtcp2_strm_streamfrq_empty(strm); rv = ngtcp2_strm_streamfrq_push(strm, frc); if (rv != 0) { ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); @@ -1518,9 +1314,6 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, return rv; } } - if (streamfrq_empty) { - ++conn->tx.strmq_nretrans; - } break; case NGTCP2_FRAME_CRYPTO: frc = *pfrc; @@ -1528,8 +1321,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, *pfrc = frc->next; frc->next = NULL; - rv = ngtcp2_ksl_insert(&pktns->crypto.tx.frq, NULL, - &frc->fr.crypto.offset, frc); + rv = ngtcp2_strm_streamfrq_push(&pktns->crypto.strm, frc); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h index a97805dbaf3bc3..a1ff208b19eac7 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.h @@ -35,6 +35,7 @@ #include "ngtcp2_ksl.h" #include "ngtcp2_pq.h" #include "ngtcp2_objalloc.h" +#include "ngtcp2_pktns_id.h" typedef struct ngtcp2_conn ngtcp2_conn; typedef struct ngtcp2_pktns ngtcp2_pktns; @@ -43,156 +44,9 @@ typedef struct ngtcp2_qlog ngtcp2_qlog; typedef struct ngtcp2_strm ngtcp2_strm; typedef struct ngtcp2_rst ngtcp2_rst; typedef struct ngtcp2_cc ngtcp2_cc; - -/* NGTCP2_FRAME_CHAIN_BINDER_FLAG_NONE indicates that no flag is - set. */ -#define NGTCP2_FRAME_CHAIN_BINDER_FLAG_NONE 0x00u -/* NGTCP2_FRAME_CHAIN_BINDER_FLAG_ACK indicates that an information - which a frame carries has been acknowledged. */ -#define NGTCP2_FRAME_CHAIN_BINDER_FLAG_ACK 0x01u - -/* - * ngtcp2_frame_chain_binder binds 2 or more of ngtcp2_frame_chain to - * share the acknowledgement state. In general, all - * ngtcp2_frame_chains bound to the same binder must have the same - * information. - */ -typedef struct ngtcp2_frame_chain_binder { - size_t refcount; - /* flags is bitwise OR of zero or more of - NGTCP2_FRAME_CHAIN_BINDER_FLAG_*. 
*/ - uint32_t flags; -} ngtcp2_frame_chain_binder; - -int ngtcp2_frame_chain_binder_new(ngtcp2_frame_chain_binder **pbinder, - const ngtcp2_mem *mem); - +typedef struct ngtcp2_conn_stat ngtcp2_conn_stat; typedef struct ngtcp2_frame_chain ngtcp2_frame_chain; -/* - * ngtcp2_frame_chain chains frames in a single packet. - */ -struct ngtcp2_frame_chain { - union { - struct { - ngtcp2_frame_chain *next; - ngtcp2_frame_chain_binder *binder; - ngtcp2_frame fr; - }; - - ngtcp2_opl_entry oplent; - }; -}; - -ngtcp2_objalloc_def(frame_chain, ngtcp2_frame_chain, oplent); - -/* - * ngtcp2_bind_frame_chains binds two frame chains |a| and |b| using - * new or existing ngtcp2_frame_chain_binder. |a| might have non-NULL - * a->binder. |b| must not have non-NULL b->binder. - * - * This function returns 0 if it succeeds, or one of the following - * negative error codes: - * - * NGTCP2_ERR_NOMEM - * Out of memory - */ -int ngtcp2_bind_frame_chains(ngtcp2_frame_chain *a, ngtcp2_frame_chain *b, - const ngtcp2_mem *mem); - -/* NGTCP2_MAX_STREAM_DATACNT is the maximum number of ngtcp2_vec that - a ngtcp2_stream can include. */ -#define NGTCP2_MAX_STREAM_DATACNT 256 - -/* NGTCP2_MAX_CRYPTO_DATACNT is the maximum number of ngtcp2_vec that - a ngtcp2_crypto can include. */ -#define NGTCP2_MAX_CRYPTO_DATACNT 8 - -/* - * ngtcp2_frame_chain_new allocates ngtcp2_frame_chain object and - * assigns its pointer to |*pfrc|. - * - * This function returns 0 if it succeeds, or one of the following - * negative error codes: - * - * NGTCP2_ERR_NOMEM - * Out of memory. - */ -int ngtcp2_frame_chain_new(ngtcp2_frame_chain **pfrc, const ngtcp2_mem *mem); - -/* - * ngtcp2_frame_chain_objalloc_new behaves like - * ngtcp2_frame_chain_new, but it uses |objalloc| to allocate the object. - */ -int ngtcp2_frame_chain_objalloc_new(ngtcp2_frame_chain **pfrc, - ngtcp2_objalloc *objalloc); - -/* - * ngtcp2_frame_chain_extralen_new works like ngtcp2_frame_chain_new, - * but it allocates extra memory |extralen| in order to extend - * ngtcp2_frame. - */ -int ngtcp2_frame_chain_extralen_new(ngtcp2_frame_chain **pfrc, size_t extralen, - const ngtcp2_mem *mem); - -/* - * ngtcp2_frame_chain_stream_datacnt_objalloc_new works like - * ngtcp2_frame_chain_new, but it allocates enough data to store - * additional |datacnt| - 1 ngtcp2_vec object after ngtcp2_stream - * object. If no additional space is required, - * ngtcp2_frame_chain_objalloc_new is called internally. - */ -int ngtcp2_frame_chain_stream_datacnt_objalloc_new(ngtcp2_frame_chain **pfrc, - size_t datacnt, - ngtcp2_objalloc *objalloc, - const ngtcp2_mem *mem); - -/* - * ngtcp2_frame_chain_crypto_datacnt_objalloc_new works like - * ngtcp2_frame_chain_new, but it allocates enough data to store - * additional |datacnt| - 1 ngtcp2_vec object after ngtcp2_crypto - * object. If no additional space is required, - * ngtcp2_frame_chain_objalloc_new is called internally. - */ -int ngtcp2_frame_chain_crypto_datacnt_objalloc_new(ngtcp2_frame_chain **pfrc, - size_t datacnt, - ngtcp2_objalloc *objalloc, - const ngtcp2_mem *mem); - -int ngtcp2_frame_chain_new_token_objalloc_new(ngtcp2_frame_chain **pfrc, - const ngtcp2_vec *token, - ngtcp2_objalloc *objalloc, - const ngtcp2_mem *mem); - -/* - * ngtcp2_frame_chain_del deallocates |frc|. It also deallocates the - * memory pointed by |frc|. - */ -void ngtcp2_frame_chain_del(ngtcp2_frame_chain *frc, const ngtcp2_mem *mem); - -/* - * ngtcp2_frame_chain_objalloc_del adds |frc| to |objalloc| for reuse. 
- * It might just delete |frc| depending on the frame type and the size - * of |frc|. - */ -void ngtcp2_frame_chain_objalloc_del(ngtcp2_frame_chain *frc, - ngtcp2_objalloc *objalloc, - const ngtcp2_mem *mem); - -/* - * ngtcp2_frame_chain_init initializes |frc|. - */ -void ngtcp2_frame_chain_init(ngtcp2_frame_chain *frc); - -/* - * ngtcp2_frame_chain_list_objalloc_del adds all ngtcp2_frame_chain - * linked from |frc| to |objalloc| for reuse. Depending on the frame type - * and its size, ngtcp2_frame_chain might be deleted instead. - */ -void ngtcp2_frame_chain_list_objalloc_del(ngtcp2_frame_chain *frc, - ngtcp2_objalloc *objalloc, - const ngtcp2_mem *mem); - /* NGTCP2_RTB_ENTRY_FLAG_NONE indicates that no flag is set. */ #define NGTCP2_RTB_ENTRY_FLAG_NONE 0x00u /* NGTCP2_RTB_ENTRY_FLAG_PROBE indicates that the entry includes a @@ -268,7 +122,7 @@ struct ngtcp2_rtb_entry { }; }; -ngtcp2_objalloc_def(rtb_entry, ngtcp2_rtb_entry, oplent); +ngtcp2_objalloc_decl(rtb_entry, ngtcp2_rtb_entry, oplent); /* * ngtcp2_rtb_entry_new allocates ngtcp2_rtb_entry object, and assigns @@ -347,7 +201,7 @@ typedef struct ngtcp2_rtb { */ void ngtcp2_rtb_init(ngtcp2_rtb *rtb, ngtcp2_pktns_id pktns_id, ngtcp2_strm *crypto, ngtcp2_rst *rst, ngtcp2_cc *cc, - ngtcp2_log *log, ngtcp2_qlog *qlog, + int64_t cc_pkt_num, ngtcp2_log *log, ngtcp2_qlog *qlog, ngtcp2_objalloc *rtb_entry_objalloc, ngtcp2_objalloc *frc_objalloc, const ngtcp2_mem *mem); diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.c index c1ce64a2e57ac4..a61636d188fae5 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.c @@ -38,6 +38,11 @@ uint8_t *ngtcp2_setmem(uint8_t *dest, uint8_t b, size_t n) { return dest + n; } +const void *ngtcp2_get_bytes(void *dest, const void *src, size_t n) { + memcpy(dest, src, n); + return (uint8_t *)src + n; +} + #define LOWER_XDIGITS "0123456789abcdef" uint8_t *ngtcp2_encode_hex(uint8_t *dest, const uint8_t *data, size_t len) { diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h index 04735d6dec5c63..deb75e356d70d4 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_str.h @@ -38,6 +38,13 @@ void *ngtcp2_cpymem(void *dest, const void *src, size_t n); * the buffer pointed by |dest|. It returns dest + n; */ uint8_t *ngtcp2_setmem(uint8_t *dest, uint8_t b, size_t n); + +/* + * ngtcp2_get_bytes copies |n| bytes from |src| to |dest|, and returns + * |src| + |n|. + */ +const void *ngtcp2_get_bytes(void *dest, const void *src, size_t n); + /* * ngtcp2_encode_hex encodes |data| of length |len| in hex string. It * writes additional NULL bytes at the end of the buffer. 
The buffer diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c index 6f20e866ad51c0..6bbeb8f9f81fc2 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c @@ -30,6 +30,7 @@ #include "ngtcp2_rtb.h" #include "ngtcp2_pkt.h" #include "ngtcp2_vec.h" +#include "ngtcp2_frame_chain.h" static int offset_less(const ngtcp2_ksl_key *lhs, const ngtcp2_ksl_key *rhs) { return *(int64_t *)lhs < *(int64_t *)rhs; @@ -46,9 +47,12 @@ void ngtcp2_strm_init(ngtcp2_strm *strm, int64_t stream_id, uint32_t flags, strm->tx.streamfrq = NULL; strm->tx.offset = 0; strm->tx.max_offset = max_tx_offset; + strm->tx.last_blocked_offset = UINT64_MAX; strm->tx.last_max_stream_data_ts = UINT64_MAX; strm->tx.loss_count = 0; strm->tx.last_lost_pkt_num = -1; + strm->tx.stop_sending_app_error_code = 0; + strm->tx.reset_stream_app_error_code = 0; strm->rx.rob = NULL; strm->rx.cont_offset = 0; strm->rx.last_offset = 0; @@ -120,7 +124,7 @@ uint64_t ngtcp2_strm_rx_offset(ngtcp2_strm *strm) { /* strm_rob_heavily_fragmented returns nonzero if the number of gaps in |rob| exceeds the limit. */ static int strm_rob_heavily_fragmented(ngtcp2_rob *rob) { - return ngtcp2_ksl_len(&rob->gapksl) >= 1000; + return ngtcp2_ksl_len(&rob->gapksl) >= 5000; } int ngtcp2_strm_recv_reordering(ngtcp2_strm *strm, const uint8_t *data, @@ -134,10 +138,7 @@ int ngtcp2_strm_recv_reordering(ngtcp2_strm *strm, const uint8_t *data, } if (strm->rx.cont_offset) { - rv = ngtcp2_rob_remove_prefix(strm->rx.rob, strm->rx.cont_offset); - if (rv != 0) { - return rv; - } + ngtcp2_rob_remove_prefix(strm->rx.rob, strm->rx.cont_offset); } } @@ -148,13 +149,13 @@ int ngtcp2_strm_recv_reordering(ngtcp2_strm *strm, const uint8_t *data, return ngtcp2_rob_push(strm->rx.rob, offset, data, datalen); } -int ngtcp2_strm_update_rx_offset(ngtcp2_strm *strm, uint64_t offset) { +void ngtcp2_strm_update_rx_offset(ngtcp2_strm *strm, uint64_t offset) { if (strm->rx.rob == NULL) { strm->rx.cont_offset = offset; - return 0; + return; } - return ngtcp2_rob_remove_prefix(strm->rx.rob, offset); + ngtcp2_rob_remove_prefix(strm->rx.rob, offset); } void ngtcp2_strm_shutdown(ngtcp2_strm *strm, uint32_t flags) { @@ -177,7 +178,8 @@ static int strm_streamfrq_init(ngtcp2_strm *strm) { int ngtcp2_strm_streamfrq_push(ngtcp2_strm *strm, ngtcp2_frame_chain *frc) { int rv; - assert(frc->fr.type == NGTCP2_FRAME_STREAM); + assert(frc->fr.type == NGTCP2_FRAME_STREAM || + frc->fr.type == NGTCP2_FRAME_CRYPTO); assert(frc->next == NULL); if (strm->tx.streamfrq == NULL) { @@ -308,7 +310,7 @@ static int strm_streamfrq_unacked_pop(ngtcp2_strm *strm, assert(nfr->data[0].len > end_base_offset); - nfr->type = NGTCP2_FRAME_STREAM; + nfr->type = fr->type; nfr->flags = 0; nfr->fin = fr->fin; nfr->stream_id = fr->stream_id; @@ -379,18 +381,16 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, fr = &frc->fr.stream; datalen = ngtcp2_vec_len(fr->data, fr->datacnt); - if (left == 0) { - /* datalen could be zero if 0 length STREAM has been sent */ - if (datalen || ngtcp2_ksl_len(strm->tx.streamfrq) > 1) { - rv = ngtcp2_ksl_insert(strm->tx.streamfrq, NULL, &fr->offset, frc); - if (rv != 0) { - assert(ngtcp2_err_is_fatal(rv)); - ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); - return rv; - } - *pfrc = NULL; - return 0; + /* datalen could be zero if 0 length STREAM has been sent */ + if (left == 0 && datalen) { + rv = ngtcp2_ksl_insert(strm->tx.streamfrq, NULL, &fr->offset, frc); + if (rv != 0) { + 
assert(ngtcp2_err_is_fatal(rv)); + ngtcp2_frame_chain_objalloc_del(frc, strm->frc_objalloc, strm->mem); + return rv; } + *pfrc = NULL; + return 0; } if (datalen > left) { @@ -412,7 +412,7 @@ int ngtcp2_strm_streamfrq_pop(ngtcp2_strm *strm, ngtcp2_frame_chain **pfrc, } nfr = &nfrc->fr.stream; - nfr->type = NGTCP2_FRAME_STREAM; + nfr->type = fr->type; nfr->flags = 0; nfr->fin = fr->fin; nfr->stream_id = fr->stream_id; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h index 8e3cfe83543509..e8cc531f217ab1 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h @@ -49,20 +49,20 @@ typedef struct ngtcp2_frame_chain ngtcp2_frame_chain; #define NGTCP2_STRM_FLAG_SHUT_WR 0x02u #define NGTCP2_STRM_FLAG_SHUT_RDWR \ (NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_SHUT_WR) -/* NGTCP2_STRM_FLAG_SENT_RST indicates that RST_STREAM is sent from - the local endpoint. In this case, NGTCP2_STRM_FLAG_SHUT_WR is also - set. */ -#define NGTCP2_STRM_FLAG_SENT_RST 0x04u -/* NGTCP2_STRM_FLAG_SENT_RST indicates that RST_STREAM is received - from the remote endpoint. In this case, NGTCP2_STRM_FLAG_SHUT_RD - is also set. */ -#define NGTCP2_STRM_FLAG_RECV_RST 0x08u +/* NGTCP2_STRM_FLAG_RESET_STREAM indicates that RESET_STREAM is sent + from the local endpoint. In this case, NGTCP2_STRM_FLAG_SHUT_WR is + also set. */ +#define NGTCP2_STRM_FLAG_RESET_STREAM 0x04u +/* NGTCP2_STRM_FLAG_RESET_STREAM_RECVED indicates that RESET_STREAM is + received from the remote endpoint. In this case, + NGTCP2_STRM_FLAG_SHUT_RD is also set. */ +#define NGTCP2_STRM_FLAG_RESET_STREAM_RECVED 0x08u /* NGTCP2_STRM_FLAG_STOP_SENDING indicates that STOP_SENDING is sent from the local endpoint. */ #define NGTCP2_STRM_FLAG_STOP_SENDING 0x10u -/* NGTCP2_STRM_FLAG_RST_ACKED indicates that the outgoing RST_STREAM - is acknowledged by peer. */ -#define NGTCP2_STRM_FLAG_RST_ACKED 0x20u +/* NGTCP2_STRM_FLAG_RESET_STREAM_ACKED indicates that the outgoing + RESET_STREAM is acknowledged by peer. */ +#define NGTCP2_STRM_FLAG_RESET_STREAM_ACKED 0x20u /* NGTCP2_STRM_FLAG_FIN_ACKED indicates that a STREAM with FIN bit set is acknowledged by a remote endpoint. */ #define NGTCP2_STRM_FLAG_FIN_ACKED 0x40u @@ -75,9 +75,12 @@ typedef struct ngtcp2_frame_chain ngtcp2_frame_chain; In this case, without this flag, we are unable to distinguish assigned value from unassigned one. */ #define NGTCP2_STRM_FLAG_APP_ERROR_CODE_SET 0x100u -/* NGTCP2_STRM_FLAG_STREAM_STOP_SENDING_CALLED is set when - stream_stop_sending callback is called. */ -#define NGTCP2_STRM_FLAG_STREAM_STOP_SENDING_CALLED 0x200u +/* NGTCP2_STRM_FLAG_SEND_STOP_SENDING is set when STOP_SENDING frame + should be sent. */ +#define NGTCP2_STRM_FLAG_SEND_STOP_SENDING 0x200u +/* NGTCP2_STRM_FLAG_SEND_RESET_STREAM is set when RESET_STREAM frame + should be sent. */ +#define NGTCP2_STRM_FLAG_SEND_RESET_STREAM 0x400u typedef struct ngtcp2_strm ngtcp2_strm; @@ -96,10 +99,10 @@ struct ngtcp2_strm { remote endpoint acknowledges data in out-of-order. After that, acked_offset is used instead. */ uint64_t cont_acked_offset; - /* streamfrq contains STREAM frame for retransmission. The flow - control credits have been paid when they are transmitted first - time. There are no restriction regarding flow control for - retransmission. */ + /* streamfrq contains STREAM or CRYPTO frame for + retransmission. The flow control credits have been paid + when they are transmitted first time. There are no + restriction regarding flow control for retransmission. 
*/ ngtcp2_ksl *streamfrq; /* offset is the next offset of outgoing data. In other words, it is the number of bytes sent in this stream without @@ -108,6 +111,9 @@ struct ngtcp2_strm { /* max_tx_offset is the maximum offset that local endpoint can send for this stream. */ uint64_t max_offset; + /* last_blocked_offset is the largest offset where the + transmission of stream data is blocked. */ + uint64_t last_blocked_offset; /* last_max_stream_data_ts is the timestamp when last MAX_STREAM_DATA frame is sent. */ ngtcp2_tstamp last_max_stream_data_ts; @@ -123,6 +129,12 @@ struct ngtcp2_strm { is counted to loss_count. It is used to avoid to count multiple STREAM frames in one lost packet. */ int64_t last_lost_pkt_num; + /* stop_sending_app_error_code is the application specific + error code that is sent along with STOP_SENDING. */ + uint64_t stop_sending_app_error_code; + /* reset_stream_app_error_code is the application specific + error code that is sent along with RESET_STREAM. */ + uint64_t reset_stream_app_error_code; } tx; struct { @@ -200,11 +212,8 @@ int ngtcp2_strm_recv_reordering(ngtcp2_strm *strm, const uint8_t *data, /* * ngtcp2_strm_update_rx_offset tells that data up to offset bytes are * received in order. - * - * NGTCP2_ERR_NOMEM - * Out of memory */ -int ngtcp2_strm_update_rx_offset(ngtcp2_strm *strm, uint64_t offset); +void ngtcp2_strm_update_rx_offset(ngtcp2_strm *strm, uint64_t offset); /* * ngtcp2_strm_shutdown shutdowns |strm|. |flags| should be diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h new file mode 100644 index 00000000000000..9a210a320dc1ca --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_tstamp.h @@ -0,0 +1,68 @@ +/* + * ngtcp2 + * + * Copyright (c) 2023 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef NGTCP2_TSTAMP_H +#define NGTCP2_TSTAMP_H + +#ifdef HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include + +/* + * ngtcp2_tstamp_elapsed returns nonzero if at least |d| has passed + * since |base|. |ts| expresses a current time, and must not be + * UINT64_MAX. + * + * If |base| is UINT64_MAX, this function returns 0 because UINT64_MAX + * is an invalid timestamp. Otherwise, if |base| + |d| >= UINT64_MAX, + * this function returns 0. + * + * !ngtcp2_tstamp_elapsed() == ngtcp2_tstamp_not_elapsed() does not + * hold when |base| is UINT64_MAX. If you need nonzero if |base| is + * UINT64_MAX, use !ngtcp2_tstamp_elapsed. 
Otherwise, use + * ngtcp2_tstamp_not_elapsed. + */ +static inline int ngtcp2_tstamp_elapsed(ngtcp2_tstamp base, ngtcp2_duration d, + ngtcp2_tstamp ts) { + return base != UINT64_MAX && base < UINT64_MAX - d && base + d <= ts; +} + +/* + * ngtcp2_tstamp_not_elapsed returns nonzero if |d| has not passed + * since |base|. |ts| expresses a current time, and must not be + * UINT64_MAX. + * + * If |base| is UINT64_MAX, this function returns 0 because UINT64_MAX + * is an invalid timestamp. Otherwise, if |base| + |d| >= UINT64_MAX, + * this function returns nonzero. + */ +static inline int ngtcp2_tstamp_not_elapsed(ngtcp2_tstamp base, + ngtcp2_duration d, + ngtcp2_tstamp ts) { + return base != UINT64_MAX && (base >= UINT64_MAX - d || base + d > ts); +} + +#endif /* NGTCP2_TSTAMP_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c new file mode 100644 index 00000000000000..7c7d9ae78e914d --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.c @@ -0,0 +1,71 @@ +/* + * ngtcp2 + * + * Copyright (c) 2022 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include "ngtcp2_unreachable.h" + +#include +#include +#ifdef HAVE_UNISTD_H +# include +#endif /* HAVE_UNISTD_H */ +#include +#ifdef WIN32 +# include +#endif /* WIN32 */ + +void ngtcp2_unreachable_fail(const char *file, int line, const char *func) { + char *buf; + size_t buflen; + int rv; + +#define NGTCP2_UNREACHABLE_TEMPLATE "%s:%d %s: Unreachable.\n" + + rv = snprintf(NULL, 0, NGTCP2_UNREACHABLE_TEMPLATE, file, line, func); + if (rv < 0) { + abort(); + } + + /* here we explicitly use system malloc */ + buflen = (size_t)rv + 1; + buf = malloc(buflen); + if (buf == NULL) { + abort(); + } + + rv = snprintf(buf, buflen, NGTCP2_UNREACHABLE_TEMPLATE, file, line, func); + if (rv < 0) { + abort(); + } + +#ifndef WIN32 + while (write(STDERR_FILENO, buf, (size_t)rv) == -1 && errno == EINTR) + ; +#else /* WIN32 */ + _write(_fileno(stderr), buf, (unsigned int)rv); +#endif /* WIN32 */ + + free(buf); + + abort(); +} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h new file mode 100644 index 00000000000000..a5276fd505463f --- /dev/null +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_unreachable.h @@ -0,0 +1,52 @@ +/* + * ngtcp2 + * + * Copyright (c) 2022 ngtcp2 contributors + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef NGTCP2_UNREACHABLE_H +#define NGTCP2_UNREACHABLE_H + +#ifdef HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include + +#ifdef __FILE_NAME__ +# define NGTCP2_FILE_NAME __FILE_NAME__ +#else /* !__FILE_NAME__ */ +# define NGTCP2_FILE_NAME "(file)" +#endif /* !__FILE_NAME__ */ + +#define ngtcp2_unreachable() \ + ngtcp2_unreachable_fail(NGTCP2_FILE_NAME, __LINE__, __func__) + +#ifdef _MSC_VER +__declspec(noreturn) +#endif /* _MSC_VER */ + void ngtcp2_unreachable_fail(const char *file, int line, const char *func) +#ifndef _MSC_VER + __attribute__((noreturn)) +#endif /* !_MSC_VER */ + ; + +#endif /* NGTCP2_UNREACHABLE_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c index 257332e27a2abe..dbc7b668042695 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_vec.c @@ -145,7 +145,7 @@ ngtcp2_ssize ngtcp2_vec_split(ngtcp2_vec *src, size_t *psrccnt, ngtcp2_vec *dst, size_t ngtcp2_vec_merge(ngtcp2_vec *dst, size_t *pdstcnt, ngtcp2_vec *src, size_t *psrccnt, size_t left, size_t maxcnt) { size_t orig_left = left; - size_t i; + size_t i = 0; ngtcp2_vec *a, *b; assert(maxcnt); @@ -158,12 +158,7 @@ size_t ngtcp2_vec_merge(ngtcp2_vec *dst, size_t *pdstcnt, ngtcp2_vec *src, a = &dst[0]; b = &src[0]; - if (left >= b->len) { - *a = *b; - ++*pdstcnt; - left -= b->len; - i = 1; - } else { + if (left < b->len) { a->len = left; a->base = b->base; @@ -172,41 +167,43 @@ size_t ngtcp2_vec_merge(ngtcp2_vec *dst, size_t *pdstcnt, ngtcp2_vec *src, return left; } - } else { - i = 0; + + *a = *b; + ++*pdstcnt; + left -= b->len; + i = 1; } for (; left && i < *psrccnt; ++i) { a = &dst[*pdstcnt - 1]; b = &src[i]; - if (left >= b->len) { + if (left < b->len) { if (a->base + a->len == b->base) { - a->len += b->len; + a->len += left; } else if (*pdstcnt == maxcnt) { break; } else { - dst[(*pdstcnt)++] = *b; + dst[*pdstcnt].len = left; + dst[*pdstcnt].base = b->base; + ++*pdstcnt; } - left -= b->len; - continue; + + b->len -= left; + b->base += left; + left = 0; + + break; } if (a->base + a->len == b->base) { - a->len += left; + a->len += b->len; } else if (*pdstcnt == maxcnt) { break; } else { - dst[*pdstcnt].len = left; - dst[*pdstcnt].base = b->base; - ++*pdstcnt; + dst[(*pdstcnt)++] = *b; } - - b->len -= left; - b->base += left; - left = 0; - - break; + left -= b->len; } memmove(src, src + i, sizeof(ngtcp2_vec) * (*psrccnt - i)); From 1aa9da467fd811327422097026d34c1f8266b103 Mon Sep 17 00:00:00 2001 From: Luigi Pinca Date: Mon, 8 Jan 2024 09:23:08 +0100 Subject: [PATCH 26/41] deps: add nghttp3/**/.deps to .gitignore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/51400 Reviewed-By: Michaël Zasso Reviewed-By: James M Snell Reviewed-By: Marco Ippolito --- deps/ngtcp2/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/deps/ngtcp2/.gitignore b/deps/ngtcp2/.gitignore index 40909a37dd7c69..1ccf3c1c44daf8 100644 --- a/deps/ngtcp2/.gitignore +++ b/deps/ngtcp2/.gitignore @@ -6,3 +6,4 @@ Makefile *gnutls* ngtcp2/**/.gitignore ngtcp2/**/.deps +nghttp3/**/.deps From 3034968225150a66c13c266f173f154dac53b511 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Mon, 22 Jan 2024 22:31:11 +0200 Subject: [PATCH 27/41] deps: update ngtcp2 to 1.1.0 PR-URL: https://github.com/nodejs/node/pull/51319 Reviewed-By: Luigi Pinca Reviewed-By: James M Snell --- deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h | 12 ------------ 
deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h | 4 ++-- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h index a8d4b4afd3a470..f16d15cb39bb52 100644 --- a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h +++ b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h @@ -4269,9 +4269,6 @@ NGTCP2_EXTERN int ngtcp2_conn_open_uni_stream(ngtcp2_conn *conn, * * |flags| is currently unused, and should be set to 0. * - * This function returns 0 if a stream denoted by |stream_id| is not - * found. - * * This function returns 0 if it succeeds, or one of the following * negative error codes: * @@ -4294,9 +4291,6 @@ NGTCP2_EXTERN int ngtcp2_conn_shutdown_stream(ngtcp2_conn *conn, uint32_t flags, * * |flags| is currently unused, and should be set to 0. * - * This function returns 0 if a stream denoted by |stream_id| is not - * found. - * * This function returns 0 if it succeeds, or one of the following * negative error codes: * @@ -4321,9 +4315,6 @@ NGTCP2_EXTERN int ngtcp2_conn_shutdown_stream_write(ngtcp2_conn *conn, * * |flags| is currently unused, and should be set to 0. * - * This function returns 0 if a stream denoted by |stream_id| is not - * found. - * * This function returns 0 if it succeeds, or one of the following * negative error codes: * @@ -4684,9 +4675,6 @@ NGTCP2_EXTERN int ngtcp2_conn_in_draining_period(ngtcp2_conn *conn); * specifies the stream ID. This function only extends stream-level * flow control window. * - * This function returns 0 if a stream denoted by |stream_id| is not - * found. - * * This function returns 0 if it succeeds, or one of the following * negative error codes: * diff --git a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h index 9f7592b84a4585..66a70ffe962964 100644 --- a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h +++ b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h @@ -36,7 +36,7 @@ * * Version number of the ngtcp2 library release. */ -#define NGTCP2_VERSION "0.8.1" +#define NGTCP2_VERSION "1.1.0" /** * @macro @@ -46,6 +46,6 @@ * number, 8 bits for minor and 8 bits for patch. Version 1.2.3 * becomes 0x010203. 
*/ -#define NGTCP2_VERSION_NUM 0x000801 +#define NGTCP2_VERSION_NUM 0x010100 #endif /* VERSION_H */ From 1f489a37532574be24dd056d120672ec7d813109 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Wed, 31 Jan 2024 18:01:29 +0200 Subject: [PATCH 28/41] deps: update ngtcp2 to 1.2.0 PR-URL: https://github.com/nodejs/node/pull/51584 Reviewed-By: Marco Ippolito Reviewed-By: Antoine du Hamel --- .../ngtcp2/lib/includes/ngtcp2/ngtcp2.h | 17 +++++ .../ngtcp2/lib/includes/ngtcp2/version.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c | 3 + deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c | 74 ++++++------------- deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c | 3 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h | 2 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c | 6 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c | 18 +++-- deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h | 3 - deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h | 2 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h | 4 + deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.c | 21 +++++- 14 files changed, 91 insertions(+), 74 deletions(-) diff --git a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h index f16d15cb39bb52..72c8142a5a5aa7 100644 --- a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h +++ b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/ngtcp2.h @@ -4269,6 +4269,9 @@ NGTCP2_EXTERN int ngtcp2_conn_open_uni_stream(ngtcp2_conn *conn, * * |flags| is currently unused, and should be set to 0. * + * This function returns 0 if a stream denoted by |stream_id| is not + * found. + * * This function returns 0 if it succeeds, or one of the following * negative error codes: * @@ -4291,6 +4294,9 @@ NGTCP2_EXTERN int ngtcp2_conn_shutdown_stream(ngtcp2_conn *conn, uint32_t flags, * * |flags| is currently unused, and should be set to 0. * + * This function returns 0 if a stream denoted by |stream_id| is not + * found. + * * This function returns 0 if it succeeds, or one of the following * negative error codes: * @@ -4315,6 +4321,9 @@ NGTCP2_EXTERN int ngtcp2_conn_shutdown_stream_write(ngtcp2_conn *conn, * * |flags| is currently unused, and should be set to 0. * + * This function returns 0 if a stream denoted by |stream_id| is not + * found. + * * This function returns 0 if it succeeds, or one of the following * negative error codes: * @@ -4675,6 +4684,9 @@ NGTCP2_EXTERN int ngtcp2_conn_in_draining_period(ngtcp2_conn *conn); * specifies the stream ID. This function only extends stream-level * flow control window. * + * This function returns 0 if a stream denoted by |stream_id| is not + * found. + * * This function returns 0 if it succeeds, or one of the following * negative error codes: * @@ -5340,6 +5352,11 @@ NGTCP2_EXTERN void ngtcp2_ccerr_set_application_error(ngtcp2_ccerr *ccerr, * CONNECTION_CLOSE (type 0x1d) frame. Otherwise, it does not produce * any data, and returns 0. * + * |destlen| could be shorten by some factors (e.g., server side + * amplification limit). This function returns + * :macro:`NGTCP2_ERR_NOBUF` if the resulting buffer is too small even + * if the given buffer has enough space. + * * This function must not be called from inside the callback * functions. * diff --git a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h index 66a70ffe962964..b102eae8f9ec77 100644 --- a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h +++ b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h @@ -36,7 +36,7 @@ * * Version number of the ngtcp2 library release. 
*/ -#define NGTCP2_VERSION "1.1.0" +#define NGTCP2_VERSION "1.2.0" /** * @macro @@ -46,6 +46,6 @@ * number, 8 bits for minor and 8 bits for patch. Version 1.2.3 * becomes 0x010203. */ -#define NGTCP2_VERSION_NUM 0x010100 +#define NGTCP2_VERSION_NUM 0x010200 #endif /* VERSION_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c index 6369887c28671b..ef311ff93c0feb 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c @@ -36,6 +36,7 @@ #include "ngtcp2_mem.h" #include "ngtcp2_rcvry.h" #include "ngtcp2_conn_stat.h" +#include "ngtcp2_unreachable.h" /* NGTCP2_CC_DELIVERY_RATE_SEC_FILTERLEN is the window length of delivery rate filter driven by ACK clocking. */ @@ -247,6 +248,8 @@ uint64_t ngtcp2_cbrt(uint64_t n) { # if defined(_WIN64) if (_BitScanReverse64(&index, n)) { d = 61 - index; + } else { + ngtcp2_unreachable(); } # else /* !defined(_WIN64) */ if (_BitScanReverse(&index, (unsigned int)(n >> 32))) { diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c index f40ab5626109e9..a4873eb20c4b86 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c @@ -5329,7 +5329,6 @@ static int conn_recv_ack(ngtcp2_conn *conn, ngtcp2_pktns *pktns, ngtcp2_ack *fr, num_acked = ngtcp2_rtb_recv_ack(&pktns->rtb, fr, &conn->cstat, conn, pktns, pkt_ts, ts); if (num_acked < 0) { - /* TODO assert this */ assert(ngtcp2_err_is_fatal((int)num_acked)); return (int)num_acked; } @@ -5790,9 +5789,8 @@ static int conn_recv_path_response(ngtcp2_conn *conn, ngtcp2_path_response *fr, } if (!(pv->flags & NGTCP2_PV_FLAG_DONT_CARE)) { - if (!(pv->flags & NGTCP2_PV_FLAG_FALLBACK_ON_FAILURE)) { + if (pv->dcid.seq != conn->dcid.current.seq) { assert(!conn->server); - assert(pv->dcid.seq != conn->dcid.current.seq); assert(conn->dcid.current.cid.datalen); rv = conn_retire_dcid(conn, &conn->dcid.current, ts); @@ -5871,25 +5869,6 @@ static int conn_recv_path_response(ngtcp2_conn *conn, ngtcp2_path_response *fr, return conn_stop_pv(conn, ts); } -/* - * pkt_num_bits returns the number of bits available when packet - * number is encoded in |pkt_numlen| bytes. - */ -static size_t pkt_num_bits(size_t pkt_numlen) { - switch (pkt_numlen) { - case 1: - return 8; - case 2: - return 16; - case 3: - return 24; - case 4: - return 32; - default: - ngtcp2_unreachable(); - } -} - /* * pktns_pkt_num_is_duplicate returns nonzero if |pkt_num| is * duplicated packet number. @@ -6020,9 +5999,7 @@ static int conn_verify_fixed_bit(ngtcp2_conn *conn, ngtcp2_pkt_hd *hd) { case NGTCP2_PKT_INITIAL: case NGTCP2_PKT_0RTT: case NGTCP2_PKT_HANDSHAKE: - /* TODO we cannot determine whether a token comes from NEW_TOKEN - frame or Retry packet. RFC 9287 requires that a token from - NEW_TOKEN. */ + /* RFC 9287 requires that a token from NEW_TOKEN. 
*/ if (!(conn->flags & NGTCP2_CONN_FLAG_INITIAL_PKT_PROCESSED) && (conn->local.settings.token_type != NGTCP2_TOKEN_TYPE_NEW_TOKEN || !conn->local.settings.tokenlen)) { @@ -6145,7 +6122,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, return NGTCP2_ERR_DISCARD_PKT; } - if (hd.type == NGTCP2_PKT_VERSION_NEGOTIATION) { + switch (hd.type) { + case NGTCP2_PKT_VERSION_NEGOTIATION: hdpktlen = (size_t)nread; ngtcp2_log_rx_pkt_hd(&conn->log, &hd); @@ -6181,7 +6159,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, return NGTCP2_ERR_DISCARD_PKT; } return NGTCP2_ERR_RECV_VERSION_NEGOTIATION; - } else if (hd.type == NGTCP2_PKT_RETRY) { + case NGTCP2_PKT_RETRY: hdpktlen = (size_t)nread; ngtcp2_log_rx_pkt_hd(&conn->log, &hd); @@ -6402,10 +6380,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, break; default: - /* unknown packet type */ - ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, - "packet was ignored because of unknown packet type"); - return (ngtcp2_ssize)pktlen; + ngtcp2_unreachable(); } hp_mask = conn->callbacks.hp_mask; @@ -6438,7 +6413,7 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, payloadlen = hd.len - hd.pkt_numlen; hd.pkt_num = ngtcp2_pkt_adjust_pkt_num(pktns->rx.max_pkt_num, hd.pkt_num, - pkt_num_bits(hd.pkt_numlen)); + hd.pkt_numlen); if (hd.pkt_num > NGTCP2_MAX_PKT_NUM) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, "pkn=%" PRId64 " is greater than maximum pkn", hd.pkt_num); @@ -6624,14 +6599,8 @@ conn_recv_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, pktns_increase_ecn_counts(pktns, pi); - /* TODO Initial and Handshake are always acknowledged without - delay. */ - if (require_ack && - (++pktns->acktr.rx_npkt >= conn->local.settings.ack_thresh || - (pi->ecn & NGTCP2_ECN_MASK) == NGTCP2_ECN_CE)) { - ngtcp2_acktr_immediate_ack(&pktns->acktr); - } - + /* Initial and Handshake are always acknowledged without delay. No + need to call ngtcp2_acktr_immediate_ack(). */ rv = ngtcp2_conn_sched_ack(conn, &pktns->acktr, hd.pkt_num, require_ack, pkt_ts); if (rv != 0) { @@ -7057,7 +7026,7 @@ static int conn_recv_stream(ngtcp2_conn *conn, const ngtcp2_stream *fr) { if (strm == NULL) { return NGTCP2_ERR_NOMEM; } - /* TODO Perhaps, call new_stream callback? */ + rv = ngtcp2_conn_init_stream(conn, strm, fr->stream_id, NULL); if (rv != 0) { ngtcp2_objalloc_strm_release(&conn->strm_objalloc, strm); @@ -7464,7 +7433,7 @@ static int conn_recv_stop_sending(ngtcp2_conn *conn, return 0; } - /* Frame is received reset before we create ngtcp2_strm + /* STOP_SENDING frame is received before we create ngtcp2_strm object. 
*/ strm = ngtcp2_objalloc_strm_get(&conn->strm_objalloc); if (strm == NULL) { @@ -7482,6 +7451,10 @@ static int conn_recv_stop_sending(ngtcp2_conn *conn, } } + if (strm->flags & NGTCP2_STRM_FLAG_STOP_SENDING_RECVED) { + return 0; + } + ngtcp2_strm_set_app_error_code(strm, fr->app_error_code); /* No RESET_STREAM is required if we have sent FIN and all data have @@ -7494,7 +7467,9 @@ static int conn_recv_stop_sending(ngtcp2_conn *conn, } } - strm->flags |= NGTCP2_STRM_FLAG_SHUT_WR | NGTCP2_STRM_FLAG_RESET_STREAM; + strm->flags |= NGTCP2_STRM_FLAG_SHUT_WR | + NGTCP2_STRM_FLAG_STOP_SENDING_RECVED | + NGTCP2_STRM_FLAG_RESET_STREAM; ngtcp2_strm_streamfrq_clear(strm); @@ -8751,12 +8726,8 @@ conn_recv_delayed_handshake_pkt(ngtcp2_conn *conn, const ngtcp2_pkt_info *pi, pktns_increase_ecn_counts(pktns, pi); - if (require_ack && - (++pktns->acktr.rx_npkt >= conn->local.settings.ack_thresh || - (pi->ecn & NGTCP2_ECN_MASK) == NGTCP2_ECN_CE)) { - ngtcp2_acktr_immediate_ack(&pktns->acktr); - } - + /* Initial and Handshake are always acknowledged without delay. No + need to call ngtcp2_acktr_immediate_ack(). */ rv = ngtcp2_conn_sched_ack(conn, &pktns->acktr, hd->pkt_num, require_ack, pkt_ts); if (rv != 0) { @@ -9020,7 +8991,7 @@ static ngtcp2_ssize conn_recv_pkt(ngtcp2_conn *conn, const ngtcp2_path *path, payloadlen = pktlen - hdpktlen; hd.pkt_num = ngtcp2_pkt_adjust_pkt_num(pktns->rx.max_pkt_num, hd.pkt_num, - pkt_num_bits(hd.pkt_numlen)); + hd.pkt_numlen); if (hd.pkt_num > NGTCP2_MAX_PKT_NUM) { ngtcp2_log_info(&conn->log, NGTCP2_LOG_EVENT_PKT, "pkn=%" PRId64 " is greater than maximum pkn", hd.pkt_num); @@ -12551,7 +12522,8 @@ static int conn_shutdown_stream_read(ngtcp2_conn *conn, ngtcp2_strm *strm, uint64_t app_error_code) { ngtcp2_strm_set_app_error_code(strm, app_error_code); - if (strm->flags & NGTCP2_STRM_FLAG_STOP_SENDING) { + if (strm->flags & + (NGTCP2_STRM_FLAG_STOP_SENDING | NGTCP2_STRM_FLAG_RESET_STREAM_RECVED)) { return 0; } if ((strm->flags & NGTCP2_STRM_FLAG_SHUT_RD) && diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c index 760bd60a9aff76..93922a29c319f4 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_log.c @@ -68,8 +68,7 @@ void ngtcp2_log_init(ngtcp2_log *log, const ngtcp2_cid *scid, * Source Connection ID in hex string. * * : - * Event. pkt=packet, frm=frame, ldc=loss-detection, cry=crypto, - * con=connection(catch all) + * Event. See ngtcp2_log_event. * * # Frame event * diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h index bf697927351851..4a2c4041d4d170 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_net.h @@ -87,7 +87,7 @@ #if defined(WIN32) /* Windows requires ws2_32 library for ntonl family functions. We define inline functions for those function so that we don't have - dependeny on that lib. */ + dependency on that lib. 
*/ # ifdef _MSC_VER # define STIN static __inline diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c index 12f7daeaf242a9..1687ff254d94c7 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.c @@ -558,7 +558,7 @@ ngtcp2_ssize ngtcp2_pkt_decode_stream_frame(ngtcp2_stream *dest, uint8_t type; size_t len = 1 + 1; const uint8_t *p; - size_t datalen; + size_t datalen = 0; size_t ndatalen = 0; size_t n; uint64_t vi; @@ -2139,9 +2139,9 @@ int ngtcp2_pkt_decode_retry(ngtcp2_pkt_retry *dest, const uint8_t *payload, } int64_t ngtcp2_pkt_adjust_pkt_num(int64_t max_pkt_num, int64_t pkt_num, - size_t n) { + size_t pkt_numlen) { int64_t expected = max_pkt_num + 1; - int64_t win = (int64_t)1 << n; + int64_t win = (int64_t)1 << (pkt_numlen * 8); int64_t hwin = win / 2; int64_t mask = win - 1; int64_t cand = (expected & ~mask) | pkt_num; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h index b1bec97c31a08c..feec4d32c97bdd 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pkt.h @@ -1120,12 +1120,12 @@ ngtcp2_ssize ngtcp2_pkt_encode_datagram_frame(uint8_t *out, size_t outlen, /* * ngtcp2_pkt_adjust_pkt_num find the full 64 bits packet number for - * |pkt_num|, which is expected to be least significant |n| bits. The + * |pkt_num|, which is encoded in |pkt_numlen| bytes. The * |max_pkt_num| is the highest successfully authenticated packet * number. */ int64_t ngtcp2_pkt_adjust_pkt_num(int64_t max_pkt_num, int64_t pkt_num, - size_t n); + size_t pkt_numlen); /* * ngtcp2_pkt_validate_ack checks that ack is malformed or not. diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c index ffba131e02b9a5..f7c122b1ab406b 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.c @@ -39,7 +39,6 @@ void ngtcp2_ppe_init(ngtcp2_ppe *ppe, uint8_t *out, size_t outlen, ppe->pkt_num_offset = 0; ppe->pkt_numlen = 0; ppe->pkt_num = 0; - ppe->sample_offset = 0; ppe->cc = cc; } @@ -69,8 +68,6 @@ int ngtcp2_ppe_encode_hd(ngtcp2_ppe *ppe, const ngtcp2_pkt_hd *hd) { return (int)rv; } - ppe->sample_offset = ppe->pkt_num_offset + 4; - buf->last += rv; ppe->pkt_numlen = hd->pkt_numlen; @@ -101,6 +98,14 @@ int ngtcp2_ppe_encode_frame(ngtcp2_ppe *ppe, ngtcp2_frame *fr) { return 0; } +/* + * ppe_sample_offset returns the offset to sample for packet number + * encryption. 
+ */ +static size_t ppe_sample_offset(ngtcp2_ppe *ppe) { + return ppe->pkt_num_offset + 4; +} + ngtcp2_ssize ngtcp2_ppe_final(ngtcp2_ppe *ppe, const uint8_t **ppkt) { ngtcp2_buf *buf = &ppe->buf; ngtcp2_crypto_cc *cc = ppe->cc; @@ -132,9 +137,10 @@ ngtcp2_ssize ngtcp2_ppe_final(ngtcp2_ppe *ppe, const uint8_t **ppkt) { buf->last = payload + payloadlen + cc->aead.max_overhead; /* TODO Check that we have enough space to get sample */ - assert(ppe->sample_offset + NGTCP2_HP_SAMPLELEN <= ngtcp2_buf_len(buf)); + assert(ppe_sample_offset(ppe) + NGTCP2_HP_SAMPLELEN <= ngtcp2_buf_len(buf)); - rv = cc->hp_mask(mask, &cc->hp, &cc->hp_ctx, buf->begin + ppe->sample_offset); + rv = cc->hp_mask(mask, &cc->hp, &cc->hp_ctx, + buf->begin + ppe_sample_offset(ppe)); if (rv != 0) { return NGTCP2_ERR_CALLBACK_FAILURE; } @@ -197,7 +203,7 @@ size_t ngtcp2_ppe_padding_hp_sample(ngtcp2_ppe *ppe) { assert(cc->aead.max_overhead); max_samplelen = - ngtcp2_buf_len(buf) + cc->aead.max_overhead - ppe->sample_offset; + ngtcp2_buf_len(buf) + cc->aead.max_overhead - ppe_sample_offset(ppe); if (max_samplelen < NGTCP2_HP_SAMPLELEN) { len = NGTCP2_HP_SAMPLELEN - max_samplelen; assert(ngtcp2_ppe_left(ppe) >= len); diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h index bf220df37c14f2..2a069ef33451ab 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_ppe.h @@ -50,9 +50,6 @@ typedef struct ngtcp2_ppe { /* pkt_numlen is the number of bytes used to encode a packet number */ size_t pkt_numlen; - /* sample_offset is the offset to sample for packet number - encryption. */ - size_t sample_offset; /* pkt_num is the packet number written in buf. */ int64_t pkt_num; /* nonce is the buffer to store nonce. It should be equal or longer diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h index 720c309f5adb5e..484c8f21f75de2 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_pq.h @@ -110,7 +110,7 @@ size_t ngtcp2_pq_size(ngtcp2_pq *pq); typedef int (*ngtcp2_pq_item_cb)(ngtcp2_pq_entry *item, void *arg); /* - * Applys |fun| to each item in |pq|. The |arg| is passed as arg + * Applies |fun| to each item in |pq|. The |arg| is passed as arg * parameter to callback function. This function must not change the * ordering key. If the return value from callback is nonzero, this * function returns 1 immediately without iterating remaining items. diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c index b9e0139bddfcac..6308261369c382 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c @@ -1006,7 +1006,9 @@ static int rtb_detect_lost_pkt(ngtcp2_rtb *rtb, uint64_t *ppkt_lost, if (rtb_pkt_lost(rtb, cstat, ent, loss_delay, (size_t)pkt_thres, ts)) { /* All entries from ent are considered to be lost. */ latest_ts = oldest_ts = ent->ts; - last_lost_pkt_num = ent->hd.pkt_num; + /* +1 to pick this packet for persistent congestion in the + following loop. */ + last_lost_pkt_num = ent->hd.pkt_num + 1; max_ack_delay = conn->remote.transport_params ? 
conn->remote.transport_params->max_ack_delay : 0; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h index e8cc531f217ab1..223e38fc646b38 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h @@ -81,6 +81,10 @@ typedef struct ngtcp2_frame_chain ngtcp2_frame_chain; /* NGTCP2_STRM_FLAG_SEND_RESET_STREAM is set when RESET_STREAM frame should be sent. */ #define NGTCP2_STRM_FLAG_SEND_RESET_STREAM 0x400u +/* NGTCP2_STRM_FLAG_STOP_SENDING_RECVED indicates that STOP_SENDING is + received from the remote endpoint. In this case, + NGTCP2_STRM_FLAG_SHUT_WR is also set. */ +#define NGTCP2_STRM_FLAG_STOP_SENDING_RECVED 0x800u typedef struct ngtcp2_strm ngtcp2_strm; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.c index 71c816e4d3d815..39f3d408a741ba 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_window_filter.c @@ -39,12 +39,16 @@ void ngtcp2_window_filter_init(ngtcp2_window_filter *wf, uint64_t window_length) { wf->window_length = window_length; - memset(wf->estimates, 0, sizeof(wf->estimates)); + memset(wf->estimates, 0xff, sizeof(wf->estimates)); } void ngtcp2_window_filter_update(ngtcp2_window_filter *wf, uint64_t new_sample, uint64_t new_time) { - if (wf->estimates[0].sample == 0 || new_sample > wf->estimates[0].sample || + /* Reset all estimates if they have not yet been initialized, if new + sample is a new best, or if the newest recorded estimate is too + old. */ + if (wf->estimates[0].sample == UINT64_MAX || + new_sample > wf->estimates[0].sample || new_time - wf->estimates[2].time > wf->window_length) { ngtcp2_window_filter_reset(wf, new_sample, new_time); return; @@ -59,12 +63,19 @@ void ngtcp2_window_filter_update(ngtcp2_window_filter *wf, uint64_t new_sample, wf->estimates[2].time = new_time; } + /* Expire and update estimates as necessary. */ if (new_time - wf->estimates[0].time > wf->window_length) { + /* The best estimate hasn't been updated for an entire window, so + promote second and third best estimates. */ wf->estimates[0] = wf->estimates[1]; wf->estimates[1] = wf->estimates[2]; wf->estimates[2].sample = new_sample; wf->estimates[2].time = new_time; + /* Need to iterate one more time. Check if the new best estimate + is outside the window as well, since it may also have been + recorded a long time ago. Don't need to iterate once more + since we cover that case at the beginning of the method. */ if (new_time - wf->estimates[0].time > wf->window_length) { wf->estimates[0] = wf->estimates[1]; wf->estimates[1] = wf->estimates[2]; @@ -74,6 +85,9 @@ void ngtcp2_window_filter_update(ngtcp2_window_filter *wf, uint64_t new_sample, if (wf->estimates[1].sample == wf->estimates[0].sample && new_time - wf->estimates[1].time > wf->window_length >> 2) { + /* A quarter of the window has passed without a better sample, so + the second-best estimate is taken from the second quarter of + the window. */ wf->estimates[2].sample = new_sample; wf->estimates[2].time = new_time; wf->estimates[1] = wf->estimates[2]; @@ -82,6 +96,9 @@ void ngtcp2_window_filter_update(ngtcp2_window_filter *wf, uint64_t new_sample, if (wf->estimates[2].sample == wf->estimates[1].sample && new_time - wf->estimates[2].time > wf->window_length >> 1) { + /* We've passed a half of the window without a better estimate, so + take a third-best estimate from the second half of the + window. 
*/ wf->estimates[2].sample = new_sample; wf->estimates[2].time = new_time; } From 78f84ebb092e0062e0b3743da5bd0b2ab18b7ebf Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Mon, 26 Feb 2024 15:03:23 +0200 Subject: [PATCH 29/41] deps: update ngtcp2 to 1.3.0 PR-URL: https://github.com/nodejs/node/pull/51796 Reviewed-By: Marco Ippolito Reviewed-By: Luigi Pinca Reviewed-By: Rafael Gonzaga --- .../ngtcp2/lib/includes/ngtcp2/version.h | 4 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h | 2 +- deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c | 50 +++++---------- deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c | 63 ++++++++++--------- deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c | 58 ++++++++++++----- deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c | 54 +++++++++++++--- deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c | 34 ++++++++++ deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h | 33 ++++++++++ 8 files changed, 205 insertions(+), 93 deletions(-) diff --git a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h index b102eae8f9ec77..801c6cb2681386 100644 --- a/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h +++ b/deps/ngtcp2/ngtcp2/lib/includes/ngtcp2/version.h @@ -36,7 +36,7 @@ * * Version number of the ngtcp2 library release. */ -#define NGTCP2_VERSION "1.2.0" +#define NGTCP2_VERSION "1.3.0" /** * @macro @@ -46,6 +46,6 @@ * number, 8 bits for minor and 8 bits for patch. Version 1.2.3 * becomes 0x010203. */ -#define NGTCP2_VERSION_NUM 0x010200 +#define NGTCP2_VERSION_NUM 0x010300 #endif /* VERSION_H */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h index 107d413382da20..85b5f4ddf0464a 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_buf.h @@ -36,7 +36,7 @@ typedef struct ngtcp2_buf { uint8_t *begin; /* end points to the one beyond of the last byte of the buffer */ uint8_t *end; - /* pos pointers to the start of data. Typically, this points to the + /* pos points to the start of data. Typically, this points to the point that next data should be read. Initially, it points to |begin|. 
*/ uint8_t *pos; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c index ef311ff93c0feb..9ad37fbdb6395a 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_cc.c @@ -27,10 +27,6 @@ #include #include -#if defined(_MSC_VER) -# include -#endif - #include "ngtcp2_log.h" #include "ngtcp2_macro.h" #include "ngtcp2_mem.h" @@ -235,39 +231,27 @@ void ngtcp2_cc_cubic_init(ngtcp2_cc_cubic *cubic, ngtcp2_log *log) { } uint64_t ngtcp2_cbrt(uint64_t n) { - int d; - uint64_t a; - - if (n == 0) { - return 0; - } - -#if defined(_MSC_VER) - { - unsigned long index; -# if defined(_WIN64) - if (_BitScanReverse64(&index, n)) { - d = 61 - index; - } else { - ngtcp2_unreachable(); - } -# else /* !defined(_WIN64) */ - if (_BitScanReverse(&index, (unsigned int)(n >> 32))) { - d = 31 - index; - } else { - d = 32 + 31 - _BitScanReverse(&index, (unsigned int)n); + size_t s; + uint64_t y = 0; + uint64_t b; + + for (s = 63; s > 0; s -= 3) { + y <<= 1; + b = 3 * y * (y + 1) + 1; + if ((n >> s) >= b) { + n -= b << s; + y++; } -# endif /* !defined(_WIN64) */ } -#else /* !defined(_MSC_VER) */ - d = __builtin_clzll(n); -#endif /* !defined(_MSC_VER) */ - a = 1ULL << ((64 - d) / 3 + 1); - for (; a * a * a > n;) { - a = (2 * a + n / a / a) / 3; + y <<= 1; + b = 3 * y * (y + 1) + 1; + if (n >= b) { + n -= b; + y++; } - return a; + + return y; } /* HyStart++ constants */ diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c index a4873eb20c4b86..c8caf47ea76232 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_conn.c @@ -3441,12 +3441,22 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, } switch ((*pfrc)->fr.type) { + case NGTCP2_FRAME_RESET_STREAM: + strm = + ngtcp2_conn_find_stream(conn, (*pfrc)->fr.reset_stream.stream_id); + if (strm == NULL || + !ngtcp2_strm_require_retransmit_reset_stream(strm)) { + frc = *pfrc; + *pfrc = (*pfrc)->next; + ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); + continue; + } + break; case NGTCP2_FRAME_STOP_SENDING: strm = ngtcp2_conn_find_stream(conn, (*pfrc)->fr.stop_sending.stream_id); if (strm == NULL || - ((strm->flags & NGTCP2_STRM_FLAG_SHUT_RD) && - ngtcp2_strm_rx_offset(strm) == strm->rx.last_offset)) { + !ngtcp2_strm_require_retransmit_stop_sending(strm)) { frc = *pfrc; *pfrc = (*pfrc)->next; ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); @@ -3476,10 +3486,8 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, case NGTCP2_FRAME_MAX_STREAM_DATA: strm = ngtcp2_conn_find_stream(conn, (*pfrc)->fr.max_stream_data.stream_id); - if (strm == NULL || - (strm->flags & - (NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_STOP_SENDING)) || - (*pfrc)->fr.max_stream_data.max_stream_data < strm->rx.max_offset) { + if (strm == NULL || !ngtcp2_strm_require_retransmit_max_stream_data( + strm, &(*pfrc)->fr.max_stream_data)) { frc = *pfrc; *pfrc = (*pfrc)->next; ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); @@ -3497,8 +3505,8 @@ static ngtcp2_ssize conn_write_pkt(ngtcp2_conn *conn, ngtcp2_pkt_info *pi, case NGTCP2_FRAME_STREAM_DATA_BLOCKED: strm = ngtcp2_conn_find_stream( conn, (*pfrc)->fr.stream_data_blocked.stream_id); - if (strm == NULL || (strm->flags & NGTCP2_STRM_FLAG_SHUT_WR) || - (*pfrc)->fr.stream_data_blocked.offset != strm->tx.max_offset) { + if (strm == NULL || !ngtcp2_strm_require_retransmit_stream_data_blocked( + strm, 
&(*pfrc)->fr.stream_data_blocked)) { frc = *pfrc; *pfrc = (*pfrc)->next; ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); @@ -7145,7 +7153,7 @@ static int conn_recv_stream(ngtcp2_conn *conn, const ngtcp2_stream *fr) { return rv; } } - } else if (fr->datacnt) { + } else if (fr->datacnt && !(strm->flags & NGTCP2_STRM_FLAG_STOP_SENDING)) { rv = ngtcp2_strm_recv_reordering(strm, fr->data[0].base, fr->data[0].len, fr->offset); if (rv != 0) { @@ -7304,27 +7312,20 @@ static int conn_recv_reset_stream(ngtcp2_conn *conn, } /* Stream is reset before we create ngtcp2_strm object. */ - conn->rx.offset += fr->final_size; - ngtcp2_conn_extend_max_offset(conn, fr->final_size); - - rv = conn_call_stream_reset(conn, fr->stream_id, fr->final_size, - fr->app_error_code, NULL); + strm = ngtcp2_objalloc_strm_get(&conn->strm_objalloc); + if (strm == NULL) { + return NGTCP2_ERR_NOMEM; + } + rv = ngtcp2_conn_init_stream(conn, strm, fr->stream_id, NULL); if (rv != 0) { + ngtcp2_objalloc_strm_release(&conn->strm_objalloc, strm); return rv; } - /* There will be no activity in this stream because we got - RESET_STREAM and don't write stream data any further. This - effectively allows another new stream for peer. */ - if (bidi) { - handle_max_remote_streams_extension(&conn->remote.bidi.unsent_max_streams, - 1); - } else { - handle_max_remote_streams_extension(&conn->remote.uni.unsent_max_streams, - 1); + rv = conn_call_stream_open(conn, strm); + if (rv != 0) { + return rv; } - - return 0; } if ((strm->flags & NGTCP2_STRM_FLAG_SHUT_RD)) { @@ -7461,15 +7462,16 @@ static int conn_recv_stop_sending(ngtcp2_conn *conn, been acknowledged. */ if (!ngtcp2_strm_is_all_tx_data_fin_acked(strm) && !(strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM)) { + strm->flags |= NGTCP2_STRM_FLAG_RESET_STREAM; + rv = conn_reset_stream(conn, strm, fr->app_error_code); if (rv != 0) { return rv; } } - strm->flags |= NGTCP2_STRM_FLAG_SHUT_WR | - NGTCP2_STRM_FLAG_STOP_SENDING_RECVED | - NGTCP2_STRM_FLAG_RESET_STREAM; + strm->flags |= + NGTCP2_STRM_FLAG_SHUT_WR | NGTCP2_STRM_FLAG_STOP_SENDING_RECVED; ngtcp2_strm_streamfrq_clear(strm); @@ -12533,14 +12535,15 @@ static int conn_shutdown_stream_read(ngtcp2_conn *conn, ngtcp2_strm *strm, /* Extend connection flow control window for the amount of data which are not passed to application. */ - if (!(strm->flags & (NGTCP2_STRM_FLAG_STOP_SENDING | - NGTCP2_STRM_FLAG_RESET_STREAM_RECVED))) { + if (!(strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM_RECVED)) { ngtcp2_conn_extend_max_offset(conn, strm->rx.last_offset - ngtcp2_strm_rx_offset(strm)); } strm->flags |= NGTCP2_STRM_FLAG_STOP_SENDING; + ngtcp2_strm_discard_reordered_data(strm); + return conn_stop_sending(conn, strm, app_error_code); } diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c index 2c00af5ea53d99..0a3ecf6a2440cb 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_crypto.c @@ -123,6 +123,25 @@ static uint8_t *write_varint_param(uint8_t *p, ngtcp2_transport_param_id id, return ngtcp2_put_uvarint(p, value); } +/* + * zero_paramlen returns the length of a single transport parameter + * which has zero length value in its parameter. + */ +static size_t zero_paramlen(ngtcp2_transport_param_id id) { + return ngtcp2_put_uvarintlen(id) + 1; +} + +/* + * write_zero_param writes parameter |id| that has zero length value. + * It returns p + the number of bytes written. 
+ */ +static uint8_t *write_zero_param(uint8_t *p, ngtcp2_transport_param_id id) { + p = ngtcp2_put_uvarint(p, id); + *p++ = 0; + + return p; +} + /* * cid_paramlen returns the length of a single transport parameter * which has |cid| as value. @@ -235,9 +254,7 @@ ngtcp2_ssize ngtcp2_transport_params_encode_versioned( params->ack_delay_exponent); } if (params->disable_active_migration) { - len += - ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION) + - ngtcp2_put_uvarintlen(0); + len += zero_paramlen(NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); } if (params->max_ack_delay != NGTCP2_DEFAULT_MAX_ACK_DELAY) { len += varint_paramlen(NGTCP2_TRANSPORT_PARAM_MAX_ACK_DELAY, @@ -258,8 +275,7 @@ ngtcp2_ssize ngtcp2_transport_params_encode_versioned( params->max_datagram_frame_size); } if (params->grease_quic_bit) { - len += ngtcp2_put_uvarintlen(NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT) + - ngtcp2_put_uvarintlen(0); + len += zero_paramlen(NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); } if (params->version_info_present) { version_infolen = @@ -377,8 +393,7 @@ ngtcp2_ssize ngtcp2_transport_params_encode_versioned( } if (params->disable_active_migration) { - p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); - p = ngtcp2_put_uvarint(p, 0); + p = write_zero_param(p, NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION); } if (params->max_ack_delay != NGTCP2_DEFAULT_MAX_ACK_DELAY) { @@ -404,8 +419,7 @@ ngtcp2_ssize ngtcp2_transport_params_encode_versioned( } if (params->grease_quic_bit) { - p = ngtcp2_put_uvarint(p, NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); - p = ngtcp2_put_uvarint(p, 0); + p = write_zero_param(p, NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT); } if (params->version_info_present) { @@ -482,6 +496,22 @@ static int decode_varint_param(uint64_t *pdest, const uint8_t **pp, return 0; } +/* + * decode_zero_param decodes zero length value from the buffer pointed + * by |*pp| of length |end - *pp|. The length is encoded in varint + * form. If it decodes zero length value successfully, it increments + * |*pp| by 1, and returns 0. Otherwise it returns -1. + */ +static int decode_zero_param(const uint8_t **pp, const uint8_t *end) { + if (*pp == end || **pp != 0) { + return -1; + } + + ++*pp; + + return 0; +} + /* * decode_cid_param decodes length prefixed ngtcp2_cid from the buffer * pointed by |*pp| of length |end - *pp|. 
The length is encoded in @@ -701,10 +731,7 @@ int ngtcp2_transport_params_decode_versioned(int transport_params_version, params->preferred_addr_present = 1; break; case NGTCP2_TRANSPORT_PARAM_DISABLE_ACTIVE_MIGRATION: - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (valuelen != 0) { + if (decode_zero_param(&p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } params->disable_active_migration = 1; @@ -751,10 +778,7 @@ int ngtcp2_transport_params_decode_versioned(int transport_params_version, } break; case NGTCP2_TRANSPORT_PARAM_GREASE_QUIC_BIT: - if (decode_varint(&valuelen, &p, end) != 0) { - return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; - } - if (valuelen != 0) { + if (decode_zero_param(&p, end) != 0) { return NGTCP2_ERR_MALFORMED_TRANSPORT_PARAM; } params->grease_quic_bit = 1; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c index 6308261369c382..5ebdce7d0e2715 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_rtb.c @@ -237,7 +237,7 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, switch (frc->fr.type) { case NGTCP2_FRAME_STREAM: strm = ngtcp2_conn_find_stream(conn, fr->stream.stream_id); - if (strm == NULL) { + if (strm == NULL || (strm->flags & NGTCP2_STRM_FLAG_RESET_STREAM)) { continue; } @@ -339,26 +339,60 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, uint8_t flags, return rv; } - break; + ++num_reclaimed; + + nfrc->next = *pfrc; + *pfrc = nfrc; + pfrc = &nfrc->next; + + continue; case NGTCP2_FRAME_DATAGRAM: case NGTCP2_FRAME_DATAGRAM_LEN: continue; - default: - rv = ngtcp2_frame_chain_objalloc_new(&nfrc, rtb->frc_objalloc); - if (rv != 0) { - return rv; + case NGTCP2_FRAME_RESET_STREAM: + strm = ngtcp2_conn_find_stream(conn, fr->reset_stream.stream_id); + if (strm == NULL || !ngtcp2_strm_require_retransmit_reset_stream(strm)) { + continue; } - nfrc->fr = *fr; + break; + case NGTCP2_FRAME_STOP_SENDING: + strm = ngtcp2_conn_find_stream(conn, fr->stop_sending.stream_id); + if (strm == NULL || !ngtcp2_strm_require_retransmit_stop_sending(strm)) { + continue; + } - rv = ngtcp2_bind_frame_chains(frc, nfrc, rtb->mem); - if (rv != 0) { - return rv; + break; + case NGTCP2_FRAME_MAX_STREAM_DATA: + strm = ngtcp2_conn_find_stream(conn, fr->max_stream_data.stream_id); + if (strm == NULL || !ngtcp2_strm_require_retransmit_max_stream_data( + strm, &fr->max_stream_data)) { + continue; + } + + break; + case NGTCP2_FRAME_STREAM_DATA_BLOCKED: + strm = ngtcp2_conn_find_stream(conn, fr->stream_data_blocked.stream_id); + if (strm == NULL || !ngtcp2_strm_require_retransmit_stream_data_blocked( + strm, &fr->stream_data_blocked)) { + continue; } break; } + rv = ngtcp2_frame_chain_objalloc_new(&nfrc, rtb->frc_objalloc); + if (rv != 0) { + return rv; + } + + nfrc->fr = *fr; + + rv = ngtcp2_bind_frame_chains(frc, nfrc, rtb->mem); + if (rv != 0) { + return rv; + } + ++num_reclaimed; nfrc->next = *pfrc; diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c index 6bbeb8f9f81fc2..c00e86fa8c1afa 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.c @@ -158,6 +158,18 @@ void ngtcp2_strm_update_rx_offset(ngtcp2_strm *strm, uint64_t offset) { ngtcp2_rob_remove_prefix(strm->rx.rob, offset); } +void ngtcp2_strm_discard_reordered_data(ngtcp2_strm *strm) { + if (strm->rx.rob == NULL) { + return; + } + + strm->rx.cont_offset = ngtcp2_strm_rx_offset(strm); + + 
ngtcp2_rob_free(strm->rx.rob); + ngtcp2_mem_free(strm->mem, strm->rx.rob); + strm->rx.rob = NULL; +} + void ngtcp2_strm_shutdown(ngtcp2_strm *strm, uint32_t flags) { strm->flags |= flags & NGTCP2_STRM_FLAG_SHUT_RDWR; } @@ -696,3 +708,25 @@ void ngtcp2_strm_set_app_error_code(ngtcp2_strm *strm, strm->flags |= NGTCP2_STRM_FLAG_APP_ERROR_CODE_SET; strm->app_error_code = app_error_code; } + +int ngtcp2_strm_require_retransmit_reset_stream(ngtcp2_strm *strm) { + return !ngtcp2_strm_is_all_tx_data_fin_acked(strm); +} + +int ngtcp2_strm_require_retransmit_stop_sending(ngtcp2_strm *strm) { + return !(strm->flags & NGTCP2_STRM_FLAG_SHUT_RD) || + ngtcp2_strm_rx_offset(strm) != strm->rx.last_offset; +} + +int ngtcp2_strm_require_retransmit_max_stream_data(ngtcp2_strm *strm, + ngtcp2_max_stream_data *fr) { + return fr->max_stream_data == strm->rx.max_offset && + !(strm->flags & + (NGTCP2_STRM_FLAG_SHUT_RD | NGTCP2_STRM_FLAG_STOP_SENDING)); +} + +int ngtcp2_strm_require_retransmit_stream_data_blocked( + ngtcp2_strm *strm, ngtcp2_stream_data_blocked *fr) { + return fr->offset == strm->tx.max_offset && + !(strm->flags & NGTCP2_STRM_FLAG_SHUT_WR); +} diff --git a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h index 223e38fc646b38..385302a5eafa9f 100644 --- a/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h +++ b/deps/ngtcp2/ngtcp2/lib/ngtcp2_strm.h @@ -36,6 +36,7 @@ #include "ngtcp2_gaptr.h" #include "ngtcp2_ksl.h" #include "ngtcp2_pq.h" +#include "ngtcp2_pkt.h" typedef struct ngtcp2_frame_chain ngtcp2_frame_chain; @@ -219,6 +220,12 @@ int ngtcp2_strm_recv_reordering(ngtcp2_strm *strm, const uint8_t *data, */ void ngtcp2_strm_update_rx_offset(ngtcp2_strm *strm, uint64_t offset); +/* + * ngtcp2_strm_discard_reordered_data discards all buffered reordered + * data. + */ +void ngtcp2_strm_discard_reordered_data(ngtcp2_strm *strm); + /* * ngtcp2_strm_shutdown shutdowns |strm|. |flags| should be * NGTCP2_STRM_FLAG_SHUT_RD, and/or NGTCP2_STRM_FLAG_SHUT_WR. @@ -320,4 +327,30 @@ int ngtcp2_strm_ack_data(ngtcp2_strm *strm, uint64_t offset, uint64_t len); */ void ngtcp2_strm_set_app_error_code(ngtcp2_strm *strm, uint64_t app_error_code); +/* + * ngtcp2_strm_require_retransmit_reset_stream returns nonzero if + * RESET_STREAM frame should be retransmitted. + */ +int ngtcp2_strm_require_retransmit_reset_stream(ngtcp2_strm *strm); + +/* + * ngtcp2_strm_require_retransmit_stop_sending returns nonzero if + * STOP_SENDING frame should be retransmitted. + */ +int ngtcp2_strm_require_retransmit_stop_sending(ngtcp2_strm *strm); + +/* + * ngtcp2_strm_require_retransmit_max_stream_data returns nonzero if + * MAX_STREAM_DATA frame should be retransmitted. + */ +int ngtcp2_strm_require_retransmit_max_stream_data(ngtcp2_strm *strm, + ngtcp2_max_stream_data *fr); + +/* + * ngtcp2_strm_require_retransmit_stream_data_blocked returns nonzero + * if STREAM_DATA_BLOCKED frame frame should be retransmitted. 
+ */ +int ngtcp2_strm_require_retransmit_stream_data_blocked( + ngtcp2_strm *strm, ngtcp2_stream_data_blocked *fr); + #endif /* NGTCP2_STRM_H */ From 7f5dd44ca6785f7fddfa6f52cb99d53bd06f633c Mon Sep 17 00:00:00 2001 From: npm CLI robot Date: Tue, 30 Apr 2024 23:53:22 -0700 Subject: [PATCH 30/41] deps: upgrade npm to 10.7.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/52767 Reviewed-By: Richard Lau Reviewed-By: Trivikram Kamat Reviewed-By: Luigi Pinca Reviewed-By: Michaël Zasso --- deps/npm/bin/npm.ps1 | 39 +- deps/npm/bin/npx-cli.js | 2 +- deps/npm/bin/npx.ps1 | 39 +- deps/npm/docs/content/commands/npm-access.md | 2 +- deps/npm/docs/content/commands/npm-doctor.md | 21 +- deps/npm/docs/content/commands/npm-ls.md | 2 +- deps/npm/docs/content/commands/npm-profile.md | 33 +- deps/npm/docs/content/commands/npm-search.md | 11 +- deps/npm/docs/content/commands/npm-token.md | 38 +- deps/npm/docs/content/commands/npm.md | 2 +- .../content/configuring-npm/package-json.md | 7 + .../configuring-npm/package-lock-json.md | 2 + deps/npm/docs/lib/index.js | 2 +- deps/npm/docs/output/commands/npm-access.html | 2 +- deps/npm/docs/output/commands/npm-doctor.html | 23 +- deps/npm/docs/output/commands/npm-ls.html | 2 +- .../npm/docs/output/commands/npm-profile.html | 31 +- deps/npm/docs/output/commands/npm-search.html | 10 +- deps/npm/docs/output/commands/npm-token.html | 34 +- deps/npm/docs/output/commands/npm.html | 2 +- .../output/configuring-npm/package-json.html | 6 + .../configuring-npm/package-lock-json.html | 1 + deps/npm/lib/arborist-cmd.js | 5 +- deps/npm/lib/{base-command.js => base-cmd.js} | 17 +- deps/npm/lib/cli.js | 4 +- deps/npm/lib/{cli-entry.js => cli/entry.js} | 57 +- deps/npm/lib/{utils => cli}/exit-handler.js | 89 +- .../npm/lib/{utils => cli}/update-notifier.js | 20 +- deps/npm/lib/{es6 => cli}/validate-engines.js | 0 deps/npm/lib/commands/access.js | 16 +- deps/npm/lib/commands/adduser.js | 11 +- deps/npm/lib/commands/audit.js | 396 +-- deps/npm/lib/commands/cache.js | 24 +- deps/npm/lib/commands/ci.js | 8 +- deps/npm/lib/commands/completion.js | 18 +- deps/npm/lib/commands/config.js | 77 +- deps/npm/lib/commands/dedupe.js | 5 +- deps/npm/lib/commands/deprecate.js | 2 +- deps/npm/lib/commands/diff.js | 10 +- deps/npm/lib/commands/dist-tag.js | 13 +- deps/npm/lib/commands/docs.js | 2 + deps/npm/lib/commands/doctor.js | 258 +- deps/npm/lib/commands/edit.js | 72 +- deps/npm/lib/commands/exec.js | 4 +- deps/npm/lib/commands/explain.js | 8 +- deps/npm/lib/commands/explore.js | 49 +- deps/npm/lib/commands/find-dupes.js | 5 +- deps/npm/lib/commands/fund.js | 43 +- deps/npm/lib/commands/get.js | 3 +- deps/npm/lib/commands/help-search.js | 25 +- deps/npm/lib/commands/help.js | 6 +- deps/npm/lib/commands/hook.js | 74 +- deps/npm/lib/commands/init.js | 24 +- deps/npm/lib/commands/install-ci-test.js | 6 +- deps/npm/lib/commands/install-test.js | 6 +- deps/npm/lib/commands/install.js | 15 +- deps/npm/lib/commands/link.js | 9 +- deps/npm/lib/commands/login.js | 11 +- deps/npm/lib/commands/logout.js | 7 +- deps/npm/lib/commands/ls.js | 41 +- deps/npm/lib/commands/org.js | 38 +- deps/npm/lib/commands/outdated.js | 33 +- deps/npm/lib/commands/owner.js | 12 +- deps/npm/lib/commands/pack.js | 9 +- deps/npm/lib/commands/ping.js | 11 +- deps/npm/lib/commands/pkg.js | 7 +- deps/npm/lib/commands/prefix.js | 8 +- deps/npm/lib/commands/profile.js | 79 +- deps/npm/lib/commands/prune.js | 5 +- deps/npm/lib/commands/publish.js 
| 23 +- deps/npm/lib/commands/query.js | 12 +- deps/npm/lib/commands/rebuild.js | 8 +- deps/npm/lib/commands/repo.js | 3 +- deps/npm/lib/commands/root.js | 7 +- deps/npm/lib/commands/run-script.js | 82 +- deps/npm/lib/commands/sbom.js | 11 +- deps/npm/lib/commands/search.js | 62 +- deps/npm/lib/commands/set.js | 3 +- deps/npm/lib/commands/shrinkwrap.js | 8 +- deps/npm/lib/commands/star.js | 7 +- deps/npm/lib/commands/stars.js | 7 +- deps/npm/lib/commands/team.js | 45 +- deps/npm/lib/commands/token.js | 146 +- deps/npm/lib/commands/uninstall.js | 6 +- deps/npm/lib/commands/unpublish.js | 8 +- deps/npm/lib/commands/unstar.js | 1 + deps/npm/lib/commands/update.js | 9 +- deps/npm/lib/commands/version.js | 55 +- deps/npm/lib/commands/view.js | 193 +- deps/npm/lib/commands/whoami.js | 8 +- deps/npm/lib/lifecycle-cmd.js | 5 +- deps/npm/lib/npm.js | 335 +-- deps/npm/lib/package-url-cmd.js | 11 +- deps/npm/lib/utils/audit-error.js | 6 +- deps/npm/lib/utils/auth.js | 4 +- deps/npm/lib/utils/did-you-mean.js | 17 +- deps/npm/lib/utils/display.js | 615 +++-- deps/npm/lib/utils/error-message.js | 22 +- deps/npm/lib/utils/explain-dep.js | 70 +- deps/npm/lib/utils/format-search-stream.js | 186 +- deps/npm/lib/utils/format.js | 50 + .../{workspaces => utils}/get-workspaces.js | 0 .../utils/{completion => }/installed-deep.js | 0 .../{completion => }/installed-shallow.js | 0 deps/npm/lib/utils/is-windows.js | 4 +- deps/npm/lib/utils/log-file.js | 71 +- deps/npm/lib/utils/log-shim.js | 59 - deps/npm/lib/utils/open-url-prompt.js | 11 +- deps/npm/lib/utils/open-url.js | 3 +- deps/npm/lib/utils/otplease.js | 2 - deps/npm/lib/utils/pulse-till-done.js | 26 - deps/npm/lib/utils/read-user-info.js | 17 +- deps/npm/lib/utils/reify-finish.js | 2 +- deps/npm/lib/utils/reify-output.js | 65 +- deps/npm/lib/utils/tar.js | 71 +- deps/npm/lib/utils/timers.js | 137 +- .../update-workspaces.js | 0 deps/npm/lib/utils/verify-signatures.js | 389 +++ deps/npm/man/man1/npm-access.1 | 2 +- deps/npm/man/man1/npm-doctor.1 | 14 +- deps/npm/man/man1/npm-ls.1 | 2 +- deps/npm/man/man1/npm-profile.1 | 31 +- deps/npm/man/man1/npm-search.1 | 12 +- deps/npm/man/man1/npm-token.1 | 34 +- deps/npm/man/man1/npm.1 | 2 +- deps/npm/man/man5/npm-json.5 | 2 + deps/npm/man/man5/package-json.5 | 2 + deps/npm/man/man5/package-lock-json.5 | 2 + deps/npm/node_modules/@colors/colors/LICENSE | 26 - .../@colors/colors/examples/normal-usage.js | 82 - .../@colors/colors/examples/safe-string.js | 78 - .../node_modules/@colors/colors/lib/colors.js | 211 -- .../@colors/colors/lib/custom/trap.js | 46 - .../@colors/colors/lib/custom/zalgo.js | 109 - .../colors/lib/extendStringPrototype.js | 110 - .../node_modules/@colors/colors/lib/index.js | 13 - .../@colors/colors/lib/maps/america.js | 10 - .../@colors/colors/lib/maps/rainbow.js | 11 - .../@colors/colors/lib/maps/random.js | 11 - .../@colors/colors/lib/maps/zebra.js | 5 - .../node_modules/@colors/colors/lib/styles.js | 95 - .../@colors/colors/lib/system/has-flag.js | 35 - .../colors/lib/system/supports-colors.js | 151 -- .../node_modules/@colors/colors/package.json | 45 - deps/npm/node_modules/@colors/colors/safe.js | 10 - .../@colors/colors/themes/generic-logging.js | 12 - .../@npmcli/arborist/bin/index.js | 9 +- .../@npmcli/arborist/bin/lib/logging.js | 2 +- .../@npmcli/arborist/bin/lib/timers.js | 32 +- .../@npmcli/arborist/lib/add-rm-pkg-deps.js | 2 +- .../arborist/lib/arborist/build-ideal-tree.js | 37 +- .../@npmcli/arborist/lib/arborist/index.js | 37 +- .../arborist/lib/arborist/isolated-reifier.js | 6 +- 
.../arborist/lib/arborist/load-actual.js | 4 +- .../arborist/lib/arborist/load-virtual.js | 2 +- .../@npmcli/arborist/lib/arborist/rebuild.js | 36 +- .../@npmcli/arborist/lib/arborist/reify.js | 657 +++-- .../@npmcli/arborist/lib/audit-report.js | 11 +- .../@npmcli/arborist/lib/dep-valid.js | 2 +- .../@npmcli/arborist/lib/inventory.js | 2 +- .../@npmcli/arborist/lib/place-dep.js | 2 +- .../arborist/lib/query-selector-all.js | 14 +- .../@npmcli/arborist/lib/shrinkwrap.js | 4 +- .../@npmcli/arborist/lib/tracker.js | 41 +- .../@npmcli/arborist/package.json | 24 +- .../config/lib/definitions/definitions.js | 4 +- .../node_modules/@npmcli/config/lib/index.js | 41 +- .../node_modules/@npmcli/config/package.json | 6 +- .../@npmcli/disparity-colors/lib/index.js | 34 - .../node_modules/ansi-styles/index.js | 163 -- .../node_modules/ansi-styles/license | 9 - .../node_modules/ansi-styles/package.json | 56 - .../@npmcli/disparity-colors/package.json | 70 - .../npm/node_modules/@npmcli/git/lib/spawn.js | 2 +- .../npm/node_modules/@npmcli/git/package.json | 4 +- .../installed-package-contents/bin/index.js | 44 + .../installed-package-contents/lib/index.js | 121 +- .../installed-package-contents/package.json | 12 +- .../@npmcli/metavuln-calculator/lib/index.js | 22 +- .../@npmcli/metavuln-calculator/package.json | 11 +- .../@npmcli/package-json/lib/index.js | 6 + .../@npmcli/package-json/lib/normalize.js | 2 +- .../@npmcli/package-json/package.json | 8 +- .../@npmcli/redact/lib/deep-map.js | 59 + .../node_modules/@npmcli/redact/lib/index.js | 25 +- .../@npmcli/redact/lib/matchers.js | 81 + .../node_modules/@npmcli/redact/lib/server.js | 34 + .../node_modules/@npmcli/redact/lib/utils.js | 202 ++ .../node_modules/@npmcli/redact/package.json | 10 +- .../@npmcli/run-script/lib/run-script-pkg.js | 36 +- .../@npmcli/run-script/package.json | 7 +- .../node_modules/are-we-there-yet/LICENSE.md | 18 - .../are-we-there-yet/lib/index.js | 4 - .../are-we-there-yet/lib/tracker-base.js | 13 - .../are-we-there-yet/lib/tracker-group.js | 112 - .../are-we-there-yet/lib/tracker-stream.js | 42 - .../are-we-there-yet/lib/tracker.js | 34 - deps/npm/node_modules/cli-table3/LICENSE | 21 - deps/npm/node_modules/cli-table3/index.js | 1 - deps/npm/node_modules/cli-table3/package.json | 100 - deps/npm/node_modules/cli-table3/src/cell.js | 409 --- deps/npm/node_modules/cli-table3/src/debug.js | 28 - .../cli-table3/src/layout-manager.js | 254 -- deps/npm/node_modules/cli-table3/src/table.js | 106 - deps/npm/node_modules/cli-table3/src/utils.js | 336 --- deps/npm/node_modules/clone/LICENSE | 18 - deps/npm/node_modules/clone/clone.iml | 10 - deps/npm/node_modules/clone/clone.js | 166 -- deps/npm/node_modules/clone/package.json | 51 - deps/npm/node_modules/color-support/README.md | 129 - deps/npm/node_modules/color-support/bin.js | 3 - .../npm/node_modules/color-support/browser.js | 14 - deps/npm/node_modules/color-support/index.js | 134 - .../node_modules/color-support/package.json | 36 - deps/npm/node_modules/columnify/LICENSE | 21 - deps/npm/node_modules/columnify/Makefile | 9 - deps/npm/node_modules/columnify/columnify.js | 306 --- deps/npm/node_modules/columnify/index.js | 297 --- deps/npm/node_modules/columnify/package.json | 53 - deps/npm/node_modules/columnify/utils.js | 193 -- deps/npm/node_modules/columnify/width.js | 6 - .../console-control-strings/LICENSE | 13 - .../console-control-strings/index.js | 125 - .../console-control-strings/package.json | 27 - deps/npm/node_modules/defaults/LICENSE | 22 - 
deps/npm/node_modules/defaults/index.js | 13 - deps/npm/node_modules/defaults/package.json | 33 - deps/npm/node_modules/defaults/test.js | 34 - deps/npm/node_modules/gauge/LICENSE.md | 20 - deps/npm/node_modules/gauge/lib/base-theme.js | 18 - deps/npm/node_modules/gauge/lib/error.js | 24 - deps/npm/node_modules/gauge/lib/has-color.js | 4 - deps/npm/node_modules/gauge/lib/index.js | 289 --- deps/npm/node_modules/gauge/lib/plumbing.js | 50 - deps/npm/node_modules/gauge/lib/process.js | 3 - .../node_modules/gauge/lib/progress-bar.js | 41 - .../node_modules/gauge/lib/render-template.js | 222 -- .../node_modules/gauge/lib/set-immediate.js | 7 - .../node_modules/gauge/lib/set-interval.js | 3 - deps/npm/node_modules/gauge/lib/spin.js | 5 - .../node_modules/gauge/lib/template-item.js | 87 - deps/npm/node_modules/gauge/lib/theme-set.js | 122 - deps/npm/node_modules/gauge/lib/themes.js | 56 - .../node_modules/gauge/lib/wide-truncate.js | 31 - deps/npm/node_modules/gauge/package.json | 68 - deps/npm/node_modules/has-unicode/LICENSE | 14 - deps/npm/node_modules/has-unicode/index.js | 16 - .../npm/node_modules/has-unicode/package.json | 30 - .../node_modules/libnpmaccess/package.json | 8 +- .../libnpmdiff/lib/format-diff.js | 38 +- .../node_modules/libnpmdiff/lib/tarball.js | 2 +- deps/npm/node_modules/libnpmdiff/package.json | 11 +- deps/npm/node_modules/libnpmexec/README.md | 1 - deps/npm/node_modules/libnpmexec/lib/index.js | 30 +- .../node_modules/libnpmexec/lib/run-script.js | 62 +- deps/npm/node_modules/libnpmexec/package.json | 13 +- deps/npm/node_modules/libnpmfund/lib/index.js | 2 +- deps/npm/node_modules/libnpmfund/package.json | 4 +- deps/npm/node_modules/libnpmhook/package.json | 6 +- deps/npm/node_modules/libnpmorg/package.json | 6 +- deps/npm/node_modules/libnpmpack/lib/index.js | 9 +- deps/npm/node_modules/libnpmpack/package.json | 10 +- .../node_modules/libnpmpublish/lib/publish.js | 2 +- .../node_modules/libnpmpublish/package.json | 10 +- .../node_modules/libnpmsearch/package.json | 6 +- deps/npm/node_modules/libnpmteam/package.json | 6 +- deps/npm/node_modules/libnpmversion/README.md | 1 - .../libnpmversion/lib/enforce-clean.js | 2 +- .../node_modules/libnpmversion/lib/index.js | 2 - .../libnpmversion/lib/read-json.js | 3 +- .../node_modules/libnpmversion/lib/version.js | 6 +- .../libnpmversion/lib/write-json.js | 3 +- .../node_modules/libnpmversion/package.json | 10 +- .../lru-cache/dist/commonjs/index.min.js | 2 + .../lru-cache/dist/esm/index.min.js | 2 + deps/npm/node_modules/lru-cache/package.json | 13 +- .../make-fetch-happen/lib/cache/entry.js | 2 + .../make-fetch-happen/lib/remote.js | 4 + .../make-fetch-happen/package.json | 15 +- .../node_modules/proc-log}/LICENSE | 2 +- .../node_modules/proc-log/lib/index.js | 23 + .../node_modules/proc-log}/package.json | 48 +- .../node_modules/npm-package-arg/lib/npa.js | 2 +- .../node_modules/npm-package-arg/package.json | 19 +- .../npm/node_modules/npm-profile/lib/index.js | 5 +- .../npm/node_modules/npm-profile/package.json | 18 +- .../npm-registry-fetch/lib/check-response.js | 2 +- .../npm-registry-fetch/lib/index.js | 5 - .../npm-registry-fetch/package.json | 10 +- deps/npm/node_modules/npmlog/LICENSE.md | 20 - deps/npm/node_modules/npmlog/lib/log.js | 400 --- deps/npm/node_modules/pacote/README.md | 2 - deps/npm/node_modules/pacote/lib/dir.js | 7 +- deps/npm/node_modules/pacote/lib/fetcher.js | 15 +- deps/npm/node_modules/pacote/lib/file.js | 31 +- deps/npm/node_modules/pacote/lib/git.js | 8 +- 
deps/npm/node_modules/pacote/lib/registry.js | 9 +- deps/npm/node_modules/pacote/package.json | 15 +- deps/npm/node_modules/proc-log/lib/index.js | 174 +- deps/npm/node_modules/proc-log/package.json | 11 +- .../{color-support => proggy}/LICENSE | 2 +- deps/npm/node_modules/proggy/lib/client.js | 114 + deps/npm/node_modules/proggy/lib/index.js | 15 + deps/npm/node_modules/proggy/lib/tracker.js | 68 + .../{are-we-there-yet => proggy}/package.json | 57 +- .../node_modules/read-package-json/LICENSE | 15 - .../read-package-json/lib/read-json.js | 589 ----- .../read-package-json/package.json | 65 - .../npm/node_modules/set-blocking/LICENSE.txt | 14 - deps/npm/node_modules/set-blocking/index.js | 7 - .../node_modules/set-blocking/package.json | 42 - .../sprintf-js/CONTRIBUTORS.md | 0 .../node_modules => }/sprintf-js/LICENSE | 0 deps/npm/node_modules/sprintf-js/bower.json | 14 + .../node_modules/sprintf-js/demo/angular.html | 20 + .../sprintf-js/dist/.gitattributes | 0 .../sprintf-js/dist/angular-sprintf.min.js | 0 .../sprintf-js/dist/sprintf.min.js | 0 deps/npm/node_modules/sprintf-js/gruntfile.js | 36 + .../node_modules => }/sprintf-js/package.json | 0 .../sprintf-js/src/angular-sprintf.js | 0 .../sprintf-js/src/sprintf.js | 0 deps/npm/node_modules/sprintf-js/test/test.js | 82 + deps/npm/node_modules/wcwidth/LICENSE | 30 - deps/npm/node_modules/wcwidth/combining.js | 50 - deps/npm/node_modules/wcwidth/docs/index.md | 65 - deps/npm/node_modules/wcwidth/index.js | 99 - deps/npm/node_modules/wcwidth/package.json | 42 - deps/npm/node_modules/wcwidth/test/index.js | 64 - deps/npm/node_modules/wide-align/LICENSE | 14 - deps/npm/node_modules/wide-align/align.js | 65 - deps/npm/node_modules/wide-align/package.json | 33 - deps/npm/package.json | 41 +- .../{utils => cli}/exit-handler.js.test.cjs | 47 +- .../test/lib/cli/update-notifier.js.test.cjs | 102 + .../test/lib/commands/audit.js.test.cjs | 25 +- .../test/lib/commands/completion.js.test.cjs | 316 ++- .../test/lib/commands/config.js.test.cjs | 111 +- .../test/lib/commands/dist-tag.js.test.cjs | 17 +- .../test/lib/commands/doctor.js.test.cjs | 2292 ++++++++--------- .../test/lib/commands/fund.js.test.cjs | 26 +- .../test/lib/commands/init.js.test.cjs | 1 + .../test/lib/commands/ls.js.test.cjs | 86 +- .../test/lib/commands/outdated.js.test.cjs | 44 +- .../test/lib/commands/pack.js.test.cjs | 109 +- .../test/lib/commands/profile.js.test.cjs | 13 - .../test/lib/commands/publish.js.test.cjs | 488 +--- .../test/lib/commands/search.js.test.cjs | 1106 +++++++- .../test/lib/commands/shrinkwrap.js.test.cjs | 51 +- .../test/lib/commands/view.js.test.cjs | 326 ++- .../tap-snapshots/test/lib/docs.js.test.cjs | 17 +- .../tap-snapshots/test/lib/npm.js.test.cjs | 10 + .../test/lib/utils/error-message.js.test.cjs | 165 +- .../test/lib/utils/explain-dep.js.test.cjs | 86 +- .../lib/utils/explain-eresolve.js.test.cjs | 172 +- .../test/lib/utils/log-file.js.test.cjs | 119 +- .../lib/utils/open-url-prompt.js.test.cjs | 1 - .../test/lib/utils/open-url.js.test.cjs | 2 - .../test/lib/utils/reify-output.js.test.cjs | 14 +- .../test/lib/utils/tar.js.test.cjs | 82 +- .../lib/utils/update-notifier.js.test.cjs | 102 - deps/npm/test/bin/windows-shims.js | 31 +- .../fixtures/libnpmsearch-stream-result.js | 1 + deps/npm/test/fixtures/mock-logs.js | 198 +- deps/npm/test/fixtures/mock-npm.js | 97 +- deps/npm/test/fixtures/sandbox.js | 336 --- .../sigstore/valid-sigstore-attestations.json | 0 .../sigstore/valid-tuf-js-attestations.json | 0 deps/npm/test/lib/arborist-cmd.js | 9 +- 
deps/npm/test/lib/cli.js | 2 +- .../test/lib/{cli-entry.js => cli/entry.js} | 95 +- .../test/lib/{utils => cli}/exit-handler.js | 301 ++- .../lib/{utils => cli}/update-notifier.js | 2 +- .../test/lib/{es6 => cli}/validate-engines.js | 2 +- deps/npm/test/lib/commands/access.js | 22 +- deps/npm/test/lib/commands/audit.js | 18 +- deps/npm/test/lib/commands/ci.js | 3 +- deps/npm/test/lib/commands/config.js | 559 ++-- deps/npm/test/lib/commands/dist-tag.js | 36 +- deps/npm/test/lib/commands/doctor.js | 23 +- deps/npm/test/lib/commands/exec.js | 2 +- deps/npm/test/lib/commands/help-search.js | 2 +- deps/npm/test/lib/commands/hook.js | 66 +- deps/npm/test/lib/commands/init.js | 5 +- deps/npm/test/lib/commands/login.js | 6 +- deps/npm/test/lib/commands/logout.js | 24 +- deps/npm/test/lib/commands/org.js | 34 +- deps/npm/test/lib/commands/owner.js | 10 +- deps/npm/test/lib/commands/pack.js | 78 +- deps/npm/test/lib/commands/ping.js | 23 +- deps/npm/test/lib/commands/profile.js | 49 +- deps/npm/test/lib/commands/publish.js | 52 +- deps/npm/test/lib/commands/query.js | 8 +- deps/npm/test/lib/commands/run-script.js | 214 +- deps/npm/test/lib/commands/search.js | 395 +-- deps/npm/test/lib/commands/shrinkwrap.js | 26 +- deps/npm/test/lib/commands/stars.js | 6 +- deps/npm/test/lib/commands/token.js | 677 ++--- deps/npm/test/lib/commands/update.js | 4 +- deps/npm/test/lib/commands/version.js | 53 +- deps/npm/test/lib/commands/view.js | 126 +- deps/npm/test/lib/docs.js | 4 +- deps/npm/test/lib/load-all-commands.js | 6 +- deps/npm/test/lib/npm.js | 308 +-- deps/npm/test/lib/utils/audit-error.js | 10 +- deps/npm/test/lib/utils/display.js | 296 ++- .../{workspaces => utils}/get-workspaces.js | 2 +- .../utils/{completion => }/installed-deep.js | 4 +- .../{completion => }/installed-shallow.js | 4 +- deps/npm/test/lib/utils/log-file.js | 4 +- deps/npm/test/lib/utils/log-shim.js | 101 - deps/npm/test/lib/utils/otplease.js | 4 +- deps/npm/test/lib/utils/pulse-till-done.js | 35 - deps/npm/test/lib/utils/read-user-info.js | 60 +- deps/npm/test/lib/utils/reify-output.js | 13 +- deps/npm/test/lib/utils/tar.js | 6 +- deps/npm/test/lib/utils/timers.js | 105 +- deps/npm/test/lib/utils/web-auth.js | 4 +- 418 files changed, 9123 insertions(+), 16420 deletions(-) rename deps/npm/lib/{base-command.js => base-cmd.js} (93%) rename deps/npm/lib/{cli-entry.js => cli/entry.js} (56%) rename deps/npm/lib/{utils => cli}/exit-handler.js (65%) rename deps/npm/lib/{utils => cli}/update-notifier.js (86%) rename deps/npm/lib/{es6 => cli}/validate-engines.js (100%) create mode 100644 deps/npm/lib/utils/format.js rename deps/npm/lib/{workspaces => utils}/get-workspaces.js (100%) rename deps/npm/lib/utils/{completion => }/installed-deep.js (100%) rename deps/npm/lib/utils/{completion => }/installed-shallow.js (100%) delete mode 100644 deps/npm/lib/utils/log-shim.js delete mode 100644 deps/npm/lib/utils/pulse-till-done.js rename deps/npm/lib/{workspaces => utils}/update-workspaces.js (100%) create mode 100644 deps/npm/lib/utils/verify-signatures.js delete mode 100644 deps/npm/node_modules/@colors/colors/LICENSE delete mode 100644 deps/npm/node_modules/@colors/colors/examples/normal-usage.js delete mode 100644 deps/npm/node_modules/@colors/colors/examples/safe-string.js delete mode 100644 deps/npm/node_modules/@colors/colors/lib/colors.js delete mode 100644 deps/npm/node_modules/@colors/colors/lib/custom/trap.js delete mode 100644 deps/npm/node_modules/@colors/colors/lib/custom/zalgo.js delete mode 100644 
deps/npm/node_modules/@colors/colors/lib/extendStringPrototype.js delete mode 100644 deps/npm/node_modules/@colors/colors/lib/index.js delete mode 100644 deps/npm/node_modules/@colors/colors/lib/maps/america.js delete mode 100644 deps/npm/node_modules/@colors/colors/lib/maps/rainbow.js delete mode 100644 deps/npm/node_modules/@colors/colors/lib/maps/random.js delete mode 100644 deps/npm/node_modules/@colors/colors/lib/maps/zebra.js delete mode 100644 deps/npm/node_modules/@colors/colors/lib/styles.js delete mode 100644 deps/npm/node_modules/@colors/colors/lib/system/has-flag.js delete mode 100644 deps/npm/node_modules/@colors/colors/lib/system/supports-colors.js delete mode 100644 deps/npm/node_modules/@colors/colors/package.json delete mode 100644 deps/npm/node_modules/@colors/colors/safe.js delete mode 100644 deps/npm/node_modules/@colors/colors/themes/generic-logging.js delete mode 100644 deps/npm/node_modules/@npmcli/disparity-colors/lib/index.js delete mode 100644 deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/index.js delete mode 100644 deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/license delete mode 100644 deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/package.json delete mode 100644 deps/npm/node_modules/@npmcli/disparity-colors/package.json create mode 100755 deps/npm/node_modules/@npmcli/installed-package-contents/bin/index.js mode change 100755 => 100644 deps/npm/node_modules/@npmcli/installed-package-contents/lib/index.js create mode 100644 deps/npm/node_modules/@npmcli/redact/lib/deep-map.js create mode 100644 deps/npm/node_modules/@npmcli/redact/lib/matchers.js create mode 100644 deps/npm/node_modules/@npmcli/redact/lib/server.js create mode 100644 deps/npm/node_modules/@npmcli/redact/lib/utils.js delete mode 100644 deps/npm/node_modules/are-we-there-yet/LICENSE.md delete mode 100644 deps/npm/node_modules/are-we-there-yet/lib/index.js delete mode 100644 deps/npm/node_modules/are-we-there-yet/lib/tracker-base.js delete mode 100644 deps/npm/node_modules/are-we-there-yet/lib/tracker-group.js delete mode 100644 deps/npm/node_modules/are-we-there-yet/lib/tracker-stream.js delete mode 100644 deps/npm/node_modules/are-we-there-yet/lib/tracker.js delete mode 100644 deps/npm/node_modules/cli-table3/LICENSE delete mode 100644 deps/npm/node_modules/cli-table3/index.js delete mode 100644 deps/npm/node_modules/cli-table3/package.json delete mode 100644 deps/npm/node_modules/cli-table3/src/cell.js delete mode 100644 deps/npm/node_modules/cli-table3/src/debug.js delete mode 100644 deps/npm/node_modules/cli-table3/src/layout-manager.js delete mode 100644 deps/npm/node_modules/cli-table3/src/table.js delete mode 100644 deps/npm/node_modules/cli-table3/src/utils.js delete mode 100644 deps/npm/node_modules/clone/LICENSE delete mode 100644 deps/npm/node_modules/clone/clone.iml delete mode 100644 deps/npm/node_modules/clone/clone.js delete mode 100644 deps/npm/node_modules/clone/package.json delete mode 100644 deps/npm/node_modules/color-support/README.md delete mode 100755 deps/npm/node_modules/color-support/bin.js delete mode 100644 deps/npm/node_modules/color-support/browser.js delete mode 100644 deps/npm/node_modules/color-support/index.js delete mode 100644 deps/npm/node_modules/color-support/package.json delete mode 100644 deps/npm/node_modules/columnify/LICENSE delete mode 100644 deps/npm/node_modules/columnify/Makefile delete mode 100644 deps/npm/node_modules/columnify/columnify.js delete mode 100644 
deps/npm/node_modules/columnify/index.js delete mode 100644 deps/npm/node_modules/columnify/package.json delete mode 100644 deps/npm/node_modules/columnify/utils.js delete mode 100644 deps/npm/node_modules/columnify/width.js delete mode 100644 deps/npm/node_modules/console-control-strings/LICENSE delete mode 100644 deps/npm/node_modules/console-control-strings/index.js delete mode 100644 deps/npm/node_modules/console-control-strings/package.json delete mode 100644 deps/npm/node_modules/defaults/LICENSE delete mode 100644 deps/npm/node_modules/defaults/index.js delete mode 100644 deps/npm/node_modules/defaults/package.json delete mode 100644 deps/npm/node_modules/defaults/test.js delete mode 100644 deps/npm/node_modules/gauge/LICENSE.md delete mode 100644 deps/npm/node_modules/gauge/lib/base-theme.js delete mode 100644 deps/npm/node_modules/gauge/lib/error.js delete mode 100644 deps/npm/node_modules/gauge/lib/has-color.js delete mode 100644 deps/npm/node_modules/gauge/lib/index.js delete mode 100644 deps/npm/node_modules/gauge/lib/plumbing.js delete mode 100644 deps/npm/node_modules/gauge/lib/process.js delete mode 100644 deps/npm/node_modules/gauge/lib/progress-bar.js delete mode 100644 deps/npm/node_modules/gauge/lib/render-template.js delete mode 100644 deps/npm/node_modules/gauge/lib/set-immediate.js delete mode 100644 deps/npm/node_modules/gauge/lib/set-interval.js delete mode 100644 deps/npm/node_modules/gauge/lib/spin.js delete mode 100644 deps/npm/node_modules/gauge/lib/template-item.js delete mode 100644 deps/npm/node_modules/gauge/lib/theme-set.js delete mode 100644 deps/npm/node_modules/gauge/lib/themes.js delete mode 100644 deps/npm/node_modules/gauge/lib/wide-truncate.js delete mode 100644 deps/npm/node_modules/gauge/package.json delete mode 100644 deps/npm/node_modules/has-unicode/LICENSE delete mode 100644 deps/npm/node_modules/has-unicode/index.js delete mode 100644 deps/npm/node_modules/has-unicode/package.json create mode 100644 deps/npm/node_modules/lru-cache/dist/commonjs/index.min.js create mode 100644 deps/npm/node_modules/lru-cache/dist/esm/index.min.js rename deps/npm/node_modules/{@npmcli/disparity-colors => node-gyp/node_modules/proc-log}/LICENSE (96%) create mode 100644 deps/npm/node_modules/node-gyp/node_modules/proc-log/lib/index.js rename deps/npm/node_modules/{npmlog => node-gyp/node_modules/proc-log}/package.json (59%) delete mode 100644 deps/npm/node_modules/npmlog/LICENSE.md delete mode 100644 deps/npm/node_modules/npmlog/lib/log.js rename deps/npm/node_modules/{color-support => proggy}/LICENSE (93%) create mode 100644 deps/npm/node_modules/proggy/lib/client.js create mode 100644 deps/npm/node_modules/proggy/lib/index.js create mode 100644 deps/npm/node_modules/proggy/lib/tracker.js rename deps/npm/node_modules/{are-we-there-yet => proggy}/package.json (53%) delete mode 100644 deps/npm/node_modules/read-package-json/LICENSE delete mode 100644 deps/npm/node_modules/read-package-json/lib/read-json.js delete mode 100644 deps/npm/node_modules/read-package-json/package.json delete mode 100644 deps/npm/node_modules/set-blocking/LICENSE.txt delete mode 100644 deps/npm/node_modules/set-blocking/index.js delete mode 100644 deps/npm/node_modules/set-blocking/package.json rename deps/npm/node_modules/{ip-address/node_modules => }/sprintf-js/CONTRIBUTORS.md (100%) rename deps/npm/node_modules/{ip-address/node_modules => }/sprintf-js/LICENSE (100%) create mode 100644 deps/npm/node_modules/sprintf-js/bower.json create mode 100644 
deps/npm/node_modules/sprintf-js/demo/angular.html rename deps/npm/node_modules/{ip-address/node_modules => }/sprintf-js/dist/.gitattributes (100%) rename deps/npm/node_modules/{ip-address/node_modules => }/sprintf-js/dist/angular-sprintf.min.js (100%) rename deps/npm/node_modules/{ip-address/node_modules => }/sprintf-js/dist/sprintf.min.js (100%) create mode 100644 deps/npm/node_modules/sprintf-js/gruntfile.js rename deps/npm/node_modules/{ip-address/node_modules => }/sprintf-js/package.json (100%) rename deps/npm/node_modules/{ip-address/node_modules => }/sprintf-js/src/angular-sprintf.js (100%) rename deps/npm/node_modules/{ip-address/node_modules => }/sprintf-js/src/sprintf.js (100%) create mode 100644 deps/npm/node_modules/sprintf-js/test/test.js delete mode 100644 deps/npm/node_modules/wcwidth/LICENSE delete mode 100644 deps/npm/node_modules/wcwidth/combining.js delete mode 100644 deps/npm/node_modules/wcwidth/docs/index.md delete mode 100644 deps/npm/node_modules/wcwidth/index.js delete mode 100644 deps/npm/node_modules/wcwidth/package.json delete mode 100644 deps/npm/node_modules/wcwidth/test/index.js delete mode 100755 deps/npm/node_modules/wide-align/LICENSE delete mode 100755 deps/npm/node_modules/wide-align/align.js delete mode 100755 deps/npm/node_modules/wide-align/package.json rename deps/npm/tap-snapshots/test/lib/{utils => cli}/exit-handler.js.test.cjs (63%) create mode 100644 deps/npm/tap-snapshots/test/lib/cli/update-notifier.js.test.cjs delete mode 100644 deps/npm/tap-snapshots/test/lib/utils/update-notifier.js.test.cjs delete mode 100644 deps/npm/test/fixtures/sandbox.js rename deps/npm/test/{lib => }/fixtures/sigstore/valid-sigstore-attestations.json (100%) rename deps/npm/test/{lib => }/fixtures/sigstore/valid-tuf-js-attestations.json (100%) rename deps/npm/test/lib/{cli-entry.js => cli/entry.js} (52%) rename deps/npm/test/lib/{utils => cli}/exit-handler.js (69%) rename deps/npm/test/lib/{utils => cli}/update-notifier.js (99%) rename deps/npm/test/lib/{es6 => cli}/validate-engines.js (94%) rename deps/npm/test/lib/{workspaces => utils}/get-workspaces.js (98%) rename deps/npm/test/lib/utils/{completion => }/installed-deep.js (96%) rename deps/npm/test/lib/utils/{completion => }/installed-shallow.js (89%) delete mode 100644 deps/npm/test/lib/utils/log-shim.js delete mode 100644 deps/npm/test/lib/utils/pulse-till-done.js diff --git a/deps/npm/bin/npm.ps1 b/deps/npm/bin/npm.ps1 index 399e33360e853c..04a1fd478ef9dd 100644 --- a/deps/npm/bin/npm.ps1 +++ b/deps/npm/bin/npm.ps1 @@ -1,35 +1,32 @@ #!/usr/bin/env pwsh -$basedir=Split-Path $MyInvocation.MyCommand.Definition -Parent -$exe="" -if ($PSVersionTable.PSVersion -lt "6.0" -or $IsWindows) { - # Fix case when both the Windows and Linux builds of Node - # are installed in the same directory - $exe=".exe" +$NODE_EXE="$PSScriptRoot/node.exe" +if (-not (Test-Path $NODE_EXE)) { + $NODE_EXE="$PSScriptRoot/node" } -$ret=0 - -$nodeexe = "node$exe" -$nodebin = $(Get-Command $nodeexe -ErrorAction SilentlyContinue -ErrorVariable F).Source -if ($nodebin -eq $null) { - Write-Host "$nodeexe not found." 
- exit 1 +if (-not (Test-Path $NODE_EXE)) { + $NODE_EXE="node" } -$nodedir = $(New-Object -ComObject Scripting.FileSystemObject).GetFile("$nodebin").ParentFolder.Path -$npmprefixjs="$nodedir/node_modules/npm/bin/npm-prefix.js" -$npmprefix=(& $nodeexe $npmprefixjs) +$NPM_PREFIX_JS="$PSScriptRoot/node_modules/npm/bin/npm-prefix.js" +$NPM_CLI_JS="$PSScriptRoot/node_modules/npm/bin/npm-cli.js" +$NPM_PREFIX=(& $NODE_EXE $NPM_PREFIX_JS) + if ($LASTEXITCODE -ne 0) { Write-Host "Could not determine Node.js install directory" exit 1 } -$npmprefixclijs="$npmprefix/node_modules/npm/bin/npm-cli.js" + +$NPM_PREFIX_NPM_CLI_JS="$NPM_PREFIX/node_modules/npm/bin/npm-cli.js" +if (Test-Path $NPM_PREFIX_NPM_CLI_JS) { + $NPM_CLI_JS=$NPM_PREFIX_NPM_CLI_JS +} # Support pipeline input if ($MyInvocation.ExpectingInput) { - $input | & $nodeexe $npmprefixclijs $args + $input | & $NODE_EXE $NPM_CLI_JS $args } else { - & $nodeexe $npmprefixclijs $args + & $NODE_EXE $NPM_CLI_JS $args } -$ret=$LASTEXITCODE -exit $ret + +exit $LASTEXITCODE diff --git a/deps/npm/bin/npx-cli.js b/deps/npm/bin/npx-cli.js index 17d96fb26267c7..e2e1b87906abe0 100755 --- a/deps/npm/bin/npx-cli.js +++ b/deps/npm/bin/npx-cli.js @@ -26,7 +26,7 @@ const removed = new Set([ const { definitions, shorthands } = require('@npmcli/config/lib/definitions') const npmSwitches = Object.entries(definitions) - .filter(([key, { type }]) => type === Boolean || + .filter(([, { type }]) => type === Boolean || (Array.isArray(type) && type.includes(Boolean))) .map(([key]) => key) diff --git a/deps/npm/bin/npx.ps1 b/deps/npm/bin/npx.ps1 index 1d59fc52083d70..28dae51b22ca93 100644 --- a/deps/npm/bin/npx.ps1 +++ b/deps/npm/bin/npx.ps1 @@ -1,35 +1,32 @@ #!/usr/bin/env pwsh -$basedir=Split-Path $MyInvocation.MyCommand.Definition -Parent -$exe="" -if ($PSVersionTable.PSVersion -lt "6.0" -or $IsWindows) { - # Fix case when both the Windows and Linux builds of Node - # are installed in the same directory - $exe=".exe" +$NODE_EXE="$PSScriptRoot/node.exe" +if (-not (Test-Path $NODE_EXE)) { + $NODE_EXE="$PSScriptRoot/node" } -$ret=0 - -$nodeexe = "node$exe" -$nodebin = $(Get-Command $nodeexe -ErrorAction SilentlyContinue -ErrorVariable F).Source -if ($nodebin -eq $null) { - Write-Host "$nodeexe not found." 
- exit 1 +if (-not (Test-Path $NODE_EXE)) { + $NODE_EXE="node" } -$nodedir = $(New-Object -ComObject Scripting.FileSystemObject).GetFile("$nodebin").ParentFolder.Path -$npmprefixjs="$nodedir/node_modules/npm/bin/npm-prefix.js" -$npmprefix=(& $nodeexe $npmprefixjs) +$NPM_PREFIX_JS="$PSScriptRoot/node_modules/npm/bin/npm-prefix.js" +$NPX_CLI_JS="$PSScriptRoot/node_modules/npm/bin/npx-cli.js" +$NPM_PREFIX=(& $NODE_EXE $NPM_PREFIX_JS) + if ($LASTEXITCODE -ne 0) { Write-Host "Could not determine Node.js install directory" exit 1 } -$npmprefixclijs="$npmprefix/node_modules/npm/bin/npx-cli.js" + +$NPM_PREFIX_NPX_CLI_JS="$NPM_PREFIX/node_modules/npm/bin/npx-cli.js" +if (Test-Path $NPM_PREFIX_NPX_CLI_JS) { + $NPX_CLI_JS=$NPM_PREFIX_NPX_CLI_JS +} # Support pipeline input if ($MyInvocation.ExpectingInput) { - $input | & $nodeexe $npmprefixclijs $args + $input | & $NODE_EXE $NPX_CLI_JS $args } else { - & $nodeexe $npmprefixclijs $args + & $NODE_EXE $NPX_CLI_JS $args } -$ret=$LASTEXITCODE -exit $ret + +exit $LASTEXITCODE diff --git a/deps/npm/docs/content/commands/npm-access.md b/deps/npm/docs/content/commands/npm-access.md index 9e9f385fe386de..e08030deba076d 100644 --- a/deps/npm/docs/content/commands/npm-access.md +++ b/deps/npm/docs/content/commands/npm-access.md @@ -7,7 +7,7 @@ description: Set access level on published packages ### Synopsis ```bash -npm access list packages [|| [] +npm access list packages [||] [] npm access list collaborators [ []] npm access get status [] npm access set status=public|private [] diff --git a/deps/npm/docs/content/commands/npm-doctor.md b/deps/npm/docs/content/commands/npm-doctor.md index b36ec3883be03a..a96036a773b2a1 100644 --- a/deps/npm/docs/content/commands/npm-doctor.md +++ b/deps/npm/docs/content/commands/npm-doctor.md @@ -7,7 +7,7 @@ description: Check the health of your npm environment ### Synopsis ```bash -npm doctor [ping] [registry] [versions] [environment] [permissions] [cache] +npm doctor [connection] [registry] [versions] [environment] [permissions] [cache] ``` Note: This command is unaware of workspaces. @@ -38,20 +38,21 @@ there are any recommended changes, it will display them. By default npm runs all of these checks. You can limit what checks are ran by specifying them as extra arguments. -#### `npm ping` +#### `Connecting to the registry` By default, npm installs from the primary npm registry, -`registry.npmjs.org`. `npm doctor` hits a special ping endpoint within the -registry. This can also be checked with `npm ping`. If this check fails, -you may be using a proxy that needs to be configured, or may need to talk -to your IT staff to get access over HTTPS to `registry.npmjs.org`. +`registry.npmjs.org`. `npm doctor` hits a special connection testing +endpoint within the registry. This can also be checked with `npm ping`. +If this check fails, you may be using a proxy that needs to be +configured, or may need to talk to your IT staff to get access over +HTTPS to `registry.npmjs.org`. This check is done against whichever registry you've configured (you can see what that is by running `npm config get registry`), and if you're using a private registry that doesn't support the `/whoami` endpoint supported by the primary registry, this check may fail. -#### `npm -v` +#### `Checking npm version` While Node.js may come bundled with a particular version of npm, it's the policy of the CLI team that we recommend all users run `npm@latest` if they @@ -61,7 +62,7 @@ support releases typically only receive critical security and regression fixes. 
The team believes that the latest tested version of npm is almost always likely to be the most functional and defect-free version of npm. -#### `node -v` +#### `Checking node version` For most users, in most circumstances, the best version of Node will be the latest long-term support (LTS) release. Those of you who want access to new @@ -70,7 +71,7 @@ be running a newer version, and some may be required to run an older version of Node because of enterprise change control policies. That's OK! But in general, the npm team recommends that most users run Node.js LTS. -#### `npm config get registry` +#### `Checking configured npm registry` You may be installing from private package registries for your project or company. That's great! Others may be following tutorials or StackOverflow @@ -79,7 +80,7 @@ Sometimes, this may entail changing the registry you're pointing at. This part of `npm doctor` just lets you, and maybe whoever's helping you with support, know that you're not using the default registry. -#### `which git` +#### `Checking for git executable in PATH` While it's documented in the README, it may not be obvious that npm needs Git installed to do many of the things that it does. Also, in some cases diff --git a/deps/npm/docs/content/commands/npm-ls.md b/deps/npm/docs/content/commands/npm-ls.md index 9c761e13fa34f0..89a4bd3604ce40 100644 --- a/deps/npm/docs/content/commands/npm-ls.md +++ b/deps/npm/docs/content/commands/npm-ls.md @@ -27,7 +27,7 @@ packages will *also* show the paths to the specified packages. For example, running `npm ls promzard` in npm's source tree will show: ```bash -npm@10.5.2 /path/to/npm +npm@10.7.0 /path/to/npm └─┬ init-package-json@0.0.4 └── promzard@0.1.5 ``` diff --git a/deps/npm/docs/content/commands/npm-profile.md b/deps/npm/docs/content/commands/npm-profile.md index d048532c98d452..2468d6c87faf60 100644 --- a/deps/npm/docs/content/commands/npm-profile.md +++ b/deps/npm/docs/content/commands/npm-profile.md @@ -24,28 +24,17 @@ support this interface. * `npm profile get []`: Display all of the properties of your profile, or one or more specific properties. 
It looks like: -```bash -+-----------------+---------------------------+ -| name | example | -+-----------------+---------------------------+ -| email | me@example.com (verified) | -+-----------------+---------------------------+ -| two factor auth | auth-and-writes | -+-----------------+---------------------------+ -| fullname | Example User | -+-----------------+---------------------------+ -| homepage | | -+-----------------+---------------------------+ -| freenode | | -+-----------------+---------------------------+ -| twitter | | -+-----------------+---------------------------+ -| github | | -+-----------------+---------------------------+ -| created | 2015-02-26T01:38:35.892Z | -+-----------------+---------------------------+ -| updated | 2017-10-02T21:29:45.922Z | -+-----------------+---------------------------+ +``` +name: example +email: e@example.com (verified) +two-factor auth: auth-and-writes +fullname: Example User +homepage: +freenode: +twitter: +github: +created: 2015-02-26T01:38:35.892Z +updated: 2017-10-02T21:29:45.922Z ``` * `npm profile set `: Set the value of a profile diff --git a/deps/npm/docs/content/commands/npm-search.md b/deps/npm/docs/content/commands/npm-search.md index 047102af61766e..4e4bbf58aec39e 100644 --- a/deps/npm/docs/content/commands/npm-search.md +++ b/deps/npm/docs/content/commands/npm-search.md @@ -7,7 +7,7 @@ description: Search for packages ### Synopsis ```bash -npm search [search terms ...] +npm search [ ...] aliases: find, s, se ``` @@ -39,15 +39,6 @@ expression characters in most shells.) ### Configuration -#### `long` - -* Default: false -* Type: Boolean - -Show extended information in `ls`, `search`, and `help-search`. - - - #### `json` * Default: false diff --git a/deps/npm/docs/content/commands/npm-token.md b/deps/npm/docs/content/commands/npm-token.md index a17193d5fe8d05..771da7d9857534 100644 --- a/deps/npm/docs/content/commands/npm-token.md +++ b/deps/npm/docs/content/commands/npm-token.md @@ -22,24 +22,14 @@ This lets you list, create and revoke authentication tokens. Shows a table of all active authentication tokens. You can request this as JSON with `--json` or tab-separated values with `--parseable`. -```bash -+--------+---------+------------+----------+----------------+ -| id | token | created | read-only | CIDR whitelist | -+--------+---------+------------+----------+----------------+ -| 7f3134 | 1fa9ba… | 2017-10-02 | yes | | -+--------+---------+------------+----------+----------------+ -| c03241 | af7aef… | 2017-10-02 | no | 192.168.0.1/24 | -+--------+---------+------------+----------+----------------+ -| e0cf92 | 3a436a… | 2017-10-02 | no | | -+--------+---------+------------+----------+----------------+ -| 63eb9d | 74ef35… | 2017-09-28 | no | | -+--------+---------+------------+----------+----------------+ -| 2daaa8 | cbad5f… | 2017-09-26 | no | | -+--------+---------+------------+----------+----------------+ -| 68c2fe | 127e51… | 2017-09-23 | no | | -+--------+---------+------------+----------+----------------+ -| 6334e1 | 1dadd1… | 2017-09-23 | no | | -+--------+---------+------------+----------+----------------+ +``` +Read only token npm_1f… with id 7f3134 created 2017-10-21 + +Publish token npm_af… with id c03241 created 2017-10-02 +with IP Whitelist: 192.168.0.1/24 + +Publish token npm_… with id e0cf92 created 2017-10-02 + ``` * `npm token create [--read-only] [--cidr=]`: @@ -55,16 +45,8 @@ This lets you list, create and revoke authentication tokens. 
website](https://docs.npmjs.com/creating-and-viewing-access-tokens) for more information on generating automation tokens. -```bash -+----------------+--------------------------------------+ -| token | a73c9572-f1b9-8983-983d-ba3ac3cc913d | -+----------------+--------------------------------------+ -| cidr_whitelist | | -+----------------+--------------------------------------+ -| readonly | false | -+----------------+--------------------------------------+ -| created | 2017-10-02T07:52:24.838Z | -+----------------+--------------------------------------+ +``` +Created publish token a73c9572-f1b9-8983-983d-ba3ac3cc913d ``` * `npm token revoke `: diff --git a/deps/npm/docs/content/commands/npm.md b/deps/npm/docs/content/commands/npm.md index 0e44e3e35e73f6..eb9a779cfdee9d 100644 --- a/deps/npm/docs/content/commands/npm.md +++ b/deps/npm/docs/content/commands/npm.md @@ -14,7 +14,7 @@ Note: This command is unaware of workspaces. ### Version -10.5.2 +10.7.0 ### Description diff --git a/deps/npm/docs/content/configuring-npm/package-json.md b/deps/npm/docs/content/configuring-npm/package-json.md index ec5cfcab1bb49e..43037477491e35 100644 --- a/deps/npm/docs/content/configuring-npm/package-json.md +++ b/deps/npm/docs/content/configuring-npm/package-json.md @@ -928,6 +928,13 @@ Overrides provide a way to replace a package in your dependency tree with another version, or another package entirely. These changes can be scoped as specific or as vague as desired. +Overrides are only considered in the root `package.json` file for a project. +Overrides in installed dependencies (including +[workspaces](/using-npm/workspaces)) are not considered in dependency tree +resolution. Published packages may dictate their resolutions by pinning +dependencies or using an +[`npm-shrinkwrap.json`](/configuring-npm/npm-shrinkwrap-json) file. + To make sure the package `foo` is always installed as version `1.0.0` no matter what version your dependencies rely on: diff --git a/deps/npm/docs/content/configuring-npm/package-lock-json.md b/deps/npm/docs/content/configuring-npm/package-lock-json.md index d540dd0e7228b8..f3b012175fa0ec 100644 --- a/deps/npm/docs/content/configuring-npm/package-lock-json.md +++ b/deps/npm/docs/content/configuring-npm/package-lock-json.md @@ -31,6 +31,8 @@ various purposes: picture of the package tree, reducing the need to read `package.json` files, and allowing for significant performance improvements. +When `npm` creates or updates `package-lock.json`, it will infer line endings and indentation from `package.json` so that the formatting of both files matches. + ### `package-lock.json` vs `npm-shrinkwrap.json` Both of these files have the same format, and perform similar functions in diff --git a/deps/npm/docs/lib/index.js b/deps/npm/docs/lib/index.js index 5d4ae7af3457bb..5f8501ead27b83 100644 --- a/deps/npm/docs/lib/index.js +++ b/deps/npm/docs/lib/index.js @@ -119,7 +119,7 @@ const replaceConfig = (src, { path }) => { } const allConfig = Object.entries(definitions).sort(sort) - .map(([_, def]) => def.describe()) + .map(([, def]) => def.describe()) .join('\n\n') return src.replace(replacer, allConfig) diff --git a/deps/npm/docs/output/commands/npm-access.html b/deps/npm/docs/output/commands/npm-access.html index aa89c3e0068fcf..3263227f8afd5f 100644 --- a/deps/npm/docs/output/commands/npm-access.html +++ b/deps/npm/docs/output/commands/npm-access.html @@ -146,7 +146,7 @@

      Table of contents

      Synopsis

      -
      npm access list packages [<user>|<scope>|<scope:team> [<package>]
      +
      npm access list packages [<user>|<scope>|<scope:team>] [<package>]
       npm access list collaborators [<package> [<user>]]
       npm access get status [<package>]
       npm access set status=public|private [<package>]
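For context, the npx-cli.js and docs/lib/index.js hunks in this patch change destructuring patterns like `([key, { type }])` and `([_, def])` to ones with a leading hole. A small self-contained sketch of why that works; the `definitions` object below is a made-up stand-in, not the real `@npmcli/config` definitions:

```js
// An elided slot in an array-destructuring pattern simply skips that
// position, so no unused binding (the old `key` / `_`) is created for
// lint rules to flag.
const definitions = {
  audit: { type: Boolean },   // hypothetical entries for illustration
  registry: { type: String },
}

const booleanKeys = Object.entries(definitions)
  .filter(([, { type }]) => type === Boolean) // ignore the key, inspect the value
  .map(([key]) => key)                        // now keep only the key

console.log(booleanKeys) // => [ 'audit' ]
```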
      diff --git a/deps/npm/docs/output/commands/npm-doctor.html b/deps/npm/docs/output/commands/npm-doctor.html
      index d63f33389af97e..fc4f7643fad896 100644
      --- a/deps/npm/docs/output/commands/npm-doctor.html
      +++ b/deps/npm/docs/output/commands/npm-doctor.html
      @@ -142,11 +142,11 @@ 

      npm-doctor

      Table of contents

      - +

      Synopsis

      -
      npm doctor [ping] [registry] [versions] [environment] [permissions] [cache]
      +
      npm doctor [connection] [registry] [versions] [environment] [permissions] [cache]
       

      Note: This command is unaware of workspaces.

      Description

      @@ -171,17 +171,18 @@

      Description

there are any recommended changes, it will display them. By default npm runs all of these checks. You can limit what checks are run by specifying them as extra arguments.

      -

      npm ping

      +

      Connecting to the registry

By default, npm installs from the primary npm registry,
-registry.npmjs.org. npm doctor hits a special ping endpoint within the
-registry. This can also be checked with npm ping. If this check fails,
-you may be using a proxy that needs to be configured, or may need to talk
-to your IT staff to get access over HTTPS to registry.npmjs.org.

+registry.npmjs.org. npm doctor hits a special connection testing
+endpoint within the registry. This can also be checked with npm ping.
+If this check fails, you may be using a proxy that needs to be
+configured, or may need to talk to your IT staff to get access over
+HTTPS to registry.npmjs.org.

      This check is done against whichever registry you've configured (you can see what that is by running npm config get registry), and if you're using a private registry that doesn't support the /whoami endpoint supported by the primary registry, this check may fail.

      -

      npm -v

      +

      Checking npm version

While Node.js may come bundled with a particular version of npm, it's the policy of the CLI team that we recommend all users run npm@latest if they can. As the CLI is maintained by a small team of contributors, there are
@@ -189,21 +190,21 @@

      npm -v

      support releases typically only receive critical security and regression fixes. The team believes that the latest tested version of npm is almost always likely to be the most functional and defect-free version of npm.

      -

      node -v

      +

      Checking node version

      For most users, in most circumstances, the best version of Node will be the latest long-term support (LTS) release. Those of you who want access to new ECMAscript features or bleeding-edge changes to Node's standard library may be running a newer version, and some may be required to run an older version of Node because of enterprise change control policies. That's OK! But in general, the npm team recommends that most users run Node.js LTS.

      -

      npm config get registry

      +

      Checking configured npm registry

      You may be installing from private package registries for your project or company. That's great! Others may be following tutorials or StackOverflow questions in an effort to troubleshoot problems you may be having. Sometimes, this may entail changing the registry you're pointing at. This part of npm doctor just lets you, and maybe whoever's helping you with support, know that you're not using the default registry.

      -

      which git

      +

      Checking for git executable in PATH

While it's documented in the README, it may not be obvious that npm needs Git installed to do many of the things that it does. Also, in some cases – especially on Windows – you may have Git set up in such a way that it's
diff --git a/deps/npm/docs/output/commands/npm-ls.html
index 7449728edf37fe..be2287797a8f30 100644
--- a/deps/npm/docs/output/commands/npm-ls.html
+++ b/deps/npm/docs/output/commands/npm-ls.html
@@ -160,7 +160,7 @@

      Description

      the results to only the paths to the packages named. Note that nested packages will also show the paths to the specified packages. For example, running npm ls promzard in npm's source tree will show:

      -
      npm@10.5.2 /path/to/npm
      +
      npm@10.7.0 /path/to/npm
       └─┬ init-package-json@0.0.4
         └── promzard@0.1.5
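For context, the npm doctor docs above note that the individual checks can be run selectively by naming them as extra arguments. A hedged sketch of driving a reduced check set from a Node script, using only the CLI surface documented in the synopsis (the wrapper itself is hypothetical):

```js
// Run only the connection and versions checks, mirroring the synopsis
// `npm doctor [connection] [registry] [versions] ...` documented above.
const { spawnSync } = require('node:child_process')

const result = spawnSync('npm', ['doctor', 'connection', 'versions'], {
  stdio: 'inherit',                    // stream the doctor report directly
  shell: process.platform === 'win32', // npm is a .cmd shim on Windows
})

process.exitCode = result.status ?? 1  // non-zero if any check failed
```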
       
      diff --git a/deps/npm/docs/output/commands/npm-profile.html b/deps/npm/docs/output/commands/npm-profile.html index a6283aeb1884f8..e0c913a6b46b22 100644 --- a/deps/npm/docs/output/commands/npm-profile.html +++ b/deps/npm/docs/output/commands/npm-profile.html @@ -160,27 +160,16 @@

      Description

    • npm profile get [<property>]: Display all of the properties of your profile, or one or more specific properties. It looks like:
    -
    +-----------------+---------------------------+
    -| name            | example                   |
    -+-----------------+---------------------------+
    -| email           | me@example.com (verified) |
    -+-----------------+---------------------------+
    -| two factor auth | auth-and-writes           |
    -+-----------------+---------------------------+
    -| fullname        | Example User              |
    -+-----------------+---------------------------+
    -| homepage        |                           |
    -+-----------------+---------------------------+
    -| freenode        |                           |
    -+-----------------+---------------------------+
    -| twitter         |                           |
    -+-----------------+---------------------------+
    -| github          |                           |
    -+-----------------+---------------------------+
    -| created         | 2015-02-26T01:38:35.892Z  |
    -+-----------------+---------------------------+
    -| updated         | 2017-10-02T21:29:45.922Z  |
    -+-----------------+---------------------------+
    +
    name: example
    +email: e@example.com (verified)
    +two-factor auth: auth-and-writes
    +fullname: Example User
    +homepage:
    +freenode:
    +twitter:
    +github:
    +created: 2015-02-26T01:38:35.892Z
    +updated: 2017-10-02T21:29:45.922Z
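For context, the profile output above is now plain `key: value` lines instead of an ASCII table. A minimal sketch of producing that shape from a profile object; the object and rendering below are illustrative only, not npm's actual implementation:

```js
// Render profile properties in the flat `key: value` style shown above.
const profile = {
  name: 'example',
  email: 'e@example.com (verified)',
  'two-factor auth': 'auth-and-writes',
  fullname: 'Example User',
  homepage: '',
  created: '2015-02-26T01:38:35.892Z',
  updated: '2017-10-02T21:29:45.922Z',
}

const rendered = Object.entries(profile)
  .map(([key, value]) => `${key}: ${value}`)
  .join('\n')

console.log(rendered)
```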
     
•
diff --git a/deps/npm/docs/output/commands/npm-search.html
index fb35290351bf63..3d7821531f730a 100644
--- a/deps/npm/docs/output/commands/npm-search.html
+++ b/deps/npm/docs/output/commands/npm-search.html
@@ -142,11 +142,11 @@

      npm-search

      Table of contents

      - +

      Synopsis

      -
      npm search [search terms ...]
      +
      npm search <search term> [<search term> ...]
       
       aliases: find, s, se
       
      @@ -170,12 +170,6 @@

      Description

      ignore a trailing / . (Note you must escape or quote many regular expression characters in most shells.)

      Configuration

      -

      long

      -
        -
      • Default: false
      • -
      • Type: Boolean
      • -
      -

      Show extended information in ls, search, and help-search.

      json

      • Default: false
•
diff --git a/deps/npm/docs/output/commands/npm-token.html
index 44f94ec71ff14e..33b66d08e69def 100644
--- a/deps/npm/docs/output/commands/npm-token.html
+++ b/deps/npm/docs/output/commands/npm-token.html
@@ -158,23 +158,13 @@

        Description

        Shows a table of all active authentication tokens. You can request this as JSON with --json or tab-separated values with --parseable.
      -
      +--------+---------+------------+----------+----------------+
      -| id     | token   | created    | read-only | CIDR whitelist |
      -+--------+---------+------------+----------+----------------+
      -| 7f3134 | 1fa9ba… | 2017-10-02 | yes      |                |
      -+--------+---------+------------+----------+----------------+
      -| c03241 | af7aef… | 2017-10-02 | no       | 192.168.0.1/24 |
      -+--------+---------+------------+----------+----------------+
      -| e0cf92 | 3a436a… | 2017-10-02 | no       |                |
      -+--------+---------+------------+----------+----------------+
      -| 63eb9d | 74ef35… | 2017-09-28 | no       |                |
      -+--------+---------+------------+----------+----------------+
      -| 2daaa8 | cbad5f… | 2017-09-26 | no       |                |
      -+--------+---------+------------+----------+----------------+
      -| 68c2fe | 127e51… | 2017-09-23 | no       |                |
      -+--------+---------+------------+----------+----------------+
      -| 6334e1 | 1dadd1… | 2017-09-23 | no       |                |
      -+--------+---------+------------+----------+----------------+
      +
      Read only token npm_1f… with id 7f3134 created 2017-10-21
      +
      +Publish token npm_af…  with id c03241 created 2017-10-02
      +with IP Whitelist: 192.168.0.1/24
      +
      +Publish token npm_… with id e0cf92 created 2017-10-02
      +
       
•
@@ -191,15 +181,7 @@

        Description

        for more information on generating automation tokens.

      -
      +----------------+--------------------------------------+
      -| token          | a73c9572-f1b9-8983-983d-ba3ac3cc913d |
      -+----------------+--------------------------------------+
      -| cidr_whitelist |                                      |
      -+----------------+--------------------------------------+
      -| readonly       | false                                |
      -+----------------+--------------------------------------+
      -| created        | 2017-10-02T07:52:24.838Z             |
      -+----------------+--------------------------------------+
      +
      Created publish token a73c9572-f1b9-8983-983d-ba3ac3cc913d
       
• npm token revoke <token|id>:
diff --git a/deps/npm/docs/output/commands/npm.html
index d79ded11c70ba2..1a279cf259db33 100644
--- a/deps/npm/docs/output/commands/npm.html
+++ b/deps/npm/docs/output/commands/npm.html
@@ -150,7 +150,7 @@

        Table of contents

      Note: This command is unaware of workspaces.

      Version

      -

      10.5.2

      +

      10.7.0

      Description

npm is the package manager for the Node JavaScript platform. It puts modules in place so that node can find them, and manages dependency
diff --git a/deps/npm/docs/output/configuring-npm/package-json.html
index becaa89251f049..3a0e58663c6ac7 100644
--- a/deps/npm/docs/output/configuring-npm/package-json.html
+++ b/deps/npm/docs/output/configuring-npm/package-json.html
@@ -850,6 +850,12 @@

      overrides

      Overrides provide a way to replace a package in your dependency tree with another version, or another package entirely. These changes can be scoped as specific or as vague as desired.

      +

Overrides are only considered in the root package.json file for a project.
+Overrides in installed dependencies (including
+workspaces) are not considered in dependency tree
+resolution. Published packages may dictate their resolutions by pinning
+dependencies or using an
+npm-shrinkwrap.json file.

      To make sure the package foo is always installed as version 1.0.0 no matter what version your dependencies rely on:

      {
      diff --git a/deps/npm/docs/output/configuring-npm/package-lock-json.html b/deps/npm/docs/output/configuring-npm/package-lock-json.html
      index 11e4ab79900e5a..ca61021db3a36a 100644
      --- a/deps/npm/docs/output/configuring-npm/package-lock-json.html
      +++ b/deps/npm/docs/output/configuring-npm/package-lock-json.html
      @@ -176,6 +176,7 @@ 

      Table of contents

      files, and allowing for significant performance improvements.

    +

    When npm creates or updates package-lock.json, it will infer line endings and indentation from package.json so that the formatting of both files matches.

    package-lock.json vs npm-shrinkwrap.json

    Both of these files have the same format, and perform similar functions in the root of a project.
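For context, the lib/ hunks that follow replace npm's internal log-shim and npm.output() calls with proc-log's log and output emitters. A minimal sketch of that pattern, assuming proc-log's documented behaviour of emitting events on the global process object; the listener here is illustrative and far simpler than npm's real display layer (lib/utils/display.js):

```js
// Producer side, as used in the hunks below: these calls don't print,
// they emit 'log' / 'output' events on `process`.
const { log, output } = require('proc-log')

// Consumer side: a display layer decides how to render those events.
process.on('output', (level, ...args) => {
  if (level === 'standard') {
    console.log(...args)
  }
})
process.on('log', (level, ...args) => {
  console.error(`npm ${level}`, ...args)
})

output.standard('printed to stdout')
log.verbose('cli', 'printed to stderr by the listener above')
```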

    diff --git a/deps/npm/lib/arborist-cmd.js b/deps/npm/lib/arborist-cmd.js index 42699ece364ad1..9d247d02fa181d 100644 --- a/deps/npm/lib/arborist-cmd.js +++ b/deps/npm/lib/arborist-cmd.js @@ -1,10 +1,9 @@ -const log = require('./utils/log-shim.js') +const { log } = require('proc-log') +const BaseCommand = require('./base-cmd.js') // This is the base for all commands whose execWorkspaces just gets // a list of workspace names and passes it on to new Arborist() to // be able to run a filtered Arborist.reify() at some point. - -const BaseCommand = require('./base-command.js') class ArboristCmd extends BaseCommand { get isArboristCmd () { return true diff --git a/deps/npm/lib/base-command.js b/deps/npm/lib/base-cmd.js similarity index 93% rename from deps/npm/lib/base-command.js rename to deps/npm/lib/base-cmd.js index cdf7971b5aaf92..9c30ef8a4b44e0 100644 --- a/deps/npm/lib/base-command.js +++ b/deps/npm/lib/base-cmd.js @@ -1,10 +1,4 @@ -// Base class for npm commands - -const { relative } = require('path') - -const { definitions } = require('@npmcli/config/lib/definitions') -const { aliases: cmdAliases } = require('./utils/cmd-list') -const log = require('./utils/log-shim.js') +const { log, output } = require('proc-log') class BaseCommand { static workspaces = false @@ -18,6 +12,8 @@ class BaseCommand { // this is a static so that we can read from it without instantiating a command // which would require loading the config static get describeUsage () { + const { definitions } = require('@npmcli/config/lib/definitions') + const { aliases: cmdAliases } = require('./utils/cmd-list') const seenExclusive = new Set() const wrapWidth = 80 const { description, usage = [''], name, params } = this @@ -119,7 +115,7 @@ class BaseCommand { const { config } = this.npm if (config.get('usage')) { - return this.npm.output(this.usage) + return output.standard(this.usage) } const hasWsConfig = config.get('workspaces') || config.get('workspace').length @@ -161,6 +157,8 @@ class BaseCommand { } async setWorkspaces () { + const { relative } = require('node:path') + const includeWorkspaceRoot = this.isArboristCmd ? false : this.npm.config.get('include-workspace-root') @@ -169,7 +167,7 @@ class BaseCommand { const relativeFrom = prefixInsideCwd ? 
this.npm.localPrefix : process.cwd() const filters = this.npm.config.get('workspace') - const getWorkspaces = require('./workspaces/get-workspaces.js') + const getWorkspaces = require('./utils/get-workspaces.js') const ws = await getWorkspaces(filters, { path: this.npm.localPrefix, includeWorkspaceRoot, @@ -181,4 +179,5 @@ class BaseCommand { this.workspacePaths = [...ws.values()] } } + module.exports = BaseCommand diff --git a/deps/npm/lib/cli.js b/deps/npm/lib/cli.js index c85ecb65a7005a..e11729fe3205b9 100644 --- a/deps/npm/lib/cli.js +++ b/deps/npm/lib/cli.js @@ -1,4 +1,4 @@ -const validateEngines = require('./es6/validate-engines.js') -const cliEntry = require('path').resolve(__dirname, 'cli-entry.js') +const validateEngines = require('./cli/validate-engines.js') +const cliEntry = require('node:path').resolve(__dirname, 'cli/entry.js') module.exports = (process) => validateEngines(process, () => require(cliEntry)) diff --git a/deps/npm/lib/cli-entry.js b/deps/npm/lib/cli/entry.js similarity index 56% rename from deps/npm/lib/cli-entry.js rename to deps/npm/lib/cli/entry.js index aad06e06903856..5d676c3f0a8a10 100644 --- a/deps/npm/lib/cli-entry.js +++ b/deps/npm/lib/cli/entry.js @@ -11,14 +11,17 @@ module.exports = async (process, validateEngines) => { process.argv.splice(1, 1, 'npm', '-g') } + // Patch the global fs module here at the app level + require('graceful-fs').gracefulify(require('fs')) + const satisfies = require('semver/functions/satisfies') - const exitHandler = require('./utils/exit-handler.js') - const Npm = require('./npm.js') + const exitHandler = require('./exit-handler.js') + const Npm = require('../npm.js') const npm = new Npm() exitHandler.setNpm(npm) // only log node and npm paths in argv initially since argv can contain sensitive info. a cleaned version will be logged later - const log = require('./utils/log-shim.js') + const { log, output } = require('proc-log') log.verbose('cli', process.argv.slice(0, 2).join(' ')) log.info('using', 'npm@%s', npm.version) log.info('using', 'node@%s', process.version) @@ -33,39 +36,49 @@ module.exports = async (process, validateEngines) => { log.warn('cli', validateEngines.unsupportedMessage) } - let cmd // Now actually fire up npm and run the command. // This is how to use npm programmatically: try { - await npm.load() + const { exec, command, args } = await npm.load() - // npm -v - if (npm.config.get('version', 'cli')) { - npm.output(npm.version) + if (!exec) { return exitHandler() } - // npm --versions - if (npm.config.get('versions', 'cli')) { - npm.argv = ['version'] - npm.config.set('usage', false, 'cli') - } - - cmd = npm.argv.shift() - if (!cmd) { - npm.output(npm.usage) + if (!command) { + output.standard(npm.usage) process.exitCode = 1 return exitHandler() } - await npm.exec(cmd) + // Options are prefixed by a hyphen-minus (-, \u2d). + // Other dash-type chars look similar but are invalid. + const nonDashArgs = npm.argv.filter(a => /^[\u2010-\u2015\u2212\uFE58\uFE63\uFF0D]/.test(a)) + if (nonDashArgs.length) { + log.error( + 'arg', + 'Argument starts with non-ascii dash, this is probably invalid:', + require('@npmcli/redact').redactLog(nonDashArgs.join(', ')) + ) + } + + const execPromise = npm.exec(command, args) + + // this is async but we dont await it, since its ok if it doesnt + // finish before the command finishes running. 
it uses command and argv + // so it must be initiated here, after the command name is set + const updateNotifier = require('./update-notifier.js') + // eslint-disable-next-line promise/catch-or-return + updateNotifier(npm).then((msg) => (npm.updateNotification = msg)) + + await execPromise return exitHandler() } catch (err) { if (err.code === 'EUNKNOWNCOMMAND') { - const didYouMean = require('./utils/did-you-mean.js') - const suggestions = await didYouMean(npm.localPrefix, cmd) - npm.output(`Unknown command: "${cmd}"${suggestions}\n`) - npm.output('To see a list of supported npm commands, run:\n npm help') + const didYouMean = require('../utils/did-you-mean.js') + const suggestions = await didYouMean(npm.localPrefix, err.command) + output.standard(`Unknown command: "${err.command}"${suggestions}\n`) + output.standard('To see a list of supported npm commands, run:\n npm help') process.exitCode = 1 return exitHandler() } diff --git a/deps/npm/lib/utils/exit-handler.js b/deps/npm/lib/cli/exit-handler.js similarity index 65% rename from deps/npm/lib/utils/exit-handler.js rename to deps/npm/lib/cli/exit-handler.js index 8b4ab45c4d4745..5866c46b57c5f0 100644 --- a/deps/npm/lib/utils/exit-handler.js +++ b/deps/npm/lib/cli/exit-handler.js @@ -1,8 +1,5 @@ -const os = require('os') -const fs = require('fs') - -const log = require('./log-shim.js') -const errorMessage = require('./error-message.js') +const { log, output, META } = require('proc-log') +const errorMessage = require('../utils/error-message.js') const { redactLog: replaceInfo } = require('@npmcli/redact') let npm = null // set by the cli @@ -10,21 +7,8 @@ let exitHandlerCalled = false let showLogFileError = false process.on('exit', code => { - log.disableProgress() - - // process.emit is synchronous, so the timeEnd handler will run before the - // unfinished timer check below - process.emit('timeEnd', 'npm') - const hasLoadedNpm = npm?.config.loaded - // Unfinished timers can be read before config load - if (npm) { - for (const [name, timer] of npm.unfinishedTimers) { - log.verbose('unfinished npm timer', name, timer) - } - } - if (!code) { log.info('ok') } else { @@ -34,66 +18,16 @@ process.on('exit', code => { if (!exitHandlerCalled) { process.exitCode = code || 1 log.error('', 'Exit handler never called!') - // eslint-disable-next-line no-console - console.error('') log.error('', 'This is an error with npm itself. Please report this error at:') log.error('', ' ') + // eslint-disable-next-line no-console + console.error('') showLogFileError = true } // npm must be loaded to know where the log file was written if (hasLoadedNpm) { - // write the timing file now, this might do nothing based on the configs set. - // we need to call it here in case it errors so we dont tell the user - // about a timing file that doesn't exist - npm.writeTimingFile() - - const logsDir = npm.logsDir - const logFiles = npm.logFiles - - const timingDir = npm.timingDir - const timingFile = npm.timingFile - - const timing = npm.config.get('timing') - const logsMax = npm.config.get('logs-max') - - // Determine whether to show log file message and why it is - // being shown since in timing mode we always show the log file message - const logMethod = showLogFileError ? 'error' : timing ? 
'info' : null - - if (logMethod) { - if (!npm.silent) { - // just a line break if not in silent mode - // eslint-disable-next-line no-console - console.error('') - } - - const message = [] - - if (timingFile) { - message.push(`Timing info written to: ${timingFile}`) - } else if (timing) { - message.push( - `The timing file was not written due to an error writing to the directory: ${timingDir}` - ) - } - - if (logFiles.length) { - message.push(`A complete log of this run can be found in: ${logFiles}`) - } else if (logsMax <= 0) { - // user specified no log file - message.push(`Log files were not written due to the config logs-max=${logsMax}`) - } else { - // could be an error writing to the directory - message.push( - `Log files were not written due to an error writing to the directory: ${logsDir}`, - 'You can rerun the command with `--loglevel=verbose` to see the logs in your terminal' - ) - } - - log[logMethod]('', message.join('\n')) - } - + npm.finish({ showLogFileError }) // This removes any listeners npm setup, mostly for tests to avoid max listener warnings npm.unload() } @@ -106,12 +40,11 @@ process.on('exit', code => { const exitHandler = err => { exitHandlerCalled = true - log.disableProgress() - const hasLoadedNpm = npm?.config.loaded if (!npm) { err = err || new Error('Exit prior to setting npm in exit handler') + // Don't use proc-log here since npm was never set // eslint-disable-next-line no-console console.error(err.stack || err.message) return process.exit(1) @@ -119,16 +52,14 @@ const exitHandler = err => { if (!hasLoadedNpm) { err = err || new Error('Exit prior to config file resolving.') + // Don't use proc-log here since npm was never loaded // eslint-disable-next-line no-console console.error(err.stack || err.message) } // only show the notification if it finished. if (typeof npm.updateNotification === 'string') { - const { level } = log - log.level = 'notice' - log.notice('', npm.updateNotification) - log.level = level + log.notice('', npm.updateNotification, { [META]: true, force: true }) } let exitCode = process.exitCode || 0 @@ -154,6 +85,8 @@ const exitHandler = err => { log.error('weird error', err) noLogMessage = true } else { + const os = require('node:os') + const fs = require('node:fs') if (!err.code) { const matchErrorCode = err.message.match(/^(?:Error: )?(E[A-Z]+)/) err.code = matchErrorCode && matchErrorCode[1] @@ -205,7 +138,7 @@ const exitHandler = err => { } if (hasLoadedNpm) { - npm.flushOutput(jsonError) + output.flush({ [META]: true, jsonError }) } log.verbose('exit', exitCode || 0) diff --git a/deps/npm/lib/utils/update-notifier.js b/deps/npm/lib/cli/update-notifier.js similarity index 86% rename from deps/npm/lib/utils/update-notifier.js rename to deps/npm/lib/cli/update-notifier.js index 7481b65d562217..41fece36a558eb 100644 --- a/deps/npm/lib/utils/update-notifier.js +++ b/deps/npm/lib/cli/update-notifier.js @@ -49,7 +49,6 @@ const updateCheck = async (npm, spec, version, current) => { return null } - const useColor = npm.logColor const chalk = npm.logChalk // ok! notify the user about this update they should get. @@ -60,19 +59,14 @@ const updateCheck = async (npm, spec, version, current) => { : update.minor !== current.minor ? 'minor' : update.patch !== current.patch ? 'patch' : 'prerelease' - const typec = type === 'major' ? chalk.red(type) - : type === 'minor' ? 
chalk.yellow(type) - : chalk.green(type) - const oldc = chalk.red(current) - const latestc = chalk.green(latest) - const changelog = `https://github.com/npm/cli/releases/tag/v${latest}` - const changelogc = !useColor ? `<${changelog}>` : chalk.cyan(changelog) + const typec = type === 'major' ? 'red' + : type === 'minor' ? 'yellow' + : 'cyan' const cmd = `npm install -g npm@${latest}` - const cmdc = !useColor ? `\`${cmd}\`` : chalk.green(cmd) - const message = `\nNew ${typec} version of npm available! ` + - `${oldc} -> ${latestc}\n` + - `Changelog: ${changelogc}\n` + - `Run ${cmdc} to update!\n` + const message = `\nNew ${chalk[typec](type)} version of npm available! ` + + `${chalk[typec](current)} -> ${chalk.blue(latest)}\n` + + `Changelog: ${chalk.blue(`https://github.com/npm/cli/releases/tag/v${latest}`)}\n` + + `To update run: ${chalk.underline(cmd)}\n` return message } diff --git a/deps/npm/lib/es6/validate-engines.js b/deps/npm/lib/cli/validate-engines.js similarity index 100% rename from deps/npm/lib/es6/validate-engines.js rename to deps/npm/lib/cli/validate-engines.js diff --git a/deps/npm/lib/commands/access.js b/deps/npm/lib/commands/access.js index 99c1264a84eda3..d35699e839109c 100644 --- a/deps/npm/lib/commands/access.js +++ b/deps/npm/lib/commands/access.js @@ -1,11 +1,11 @@ const libnpmaccess = require('libnpmaccess') const npa = require('npm-package-arg') +const { output } = require('proc-log') const pkgJson = require('@npmcli/package-json') const localeCompare = require('@isaacs/string-locale-compare')('en') - const otplease = require('../utils/otplease.js') const getIdentity = require('../utils/get-identity.js') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') const commands = [ 'get', @@ -36,7 +36,7 @@ class Access extends BaseCommand { ] static usage = [ - 'list packages [|| []', + 'list packages [||] []', 'list collaborators [ []]', 'get status []', 'set status=public|private []', @@ -197,7 +197,7 @@ class Access extends BaseCommand { } #output (items, limiter) { - const output = {} + const outputs = {} const lookup = { __proto__: null, read: 'read-only', @@ -205,14 +205,14 @@ class Access extends BaseCommand { } for (const item in items) { const val = items[item] - output[item] = lookup[val] || val + outputs[item] = lookup[val] || val } if (this.npm.config.get('json')) { - this.npm.output(JSON.stringify(output, null, 2)) + output.standard(JSON.stringify(outputs, null, 2)) } else { - for (const item of Object.keys(output).sort(localeCompare)) { + for (const item of Object.keys(outputs).sort(localeCompare)) { if (!limiter || limiter === item) { - this.npm.output(`${item}: ${output[item]}`) + output.standard(`${item}: ${outputs[item]}`) } } } diff --git a/deps/npm/lib/commands/adduser.js b/deps/npm/lib/commands/adduser.js index a69ef366fbf32c..cf64e7a7e74389 100644 --- a/deps/npm/lib/commands/adduser.js +++ b/deps/npm/lib/commands/adduser.js @@ -1,8 +1,7 @@ -const log = require('../utils/log-shim.js') +const { log, output } = require('proc-log') const { redactLog: replaceInfo } = require('@npmcli/redact') const auth = require('../utils/auth.js') - -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') class AddUser extends BaseCommand { static description = 'Add a registry user account' @@ -13,7 +12,7 @@ class AddUser extends BaseCommand { 'auth-type', ] - async exec (args) { + async exec () { const scope = this.npm.config.get('scope') let registry = 
this.npm.config.get('registry') @@ -27,7 +26,6 @@ class AddUser extends BaseCommand { const creds = this.npm.config.getCredentialsByURI(registry) - log.disableProgress() log.notice('', `Log in on ${replaceInfo(registry)}`) const { message, newCreds } = await auth.adduser(this.npm, { @@ -45,7 +43,8 @@ class AddUser extends BaseCommand { await this.npm.config.save('user') - this.npm.output(message) + output.standard(message) } } + module.exports = AddUser diff --git a/deps/npm/lib/commands/audit.js b/deps/npm/lib/commands/audit.js index 8c10a36cfee3cf..aed1be7a82906b 100644 --- a/deps/npm/lib/commands/audit.js +++ b/deps/npm/lib/commands/audit.js @@ -1,399 +1,9 @@ const npmAuditReport = require('npm-audit-report') -const fetch = require('npm-registry-fetch') -const localeCompare = require('@isaacs/string-locale-compare')('en') -const npa = require('npm-package-arg') -const pacote = require('pacote') -const pMap = require('p-map') -const tufClient = require('@sigstore/tuf') - const ArboristWorkspaceCmd = require('../arborist-cmd.js') const auditError = require('../utils/audit-error.js') -const log = require('../utils/log-shim.js') +const { log, output } = require('proc-log') const reifyFinish = require('../utils/reify-finish.js') - -const sortAlphabetically = (a, b) => localeCompare(a.name, b.name) - -class VerifySignatures { - constructor (tree, filterSet, npm, opts) { - this.tree = tree - this.filterSet = filterSet - this.npm = npm - this.opts = opts - this.keys = new Map() - this.invalid = [] - this.missing = [] - this.checkedPackages = new Set() - this.auditedWithKeysCount = 0 - this.verifiedSignatureCount = 0 - this.verifiedAttestationCount = 0 - this.exitCode = 0 - } - - async run () { - const start = process.hrtime.bigint() - - // Find all deps in tree - const { edges, registries } = this.getEdgesOut(this.tree.inventory.values(), this.filterSet) - if (edges.size === 0) { - throw new Error('found no installed dependencies to audit') - } - - const tuf = await tufClient.initTUF({ - cachePath: this.opts.tufCache, - retry: this.opts.retry, - timeout: this.opts.timeout, - }) - await Promise.all([...registries].map(registry => this.setKeys({ registry, tuf }))) - - const progress = log.newItem('verifying registry signatures', edges.size) - const mapper = async (edge) => { - progress.completeWork(1) - await this.getVerifiedInfo(edge) - } - await pMap(edges, mapper, { concurrency: 20, stopOnError: true }) - - // Didn't find any dependencies that could be verified, e.g. only local - // deps, missing version, not on a registry etc. - if (!this.auditedWithKeysCount) { - throw new Error('found no dependencies to audit that were installed from ' + - 'a supported registry') - } - - const invalid = this.invalid.sort(sortAlphabetically) - const missing = this.missing.sort(sortAlphabetically) - - const hasNoInvalidOrMissing = invalid.length === 0 && missing.length === 0 - - if (!hasNoInvalidOrMissing) { - process.exitCode = 1 - } - - if (this.npm.config.get('json')) { - this.npm.output(JSON.stringify({ - invalid, - missing, - }, null, 2)) - return - } - const end = process.hrtime.bigint() - const elapsed = end - start - - const auditedPlural = this.auditedWithKeysCount > 1 ? 
's' : '' - const timing = `audited ${this.auditedWithKeysCount} package${auditedPlural} in ` + - `${Math.floor(Number(elapsed) / 1e9)}s` - this.npm.output(timing) - this.npm.output('') - - const verifiedBold = this.npm.chalk.bold('verified') - if (this.verifiedSignatureCount) { - if (this.verifiedSignatureCount === 1) { - /* eslint-disable-next-line max-len */ - this.npm.output(`${this.verifiedSignatureCount} package has a ${verifiedBold} registry signature`) - } else { - /* eslint-disable-next-line max-len */ - this.npm.output(`${this.verifiedSignatureCount} packages have ${verifiedBold} registry signatures`) - } - this.npm.output('') - } - - if (this.verifiedAttestationCount) { - if (this.verifiedAttestationCount === 1) { - /* eslint-disable-next-line max-len */ - this.npm.output(`${this.verifiedAttestationCount} package has a ${verifiedBold} attestation`) - } else { - /* eslint-disable-next-line max-len */ - this.npm.output(`${this.verifiedAttestationCount} packages have ${verifiedBold} attestations`) - } - this.npm.output('') - } - - if (missing.length) { - const missingClr = this.npm.chalk.bold(this.npm.chalk.red('missing')) - if (missing.length === 1) { - /* eslint-disable-next-line max-len */ - this.npm.output(`1 package has a ${missingClr} registry signature but the registry is providing signing keys:`) - } else { - /* eslint-disable-next-line max-len */ - this.npm.output(`${missing.length} packages have ${missingClr} registry signatures but the registry is providing signing keys:`) - } - this.npm.output('') - missing.map(m => - this.npm.output(`${this.npm.chalk.red(`${m.name}@${m.version}`)} (${m.registry})`) - ) - } - - if (invalid.length) { - if (missing.length) { - this.npm.output('') - } - const invalidClr = this.npm.chalk.bold(this.npm.chalk.red('invalid')) - // We can have either invalid signatures or invalid provenance - const invalidSignatures = this.invalid.filter(i => i.code === 'EINTEGRITYSIGNATURE') - if (invalidSignatures.length) { - if (invalidSignatures.length === 1) { - this.npm.output(`1 package has an ${invalidClr} registry signature:`) - } else { - /* eslint-disable-next-line max-len */ - this.npm.output(`${invalidSignatures.length} packages have ${invalidClr} registry signatures:`) - } - this.npm.output('') - invalidSignatures.map(i => - this.npm.output(`${this.npm.chalk.red(`${i.name}@${i.version}`)} (${i.registry})`) - ) - this.npm.output('') - } - - const invalidAttestations = this.invalid.filter(i => i.code === 'EATTESTATIONVERIFY') - if (invalidAttestations.length) { - if (invalidAttestations.length === 1) { - this.npm.output(`1 package has an ${invalidClr} attestation:`) - } else { - /* eslint-disable-next-line max-len */ - this.npm.output(`${invalidAttestations.length} packages have ${invalidClr} attestations:`) - } - this.npm.output('') - invalidAttestations.map(i => - this.npm.output(`${this.npm.chalk.red(`${i.name}@${i.version}`)} (${i.registry})`) - ) - this.npm.output('') - } - - if (invalid.length === 1) { - /* eslint-disable-next-line max-len */ - this.npm.output(`Someone might have tampered with this package since it was published on the registry!`) - } else { - /* eslint-disable-next-line max-len */ - this.npm.output(`Someone might have tampered with these packages since they were published on the registry!`) - } - this.npm.output('') - } - } - - getEdgesOut (nodes, filterSet) { - const edges = new Set() - const registries = new Set() - for (const node of nodes) { - for (const edge of node.edgesOut.values()) { - const filteredOut = - edge.from - 
&& filterSet - && filterSet.size > 0 - && !filterSet.has(edge.from.target) - - if (!filteredOut) { - const spec = this.getEdgeSpec(edge) - if (spec) { - // Prefetch and cache public keys from used registries - registries.add(this.getSpecRegistry(spec)) - } - edges.add(edge) - } - } - } - return { edges, registries } - } - - async setKeys ({ registry, tuf }) { - const { host, pathname } = new URL(registry) - // Strip any trailing slashes from pathname - const regKey = `${host}${pathname.replace(/\/$/, '')}/keys.json` - let keys = await tuf.getTarget(regKey) - .then((target) => JSON.parse(target)) - .then(({ keys: ks }) => ks.map((key) => ({ - ...key, - keyid: key.keyId, - pemkey: `-----BEGIN PUBLIC KEY-----\n${key.publicKey.rawBytes}\n-----END PUBLIC KEY-----`, - expires: key.publicKey.validFor.end || null, - }))).catch(err => { - if (err.code === 'TUF_FIND_TARGET_ERROR') { - return null - } else { - throw err - } - }) - - // If keys not found in Sigstore TUF repo, fallback to registry keys API - if (!keys) { - keys = await fetch.json('/-/npm/v1/keys', { - ...this.npm.flatOptions, - registry, - }).then(({ keys: ks }) => ks.map((key) => ({ - ...key, - pemkey: `-----BEGIN PUBLIC KEY-----\n${key.key}\n-----END PUBLIC KEY-----`, - }))).catch(err => { - if (err.code === 'E404' || err.code === 'E400') { - return null - } else { - throw err - } - }) - } - - if (keys) { - this.keys.set(registry, keys) - } - } - - getEdgeType (edge) { - return edge.optional ? 'optionalDependencies' - : edge.peer ? 'peerDependencies' - : edge.dev ? 'devDependencies' - : 'dependencies' - } - - getEdgeSpec (edge) { - let name = edge.name - try { - name = npa(edge.spec).subSpec.name - } catch { - // leave it as edge.name - } - try { - return npa(`${name}@${edge.spec}`) - } catch { - // Skip packages with invalid spec - } - } - - buildRegistryConfig (registry) { - const keys = this.keys.get(registry) || [] - const parsedRegistry = new URL(registry) - const regKey = `//${parsedRegistry.host}${parsedRegistry.pathname}` - return { - [`${regKey}:_keys`]: keys, - } - } - - getSpecRegistry (spec) { - return fetch.pickRegistry(spec, this.npm.flatOptions) - } - - getValidPackageInfo (edge) { - const type = this.getEdgeType(edge) - // Skip potentially optional packages that are not on disk, as these could - // be omitted during install - if (edge.error === 'MISSING' && type !== 'dependencies') { - return - } - - const spec = this.getEdgeSpec(edge) - // Skip invalid version requirements - if (!spec) { - return - } - const node = edge.to || edge - const { version } = node.package || {} - - if (node.isWorkspace || // Skip local workspaces packages - !version || // Skip packages that don't have a installed version, e.g. optonal dependencies - !spec.registry) { // Skip if not from registry, e.g. 
git package - return - } - - for (const omitType of this.npm.config.get('omit')) { - if (node[omitType]) { - return - } - } - - return { - name: spec.name, - version, - type, - location: node.location, - registry: this.getSpecRegistry(spec), - } - } - - async verifySignatures (name, version, registry) { - const { - _integrity: integrity, - _signatures, - _attestations, - _resolved: resolved, - } = await pacote.manifest(`${name}@${version}`, { - verifySignatures: true, - verifyAttestations: true, - ...this.buildRegistryConfig(registry), - ...this.npm.flatOptions, - }) - const signatures = _signatures || [] - const result = { - integrity, - signatures, - attestations: _attestations, - resolved, - } - return result - } - - async getVerifiedInfo (edge) { - const info = this.getValidPackageInfo(edge) - if (!info) { - return - } - const { name, version, location, registry, type } = info - if (this.checkedPackages.has(location)) { - // we already did or are doing this one - return - } - this.checkedPackages.add(location) - - // We only "audit" or verify the signature, or the presence of it, on - // packages whose registry returns signing keys - const keys = this.keys.get(registry) || [] - if (keys.length) { - this.auditedWithKeysCount += 1 - } - - try { - const { integrity, signatures, attestations, resolved } = await this.verifySignatures( - name, version, registry - ) - - // Currently we only care about missing signatures on registries that provide a public key - // We could make this configurable in the future with a strict/paranoid mode - if (signatures.length) { - this.verifiedSignatureCount += 1 - } else if (keys.length) { - this.missing.push({ - integrity, - location, - name, - registry, - resolved, - version, - }) - } - - // Track verified attestations separately to registry signatures, as all - // packages on registries with signing keys are expected to have registry - // signatures, but not all packages have provenance and publish attestations. 
- if (attestations) { - this.verifiedAttestationCount += 1 - } - } catch (e) { - if (e.code === 'EINTEGRITYSIGNATURE' || e.code === 'EATTESTATIONVERIFY') { - this.invalid.push({ - code: e.code, - message: e.message, - integrity: e.integrity, - keyid: e.keyid, - location, - name, - registry, - resolved: e.resolved, - signature: e.signature, - predicateType: e.predicateType, - type, - version, - }) - } else { - throw e - } - } - } -} +const VerifySignatures = require('../utils/verify-signatures.js') class Audit extends ArboristWorkspaceCmd { static description = 'Run a security audit' @@ -467,7 +77,7 @@ class Audit extends ArboristWorkspaceCmd { chalk: this.npm.chalk, }) process.exitCode = process.exitCode || result.exitCode - this.npm.output(result.report) + output.standard(result.report) } } diff --git a/deps/npm/lib/commands/cache.js b/deps/npm/lib/commands/cache.js index 50bb35e3544dfe..108c261ffe57d5 100644 --- a/deps/npm/lib/commands/cache.js +++ b/deps/npm/lib/commands/cache.js @@ -3,11 +3,11 @@ const pacote = require('pacote') const fs = require('fs/promises') const { join } = require('path') const semver = require('semver') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') const npa = require('npm-package-arg') const jsonParse = require('json-parse-even-better-errors') const localeCompare = require('@isaacs/string-locale-compare')('en') -const log = require('../utils/log-shim') +const { log, output } = require('proc-log') const searchCachePackage = async (path, parsed, cacheKeys) => { /* eslint-disable-next-line max-len */ @@ -135,7 +135,7 @@ class Cache extends BaseCommand { log.warn(`Not Found: ${key}`) break } - this.npm.output(`Deleted: ${key}`) + output.standard(`Deleted: ${key}`) await cacache.rm.entry(cachePath, key) // XXX this could leave other entries without content! await cacache.rm.content(cachePath, entry.integrity) @@ -170,20 +170,20 @@ class Cache extends BaseCommand { ? `~${cache.slice(process.env.HOME.length)}` : cache const stats = await cacache.verify(cache) - this.npm.output(`Cache verified and compressed (${prefix})`) - this.npm.output(`Content verified: ${stats.verifiedContent} (${stats.keptSize} bytes)`) + output.standard(`Cache verified and compressed (${prefix})`) + output.standard(`Content verified: ${stats.verifiedContent} (${stats.keptSize} bytes)`) if (stats.badContentCount) { - this.npm.output(`Corrupted content removed: ${stats.badContentCount}`) + output.standard(`Corrupted content removed: ${stats.badContentCount}`) } if (stats.reclaimedCount) { /* eslint-disable-next-line max-len */ - this.npm.output(`Content garbage-collected: ${stats.reclaimedCount} (${stats.reclaimedSize} bytes)`) + output.standard(`Content garbage-collected: ${stats.reclaimedCount} (${stats.reclaimedSize} bytes)`) } if (stats.missingContent) { - this.npm.output(`Missing content: ${stats.missingContent}`) + output.standard(`Missing content: ${stats.missingContent}`) } - this.npm.output(`Index entries: ${stats.totalEntries}`) - this.npm.output(`Finished in ${stats.runTime.total / 1000}s`) + output.standard(`Index entries: ${stats.totalEntries}`) + output.standard(`Finished in ${stats.runTime.total / 1000}s`) } // npm cache ls [--package ...] 
@@ -203,10 +203,10 @@ class Cache extends BaseCommand { results.add(key) } } - [...results].sort(localeCompare).forEach(key => this.npm.output(key)) + [...results].sort(localeCompare).forEach(key => output.standard(key)) return } - cacheKeys.sort(localeCompare).forEach(key => this.npm.output(key)) + cacheKeys.sort(localeCompare).forEach(key => output.standard(key)) } } diff --git a/deps/npm/lib/commands/ci.js b/deps/npm/lib/commands/ci.js index 428c43e6c30edc..7e79d7208c9c4b 100644 --- a/deps/npm/lib/commands/ci.js +++ b/deps/npm/lib/commands/ci.js @@ -1,9 +1,8 @@ const reifyFinish = require('../utils/reify-finish.js') const runScript = require('@npmcli/run-script') const fs = require('fs/promises') -const log = require('../utils/log-shim.js') +const { log, time } = require('proc-log') const validateLockfile = require('../utils/validate-lockfile.js') - const ArboristWorkspaceCmd = require('../arborist-cmd.js') class CI extends ArboristWorkspaceCmd { @@ -79,10 +78,10 @@ class CI extends ArboristWorkspaceCmd { if (!dryRun) { // Only remove node_modules after we've successfully loaded the virtual // tree and validated the lockfile - await this.npm.time('npm-ci:rm', async () => { + await time.start('npm-ci:rm', async () => { const path = `${where}/node_modules` // get the list of entries so we can skip the glob for performance - const entries = await fs.readdir(path, null).catch(er => []) + const entries = await fs.readdir(path, null).catch(() => []) return Promise.all(entries.map(f => fs.rm(`${path}/${f}`, { force: true, recursive: true }))) }) @@ -109,7 +108,6 @@ class CI extends ArboristWorkspaceCmd { args: [], scriptShell, stdio: 'inherit', - banner: !this.npm.silent, event, }) } diff --git a/deps/npm/lib/commands/completion.js b/deps/npm/lib/commands/completion.js index 59113c50560bca..9b147d2f5bdac6 100644 --- a/deps/npm/lib/commands/completion.js +++ b/deps/npm/lib/commands/completion.js @@ -27,22 +27,22 @@ // Matches are wrapped with ' to escape them, if necessary, and then printed // one per line for the shell completion method to consume in IFS=$'\n' mode // as an array. -// const fs = require('fs/promises') const nopt = require('nopt') const { resolve } = require('path') - +const { output } = require('proc-log') const Npm = require('../npm.js') const { definitions, shorthands } = require('@npmcli/config/lib/definitions') const { commands, aliases, deref } = require('../utils/cmd-list.js') -const configNames = Object.keys(definitions) -const shorthandNames = Object.keys(shorthands) -const allConfs = configNames.concat(shorthandNames) const { isWindowsShell } = require('../utils/is-windows.js') +const BaseCommand = require('../base-cmd.js') + const fileExists = (file) => fs.stat(file).then(s => s.isFile()).catch(() => false) -const BaseCommand = require('../base-command.js') +const configNames = Object.keys(definitions) +const shorthandNames = Object.keys(shorthands) +const allConfs = configNames.concat(shorthandNames) class Completion extends BaseCommand { static description = 'Tab Completion for npm' @@ -185,7 +185,7 @@ class Completion extends BaseCommand { } if (compls.length > 0) { - this.npm.output(compls.join('\n')) + output.standard(compls.join('\n')) } } } @@ -248,7 +248,7 @@ const configCompl = opts => { // expand with the valid values of various config values. // not yet implemented. -const configValueCompl = opts => [] +const configValueCompl = () => [] // check if the thing is a flag or not. 
const isFlag = word => { @@ -265,7 +265,7 @@ const isFlag = word => { // complete against the npm commands // if they all resolve to the same thing, just return the thing it already is -const cmdCompl = (opts, npm) => { +const cmdCompl = (opts) => { const allCommands = commands.concat(Object.keys(aliases)) const matches = allCommands.filter(c => c.startsWith(opts.partialWord)) if (!matches.length) { diff --git a/deps/npm/lib/commands/config.js b/deps/npm/lib/commands/config.js index 8e8358fc50b7be..7fb8476276937b 100644 --- a/deps/npm/lib/commands/config.js +++ b/deps/npm/lib/commands/config.js @@ -1,12 +1,12 @@ -const { mkdir, readFile, writeFile } = require('fs/promises') -const { dirname, resolve } = require('path') -const { spawn } = require('child_process') -const { EOL } = require('os') -const ini = require('ini') +const { mkdir, readFile, writeFile } = require('node:fs/promises') +const { dirname, resolve } = require('node:path') +const { spawn } = require('node:child_process') +const { EOL } = require('node:os') const localeCompare = require('@isaacs/string-locale-compare')('en') const pkgJson = require('@npmcli/package-json') const { defaults, definitions } = require('@npmcli/config/lib/definitions') -const log = require('../utils/log-shim.js') +const { log, output } = require('proc-log') +const BaseCommand = require('../base-cmd.js') // These are the configs that we can nerf-dart. Not all of them currently even // *have* config definitions so we have to explicitly validate them here @@ -47,7 +47,6 @@ const publicVar = k => { return true } -const BaseCommand = require('../base-command.js') class Config extends BaseCommand { static description = 'Manage the npm configuration files' static name = 'config' @@ -111,35 +110,30 @@ class Config extends BaseCommand { } async exec ([action, ...args]) { - log.disableProgress() - try { - switch (action) { - case 'set': - await this.set(args) - break - case 'get': - await this.get(args) - break - case 'delete': - case 'rm': - case 'del': - await this.del(args) - break - case 'list': - case 'ls': - await (this.npm.flatOptions.json ? this.listJson() : this.list()) - break - case 'edit': - await this.edit() - break - case 'fix': - await this.fix() - break - default: - throw this.usageError() - } - } finally { - log.enableProgress() + switch (action) { + case 'set': + await this.set(args) + break + case 'get': + await this.get(args) + break + case 'delete': + case 'rm': + case 'del': + await this.del(args) + break + case 'list': + case 'ls': + await (this.npm.flatOptions.json ? this.listJson() : this.list()) + break + case 'edit': + await this.edit() + break + case 'fix': + await this.fix() + break + default: + throw this.usageError() } } @@ -190,7 +184,7 @@ class Config extends BaseCommand { const pref = keys.length > 1 ? 
`${key}=` : '' out.push(pref + this.npm.config.get(key)) } - this.npm.output(out.join('\n')) + output.standard(out.join('\n')) } async del (keys) { @@ -206,6 +200,7 @@ class Config extends BaseCommand { } async edit () { + const ini = require('ini') const e = this.npm.flatOptions.editor const where = this.npm.flatOptions.location const file = this.npm.config.data.get(where).source @@ -287,7 +282,7 @@ ${defData} this.npm.config.repair(problems) const locations = [] - this.npm.output('The following configuration problems have been repaired:\n') + output.standard('The following configuration problems have been repaired:\n') const summary = problems.map(({ action, from, to, key, where }) => { // coverage disabled for else branch because it is intentionally omitted // istanbul ignore else @@ -300,7 +295,7 @@ ${defData} return `- \`${key}\` deleted from ${where} config` } }).join('\n') - this.npm.output(summary) + output.standard(summary) return await Promise.all(locations.map((location) => this.npm.config.save(location))) } @@ -359,7 +354,7 @@ ${defData} } } - this.npm.output(msg.join('\n').trim()) + output.standard(msg.join('\n').trim()) } async listJson () { @@ -371,7 +366,7 @@ ${defData} publicConf[key] = this.npm.config.get(key) } - this.npm.output(JSON.stringify(publicConf, null, 2)) + output.standard(JSON.stringify(publicConf, null, 2)) } } diff --git a/deps/npm/lib/commands/dedupe.js b/deps/npm/lib/commands/dedupe.js index 0d0e26621b2275..e07bcd31e894b9 100644 --- a/deps/npm/lib/commands/dedupe.js +++ b/deps/npm/lib/commands/dedupe.js @@ -1,8 +1,7 @@ -// dedupe duplicated packages, or find them in the tree const reifyFinish = require('../utils/reify-finish.js') - const ArboristWorkspaceCmd = require('../arborist-cmd.js') +// dedupe duplicated packages, or find them in the tree class Dedupe extends ArboristWorkspaceCmd { static description = 'Reduce duplication in the package tree' static name = 'dedupe' @@ -22,7 +21,7 @@ class Dedupe extends ArboristWorkspaceCmd { ...super.params, ] - async exec (args) { + async exec () { if (this.npm.global) { const er = new Error('`npm dedupe` does not work in global mode.') er.code = 'EDEDUPEGLOBAL' diff --git a/deps/npm/lib/commands/deprecate.js b/deps/npm/lib/commands/deprecate.js index bdce313923cff8..58856538fe23f2 100644 --- a/deps/npm/lib/commands/deprecate.js +++ b/deps/npm/lib/commands/deprecate.js @@ -4,7 +4,7 @@ const npa = require('npm-package-arg') const semver = require('semver') const getIdentity = require('../utils/get-identity.js') const libaccess = require('libnpmaccess') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') class Deprecate extends BaseCommand { static description = 'Deprecate a version of a package' diff --git a/deps/npm/lib/commands/diff.js b/deps/npm/lib/commands/diff.js index 64d81d525d79d2..ca8b1237b40c5a 100644 --- a/deps/npm/lib/commands/diff.js +++ b/deps/npm/lib/commands/diff.js @@ -4,9 +4,9 @@ const libnpmdiff = require('libnpmdiff') const npa = require('npm-package-arg') const pacote = require('pacote') const pickManifest = require('npm-pick-manifest') -const log = require('../utils/log-shim') +const { log, output } = require('proc-log') const pkgJson = require('@npmcli/package-json') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') class Diff extends BaseCommand { static description = 'The registry diff command' @@ -64,7 +64,7 @@ class Diff extends BaseCommand { diffFiles: args, where: this.top, }) - return 
this.npm.output(res) + return output.standard(res) } async execWorkspaces (args) { @@ -78,7 +78,7 @@ class Diff extends BaseCommand { // get the package name from the packument at `path` // throws if no packument is present OR if it does not have `name` attribute - async packageName (path) { + async packageName () { let name try { const { content: pkg } = await pkgJson.normalize(this.prefix) @@ -103,7 +103,7 @@ class Diff extends BaseCommand { // no arguments, defaults to comparing cwd // to its latest published registry version if (!a) { - const pkgName = await this.packageName(this.prefix) + const pkgName = await this.packageName() return [ `${pkgName}@${this.npm.config.get('tag')}`, `file:${this.prefix.replace(/#/g, '%23')}`, diff --git a/deps/npm/lib/commands/dist-tag.js b/deps/npm/lib/commands/dist-tag.js index ff49bc8e307cb6..e13f9ecf59c7fe 100644 --- a/deps/npm/lib/commands/dist-tag.js +++ b/deps/npm/lib/commands/dist-tag.js @@ -1,10 +1,10 @@ const npa = require('npm-package-arg') const regFetch = require('npm-registry-fetch') const semver = require('semver') -const log = require('../utils/log-shim') +const { log, output } = require('proc-log') const otplease = require('../utils/otplease.js') const pkgJson = require('@npmcli/package-json') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') class DistTag extends BaseCommand { static description = 'Modify package distribution tags' @@ -120,7 +120,7 @@ class DistTag extends BaseCommand { spec, } await otplease(this.npm, reqOpts, o => regFetch(url, o)) - this.npm.output(`+${t}: ${spec.name}@${version}`) + output.standard(`+${t}: ${spec.name}@${version}`) } async remove (spec, tag, opts) { @@ -146,7 +146,7 @@ class DistTag extends BaseCommand { spec, } await otplease(this.npm, reqOpts, o => regFetch(url, o)) - this.npm.output(`-${tag}: ${spec.name}@${version}`) + output.standard(`-${tag}: ${spec.name}@${version}`) } async list (spec, opts) { @@ -167,7 +167,7 @@ class DistTag extends BaseCommand { const tags = await this.fetchTags(spec, opts) const msg = Object.keys(tags).map(k => `${k}: ${tags[k]}`).sort().join('\n') - this.npm.output(msg) + output.standard(msg) return tags } catch (err) { log.error('dist-tag ls', "Couldn't get dist-tag data for", spec) @@ -180,7 +180,7 @@ class DistTag extends BaseCommand { for (const name of this.workspaceNames) { try { - this.npm.output(`${name}:`) + output.standard(`${name}:`) await this.list(npa(name), this.npm.flatOptions) } catch (err) { // set the exitCode directly, but ignore the error @@ -205,4 +205,5 @@ class DistTag extends BaseCommand { return data } } + module.exports = DistTag diff --git a/deps/npm/lib/commands/docs.js b/deps/npm/lib/commands/docs.js index 5d20215b56a07f..2259b49f79617f 100644 --- a/deps/npm/lib/commands/docs.js +++ b/deps/npm/lib/commands/docs.js @@ -1,4 +1,5 @@ const PackageUrlCmd = require('../package-url-cmd.js') + class Docs extends PackageUrlCmd { static description = 'Open documentation for a package in a web browser' static name = 'docs' @@ -16,4 +17,5 @@ class Docs extends PackageUrlCmd { return `https://www.npmjs.com/package/${mani.name}` } } + module.exports = Docs diff --git a/deps/npm/lib/commands/doctor.js b/deps/npm/lib/commands/doctor.js index 2a528d46ddb8dc..c29dd7e0ecb174 100644 --- a/deps/npm/lib/commands/doctor.js +++ b/deps/npm/lib/commands/doctor.js @@ -1,19 +1,14 @@ const cacache = require('cacache') -const fs = require('fs') +const { access, lstat, readdir, constants: { R_OK, W_OK, X_OK } } = 
require('fs/promises') const fetch = require('make-fetch-happen') -const Table = require('cli-table3') const which = require('which') const pacote = require('pacote') const { resolve } = require('path') const semver = require('semver') -const { promisify } = require('util') -const log = require('../utils/log-shim.js') +const { log, output } = require('proc-log') const ping = require('../utils/ping.js') const { defaults } = require('@npmcli/config/lib/definitions') -const lstat = promisify(fs.lstat) -const readdir = promisify(fs.readdir) -const access = promisify(fs.access) -const { R_OK, W_OK, X_OK } = fs.constants +const BaseCommand = require('../base-cmd.js') const maskLabel = mask => { const label = [] @@ -34,57 +29,59 @@ const maskLabel = mask => { const subcommands = [ { - groups: ['ping', 'registry'], - title: 'npm ping', + // Ping is left in as a legacy command but is listed as "connection" to + // make more sense to more people + groups: ['connection', 'ping', 'registry'], + title: 'Connecting to the registry', cmd: 'checkPing', }, { groups: ['versions'], - title: 'npm -v', + title: 'Checking npm version', cmd: 'getLatestNpmVersion', }, { groups: ['versions'], - title: 'node -v', + title: 'Checking node version', cmd: 'getLatestNodejsVersion', }, { groups: ['registry'], - title: 'npm config get registry', + title: 'Checking configured npm registry', cmd: 'checkNpmRegistry', }, { groups: ['environment'], - title: 'git executable in PATH', + title: 'Checking for git executable in PATH', cmd: 'getGitPath', }, { groups: ['environment'], - title: 'global bin folder in PATH', + title: 'Checking for global bin folder in PATH', cmd: 'getBinPath', }, { groups: ['permissions', 'cache'], - title: 'Perms check on cached files', + title: 'Checking permissions on cached files (this may take awhile)', cmd: 'checkCachePermission', windows: false, }, { groups: ['permissions'], - title: 'Perms check on local node_modules', + title: 'Checking permissions on local node_modules (this may take awhile)', cmd: 'checkLocalModulesPermission', windows: false, }, { groups: ['permissions'], - title: 'Perms check on global node_modules', + title: 'Checking permissions on global node_modules (this may take awhile)', cmd: 'checkGlobalModulesPermission', windows: false, }, { groups: ['permissions'], - title: 'Perms check on local bin folder', + title: 'Checking permissions on local bin folder', cmd: 'checkLocalBinPermission', windows: false, }, { groups: ['permissions'], - title: 'Perms check on global bin folder', + title: 'Checking permissions on global bin folder', cmd: 'checkGlobalBinPermission', windows: false, }, { groups: ['cache'], - title: 'Verify cache contents', + title: 'Verifying cache contents (this may take awhile)', cmd: 'verifyCachedFiles', windows: false, }, @@ -97,50 +94,35 @@ const subcommands = [ // - verify all local packages have bins linked // What is the fix for these? 
] -const BaseCommand = require('../base-command.js') + class Doctor extends BaseCommand { static description = 'Check the health of your npm environment' static name = 'doctor' static params = ['registry'] static ignoreImplicitWorkspace = false static usage = [`[${subcommands.flatMap(s => s.groups) - .filter((value, index, self) => self.indexOf(value) === index) + .filter((value, index, self) => self.indexOf(value) === index && value !== 'ping') .join('] [')}]`] static subcommands = subcommands - // minimum width of check column, enough for the word `Check` - #checkWidth = 5 - async exec (args) { - log.info('Running checkup') + log.info('doctor', 'Running checkup') let allOk = true const actions = this.actions(args) - this.#checkWidth = actions.reduce((length, item) => - Math.max(item.title.length, length), this.#checkWidth) - if (!this.npm.silent) { - this.output(['Check', 'Value', 'Recommendation/Notes'].map(h => this.npm.chalk.underline(h))) - } - // Do the actual work + const chalk = this.npm.chalk for (const { title, cmd } of actions) { - const item = [title] + this.output(title) + // TODO when we have an in progress indicator that could go here + let result try { - item.push(true, await this[cmd]()) + result = await this[cmd]() + this.output(`${chalk.green('Ok')}${result ? `\n${result}` : ''}\n`) } catch (err) { - item.push(false, err) - } - if (!item[1]) { allOk = false - item[0] = this.npm.chalk.red(item[0]) - item[1] = this.npm.chalk.red('not ok') - item[2] = this.npm.chalk.magenta(String(item[2])) - } else { - item[1] = this.npm.chalk.green('ok') - } - if (!this.npm.silent) { - this.output(item) + this.output(`${chalk.red('Not ok')}\n${chalk.cyan(err)}\n`) } } @@ -155,8 +137,7 @@ class Doctor extends BaseCommand { } async checkPing () { - const tracker = log.newItem('checkPing', 1) - tracker.info('checkPing', 'Pinging registry') + log.info('doctor', 'Pinging registry') try { await ping({ ...this.npm.flatOptions, retry: false }) return '' @@ -166,23 +147,16 @@ class Doctor extends BaseCommand { } else { throw er.message } - } finally { - tracker.finish() } } async getLatestNpmVersion () { - const tracker = log.newItem('getLatestNpmVersion', 1) - tracker.info('getLatestNpmVersion', 'Getting npm package information') - try { - const latest = (await pacote.manifest('npm@latest', this.npm.flatOptions)).version - if (semver.gte(this.npm.version, latest)) { - return `current: v${this.npm.version}, latest: v${latest}` - } else { - throw `Use npm v${latest}` - } - } finally { - tracker.finish() + log.info('doctor', 'Getting npm package information') + const latest = (await pacote.manifest('npm@latest', this.npm.flatOptions)).version + if (semver.gte(this.npm.version, latest)) { + return `current: v${this.npm.version}, latest: v${latest}` + } else { + throw `Use npm v${latest}` } } @@ -191,36 +165,30 @@ class Doctor extends BaseCommand { const current = process.version const currentRange = `^${current}` const url = 'https://nodejs.org/dist/index.json' - const tracker = log.newItem('getLatestNodejsVersion', 1) - tracker.info('getLatestNodejsVersion', 'Getting Node.js release information') - try { - const res = await fetch(url, { method: 'GET', ...this.npm.flatOptions }) - const data = await res.json() - let maxCurrent = '0.0.0' - let maxLTS = '0.0.0' - for (const { lts, version } of data) { - if (lts && semver.gt(version, maxLTS)) { - maxLTS = version - } - - if (semver.satisfies(version, currentRange) && semver.gt(version, maxCurrent)) { - maxCurrent = version - } + log.info('doctor', 
'Getting Node.js release information') + const res = await fetch(url, { method: 'GET', ...this.npm.flatOptions }) + const data = await res.json() + let maxCurrent = '0.0.0' + let maxLTS = '0.0.0' + for (const { lts, version } of data) { + if (lts && semver.gt(version, maxLTS)) { + maxLTS = version } - const recommended = semver.gt(maxCurrent, maxLTS) ? maxCurrent : maxLTS - if (semver.gte(process.version, recommended)) { - return `current: ${current}, recommended: ${recommended}` - } else { - throw `Use node ${recommended} (current: ${current})` + + if (semver.satisfies(version, currentRange) && semver.gt(version, maxCurrent)) { + maxCurrent = version } - } finally { - tracker.finish() + } + const recommended = semver.gt(maxCurrent, maxLTS) ? maxCurrent : maxLTS + if (semver.gte(process.version, recommended)) { + return `current: ${current}, recommended: ${recommended}` + } else { + throw `Use node ${recommended} (current: ${current})` } } - async getBinPath (dir) { - const tracker = log.newItem('getBinPath', 1) - tracker.info('getBinPath', 'Finding npm global bin in your PATH') + async getBinPath () { + log.info('doctor', 'getBinPath', 'Finding npm global bin in your PATH') if (!process.env.PATH.includes(this.npm.globalBin)) { throw new Error(`Add ${this.npm.globalBin} to your $PATH`) } @@ -250,30 +218,25 @@ class Doctor extends BaseCommand { async checkFilesPermission (root, shouldOwn, mask, missingOk) { let ok = true - const tracker = log.newItem(root, 1) - try { const uid = process.getuid() const gid = process.getgid() const files = new Set([root]) for (const f of files) { - tracker.silly('checkFilesPermission', f.slice(root.length + 1)) const st = await lstat(f).catch(er => { // if it can't be missing, or if it can and the error wasn't that it was missing if (!missingOk || er.code !== 'ENOENT') { ok = false - tracker.warn('checkFilesPermission', 'error getting info for ' + f) + log.warn('doctor', 'checkFilesPermission', 'error getting info for ' + f) } }) - tracker.completeWork(1) - if (!st) { continue } if (shouldOwn && (uid !== st.uid || gid !== st.gid)) { - tracker.warn('checkFilesPermission', 'should be owner of ' + f) + log.warn('doctor', 'checkFilesPermission', 'should be owner of ' + f) ok = false } @@ -286,14 +249,14 @@ class Doctor extends BaseCommand { } catch (er) { ok = false const msg = `Missing permissions on ${f} (expect: ${maskLabel(mask)})` - tracker.error('checkFilesPermission', msg) + log.error('doctor', 'checkFilesPermission', msg) continue } if (st.isDirectory()) { - const entries = await readdir(f).catch(er => { + const entries = await readdir(f).catch(() => { ok = false - tracker.warn('checkFilesPermission', 'error reading directory ' + f) + log.warn('doctor', 'checkFilesPermission', 'error reading directory ' + f) return [] }) for (const entry of entries) { @@ -302,7 +265,6 @@ class Doctor extends BaseCommand { } } } finally { - tracker.finish() if (!ok) { throw ( `Check the permissions of files in ${root}` + @@ -315,50 +277,43 @@ class Doctor extends BaseCommand { } async getGitPath () { - const tracker = log.newItem('getGitPath', 1) - tracker.info('getGitPath', 'Finding git in your PATH') - try { - return await which('git').catch(er => { - tracker.warn(er) - throw new Error("Install git and ensure it's in your PATH.") - }) - } finally { - tracker.finish() - } + log.info('doctor', 'Finding git in your PATH') + return await which('git').catch(er => { + log.warn('doctor', 'getGitPath', er) + throw new Error("Install git and ensure it's in your PATH.") + }) } 
async verifyCachedFiles () { - const tracker = log.newItem('verifyCachedFiles', 1) - tracker.info('verifyCachedFiles', 'Verifying the npm cache') - try { - const stats = await cacache.verify(this.npm.flatOptions.cache) - const { badContentCount, reclaimedCount, missingContent, reclaimedSize } = stats - if (badContentCount || reclaimedCount || missingContent) { - if (badContentCount) { - tracker.warn('verifyCachedFiles', `Corrupted content removed: ${badContentCount}`) - } + log.info('doctor', 'verifyCachedFiles', 'Verifying the npm cache') - if (reclaimedCount) { - tracker.warn( - 'verifyCachedFiles', - `Content garbage-collected: ${reclaimedCount} (${reclaimedSize} bytes)` - ) - } + const stats = await cacache.verify(this.npm.flatOptions.cache) + const { badContentCount, reclaimedCount, missingContent, reclaimedSize } = stats + if (badContentCount || reclaimedCount || missingContent) { + if (badContentCount) { + log.warn('doctor', 'verifyCachedFiles', `Corrupted content removed: ${badContentCount}`) + } - if (missingContent) { - tracker.warn('verifyCachedFiles', `Missing content: ${missingContent}`) - } + if (reclaimedCount) { + log.warn( + 'doctor', + 'verifyCachedFiles', + `Content garbage-collected: ${reclaimedCount} (${reclaimedSize} bytes)` + ) + } - tracker.warn('verifyCachedFiles', 'Cache issues have been fixed') + if (missingContent) { + log.warn('doctor', 'verifyCachedFiles', `Missing content: ${missingContent}`) } - tracker.info( - 'verifyCachedFiles', - `Verification complete. Stats: ${JSON.stringify(stats, null, 2)}` - ) - return `verified ${stats.verifiedContent} tarballs` - } finally { - tracker.finish() + + log.warn('doctor', 'verifyCachedFiles', 'Cache issues have been fixed') } + log.info( + 'doctor', + 'verifyCachedFiles', + `Verification complete. Stats: ${JSON.stringify(stats, null, 2)}` + ) + return `verified ${stats.verifiedContent} tarballs` } async checkNpmRegistry () { @@ -369,38 +324,11 @@ class Doctor extends BaseCommand { } } - output (row) { - const t = new Table({ - chars: { - top: '', - 'top-mid': '', - 'top-left': '', - 'top-right': '', - bottom: '', - 'bottom-mid': '', - 'bottom-left': '', - 'bottom-right': '', - left: '', - 'left-mid': '', - mid: '', - 'mid-mid': '', - right: '', - 'right-mid': '', - middle: ' ', - }, - style: { - 'padding-left': 0, - 'padding-right': 0, - // setting border here is not necessary visually since we've already - // zeroed out all the chars above, but without it cli-table3 will wrap - // some of the separator spaces with ansi codes which show up in - // snapshots. 
- border: 0, - }, - colWidths: [this.#checkWidth, 6], - }) - t.push(row) - this.npm.output(t.toString()) + output (...args) { + // TODO display layer should do this + if (!this.npm.silent) { + output.standard(...args) + } } actions (params) { diff --git a/deps/npm/lib/commands/edit.js b/deps/npm/lib/commands/edit.js index fbc7840a39876f..4110a1db55e825 100644 --- a/deps/npm/lib/commands/edit.js +++ b/deps/npm/lib/commands/edit.js @@ -1,34 +1,31 @@ -// npm edit -// open the package folder in the $EDITOR - const { resolve } = require('path') -const fs = require('graceful-fs') +const { lstat } = require('fs/promises') const cp = require('child_process') -const completion = require('../utils/completion/installed-shallow.js') -const BaseCommand = require('../base-command.js') +const completion = require('../utils/installed-shallow.js') +const BaseCommand = require('../base-cmd.js') -const splitPackageNames = (path) => { - return path.split('/') - // combine scoped parts - .reduce((parts, part) => { - if (parts.length === 0) { - return [part] - } +const splitPackageNames = (path) => path.split('/') +// combine scoped parts + .reduce((parts, part) => { + if (parts.length === 0) { + return [part] + } - const lastPart = parts[parts.length - 1] - // check if previous part is the first part of a scoped package - if (lastPart[0] === '@' && !lastPart.includes('/')) { - parts[parts.length - 1] += '/' + part - } else { - parts.push(part) - } + const lastPart = parts[parts.length - 1] + // check if previous part is the first part of a scoped package + if (lastPart[0] === '@' && !lastPart.includes('/')) { + parts[parts.length - 1] += '/' + part + } else { + parts.push(part) + } - return parts - }, []) - .join('/node_modules/') - .replace(/(\/node_modules)+/, '/node_modules') -} + return parts + }, []) + .join('/node_modules/') + .replace(/(\/node_modules)+/, '/node_modules') +// npm edit +// open the package folder in the $EDITOR class Edit extends BaseCommand { static description = 'Edit an installed package' static name = 'edit' @@ -50,27 +47,18 @@ class Edit extends BaseCommand { const path = splitPackageNames(args[0]) const dir = resolve(this.npm.dir, path) - // graceful-fs does not promisify + await lstat(dir) await new Promise((res, rej) => { - fs.lstat(dir, (err) => { - if (err) { - return rej(err) + const [bin, ...spawnArgs] = this.npm.config.get('editor').split(/\s+/) + const editor = cp.spawn(bin, [...spawnArgs, dir], { stdio: 'inherit' }) + editor.on('exit', async (code) => { + if (code) { + return rej(new Error(`editor process exited with code: ${code}`)) } - const [bin, ...spawnArgs] = this.npm.config.get('editor').split(/\s+/) - const editor = cp.spawn(bin, [...spawnArgs, dir], { stdio: 'inherit' }) - editor.on('exit', async (code) => { - if (code) { - return rej(new Error(`editor process exited with code: ${code}`)) - } - try { - await this.npm.exec('rebuild', [dir]) - } catch (execErr) { - rej(execErr) - } - res() - }) + await this.npm.exec('rebuild', [dir]).then(res).catch(rej) }) }) } } + module.exports = Edit diff --git a/deps/npm/lib/commands/exec.js b/deps/npm/lib/commands/exec.js index d532eca107c6c1..9bb4b15e0c5a3c 100644 --- a/deps/npm/lib/commands/exec.js +++ b/deps/npm/lib/commands/exec.js @@ -1,6 +1,6 @@ const { resolve } = require('path') const libexec = require('libnpmexec') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') class Exec extends BaseCommand { static description = 'Run a command from a local or remote npm package' @@ 
-65,7 +65,6 @@ class Exec extends BaseCommand { globalDir, chalk, } = this.npm - const output = this.npm.output.bind(this.npm) const scriptShell = this.npm.config.get('script-shell') || undefined const packages = this.npm.config.get('package') const yes = this.npm.config.get('yes') @@ -93,7 +92,6 @@ class Exec extends BaseCommand { globalPath, localBin, locationMsg, - output, packages, path, runPath, diff --git a/deps/npm/lib/commands/explain.js b/deps/npm/lib/commands/explain.js index 403274db68dfaf..2e7d07df729a8c 100644 --- a/deps/npm/lib/commands/explain.js +++ b/deps/npm/lib/commands/explain.js @@ -3,6 +3,7 @@ const npa = require('npm-package-arg') const semver = require('semver') const { relative, resolve } = require('path') const validName = require('validate-npm-package-name') +const { output } = require('proc-log') const ArboristWorkspaceCmd = require('../arborist-cmd.js') class Explain extends ArboristWorkspaceCmd { @@ -19,7 +20,7 @@ class Explain extends ArboristWorkspaceCmd { // TODO /* istanbul ignore next */ static async completion (opts, npm) { - const completion = require('../utils/completion/installed-deep.js') + const completion = require('../utils/installed-deep.js') return completion(npm, opts) } @@ -75,9 +76,9 @@ class Explain extends ArboristWorkspaceCmd { } if (this.npm.flatOptions.json) { - this.npm.output(JSON.stringify(expls, null, 2)) + output.standard(JSON.stringify(expls, null, 2)) } else { - this.npm.output(expls.map(expl => { + output.standard(expls.map(expl => { return explainNode(expl, Infinity, this.npm.chalk) }).join('\n\n')) } @@ -125,4 +126,5 @@ class Explain extends ArboristWorkspaceCmd { }) } } + module.exports = Explain diff --git a/deps/npm/lib/commands/explore.js b/deps/npm/lib/commands/explore.js index 7a03ea4eabd7f6..d9dd9a9931f569 100644 --- a/deps/npm/lib/commands/explore.js +++ b/deps/npm/lib/commands/explore.js @@ -1,13 +1,12 @@ -// npm explore [@] -// open a subshell to the package folder. - const pkgJson = require('@npmcli/package-json') const runScript = require('@npmcli/run-script') const { join, relative } = require('path') -const log = require('../utils/log-shim.js') -const completion = require('../utils/completion/installed-shallow.js') -const BaseCommand = require('../base-command.js') +const { log, output } = require('proc-log') +const completion = require('../utils/installed-shallow.js') +const BaseCommand = require('../base-cmd.js') +// npm explore [@] +// open a subshell to the package folder. class Explore extends BaseCommand { static description = 'Browse an installed package' static name = 'explore' @@ -50,30 +49,26 @@ class Explore extends BaseCommand { } if (!args.length) { - this.npm.output(`\nExploring ${path}\nType 'exit' or ^D when finished\n`) + output.standard(`\nExploring ${path}\nType 'exit' or ^D when finished\n`) } - log.disableProgress() - try { - return await runScript({ - ...this.npm.flatOptions, - pkg, - banner: false, - path, - event: '_explore', - stdio: 'inherit', - }).catch(er => { - process.exitCode = typeof er.code === 'number' && er.code !== 0 ? er.code - : 1 + + return runScript({ + ...this.npm.flatOptions, + pkg, + path, + event: '_explore', + stdio: 'inherit', + }).catch(er => { + process.exitCode = typeof er.code === 'number' && er.code !== 0 ? 
er.code + : 1 // if it's not an exit error, or non-interactive, throw it - const isProcExit = er.message === 'command failed' && + const isProcExit = er.message === 'command failed' && (typeof er.code === 'number' || /^SIG/.test(er.signal || '')) - if (args.length || !isProcExit) { - throw er - } - }) - } finally { - log.enableProgress() - } + if (args.length || !isProcExit) { + throw er + } + }) } } + module.exports = Explore diff --git a/deps/npm/lib/commands/find-dupes.js b/deps/npm/lib/commands/find-dupes.js index 2e06e8b6bd93f4..735ac7c4a7ed09 100644 --- a/deps/npm/lib/commands/find-dupes.js +++ b/deps/npm/lib/commands/find-dupes.js @@ -1,6 +1,6 @@ -// dedupe duplicated packages, or find them in the tree const ArboristWorkspaceCmd = require('../arborist-cmd.js') +// dedupe duplicated packages, or find them in the tree class FindDupes extends ArboristWorkspaceCmd { static description = 'Find duplication in the package tree' static name = 'find-dupes' @@ -19,9 +19,10 @@ class FindDupes extends ArboristWorkspaceCmd { ...super.params, ] - async exec (args) { + async exec () { this.npm.config.set('dry-run', true) return this.npm.exec('dedupe', []) } } + module.exports = FindDupes diff --git a/deps/npm/lib/commands/fund.js b/deps/npm/lib/commands/fund.js index 2804d36cd56034..8bcd184e709683 100644 --- a/deps/npm/lib/commands/fund.js +++ b/deps/npm/lib/commands/fund.js @@ -1,6 +1,7 @@ const archy = require('archy') const pacote = require('pacote') const semver = require('semver') +const { output } = require('proc-log') const npa = require('npm-package-arg') const { depth } = require('treeverse') const { readTree: getFundingInfo, normalizeFunding, isValidFunding } = require('libnpmfund') @@ -37,7 +38,7 @@ class Fund extends ArboristWorkspaceCmd { // TODO /* istanbul ignore next */ static async completion (opts, npm) { - const completion = require('../utils/completion/installed-deep.js') + const completion = require('../utils/installed-deep.js') return completion(npm, opts) } @@ -85,9 +86,9 @@ class Fund extends ArboristWorkspaceCmd { }) if (this.npm.config.get('json')) { - this.npm.output(this.printJSON(fundingInfo)) + output.standard(this.printJSON(fundingInfo)) } else { - this.npm.output(this.printHuman(fundingInfo)) + output.standard(this.printHuman(fundingInfo)) } } @@ -110,26 +111,25 @@ class Fund extends ArboristWorkspaceCmd { const [fundingSource] = [].concat(normalizeFunding(funding)).filter(isValidFunding) const { url } = fundingSource || {} const pkgRef = getPrintableName({ name, version }) - let item = { - label: pkgRef, - } - if (url) { - item.label = tree({ - label: this.npm.chalk.bgBlack.white(url), + if (!url) { + return { label: pkgRef } + } + let item + if (seenUrls.has(url)) { + item = seenUrls.get(url) + item.label += `${this.npm.chalk.dim(',')} ${pkgRef}` + return null + } + item = { + label: tree({ + label: this.npm.chalk.blue(url), nodes: [pkgRef], - }).trim() - - // stacks all packages together under the same item - if (seenUrls.has(url)) { - item = seenUrls.get(url) - item.label += `, ${pkgRef}` - return null - } else { - seenUrls.set(url, item) - } + }).trim(), } + // stacks all packages together under the same item + seenUrls.set(url, item) return item }, @@ -153,7 +153,7 @@ class Fund extends ArboristWorkspaceCmd { }) const res = tree(result) - return this.npm.chalk.reset(res) + return res } async openFundingUrl ({ path, tree, spec, fundingSourceNumber }) { @@ -212,7 +212,7 @@ class Fund extends ArboristWorkspaceCmd { if (fundingSourceNumber) { 
ambiguousUrlMsg.unshift(`--which=${fundingSourceNumber} is not a valid index`) } - this.npm.output(ambiguousUrlMsg.join('\n')) + output.standard(ambiguousUrlMsg.join('\n')) } urlMessage (source) { @@ -222,4 +222,5 @@ class Fund extends ArboristWorkspaceCmd { return [url, message] } } + module.exports = Fund diff --git a/deps/npm/lib/commands/get.js b/deps/npm/lib/commands/get.js index 4bf5d2caf82645..4191f2c973e7d3 100644 --- a/deps/npm/lib/commands/get.js +++ b/deps/npm/lib/commands/get.js @@ -1,5 +1,5 @@ const Npm = require('../npm.js') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') class Get extends BaseCommand { static description = 'Get a value from the npm configuration' @@ -19,4 +19,5 @@ class Get extends BaseCommand { return this.npm.exec('config', ['get'].concat(args)) } } + module.exports = Get diff --git a/deps/npm/lib/commands/help-search.js b/deps/npm/lib/commands/help-search.js index 273807c7469af0..72dd03ac7406ec 100644 --- a/deps/npm/lib/commands/help-search.js +++ b/deps/npm/lib/commands/help-search.js @@ -1,7 +1,8 @@ -const { readFile } = require('fs/promises') -const path = require('path') +const { readFile } = require('node:fs/promises') +const path = require('node:path') const { glob } = require('glob') -const BaseCommand = require('../base-command.js') +const { output } = require('proc-log') +const BaseCommand = require('../base-cmd.js') const globify = pattern => pattern.split('\\').join('/') @@ -21,12 +22,12 @@ class HelpSearch extends BaseCommand { // preserve glob@8 behavior files = files.sort((a, b) => a.localeCompare(b, 'en')) const data = await this.readFiles(files) - const results = await this.searchFiles(args, data, files) + const results = await this.searchFiles(args, data) const formatted = this.formatResults(args, results) if (!formatted.trim()) { - this.npm.output(`No matches in help for: ${args.join(' ')}\n`) + output.standard(`No matches in help for: ${args.join(' ')}\n`) } else { - this.npm.output(formatted) + output.standard(formatted) } } @@ -39,7 +40,7 @@ class HelpSearch extends BaseCommand { return res } - async searchFiles (args, data, files) { + async searchFiles (args, data) { const results = [] for (const [file, content] of Object.entries(data)) { const lowerCase = content.toLowerCase() @@ -140,7 +141,7 @@ class HelpSearch extends BaseCommand { formatResults (args, results) { const cols = Math.min(process.stdout.columns || Infinity, 80) + 1 - const output = results.map(res => { + const formattedOutput = results.map(res => { const out = [res.cmd] const r = Object.keys(res.hits) .map(k => `${k}:${res.hits[k]}`) @@ -169,8 +170,7 @@ class HelpSearch extends BaseCommand { for (const f of finder) { hilitLine.push(line.slice(p, p + f.length)) const word = line.slice(p + f.length, p + f.length + arg.length) - const hilit = this.npm.chalk.bgBlack.red(word) - hilitLine.push(hilit) + hilitLine.push(this.npm.chalk.blue(word)) p += f.length + arg.length } } @@ -183,12 +183,13 @@ class HelpSearch extends BaseCommand { const finalOut = results.length && !this.npm.config.get('long') ? 
'Top hits for ' + (args.map(JSON.stringify).join(' ')) + '\n' + '—'.repeat(cols - 1) + '\n' + - output + '\n' + + formattedOutput + '\n' + '—'.repeat(cols - 1) + '\n' + '(run with -l or --long to see more context)' - : output + : formattedOutput return finalOut.trim() } } + module.exports = HelpSearch diff --git a/deps/npm/lib/commands/help.js b/deps/npm/lib/commands/help.js index 39c580f9a68715..fb3fe664e017df 100644 --- a/deps/npm/lib/commands/help.js +++ b/deps/npm/lib/commands/help.js @@ -2,11 +2,12 @@ const spawn = require('@npmcli/promise-spawn') const path = require('path') const openUrl = require('../utils/open-url.js') const { glob } = require('glob') +const { output } = require('proc-log') const localeCompare = require('@isaacs/string-locale-compare')('en') const { deref } = require('../utils/cmd-list.js') +const BaseCommand = require('../base-cmd.js') const globify = pattern => pattern.split('\\').join('/') -const BaseCommand = require('../base-command.js') // Strips out the number from foo.7 or foo.7. or foo.7.tgz // We don't currently compress our man pages but if we ever did this would @@ -50,7 +51,7 @@ class Help extends BaseCommand { const manSearch = /^\d+$/.test(args[0]) ? `man${args.shift()}` : 'man*' if (!args.length) { - return this.npm.output(this.npm.usage) + return output.standard(this.npm.usage) } // npm help foo bar baz: search topics @@ -110,4 +111,5 @@ class Help extends BaseCommand { return 'file:///' + path.resolve(this.npm.npmRoot, `docs/output/${sect}/${f}.html`) } } + module.exports = Help diff --git a/deps/npm/lib/commands/hook.js b/deps/npm/lib/commands/hook.js index b0f52a801f5717..3b91ff539081a9 100644 --- a/deps/npm/lib/commands/hook.js +++ b/deps/npm/lib/commands/hook.js @@ -1,9 +1,9 @@ const hookApi = require('libnpmhook') const otplease = require('../utils/otplease.js') const relativeDate = require('tiny-relative-date') -const Table = require('cli-table3') +const { output } = require('proc-log') +const BaseCommand = require('../base-cmd.js') -const BaseCommand = require('../base-command.js') class Hook extends BaseCommand { static description = 'Manage registry hooks' static name = 'hook' @@ -40,86 +40,70 @@ class Hook extends BaseCommand { async add (pkg, uri, secret, opts) { const hook = await hookApi.add(pkg, uri, secret, opts) if (opts.json) { - this.npm.output(JSON.stringify(hook, null, 2)) + output.standard(JSON.stringify(hook, null, 2)) } else if (opts.parseable) { - this.npm.output(Object.keys(hook).join('\t')) - this.npm.output(Object.keys(hook).map(k => hook[k]).join('\t')) + output.standard(Object.keys(hook).join('\t')) + output.standard(Object.keys(hook).map(k => hook[k]).join('\t')) } else if (!this.npm.silent) { - this.npm.output(`+ ${this.hookName(hook)} ${opts.unicode ? ' ➜ ' : ' -> '} ${hook.endpoint}`) + output.standard(`+ ${this.hookName(hook)} ${opts.unicode ? 
' ➜ ' : ' -> '} ${hook.endpoint}`) } } async ls (pkg, opts) { const hooks = await hookApi.ls({ ...opts, package: pkg }) + if (opts.json) { - this.npm.output(JSON.stringify(hooks, null, 2)) + output.standard(JSON.stringify(hooks, null, 2)) } else if (opts.parseable) { - this.npm.output(Object.keys(hooks[0]).join('\t')) + output.standard(Object.keys(hooks[0]).join('\t')) hooks.forEach(hook => { - this.npm.output(Object.keys(hook).map(k => hook[k]).join('\t')) + output.standard(Object.keys(hook).map(k => hook[k]).join('\t')) }) } else if (!hooks.length) { - this.npm.output("You don't have any hooks configured yet.") + output.standard("You don't have any hooks configured yet.") } else if (!this.npm.silent) { - if (hooks.length === 1) { - this.npm.output('You have one hook configured.') - } else { - this.npm.output(`You have ${hooks.length} hooks configured.`) - } + output.standard(`You have ${hooks.length} hook${hooks.length !== 1 ? 's' : ''} configured.`) - const table = new Table({ head: ['id', 'target', 'endpoint'] }) - hooks.forEach((hook) => { - table.push([ - { rowSpan: 2, content: hook.id }, - this.hookName(hook), - hook.endpoint, - ]) + for (const hook of hooks) { + output.standard(`Hook ${hook.id}: ${this.hookName(hook)}`) + output.standard(`Endpoint: ${hook.endpoint}`) if (hook.last_delivery) { - table.push([ - { - colSpan: 1, - content: `triggered ${relativeDate(hook.last_delivery)}`, - }, - hook.response_code, - ]) + /* eslint-disable-next-line max-len */ + output.standard(`Triggered ${relativeDate(hook.last_delivery)}, response code was "${hook.response_code}"\n`) } else { - table.push([{ colSpan: 2, content: 'never triggered' }]) + output.standard('Never triggered\n') } - }) - this.npm.output(table.toString()) + } } } async rm (id, opts) { const hook = await hookApi.rm(id, opts) if (opts.json) { - this.npm.output(JSON.stringify(hook, null, 2)) + output.standard(JSON.stringify(hook, null, 2)) } else if (opts.parseable) { - this.npm.output(Object.keys(hook).join('\t')) - this.npm.output(Object.keys(hook).map(k => hook[k]).join('\t')) + output.standard(Object.keys(hook).join('\t')) + output.standard(Object.keys(hook).map(k => hook[k]).join('\t')) } else if (!this.npm.silent) { - this.npm.output(`- ${this.hookName(hook)} ${opts.unicode ? ' ✘ ' : ' X '} ${hook.endpoint}`) + output.standard(`- ${this.hookName(hook)} ${opts.unicode ? ' ✘ ' : ' X '} ${hook.endpoint}`) } } async update (id, uri, secret, opts) { const hook = await hookApi.update(id, uri, secret, opts) if (opts.json) { - this.npm.output(JSON.stringify(hook, null, 2)) + output.standard(JSON.stringify(hook, null, 2)) } else if (opts.parseable) { - this.npm.output(Object.keys(hook).join('\t')) - this.npm.output(Object.keys(hook).map(k => hook[k]).join('\t')) + output.standard(Object.keys(hook).join('\t')) + output.standard(Object.keys(hook).map(k => hook[k]).join('\t')) } else if (!this.npm.silent) { - this.npm.output(`+ ${this.hookName(hook)} ${opts.unicode ? ' ➜ ' : ' -> '} ${hook.endpoint}`) + output.standard(`+ ${this.hookName(hook)} ${opts.unicode ? ' ➜ ' : ' -> '} ${hook.endpoint}`) } } hookName (hook) { - let target = hook.name - if (hook.type === 'owner') { - target = '~' + target - } - return target + return `${hook.type === 'owner' ? 
'~' : ''}${hook.name}` } } + module.exports = Hook diff --git a/deps/npm/lib/commands/init.js b/deps/npm/lib/commands/init.js index 030c97356edb83..205352e86e6edc 100644 --- a/deps/npm/lib/commands/init.js +++ b/deps/npm/lib/commands/init.js @@ -1,4 +1,4 @@ -const fs = require('fs') +const { statSync } = require('fs') const { relative, resolve } = require('path') const { mkdir } = require('fs/promises') const initJson = require('init-package-json') @@ -6,13 +6,12 @@ const npa = require('npm-package-arg') const libexec = require('libnpmexec') const mapWorkspaces = require('@npmcli/map-workspaces') const PackageJson = require('@npmcli/package-json') -const log = require('../utils/log-shim.js') -const updateWorkspaces = require('../workspaces/update-workspaces.js') +const { log, output, input } = require('proc-log') +const updateWorkspaces = require('../utils/update-workspaces.js') +const BaseCommand = require('../base-cmd.js') const posixPath = p => p.split('\\').join('/') -const BaseCommand = require('../base-command.js') - class Init extends BaseCommand { static description = 'Create a package.json file' static params = [ @@ -60,7 +59,7 @@ class Init extends BaseCommand { // to create a workspace package.json file or its folders const { content: pkg } = await PackageJson.normalize(this.npm.localPrefix).catch(err => { if (err.code === 'ENOENT') { - log.warn('Missing package.json. Try with `--include-workspace-root`.') + log.warn('init', 'Missing package.json. Try with `--include-workspace-root`.') } throw err }) @@ -130,7 +129,6 @@ class Init extends BaseCommand { globalBin, chalk, } = this.npm - const output = this.npm.output.bind(this.npm) const runPath = path const scriptShell = this.npm.config.get('script-shell') || undefined const yes = this.npm.config.get('yes') @@ -150,12 +148,9 @@ class Init extends BaseCommand { } async template (path = process.cwd()) { - log.pause() - log.disableProgress() - const initFile = this.npm.config.get('init-module') if (!this.npm.config.get('yes') && !this.npm.config.get('force')) { - this.npm.output([ + output.standard([ 'This utility will walk you through creating a package.json file.', 'It only covers the most common items, and tries to guess sensible defaults.', '', @@ -170,7 +165,7 @@ class Init extends BaseCommand { } try { - const data = await initJson(path, initFile, this.npm.config) + const data = await input.read(() => initJson(path, initFile, this.npm.config)) log.silly('package data', data) return data } catch (er) { @@ -179,9 +174,6 @@ class Init extends BaseCommand { } else { throw er } - } finally { - log.resume() - log.enableProgress() } } @@ -200,7 +192,7 @@ class Init extends BaseCommand { // mapWorkspaces, so we're just going to avoid touching the // top-level package.json try { - fs.statSync(resolve(workspacePath, 'package.json')) + statSync(resolve(workspacePath, 'package.json')) } catch (err) { return } diff --git a/deps/npm/lib/commands/install-ci-test.js b/deps/npm/lib/commands/install-ci-test.js index f7a357ba6e1246..4b9dd269f8c748 100644 --- a/deps/npm/lib/commands/install-ci-test.js +++ b/deps/npm/lib/commands/install-ci-test.js @@ -1,8 +1,7 @@ -// npm install-ci-test -// Runs `npm ci` and then runs `npm test` - const CI = require('./ci.js') +// npm install-ci-test +// Runs `npm ci` and then runs `npm test` class InstallCITest extends CI { static description = 'Install a project with a clean slate and run tests' static name = 'install-ci-test' @@ -12,4 +11,5 @@ class InstallCITest extends CI { return this.npm.exec('test', []) } 
} + module.exports = InstallCITest diff --git a/deps/npm/lib/commands/install-test.js b/deps/npm/lib/commands/install-test.js index 11f22e535403cc..e21ca7c929c55e 100644 --- a/deps/npm/lib/commands/install-test.js +++ b/deps/npm/lib/commands/install-test.js @@ -1,8 +1,7 @@ -// npm install-test -// Runs `npm install` and then runs `npm test` - const Install = require('./install.js') +// npm install-test +// Runs `npm install` and then runs `npm test` class InstallTest extends Install { static description = 'Install package(s) and run tests' static name = 'install-test' @@ -12,4 +11,5 @@ class InstallTest extends Install { return this.npm.exec('test', []) } } + module.exports = InstallTest diff --git a/deps/npm/lib/commands/install.js b/deps/npm/lib/commands/install.js index d04a35fbec2a76..24e5f6819b3141 100644 --- a/deps/npm/lib/commands/install.js +++ b/deps/npm/lib/commands/install.js @@ -1,15 +1,12 @@ -/* eslint-disable camelcase */ -const fs = require('fs') -const util = require('util') -const readdir = util.promisify(fs.readdir) -const reifyFinish = require('../utils/reify-finish.js') -const log = require('../utils/log-shim.js') -const { resolve, join } = require('path') +const { readdir } = require('node:fs/promises') +const { resolve, join } = require('node:path') +const { log } = require('proc-log') const runScript = require('@npmcli/run-script') const pacote = require('pacote') const checks = require('npm-install-checks') - +const reifyFinish = require('../utils/reify-finish.js') const ArboristWorkspaceCmd = require('../arborist-cmd.js') + class Install extends ArboristWorkspaceCmd { static description = 'Install a package' static name = 'install' @@ -168,7 +165,6 @@ class Install extends ArboristWorkspaceCmd { args: [], scriptShell, stdio: 'inherit', - banner: !this.npm.silent, event, }) } @@ -176,4 +172,5 @@ class Install extends ArboristWorkspaceCmd { await reifyFinish(this.npm, arb) } } + module.exports = Install diff --git a/deps/npm/lib/commands/link.js b/deps/npm/lib/commands/link.js index cdc248569849c3..bde761c4226dcd 100644 --- a/deps/npm/lib/commands/link.js +++ b/deps/npm/lib/commands/link.js @@ -1,15 +1,11 @@ -const fs = require('fs') -const util = require('util') -const readdir = util.promisify(fs.readdir) +const { readdir } = require('fs/promises') const { resolve } = require('path') - const npa = require('npm-package-arg') const pkgJson = require('@npmcli/package-json') const semver = require('semver') - const reifyFinish = require('../utils/reify-finish.js') - const ArboristWorkspaceCmd = require('../arborist-cmd.js') + class Link extends ArboristWorkspaceCmd { static description = 'Symlink a package folder' static name = 'link' @@ -189,4 +185,5 @@ class Link extends ArboristWorkspaceCmd { return missing } } + module.exports = Link diff --git a/deps/npm/lib/commands/login.js b/deps/npm/lib/commands/login.js index b498a3bf2ecd8b..630abf9ac8e045 100644 --- a/deps/npm/lib/commands/login.js +++ b/deps/npm/lib/commands/login.js @@ -1,8 +1,7 @@ -const log = require('../utils/log-shim.js') +const { log, output } = require('proc-log') const { redactLog: replaceInfo } = require('@npmcli/redact') const auth = require('../utils/auth.js') - -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') class Login extends BaseCommand { static description = 'Login to a registry user account' @@ -13,7 +12,7 @@ class Login extends BaseCommand { 'auth-type', ] - async exec (args) { + async exec () { const scope = this.npm.config.get('scope') let 
registry = this.npm.config.get('registry') @@ -27,7 +26,6 @@ class Login extends BaseCommand { const creds = this.npm.config.getCredentialsByURI(registry) - log.disableProgress() log.notice('', `Log in on ${replaceInfo(registry)}`) const { message, newCreds } = await auth.login(this.npm, { @@ -45,7 +43,8 @@ class Login extends BaseCommand { await this.npm.config.save('user') - this.npm.output(message) + output.standard(message) } } + module.exports = Login diff --git a/deps/npm/lib/commands/logout.js b/deps/npm/lib/commands/logout.js index 665580930639cf..dc5a0dfda0e98e 100644 --- a/deps/npm/lib/commands/logout.js +++ b/deps/npm/lib/commands/logout.js @@ -1,7 +1,7 @@ const npmFetch = require('npm-registry-fetch') const { getAuth } = npmFetch -const log = require('../utils/log-shim') -const BaseCommand = require('../base-command.js') +const { log } = require('proc-log') +const BaseCommand = require('../base-cmd.js') class Logout extends BaseCommand { static description = 'Log out of the registry' @@ -11,7 +11,7 @@ class Logout extends BaseCommand { 'scope', ] - async exec (args) { + async exec () { const registry = this.npm.config.get('registry') const scope = this.npm.config.get('scope') const regRef = scope ? `${scope}:registry` : 'registry' @@ -46,4 +46,5 @@ class Logout extends BaseCommand { await this.npm.config.save(level) } } + module.exports = Logout diff --git a/deps/npm/lib/commands/ls.js b/deps/npm/lib/commands/ls.js index 3f9775cf125040..51e99f429816a9 100644 --- a/deps/npm/lib/commands/ls.js +++ b/deps/npm/lib/commands/ls.js @@ -1,10 +1,12 @@ const { resolve, relative, sep } = require('path') -const relativePrefix = `.${sep}` -const { EOL } = require('os') - const archy = require('archy') const { breadth } = require('treeverse') const npa = require('npm-package-arg') +const { output } = require('proc-log') +const ArboristWorkspaceCmd = require('../arborist-cmd.js') +const localeCompare = require('@isaacs/string-locale-compare')('en') + +const relativePrefix = `.${sep}` const _depth = Symbol('depth') const _dedupe = Symbol('dedupe') @@ -17,8 +19,6 @@ const _parent = Symbol('parent') const _problems = Symbol('problems') const _required = Symbol('required') const _type = Symbol('type') -const ArboristWorkspaceCmd = require('../arborist-cmd.js') -const localeCompare = require('@isaacs/string-locale-compare')('en') class LS extends ArboristWorkspaceCmd { static description = 'List installed packages' @@ -42,7 +42,7 @@ class LS extends ArboristWorkspaceCmd { // TODO /* istanbul ignore next */ static async completion (opts, npm) { - const completion = require('../utils/completion/installed-deep.js') + const completion = require('../utils/installed-deep.js') return completion(npm, opts) } @@ -177,7 +177,7 @@ class LS extends ArboristWorkspaceCmd { const [rootError] = tree.errors.filter(e => e.code === 'EJSONPARSE' && e.path === resolve(path, 'package.json')) - this.npm.outputBuffer( + output.buffer( json ? jsonOutput({ path, problems, result, rootError, seenItems }) : parseable ? 
parseableOutput({ seenNodes, global, long }) : humanOutput({ chalk, result, seenItems, unicode }) @@ -200,7 +200,7 @@ class LS extends ArboristWorkspaceCmd { if (shouldThrow) { throw Object.assign( - new Error([...problems].join(EOL)), + new Error([...problems].join('\n')), { code: 'ELSPROBLEMS' } ) } @@ -219,6 +219,7 @@ class LS extends ArboristWorkspaceCmd { return tree } } + module.exports = LS const isGitNode = (node) => { @@ -280,7 +281,7 @@ const augmentItemWithIncludeMetadata = (node, item) => { const getHumanOutputItem = (node, { args, chalk, global, long }) => { const { pkgid, path } = node - const workspacePkgId = chalk.green(pkgid) + const workspacePkgId = chalk.blueBright(pkgid) let printable = node.isWorkspace ? workspacePkgId : pkgid // special formatting for top-level package name @@ -289,14 +290,16 @@ const getHumanOutputItem = (node, { args, chalk, global, long }) => { if (hasNoPackageJson || global) { printable = path } else { - printable += `${long ? EOL : ' '}${path}` + printable += `${long ? '\n' : ' '}${path}` } } + // TODO there is a LOT of overlap with lib/utils/explain-dep.js here + const highlightDepName = args.length && node[_filteredBy] const missingColor = isOptional(node) - ? chalk.yellow.bgBlack - : chalk.red.bgBlack + ? chalk.yellow + : chalk.red const missingMsg = `UNMET ${isOptional(node) ? 'OPTIONAL ' : ''}DEPENDENCY` const targetLocation = node.root ? relative(node.root.realpath, node.realpath) @@ -310,30 +313,30 @@ const getHumanOutputItem = (node, { args, chalk, global, long }) => { ? missingColor(missingMsg) + ' ' : '' ) + - `${highlightDepName ? chalk.yellow.bgBlack(printable) : printable}` + + `${highlightDepName ? chalk.yellow(printable) : printable}` + ( node[_dedupe] - ? ' ' + chalk.gray('deduped') + ? ' ' + chalk.dim('deduped') : '' ) + ( invalid - ? ' ' + chalk.red.bgBlack(invalid) + ? ' ' + chalk.red(invalid) : '' ) + ( isExtraneous(node, { global }) - ? ' ' + chalk.green.bgBlack('extraneous') + ? ' ' + chalk.red('extraneous') : '' ) + ( node.overridden - ? ' ' + chalk.gray('overridden') + ? ' ' + chalk.dim('overridden') : '' ) + (isGitNode(node) ? ` (${node.resolved})` : '') + (node.isLink ? ` -> ${relativePrefix}${targetLocation}` : '') + - (long ? `${EOL}${node.package.description || ''}` : '') + (long ? `\n${node.package.description || ''}` : '') return augmentItemWithIncludeMetadata(node, { label, nodes: [] }) } @@ -566,7 +569,7 @@ const parseableOutput = ({ global, long, seenNodes }) => { out += node[_invalid] ? ':INVALID' : '' out += node.overridden ? 
':OVERRIDDEN' : '' } - out += EOL + out += '\n' } } return out.trim() diff --git a/deps/npm/lib/commands/org.js b/deps/npm/lib/commands/org.js index 1f32d41ff73068..af67547a643db2 100644 --- a/deps/npm/lib/commands/org.js +++ b/deps/npm/lib/commands/org.js @@ -1,7 +1,7 @@ const liborg = require('libnpmorg') const otplease = require('../utils/otplease.js') -const Table = require('cli-table3') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') +const { output } = require('proc-log') class Org extends BaseCommand { static description = 'Manage orgs' @@ -68,14 +68,14 @@ class Org extends BaseCommand { const memDeets = await liborg.set(org, user, role, opts) if (opts.json) { - this.npm.output(JSON.stringify(memDeets, null, 2)) + output.standard(JSON.stringify(memDeets, null, 2)) } else if (opts.parseable) { - this.npm.output(['org', 'orgsize', 'user', 'role'].join('\t')) - this.npm.output( + output.standard(['org', 'orgsize', 'user', 'role'].join('\t')) + output.standard( [memDeets.org.name, memDeets.org.size, memDeets.user, memDeets.role].join('\t') ) } else if (!this.npm.silent) { - this.npm.output( + output.standard( `Added ${memDeets.user} as ${memDeets.role} to ${memDeets.org.name}. You now have ${ memDeets.org.size } member${memDeets.org.size === 1 ? '' : 's'} in this org.` @@ -100,7 +100,7 @@ class Org extends BaseCommand { org = org.replace(/^[~@]?/, '') const userCount = Object.keys(roster).length if (opts.json) { - this.npm.output( + output.standard( JSON.stringify({ user, org, @@ -109,10 +109,10 @@ class Org extends BaseCommand { }) ) } else if (opts.parseable) { - this.npm.output(['user', 'org', 'userCount', 'deleted'].join('\t')) - this.npm.output([user, org, userCount, true].join('\t')) + output.standard(['user', 'org', 'userCount', 'deleted'].join('\t')) + output.standard([user, org, userCount, true].join('\t')) } else if (!this.npm.silent) { - this.npm.output( + output.standard( `Successfully removed ${user} from ${org}. You now have ${userCount} member${ userCount === 1 ? 
'' : 's' } in this org.` @@ -135,21 +135,19 @@ class Org extends BaseCommand { roster = newRoster } if (opts.json) { - this.npm.output(JSON.stringify(roster, null, 2)) + output.standard(JSON.stringify(roster, null, 2)) } else if (opts.parseable) { - this.npm.output(['user', 'role'].join('\t')) + output.standard(['user', 'role'].join('\t')) Object.keys(roster).forEach(u => { - this.npm.output([u, roster[u]].join('\t')) + output.standard([u, roster[u]].join('\t')) }) } else if (!this.npm.silent) { - const table = new Table({ head: ['user', 'role'] }) - Object.keys(roster) - .sort() - .forEach(u => { - table.push([u, roster[u]]) - }) - this.npm.output(table.toString()) + const chalk = this.npm.chalk + for (const u of Object.keys(roster).sort()) { + output.standard(`${u} - ${chalk.cyan(roster[u])}`) + } } } } + module.exports = Org diff --git a/deps/npm/lib/commands/outdated.js b/deps/npm/lib/commands/outdated.js index 4216f1cdb1437f..2249808110bbbf 100644 --- a/deps/npm/lib/commands/outdated.js +++ b/deps/npm/lib/commands/outdated.js @@ -1,12 +1,11 @@ -const os = require('node:os') const { resolve } = require('node:path') const { stripVTControlCharacters } = require('node:util') const pacote = require('pacote') const table = require('text-table') const npa = require('npm-package-arg') const pickManifest = require('npm-pick-manifest') +const { output } = require('proc-log') const localeCompare = require('@isaacs/string-locale-compare')('en') - const ArboristWorkspaceCmd = require('../arborist-cmd.js') class Outdated extends ArboristWorkspaceCmd { @@ -84,9 +83,9 @@ class Outdated extends ArboristWorkspaceCmd { // display results if (this.npm.config.get('json')) { - this.npm.output(this.makeJSON(outdated)) + output.standard(this.makeJSON(outdated)) } else if (this.npm.config.get('parseable')) { - this.npm.output(this.makeParseable(outdated)) + output.standard(this.makeParseable(outdated)) } else { const outList = outdated.map(x => this.makePretty(x)) const outHead = ['Package', @@ -102,13 +101,13 @@ class Outdated extends ArboristWorkspaceCmd { } const outTable = [outHead].concat(outList) - outTable[0] = outTable[0].map(heading => this.npm.chalk.underline(heading)) + outTable[0] = outTable[0].map(heading => this.npm.chalk.bold.underline(heading)) const tableOpts = { align: ['l', 'r', 'r', 'r', 'l'], stringLength: s => stripVTControlCharacters(s).length, } - this.npm.output(table(outTable, tableOpts)) + output.standard(table(outTable, tableOpts)) } } @@ -161,7 +160,7 @@ class Outdated extends ArboristWorkspaceCmd { this.edges.add(edge) } - getWorkspacesEdges (node) { + getWorkspacesEdges () { if (this.npm.global) { return } @@ -278,7 +277,7 @@ class Outdated extends ArboristWorkspaceCmd { : node.name return humanOutput - ? this.npm.chalk.green(workspaceName) + ? this.npm.chalk.blue(workspaceName) : workspaceName } @@ -295,17 +294,20 @@ class Outdated extends ArboristWorkspaceCmd { dependent, } = dep - const columns = [name, current, wanted, latest, location, dependent] + const columns = [ + this.npm.chalk[current === wanted ? 'yellow' : 'red'](name), + current, + this.npm.chalk.cyan(wanted), + this.npm.chalk.blue(latest), + location, + dependent, + ] if (this.npm.config.get('long')) { columns[6] = type - columns[7] = homepage + columns[7] = this.npm.chalk.blue(homepage) } - columns[0] = this.npm.chalk[current === wanted ? 
'yellow' : 'red'](columns[0]) // current - columns[2] = this.npm.chalk.green(columns[2]) // wanted - columns[3] = this.npm.chalk.magenta(columns[3]) // latest - return columns } @@ -335,7 +337,7 @@ class Outdated extends ArboristWorkspaceCmd { } return out.join(':') - }).join(os.EOL) + }).join('\n') } makeJSON (list) { @@ -366,4 +368,5 @@ class Outdated extends ArboristWorkspaceCmd { return JSON.stringify(out, null, 2) } } + module.exports = Outdated diff --git a/deps/npm/lib/commands/owner.js b/deps/npm/lib/commands/owner.js index e530e1c51c8e1f..188065583198d0 100644 --- a/deps/npm/lib/commands/owner.js +++ b/deps/npm/lib/commands/owner.js @@ -1,10 +1,10 @@ const npa = require('npm-package-arg') const npmFetch = require('npm-registry-fetch') const pacote = require('pacote') -const log = require('../utils/log-shim') +const { log, output } = require('proc-log') const otplease = require('../utils/otplease.js') const pkgJson = require('@npmcli/package-json') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') const { redact } = require('@npmcli/redact') const readJson = async (path) => { @@ -115,9 +115,9 @@ class Owner extends BaseCommand { const packumentOpts = { ...this.npm.flatOptions, fullMetadata: true, preferOnline: true } const { maintainers } = await pacote.packument(spec, packumentOpts) if (!maintainers || !maintainers.length) { - this.npm.output('no admin found') + output.standard('no admin found') } else { - this.npm.output(maintainers.map(m => `${m.name} <${m.email}>`).join('\n')) + output.standard(maintainers.map(m => `${m.name} <${m.email}>`).join('\n')) } } catch (err) { log.error('owner ls', "Couldn't get owner data", redact(pkg)) @@ -216,9 +216,9 @@ class Owner extends BaseCommand { }) }) if (addOrRm === 'add') { - this.npm.output(`+ ${user} (${spec.name})`) + output.standard(`+ ${user} (${spec.name})`) } else { - this.npm.output(`- ${user} (${spec.name})`) + output.standard(`- ${user} (${spec.name})`) } return res } catch (err) { diff --git a/deps/npm/lib/commands/pack.js b/deps/npm/lib/commands/pack.js index 6d5f00df55e3fc..f64a21dcc0d9de 100644 --- a/deps/npm/lib/commands/pack.js +++ b/deps/npm/lib/commands/pack.js @@ -1,9 +1,9 @@ const pacote = require('pacote') const libpack = require('libnpmpack') const npa = require('npm-package-arg') -const log = require('../utils/log-shim') +const { log, output } = require('proc-log') const { getContents, logTar } = require('../utils/tar.js') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') class Pack extends BaseCommand { static description = 'Create a tarball from a package' @@ -58,13 +58,13 @@ class Pack extends BaseCommand { } if (json) { - this.npm.output(JSON.stringify(tarballs, null, 2)) + output.standard(JSON.stringify(tarballs, null, 2)) return } for (const tar of tarballs) { logTar(tar, { unicode }) - this.npm.output(tar.filename.replace(/^@/, '').replace(/\//, '-')) + output.standard(tar.filename.replace(/^@/, '').replace(/\//, '-')) } } @@ -83,4 +83,5 @@ class Pack extends BaseCommand { return this.exec([...this.workspacePaths, ...args.filter(a => a !== '.')]) } } + module.exports = Pack diff --git a/deps/npm/lib/commands/ping.js b/deps/npm/lib/commands/ping.js index 2d60f5d69a8da6..0d057862baa8fe 100644 --- a/deps/npm/lib/commands/ping.js +++ b/deps/npm/lib/commands/ping.js @@ -1,14 +1,14 @@ const { redact } = require('@npmcli/redact') -const log = require('../utils/log-shim') +const { log, output } = require('proc-log') const 
pingUtil = require('../utils/ping.js') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') class Ping extends BaseCommand { static description = 'Ping npm registry' static params = ['registry'] static name = 'ping' - async exec (args) { + async exec () { const cleanRegistry = redact(this.npm.config.get('registry')) log.notice('PING', cleanRegistry) const start = Date.now() @@ -16,14 +16,15 @@ class Ping extends BaseCommand { const time = Date.now() - start log.notice('PONG', `${time}ms`) if (this.npm.config.get('json')) { - this.npm.output(JSON.stringify({ + output.standard(JSON.stringify({ registry: cleanRegistry, time, details, }, null, 2)) } else if (Object.keys(details).length) { - log.notice('PONG', `${JSON.stringify(details, null, 2)}`) + log.notice('PONG', JSON.stringify(details, null, 2)) } } } + module.exports = Ping diff --git a/deps/npm/lib/commands/pkg.js b/deps/npm/lib/commands/pkg.js index 49a66823cca996..62553b15103e3b 100644 --- a/deps/npm/lib/commands/pkg.js +++ b/deps/npm/lib/commands/pkg.js @@ -1,5 +1,6 @@ +const { output } = require('proc-log') const PackageJson = require('@npmcli/package-json') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') const Queryable = require('../utils/queryable.js') class Pkg extends BaseCommand { @@ -62,7 +63,7 @@ class Pkg extends BaseCommand { } // when running in workspaces names, make sure to key by workspace // name the results of each value retrieved in each ws - this.npm.output(JSON.stringify(result, null, 2)) + output.standard(JSON.stringify(result, null, 2)) } async get (args) { @@ -85,7 +86,7 @@ class Pkg extends BaseCommand { // only outputs if not running with workspaces config // execWorkspaces will handle the output otherwise if (!this.workspaces) { - this.npm.output(JSON.stringify(result, null, 2)) + output.standard(JSON.stringify(result, null, 2)) } return result diff --git a/deps/npm/lib/commands/prefix.js b/deps/npm/lib/commands/prefix.js index 264b819fc7692a..da8702cf91caaf 100644 --- a/deps/npm/lib/commands/prefix.js +++ b/deps/npm/lib/commands/prefix.js @@ -1,4 +1,5 @@ -const BaseCommand = require('../base-command.js') +const { output } = require('proc-log') +const BaseCommand = require('../base-cmd.js') class Prefix extends BaseCommand { static description = 'Display prefix' @@ -6,8 +7,9 @@ class Prefix extends BaseCommand { static params = ['global'] static usage = ['[-g]'] - async exec (args) { - return this.npm.output(this.npm.prefix) + async exec () { + return output.standard(this.npm.prefix) } } + module.exports = Prefix diff --git a/deps/npm/lib/commands/profile.js b/deps/npm/lib/commands/profile.js index a7d4ac2f29fbe7..8eae6278549f56 100644 --- a/deps/npm/lib/commands/profile.js +++ b/deps/npm/lib/commands/profile.js @@ -1,13 +1,11 @@ -const inspect = require('util').inspect +const { inspect } = require('util') const { URL } = require('url') -const log = require('../utils/log-shim.js') +const { log, output } = require('proc-log') const npmProfile = require('npm-profile') const qrcodeTerminal = require('qrcode-terminal') -const Table = require('cli-table3') - const otplease = require('../utils/otplease.js') -const pulseTillDone = require('../utils/pulse-till-done.js') const readUserInfo = require('../utils/read-user-info.js') +const BaseCommand = require('../base-cmd.js') const qrcode = url => new Promise((resolve) => qrcodeTerminal.generate(url, resolve)) @@ -35,7 +33,6 @@ const writableProfileKeys = [ 'github', ] 
-const BaseCommand = require('../base-command.js') class Profile extends BaseCommand { static description = 'Change settings on your registry profile' static name = 'profile' @@ -80,8 +77,6 @@ class Profile extends BaseCommand { throw this.usageError() } - log.gauge.show('profile') - const [subcmd, ...opts] = args switch (subcmd) { @@ -106,16 +101,14 @@ class Profile extends BaseCommand { async get (args) { const tfa = 'two-factor auth' - const info = await pulseTillDone.withPromise( - npmProfile.get({ ...this.npm.flatOptions }) - ) + const info = await npmProfile.get({ ...this.npm.flatOptions }) if (!info.cidr_whitelist) { delete info.cidr_whitelist } if (this.npm.config.get('json')) { - this.npm.output(JSON.stringify(info, null, 2)) + output.standard(JSON.stringify(info, null, 2)) return } @@ -147,23 +140,20 @@ class Profile extends BaseCommand { .filter((arg) => arg.trim() !== '') .map((arg) => cleaned[arg]) .join('\t') - this.npm.output(values) + output.standard(values) } else { if (this.npm.config.get('parseable')) { for (const key of Object.keys(info)) { if (key === 'tfa') { - this.npm.output(`${key}\t${cleaned[tfa]}`) + output.standard(`${key}\t${cleaned[tfa]}`) } else { - this.npm.output(`${key}\t${info[key]}`) + output.standard(`${key}\t${info[key]}`) } } } else { - const table = new Table() - for (const key of Object.keys(cleaned)) { - table.push({ [this.npm.chalk.bold(key)]: cleaned[key] }) + for (const [key, value] of Object.entries(cleaned)) { + output.standard(`${key}: ${value}`) } - - this.npm.output(table.toString()) } } } @@ -209,7 +199,7 @@ class Profile extends BaseCommand { } // FIXME: Work around to not clear everything other than what we're setting - const user = await pulseTillDone.withPromise(npmProfile.get(conf)) + const user = await npmProfile.get(conf) const newUser = {} for (const key of writableProfileKeys) { @@ -221,13 +211,13 @@ class Profile extends BaseCommand { const result = await otplease(this.npm, conf, c => npmProfile.set(newUser, c)) if (this.npm.config.get('json')) { - this.npm.output(JSON.stringify({ [prop]: result[prop] }, null, 2)) + output.standard(JSON.stringify({ [prop]: result[prop] }, null, 2)) } else if (this.npm.config.get('parseable')) { - this.npm.output(prop + '\t' + result[prop]) + output.standard(prop + '\t' + result[prop]) } else if (result[prop] != null) { - this.npm.output('Set', prop, 'to', result[prop]) + output.standard('Set', prop, 'to', result[prop]) } else { - this.npm.output('Set', prop) + output.standard('Set', prop) } } @@ -307,16 +297,12 @@ class Profile extends BaseCommand { info.tfa.password = password log.info('profile', 'Determine if tfa is pending') - const userInfo = await pulseTillDone.withPromise( - npmProfile.get({ ...this.npm.flatOptions }) - ) + const userInfo = await npmProfile.get({ ...this.npm.flatOptions }) const conf = { ...this.npm.flatOptions } if (userInfo && userInfo.tfa && userInfo.tfa.pending) { log.info('profile', 'Resetting two-factor authentication') - await pulseTillDone.withPromise( - npmProfile.set({ tfa: { password, mode: 'disable' } }, conf) - ) + await npmProfile.set({ tfa: { password, mode: 'disable' } }, conf) } else if (userInfo && userInfo.tfa) { if (!conf.otp) { conf.otp = await readUserInfo.otp( @@ -326,12 +312,10 @@ class Profile extends BaseCommand { } log.info('profile', 'Setting two-factor authentication to ' + mode) - const challenge = await pulseTillDone.withPromise( - npmProfile.set(info, conf) - ) + const challenge = await npmProfile.set(info, conf) if (challenge.tfa === null) { 
- this.npm.output('Two factor authentication mode changed to: ' + mode) + output.standard('Two factor authentication mode changed to: ' + mode) return } @@ -348,7 +332,7 @@ class Profile extends BaseCommand { const secret = otpauth.searchParams.get('secret') const code = await qrcode(challenge.tfa) - this.npm.output( + output.standard( 'Scan into your authenticator app:\n' + code + '\n Or enter code:', secret ) @@ -359,26 +343,26 @@ class Profile extends BaseCommand { const result = await npmProfile.set({ tfa: [interactiveOTP] }, conf) - this.npm.output( + output.standard( '2FA successfully enabled. Below are your recovery codes, ' + 'please print these out.' ) - this.npm.output( + output.standard( 'You will need these to recover access to your account ' + 'if you lose your authentication device.' ) for (const tfaCode of result.tfa) { - this.npm.output('\t' + tfaCode) + output.standard('\t' + tfaCode) } } - async disable2fa (args) { + async disable2fa () { const conf = { ...this.npm.flatOptions } - const info = await pulseTillDone.withPromise(npmProfile.get(conf)) + const info = await npmProfile.get(conf) if (!info.tfa || info.tfa.pending) { - this.npm.output('Two factor authentication not enabled.') + output.standard('Two factor authentication not enabled.') return } @@ -391,17 +375,16 @@ class Profile extends BaseCommand { log.info('profile', 'disabling tfa') - await pulseTillDone.withPromise(npmProfile.set({ - tfa: { password: password, mode: 'disable' }, - }, conf)) + await npmProfile.set({ tfa: { password: password, mode: 'disable' } }, conf) if (this.npm.config.get('json')) { - this.npm.output(JSON.stringify({ tfa: false }, null, 2)) + output.standard(JSON.stringify({ tfa: false }, null, 2)) } else if (this.npm.config.get('parseable')) { - this.npm.output('tfa\tfalse') + output.standard('tfa\tfalse') } else { - this.npm.output('Two factor authentication disabled.') + output.standard('Two factor authentication disabled.') } } } + module.exports = Profile diff --git a/deps/npm/lib/commands/prune.js b/deps/npm/lib/commands/prune.js index 189fc29cb8bc35..1bcf8a9576316c 100644 --- a/deps/npm/lib/commands/prune.js +++ b/deps/npm/lib/commands/prune.js @@ -1,7 +1,7 @@ -// prune extraneous packages const reifyFinish = require('../utils/reify-finish.js') - const ArboristWorkspaceCmd = require('../arborist-cmd.js') + +// prune extraneous packages class Prune extends ArboristWorkspaceCmd { static description = 'Remove extraneous packages' static name = 'prune' @@ -30,4 +30,5 @@ class Prune extends ArboristWorkspaceCmd { await reifyFinish(this.npm, arb) } } + module.exports = Prune diff --git a/deps/npm/lib/commands/publish.js b/deps/npm/lib/commands/publish.js index cf6b50cce3c21c..6bb2dcc6614bb2 100644 --- a/deps/npm/lib/commands/publish.js +++ b/deps/npm/lib/commands/publish.js @@ -1,4 +1,4 @@ -const log = require('../utils/log-shim.js') +const { log, output } = require('proc-log') const semver = require('semver') const pack = require('libnpmpack') const libpub = require('libnpmpublish').publish @@ -7,18 +7,16 @@ const pacote = require('pacote') const npa = require('npm-package-arg') const npmFetch = require('npm-registry-fetch') const { redactLog: replaceInfo } = require('@npmcli/redact') - const otplease = require('../utils/otplease.js') const { getContents, logTar } = require('../utils/tar.js') - // for historical reasons, publishConfig in package.json can contain ANY config // keys that npm supports in .npmrc files and elsewhere. 
We *may* want to // revisit this at some point, and have a minimal set that's a SemVer-major // change that ought to get a RFC written on it. const { flatten } = require('@npmcli/config/lib/definitions') const pkgJson = require('@npmcli/package-json') +const BaseCommand = require('../base-cmd.js') -const BaseCommand = require('../base-command.js') class Publish extends BaseCommand { static description = 'Publish a package' static name = 'publish' @@ -59,7 +57,6 @@ class Publish extends BaseCommand { } const opts = { ...this.npm.flatOptions, progress: false } - log.disableProgress() // you can publish name@version, ./foo.tgz, etc. // even though the default is the 'file:.' cwd. @@ -73,7 +70,6 @@ class Publish extends BaseCommand { path: spec.fetchSpec, stdio: 'inherit', pkg: manifest, - banner: !silent, }) } @@ -132,7 +128,6 @@ class Publish extends BaseCommand { path: spec.fetchSpec, stdio: 'inherit', pkg: manifest, - banner: !silent, }) await runScript({ @@ -140,22 +135,21 @@ class Publish extends BaseCommand { path: spec.fetchSpec, stdio: 'inherit', pkg: manifest, - banner: !silent, }) } if (!this.suppressOutput) { if (!silent && json) { - this.npm.output(JSON.stringify(pkgContents, null, 2)) + output.standard(JSON.stringify(pkgContents, null, 2)) } else if (!silent) { - this.npm.output(`+ ${pkgContents.id}`) + output.standard(`+ ${pkgContents.id}`) } } return pkgContents } - async execWorkspaces (args) { + async execWorkspaces () { // Suppresses JSON output in publish() so we can handle it here this.suppressOutput = true @@ -173,7 +167,7 @@ class Publish extends BaseCommand { log.warn( 'publish', `Skipping workspace ${ - this.npm.chalk.green(name) + this.npm.chalk.cyan(name) }, marked as ${ this.npm.chalk.bold('private') }` @@ -185,14 +179,14 @@ class Publish extends BaseCommand { // This needs to be in-line w/ the rest of the output that non-JSON // publish generates if (!silent && !json) { - this.npm.output(`+ ${pkgContents.id}`) + output.standard(`+ ${pkgContents.id}`) } else { results[name] = pkgContents } } if (!silent && json) { - this.npm.output(JSON.stringify(results, null, 2)) + output.standard(JSON.stringify(results, null, 2)) } } @@ -230,4 +224,5 @@ class Publish extends BaseCommand { return manifest } } + module.exports = Publish diff --git a/deps/npm/lib/commands/query.js b/deps/npm/lib/commands/query.js index dfa1356ebf436d..fe84469b88fe0a 100644 --- a/deps/npm/lib/commands/query.js +++ b/deps/npm/lib/commands/query.js @@ -1,8 +1,6 @@ -'use strict' - -const { resolve } = require('path') -const BaseCommand = require('../base-command.js') -const log = require('../utils/log-shim.js') +const { resolve } = require('node:path') +const BaseCommand = require('../base-cmd.js') +const { log, output } = require('proc-log') class QuerySelectorItem { constructor (node) { @@ -83,7 +81,7 @@ class Query extends BaseCommand { this.buildResponse(items) this.checkExpected(this.#response.length) - this.npm.output(this.parsedResponse) + output.standard(this.parsedResponse) } async execWorkspaces (args) { @@ -107,7 +105,7 @@ class Query extends BaseCommand { this.buildResponse(items) } this.checkExpected(this.#response.length) - this.npm.output(this.parsedResponse) + output.standard(this.parsedResponse) } // builds a normalized inventory diff --git a/deps/npm/lib/commands/rebuild.js b/deps/npm/lib/commands/rebuild.js index 8af96f725555cb..3894f0aa290cc7 100644 --- a/deps/npm/lib/commands/rebuild.js +++ b/deps/npm/lib/commands/rebuild.js @@ -1,8 +1,9 @@ const { resolve } = require('path') +const 
{ output } = require('proc-log') const npa = require('npm-package-arg') const semver = require('semver') - const ArboristWorkspaceCmd = require('../arborist-cmd.js') + class Rebuild extends ArboristWorkspaceCmd { static description = 'Rebuild a package' static name = 'rebuild' @@ -19,7 +20,7 @@ class Rebuild extends ArboristWorkspaceCmd { // TODO /* istanbul ignore next */ static async completion (opts, npm) { - const completion = require('../utils/completion/installed-deep.js') + const completion = require('../utils/installed-deep.js') return completion(npm, opts) } @@ -56,7 +57,7 @@ class Rebuild extends ArboristWorkspaceCmd { await arb.rebuild() } - this.npm.output('rebuilt dependencies successfully') + output.standard('rebuilt dependencies successfully') } isNode (specs, node) { @@ -79,4 +80,5 @@ class Rebuild extends ArboristWorkspaceCmd { }) } } + module.exports = Rebuild diff --git a/deps/npm/lib/commands/repo.js b/deps/npm/lib/commands/repo.js index b89b74c0bf1baa..8e2fef24771d9d 100644 --- a/deps/npm/lib/commands/repo.js +++ b/deps/npm/lib/commands/repo.js @@ -1,6 +1,6 @@ const { URL } = require('url') - const PackageUrlCmd = require('../package-url-cmd.js') + class Repo extends PackageUrlCmd { static description = 'Open package repository page in the browser' static name = 'repo' @@ -30,6 +30,7 @@ class Repo extends PackageUrlCmd { return url } } + module.exports = Repo const unknownHostedUrl = url => { diff --git a/deps/npm/lib/commands/root.js b/deps/npm/lib/commands/root.js index 7749c602456b77..180f4c4ed0720a 100644 --- a/deps/npm/lib/commands/root.js +++ b/deps/npm/lib/commands/root.js @@ -1,11 +1,14 @@ -const BaseCommand = require('../base-command.js') +const { output } = require('proc-log') +const BaseCommand = require('../base-cmd.js') + class Root extends BaseCommand { static description = 'Display npm root' static name = 'root' static params = ['global'] async exec () { - this.npm.output(this.npm.dir) + output.standard(this.npm.dir) } } + module.exports = Root diff --git a/deps/npm/lib/commands/run-script.js b/deps/npm/lib/commands/run-script.js index 75f00a46b84e9f..dd00c98fc8b6ec 100644 --- a/deps/npm/lib/commands/run-script.js +++ b/deps/npm/lib/commands/run-script.js @@ -1,22 +1,7 @@ -const runScript = require('@npmcli/run-script') -const { isServerPackage } = runScript +const { log, output } = require('proc-log') const pkgJson = require('@npmcli/package-json') -const log = require('../utils/log-shim.js') -const didYouMean = require('../utils/did-you-mean.js') -const { isWindowsShell } = require('../utils/is-windows.js') - -const cmdList = [ - 'publish', - 'install', - 'uninstall', - 'test', - 'stop', - 'start', - 'restart', - 'version', -].reduce((l, p) => l.concat(['pre' + p, p, 'post' + p]), []) - -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') + class RunScript extends BaseCommand { static description = 'Run arbitrary package scripts' static params = [ @@ -39,7 +24,7 @@ class RunScript extends BaseCommand { const argv = opts.conf.argv.remain if (argv.length === 2) { const { content: { scripts = {} } } = await pkgJson.normalize(npm.localPrefix) - .catch(er => ({ content: {} })) + .catch(() => ({ content: {} })) if (opts.isFish) { return Object.keys(scripts).map(s => `${s}\t${scripts[s].slice(0, 30)}`) } @@ -64,9 +49,7 @@ class RunScript extends BaseCommand { } async run ([event, ...args], { path = this.npm.localPrefix, pkg } = {}) { - // this || undefined is because runScript will be unhappy with the default - // 
null value - const scriptShell = this.npm.config.get('script-shell') || undefined + const runScript = require('@npmcli/run-script') if (!pkg) { const { content } = await pkgJson.normalize(path) @@ -77,6 +60,7 @@ class RunScript extends BaseCommand { if (event === 'restart' && !scripts.restart) { scripts.restart = 'npm stop --if-present && npm start' } else if (event === 'env' && !scripts.env) { + const { isWindowsShell } = require('../utils/is-windows.js') scripts.env = isWindowsShell ? 'SET' : 'env' } @@ -84,12 +68,13 @@ class RunScript extends BaseCommand { if ( !Object.prototype.hasOwnProperty.call(scripts, event) && - !(event === 'start' && (await isServerPackage(path))) + !(event === 'start' && (await runScript.isServerPackage(path))) ) { if (this.npm.config.get('if-present')) { return } + const didYouMean = require('../utils/did-you-mean.js') const suggestions = await didYouMean(path, event) throw new Error( `Missing script: "${event}"${suggestions}\n\nTo see a list of scripts, run:\n npm run` @@ -108,18 +93,14 @@ class RunScript extends BaseCommand { } } - const opts = { - path, - args, - scriptShell, - stdio: 'inherit', - pkg, - banner: !this.npm.silent, - } - for (const [ev, evArgs] of events) { await runScript({ - ...opts, + path, + // this || undefined is because runScript will be unhappy with the + // default null value + scriptShell: this.npm.config.get('script-shell') || undefined, + stdio: 'inherit', + pkg, event: ev, args: evArgs, }) @@ -141,18 +122,29 @@ class RunScript extends BaseCommand { } if (this.npm.config.get('json')) { - this.npm.output(JSON.stringify(scripts, null, 2)) + output.standard(JSON.stringify(scripts, null, 2)) return allScripts } if (this.npm.config.get('parseable')) { for (const [script, cmd] of Object.entries(scripts)) { - this.npm.output(`${script}:${cmd}`) + output.standard(`${script}:${cmd}`) } return allScripts } + // TODO this is missing things like prepare, prepublishOnly, and dependencies + const cmdList = [ + 'preinstall', 'install', 'postinstall', + 'prepublish', 'publish', 'postpublish', + 'prerestart', 'restart', 'postrestart', + 'prestart', 'start', 'poststart', + 'prestop', 'stop', 'poststop', + 'pretest', 'test', 'posttest', + 'preuninstall', 'uninstall', 'postuninstall', + 'preversion', 'version', 'postversion', + ] const indent = '\n ' const prefix = ' ' const cmds = [] @@ -164,7 +156,7 @@ class RunScript extends BaseCommand { const colorize = this.npm.chalk if (cmds.length) { - this.npm.output( + output.standard( `${colorize.reset(colorize.bold('Lifecycle scripts'))} included in ${colorize.green( pkgid )}:` @@ -172,28 +164,28 @@ class RunScript extends BaseCommand { } for (const script of cmds) { - this.npm.output(prefix + script + indent + colorize.dim(scripts[script])) + output.standard(prefix + script + indent + colorize.dim(scripts[script])) } if (!cmds.length && runScripts.length) { - this.npm.output( + output.standard( `${colorize.bold('Scripts')} available in ${colorize.green(pkgid)} via \`${colorize.blue( 'npm run-script' )}\`:` ) } else if (runScripts.length) { - this.npm.output(`\navailable via \`${colorize.blue('npm run-script')}\`:`) + output.standard(`\navailable via \`${colorize.blue('npm run-script')}\`:`) } for (const script of runScripts) { - this.npm.output(prefix + script + indent + colorize.dim(scripts[script])) + output.standard(prefix + script + indent + colorize.dim(scripts[script])) } - this.npm.output('') + output.standard('') return allScripts } - async runWorkspaces (args, filters) { + async runWorkspaces 
(args) { const res = [] await this.setWorkspaces() @@ -213,7 +205,7 @@ class RunScript extends BaseCommand { } } - async listWorkspaces (args, filters) { + async listWorkspaces (args) { await this.setWorkspaces() if (this.npm.silent) { @@ -226,7 +218,7 @@ class RunScript extends BaseCommand { const { content: { scripts, name } } = await pkgJson.normalize(workspacePath) res[name] = { ...scripts } } - this.npm.output(JSON.stringify(res, null, 2)) + output.standard(JSON.stringify(res, null, 2)) return } @@ -234,7 +226,7 @@ class RunScript extends BaseCommand { for (const workspacePath of this.workspacePaths) { const { content: { scripts, name } } = await pkgJson.normalize(workspacePath) for (const [script, cmd] of Object.entries(scripts || {})) { - this.npm.output(`${name}:${script}:${cmd}`) + output.standard(`${name}:${script}:${cmd}`) } } return diff --git a/deps/npm/lib/commands/sbom.js b/deps/npm/lib/commands/sbom.js index 311dfbc852406d..ff7377581dfa51 100644 --- a/deps/npm/lib/commands/sbom.js +++ b/deps/npm/lib/commands/sbom.js @@ -1,9 +1,6 @@ -'use strict' - -const { EOL } = require('os') const localeCompare = require('@isaacs/string-locale-compare')('en') -const BaseCommand = require('../base-command.js') -const log = require('../utils/log-shim.js') +const BaseCommand = require('../base-cmd.js') +const { log, output } = require('proc-log') const { cyclonedxOutput } = require('../utils/sbom-cyclonedx.js') const { spdxOutput } = require('../utils/sbom-spdx.js') @@ -77,7 +74,7 @@ class SBOM extends BaseCommand { if (errors.size > 0) { throw Object.assign( - new Error([...errors].join(EOL)), + new Error([...errors].join('\n')), { code: 'ESBOMPROBLEMS' } ) } @@ -87,7 +84,7 @@ class SBOM extends BaseCommand { items .sort((a, b) => localeCompare(a.location, b.location)) ) - this.npm.output(this.#parsedResponse) + output.standard(this.#parsedResponse) } async execWorkspaces (args) { diff --git a/deps/npm/lib/commands/search.js b/deps/npm/lib/commands/search.js index bb94d6da20f1c1..8b6c01e3930d81 100644 --- a/deps/npm/lib/commands/search.js +++ b/deps/npm/lib/commands/search.js @@ -1,43 +1,13 @@ -const { Minipass } = require('minipass') const Pipeline = require('minipass-pipeline') const libSearch = require('libnpmsearch') -const log = require('../utils/log-shim.js') - +const { log, output } = require('proc-log') const formatSearchStream = require('../utils/format-search-stream.js') +const BaseCommand = require('../base-cmd.js') -function filter (data, include, exclude) { - const words = [data.name] - .concat(data.maintainers.map(m => `=${m.username}`)) - .concat(data.keywords || []) - .map(f => f && f.trim && f.trim()) - .filter(f => f) - .join(' ') - .toLowerCase() - - if (exclude.find(e => match(words, e))) { - return false - } - - return true -} - -function match (words, pattern) { - if (pattern.startsWith('/')) { - if (pattern.endsWith('/')) { - pattern = pattern.slice(0, -1) - } - pattern = new RegExp(pattern.slice(1)) - return words.match(pattern) - } - return words.indexOf(pattern) !== -1 -} - -const BaseCommand = require('../base-command.js') class Search extends BaseCommand { static description = 'Search for packages' static name = 'search' static params = [ - 'long', 'json', 'color', 'parseable', @@ -51,13 +21,13 @@ class Search extends BaseCommand { 'offline', ] - static usage = ['[search terms ...]'] + static usage = [' [ ...]'] async exec (args) { const opts = { ...this.npm.flatOptions, ...this.npm.flatOptions.search, - include: args.map(s => s.toLowerCase()).filter(s => s), + 
include: args.map(s => s.toLowerCase()).filter(Boolean), exclude: this.npm.flatOptions.search.exclude.split(/\s+/), } @@ -68,30 +38,16 @@ class Search extends BaseCommand { // Used later to figure out whether we had any packages go out let anyOutput = false - class FilterStream extends Minipass { - constructor () { - super({ objectMode: true }) - } - - write (pkg) { - if (filter(pkg, opts.include, opts.exclude)) { - super.write(pkg) - } - } - } - - const filterStream = new FilterStream() - // Grab a configured output stream that will spit out packages in the desired format. - const outputStream = await formatSearchStream({ + const outputStream = formatSearchStream({ args, // --searchinclude options are not highlighted ...opts, + npm: this.npm, }) log.silly('search', 'searching packages') const p = new Pipeline( libSearch.stream(opts.include, opts), - filterStream, outputStream ) @@ -99,16 +55,16 @@ class Search extends BaseCommand { if (!anyOutput) { anyOutput = true } - this.npm.output(chunk.toString('utf8')) + output.standard(chunk.toString('utf8')) }) await p.promise() if (!anyOutput && !this.npm.config.get('json') && !this.npm.config.get('parseable')) { - this.npm.output('No matches found for ' + (args.map(JSON.stringify).join(' '))) + output.standard('No matches found for ' + (args.map(JSON.stringify).join(' '))) } log.silly('search', 'search completed') - log.clearProgress() } } + module.exports = Search diff --git a/deps/npm/lib/commands/set.js b/deps/npm/lib/commands/set.js index f315d183845c5e..2e61762ba9dcd4 100644 --- a/deps/npm/lib/commands/set.js +++ b/deps/npm/lib/commands/set.js @@ -1,5 +1,5 @@ const Npm = require('../npm.js') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') class Set extends BaseCommand { static description = 'Set a value in the npm configuration' @@ -22,4 +22,5 @@ class Set extends BaseCommand { return this.npm.exec('config', ['set'].concat(args)) } } + module.exports = Set diff --git a/deps/npm/lib/commands/shrinkwrap.js b/deps/npm/lib/commands/shrinkwrap.js index c6d817d4801423..d7866bdb91dceb 100644 --- a/deps/npm/lib/commands/shrinkwrap.js +++ b/deps/npm/lib/commands/shrinkwrap.js @@ -1,7 +1,8 @@ const { resolve, basename } = require('path') -const { unlink } = require('fs').promises -const log = require('../utils/log-shim') -const BaseCommand = require('../base-command.js') +const { unlink } = require('fs/promises') +const { log } = require('proc-log') +const BaseCommand = require('../base-cmd.js') + class Shrinkwrap extends BaseCommand { static description = 'Lock down dependency versions for publication' static name = 'shrinkwrap' @@ -68,4 +69,5 @@ class Shrinkwrap extends BaseCommand { } } } + module.exports = Shrinkwrap diff --git a/deps/npm/lib/commands/star.js b/deps/npm/lib/commands/star.js index 20039bf8938116..1b76955810c726 100644 --- a/deps/npm/lib/commands/star.js +++ b/deps/npm/lib/commands/star.js @@ -1,9 +1,9 @@ const fetch = require('npm-registry-fetch') const npa = require('npm-package-arg') -const log = require('../utils/log-shim') +const { log, output } = require('proc-log') const getIdentity = require('../utils/get-identity') +const BaseCommand = require('../base-cmd.js') -const BaseCommand = require('../base-command.js') class Star extends BaseCommand { static description = 'Mark your favorite packages' static name = 'star' @@ -62,10 +62,11 @@ class Star extends BaseCommand { body, }) - this.npm.output(show + ' ' + pkg.name) + output.standard(show + ' ' + pkg.name) 
log.verbose('star', data) return data } } } + module.exports = Star diff --git a/deps/npm/lib/commands/stars.js b/deps/npm/lib/commands/stars.js index 4214134eb5871a..1059569979dafe 100644 --- a/deps/npm/lib/commands/stars.js +++ b/deps/npm/lib/commands/stars.js @@ -1,8 +1,8 @@ const fetch = require('npm-registry-fetch') -const log = require('../utils/log-shim') +const { log, output } = require('proc-log') const getIdentity = require('../utils/get-identity.js') +const BaseCommand = require('../base-cmd.js') -const BaseCommand = require('../base-command.js') class Stars extends BaseCommand { static description = 'View packages marked as favorites' static name = 'stars' @@ -25,7 +25,7 @@ class Stars extends BaseCommand { } for (const row of rows) { - this.npm.output(row.value) + output.standard(row.value) } } catch (err) { if (err.code === 'ENEEDAUTH') { @@ -35,4 +35,5 @@ class Stars extends BaseCommand { } } } + module.exports = Stars diff --git a/deps/npm/lib/commands/team.js b/deps/npm/lib/commands/team.js index 3c6cf305a6e5f9..22af61863851ab 100644 --- a/deps/npm/lib/commands/team.js +++ b/deps/npm/lib/commands/team.js @@ -1,9 +1,9 @@ const columns = require('cli-columns') const libteam = require('libnpmteam') - +const { output } = require('proc-log') const otplease = require('../utils/otplease.js') -const BaseCommand = require('../base-command.js') +const BaseCommand = require('../base-cmd.js') class Team extends BaseCommand { static description = 'Manage organization teams and team memberships' static name = 'team' @@ -68,87 +68,88 @@ class Team extends BaseCommand { async create (entity, opts) { await libteam.create(entity, opts) if (opts.json) { - this.npm.output(JSON.stringify({ + output.standard(JSON.stringify({ created: true, team: entity, })) } else if (opts.parseable) { - this.npm.output(`${entity}\tcreated`) + output.standard(`${entity}\tcreated`) } else if (!this.npm.silent) { - this.npm.output(`+@${entity}`) + output.standard(`+@${entity}`) } } async destroy (entity, opts) { await libteam.destroy(entity, opts) if (opts.json) { - this.npm.output(JSON.stringify({ + output.standard(JSON.stringify({ deleted: true, team: entity, })) } else if (opts.parseable) { - this.npm.output(`${entity}\tdeleted`) + output.standard(`${entity}\tdeleted`) } else if (!this.npm.silent) { - this.npm.output(`-@${entity}`) + output.standard(`-@${entity}`) } } async add (entity, user, opts) { await libteam.add(user, entity, opts) if (opts.json) { - this.npm.output(JSON.stringify({ + output.standard(JSON.stringify({ added: true, team: entity, user, })) } else if (opts.parseable) { - this.npm.output(`${user}\t${entity}\tadded`) + output.standard(`${user}\t${entity}\tadded`) } else if (!this.npm.silent) { - this.npm.output(`${user} added to @${entity}`) + output.standard(`${user} added to @${entity}`) } } async rm (entity, user, opts) { await libteam.rm(user, entity, opts) if (opts.json) { - this.npm.output(JSON.stringify({ + output.standard(JSON.stringify({ removed: true, team: entity, user, })) } else if (opts.parseable) { - this.npm.output(`${user}\t${entity}\tremoved`) + output.standard(`${user}\t${entity}\tremoved`) } else if (!this.npm.silent) { - this.npm.output(`${user} removed from @${entity}`) + output.standard(`${user} removed from @${entity}`) } } async listUsers (entity, opts) { const users = (await libteam.lsUsers(entity, opts)).sort() if (opts.json) { - this.npm.output(JSON.stringify(users, null, 2)) + output.standard(JSON.stringify(users, null, 2)) } else if (opts.parseable) { - 
this.npm.output(users.join('\n')) + output.standard(users.join('\n')) } else if (!this.npm.silent) { const plural = users.length === 1 ? '' : 's' const more = users.length === 0 ? '' : ':\n' - this.npm.output(`\n@${entity} has ${users.length} user${plural}${more}`) - this.npm.output(columns(users, { padding: 1 })) + output.standard(`\n@${entity} has ${users.length} user${plural}${more}`) + output.standard(columns(users, { padding: 1 })) } } async listTeams (entity, opts) { const teams = (await libteam.lsTeams(entity, opts)).sort() if (opts.json) { - this.npm.output(JSON.stringify(teams, null, 2)) + output.standard(JSON.stringify(teams, null, 2)) } else if (opts.parseable) { - this.npm.output(teams.join('\n')) + output.standard(teams.join('\n')) } else if (!this.npm.silent) { const plural = teams.length === 1 ? '' : 's' const more = teams.length === 0 ? '' : ':\n' - this.npm.output(`\n@${entity} has ${teams.length} team${plural}${more}`) - this.npm.output(columns(teams.map(t => `@${t}`), { padding: 1 })) + output.standard(`\n@${entity} has ${teams.length} team${plural}${more}`) + output.standard(columns(teams.map(t => `@${t}`), { padding: 1 })) } } } + module.exports = Team diff --git a/deps/npm/lib/commands/token.js b/deps/npm/lib/commands/token.js index dc1df6e0fcb25b..24ca21a8e29ce9 100644 --- a/deps/npm/lib/commands/token.js +++ b/deps/npm/lib/commands/token.js @@ -1,12 +1,9 @@ -const Table = require('cli-table3') -const log = require('../utils/log-shim.js') +const { log, output } = require('proc-log') const profile = require('npm-profile') - const otplease = require('../utils/otplease.js') -const pulseTillDone = require('../utils/pulse-till-done.js') const readUserInfo = require('../utils/read-user-info.js') +const BaseCommand = require('../base-cmd.js') -const BaseCommand = require('../base-command.js') class Token extends BaseCommand { static description = 'Manage your authentication tokens' static name = 'token' @@ -28,7 +25,6 @@ class Token extends BaseCommand { } async exec (args) { - log.gauge.show('token') if (args.length === 0) { return this.list() } @@ -36,10 +32,10 @@ class Token extends BaseCommand { case 'list': case 'ls': return this.list() + case 'rm': case 'delete': case 'revoke': case 'remove': - case 'rm': return this.rm(args.slice(1)) case 'create': return this.create(args.slice(1)) @@ -49,16 +45,18 @@ class Token extends BaseCommand { } async list () { - const conf = this.config() + const json = this.npm.config.get('json') + const parseable = this.npm.config.get('parseable') log.info('token', 'getting list') - const tokens = await pulseTillDone.withPromise(profile.listTokens(conf)) - if (conf.json) { - this.npm.output(JSON.stringify(tokens, null, 2)) + const tokens = await profile.listTokens(this.npm.flatOptions) + if (json) { + output.standard(JSON.stringify(tokens, null, 2)) return - } else if (conf.parseable) { - this.npm.output(['key', 'token', 'created', 'readonly', 'CIDR whitelist'].join('\t')) + } + if (parseable) { + output.standard(['key', 'token', 'created', 'readonly', 'CIDR whitelist'].join('\t')) tokens.forEach(token => { - this.npm.output( + output.standard( [ token.key, token.token, @@ -71,21 +69,17 @@ class Token extends BaseCommand { return } this.generateTokenIds(tokens, 6) - const idWidth = tokens.reduce((acc, token) => Math.max(acc, token.id.length), 0) - const table = new Table({ - head: ['id', 'token', 'created', 'readonly', 'CIDR whitelist'], - colWidths: [Math.max(idWidth, 2) + 2, 9, 12, 10], - }) - tokens.forEach(token => { - table.push([ - 
token.id, - token.token + '…', - String(token.created).slice(0, 10), - token.readonly ? 'yes' : 'no', - token.cidr_whitelist ? token.cidr_whitelist.join(', ') : '', - ]) - }) - this.npm.output(table.toString()) + const chalk = this.npm.chalk + for (const token of tokens) { + const level = token.readonly ? 'Read only token' : 'Publish token' + const created = String(token.created).slice(0, 10) + /* eslint-disable-next-line max-len */ + output.standard(`${chalk.blue(level)} ${token.token}… with id ${chalk.cyan(token.id)} created ${created}`) + if (token.cidr_whitelist) { + output.standard(`with IP whitelist: ${chalk.green(token.cidr_whitelist.join(','))}`) + } + output.standard() + } } async rm (args) { @@ -93,11 +87,12 @@ class Token extends BaseCommand { throw this.usageError('`` argument is required.') } - const conf = this.config() + const json = this.npm.config.get('json') + const parseable = this.npm.config.get('parseable') const toRemove = [] - const progress = log.newItem('removing tokens', toRemove.length) - progress.info('token', 'getting existing list') - const tokens = await pulseTillDone.withPromise(profile.listTokens(conf)) + const opts = { ...this.npm.flatOptions } + log.info('token', `removing ${toRemove.length} tokens`) + const tokens = await profile.listTokens(opts) args.forEach(id => { const matches = tokens.filter(token => token.key.indexOf(id) === 0) if (matches.length === 1) { @@ -118,72 +113,47 @@ class Token extends BaseCommand { }) await Promise.all( toRemove.map(key => { - return otplease(this.npm, conf, c => profile.removeToken(key, c)) + return otplease(this.npm, opts, c => profile.removeToken(key, c)) }) ) - if (conf.json) { - this.npm.output(JSON.stringify(toRemove)) - } else if (conf.parseable) { - this.npm.output(toRemove.join('\t')) + if (json) { + output.standard(JSON.stringify(toRemove)) + } else if (parseable) { + output.standard(toRemove.join('\t')) } else { - this.npm.output('Removed ' + toRemove.length + ' token' + (toRemove.length !== 1 ? 's' : '')) + output.standard('Removed ' + toRemove.length + ' token' + (toRemove.length !== 1 ? 
's' : '')) } } - async create (args) { - const conf = this.config() - const cidr = conf.cidr - const readonly = conf.readOnly + async create () { + const json = this.npm.config.get('json') + const parseable = this.npm.config.get('parseable') + const cidr = this.npm.config.get('cidr') + const readonly = this.npm.config.get('read-only') - const password = await readUserInfo.password() const validCIDR = await this.validateCIDRList(cidr) + const password = await readUserInfo.password() log.info('token', 'creating') - const result = await pulseTillDone.withPromise( - otplease(this.npm, conf, c => profile.createToken(password, readonly, validCIDR, c)) + const result = await otplease( + this.npm, + { ...this.npm.flatOptions }, + c => profile.createToken(password, readonly, validCIDR, c) ) delete result.key delete result.updated - if (conf.json) { - this.npm.output(JSON.stringify(result)) - } else if (conf.parseable) { - Object.keys(result).forEach(k => this.npm.output(k + '\t' + result[k])) + if (json) { + output.standard(JSON.stringify(result)) + } else if (parseable) { + Object.keys(result).forEach(k => output.standard(k + '\t' + result[k])) } else { - const table = new Table() - for (const k of Object.keys(result)) { - table.push({ [this.npm.chalk.bold(k)]: String(result[k]) }) - } - this.npm.output(table.toString()) - } - } - - config () { - const conf = { ...this.npm.flatOptions } - const creds = this.npm.config.getCredentialsByURI(conf.registry) - if (creds.token) { - conf.auth = { token: creds.token } - } else if (creds.username) { - conf.auth = { - basic: { - username: creds.username, - password: creds.password, - }, + const chalk = this.npm.chalk + // Identical to list + const level = result.readonly ? 'read only' : 'publish' + output.standard(`Created ${chalk.blue(level)} token ${result.token}`) + if (result.cidr_whitelist?.length) { + output.standard(`with IP whitelist: ${chalk.green(result.cidr_whitelist.join(','))}`) } - } else if (creds.auth) { - const auth = Buffer.from(creds.auth, 'base64').toString().split(':', 2) - conf.auth = { - basic: { - username: auth[0], - password: auth[1], - }, - } - } else { - conf.auth = {} } - - if (conf.otp) { - conf.auth.otp = conf.otp - } - return conf } invalidCIDRError (msg) { @@ -191,7 +161,6 @@ class Token extends BaseCommand { } generateTokenIds (tokens, minLength) { - const byId = {} for (const token of tokens) { token.id = token.key for (let ii = minLength; ii < token.key.length; ++ii) { @@ -203,9 +172,7 @@ class Token extends BaseCommand { break } } - byId[token.id] = token } - return byId } async validateCIDRList (cidrs) { @@ -215,15 +182,16 @@ class Token extends BaseCommand { for (const cidr of list) { if (isCidrV6(cidr)) { throw this.invalidCIDRError( - 'CIDR whitelist can only contain IPv4 addresses, ' + cidr + ' is IPv6' + `CIDR whitelist can only contain IPv4 addresses${cidr} is IPv6` ) } if (!isCidrV4(cidr)) { - throw this.invalidCIDRError('CIDR whitelist contains invalid CIDR entry: ' + cidr) + throw this.invalidCIDRError(`CIDR whitelist contains invalid CIDR entry: ${cidr}`) } } return list } } + module.exports = Token diff --git a/deps/npm/lib/commands/uninstall.js b/deps/npm/lib/commands/uninstall.js index 07775efb9cf2f1..7496c02deb28f4 100644 --- a/deps/npm/lib/commands/uninstall.js +++ b/deps/npm/lib/commands/uninstall.js @@ -1,10 +1,9 @@ const { resolve } = require('path') const pkgJson = require('@npmcli/package-json') - const reifyFinish = require('../utils/reify-finish.js') -const completion = 
require('../utils/completion/installed-shallow.js') - +const completion = require('../utils/installed-shallow.js') const ArboristWorkspaceCmd = require('../arborist-cmd.js') + class Uninstall extends ArboristWorkspaceCmd { static description = 'Remove a package' static name = 'uninstall' @@ -53,4 +52,5 @@ class Uninstall extends ArboristWorkspaceCmd { await reifyFinish(this.npm, arb) } } + module.exports = Uninstall diff --git a/deps/npm/lib/commands/unpublish.js b/deps/npm/lib/commands/unpublish.js index a4d445a035b622..47a5db82062442 100644 --- a/deps/npm/lib/commands/unpublish.js +++ b/deps/npm/lib/commands/unpublish.js @@ -2,18 +2,17 @@ const libaccess = require('libnpmaccess') const libunpub = require('libnpmpublish').unpublish const npa = require('npm-package-arg') const pacote = require('pacote') +const { output, log } = require('proc-log') const pkgJson = require('@npmcli/package-json') - const { flatten } = require('@npmcli/config/lib/definitions') const getIdentity = require('../utils/get-identity.js') -const log = require('../utils/log-shim') const otplease = require('../utils/otplease.js') +const BaseCommand = require('../base-cmd.js') const LAST_REMAINING_VERSION_ERROR = 'Refusing to delete the last version of the package. ' + 'It will block from republishing a new version for 24 hours.\n' + 'Run with --force to do this.' -const BaseCommand = require('../base-command.js') class Unpublish extends BaseCommand { static description = 'Remove a package from the registry' static name = 'unpublish' @@ -161,7 +160,7 @@ class Unpublish extends BaseCommand { await otplease(this.npm, opts, o => libunpub(spec, o)) } if (!silent) { - this.npm.output(`- ${spec.name}${pkgVersion}`) + output.standard(`- ${spec.name}${pkgVersion}`) } } @@ -173,4 +172,5 @@ class Unpublish extends BaseCommand { } } } + module.exports = Unpublish diff --git a/deps/npm/lib/commands/unstar.js b/deps/npm/lib/commands/unstar.js index cbcb73636c6384..c72966866669a9 100644 --- a/deps/npm/lib/commands/unstar.js +++ b/deps/npm/lib/commands/unstar.js @@ -4,4 +4,5 @@ class Unstar extends Star { static description = 'Remove an item from your favorite packages' static name = 'unstar' } + module.exports = Unstar diff --git a/deps/npm/lib/commands/update.js b/deps/npm/lib/commands/update.js index 43d031c7ada3f3..ddc3e4a47f38a1 100644 --- a/deps/npm/lib/commands/update.js +++ b/deps/npm/lib/commands/update.js @@ -1,10 +1,8 @@ const path = require('path') - -const log = require('../utils/log-shim.js') - +const { log } = require('proc-log') const reifyFinish = require('../utils/reify-finish.js') - const ArboristWorkspaceCmd = require('../arborist-cmd.js') + class Update extends ArboristWorkspaceCmd { static description = 'Update packages' static name = 'update' @@ -33,7 +31,7 @@ class Update extends ArboristWorkspaceCmd { // TODO /* istanbul ignore next */ static async completion (opts, npm) { - const completion = require('../utils/completion/installed-deep.js') + const completion = require('../utils/installed-deep.js') return completion(npm, opts) } @@ -66,4 +64,5 @@ class Update extends ArboristWorkspaceCmd { await reifyFinish(this.npm, arb) } } + module.exports = Update diff --git a/deps/npm/lib/commands/version.js b/deps/npm/lib/commands/version.js index 029a6fdd3101e4..549ba9b9f9c771 100644 --- a/deps/npm/lib/commands/version.js +++ b/deps/npm/lib/commands/version.js @@ -1,10 +1,7 @@ -const libnpmversion = require('libnpmversion') -const { resolve } = require('path') -const { promisify } = require('util') -const readFile = 
promisify(require('fs').readFile) - -const updateWorkspaces = require('../workspaces/update-workspaces.js') -const BaseCommand = require('../base-command.js') +const { resolve } = require('node:path') +const { readFile } = require('node:fs/promises') +const { output } = require('proc-log') +const BaseCommand = require('../base-cmd.js') class Version extends BaseCommand { static description = 'Bump a package version' @@ -73,29 +70,43 @@ class Version extends BaseCommand { } async change (args) { + const libnpmversion = require('libnpmversion') const prefix = this.npm.config.get('tag-version-prefix') const version = await libnpmversion(args[0], { ...this.npm.flatOptions, path: this.npm.prefix, }) - return this.npm.output(`${prefix}${version}`) + return output.standard(`${prefix}${version}`) } async changeWorkspaces (args) { + const updateWorkspaces = require('../utils/update-workspaces.js') + const libnpmversion = require('libnpmversion') const prefix = this.npm.config.get('tag-version-prefix') + const { + config, + flatOptions, + localPrefix, + } = this.npm await this.setWorkspaces() const updatedWorkspaces = [] for (const [name, path] of this.workspaces) { - this.npm.output(name) + output.standard(name) const version = await libnpmversion(args[0], { - ...this.npm.flatOptions, + ...flatOptions, 'git-tag-version': false, path, }) updatedWorkspaces.push(name) - this.npm.output(`${prefix}${version}`) + output.standard(`${prefix}${version}`) } - return this.update(updatedWorkspaces) + return updateWorkspaces({ + config, + flatOptions, + localPrefix, + npm: this.npm, + workspaces: updatedWorkspaces, + }) } async list (results = {}) { @@ -115,9 +126,9 @@ class Version extends BaseCommand { } if (this.npm.config.get('json')) { - this.npm.output(JSON.stringify(results, null, 2)) + output.standard(JSON.stringify(results, null, 2)) } else { - this.npm.output(results) + output.standard(results) } } @@ -135,22 +146,6 @@ class Version extends BaseCommand { } return this.list(results) } - - async update (workspaces) { - const { - config, - flatOptions, - localPrefix, - } = this.npm - - await updateWorkspaces({ - config, - flatOptions, - localPrefix, - npm: this.npm, - workspaces, - }) - } } module.exports = Version diff --git a/deps/npm/lib/commands/view.js b/deps/npm/lib/commands/view.js index b19604f8c2ed35..c0d5bf552eee02 100644 --- a/deps/npm/lib/commands/view.js +++ b/deps/npm/lib/commands/view.js @@ -1,20 +1,19 @@ const columns = require('cli-columns') -const fs = require('fs') +const { readFile } = require('fs/promises') const jsonParse = require('json-parse-even-better-errors') -const log = require('../utils/log-shim.js') +const { log, output } = require('proc-log') const npa = require('npm-package-arg') const { resolve } = require('path') const formatBytes = require('../utils/format-bytes.js') const relativeDate = require('tiny-relative-date') const semver = require('semver') -const { inspect, promisify } = require('util') +const { inspect } = require('util') const { packument } = require('pacote') +const Queryable = require('../utils/queryable.js') +const BaseCommand = require('../base-cmd.js') -const readFile = promisify(fs.readFile) const readJson = async file => jsonParse(await readFile(file, 'utf8')) -const Queryable = require('../utils/queryable.js') -const BaseCommand = require('../base-command.js') class View extends BaseCommand { static description = 'View registry info' static name = 'view' @@ -115,13 +114,10 @@ class View extends BaseCommand { reducedData = cleanBlanks(reducedData) 
log.silly('view', reducedData) } - // disable the progress bar entirely, as we can't meaningfully update it - // if we may have partial lines printed. - log.disableProgress() const msg = await this.jsonData(reducedData, pckmnt._id) if (msg !== '') { - this.npm.output(msg) + output.standard(msg) } } } @@ -160,10 +156,10 @@ class View extends BaseCommand { if (wholePackument) { data.map((v) => this.prettyView(pckmnt, v[Object.keys(v)[0]][''])) } else { - this.npm.output(`${name}:`) + output.standard(`${name}:`) const msg = await this.jsonData(reducedData, pckmnt._id) if (msg !== '') { - this.npm.output(msg) + output.standard(msg) } } } else { @@ -174,7 +170,7 @@ class View extends BaseCommand { } } if (Object.keys(results).length > 0) { - this.npm.output(JSON.stringify(results, null, 2)) + output.standard(JSON.stringify(results, null, 2)) } } @@ -321,131 +317,96 @@ class View extends BaseCommand { // More modern, pretty printing of default view const unicode = this.npm.config.get('unicode') const chalk = this.npm.chalk - const tags = [] - - Object.keys(packu['dist-tags']).forEach((t) => { - const version = packu['dist-tags'][t] - tags.push(`${chalk.bold.green(t)}: ${version}`) - }) - const unpackedSize = manifest.dist.unpackedSize && - formatBytes(manifest.dist.unpackedSize, true) + const deps = Object.keys(manifest.dependencies || {}).map((dep) => + `${chalk.blue(dep)}: ${manifest.dependencies[dep]}` + ) + const site = manifest.homepage?.url || manifest.homepage + const bins = Object.keys(manifest.bin || {}) const licenseField = manifest.license || 'Proprietary' - const info = { - name: chalk.green(manifest.name), - version: chalk.green(manifest.version), - bins: Object.keys(manifest.bin || {}), - versions: chalk.yellow(packu.versions.length + ''), - description: manifest.description, - deprecated: manifest.deprecated, - keywords: packu.keywords || [], - license: typeof licenseField === 'string' - ? licenseField - : (licenseField.type || 'Proprietary'), - deps: Object.keys(manifest.dependencies || {}).map((dep) => { - return `${chalk.yellow(dep)}: ${manifest.dependencies[dep]}` - }), - publisher: manifest._npmUser && unparsePerson({ - name: chalk.yellow(manifest._npmUser.name), - email: chalk.cyan(manifest._npmUser.email), - }), - modified: !packu.time ? undefined - : chalk.yellow(relativeDate(packu.time[manifest.version])), - maintainers: (packu.maintainers || []).map((u) => unparsePerson({ - name: chalk.yellow(u.name), - email: chalk.cyan(u.email), - })), - repo: ( - manifest.bugs && (manifest.bugs.url || manifest.bugs) - ) || ( - manifest.repository && (manifest.repository.url || manifest.repository) - ), - site: ( - manifest.homepage && (manifest.homepage.url || manifest.homepage) - ), - tags, - tarball: chalk.cyan(manifest.dist.tarball), - shasum: chalk.yellow(manifest.dist.shasum), - integrity: - manifest.dist.integrity && chalk.yellow(manifest.dist.integrity), - fileCount: - manifest.dist.fileCount && chalk.yellow(manifest.dist.fileCount), - unpackedSize: unpackedSize && chalk.yellow(unpackedSize), - } - if (info.license.toLowerCase().trim() === 'proprietary') { - info.license = chalk.bold.red(info.license) - } else { - info.license = chalk.green(info.license) + const license = typeof licenseField === 'string' + ? licenseField + : (licenseField.type || 'Proprietary') + + output.standard('') + output.standard([ + chalk.underline.cyan(`${manifest.name}@${manifest.version}`), + license.toLowerCase().trim() === 'proprietary' + ? 
chalk.red(license) + : chalk.green(license), + `deps: ${deps.length ? chalk.cyan(deps.length) : chalk.cyan('none')}`, + `versions: ${chalk.cyan(packu.versions.length + '')}`, + ].join(' | ')) + + manifest.description && output.standard(manifest.description) + if (site) { + output.standard(chalk.blue(site)) } - this.npm.output('') - this.npm.output( - chalk.underline.bold(`${info.name}@${info.version}`) + - ' | ' + info.license + - ' | deps: ' + (info.deps.length ? chalk.cyan(info.deps.length) : chalk.green('none')) + - ' | versions: ' + info.versions + manifest.deprecated && output.standard( + `\n${chalk.redBright('DEPRECATED')}${unicode ? ' ⚠️ ' : '!!'} - ${manifest.deprecated}` ) - info.description && this.npm.output(info.description) - if (info.repo || info.site) { - info.site && this.npm.output(chalk.cyan(info.site)) - } - const warningSign = unicode ? ' ⚠️ ' : '!!' - info.deprecated && this.npm.output( - `\n${chalk.bold.red('DEPRECATED')}${ - warningSign - } - ${info.deprecated}` - ) + if (packu.keywords?.length) { + output.standard(`\nkeywords: ${ + packu.keywords.map(k => chalk.cyan(k)).join(', ') + }`) + } - if (info.keywords.length) { - this.npm.output('') - this.npm.output(`keywords: ${chalk.yellow(info.keywords.join(', '))}`) + if (bins.length) { + output.standard(`\nbin: ${chalk.cyan(bins.join(', '))}`) } - if (info.bins.length) { - this.npm.output('') - this.npm.output(`bin: ${chalk.yellow(info.bins.join(', '))}`) + output.standard('\ndist') + output.standard(`.tarball: ${chalk.blue(manifest.dist.tarball)}`) + output.standard(`.shasum: ${chalk.green(manifest.dist.shasum)}`) + if (manifest.dist.integrity) { + output.standard(`.integrity: ${chalk.green(manifest.dist.integrity)}`) + } + if (manifest.dist.unpackedSize) { + output.standard(`.unpackedSize: ${chalk.blue(formatBytes(manifest.dist.unpackedSize, true))}`) } - this.npm.output('') - this.npm.output('dist') - this.npm.output(`.tarball: ${info.tarball}`) - this.npm.output(`.shasum: ${info.shasum}`) - info.integrity && this.npm.output(`.integrity: ${info.integrity}`) - info.unpackedSize && this.npm.output(`.unpackedSize: ${info.unpackedSize}`) - - const maxDeps = 24 - if (info.deps.length) { - this.npm.output('') - this.npm.output('dependencies:') - this.npm.output(columns(info.deps.slice(0, maxDeps), { padding: 1 })) - if (info.deps.length > maxDeps) { - this.npm.output(`(...and ${info.deps.length - maxDeps} more.)`) + if (deps.length) { + const maxDeps = 24 + output.standard('\ndependencies:') + output.standard(columns(deps.slice(0, maxDeps), { padding: 1 })) + if (deps.length > maxDeps) { + output.standard(chalk.dim(`(...and ${deps.length - maxDeps} more.)`)) } } - if (info.maintainers && info.maintainers.length) { - this.npm.output('') - this.npm.output('maintainers:') - info.maintainers.forEach((u) => this.npm.output(`- ${u}`)) + if (packu.maintainers?.length) { + output.standard('\nmaintainers:') + packu.maintainers.forEach(u => + output.standard(`- ${unparsePerson({ + name: chalk.blue(u.name), + email: chalk.dim(u.email) })}`) + ) } - this.npm.output('') - this.npm.output('dist-tags:') - this.npm.output(columns(info.tags)) + output.standard('\ndist-tags:') + output.standard(columns(Object.keys(packu['dist-tags']).map(t => + `${chalk.blue(t)}: ${packu['dist-tags'][t]}` + ))) - if (info.publisher || info.modified) { + const publisher = manifest._npmUser && unparsePerson({ + name: chalk.blue(manifest._npmUser.name), + email: chalk.dim(manifest._npmUser.email), + }) + if (publisher || packu.time) { let publishInfo = 
'published' - if (info.modified) { - publishInfo += ` ${info.modified}` + if (packu.time) { + publishInfo += ` ${chalk.cyan(relativeDate(packu.time[manifest.version]))}` } - if (info.publisher) { - publishInfo += ` by ${info.publisher}` + if (publisher) { + publishInfo += ` by ${publisher}` } - this.npm.output('') - this.npm.output(publishInfo) + output.standard('') + output.standard(publishInfo) } } } + module.exports = View function cleanBlanks (obj) { diff --git a/deps/npm/lib/commands/whoami.js b/deps/npm/lib/commands/whoami.js index 154cc870391ba1..507adb276c7313 100644 --- a/deps/npm/lib/commands/whoami.js +++ b/deps/npm/lib/commands/whoami.js @@ -1,16 +1,18 @@ +const { output } = require('proc-log') const getIdentity = require('../utils/get-identity.js') +const BaseCommand = require('../base-cmd.js') -const BaseCommand = require('../base-command.js') class Whoami extends BaseCommand { static description = 'Display npm username' static name = 'whoami' static params = ['registry'] - async exec (args) { + async exec () { const username = await getIdentity(this.npm, { ...this.npm.flatOptions }) - this.npm.output( + output.standard( this.npm.config.get('json') ? JSON.stringify(username) : username ) } } + module.exports = Whoami diff --git a/deps/npm/lib/lifecycle-cmd.js b/deps/npm/lib/lifecycle-cmd.js index 848771a38355e5..a509a9380f668c 100644 --- a/deps/npm/lib/lifecycle-cmd.js +++ b/deps/npm/lib/lifecycle-cmd.js @@ -1,7 +1,7 @@ +const BaseCommand = require('./base-cmd.js') + // The implementation of commands that are just "run a script" // restart, start, stop, test - -const BaseCommand = require('./base-command.js') class LifecycleCmd extends BaseCommand { static usage = ['[-- ]'] static isShellout = true @@ -16,4 +16,5 @@ class LifecycleCmd extends BaseCommand { return this.npm.exec('run-script', [this.constructor.name, ...args]) } } + module.exports = LifecycleCmd diff --git a/deps/npm/lib/npm.js b/deps/npm/lib/npm.js index d05b74ac74b833..df2297b215da7b 100644 --- a/deps/npm/lib/npm.js +++ b/deps/npm/lib/npm.js @@ -1,19 +1,14 @@ -const { resolve, dirname, join } = require('path') +const { resolve, dirname, join } = require('node:path') const Config = require('@npmcli/config') const which = require('which') -const fs = require('fs/promises') - -// Patch the global fs module here at the app level -require('graceful-fs').gracefulify(require('fs')) - +const fs = require('node:fs/promises') const { definitions, flatten, shorthands } = require('@npmcli/config/lib/definitions') const usage = require('./utils/npm-usage.js') const LogFile = require('./utils/log-file.js') const Timers = require('./utils/timers.js') const Display = require('./utils/display.js') -const log = require('./utils/log-shim') +const { log, time, output } = require('proc-log') const { redactLog: replaceInfo } = require('@npmcli/redact') -const updateNotifier = require('./utils/update-notifier.js') const pkg = require('../package.json') const { deref } = require('./utils/cmd-list.js') @@ -27,38 +22,25 @@ class Npm { if (!command) { throw Object.assign(new Error(`Unknown command ${c}`), { code: 'EUNKNOWNCOMMAND', + command: c, }) } return require(`./commands/${command}.js`) } + unrefPromises = [] updateNotification = null - loadErr = null argv = [] #command = null #runId = new Date().toISOString().replace(/[.:]/g, '_') - #loadPromise = null #title = 'npm' #argvClean = [] #npmRoot = null - #warnedNonDashArg = false - - #chalk = null - #logChalk = null - #noColorChalk = null - #outputBuffer = [] + #display = null 
#logFile = new LogFile() - #display = new Display() - #timers = new Timers({ - start: 'npm', - listener: (name, ms) => { - const args = ['timing', name, `Completed in ${ms}ms`] - this.#logFile.log(...args) - this.#display.log(...args) - }, - }) + #timers = new Timers() // all these options are only used by tests in order to make testing more // closely resemble real world usage. for now, npm has no programmatic API so @@ -72,7 +54,14 @@ class Npm { // allows tests created by tap inside this repo to not set the local // prefix to `npmRoot` since that is the first dir it would encounter when // doing implicit detection - constructor ({ npmRoot = dirname(__dirname), argv = [], excludeNpmCwd = false } = {}) { + constructor ({ + stdout = process.stdout, + stderr = process.stderr, + npmRoot = dirname(__dirname), + argv = [], + excludeNpmCwd = false, + } = {}) { + this.#display = new Display({ stdout, stderr }) this.#npmRoot = npmRoot this.config = new Config({ npmPath: this.#npmRoot, @@ -88,7 +77,8 @@ class Npm { return this.constructor.version } - setCmd (cmd) { + // Call an npm command + async exec (cmd, args = this.argv) { const Command = Npm.cmd(cmd) const command = new Command(this) @@ -99,51 +89,11 @@ class Npm { process.env.npm_command = this.command } - return command - } - - // Call an npm command - // TODO: tests are currently the only time the second - // parameter of args is used. When called via `lib/cli.js` the config is - // loaded and this.argv is set to the remaining command line args. We should - // consider testing the CLI the same way it is used and not allow args to be - // passed in directly. - async exec (cmd, args = this.argv) { - const command = this.setCmd(cmd) - - const timeEnd = this.time(`command:${cmd}`) - - // this is async but we dont await it, since its ok if it doesnt - // finish before the command finishes running. it uses command and argv - // so it must be initiated here, after the command name is set - // eslint-disable-next-line promise/catch-or-return - updateNotifier(this).then((msg) => (this.updateNotification = msg)) - - // Options are prefixed by a hyphen-minus (-, \u2d). - // Other dash-type chars look similar but are invalid. 
- if (!this.#warnedNonDashArg) { - const nonDashArgs = args.filter(a => /^[\u2010-\u2015\u2212\uFE58\uFE63\uFF0D]/.test(a)) - if (nonDashArgs.length) { - this.#warnedNonDashArg = true - log.error( - 'arg', - 'Argument starts with non-ascii dash, this is probably invalid:', - nonDashArgs.join(', ') - ) - } - } - - return command.cmdExec(args).finally(timeEnd) + return time.start(`command:${cmd}`, () => command.cmdExec(args)) } async load () { - if (!this.#loadPromise) { - this.#loadPromise = this.time('npm:load', () => this.#load().catch((er) => { - this.loadErr = er - throw er - })) - } - return this.#loadPromise + return time.start('npm:load', () => this.#load()) } get loaded () { @@ -159,30 +109,44 @@ class Npm { this.#logFile.off() } - time (name, fn) { - return this.#timers.time(name, fn) - } - - writeTimingFile () { - this.#timers.writeFile({ + finish ({ showLogFileError } = {}) { + this.#timers.finish({ id: this.#runId, command: this.#argvClean, logfiles: this.logFiles, version: this.version, }) + + if (showLogFileError) { + if (!this.silent) { + // just a line break if not in silent mode + output.error('') + } + + if (this.logFiles.length) { + return log.error('', `A complete log of this run can be found in: ${this.logFiles}`) + } + + const logsMax = this.config.get('logs-max') + if (logsMax <= 0) { + // user specified no log file + log.error('', `Log files were not written due to the config logs-max=${logsMax}`) + } else { + // could be an error writing to the directory + log.error('', + `Log files were not written due to an error writing to the directory: ${this.#logsDir}` + + '\nYou can rerun the command with `--loglevel=verbose` to see the logs in your terminal' + ) + } + } } get title () { return this.#title } - set title (t) { - process.title = t - this.#title = t - } - async #load () { - await this.time('npm:load:whichnode', async () => { + await time.start('npm:load:whichnode', async () => { // TODO should we throw here? const node = await which(process.argv[0]).catch(() => {}) if (node && node.toUpperCase() !== process.execPath.toUpperCase()) { @@ -192,46 +156,72 @@ class Npm { } }) - await this.time('npm:load:configload', () => this.config.load()) - - // get createSupportsColor from chalk directly if this lands - // https://github.com/chalk/chalk/pull/600 - const [{ Chalk }, { createSupportsColor }] = await Promise.all([ - import('chalk'), - import('supports-color'), - ]) - this.#noColorChalk = new Chalk({ level: 0 }) - // we get the chalk level based on a null stream meaning chalk will only use - // what it knows about the environment to get color support since we already - // determined in our definitions that we want to show colors. - const level = Math.max(createSupportsColor(null).level, 1) - this.#chalk = this.color ? new Chalk({ level }) : this.#noColorChalk - this.#logChalk = this.logColor ? new Chalk({ level }) : this.#noColorChalk + await time.start('npm:load:configload', () => this.config.load()) + + // npm --versions + if (this.config.get('versions', 'cli')) { + this.argv = ['version'] + this.config.set('usage', false, 'cli') + } else { + this.argv = [...this.config.parsedArgv.remain] + } + + // Remove first argv since that is our command as typed + // Note that this might not be the actual name of the command + // due to aliases, etc. But we use the raw form of it later + // in user output so it must be preserved as is. 
+ const commandArg = this.argv.shift() + + // This is the actual name of the command that will be run or + // undefined if deref could not find a match + const command = deref(commandArg) + + await this.#display.load({ + command, + loglevel: this.config.get('loglevel'), + stdoutColor: this.color, + stderrColor: this.logColor, + timing: this.config.get('timing'), + unicode: this.config.get('unicode'), + progress: this.flatOptions.progress, + json: this.config.get('json'), + heading: this.config.get('heading'), + }) + process.env.COLOR = this.color ? '1' : '0' + + // npm -v + // return from here early so we dont create any caches/logfiles/timers etc + if (this.config.get('version', 'cli')) { + output.standard(this.version) + return { exec: false } + } // mkdir this separately since the logs dir can be set to // a different location. if this fails, then we don't have // a cache dir, but we don't want to fail immediately since // the command might not need a cache dir (like `npm --version`) - await this.time('npm:load:mkdirpcache', () => + await time.start('npm:load:mkdirpcache', () => fs.mkdir(this.cache, { recursive: true }) .catch((e) => log.verbose('cache', `could not create cache: ${e}`))) // it's ok if this fails. user might have specified an invalid dir // which we will tell them about at the end if (this.config.get('logs-max') > 0) { - await this.time('npm:load:mkdirplogs', () => - fs.mkdir(this.logsDir, { recursive: true }) + await time.start('npm:load:mkdirplogs', () => + fs.mkdir(this.#logsDir, { recursive: true }) .catch((e) => log.verbose('logfile', `could not create logs-dir: ${e}`))) } // note: this MUST be shorter than the actual argv length, because it // uses the same memory, so node will truncate it if it's too long. - this.time('npm:load:setTitle', () => { + // We time this because setting process.title is slow sometimes but we + // have to do it for security reasons. But still helpful to know how slow it is. + time.start('npm:load:setTitle', () => { const { parsedArgv: { cooked, remain } } = this.config - this.argv = remain // Secrets are mostly in configs, so title is set using only the positional args - // to keep those from being leaked. - this.title = ['npm'].concat(replaceInfo(remain)).join(' ').trim() + // to keep those from being leaked. We still do a best effort replaceInfo. + this.#title = ['npm'].concat(replaceInfo(remain)).join(' ').trim() + process.title = this.#title // The cooked argv is also logged separately for debugging purposes. It is // cleaned as a best effort by replacing known secrets like basic auth // password and strings that look like npm tokens. XXX: for this to be @@ -242,45 +232,32 @@ class Npm { log.verbose('argv', this.#argvClean.map(JSON.stringify).join(' ')) }) - this.time('npm:load:display', () => { - this.#display.load({ - // Use logColor since that is based on stderr - color: this.logColor, - chalk: this.logChalk, - progress: this.flatOptions.progress, - silent: this.silent, - timing: this.config.get('timing'), - loglevel: this.config.get('loglevel'), - unicode: this.config.get('unicode'), - heading: this.config.get('heading'), - }) - process.env.COLOR = this.color ? '1' : '0' - }) - - this.time('npm:load:logFile', () => { - this.#logFile.load({ - path: this.logPath, - logsMax: this.config.get('logs-max'), - }) - log.verbose('logfile', this.#logFile.files[0] || 'no logfile created') + // logFile.load returns a promise that resolves when old logs are done being cleaned. 
+ // We save this promise to an array so that we can await it in tests to ensure more + // deterministic logging behavior. The process will also hang open if this were to + // take a long time to resolve, but that is why process.exit is called explicitly + // in the exit-handler. + this.unrefPromises.push(this.#logFile.load({ + path: this.logPath, + logsMax: this.config.get('logs-max'), + timing: this.config.get('timing'), + })) + + this.#timers.load({ + path: this.logPath, + timing: this.config.get('timing'), }) - this.time('npm:load:timers', () => - this.#timers.load({ - path: this.config.get('timing') ? this.logPath : null, - }) - ) - - this.time('npm:load:configScope', () => { - const configScope = this.config.get('scope') - if (configScope && !/^@/.test(configScope)) { - this.config.set('scope', `@${configScope}`, this.config.find('scope')) - } - }) + const configScope = this.config.get('scope') + if (configScope && !/^@/.test(configScope)) { + this.config.set('scope', `@${configScope}`, this.config.find('scope')) + } if (this.config.get('force')) { log.warn('using --force', 'Recommended protections disabled.') } + + return { exec: true, command: commandArg, args: this.argv } } get isShellout () { @@ -313,15 +290,15 @@ class Npm { } get noColorChalk () { - return this.#noColorChalk + return this.#display.chalk.noColor } get chalk () { - return this.#chalk + return this.#display.chalk.stdout } get logChalk () { - return this.#logChalk + return this.#display.chalk.stderr } get global () { @@ -336,14 +313,6 @@ class Npm { return 2 } - get unfinishedTimers () { - return this.#timers.unfinished - } - - get finishedTimers () { - return this.#timers.finished - } - get started () { return this.#timers.started } @@ -352,16 +321,12 @@ class Npm { return this.#logFile.files } - get logsDir () { + get #logsDir () { return this.config.get('logs-dir') || join(this.cache, '_logs') } get logPath () { - return resolve(this.logsDir, `${this.#runId}-`) - } - - get timingFile () { - return this.#timers.file + return resolve(this.#logsDir, `${this.#runId}-`) } get npmRoot () { @@ -372,26 +337,14 @@ class Npm { return this.config.get('cache') } - set cache (r) { - this.config.set('cache', r) - } - get globalPrefix () { return this.config.globalPrefix } - set globalPrefix (r) { - this.config.globalPrefix = r - } - get localPrefix () { return this.config.localPrefix } - set localPrefix (r) { - this.config.localPrefix = r - } - get localPackage () { return this.config.localPackage } @@ -427,59 +380,9 @@ class Npm { return this.global ? this.globalPrefix : this.localPrefix } - set prefix (r) { - const k = this.global ? 
'globalPrefix' : 'localPrefix' - this[k] = r - } - get usage () { return usage(this) } - - // output to stdout in a progress bar compatible way - output (...msg) { - log.clearProgress() - // eslint-disable-next-line no-console - console.log(...msg.map(Display.clean)) - log.showProgress() - } - - outputBuffer (item) { - this.#outputBuffer.push(item) - } - - flushOutput (jsonError) { - if (!jsonError && !this.#outputBuffer.length) { - return - } - - if (this.config.get('json')) { - const jsonOutput = this.#outputBuffer.reduce((acc, item) => { - if (typeof item === 'string') { - // try to parse it as json in case its a string - try { - item = JSON.parse(item) - } catch { - return acc - } - } - return { ...acc, ...item } - }, {}) - this.output(JSON.stringify({ ...jsonOutput, ...jsonError }, null, 2)) - } else { - for (const item of this.#outputBuffer) { - this.output(item) - } - } - - this.#outputBuffer.length = 0 - } - - outputError (...msg) { - log.clearProgress() - // eslint-disable-next-line no-console - console.error(...msg.map(Display.clean)) - log.showProgress() - } } + module.exports = Npm diff --git a/deps/npm/lib/package-url-cmd.js b/deps/npm/lib/package-url-cmd.js index 250b46eeeddbe5..bcefd17af4492a 100644 --- a/deps/npm/lib/package-url-cmd.js +++ b/deps/npm/lib/package-url-cmd.js @@ -1,12 +1,9 @@ -// Base command for opening urls from a package manifest (bugs, docs, repo) - const pacote = require('pacote') -const hostedGitInfo = require('hosted-git-info') - const openUrl = require('./utils/open-url.js') -const log = require('./utils/log-shim') +const { log } = require('proc-log') +const BaseCommand = require('./base-cmd.js') -const BaseCommand = require('./base-command.js') +// Base command for opening urls from a package manifest (bugs, docs, repo) class PackageUrlCommand extends BaseCommand { static params = [ 'browser', @@ -52,6 +49,7 @@ class PackageUrlCommand extends BaseCommand { // repository (if a string) or repository.url (if an object) returns null // if it's not a valid repo, or not a known hosted repo hostedFromMani (mani) { + const hostedGitInfo = require('hosted-git-info') const r = mani.repository const rurl = !r ? null : typeof r === 'string' ? r @@ -62,4 +60,5 @@ class PackageUrlCommand extends BaseCommand { return (rurl && hostedGitInfo.fromUrl(rurl.replace(/^git\+/, ''))) || null } } + module.exports = PackageUrlCommand diff --git a/deps/npm/lib/utils/audit-error.js b/deps/npm/lib/utils/audit-error.js index f9850d718b198e..10aec7592b03ce 100644 --- a/deps/npm/lib/utils/audit-error.js +++ b/deps/npm/lib/utils/audit-error.js @@ -1,4 +1,4 @@ -const log = require('./log-shim') +const { log, output } = require('proc-log') const { redactLog: replaceInfo } = require('@npmcli/redact') // print an error or just nothing if the audit report has an error @@ -22,7 +22,7 @@ const auditError = (npm, report) => { const { body: errBody } = error const body = Buffer.isBuffer(errBody) ? 
errBody.toString() : errBody if (npm.flatOptions.json) { - npm.output(JSON.stringify({ + output.standard(JSON.stringify({ message: error.message, method: error.method, uri: replaceInfo(error.uri), @@ -31,7 +31,7 @@ const auditError = (npm, report) => { body, }, null, 2)) } else { - npm.output(body) + output.standard(body) } throw 'audit endpoint returned an error' diff --git a/deps/npm/lib/utils/auth.js b/deps/npm/lib/utils/auth.js index 729ce32c2a7a8f..04ca455ceb5261 100644 --- a/deps/npm/lib/utils/auth.js +++ b/deps/npm/lib/utils/auth.js @@ -1,5 +1,5 @@ const profile = require('npm-profile') -const log = require('../utils/log-shim') +const { log } = require('proc-log') const openUrlPrompt = require('../utils/open-url-prompt.js') const read = require('../utils/read-user-info.js') const otplease = require('../utils/otplease.js') @@ -36,7 +36,7 @@ const adduser = async (npm, { creds, ...opts }) => { // password, it's effectively a login, and if that account has otp you'll // be prompted for it. res = await otplease(npm, opts, (reqOpts) => - profile.adduserCouch(username, email, password, opts) + profile.adduserCouch(username, email, password, reqOpts) ) } diff --git a/deps/npm/lib/utils/did-you-mean.js b/deps/npm/lib/utils/did-you-mean.js index ff3c812b46c3c7..54c8ff2e35aa69 100644 --- a/deps/npm/lib/utils/did-you-mean.js +++ b/deps/npm/lib/utils/did-you-mean.js @@ -8,7 +8,7 @@ const didYouMean = async (path, scmd) => { let best = [] for (const str of close) { const cmd = Npm.cmd(str) - best.push(` npm ${str} # ${cmd.description}`) + best.push(` npm ${str} # ${cmd.description}`) } // We would already be suggesting this in `npm x` so omit them here const runScripts = ['stop', 'start', 'test', 'restart'] @@ -17,13 +17,13 @@ const didYouMean = async (path, scmd) => { best = best.concat( Object.keys(scripts || {}) .filter(cmd => distance(scmd, cmd) < scmd.length * 0.4 && !runScripts.includes(cmd)) - .map(str => ` npm run ${str} # run the "${str}" package script`), + .map(str => ` npm run ${str} # run the "${str}" package script`), Object.keys(bin || {}) .filter(cmd => distance(scmd, cmd) < scmd.length * 0.4) /* eslint-disable-next-line max-len */ - .map(str => ` npm exec ${str} # run the "${str}" command from either this or a remote npm package`) + .map(str => ` npm exec ${str} # run the "${str}" command from either this or a remote npm package`) ) - } catch (_) { + } catch { // gracefully ignore not being in a folder w/ a package.json } @@ -31,10 +31,9 @@ const didYouMean = async (path, scmd) => { return '' } - const suggestion = - best.length === 1 - ? `\n\nDid you mean this?\n${best[0]}` - : `\n\nDid you mean one of these?\n${best.slice(0, 3).join('\n')}` - return suggestion + return best.length === 1 + ? 
`\n\nDid you mean this?\n${best[0]}` + : `\n\nDid you mean one of these?\n${best.slice(0, 3).join('\n')}` } + module.exports = didYouMean diff --git a/deps/npm/lib/utils/display.js b/deps/npm/lib/utils/display.js index c5e5ca2b5b874a..29a1f7951d5063 100644 --- a/deps/npm/lib/utils/display.js +++ b/deps/npm/lib/utils/display.js @@ -1,214 +1,489 @@ -const { inspect } = require('util') -const npmlog = require('npmlog') -const log = require('./log-shim.js') +const { log, output, input, META } = require('proc-log') const { explain } = require('./explain-eresolve.js') +const { formatWithOptions } = require('./format') -const originalCustomInspect = Symbol('npm.display.original.util.inspect.custom') - -// These are most assuredly not a mistake -// https://eslint.org/docs/latest/rules/no-control-regex -/* eslint-disable no-control-regex */ -// \x00 through \x1f, \x7f through \x9f, not including \x09 \x0a \x0b \x0d -const hasC01 = /[\x00-\x08\x0c\x0e-\x1f\x7f-\x9f]/ -// Allows everything up to '[38;5;255m' in 8 bit notation -const allowedSGR = /^\[[0-9;]{0,8}m/ -// '[38;5;255m'.length -const sgrMaxLen = 10 - -// Strips all ANSI C0 and C1 control characters (except for SGR up to 8 bit) -function stripC01 (str) { - if (!hasC01.test(str)) { - return str - } - let result = '' - for (let i = 0; i < str.length; i++) { - const char = str[i] - const code = char.charCodeAt(0) - if (!hasC01.test(char)) { - // Most characters are in this set so continue early if we can - result = `${result}${char}` - } else if (code === 27 && allowedSGR.test(str.slice(i + 1, i + sgrMaxLen + 1))) { - // \x1b with allowed SGR - result = `${result}\x1b` - } else if (code <= 31) { - // escape all other C0 control characters besides \x7f - result = `${result}^${String.fromCharCode(code + 64)}` - } else { - // hasC01 ensures this is now a C1 control character or \x7f - result = `${result}^${String.fromCharCode(code - 64)}` +// This is the general approach to color: +// Eventually this will be exposed somewhere we can refer to these by name. +// Foreground colors only. Never set the background color. 
+/* + * Black # (Don't use) + * Red # Danger + * Green # Success + * Yellow # Warning + * Blue # Accent + * Magenta # Done + * Cyan # Emphasis + * White # (Don't use) + */ + +// Translates log levels to chalk colors +const COLOR_PALETTE = ({ chalk: c }) => ({ + heading: c.bold, + title: c.blueBright, + timing: c.magentaBright, + // loglevels + error: c.red, + warn: c.yellow, + notice: c.cyanBright, + http: c.green, + info: c.cyan, + verbose: c.blue, + silly: c.blue.dim, +}) + +const LEVEL_OPTIONS = { + silent: { + index: 0, + }, + error: { + index: 1, + }, + warn: { + index: 2, + }, + notice: { + index: 3, + }, + http: { + index: 4, + }, + info: { + index: 5, + }, + verbose: { + index: 6, + }, + silly: { + index: 7, + }, +} + +const LEVEL_METHODS = { + ...LEVEL_OPTIONS, + [log.KEYS.timing]: { + show: ({ timing, index }) => !!timing && index !== 0, + }, +} + +const tryJsonParse = (value) => { + if (typeof value === 'string') { + try { + return JSON.parse(value) + } catch { + return {} } } - return result + return value } -class Display { - #chalk = null +const setBlocking = (stream) => { + // Copied from https://github.com/yargs/set-blocking + // https://raw.githubusercontent.com/yargs/set-blocking/master/LICENSE.txt + /* istanbul ignore next - we trust that this works */ + if (stream._handle && stream.isTTY && typeof stream._handle.setBlocking === 'function') { + stream._handle.setBlocking(true) + } + return stream +} + +const withMeta = (handler) => (level, ...args) => { + let meta = {} + const last = args.at(-1) + if (last && typeof last === 'object' && Object.hasOwn(last, META)) { + meta = args.pop() + } + return handler(level, meta, ...args) +} - constructor () { - // pause by default until config is loaded - this.on() - log.pause() +class Display { + #logState = { + buffering: true, + buffer: [], } - static clean (output) { - if (typeof output === 'string') { - // Strings are cleaned inline - return stripC01(output) - } - if (!output || typeof output !== 'object') { - // Numbers, booleans, null all end up here and don't need cleaning - return output - } - // output && typeof output === 'object' - // We can't use hasOwn et al for detecting the original but we can use it - // for detecting the properties we set via defineProperty - if ( - output[inspect.custom] && - (!Object.hasOwn(output, originalCustomInspect)) - ) { - // Save the old one if we didn't already do it. 
- Object.defineProperty(output, originalCustomInspect, { - value: output[inspect.custom], - writable: true, - }) - } - if (!Object.hasOwn(output, originalCustomInspect)) { - // Put a dummy one in for when we run multiple times on the same object - Object.defineProperty(output, originalCustomInspect, { - value: function () { - return this - }, - writable: true, - }) - } - // Set the custom inspect to our own function - Object.defineProperty(output, inspect.custom, { - value: function () { - const toClean = this[originalCustomInspect]() - // Custom inspect can return things other than objects, check type again - if (typeof toClean === 'string') { - // Strings are cleaned inline - return stripC01(toClean) - } - if (!toClean || typeof toClean !== 'object') { - // Numbers, booleans, null all end up here and don't need cleaning - return toClean - } - return stripC01(inspect(toClean, { customInspect: false })) - }, - writable: true, - }) - return output + #outputState = { + buffering: true, + buffer: [], } - on () { + // colors + #noColorChalk + #stdoutChalk + #stdoutColor + #stderrChalk + #stderrColor + #logColors + + // progress + #progress + + // options + #command + #levelIndex + #timing + #json + #heading + #silent + + // display streams + #stdout + #stderr + + constructor ({ stdout, stderr }) { + this.#stdout = setBlocking(stdout) + this.#stderr = setBlocking(stderr) + + // Handlers are set immediately so they can buffer all events process.on('log', this.#logHandler) + process.on('output', this.#outputHandler) + process.on('input', this.#inputHandler) + this.#progress = new Progress({ stream: stderr }) } off () { process.off('log', this.#logHandler) - // Unbalanced calls to enable/disable progress - // will leave change listeners on the tracker - // This pretty much only happens in tests but - // this removes the event emitter listener warnings - log.tracker.removeAllListeners() - } - - load (config) { - const { - color, - chalk, - timing, - loglevel, + this.#logState.buffer.length = 0 + process.off('output', this.#outputHandler) + this.#outputState.buffer.length = 0 + process.off('input', this.#inputHandler) + this.#progress.off() + } + + get chalk () { + return { + noColor: this.#noColorChalk, + stdout: this.#stdoutChalk, + stderr: this.#stderrChalk, + } + } + + async load ({ + command, + heading, + json, + loglevel, + progress, + stderrColor, + stdoutColor, + timing, + unicode, + }) { + // get createSupportsColor from chalk directly if this lands + // https://github.com/chalk/chalk/pull/600 + const [{ Chalk }, { createSupportsColor }] = await Promise.all([ + import('chalk'), + import('supports-color'), + ]) + // we get the chalk level based on a null stream meaning chalk will only use + // what it knows about the environment to get color support since we already + // determined in our definitions that we want to show colors. + const level = Math.max(createSupportsColor(null).level, 1) + this.#noColorChalk = new Chalk({ level: 0 }) + this.#stdoutColor = stdoutColor + this.#stdoutChalk = stdoutColor ? new Chalk({ level }) : this.#noColorChalk + this.#stderrColor = stderrColor + this.#stderrChalk = stderrColor ? 
new Chalk({ level }) : this.#noColorChalk + this.#logColors = COLOR_PALETTE({ chalk: this.#stderrChalk }) + + this.#command = command + this.#levelIndex = LEVEL_OPTIONS[loglevel].index + this.#timing = timing + this.#json = json + this.#heading = heading + this.#silent = this.#levelIndex <= 0 + + // Emit resume event on the logs which will flush output + log.resume() + output.flush() + this.#progress.load({ unicode, - progress, - silent, - heading = 'npm', - } = config - - this.#chalk = chalk - - // npmlog is still going away someday, so this is a hack to dynamically - // set the loglevel of timing based on the timing flag, instead of making - // a breaking change to npmlog. The result is that timing logs are never - // shown except when the --timing flag is set. We also need to change - // the index of the silly level since otherwise it is set to -Infinity - // and we can't go any lower than that. silent is still set to Infinify - // because we DO want silent to hide timing levels. This allows for the - // special case of getting timing information while hiding all CLI output - // in order to get perf information that might be affected by writing to - // a terminal. XXX(npmlog): this will be removed along with npmlog - log.levels.silly = -10000 - log.levels.timing = log.levels[loglevel] + (timing ? 1 : -1) - - log.level = loglevel - log.heading = heading - - if (color) { - log.enableColor() - } else { - log.disableColor() + enabled: !!progress && !this.#silent, + }) + } + + // STREAM WRITES + + // Write formatted and (non-)colorized output to streams + #write (stream, options, ...args) { + const colors = stream === this.#stdout ? this.#stdoutColor : this.#stderrColor + const value = formatWithOptions({ colors, ...options }, ...args) + this.#progress.write(() => stream.write(value)) + } + + // HANDLERS + + // Arrow function assigned to a private class field so it can be passed + // directly as a listener and still reference "this" + #logHandler = withMeta((level, meta, ...args) => { + switch (level) { + case log.KEYS.resume: + this.#logState.buffering = false + this.#logState.buffer.forEach((item) => this.#tryWriteLog(...item)) + this.#logState.buffer.length = 0 + break + + case log.KEYS.pause: + this.#logState.buffering = true + break + + default: + if (this.#logState.buffering) { + this.#logState.buffer.push([level, meta, ...args]) + } else { + this.#tryWriteLog(level, meta, ...args) + } + break } + }) - if (unicode) { - log.enableUnicode() - } else { - log.disableUnicode() + // Arrow function assigned to a private class field so it can be passed + // directly as a listener and still reference "this" + #outputHandler = withMeta((level, meta, ...args) => { + switch (level) { + case output.KEYS.flush: + this.#outputState.buffering = false + if (meta.jsonError && this.#json) { + const json = {} + for (const item of this.#outputState.buffer) { + // index 2 skips the level and meta + Object.assign(json, tryJsonParse(item[2])) + } + this.#writeOutput( + output.KEYS.standard, + meta, + JSON.stringify({ ...json, error: meta.jsonError }, null, 2) + ) + } else { + this.#outputState.buffer.forEach((item) => this.#writeOutput(...item)) + } + this.#outputState.buffer.length = 0 + break + + case output.KEYS.buffer: + this.#outputState.buffer.push([output.KEYS.standard, meta, ...args]) + break + + default: + if (this.#outputState.buffering) { + this.#outputState.buffer.push([level, meta, ...args]) + } else { + // HACK: Check if the argument looks like a run-script banner. 
This can be + // replaced with proc-log.META in @npmcli/run-script + if (typeof args[0] === 'string' && args[0].startsWith('\n> ') && args[0].endsWith('\n')) { + if (this.#silent || ['exec', 'explore'].includes(this.#command)) { + // Silent mode and some specific commands always hide run script banners + break + } else if (this.#json) { + // In json mode, change output to stderr since we dont want to break json + // parsing on stdout if the user is piping to jq or something. + // XXX: in a future (breaking?) change it might make sense for run-script to + // always output these banners with proc-log.output.error if we think they + // align closer with "logging" instead of "output" + level = output.KEYS.error + } + } + this.#writeOutput(level, meta, ...args) + } + break } + }) - // if it's silent, don't show progress - if (progress && !silent) { - log.enableProgress() - } else { - log.disableProgress() + #inputHandler = withMeta((level, meta, ...args) => { + switch (level) { + case input.KEYS.start: + log.pause() + this.#outputState.buffering = true + this.#progress.off() + break + + case input.KEYS.end: + log.resume() + output.flush() + this.#progress.resume() + break + + case input.KEYS.read: { + // The convention when calling input.read is to pass in a single fn that returns + // the promise to await. resolve and reject are provided by proc-log + const [res, rej, p] = args + return input.start(() => p() + .then(res) + .catch(rej) + // Any call to procLog.input.read will render a prompt to the user, so we always + // add a single newline of output to stdout to move the cursor to the next line + .finally(() => output.standard(''))) + } } + }) - // Resume displaying logs now that we have config - log.resume() - } + // OUTPUT + + #writeOutput (level, meta, ...args) { + switch (level) { + case output.KEYS.standard: + this.#write(this.#stdout, {}, ...args) + break - log (...args) { - this.#logHandler(...args) + case output.KEYS.error: + this.#write(this.#stderr, {}, ...args) + break + } } - #logHandler = (level, ...args) => { + // LOGS + + #tryWriteLog (level, meta, ...args) { try { - this.#log(level, ...args) + // Also (and this is a really inexcusable kludge), we patch the + // log.warn() method so that when we see a peerDep override + // explanation from Arborist, we can replace the object with a + // highly abbreviated explanation of what's being overridden. + // TODO: this could probably be moved to arborist now that display is refactored + const [heading, message, expl] = args + if (level === log.KEYS.warn && heading === 'ERESOLVE' && expl && typeof expl === 'object') { + this.#writeLog(level, meta, heading, message) + this.#writeLog(level, meta, '', explain(expl, this.#stderrChalk, 2)) + return + } + this.#writeLog(level, meta, ...args) } catch (ex) { try { // if it crashed once, it might again! - this.#npmlog('verbose', `attempt to log ${inspect(args)} crashed`, ex) + this.#writeLog(log.KEYS.verbose, meta, '', `attempt to log crashed`, ...args, ex) } catch (ex2) { + // This happens if the object has an inspect method that crashes so just console.error + // with the errors but don't do anything else that might error again. // eslint-disable-next-line no-console - console.error(`attempt to log ${inspect(args)} crashed`, ex, ex2) + console.error(`attempt to log crashed`, ex, ex2) } } } - #log (...args) { - return this.#eresolveWarn(...args) || this.#npmlog(...args) + #writeLog (level, meta, ...args) { + const levelOpts = LEVEL_METHODS[level] + const show = levelOpts.show ?? 
(({ index }) => levelOpts.index <= index) + const force = meta.force && !this.#silent + + if (force || show({ index: this.#levelIndex, timing: this.#timing })) { + // this mutates the array so we can pass args directly to format later + const title = args.shift() + const prefix = [ + this.#logColors.heading(this.#heading), + this.#logColors[level](level), + title ? this.#logColors.title(title) : null, + ] + this.#write(this.#stderr, { prefix }, ...args) + } + } +} + +class Progress { + // Taken from https://github.com/sindresorhus/cli-spinners + // MIT License + // Copyright (c) Sindre Sorhus (https://sindresorhus.com) + static dots = { duration: 80, frames: ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'] } + static lines = { duration: 130, frames: ['-', '\\', '|', '/'] } + + #stream + #spinner + #enabled = false + + #frameIndex = 0 + #lastUpdate = 0 + #interval + #timeout + + // We are rendering is enabled option is set and we are not waiting for the render timeout + get #rendering () { + return this.#enabled && !this.#timeout + } + + // We are spinning if enabled option is set and the render interval has been set + get #spinning () { + return this.#enabled && this.#interval + } + + constructor ({ stream }) { + this.#stream = stream + } + + load ({ enabled, unicode }) { + this.#enabled = enabled + this.#spinner = unicode ? Progress.dots : Progress.lines + // Dont render the spinner for short durations + this.#render(200) + } + + off () { + if (!this.#enabled) { + return + } + clearTimeout(this.#timeout) + this.#timeout = null + clearInterval(this.#interval) + this.#interval = null + this.#frameIndex = 0 + this.#lastUpdate = 0 + this.#clearSpinner() + } + + resume () { + this.#render() } - // Explicitly call these on npmlog and not log shim - // This is the final place we should call npmlog before removing it. - #npmlog (level, ...args) { - npmlog[level](...args.map(Display.clean)) + // If we are currenting rendering the spinner we clear it + // before writing our line and then re-render the spinner after. + // If not then all we need to do is write the line + write (write) { + if (this.#spinning) { + this.#clearSpinner() + } + write() + if (this.#spinning) { + this.#render() + } + } + + #render (ms) { + if (ms) { + this.#timeout = setTimeout(() => { + this.#timeout = null + this.#renderSpinner() + }, ms) + // Make sure this timeout does not keep the process open + this.#timeout.unref() + } else { + this.#renderSpinner() + } } - // Also (and this is a really inexcusable kludge), we patch the - // log.warn() method so that when we see a peerDep override - // explanation from Arborist, we can replace the object with a - // highly abbreviated explanation of what's being overridden. 
- #eresolveWarn (level, heading, message, expl) { - if (level === 'warn' && - heading === 'ERESOLVE' && - expl && typeof expl === 'object' - ) { - this.#npmlog(level, heading, message) - this.#npmlog(level, '', explain(expl, this.#chalk, 2)) - // Return true to short circuit other log in chain - return true + #renderSpinner () { + if (!this.#rendering) { + return } + // We always attempt to render immediately but we only request to move to the next + // frame if it has been longer than our spinner frame duration since our last update + this.#renderFrame(Date.now() - this.#lastUpdate >= this.#spinner.duration) + clearInterval(this.#interval) + this.#interval = setInterval(() => this.#renderFrame(true), this.#spinner.duration) + } + + #renderFrame (next) { + if (next) { + this.#lastUpdate = Date.now() + this.#frameIndex++ + if (this.#frameIndex >= this.#spinner.frames.length) { + this.#frameIndex = 0 + } + } + this.#clearSpinner() + this.#stream.write(this.#spinner.frames[this.#frameIndex]) + } + + #clearSpinner () { + // Move to the start of the line and clear the rest of the line + this.#stream.cursorTo(0) + this.#stream.clearLine(1) } } diff --git a/deps/npm/lib/utils/error-message.js b/deps/npm/lib/utils/error-message.js index 348bb63e2d5abd..969e56636dfe8d 100644 --- a/deps/npm/lib/utils/error-message.js +++ b/deps/npm/lib/utils/error-message.js @@ -1,19 +1,16 @@ -const { format } = require('util') -const { resolve } = require('path') +const { format } = require('node:util') +const { resolve } = require('node:path') const { redactLog: replaceInfo } = require('@npmcli/redact') -const { report } = require('./explain-eresolve.js') -const log = require('./log-shim') +const { log } = require('proc-log') const messageText = msg => msg.map(line => line.slice(1).join(' ')).join('\n') const jsonError = (er, npm, { summary, detail }) => { if (npm?.config.loaded && npm.config.get('json')) { return { - error: { - code: er.code, - summary: messageText(summary), - detail: messageText(detail), - }, + code: er.code, + summary: messageText(summary), + detail: messageText(detail), } } } @@ -32,6 +29,7 @@ const errorMessage = (er, npm) => { switch (er.code) { case 'ERESOLVE': { + const { report } = require('./explain-eresolve.js') short.push(['ERESOLVE', er.message]) detail.push(['', '']) // XXX(display): error messages are logged so we use the logColor since that is based @@ -77,9 +75,7 @@ const errorMessage = (er, npm) => { npm.config.loaded && er.dest.startsWith(npm.config.get('cache')) - const { isWindows } = require('./is-windows.js') - - if (!isWindows && (isCachePath || isCacheDest)) { + if (process.platform !== 'win32' && (isCachePath || isCacheDest)) { // user probably doesn't need this, but still add it to the debug log log.verbose(er.stack) short.push([ @@ -101,7 +97,7 @@ const errorMessage = (er, npm) => { '', [ '\nThe operation was rejected by your operating system.', - isWindows + process.platform === 'win32' /* eslint-disable-next-line max-len */ ? "It's possible that the file was already in use (by a text editor or antivirus),\n" + 'or that you lack permissions to access it.' 
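[Editorial note, not part of the patch] The hunks above replace npm's old `log-shim`/`npmlog` plumbing with `proc-log`: producer modules now call `output.standard()` / `log.verbose()` and the new `Display` class consumes the resulting process-level events. As a rough, hedged sketch only (simplified handler bodies; the real Display buffers, formats, and strips control characters as shown in the display.js hunk), the producer/consumer relationship looks roughly like this:

    // Illustrative sketch only; assumes the proc-log package as used in the
    // display.js hunk above (listener signatures are (level, ...args)).
    const { log, output } = require('proc-log')

    // Consumer side: display.js registers process-level handlers like these,
    // so any module can emit output/logs without holding a reference to npm.
    process.on('output', (level, ...args) => {
      if (level === output.KEYS.standard) {
        process.stdout.write(args.join(' ') + '\n')
      }
    })
    process.on('log', (level, ...args) => {
      process.stderr.write(`${level} ${args.join(' ')}\n`)
    })

    // Producer side: what the refactored utils now call instead of
    // npm.output() / require('./log-shim'):
    output.standard('written to stdout by the output handler')
    log.verbose('title', 'written to stderr by the log handler')
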
diff --git a/deps/npm/lib/utils/explain-dep.js b/deps/npm/lib/utils/explain-dep.js index 86660d5d3ad4b0..4e9e93454e8a28 100644 --- a/deps/npm/lib/utils/explain-dep.js +++ b/deps/npm/lib/utils/explain-dep.js @@ -1,4 +1,4 @@ -const { relative } = require('path') +const { relative } = require('node:path') const explainNode = (node, depth, chalk) => printNode(node, chalk) + @@ -6,63 +6,32 @@ const explainNode = (node, depth, chalk) => explainLinksIn(node, depth, chalk) const colorType = (type, chalk) => { - const { red, yellow, cyan, magenta, blue, green, gray } = chalk - const style = type === 'extraneous' ? red - : type === 'dev' ? yellow - : type === 'optional' ? cyan - : type === 'peer' ? magenta - : type === 'bundled' ? blue - : type === 'workspace' ? green - : type === 'overridden' ? gray + const style = type === 'extraneous' ? chalk.red + : type === 'dev' ? chalk.blue + : type === 'optional' ? chalk.magenta + : type === 'peer' ? chalk.magentaBright + : type === 'bundled' ? chalk.underline.cyan + : type === 'workspace' ? chalk.blueBright + : type === 'overridden' ? chalk.dim : /* istanbul ignore next */ s => s return style(type) } const printNode = (node, chalk) => { - const { - name, - version, - location, - extraneous, - dev, - optional, - peer, - bundled, - isWorkspace, - overridden, - } = node - const { bold, dim, green } = chalk const extra = [] - if (extraneous) { - extra.push(' ' + bold(colorType('extraneous', chalk))) - } - - if (dev) { - extra.push(' ' + bold(colorType('dev', chalk))) - } - if (optional) { - extra.push(' ' + bold(colorType('optional', chalk))) - } - - if (peer) { - extra.push(' ' + bold(colorType('peer', chalk))) - } - - if (bundled) { - extra.push(' ' + bold(colorType('bundled', chalk))) - } - - if (overridden) { - extra.push(' ' + bold(colorType('overridden', chalk))) + for (const meta of ['extraneous', 'dev', 'optional', 'peer', 'bundled', 'overridden']) { + if (node[meta]) { + extra.push(` ${colorType(meta, chalk)}`) + } } - const pkgid = isWorkspace - ? green(`${name}@${version}`) - : `${bold(name)}@${bold(version)}` + const pkgid = node.isWorkspace + ? chalk.blueBright(`${node.name}@${node.version}`) + : `${node.name}@${node.version}` return `${pkgid}${extra.join('')}` + - (location ? dim(`\n${location}`) : '') + (node.location ? chalk.dim(`\n${node.location}`) : '') } const explainLinksIn = ({ linksIn }, depth, chalk) => { @@ -75,7 +44,7 @@ const explainLinksIn = ({ linksIn }, depth, chalk) => { return str.split('\n').join('\n ') } -const explainDependents = ({ name, dependents }, depth, chalk) => { +const explainDependents = ({ dependents }, depth, chalk) => { if (!dependents || !dependents.length || depth <= 0) { return '' } @@ -107,10 +76,9 @@ const explainDependents = ({ name, dependents }, depth, chalk) => { } const explainEdge = ({ name, type, bundled, from, spec, rawSpec, overridden }, depth, chalk) => { - const { bold } = chalk let dep = type === 'workspace' - ? bold(relative(from.location, spec.slice('file:'.length))) - : `${bold(name)}@"${bold(spec)}"` + ? 
chalk.bold(relative(from.location, spec.slice('file:'.length))) + : `${name}@"${spec}"` if (overridden) { dep = `${colorType('overridden', chalk)} ${dep} (was "${rawSpec}")` } diff --git a/deps/npm/lib/utils/format-search-stream.js b/deps/npm/lib/utils/format-search-stream.js index 046a4b1e20587b..b70bd915123da4 100644 --- a/deps/npm/lib/utils/format-search-stream.js +++ b/deps/npm/lib/utils/format-search-stream.js @@ -1,6 +1,6 @@ -const { stripVTControlCharacters } = require('node:util') +/* eslint-disable max-len */ +const { stripVTControlCharacters: strip } = require('node:util') const { Minipass } = require('minipass') -const columnify = require('columnify') // This module consumes package data in the following format: // @@ -16,14 +16,48 @@ const columnify = require('columnify') // The returned stream will format this package data // into a byte stream of formatted, displayable output. -module.exports = async (opts) => { - return opts.json ? new JSONOutputStream() : new TextOutputStream(opts) +function filter (data, exclude) { + const words = [data.name] + .concat(data.maintainers.map(m => m.username)) + .concat(data.keywords || []) + .map(f => f?.trim?.()) + .filter(Boolean) + .join(' ') + .toLowerCase() + + if (exclude.find(pattern => { + // Treats both /foo and /foo/ as regex searches + if (pattern.startsWith('/')) { + if (pattern.endsWith('/')) { + pattern = pattern.slice(0, -1) + } + return words.match(new RegExp(pattern.slice(1))) + } + return words.includes(pattern) + })) { + return false + } + + return true +} + +module.exports = (opts) => { + return opts.json ? new JSONOutputStream(opts) : new TextOutputStream(opts) } class JSONOutputStream extends Minipass { #didFirst = false + #exclude + + constructor (opts) { + super() + this.#exclude = opts.exclude + } write (obj) { + if (!filter(obj, this.#exclude)) { + return + } if (!this.#didFirst) { super.write('[\n') this.#didFirst = true @@ -41,94 +75,100 @@ class JSONOutputStream extends Minipass { } class TextOutputStream extends Minipass { - #opts - #line = 0 + #args + #chalk + #exclude + #parseable constructor (opts) { super() - this.#opts = opts + this.#args = opts.args.map(s => s.toLowerCase()).filter(Boolean) + this.#chalk = opts.npm.chalk + this.#exclude = opts.exclude + this.#parseable = opts.parseable } - write (pkg) { - return super.write(this.#prettify(pkg)) - } - - #prettify (data) { + write (data) { + if (!filter(data, this.#exclude)) { + return + } + // Normalize const pkg = { - author: data.maintainers.map((m) => `=${stripVTControlCharacters(m.username)}`).join(' '), - date: 'prehistoric', - description: stripVTControlCharacters(data.description ?? ''), - keywords: '', - name: stripVTControlCharacters(data.name), + authors: data.maintainers.map((m) => `${strip(m.username)}`).join(' '), + publisher: strip(data.publisher?.username || ''), + date: data.date ? data.date.toISOString().slice(0, 10) : 'prehistoric', + description: strip(data.description ?? 
''), + keywords: [], + name: strip(data.name), version: data.version, } if (Array.isArray(data.keywords)) { - pkg.keywords = data.keywords.map((k) => stripVTControlCharacters(k)).join(' ') + pkg.keywords = data.keywords.map(strip) } else if (typeof data.keywords === 'string') { - pkg.keywords = stripVTControlCharacters(data.keywords.replace(/[,\s]+/, ' ')) - } - if (data.date) { - pkg.date = data.date.toISOString().split('T')[0] // remove time + pkg.keywords = strip(data.keywords.replace(/[,\s]+/, ' ')).split(' ') } - const columns = ['name', 'description', 'author', 'date', 'version', 'keywords'] - if (this.#opts.parseable) { - return columns.map((col) => pkg[col] && ('' + pkg[col]).replace(/\t/g, ' ')).join('\t') + let output + if (this.#parseable) { + output = [pkg.name, pkg.description, pkg.author, pkg.date, pkg.version, pkg.keywords] + .filter(Boolean) + .map(col => ('' + col).replace(/\t/g, ' ')).join('\t') + return super.write(output) } - // stdout in tap is never a tty - /* istanbul ignore next */ - const maxWidth = process.stdout.isTTY ? process.stdout.getWindowSize()[0] : Infinity - let output = columnify( - [pkg], - { - include: columns, - showHeaders: ++this.#line <= 1, - columnSplitter: ' | ', - truncate: !this.#opts.long, - config: { - name: { minWidth: 25, maxWidth: 25, truncate: false, truncateMarker: '' }, - description: { minWidth: 20, maxWidth: 20 }, - author: { minWidth: 15, maxWidth: 15 }, - date: { maxWidth: 11 }, - version: { minWidth: 8, maxWidth: 8 }, - keywords: { maxWidth: Infinity }, - }, + const keywords = pkg.keywords.map(k => { + if (this.#args.includes(k)) { + return this.#chalk.cyan(k) + } else { + return k + } + }).join(' ') + + let description = [] + for (const arg of this.#args) { + const finder = pkg.description.toLowerCase().split(arg.toLowerCase()) + let p = 0 + for (const f of finder) { + description.push(pkg.description.slice(p, p + f.length)) + const word = pkg.description.slice(p + f.length, p + f.length + arg.length) + description.push(this.#chalk.cyan(word)) + p += f.length + arg.length } - ).split('\n').map(line => line.slice(0, maxWidth)).join('\n') - - if (!this.#opts.color) { - return output } - - const colors = ['31m', '33m', '32m', '36m', '34m', '35m'] - - this.#opts.args.forEach((arg, i) => { - const markStart = String.fromCharCode(i % colors.length + 1) - const markEnd = String.fromCharCode(0) - - if (arg.charAt(0) === '/') { - output = output.replace( - new RegExp(arg.slice(1, -1), 'gi'), - bit => `${markStart}${bit}${markEnd}` - ) - } else { - // just a normal string, do the split/map thing + description = description.filter(Boolean) + let name = pkg.name + if (this.#args.includes(pkg.name)) { + name = this.#chalk.cyan(pkg.name) + } else { + name = [] + for (const arg of this.#args) { + const finder = pkg.name.toLowerCase().split(arg.toLowerCase()) let p = 0 - - output = output.toLowerCase().split(arg.toLowerCase()).map(piece => { - piece = output.slice(p, p + piece.length) - p += piece.length - const mark = `${markStart}${output.slice(p, p + arg.length)}${markEnd}` - p += arg.length - return `${piece}${mark}` - }).join('') + for (const f of finder) { + name.push(pkg.name.slice(p, p + f.length)) + const word = pkg.name.slice(p + f.length, p + f.length + arg.length) + name.push(this.#chalk.cyan(word)) + p += f.length + arg.length + } } - }) + name = this.#chalk.blue(name.join('')) + } - for (let i = 1; i <= colors.length; i++) { - output = output.split(String.fromCharCode(i)).join(`\u001B[${colors[i - 1]}`) + if (description.length) { + 
output = `${name}\n${description.join('')}\n` + } else { + output = `${name}\n` + } + if (pkg.publisher) { + output += `Version ${this.#chalk.blue(pkg.version)} published ${this.#chalk.blue(pkg.date)} by ${this.#chalk.blue(pkg.publisher)}\n` + } else { + output += `Version ${this.#chalk.blue(pkg.version)} published ${this.#chalk.blue(pkg.date)} by ${this.#chalk.yellow('???')}\n` + } + output += `Maintainers: ${pkg.authors}\n` + if (keywords) { + output += `Keywords: ${keywords}\n` } - return output.split('\u0000').join('\u001B[0m').trim() + output += `${this.#chalk.blue(`https://npm.im/${pkg.name}`)}\n` + return super.write(output) } } diff --git a/deps/npm/lib/utils/format.js b/deps/npm/lib/utils/format.js new file mode 100644 index 00000000000000..abfbf9e3317043 --- /dev/null +++ b/deps/npm/lib/utils/format.js @@ -0,0 +1,50 @@ +const { formatWithOptions: baseFormatWithOptions } = require('util') + +// These are most assuredly not a mistake +// https://eslint.org/docs/latest/rules/no-control-regex +// \x00 through \x1f, \x7f through \x9f, not including \x09 \x0a \x0b \x0d +/* eslint-disable-next-line no-control-regex */ +const HAS_C01 = /[\x00-\x08\x0c\x0e-\x1f\x7f-\x9f]/ + +// Allows everything up to '[38;5;255m' in 8 bit notation +const ALLOWED_SGR = /^\[[0-9;]{0,8}m/ + +// '[38;5;255m'.length +const SGR_MAX_LEN = 10 + +// Strips all ANSI C0 and C1 control characters (except for SGR up to 8 bit) +function STRIP_C01 (str) { + if (!HAS_C01.test(str)) { + return str + } + let result = '' + for (let i = 0; i < str.length; i++) { + const char = str[i] + const code = char.charCodeAt(0) + if (!HAS_C01.test(char)) { + // Most characters are in this set so continue early if we can + result = `${result}${char}` + } else if (code === 27 && ALLOWED_SGR.test(str.slice(i + 1, i + SGR_MAX_LEN + 1))) { + // \x1b with allowed SGR + result = `${result}\x1b` + } else if (code <= 31) { + // escape all other C0 control characters besides \x7f + result = `${result}^${String.fromCharCode(code + 64)}` + } else { + // hasC01 ensures this is now a C1 control character or \x7f + result = `${result}^${String.fromCharCode(code - 64)}` + } + } + return result +} + +const formatWithOptions = ({ prefix: prefixes = [], eol = '\n', ...options }, ...args) => { + const prefix = prefixes.filter(p => p != null).join(' ') + const formatted = STRIP_C01(baseFormatWithOptions(options, ...args)) + // Splitting could be changed to only `\n` once we are sure we only emit unix newlines. + // The eol param to this function will put the correct newlines in place for the returned string. + const lines = formatted.split(/\r?\n/) + return lines.reduce((acc, l) => `${acc}${prefix}${prefix && l ? 
' ' : ''}${l}${eol}`, '') +} + +module.exports = { formatWithOptions } diff --git a/deps/npm/lib/workspaces/get-workspaces.js b/deps/npm/lib/utils/get-workspaces.js similarity index 100% rename from deps/npm/lib/workspaces/get-workspaces.js rename to deps/npm/lib/utils/get-workspaces.js diff --git a/deps/npm/lib/utils/completion/installed-deep.js b/deps/npm/lib/utils/installed-deep.js similarity index 100% rename from deps/npm/lib/utils/completion/installed-deep.js rename to deps/npm/lib/utils/installed-deep.js diff --git a/deps/npm/lib/utils/completion/installed-shallow.js b/deps/npm/lib/utils/installed-shallow.js similarity index 100% rename from deps/npm/lib/utils/completion/installed-shallow.js rename to deps/npm/lib/utils/installed-shallow.js diff --git a/deps/npm/lib/utils/is-windows.js b/deps/npm/lib/utils/is-windows.js index 57f6599b6ae192..63c5671d8400ee 100644 --- a/deps/npm/lib/utils/is-windows.js +++ b/deps/npm/lib/utils/is-windows.js @@ -1,6 +1,4 @@ -const isWindows = process.platform === 'win32' -const isWindowsShell = isWindows && +const isWindowsShell = (process.platform === 'win32') && !/^MINGW(32|64)$/.test(process.env.MSYSTEM) && process.env.TERM !== 'cygwin' -exports.isWindows = isWindows exports.isWindowsShell = isWindowsShell diff --git a/deps/npm/lib/utils/log-file.js b/deps/npm/lib/utils/log-file.js index 1a46b7da0d6604..09e3873f2dce6c 100644 --- a/deps/npm/lib/utils/log-file.js +++ b/deps/npm/lib/utils/log-file.js @@ -1,18 +1,16 @@ const os = require('os') const { join, dirname, basename } = require('path') -const { format } = require('util') -const { Minipass } = require('minipass') const fsMiniPass = require('fs-minipass') const fs = require('fs/promises') -const log = require('./log-shim') -const Display = require('./display') +const { log } = require('proc-log') +const { formatWithOptions } = require('./format') const padZero = (n, length) => n.toString().padStart(length.toString().length, '0') class LogFiles { - // Default to a plain minipass stream so we can buffer + // Default to an array so we can buffer // initial writes before we know the cache location - #logStream = null + #logStream = [] // We cap log files at a certain number of log events per file. // Note that each log event can write more than one line to the @@ -30,6 +28,7 @@ class LogFiles { #path = null #logsMax = null #files = [] + #timing = false constructor ({ maxLogsPerFile = 50_000, @@ -40,23 +39,7 @@ class LogFiles { this.on() } - static format (count, level, title, ...args) { - let prefix = `${count} ${level}` - if (title) { - prefix += ` ${title}` - } - - return format(...args) - .split(/\r?\n/) - .map(Display.clean) - .reduce((lines, line) => - lines += prefix + (line ? 
' ' : '') + line + os.EOL, - '' - ) - } - on () { - this.#logStream = new Minipass() process.on('log', this.#logHandler) } @@ -65,11 +48,12 @@ class LogFiles { this.#endStream() } - load ({ path, logsMax = Infinity } = {}) { + load ({ path, logsMax = Infinity, timing } = {}) { // dir is user configurable and is required to exist so // this can error if the dir is missing or not configured correctly this.#path = path this.#logsMax = logsMax + this.#timing = timing // Log stream has already ended if (!this.#logStream) { @@ -78,36 +62,40 @@ class LogFiles { log.verbose('logfile', `logs-max:${logsMax} dir:${this.#path}`) - // Pipe our initial stream to our new file stream and + // Write the contents of our array buffer to our new file stream and // set that as the new log logstream for future writes // if logs max is 0 then the user does not want a log file if (this.#logsMax > 0) { const initialFile = this.#openLogFile() if (initialFile) { - this.#logStream = this.#logStream.pipe(initialFile) + for (const item of this.#logStream) { + const formatted = this.#formatLogItem(...item) + if (formatted !== null) { + initialFile.write(formatted) + } + } + this.#logStream = initialFile } } + log.verbose('logfile', this.files[0] || 'no logfile created') + // Kickoff cleaning process, even if we aren't writing a logfile. // This is async but it will always ignore the current logfile // Return the result so it can be awaited in tests return this.#cleanLogs() } - log (...args) { - this.#logHandler(...args) - } - get files () { return this.#files } get #isBuffered () { - return this.#logStream instanceof Minipass + return Array.isArray(this.#logStream) } #endStream (output) { - if (this.#logStream) { + if (this.#logStream && !this.#isBuffered) { this.#logStream.end(output) this.#logStream = null } @@ -125,12 +113,15 @@ class LogFiles { return } - const logOutput = this.#formatLogItem(level, ...args) - if (this.#isBuffered) { // Cant do anything but buffer the output if we dont // have a file stream yet - this.#logStream.write(logOutput) + this.#logStream.push([level, ...args]) + return + } + + const logOutput = this.#formatLogItem(level, ...args) + if (logOutput === null) { return } @@ -150,9 +141,15 @@ class LogFiles { } } - #formatLogItem (...args) { + #formatLogItem (level, title, ...args) { + // Only right timing logs to logfile if explicitly requests + if (level === log.KEYS.timing && !this.#timing) { + return null + } + this.#fileLogCount += 1 - return LogFiles.format(this.#totalLogCount++, ...args) + const prefix = [this.#totalLogCount++, level, title || null] + return formatWithOptions({ prefix, eol: os.EOL, colors: false }, ...args) } #getLogFilePath (count = '') { @@ -249,7 +246,7 @@ class LogFiles { } catch (e) { // Disable cleanup failure warnings when log writing is disabled if (this.#logsMax > 0) { - log.warn('logfile', 'error cleaning log files', e) + log.verbose('logfile', 'error cleaning log files', e) } } finally { log.silly('logfile', 'done cleaning log files') diff --git a/deps/npm/lib/utils/log-shim.js b/deps/npm/lib/utils/log-shim.js deleted file mode 100644 index 9d5a36d967413f..00000000000000 --- a/deps/npm/lib/utils/log-shim.js +++ /dev/null @@ -1,59 +0,0 @@ -const NPMLOG = require('npmlog') -const PROCLOG = require('proc-log') - -// Sets getter and optionally a setter -// otherwise setting should throw -const accessors = (obj, set) => (k) => ({ - get: () => obj[k], - set: set ? 
(v) => (obj[k] = v) : () => { - throw new Error(`Cant set ${k}`) - }, -}) - -// Set the value to a bound function on the object -const value = (obj) => (k) => ({ - value: (...args) => obj[k].apply(obj, args), -}) - -const properties = { - // npmlog getters/setters - level: accessors(NPMLOG, true), - heading: accessors(NPMLOG, true), - levels: accessors(NPMLOG), - gauge: accessors(NPMLOG), - stream: accessors(NPMLOG), - tracker: accessors(NPMLOG), - progressEnabled: accessors(NPMLOG), - // npmlog methods - useColor: value(NPMLOG), - enableColor: value(NPMLOG), - disableColor: value(NPMLOG), - enableUnicode: value(NPMLOG), - disableUnicode: value(NPMLOG), - enableProgress: value(NPMLOG), - disableProgress: value(NPMLOG), - clearProgress: value(NPMLOG), - showProgress: value(NPMLOG), - newItem: value(NPMLOG), - newGroup: value(NPMLOG), - // proclog methods - notice: value(PROCLOG), - error: value(PROCLOG), - warn: value(PROCLOG), - info: value(PROCLOG), - verbose: value(PROCLOG), - http: value(PROCLOG), - silly: value(PROCLOG), - pause: value(PROCLOG), - resume: value(PROCLOG), -} - -const descriptors = Object.entries(properties).reduce((acc, [k, v]) => { - acc[k] = { enumerable: true, ...v(k) } - return acc -}, {}) - -// Create an object with the allowed properties rom npm log and all -// the logging methods from proc log -// XXX: this should go away and requires of this should be replaced with proc-log + new display -module.exports = Object.freeze(Object.defineProperties({}, descriptors)) diff --git a/deps/npm/lib/utils/open-url-prompt.js b/deps/npm/lib/utils/open-url-prompt.js index 71a68c253c0505..6f4d453a959d59 100644 --- a/deps/npm/lib/utils/open-url-prompt.js +++ b/deps/npm/lib/utils/open-url-prompt.js @@ -1,4 +1,5 @@ const readline = require('readline') +const { input, output } = require('proc-log') const open = require('./open-url.js') function print (npm, title, url) { @@ -6,7 +7,7 @@ function print (npm, title, url) { const message = json ? 
JSON.stringify({ title, url }) : `${title}:\n${url}` - npm.output(message) + output.standard(message) } // Prompt to open URL in browser if possible @@ -33,7 +34,7 @@ const promptOpen = async (npm, url, title, prompt, emitter) => { output: process.stdout, }) - const tryOpen = await new Promise(resolve => { + const tryOpen = await input.read(() => new Promise(resolve => { rl.on('SIGINT', () => { rl.close() resolve('SIGINT') @@ -46,14 +47,10 @@ const promptOpen = async (npm, url, title, prompt, emitter) => { if (emitter && emitter.addListener) { emitter.addListener('abort', () => { rl.close() - - // clear the prompt line - npm.output('') - resolve(false) }) } - }) + })) if (tryOpen === 'SIGINT') { throw new Error('canceled') diff --git a/deps/npm/lib/utils/open-url.js b/deps/npm/lib/utils/open-url.js index 77bb1d03d8e165..46b7abc731fa1d 100644 --- a/deps/npm/lib/utils/open-url.js +++ b/deps/npm/lib/utils/open-url.js @@ -1,4 +1,5 @@ const promiseSpawn = require('@npmcli/promise-spawn') +const { output } = require('proc-log') const { URL } = require('url') @@ -16,7 +17,7 @@ const open = async (npm, url, errMsg, isFile) => { }, null, 2) : `${errMsg}:\n ${url}\n` - npm.output(alternateMsg) + output.standard(alternateMsg) } if (browser === false) { diff --git a/deps/npm/lib/utils/otplease.js b/deps/npm/lib/utils/otplease.js index b4aa1674692551..b8dd0b66ed7664 100644 --- a/deps/npm/lib/utils/otplease.js +++ b/deps/npm/lib/utils/otplease.js @@ -1,4 +1,3 @@ -const log = require('./log-shim') async function otplease (npm, opts, fn) { try { return await fn(opts) @@ -8,7 +7,6 @@ async function otplease (npm, opts, fn) { } if (isWebOTP(err)) { - log.disableProgress() const webAuth = require('./web-auth') const openUrlPrompt = require('./open-url-prompt') diff --git a/deps/npm/lib/utils/pulse-till-done.js b/deps/npm/lib/utils/pulse-till-done.js deleted file mode 100644 index 22294141474839..00000000000000 --- a/deps/npm/lib/utils/pulse-till-done.js +++ /dev/null @@ -1,26 +0,0 @@ -const log = require('./log-shim.js') - -let pulseTimer = null -const withPromise = async (promise) => { - pulseStart() - try { - return await promise - } finally { - pulseStop() - } -} - -const pulseStart = () => { - pulseTimer = pulseTimer || setInterval(() => { - log.gauge.pulse('') - }, 150) -} - -const pulseStop = () => { - clearInterval(pulseTimer) - pulseTimer = null -} - -module.exports = { - withPromise, -} diff --git a/deps/npm/lib/utils/read-user-info.js b/deps/npm/lib/utils/read-user-info.js index fa1cea158e8974..4e8def4bdf1def 100644 --- a/deps/npm/lib/utils/read-user-info.js +++ b/deps/npm/lib/utils/read-user-info.js @@ -1,6 +1,6 @@ -const { read } = require('read') +const { read: _read } = require('read') const userValidate = require('npm-user-validate') -const log = require('./log-shim.js') +const { log, input } = require('proc-log') exports.otp = readOTP exports.password = readPassword @@ -16,17 +16,14 @@ const passwordPrompt = 'npm password: ' const usernamePrompt = 'npm username: ' const emailPrompt = 'email (this IS public): ' -function readWithProgress (opts) { - log.clearProgress() - return read(opts).finally(() => log.showProgress()) -} +const read = (...args) => input.read(() => _read(...args)) function readOTP (msg = otpPrompt, otp, isRetry) { if (isRetry && otp && /^[\d ]+$|^[A-Fa-f0-9]{64,64}$/.test(otp)) { return otp.replace(/\s+/g, '') } - return readWithProgress({ prompt: msg, default: otp || '' }) + return read({ prompt: msg, default: otp || '' }) .then((rOtp) => readOTP(msg, rOtp, true)) } @@ 
-35,7 +32,7 @@ function readPassword (msg = passwordPrompt, password, isRetry) { return password } - return readWithProgress({ prompt: msg, silent: true, default: password || '' }) + return read({ prompt: msg, silent: true, default: password || '' }) .then((rPassword) => readPassword(msg, rPassword, true)) } @@ -49,7 +46,7 @@ function readUsername (msg = usernamePrompt, username, isRetry) { } } - return readWithProgress({ prompt: msg, default: username || '' }) + return read({ prompt: msg, default: username || '' }) .then((rUsername) => readUsername(msg, rUsername, true)) } @@ -63,6 +60,6 @@ function readEmail (msg = emailPrompt, email, isRetry) { } } - return readWithProgress({ prompt: msg, default: email || '' }) + return read({ prompt: msg, default: email || '' }) .then((username) => readEmail(msg, username, true)) } diff --git a/deps/npm/lib/utils/reify-finish.js b/deps/npm/lib/utils/reify-finish.js index 9b43abcb7610a1..0b34a375768606 100644 --- a/deps/npm/lib/utils/reify-finish.js +++ b/deps/npm/lib/utils/reify-finish.js @@ -1,6 +1,6 @@ const reifyOutput = require('./reify-output.js') const ini = require('ini') -const { writeFile } = require('fs').promises +const { writeFile } = require('fs/promises') const { resolve } = require('path') const reifyFinish = async (npm, arb) => { diff --git a/deps/npm/lib/utils/reify-output.js b/deps/npm/lib/utils/reify-output.js index 44c913812a8efe..a858a546c4010c 100644 --- a/deps/npm/lib/utils/reify-output.js +++ b/deps/npm/lib/utils/reify-output.js @@ -9,13 +9,12 @@ // found 37 vulnerabilities (5 low, 7 moderate, 25 high) // run `npm audit fix` to fix them, or `npm audit` for details -const log = require('./log-shim.js') +const { log, output } = require('proc-log') const { depth } = require('treeverse') const ms = require('ms') const npmAuditReport = require('npm-audit-report') const { readTree: getFundingInfo } = require('libnpmfund') const auditError = require('./audit-error.js') -const Table = require('cli-table3') // TODO: output JSON if flatOptions.json is true const reifyOutput = (npm, arb) => { @@ -42,51 +41,31 @@ const reifyOutput = (npm, arb) => { } if (diff) { - let diffTable - if (npm.config.get('dry-run') || npm.config.get('long')) { - diffTable = new Table({ - chars: { - top: '', - 'top-mid': '', - 'top-left': '', - 'top-right': '', - bottom: '', - 'bottom-mid': '', - 'bottom-left': '', - 'bottom-right': '', - left: '', - 'left-mid': '', - mid: '', - 'mid-mid': '', - right: '', - 'right-mid': '', - middle: ' ', - }, - style: { - 'padding-left': 0, - 'padding-right': 0, - border: 0, - }, - }) - } + const showDiff = npm.config.get('dry-run') || npm.config.get('long') + const chalk = npm.chalk depth({ tree: diff, visit: d => { switch (d.action) { case 'REMOVE': - diffTable?.push(['remove', d.actual.name, d.actual.package.version]) + if (showDiff) { + /* eslint-disable-next-line max-len */ + output.standard(`${chalk.blue('remove')} ${d.actual.name} ${d.actual.package.version}`) + } summary.removed++ break case 'ADD': - diffTable?.push(['add', d.ideal.name, d.ideal.package.version]) + if (showDiff) { + output.standard(`${chalk.green('add')} ${d.ideal.name} ${d.ideal.package.version}`) + } actualTree.inventory.has(d.ideal) && summary.added++ break case 'CHANGE': - diffTable?.push(['change', - d.actual.name, - d.actual.package.version + ' -> ' + d.ideal.package.version, - ]) + if (showDiff) { + /* eslint-disable-next-line max-len */ + output.standard(`${chalk.cyan('change')} ${d.actual.name} ${d.actual.package.version} => 
${d.ideal.package.version}`) + } summary.changed++ break default: @@ -97,10 +76,6 @@ const reifyOutput = (npm, arb) => { }, getChildren: d => d.children, }) - - if (diffTable) { - npm.output('\n' + diffTable.toString()) - } } if (npm.flatOptions.fund) { @@ -115,7 +90,7 @@ const reifyOutput = (npm, arb) => { summary.audit = npm.command === 'audit' ? auditReport : auditReport.toJSON().metadata } - npm.output(JSON.stringify(summary, null, 2)) + output.standard(JSON.stringify(summary, null, 2)) } else { packagesChangedMessage(npm, summary) packagesFundingMessage(npm, summary) @@ -134,7 +109,7 @@ const printAuditReport = (npm, report) => { if (!res || !res.report) { return } - npm.output(`\n${res.report}`) + output.standard(`\n${res.report}`) } const getAuditReport = (npm, report) => { @@ -206,7 +181,7 @@ const packagesChangedMessage = (npm, { added, removed, changed, audited }) => { } msg.push(` in ${ms(Date.now() - npm.started)}`) - npm.output(msg.join('')) + output.standard(msg.join('')) } const packagesFundingMessage = (npm, { funding }) => { @@ -214,11 +189,11 @@ const packagesFundingMessage = (npm, { funding }) => { return } - npm.output('') + output.standard('') const pkg = funding === 1 ? 'package' : 'packages' const is = funding === 1 ? 'is' : 'are' - npm.output(`${funding} ${pkg} ${is} looking for funding`) - npm.output(' run `npm fund` for details') + output.standard(`${funding} ${pkg} ${is} looking for funding`) + output.standard(' run `npm fund` for details') } module.exports = reifyOutput diff --git a/deps/npm/lib/utils/tar.js b/deps/npm/lib/utils/tar.js index c25fe71614a60b..9085d9dd350165 100644 --- a/deps/npm/lib/utils/tar.js +++ b/deps/npm/lib/utils/tar.js @@ -1,8 +1,7 @@ const tar = require('tar') const ssri = require('ssri') -const log = require('./log-shim') +const { log } = require('proc-log') const formatBytes = require('./format-bytes.js') -const columnify = require('columnify') const localeCompare = require('@isaacs/string-locale-compare')('en', { sensitivity: 'case', numeric: true, @@ -12,60 +11,36 @@ const logTar = (tarball, opts = {}) => { const { unicode = false } = opts log.notice('') log.notice('', `${unicode ? '📦 ' : 'package:'} ${tarball.name}@${tarball.version}`) - log.notice('=== Tarball Contents ===') + log.notice('Tarball Contents') if (tarball.files.length) { log.notice( '', - columnify( - tarball.files - .map(f => { - const bytes = formatBytes(f.size, false) - return /^node_modules\//.test(f.path) ? null : { path: f.path, size: `${bytes}` } - }) - .filter(f => f), - { - include: ['size', 'path'], - showHeaders: false, - } - ) + tarball.files.map(f => + /^node_modules\//.test(f.path) ? 
null : `${formatBytes(f.size, false)} ${f.path}` + ).filter(f => f).join('\n') ) } if (tarball.bundled.length) { - log.notice('=== Bundled Dependencies ===') + log.notice('Bundled Dependencies') tarball.bundled.forEach(name => log.notice('', name)) } - log.notice('=== Tarball Details ===') - log.notice( - '', - columnify( - [ - { name: 'name:', value: tarball.name }, - { name: 'version:', value: tarball.version }, - tarball.filename && { name: 'filename:', value: tarball.filename }, - { name: 'package size:', value: formatBytes(tarball.size) }, - { name: 'unpacked size:', value: formatBytes(tarball.unpackedSize) }, - { name: 'shasum:', value: tarball.shasum }, - { - name: 'integrity:', - value: - tarball.integrity.toString().slice(0, 20) + - '[...]' + - tarball.integrity.toString().slice(80), - }, - tarball.bundled.length && { name: 'bundled deps:', value: tarball.bundled.length }, - tarball.bundled.length && { - name: 'bundled files:', - value: tarball.entryCount - tarball.files.length, - }, - tarball.bundled.length && { name: 'own files:', value: tarball.files.length }, - { name: 'total files:', value: tarball.entryCount }, - ].filter(x => x), - { - include: ['name', 'value'], - showHeaders: false, - } - ) - ) + log.notice('Tarball Details') + log.notice('', `name: ${tarball.name}`) + log.notice('', `version: ${tarball.version}`) + if (tarball.filename) { + log.notice('', `filename: ${tarball.filename}`) + } + log.notice('', `package size: ${formatBytes(tarball.size)}`) + log.notice('', `unpacked size: ${formatBytes(tarball.unpackedSize)}`) + log.notice('', `shasum: ${tarball.shasum}`) + /* eslint-disable-next-line max-len */ + log.notice('', `integrity: ${tarball.integrity.toString().slice(0, 20)}[...]${tarball.integrity.toString().slice(80)}`) + if (tarball.bundled.length) { + log.notice('', `bundled deps: ${tarball.bundled.length}`) + log.notice('', `bundled files: ${tarball.entryCount - tarball.files.length}`) + log.notice('', `own files: ${tarball.files.length}`) + } + log.notice('', `total files: ${tarball.entryCount}`) log.notice('', '') } diff --git a/deps/npm/lib/utils/timers.js b/deps/npm/lib/utils/timers.js index c215fe926afb59..16a255961fee3b 100644 --- a/deps/npm/lib/utils/timers.js +++ b/deps/npm/lib/utils/timers.js @@ -1,114 +1,87 @@ -const EE = require('events') -const fs = require('fs') -const log = require('./log-shim') +const EE = require('node:events') +const fs = require('node:fs') +const { log, time } = require('proc-log') + +const INITIAL_TIMER = 'npm' -// This is an event emiiter but on/off -// only listen on a single internal event that gets -// emitted whenever a timer ends class Timers extends EE { - file = null + #file + #timing #unfinished = new Map() #finished = {} - #onTimeEnd = Symbol('onTimeEnd') - #initialListener = null - #initialTimer = null - constructor ({ listener = null, start = 'npm' } = {}) { + constructor () { super() - this.#initialListener = listener - this.#initialTimer = start - this.#init() - } - - get unfinished () { - return this.#unfinished - } - - get finished () { - return this.#finished - } - - #init () { this.on() - if (this.#initialListener) { - this.on(this.#initialListener) - } - process.emit('time', this.#initialTimer) - this.started = this.#unfinished.get(this.#initialTimer) + time.start(INITIAL_TIMER) + this.started = this.#unfinished.get(INITIAL_TIMER) } - on (listener) { - if (listener) { - super.on(this.#onTimeEnd, listener) - } else { - process.on('time', this.#timeListener) - process.on('timeEnd', this.#timeEndListener) - 
} + on () { + process.on('time', this.#timeHandler) } - off (listener) { - if (listener) { - super.off(this.#onTimeEnd, listener) - } else { - this.removeAllListeners(this.#onTimeEnd) - process.off('time', this.#timeListener) - process.off('timeEnd', this.#timeEndListener) - } + off () { + process.off('time', this.#timeHandler) } - time (name, fn) { - process.emit('time', name) - const end = () => process.emit('timeEnd', name) - if (typeof fn === 'function') { - const res = fn() - return res && res.finally ? res.finally(end) : (end(), res) - } - return end + load ({ path, timing } = {}) { + this.#timing = timing + this.#file = `${path}timing.json` } - load ({ path } = {}) { - if (path) { - this.file = `${path}timing.json` + finish (metadata) { + time.end(INITIAL_TIMER) + + for (const [name, timer] of this.#unfinished) { + log.silly('unfinished npm timer', name, timer) } - } - writeFile (metadata) { - if (!this.file) { + if (!this.#timing) { + // Not in timing mode, nothing else to do here return } try { - const globalStart = this.started - const globalEnd = this.#finished.npm || Date.now() - const content = { - metadata, - timers: this.#finished, - // add any unfinished timers with their relative start/end - unfinishedTimers: [...this.#unfinished.entries()].reduce((acc, [name, start]) => { - acc[name] = [start - globalStart, globalEnd - globalStart] - return acc - }, {}), - } - fs.writeFileSync(this.file, JSON.stringify(content) + '\n') + this.#writeFile(metadata) + log.info('timing', `Timing info written to: ${this.#file}`) } catch (e) { - this.file = null log.warn('timing', `could not write timing file: ${e}`) } } - #timeListener = (name) => { - this.#unfinished.set(name, Date.now()) + #writeFile (metadata) { + const globalStart = this.started + const globalEnd = this.#finished[INITIAL_TIMER] + const content = { + metadata, + timers: this.#finished, + // add any unfinished timers with their relative start/end + unfinishedTimers: [...this.#unfinished.entries()].reduce((acc, [name, start]) => { + acc[name] = [start - globalStart, globalEnd - globalStart] + return acc + }, {}), + } + fs.writeFileSync(this.#file, JSON.stringify(content) + '\n') } - #timeEndListener = (name) => { - if (this.#unfinished.has(name)) { - const ms = Date.now() - this.#unfinished.get(name) - this.#finished[name] = ms - this.#unfinished.delete(name) - this.emit(this.#onTimeEnd, name, ms) - } else { - log.silly('timing', "Tried to end timer that doesn't exist:", name) + #timeHandler = (level, name) => { + const now = Date.now() + switch (level) { + case time.KEYS.start: + this.#unfinished.set(name, now) + break + case time.KEYS.end: { + if (this.#unfinished.has(name)) { + const ms = now - this.#unfinished.get(name) + this.#finished[name] = ms + this.#unfinished.delete(name) + log.timing(name, `Completed in ${ms}ms`) + } else { + log.silly('timing', `Tried to end timer that doesn't exist: ${name}`) + } + } } } } diff --git a/deps/npm/lib/workspaces/update-workspaces.js b/deps/npm/lib/utils/update-workspaces.js similarity index 100% rename from deps/npm/lib/workspaces/update-workspaces.js rename to deps/npm/lib/utils/update-workspaces.js diff --git a/deps/npm/lib/utils/verify-signatures.js b/deps/npm/lib/utils/verify-signatures.js new file mode 100644 index 00000000000000..f2973316c9b767 --- /dev/null +++ b/deps/npm/lib/utils/verify-signatures.js @@ -0,0 +1,389 @@ +const fetch = require('npm-registry-fetch') +const localeCompare = require('@isaacs/string-locale-compare')('en') +const npa = require('npm-package-arg') 
+const pacote = require('pacote') +const pMap = require('p-map') +const tufClient = require('@sigstore/tuf') +const { log, output } = require('proc-log') + +const sortAlphabetically = (a, b) => localeCompare(a.name, b.name) + +class VerifySignatures { + constructor (tree, filterSet, npm, opts) { + this.tree = tree + this.filterSet = filterSet + this.npm = npm + this.opts = opts + this.keys = new Map() + this.invalid = [] + this.missing = [] + this.checkedPackages = new Set() + this.auditedWithKeysCount = 0 + this.verifiedSignatureCount = 0 + this.verifiedAttestationCount = 0 + this.exitCode = 0 + } + + async run () { + const start = process.hrtime.bigint() + + // Find all deps in tree + const { edges, registries } = this.getEdgesOut(this.tree.inventory.values(), this.filterSet) + if (edges.size === 0) { + throw new Error('found no installed dependencies to audit') + } + + const tuf = await tufClient.initTUF({ + cachePath: this.opts.tufCache, + retry: this.opts.retry, + timeout: this.opts.timeout, + }) + await Promise.all([...registries].map(registry => this.setKeys({ registry, tuf }))) + + log.verbose('verifying registry signatures') + await pMap(edges, (e) => this.getVerifiedInfo(e), { concurrency: 20, stopOnError: true }) + + // Didn't find any dependencies that could be verified, e.g. only local + // deps, missing version, not on a registry etc. + if (!this.auditedWithKeysCount) { + throw new Error('found no dependencies to audit that were installed from ' + + 'a supported registry') + } + + const invalid = this.invalid.sort(sortAlphabetically) + const missing = this.missing.sort(sortAlphabetically) + + const hasNoInvalidOrMissing = invalid.length === 0 && missing.length === 0 + + if (!hasNoInvalidOrMissing) { + process.exitCode = 1 + } + + if (this.npm.config.get('json')) { + output.standard(JSON.stringify({ + invalid, + missing, + }, null, 2)) + return + } + const end = process.hrtime.bigint() + const elapsed = end - start + + const auditedPlural = this.auditedWithKeysCount > 1 ? 
's' : '' + const timing = `audited ${this.auditedWithKeysCount} package${auditedPlural} in ` + + `${Math.floor(Number(elapsed) / 1e9)}s` + output.standard(timing) + output.standard('') + + const verifiedBold = this.npm.chalk.bold('verified') + if (this.verifiedSignatureCount) { + if (this.verifiedSignatureCount === 1) { + /* eslint-disable-next-line max-len */ + output.standard(`${this.verifiedSignatureCount} package has a ${verifiedBold} registry signature`) + } else { + /* eslint-disable-next-line max-len */ + output.standard(`${this.verifiedSignatureCount} packages have ${verifiedBold} registry signatures`) + } + output.standard('') + } + + if (this.verifiedAttestationCount) { + if (this.verifiedAttestationCount === 1) { + /* eslint-disable-next-line max-len */ + output.standard(`${this.verifiedAttestationCount} package has a ${verifiedBold} attestation`) + } else { + /* eslint-disable-next-line max-len */ + output.standard(`${this.verifiedAttestationCount} packages have ${verifiedBold} attestations`) + } + output.standard('') + } + + if (missing.length) { + const missingClr = this.npm.chalk.redBright('missing') + if (missing.length === 1) { + /* eslint-disable-next-line max-len */ + output.standard(`1 package has a ${missingClr} registry signature but the registry is providing signing keys:`) + } else { + /* eslint-disable-next-line max-len */ + output.standard(`${missing.length} packages have ${missingClr} registry signatures but the registry is providing signing keys:`) + } + output.standard('') + missing.map(m => + output.standard(`${this.npm.chalk.red(`${m.name}@${m.version}`)} (${m.registry})`) + ) + } + + if (invalid.length) { + if (missing.length) { + output.standard('') + } + const invalidClr = this.npm.chalk.redBright('invalid') + // We can have either invalid signatures or invalid provenance + const invalidSignatures = this.invalid.filter(i => i.code === 'EINTEGRITYSIGNATURE') + if (invalidSignatures.length) { + if (invalidSignatures.length === 1) { + output.standard(`1 package has an ${invalidClr} registry signature:`) + } else { + /* eslint-disable-next-line max-len */ + output.standard(`${invalidSignatures.length} packages have ${invalidClr} registry signatures:`) + } + output.standard('') + invalidSignatures.map(i => + output.standard(`${this.npm.chalk.red(`${i.name}@${i.version}`)} (${i.registry})`) + ) + output.standard('') + } + + const invalidAttestations = this.invalid.filter(i => i.code === 'EATTESTATIONVERIFY') + if (invalidAttestations.length) { + if (invalidAttestations.length === 1) { + output.standard(`1 package has an ${invalidClr} attestation:`) + } else { + /* eslint-disable-next-line max-len */ + output.standard(`${invalidAttestations.length} packages have ${invalidClr} attestations:`) + } + output.standard('') + invalidAttestations.map(i => + output.standard(`${this.npm.chalk.red(`${i.name}@${i.version}`)} (${i.registry})`) + ) + output.standard('') + } + + if (invalid.length === 1) { + /* eslint-disable-next-line max-len */ + output.standard(`Someone might have tampered with this package since it was published on the registry!`) + } else { + /* eslint-disable-next-line max-len */ + output.standard(`Someone might have tampered with these packages since they were published on the registry!`) + } + output.standard('') + } + } + + getEdgesOut (nodes, filterSet) { + const edges = new Set() + const registries = new Set() + for (const node of nodes) { + for (const edge of node.edgesOut.values()) { + const filteredOut = + edge.from + && filterSet + && 
filterSet.size > 0 + && !filterSet.has(edge.from.target) + + if (!filteredOut) { + const spec = this.getEdgeSpec(edge) + if (spec) { + // Prefetch and cache public keys from used registries + registries.add(this.getSpecRegistry(spec)) + } + edges.add(edge) + } + } + } + return { edges, registries } + } + + async setKeys ({ registry, tuf }) { + const { host, pathname } = new URL(registry) + // Strip any trailing slashes from pathname + const regKey = `${host}${pathname.replace(/\/$/, '')}/keys.json` + let keys = await tuf.getTarget(regKey) + .then((target) => JSON.parse(target)) + .then(({ keys: ks }) => ks.map((key) => ({ + ...key, + keyid: key.keyId, + pemkey: `-----BEGIN PUBLIC KEY-----\n${key.publicKey.rawBytes}\n-----END PUBLIC KEY-----`, + expires: key.publicKey.validFor.end || null, + }))).catch(err => { + if (err.code === 'TUF_FIND_TARGET_ERROR') { + return null + } else { + throw err + } + }) + + // If keys not found in Sigstore TUF repo, fallback to registry keys API + if (!keys) { + keys = await fetch.json('/-/npm/v1/keys', { + ...this.npm.flatOptions, + registry, + }).then(({ keys: ks }) => ks.map((key) => ({ + ...key, + pemkey: `-----BEGIN PUBLIC KEY-----\n${key.key}\n-----END PUBLIC KEY-----`, + }))).catch(err => { + if (err.code === 'E404' || err.code === 'E400') { + return null + } else { + throw err + } + }) + } + + if (keys) { + this.keys.set(registry, keys) + } + } + + getEdgeType (edge) { + return edge.optional ? 'optionalDependencies' + : edge.peer ? 'peerDependencies' + : edge.dev ? 'devDependencies' + : 'dependencies' + } + + getEdgeSpec (edge) { + let name = edge.name + try { + name = npa(edge.spec).subSpec.name + } catch { + // leave it as edge.name + } + try { + return npa(`${name}@${edge.spec}`) + } catch { + // Skip packages with invalid spec + } + } + + buildRegistryConfig (registry) { + const keys = this.keys.get(registry) || [] + const parsedRegistry = new URL(registry) + const regKey = `//${parsedRegistry.host}${parsedRegistry.pathname}` + return { + [`${regKey}:_keys`]: keys, + } + } + + getSpecRegistry (spec) { + return fetch.pickRegistry(spec, this.npm.flatOptions) + } + + getValidPackageInfo (edge) { + const type = this.getEdgeType(edge) + // Skip potentially optional packages that are not on disk, as these could + // be omitted during install + if (edge.error === 'MISSING' && type !== 'dependencies') { + return + } + + const spec = this.getEdgeSpec(edge) + // Skip invalid version requirements + if (!spec) { + return + } + const node = edge.to || edge + const { version } = node.package || {} + + if (node.isWorkspace || // Skip local workspaces packages + !version || // Skip packages that don't have a installed version, e.g. optonal dependencies + !spec.registry) { // Skip if not from registry, e.g. 
git package + return + } + + for (const omitType of this.npm.config.get('omit')) { + if (node[omitType]) { + return + } + } + + return { + name: spec.name, + version, + type, + location: node.location, + registry: this.getSpecRegistry(spec), + } + } + + async verifySignatures (name, version, registry) { + const { + _integrity: integrity, + _signatures, + _attestations, + _resolved: resolved, + } = await pacote.manifest(`${name}@${version}`, { + verifySignatures: true, + verifyAttestations: true, + ...this.buildRegistryConfig(registry), + ...this.npm.flatOptions, + }) + const signatures = _signatures || [] + const result = { + integrity, + signatures, + attestations: _attestations, + resolved, + } + return result + } + + async getVerifiedInfo (edge) { + const info = this.getValidPackageInfo(edge) + if (!info) { + return + } + const { name, version, location, registry, type } = info + if (this.checkedPackages.has(location)) { + // we already did or are doing this one + return + } + this.checkedPackages.add(location) + + // We only "audit" or verify the signature, or the presence of it, on + // packages whose registry returns signing keys + const keys = this.keys.get(registry) || [] + if (keys.length) { + this.auditedWithKeysCount += 1 + } + + try { + const { integrity, signatures, attestations, resolved } = await this.verifySignatures( + name, version, registry + ) + + // Currently we only care about missing signatures on registries that provide a public key + // We could make this configurable in the future with a strict/paranoid mode + if (signatures.length) { + this.verifiedSignatureCount += 1 + } else if (keys.length) { + this.missing.push({ + integrity, + location, + name, + registry, + resolved, + version, + }) + } + + // Track verified attestations separately to registry signatures, as all + // packages on registries with signing keys are expected to have registry + // signatures, but not all packages have provenance and publish attestations. + if (attestations) { + this.verifiedAttestationCount += 1 + } + } catch (e) { + if (e.code === 'EINTEGRITYSIGNATURE' || e.code === 'EATTESTATIONVERIFY') { + this.invalid.push({ + code: e.code, + message: e.message, + integrity: e.integrity, + keyid: e.keyid, + location, + name, + registry, + resolved: e.resolved, + signature: e.signature, + predicateType: e.predicateType, + type, + version, + }) + } else { + throw e + } + } + } +} + +module.exports = VerifySignatures diff --git a/deps/npm/man/man1/npm-access.1 b/deps/npm/man/man1/npm-access.1 index f11a6a5ba98230..df63b4a6a5d6c6 100644 --- a/deps/npm/man/man1/npm-access.1 +++ b/deps/npm/man/man1/npm-access.1 @@ -5,7 +5,7 @@ .P .RS 2 .nf -npm access list packages \[lB]|| \[lB]\[rB] +npm access list packages \[lB]||\[rB] \[lB]\[rB] npm access list collaborators \[lB] \[lB]\[rB]\[rB] npm access get status \[lB]\[rB] npm access set status=public|private \[lB]\[rB] diff --git a/deps/npm/man/man1/npm-doctor.1 b/deps/npm/man/man1/npm-doctor.1 index 1aa4af1e485b08..16ae2f7e41715b 100644 --- a/deps/npm/man/man1/npm-doctor.1 +++ b/deps/npm/man/man1/npm-doctor.1 @@ -5,7 +5,7 @@ .P .RS 2 .nf -npm doctor \[lB]ping\[rB] \[lB]registry\[rB] \[lB]versions\[rB] \[lB]environment\[rB] \[lB]permissions\[rB] \[lB]cache\[rB] +npm doctor \[lB]connection\[rB] \[lB]registry\[rB] \[lB]versions\[rB] \[lB]environment\[rB] \[lB]permissions\[rB] \[lB]cache\[rB] .fi .RE .P @@ -30,21 +30,21 @@ Without all of these working properly, npm may not work properly. 
Many issues ar Also, in addition to this, there are also very many issue reports due to using old versions of npm. Since npm is constantly improving, running \fBnpm@latest\fR is better than an old version. .P \fBnpm doctor\fR verifies the following items in your environment, and if there are any recommended changes, it will display them. By default npm runs all of these checks. You can limit what checks are ran by specifying them as extra arguments. -.SS "\fBnpm ping\fR" +.SS "\fBConnecting to the registry\fR" .P -By default, npm installs from the primary npm registry, \fBregistry.npmjs.org\fR. \fBnpm doctor\fR hits a special ping endpoint within the registry. This can also be checked with \fBnpm ping\fR. If this check fails, you may be using a proxy that needs to be configured, or may need to talk to your IT staff to get access over HTTPS to \fBregistry.npmjs.org\fR. +By default, npm installs from the primary npm registry, \fBregistry.npmjs.org\fR. \fBnpm doctor\fR hits a special connection testing endpoint within the registry. This can also be checked with \fBnpm ping\fR. If this check fails, you may be using a proxy that needs to be configured, or may need to talk to your IT staff to get access over HTTPS to \fBregistry.npmjs.org\fR. .P This check is done against whichever registry you've configured (you can see what that is by running \fBnpm config get registry\fR), and if you're using a private registry that doesn't support the \fB/whoami\fR endpoint supported by the primary registry, this check may fail. -.SS "\fBnpm -v\fR" +.SS "\fBChecking npm version\fR" .P While Node.js may come bundled with a particular version of npm, it's the policy of the CLI team that we recommend all users run \fBnpm@latest\fR if they can. As the CLI is maintained by a small team of contributors, there are only resources for a single line of development, so npm's own long-term support releases typically only receive critical security and regression fixes. The team believes that the latest tested version of npm is almost always likely to be the most functional and defect-free version of npm. -.SS "\fBnode -v\fR" +.SS "\fBChecking node version\fR" .P For most users, in most circumstances, the best version of Node will be the latest long-term support (LTS) release. Those of you who want access to new ECMAscript features or bleeding-edge changes to Node's standard library may be running a newer version, and some may be required to run an older version of Node because of enterprise change control policies. That's OK! But in general, the npm team recommends that most users run Node.js LTS. -.SS "\fBnpm config get registry\fR" +.SS "\fBChecking configured npm registry\fR" .P You may be installing from private package registries for your project or company. That's great! Others may be following tutorials or StackOverflow questions in an effort to troubleshoot problems you may be having. Sometimes, this may entail changing the registry you're pointing at. This part of \fBnpm doctor\fR just lets you, and maybe whoever's helping you with support, know that you're not using the default registry. -.SS "\fBwhich git\fR" +.SS "\fBChecking for git executable in PATH\fR" .P While it's documented in the README, it may not be obvious that npm needs Git installed to do many of the things that it does. Also, in some cases \[en] especially on Windows \[en] you may have Git set up in such a way that it's not accessible via your \fBPATH\fR so that npm can find it. This check ensures that Git is available. 
.SS "Permissions checks" diff --git a/deps/npm/man/man1/npm-ls.1 b/deps/npm/man/man1/npm-ls.1 index 9c8a25d0c9237a..3d4728693455e4 100644 --- a/deps/npm/man/man1/npm-ls.1 +++ b/deps/npm/man/man1/npm-ls.1 @@ -20,7 +20,7 @@ Positional arguments are \fBname@version-range\fR identifiers, which will limit .P .RS 2 .nf -npm@10.5.2 /path/to/npm +npm@10.7.0 /path/to/npm └─┬ init-package-json@0.0.4 └── promzard@0.1.5 .fi diff --git a/deps/npm/man/man1/npm-profile.1 b/deps/npm/man/man1/npm-profile.1 index 401c26f05b2d5c..423167e2efafc4 100644 --- a/deps/npm/man/man1/npm-profile.1 +++ b/deps/npm/man/man1/npm-profile.1 @@ -24,27 +24,16 @@ Change your profile information on the registry. Note that this command depends .P .RS 2 .nf -+-----------------+---------------------------+ -| name | example | -+-----------------+---------------------------+ -| email | me@example.com (verified) | -+-----------------+---------------------------+ -| two factor auth | auth-and-writes | -+-----------------+---------------------------+ -| fullname | Example User | -+-----------------+---------------------------+ -| homepage | | -+-----------------+---------------------------+ -| freenode | | -+-----------------+---------------------------+ -| twitter | | -+-----------------+---------------------------+ -| github | | -+-----------------+---------------------------+ -| created | 2015-02-26T01:38:35.892Z | -+-----------------+---------------------------+ -| updated | 2017-10-02T21:29:45.922Z | -+-----------------+---------------------------+ +name: example +email: e@example.com (verified) +two-factor auth: auth-and-writes +fullname: Example User +homepage: +freenode: +twitter: +github: +created: 2015-02-26T01:38:35.892Z +updated: 2017-10-02T21:29:45.922Z .fi .RE .RS 0 diff --git a/deps/npm/man/man1/npm-search.1 b/deps/npm/man/man1/npm-search.1 index 7927e562bf9e59..041bd1113010f5 100644 --- a/deps/npm/man/man1/npm-search.1 +++ b/deps/npm/man/man1/npm-search.1 @@ -5,7 +5,7 @@ .P .RS 2 .nf -npm search \[lB]search terms ...\[rB] +npm search \[lB] ...\[rB] aliases: find, s, se .fi @@ -22,16 +22,6 @@ Search also allows targeting of maintainers in search results, by prefixing thei .P If a term starts with \fB/\fR, then it's interpreted as a regular expression and supports standard JavaScript RegExp syntax. In this case search will ignore a trailing \fB/\fR . (Note you must escape or quote many regular expression characters in most shells.) .SS "Configuration" -.SS "\fBlong\fR" -.RS 0 -.IP \(bu 4 -Default: false -.IP \(bu 4 -Type: Boolean -.RE 0 - -.P -Show extended information in \fBls\fR, \fBsearch\fR, and \fBhelp-search\fR. .SS "\fBjson\fR" .RS 0 .IP \(bu 4 diff --git a/deps/npm/man/man1/npm-token.1 b/deps/npm/man/man1/npm-token.1 index 431302c44a7757..2b35907fb49681 100644 --- a/deps/npm/man/man1/npm-token.1 +++ b/deps/npm/man/man1/npm-token.1 @@ -23,23 +23,13 @@ This lets you list, create and revoke authentication tokens. 
.P .RS 2 .nf -+--------+---------+------------+----------+----------------+ -| id | token | created | read-only | CIDR whitelist | -+--------+---------+------------+----------+----------------+ -| 7f3134 | 1fa9ba… | 2017-10-02 | yes | | -+--------+---------+------------+----------+----------------+ -| c03241 | af7aef… | 2017-10-02 | no | 192.168.0.1/24 | -+--------+---------+------------+----------+----------------+ -| e0cf92 | 3a436a… | 2017-10-02 | no | | -+--------+---------+------------+----------+----------------+ -| 63eb9d | 74ef35… | 2017-09-28 | no | | -+--------+---------+------------+----------+----------------+ -| 2daaa8 | cbad5f… | 2017-09-26 | no | | -+--------+---------+------------+----------+----------------+ -| 68c2fe | 127e51… | 2017-09-23 | no | | -+--------+---------+------------+----------+----------------+ -| 6334e1 | 1dadd1… | 2017-09-23 | no | | -+--------+---------+------------+----------+----------------+ +Read only token npm_1f… with id 7f3134 created 2017-10-21 + +Publish token npm_af… with id c03241 created 2017-10-02 +with IP Whitelist: 192.168.0.1/24 + +Publish token npm_… with id e0cf92 created 2017-10-02 + .fi .RE .RS 0 @@ -52,15 +42,7 @@ Currently, the cli can not generate automation tokens. Please refer to the \fBdo .P .RS 2 .nf -+----------------+--------------------------------------+ -| token | a73c9572-f1b9-8983-983d-ba3ac3cc913d | -+----------------+--------------------------------------+ -| cidr_whitelist | | -+----------------+--------------------------------------+ -| readonly | false | -+----------------+--------------------------------------+ -| created | 2017-10-02T07:52:24.838Z | -+----------------+--------------------------------------+ +Created publish token a73c9572-f1b9-8983-983d-ba3ac3cc913d .fi .RE .RS 0 diff --git a/deps/npm/man/man1/npm.1 b/deps/npm/man/man1/npm.1 index 0df8bb7e2a1962..6fb97e843320ea 100644 --- a/deps/npm/man/man1/npm.1 +++ b/deps/npm/man/man1/npm.1 @@ -12,7 +12,7 @@ npm Note: This command is unaware of workspaces. .SS "Version" .P -10.5.2 +10.7.0 .SS "Description" .P npm is the package manager for the Node JavaScript platform. It puts modules in place so that node can find them, and manages dependency conflicts intelligently. diff --git a/deps/npm/man/man5/npm-json.5 b/deps/npm/man/man5/npm-json.5 index 69dcb58aa1b1ef..f4d903e2b114cf 100644 --- a/deps/npm/man/man5/npm-json.5 +++ b/deps/npm/man/man5/npm-json.5 @@ -840,6 +840,8 @@ If you need to make specific changes to dependencies of your dependencies, for e .P Overrides provide a way to replace a package in your dependency tree with another version, or another package entirely. These changes can be scoped as specific or as vague as desired. .P +Overrides are only considered in the root \fBpackage.json\fR file for a project. Overrides in installed dependencies (including npm help workspaces) are not considered in dependency tree resolution. Published packages may dictate their resolutions by pinning dependencies or using an \fB\fBnpm-shrinkwrap.json\fR\fR \fI\(la/configuring-npm/npm-shrinkwrap-json\(ra\fR file. 
+.P To make sure the package \fBfoo\fR is always installed as version \fB1.0.0\fR no matter what version your dependencies rely on: .P .RS 2 diff --git a/deps/npm/man/man5/package-json.5 b/deps/npm/man/man5/package-json.5 index 69dcb58aa1b1ef..f4d903e2b114cf 100644 --- a/deps/npm/man/man5/package-json.5 +++ b/deps/npm/man/man5/package-json.5 @@ -840,6 +840,8 @@ If you need to make specific changes to dependencies of your dependencies, for e .P Overrides provide a way to replace a package in your dependency tree with another version, or another package entirely. These changes can be scoped as specific or as vague as desired. .P +Overrides are only considered in the root \fBpackage.json\fR file for a project. Overrides in installed dependencies (including npm help workspaces) are not considered in dependency tree resolution. Published packages may dictate their resolutions by pinning dependencies or using an \fB\fBnpm-shrinkwrap.json\fR\fR \fI\(la/configuring-npm/npm-shrinkwrap-json\(ra\fR file. +.P To make sure the package \fBfoo\fR is always installed as version \fB1.0.0\fR no matter what version your dependencies rely on: .P .RS 2 diff --git a/deps/npm/man/man5/package-lock-json.5 b/deps/npm/man/man5/package-lock-json.5 index 426901d95437d9..c6a92af27f189c 100644 --- a/deps/npm/man/man5/package-lock-json.5 +++ b/deps/npm/man/man5/package-lock-json.5 @@ -19,6 +19,8 @@ Optimize the installation process by allowing npm to skip repeated metadata reso As of npm v7, lockfiles include enough information to gain a complete picture of the package tree, reducing the need to read \fBpackage.json\fR files, and allowing for significant performance improvements. .RE 0 +.P +When \fBnpm\fR creates or updates \fBpackage-lock.json\fR, it will infer line endings and indentation from \fBpackage.json\fR so that the formatting of both files matches. .SS "\fBpackage-lock.json\fR vs \fBnpm-shrinkwrap.json\fR" .P Both of these files have the same format, and perform similar functions in the root of a project. diff --git a/deps/npm/node_modules/@colors/colors/LICENSE b/deps/npm/node_modules/@colors/colors/LICENSE deleted file mode 100644 index 6b86056199d2ac..00000000000000 --- a/deps/npm/node_modules/@colors/colors/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -MIT License - -Original Library - - Copyright (c) Marak Squires - -Additional Functionality - - Copyright (c) Sindre Sorhus (sindresorhus.com) - - Copyright (c) DABH (https://github.com/DABH) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
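[Editor's note: the package-lock.json man page addition above states that npm infers line endings and indentation from package.json when writing package-lock.json. The following is only a minimal illustrative sketch of that idea, not npm's actual implementation; the helper names detectFormat and writeLockfile are invented for this example.]

    const fs = require('fs')

    // Guess the indentation and line-ending style used by an existing JSON file.
    function detectFormat (raw) {
      // First indented key gives the indent string; fall back to two spaces.
      const [, indent = '  '] = raw.match(/^(\s+)"/m) || []
      const eol = raw.includes('\r\n') ? '\r\n' : '\n'
      return { indent, eol }
    }

    // Serialize lock data reusing the formatting detected from package.json.
    function writeLockfile (pkgJsonPath, lockPath, lockData) {
      const { indent, eol } = detectFormat(fs.readFileSync(pkgJsonPath, 'utf8'))
      const json = JSON.stringify(lockData, null, indent).split('\n').join(eol)
      fs.writeFileSync(lockPath, json + eol)
    }

The point of the design described in the man page is simply that both files stay diff-friendly in the same editor settings; the sketch above shows one plausible way such inference could work.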
diff --git a/deps/npm/node_modules/@colors/colors/examples/normal-usage.js b/deps/npm/node_modules/@colors/colors/examples/normal-usage.js deleted file mode 100644 index c4515653e288d7..00000000000000 --- a/deps/npm/node_modules/@colors/colors/examples/normal-usage.js +++ /dev/null @@ -1,82 +0,0 @@ -var colors = require('../lib/index'); - -console.log('First some yellow text'.yellow); - -console.log('Underline that text'.yellow.underline); - -console.log('Make it bold and red'.red.bold); - -console.log(('Double Raindows All Day Long').rainbow); - -console.log('Drop the bass'.trap); - -console.log('DROP THE RAINBOW BASS'.trap.rainbow); - -// styles not widely supported -console.log('Chains are also cool.'.bold.italic.underline.red); - -// styles not widely supported -console.log('So '.green + 'are'.underline + ' ' + 'inverse'.inverse - + ' styles! '.yellow.bold); -console.log('Zebras are so fun!'.zebra); - -// -// Remark: .strikethrough may not work with Mac OS Terminal App -// -console.log('This is ' + 'not'.strikethrough + ' fun.'); - -console.log('Background color attack!'.black.bgWhite); -console.log('Use random styles on everything!'.random); -console.log('America, Heck Yeah!'.america); - -// eslint-disable-next-line max-len -console.log('Blindingly '.brightCyan + 'bright? '.brightRed + 'Why '.brightYellow + 'not?!'.brightGreen); - -console.log('Setting themes is useful'); - -// -// Custom themes -// -console.log('Generic logging theme as JSON'.green.bold.underline); -// Load theme with JSON literal -colors.setTheme({ - silly: 'rainbow', - input: 'grey', - verbose: 'cyan', - prompt: 'grey', - info: 'green', - data: 'grey', - help: 'cyan', - warn: 'yellow', - debug: 'blue', - error: 'red', -}); - -// outputs red text -console.log('this is an error'.error); - -// outputs yellow text -console.log('this is a warning'.warn); - -// outputs grey text -console.log('this is an input'.input); - -console.log('Generic logging theme as file'.green.bold.underline); - -// Load a theme from file -try { - colors.setTheme(require(__dirname + '/../themes/generic-logging.js')); -} catch (err) { - console.log(err); -} - -// outputs red text -console.log('this is an error'.error); - -// outputs yellow text -console.log('this is a warning'.warn); - -// outputs grey text -console.log('this is an input'.input); - -// console.log("Don't summon".zalgo) diff --git a/deps/npm/node_modules/@colors/colors/examples/safe-string.js b/deps/npm/node_modules/@colors/colors/examples/safe-string.js deleted file mode 100644 index ed5f4ca468e10f..00000000000000 --- a/deps/npm/node_modules/@colors/colors/examples/safe-string.js +++ /dev/null @@ -1,78 +0,0 @@ -var colors = require('../safe'); - -console.log(colors.yellow('First some yellow text')); - -console.log(colors.yellow.underline('Underline that text')); - -console.log(colors.red.bold('Make it bold and red')); - -console.log(colors.rainbow('Double Raindows All Day Long')); - -console.log(colors.trap('Drop the bass')); - -console.log(colors.rainbow(colors.trap('DROP THE RAINBOW BASS'))); - -// styles not widely supported -console.log(colors.bold.italic.underline.red('Chains are also cool.')); - -// styles not widely supported -console.log(colors.green('So ') + colors.underline('are') + ' ' - + colors.inverse('inverse') + colors.yellow.bold(' styles! 
')); - -console.log(colors.zebra('Zebras are so fun!')); - -console.log('This is ' + colors.strikethrough('not') + ' fun.'); - - -console.log(colors.black.bgWhite('Background color attack!')); -console.log(colors.random('Use random styles on everything!')); -console.log(colors.america('America, Heck Yeah!')); - -// eslint-disable-next-line max-len -console.log(colors.brightCyan('Blindingly ') + colors.brightRed('bright? ') + colors.brightYellow('Why ') + colors.brightGreen('not?!')); - -console.log('Setting themes is useful'); - -// -// Custom themes -// -// console.log('Generic logging theme as JSON'.green.bold.underline); -// Load theme with JSON literal -colors.setTheme({ - silly: 'rainbow', - input: 'blue', - verbose: 'cyan', - prompt: 'grey', - info: 'green', - data: 'grey', - help: 'cyan', - warn: 'yellow', - debug: 'blue', - error: 'red', -}); - -// outputs red text -console.log(colors.error('this is an error')); - -// outputs yellow text -console.log(colors.warn('this is a warning')); - -// outputs blue text -console.log(colors.input('this is an input')); - - -// console.log('Generic logging theme as file'.green.bold.underline); - -// Load a theme from file -colors.setTheme(require(__dirname + '/../themes/generic-logging.js')); - -// outputs red text -console.log(colors.error('this is an error')); - -// outputs yellow text -console.log(colors.warn('this is a warning')); - -// outputs grey text -console.log(colors.input('this is an input')); - -// console.log(colors.zalgo("Don't summon him")) diff --git a/deps/npm/node_modules/@colors/colors/lib/colors.js b/deps/npm/node_modules/@colors/colors/lib/colors.js deleted file mode 100644 index d9fb08762fde51..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/colors.js +++ /dev/null @@ -1,211 +0,0 @@ -/* - -The MIT License (MIT) - -Original Library - - Copyright (c) Marak Squires - -Additional functionality - - Copyright (c) Sindre Sorhus (sindresorhus.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- -*/ - -var colors = {}; -module['exports'] = colors; - -colors.themes = {}; - -var util = require('util'); -var ansiStyles = colors.styles = require('./styles'); -var defineProps = Object.defineProperties; -var newLineRegex = new RegExp(/[\r\n]+/g); - -colors.supportsColor = require('./system/supports-colors').supportsColor; - -if (typeof colors.enabled === 'undefined') { - colors.enabled = colors.supportsColor() !== false; -} - -colors.enable = function() { - colors.enabled = true; -}; - -colors.disable = function() { - colors.enabled = false; -}; - -colors.stripColors = colors.strip = function(str) { - return ('' + str).replace(/\x1B\[\d+m/g, ''); -}; - -// eslint-disable-next-line no-unused-vars -var stylize = colors.stylize = function stylize(str, style) { - if (!colors.enabled) { - return str+''; - } - - var styleMap = ansiStyles[style]; - - // Stylize should work for non-ANSI styles, too - if (!styleMap && style in colors) { - // Style maps like trap operate as functions on strings; - // they don't have properties like open or close. - return colors[style](str); - } - - return styleMap.open + str + styleMap.close; -}; - -var matchOperatorsRe = /[|\\{}()[\]^$+*?.]/g; -var escapeStringRegexp = function(str) { - if (typeof str !== 'string') { - throw new TypeError('Expected a string'); - } - return str.replace(matchOperatorsRe, '\\$&'); -}; - -function build(_styles) { - var builder = function builder() { - return applyStyle.apply(builder, arguments); - }; - builder._styles = _styles; - // __proto__ is used because we must return a function, but there is - // no way to create a function with a different prototype. - builder.__proto__ = proto; - return builder; -} - -var styles = (function() { - var ret = {}; - ansiStyles.grey = ansiStyles.gray; - Object.keys(ansiStyles).forEach(function(key) { - ansiStyles[key].closeRe = - new RegExp(escapeStringRegexp(ansiStyles[key].close), 'g'); - ret[key] = { - get: function() { - return build(this._styles.concat(key)); - }, - }; - }); - return ret; -})(); - -var proto = defineProps(function colors() {}, styles); - -function applyStyle() { - var args = Array.prototype.slice.call(arguments); - - var str = args.map(function(arg) { - // Use weak equality check so we can colorize null/undefined in safe mode - if (arg != null && arg.constructor === String) { - return arg; - } else { - return util.inspect(arg); - } - }).join(' '); - - if (!colors.enabled || !str) { - return str; - } - - var newLinesPresent = str.indexOf('\n') != -1; - - var nestedStyles = this._styles; - - var i = nestedStyles.length; - while (i--) { - var code = ansiStyles[nestedStyles[i]]; - str = code.open + str.replace(code.closeRe, code.open) + code.close; - if (newLinesPresent) { - str = str.replace(newLineRegex, function(match) { - return code.close + match + code.open; - }); - } - } - - return str; -} - -colors.setTheme = function(theme) { - if (typeof theme === 'string') { - console.log('colors.setTheme now only accepts an object, not a string. ' + - 'If you are trying to set a theme from a file, it is now your (the ' + - 'caller\'s) responsibility to require the file. 
The old syntax ' + - 'looked like colors.setTheme(__dirname + ' + - '\'/../themes/generic-logging.js\'); The new syntax looks like '+ - 'colors.setTheme(require(__dirname + ' + - '\'/../themes/generic-logging.js\'));'); - return; - } - for (var style in theme) { - (function(style) { - colors[style] = function(str) { - if (typeof theme[style] === 'object') { - var out = str; - for (var i in theme[style]) { - out = colors[theme[style][i]](out); - } - return out; - } - return colors[theme[style]](str); - }; - })(style); - } -}; - -function init() { - var ret = {}; - Object.keys(styles).forEach(function(name) { - ret[name] = { - get: function() { - return build([name]); - }, - }; - }); - return ret; -} - -var sequencer = function sequencer(map, str) { - var exploded = str.split(''); - exploded = exploded.map(map); - return exploded.join(''); -}; - -// custom formatter methods -colors.trap = require('./custom/trap'); -colors.zalgo = require('./custom/zalgo'); - -// maps -colors.maps = {}; -colors.maps.america = require('./maps/america')(colors); -colors.maps.zebra = require('./maps/zebra')(colors); -colors.maps.rainbow = require('./maps/rainbow')(colors); -colors.maps.random = require('./maps/random')(colors); - -for (var map in colors.maps) { - (function(map) { - colors[map] = function(str) { - return sequencer(colors.maps[map], str); - }; - })(map); -} - -defineProps(colors, init()); diff --git a/deps/npm/node_modules/@colors/colors/lib/custom/trap.js b/deps/npm/node_modules/@colors/colors/lib/custom/trap.js deleted file mode 100644 index fbccf88dede0b8..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/custom/trap.js +++ /dev/null @@ -1,46 +0,0 @@ -module['exports'] = function runTheTrap(text, options) { - var result = ''; - text = text || 'Run the trap, drop the bass'; - text = text.split(''); - var trap = { - a: ['\u0040', '\u0104', '\u023a', '\u0245', '\u0394', '\u039b', '\u0414'], - b: ['\u00df', '\u0181', '\u0243', '\u026e', '\u03b2', '\u0e3f'], - c: ['\u00a9', '\u023b', '\u03fe'], - d: ['\u00d0', '\u018a', '\u0500', '\u0501', '\u0502', '\u0503'], - e: ['\u00cb', '\u0115', '\u018e', '\u0258', '\u03a3', '\u03be', '\u04bc', - '\u0a6c'], - f: ['\u04fa'], - g: ['\u0262'], - h: ['\u0126', '\u0195', '\u04a2', '\u04ba', '\u04c7', '\u050a'], - i: ['\u0f0f'], - j: ['\u0134'], - k: ['\u0138', '\u04a0', '\u04c3', '\u051e'], - l: ['\u0139'], - m: ['\u028d', '\u04cd', '\u04ce', '\u0520', '\u0521', '\u0d69'], - n: ['\u00d1', '\u014b', '\u019d', '\u0376', '\u03a0', '\u048a'], - o: ['\u00d8', '\u00f5', '\u00f8', '\u01fe', '\u0298', '\u047a', '\u05dd', - '\u06dd', '\u0e4f'], - p: ['\u01f7', '\u048e'], - q: ['\u09cd'], - r: ['\u00ae', '\u01a6', '\u0210', '\u024c', '\u0280', '\u042f'], - s: ['\u00a7', '\u03de', '\u03df', '\u03e8'], - t: ['\u0141', '\u0166', '\u0373'], - u: ['\u01b1', '\u054d'], - v: ['\u05d8'], - w: ['\u0428', '\u0460', '\u047c', '\u0d70'], - x: ['\u04b2', '\u04fe', '\u04fc', '\u04fd'], - y: ['\u00a5', '\u04b0', '\u04cb'], - z: ['\u01b5', '\u0240'], - }; - text.forEach(function(c) { - c = c.toLowerCase(); - var chars = trap[c] || [' ']; - var rand = Math.floor(Math.random() * chars.length); - if (typeof trap[c] !== 'undefined') { - result += trap[c][rand]; - } else { - result += c; - } - }); - return result; -}; diff --git a/deps/npm/node_modules/@colors/colors/lib/custom/zalgo.js b/deps/npm/node_modules/@colors/colors/lib/custom/zalgo.js deleted file mode 100644 index 01bdd2b802f626..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/custom/zalgo.js +++ /dev/null 
@@ -1,109 +0,0 @@ -// please no -module['exports'] = function zalgo(text, options) { - text = text || ' he is here '; - var soul = { - 'up': [ - '̍', '̎', '̄', '̅', - '̿', '̑', '̆', '̐', - '͒', '͗', '͑', '̇', - '̈', '̊', '͂', '̓', - '̈', '͊', '͋', '͌', - '̃', '̂', '̌', '͐', - '̀', '́', '̋', '̏', - '̒', '̓', '̔', '̽', - '̉', 'ͣ', 'ͤ', 'ͥ', - 'ͦ', 'ͧ', 'ͨ', 'ͩ', - 'ͪ', 'ͫ', 'ͬ', 'ͭ', - 'ͮ', 'ͯ', '̾', '͛', - '͆', '̚', - ], - 'down': [ - '̖', '̗', '̘', '̙', - '̜', '̝', '̞', '̟', - '̠', '̤', '̥', '̦', - '̩', '̪', '̫', '̬', - '̭', '̮', '̯', '̰', - '̱', '̲', '̳', '̹', - '̺', '̻', '̼', 'ͅ', - '͇', '͈', '͉', '͍', - '͎', '͓', '͔', '͕', - '͖', '͙', '͚', '̣', - ], - 'mid': [ - '̕', '̛', '̀', '́', - '͘', '̡', '̢', '̧', - '̨', '̴', '̵', '̶', - '͜', '͝', '͞', - '͟', '͠', '͢', '̸', - '̷', '͡', ' ҉', - ], - }; - var all = [].concat(soul.up, soul.down, soul.mid); - - function randomNumber(range) { - var r = Math.floor(Math.random() * range); - return r; - } - - function isChar(character) { - var bool = false; - all.filter(function(i) { - bool = (i === character); - }); - return bool; - } - - - function heComes(text, options) { - var result = ''; - var counts; - var l; - options = options || {}; - options['up'] = - typeof options['up'] !== 'undefined' ? options['up'] : true; - options['mid'] = - typeof options['mid'] !== 'undefined' ? options['mid'] : true; - options['down'] = - typeof options['down'] !== 'undefined' ? options['down'] : true; - options['size'] = - typeof options['size'] !== 'undefined' ? options['size'] : 'maxi'; - text = text.split(''); - for (l in text) { - if (isChar(l)) { - continue; - } - result = result + text[l]; - counts = {'up': 0, 'down': 0, 'mid': 0}; - switch (options.size) { - case 'mini': - counts.up = randomNumber(8); - counts.mid = randomNumber(2); - counts.down = randomNumber(8); - break; - case 'maxi': - counts.up = randomNumber(16) + 3; - counts.mid = randomNumber(4) + 1; - counts.down = randomNumber(64) + 3; - break; - default: - counts.up = randomNumber(8) + 1; - counts.mid = randomNumber(6) / 2; - counts.down = randomNumber(8) + 1; - break; - } - - var arr = ['up', 'mid', 'down']; - for (var d in arr) { - var index = arr[d]; - for (var i = 0; i <= counts[index]; i++) { - if (options[index]) { - result = result + soul[index][randomNumber(soul[index].length)]; - } - } - } - } - return result; - } - // don't summon him - return heComes(text, options); -}; diff --git a/deps/npm/node_modules/@colors/colors/lib/extendStringPrototype.js b/deps/npm/node_modules/@colors/colors/lib/extendStringPrototype.js deleted file mode 100644 index 46fd386a915a67..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/extendStringPrototype.js +++ /dev/null @@ -1,110 +0,0 @@ -var colors = require('./colors'); - -module['exports'] = function() { - // - // Extends prototype of native string object to allow for "foo".red syntax - // - var addProperty = function(color, func) { - String.prototype.__defineGetter__(color, func); - }; - - addProperty('strip', function() { - return colors.strip(this); - }); - - addProperty('stripColors', function() { - return colors.strip(this); - }); - - addProperty('trap', function() { - return colors.trap(this); - }); - - addProperty('zalgo', function() { - return colors.zalgo(this); - }); - - addProperty('zebra', function() { - return colors.zebra(this); - }); - - addProperty('rainbow', function() { - return colors.rainbow(this); - }); - - addProperty('random', function() { - return colors.random(this); - }); - - addProperty('america', function() { - return 
colors.america(this); - }); - - // - // Iterate through all default styles and colors - // - var x = Object.keys(colors.styles); - x.forEach(function(style) { - addProperty(style, function() { - return colors.stylize(this, style); - }); - }); - - function applyTheme(theme) { - // - // Remark: This is a list of methods that exist - // on String that you should not overwrite. - // - var stringPrototypeBlacklist = [ - '__defineGetter__', '__defineSetter__', '__lookupGetter__', - '__lookupSetter__', 'charAt', 'constructor', 'hasOwnProperty', - 'isPrototypeOf', 'propertyIsEnumerable', 'toLocaleString', 'toString', - 'valueOf', 'charCodeAt', 'indexOf', 'lastIndexOf', 'length', - 'localeCompare', 'match', 'repeat', 'replace', 'search', 'slice', - 'split', 'substring', 'toLocaleLowerCase', 'toLocaleUpperCase', - 'toLowerCase', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', - ]; - - Object.keys(theme).forEach(function(prop) { - if (stringPrototypeBlacklist.indexOf(prop) !== -1) { - console.log('warn: '.red + ('String.prototype' + prop).magenta + - ' is probably something you don\'t want to override. ' + - 'Ignoring style name'); - } else { - if (typeof(theme[prop]) === 'string') { - colors[prop] = colors[theme[prop]]; - addProperty(prop, function() { - return colors[prop](this); - }); - } else { - var themePropApplicator = function(str) { - var ret = str || this; - for (var t = 0; t < theme[prop].length; t++) { - ret = colors[theme[prop][t]](ret); - } - return ret; - }; - addProperty(prop, themePropApplicator); - colors[prop] = function(str) { - return themePropApplicator(str); - }; - } - } - }); - } - - colors.setTheme = function(theme) { - if (typeof theme === 'string') { - console.log('colors.setTheme now only accepts an object, not a string. ' + - 'If you are trying to set a theme from a file, it is now your (the ' + - 'caller\'s) responsibility to require the file. The old syntax ' + - 'looked like colors.setTheme(__dirname + ' + - '\'/../themes/generic-logging.js\'); The new syntax looks like '+ - 'colors.setTheme(require(__dirname + ' + - '\'/../themes/generic-logging.js\'));'); - return; - } else { - applyTheme(theme); - } - }; -}; diff --git a/deps/npm/node_modules/@colors/colors/lib/index.js b/deps/npm/node_modules/@colors/colors/lib/index.js deleted file mode 100644 index 9df5ab7df30770..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/index.js +++ /dev/null @@ -1,13 +0,0 @@ -var colors = require('./colors'); -module['exports'] = colors; - -// Remark: By default, colors will add style properties to String.prototype. 
-// -// If you don't wish to extend String.prototype, you can do this instead and -// native String will not be touched: -// -// var colors = require('colors/safe); -// colors.red("foo") -// -// -require('./extendStringPrototype')(); diff --git a/deps/npm/node_modules/@colors/colors/lib/maps/america.js b/deps/npm/node_modules/@colors/colors/lib/maps/america.js deleted file mode 100644 index dc96903328989f..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/maps/america.js +++ /dev/null @@ -1,10 +0,0 @@ -module['exports'] = function(colors) { - return function(letter, i, exploded) { - if (letter === ' ') return letter; - switch (i%3) { - case 0: return colors.red(letter); - case 1: return colors.white(letter); - case 2: return colors.blue(letter); - } - }; -}; diff --git a/deps/npm/node_modules/@colors/colors/lib/maps/rainbow.js b/deps/npm/node_modules/@colors/colors/lib/maps/rainbow.js deleted file mode 100644 index 874508da8ed17e..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/maps/rainbow.js +++ /dev/null @@ -1,11 +0,0 @@ -module['exports'] = function(colors) { - // RoY G BiV - var rainbowColors = ['red', 'yellow', 'green', 'blue', 'magenta']; - return function(letter, i, exploded) { - if (letter === ' ') { - return letter; - } else { - return colors[rainbowColors[i++ % rainbowColors.length]](letter); - } - }; -}; diff --git a/deps/npm/node_modules/@colors/colors/lib/maps/random.js b/deps/npm/node_modules/@colors/colors/lib/maps/random.js deleted file mode 100644 index 3d82a39ec0fab4..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/maps/random.js +++ /dev/null @@ -1,11 +0,0 @@ -module['exports'] = function(colors) { - var available = ['underline', 'inverse', 'grey', 'yellow', 'red', 'green', - 'blue', 'white', 'cyan', 'magenta', 'brightYellow', 'brightRed', - 'brightGreen', 'brightBlue', 'brightWhite', 'brightCyan', 'brightMagenta']; - return function(letter, i, exploded) { - return letter === ' ' ? letter : - colors[ - available[Math.round(Math.random() * (available.length - 2))] - ](letter); - }; -}; diff --git a/deps/npm/node_modules/@colors/colors/lib/maps/zebra.js b/deps/npm/node_modules/@colors/colors/lib/maps/zebra.js deleted file mode 100644 index fa73623544a82c..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/maps/zebra.js +++ /dev/null @@ -1,5 +0,0 @@ -module['exports'] = function(colors) { - return function(letter, i, exploded) { - return i % 2 === 0 ? letter : colors.inverse(letter); - }; -}; diff --git a/deps/npm/node_modules/@colors/colors/lib/styles.js b/deps/npm/node_modules/@colors/colors/lib/styles.js deleted file mode 100644 index 011dafd8c28f70..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/styles.js +++ /dev/null @@ -1,95 +0,0 @@ -/* -The MIT License (MIT) - -Copyright (c) Sindre Sorhus (sindresorhus.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -*/ - -var styles = {}; -module['exports'] = styles; - -var codes = { - reset: [0, 0], - - bold: [1, 22], - dim: [2, 22], - italic: [3, 23], - underline: [4, 24], - inverse: [7, 27], - hidden: [8, 28], - strikethrough: [9, 29], - - black: [30, 39], - red: [31, 39], - green: [32, 39], - yellow: [33, 39], - blue: [34, 39], - magenta: [35, 39], - cyan: [36, 39], - white: [37, 39], - gray: [90, 39], - grey: [90, 39], - - brightRed: [91, 39], - brightGreen: [92, 39], - brightYellow: [93, 39], - brightBlue: [94, 39], - brightMagenta: [95, 39], - brightCyan: [96, 39], - brightWhite: [97, 39], - - bgBlack: [40, 49], - bgRed: [41, 49], - bgGreen: [42, 49], - bgYellow: [43, 49], - bgBlue: [44, 49], - bgMagenta: [45, 49], - bgCyan: [46, 49], - bgWhite: [47, 49], - bgGray: [100, 49], - bgGrey: [100, 49], - - bgBrightRed: [101, 49], - bgBrightGreen: [102, 49], - bgBrightYellow: [103, 49], - bgBrightBlue: [104, 49], - bgBrightMagenta: [105, 49], - bgBrightCyan: [106, 49], - bgBrightWhite: [107, 49], - - // legacy styles for colors pre v1.0.0 - blackBG: [40, 49], - redBG: [41, 49], - greenBG: [42, 49], - yellowBG: [43, 49], - blueBG: [44, 49], - magentaBG: [45, 49], - cyanBG: [46, 49], - whiteBG: [47, 49], - -}; - -Object.keys(codes).forEach(function(key) { - var val = codes[key]; - var style = styles[key] = []; - style.open = '\u001b[' + val[0] + 'm'; - style.close = '\u001b[' + val[1] + 'm'; -}); diff --git a/deps/npm/node_modules/@colors/colors/lib/system/has-flag.js b/deps/npm/node_modules/@colors/colors/lib/system/has-flag.js deleted file mode 100644 index a347dd4d7a697e..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/system/has-flag.js +++ /dev/null @@ -1,35 +0,0 @@ -/* -MIT License - -Copyright (c) Sindre Sorhus (sindresorhus.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -*/ - -'use strict'; - -module.exports = function(flag, argv) { - argv = argv || process.argv; - - var terminatorPos = argv.indexOf('--'); - var prefix = /^-{1,2}/.test(flag) ? 
'' : '--'; - var pos = argv.indexOf(prefix + flag); - - return pos !== -1 && (terminatorPos === -1 ? true : pos < terminatorPos); -}; diff --git a/deps/npm/node_modules/@colors/colors/lib/system/supports-colors.js b/deps/npm/node_modules/@colors/colors/lib/system/supports-colors.js deleted file mode 100644 index f1f9c8ff3da284..00000000000000 --- a/deps/npm/node_modules/@colors/colors/lib/system/supports-colors.js +++ /dev/null @@ -1,151 +0,0 @@ -/* -The MIT License (MIT) - -Copyright (c) Sindre Sorhus (sindresorhus.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -*/ - -'use strict'; - -var os = require('os'); -var hasFlag = require('./has-flag.js'); - -var env = process.env; - -var forceColor = void 0; -if (hasFlag('no-color') || hasFlag('no-colors') || hasFlag('color=false')) { - forceColor = false; -} else if (hasFlag('color') || hasFlag('colors') || hasFlag('color=true') - || hasFlag('color=always')) { - forceColor = true; -} -if ('FORCE_COLOR' in env) { - forceColor = env.FORCE_COLOR.length === 0 - || parseInt(env.FORCE_COLOR, 10) !== 0; -} - -function translateLevel(level) { - if (level === 0) { - return false; - } - - return { - level: level, - hasBasic: true, - has256: level >= 2, - has16m: level >= 3, - }; -} - -function supportsColor(stream) { - if (forceColor === false) { - return 0; - } - - if (hasFlag('color=16m') || hasFlag('color=full') - || hasFlag('color=truecolor')) { - return 3; - } - - if (hasFlag('color=256')) { - return 2; - } - - if (stream && !stream.isTTY && forceColor !== true) { - return 0; - } - - var min = forceColor ? 1 : 0; - - if (process.platform === 'win32') { - // Node.js 7.5.0 is the first version of Node.js to include a patch to - // libuv that enables 256 color output on Windows. Anything earlier and it - // won't work. However, here we target Node.js 8 at minimum as it is an LTS - // release, and Node.js 7 is not. Windows 10 build 10586 is the first - // Windows release that supports 256 colors. Windows 10 build 14931 is the - // first release that supports 16m/TrueColor. - var osRelease = os.release().split('.'); - if (Number(process.versions.node.split('.')[0]) >= 8 - && Number(osRelease[0]) >= 10 && Number(osRelease[2]) >= 10586) { - return Number(osRelease[2]) >= 14931 ? 
3 : 2; - } - - return 1; - } - - if ('CI' in env) { - if (['TRAVIS', 'CIRCLECI', 'APPVEYOR', 'GITLAB_CI'].some(function(sign) { - return sign in env; - }) || env.CI_NAME === 'codeship') { - return 1; - } - - return min; - } - - if ('TEAMCITY_VERSION' in env) { - return (/^(9\.(0*[1-9]\d*)\.|\d{2,}\.)/.test(env.TEAMCITY_VERSION) ? 1 : 0 - ); - } - - if ('TERM_PROGRAM' in env) { - var version = parseInt((env.TERM_PROGRAM_VERSION || '').split('.')[0], 10); - - switch (env.TERM_PROGRAM) { - case 'iTerm.app': - return version >= 3 ? 3 : 2; - case 'Hyper': - return 3; - case 'Apple_Terminal': - return 2; - // No default - } - } - - if (/-256(color)?$/i.test(env.TERM)) { - return 2; - } - - if (/^screen|^xterm|^vt100|^rxvt|color|ansi|cygwin|linux/i.test(env.TERM)) { - return 1; - } - - if ('COLORTERM' in env) { - return 1; - } - - if (env.TERM === 'dumb') { - return min; - } - - return min; -} - -function getSupportLevel(stream) { - var level = supportsColor(stream); - return translateLevel(level); -} - -module.exports = { - supportsColor: getSupportLevel, - stdout: getSupportLevel(process.stdout), - stderr: getSupportLevel(process.stderr), -}; diff --git a/deps/npm/node_modules/@colors/colors/package.json b/deps/npm/node_modules/@colors/colors/package.json deleted file mode 100644 index cb87f20953886a..00000000000000 --- a/deps/npm/node_modules/@colors/colors/package.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "@colors/colors", - "description": "get colors in your node.js console", - "version": "1.5.0", - "author": "DABH", - "contributors": [ - { - "name": "DABH", - "url": "https://github.com/DABH" - } - ], - "homepage": "https://github.com/DABH/colors.js", - "bugs": "https://github.com/DABH/colors.js/issues", - "keywords": [ - "ansi", - "terminal", - "colors" - ], - "repository": { - "type": "git", - "url": "http://github.com/DABH/colors.js.git" - }, - "license": "MIT", - "scripts": { - "lint": "eslint . --fix", - "test": "export FORCE_COLOR=1 && node tests/basic-test.js && node tests/safe-test.js" - }, - "engines": { - "node": ">=0.1.90" - }, - "main": "lib/index.js", - "files": [ - "examples", - "lib", - "LICENSE", - "safe.js", - "themes", - "index.d.ts", - "safe.d.ts" - ], - "devDependencies": { - "eslint": "^5.2.0", - "eslint-config-google": "^0.11.0" - } -} diff --git a/deps/npm/node_modules/@colors/colors/safe.js b/deps/npm/node_modules/@colors/colors/safe.js deleted file mode 100644 index a013d542464854..00000000000000 --- a/deps/npm/node_modules/@colors/colors/safe.js +++ /dev/null @@ -1,10 +0,0 @@ -// -// Remark: Requiring this file will use the "safe" colors API, -// which will not touch String.prototype. 
-// -// var colors = require('colors/safe'); -// colors.red("foo") -// -// -var colors = require('./lib/colors'); -module['exports'] = colors; diff --git a/deps/npm/node_modules/@colors/colors/themes/generic-logging.js b/deps/npm/node_modules/@colors/colors/themes/generic-logging.js deleted file mode 100644 index 63adfe4ac31f9a..00000000000000 --- a/deps/npm/node_modules/@colors/colors/themes/generic-logging.js +++ /dev/null @@ -1,12 +0,0 @@ -module['exports'] = { - silly: 'rainbow', - input: 'grey', - verbose: 'cyan', - prompt: 'grey', - info: 'green', - data: 'grey', - help: 'cyan', - warn: 'yellow', - debug: 'blue', - error: 'red', -}; diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/index.js b/deps/npm/node_modules/@npmcli/arborist/bin/index.js index ff356fafab7c34..414c7eb468cd85 100755 --- a/deps/npm/node_modules/@npmcli/arborist/bin/index.js +++ b/deps/npm/node_modules/@npmcli/arborist/bin/index.js @@ -2,6 +2,7 @@ const fs = require('fs') const path = require('path') +const { time } = require('proc-log') const { bin, arb: options } = require('./lib/options') const version = require('../package.json').version @@ -72,11 +73,11 @@ for (const file of commandFiles) { log.info(name, options) - process.emit('time', totalTime) - process.emit('time', scriptTime) + const timeEnd = time.start(totalTime) + const scriptEnd = time.start(scriptTime) return command(options, (result) => { - process.emit('timeEnd', scriptTime) + scriptEnd() return { result, timing: { @@ -95,7 +96,7 @@ for (const file of commandFiles) { return err }) .then((r) => { - process.emit('timeEnd', totalTime) + timeEnd() if (bin.loglevel !== 'silent') { console[process.exitCode ? 'error' : 'log'](r) } diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/lib/logging.js b/deps/npm/node_modules/@npmcli/arborist/bin/lib/logging.js index ffb5544b21463e..f06716735de74c 100644 --- a/deps/npm/node_modules/@npmcli/arborist/bin/lib/logging.js +++ b/deps/npm/node_modules/@npmcli/arborist/bin/lib/logging.js @@ -1,4 +1,4 @@ -const log = require('proc-log') +const { log } = require('proc-log') const fs = require('fs') const { dirname } = require('path') const os = require('os') diff --git a/deps/npm/node_modules/@npmcli/arborist/bin/lib/timers.js b/deps/npm/node_modules/@npmcli/arborist/bin/lib/timers.js index 586dee7806dd0e..a7ec534f5c5a79 100644 --- a/deps/npm/node_modules/@npmcli/arborist/bin/lib/timers.js +++ b/deps/npm/node_modules/@npmcli/arborist/bin/lib/timers.js @@ -4,22 +4,22 @@ const log = require('./logging.js') const timers = new Map() const finished = new Map() -process.on('time', name => { - if (timers.has(name)) { - throw new Error('conflicting timer! ' + name) - } - timers.set(name, process.hrtime.bigint()) -}) - -process.on('timeEnd', name => { - if (!timers.has(name)) { - throw new Error('timer not started! ' + name) - } - const elapsed = Number(process.hrtime.bigint() - timers.get(name)) - timers.delete(name) - finished.set(name, elapsed) - if (options.timing) { - log.info('timeEnd', `${name} ${elapsed / 1e9}s`, log.meta({ force: options.timing === 'always' })) +process.on('time', (level, name) => { + if (level === 'start') { + if (timers.has(name)) { + throw new Error('conflicting timer! ' + name) + } + timers.set(name, process.hrtime.bigint()) + } else if (level === 'end') { + if (!timers.has(name)) { + throw new Error('timer not started! 
' + name) + } + const elapsed = Number(process.hrtime.bigint() - timers.get(name)) + timers.delete(name) + finished.set(name, elapsed) + if (options.timing) { + log.info('timeEnd', `${name} ${elapsed / 1e9}s`, log.meta({ force: options.timing === 'always' })) + } } }) diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/add-rm-pkg-deps.js b/deps/npm/node_modules/@npmcli/arborist/lib/add-rm-pkg-deps.js index c5cdc097a9fab7..2e30eb1de76264 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/add-rm-pkg-deps.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/add-rm-pkg-deps.js @@ -1,6 +1,6 @@ // add and remove dependency specs to/from pkg manifest -const log = require('proc-log') +const { log } = require('proc-log') const localeCompare = require('@isaacs/string-locale-compare')('en') const add = ({ pkg, add, saveBundle, saveType }) => { diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js index 75e4d373259a09..920403d231d6db 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/build-ideal-tree.js @@ -11,7 +11,7 @@ const treeCheck = require('../tree-check.js') const { readdirScoped } = require('@npmcli/fs') const { lstat, readlink } = require('fs/promises') const { depth } = require('treeverse') -const log = require('proc-log') +const { log, time } = require('proc-log') const { redact } = require('@npmcli/redact') const { @@ -179,7 +179,7 @@ module.exports = cls => class IdealTreeBuilder extends cls { options.rm = null } - process.emit('time', 'idealTree') + const timeEnd = time.start('idealTree') if (!options.add && !options.rm && !options.update && this.options.global) { throw new Error('global requires add, rm, or update option') @@ -205,7 +205,7 @@ module.exports = cls => class IdealTreeBuilder extends cls { await this.#pruneFailedOptional() await this.#checkEngineAndPlatform() } finally { - process.emit('timeEnd', 'idealTree') + timeEnd() this.finishTracker('idealTree') } @@ -278,7 +278,7 @@ module.exports = cls => class IdealTreeBuilder extends cls { // load the initial tree, either the virtualTree from a shrinkwrap, // or just the root node from a package.json async #initTree () { - process.emit('time', 'idealTree:init') + const timeEnd = time.start('idealTree:init') let root if (this.options.global) { root = await this.#globalRootNode() @@ -356,7 +356,7 @@ module.exports = cls => class IdealTreeBuilder extends cls { // if you want another one, load another copy. this.idealTree = tree this.virtualTree = null - process.emit('timeEnd', 'idealTree:init') + timeEnd() return tree }) } @@ -420,7 +420,7 @@ module.exports = cls => class IdealTreeBuilder extends cls { // process the add/rm requests by modifying the root node, and the // update.names request by queueing nodes dependent on those named. 
async #applyUserRequests (options) { - process.emit('time', 'idealTree:userRequests') + const timeEnd = time.start('idealTree:userRequests') const tree = this.idealTree.target if (!this.options.workspaces.length) { @@ -436,7 +436,7 @@ module.exports = cls => class IdealTreeBuilder extends cls { await Promise.all(appliedRequests) } - process.emit('timeEnd', 'idealTree:userRequests') + timeEnd() } async #applyUserRequestsToNode (tree, options) { @@ -463,7 +463,7 @@ module.exports = cls => class IdealTreeBuilder extends cls { } const dir = resolve(nm, name) const st = await lstat(dir) - .catch(/* istanbul ignore next */ er => null) + .catch(/* istanbul ignore next */ () => null) if (st && st.isSymbolicLink()) { const target = await readlink(dir) const real = resolve(dirname(dir), target).replace(/#/g, '%23') @@ -691,7 +691,7 @@ module.exports = cls => class IdealTreeBuilder extends cls { // if the lockfile is from node v5 or earlier, then we'll have to reload // all the manifests of everything we encounter. this is costly, but at // least it's just a one-time hit. - process.emit('time', 'idealTree:inflate') + const timeEnd = time.start('idealTree:inflate') // don't warn if we're not gonna actually write it back anyway. const heading = ancient ? 'ancient lockfile' : 'old lockfile' @@ -758,14 +758,14 @@ This is a one-time fix-up, please be patient... meta.originalLockfileVersion = defaultLockfileVersion } this.finishTracker('idealTree:inflate') - process.emit('timeEnd', 'idealTree:inflate') + timeEnd() } // at this point we have a virtual tree with the actual root node's // package deps, which may be partly or entirely incomplete, invalid // or extraneous. #buildDeps () { - process.emit('time', 'idealTree:buildDeps') + const timeEnd = time.start('idealTree:buildDeps') const tree = this.idealTree.target tree.assertRootOverrides() this.#depsQueue.push(tree) @@ -773,15 +773,14 @@ This is a one-time fix-up, please be patient... // in the override list log.silly('idealTree', 'buildDeps') this.addTracker('idealTree', tree.name, '') - return this.#buildDepStep() - .then(() => process.emit('timeEnd', 'idealTree:buildDeps')) + return this.#buildDepStep().then(timeEnd) } async #buildDepStep () { // removes tracker of previous dependency in the queue if (this.#currentDep) { const { location, name } = this.#currentDep - process.emit('timeEnd', `idealTree:${location || '#root'}`) + time.end(`idealTree:${location || '#root'}`) this.finishTracker('idealTree', name, location) this.#currentDep = null } @@ -807,7 +806,7 @@ This is a one-time fix-up, please be patient... this.#depsSeen.add(node) this.#currentDep = node - process.emit('time', `idealTree:${node.location || '#root'}`) + time.start(`idealTree:${node.location || '#root'}`) // if we're loading a _complete_ ideal tree, for a --package-lock-only // installation for example, we have to crack open the tarball and @@ -1025,7 +1024,7 @@ This is a one-time fix-up, please be patient... for (const e of this.#problemEdges(placed)) { promises.push(() => this.#fetchManifest(npa.resolve(e.name, e.spec, fromPath(placed, e))) - .catch(er => null) + .catch(() => null) ) } }, @@ -1274,7 +1273,7 @@ This is a one-time fix-up, please be patient... }) } - #linkFromSpec (name, spec, parent, edge) { + #linkFromSpec (name, spec, parent) { const realpath = spec.fetchSpec const { installLinks, legacyPeerDeps } = this return rpj(realpath + '/package.json').catch(() => ({})).then(pkg => { @@ -1449,7 +1448,7 @@ This is a one-time fix-up, please be patient... 
} #fixDepFlags () { - process.emit('time', 'idealTree:fixDepFlags') + const timeEnd = time.start('idealTree:fixDepFlags') const metaFromDisk = this.idealTree.meta.loadedFromDisk const flagsSuspect = this[_flagsSuspect] const mutateTree = this.#mutateTree @@ -1496,7 +1495,7 @@ This is a one-time fix-up, please be patient... } } - process.emit('timeEnd', 'idealTree:fixDepFlags') + timeEnd() } #idealTreePrune () { diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/index.js b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/index.js index 358f3e1b1a7598..ba180f354708a2 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/index.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/index.js @@ -30,7 +30,7 @@ const { resolve } = require('path') const { homedir } = require('os') const { depth } = require('treeverse') const mapWorkspaces = require('@npmcli/map-workspaces') -const log = require('proc-log') +const { log, time } = require('proc-log') const { saveTypeMap } = require('../add-rm-pkg-deps.js') const AuditReport = require('../audit-report.js') @@ -66,7 +66,7 @@ const lockfileVersion = lfv => { class Arborist extends Base { constructor (options = {}) { - process.emit('time', 'arborist:ctor') + const timeEnd = time.start('arborist:ctor') super(options) this.options = { nodeVersion: process.version, @@ -74,20 +74,26 @@ class Arborist extends Base { Arborist: this.constructor, binLinks: 'binLinks' in options ? !!options.binLinks : true, cache: options.cache || `${homedir()}/.npm/_cacache`, + dryRun: !!options.dryRun, + formatPackageLock: 'formatPackageLock' in options ? !!options.formatPackageLock : true, force: !!options.force, global: !!options.global, ignoreScripts: !!options.ignoreScripts, installStrategy: options.global ? 'shallow' : (options.installStrategy ? options.installStrategy : 'hoisted'), lockfileVersion: lockfileVersion(options.lockfileVersion), + packageLockOnly: !!options.packageLockOnly, packumentCache: options.packumentCache || new Map(), path: options.path || '.', rebuildBundle: 'rebuildBundle' in options ? !!options.rebuildBundle : true, replaceRegistryHost: options.replaceRegistryHost, + savePrefix: 'savePrefix' in options ? options.savePrefix : '^', scriptShell: options.scriptShell, workspaces: options.workspaces || [], workspacesEnabled: options.workspacesEnabled !== false, } - // TODO is this even used? If not is that a bug? + // TODO we only ever look at this.options.replaceRegistryHost, not + // this.replaceRegistryHost. Defaulting needs to be written back to + // this.options to work properly this.replaceRegistryHost = this.options.replaceRegistryHost = (!this.options.replaceRegistryHost || this.options.replaceRegistryHost === 'npmjs') ? 'registry.npmjs.org' : this.options.replaceRegistryHost @@ -96,8 +102,9 @@ class Arborist extends Base { throw new Error(`Invalid saveType ${options.saveType}`) } this.cache = resolve(this.options.cache) + this.diff = null this.path = resolve(this.options.path) - process.emit('timeEnd', 'arborist:ctor') + timeEnd() } // TODO: We should change these to static functions instead @@ -223,7 +230,7 @@ class Arborist extends Base { // XXX: deprecate separate method options objects. options = { ...this.options, ...options } - process.emit('time', 'audit') + const timeEnd = time.start('audit') let tree if (options.packageLock === false) { // build ideal tree @@ -246,10 +253,28 @@ class Arborist extends Base { } this.auditReport = await AuditReport.load(tree, options) const ret = options.fix ? 
this.reify(options) : this.auditReport - process.emit('timeEnd', 'audit') + timeEnd() this.finishTracker('audit') return ret } + + async dedupe (options = {}) { + // allow the user to set options on the ctor as well. + // XXX: deprecate separate method options objects. + options = { ...this.options, ...options } + const tree = await this.loadVirtual().catch(() => this.loadActual()) + const names = [] + for (const name of tree.inventory.query('name')) { + if (tree.inventory.query('name', name).size > 1) { + names.push(name) + } + } + return this.reify({ + ...options, + preferDedupe: true, + update: { names }, + }) + } } module.exports = Arborist diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/isolated-reifier.js b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/isolated-reifier.js index f4f1bb8e443624..1e60d0f696b26d 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/isolated-reifier.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/isolated-reifier.js @@ -1,7 +1,7 @@ const _makeIdealGraph = Symbol('makeIdealGraph') const _createIsolatedTree = Symbol.for('createIsolatedTree') const _createBundledTree = Symbol('createBundledTree') -const fs = require('fs') +const { mkdirSync } = require('fs') const pacote = require('pacote') const { join } = require('path') const { depth } = require('treeverse') @@ -108,7 +108,7 @@ module.exports = cls => class IsolatedReifier extends cls { '.store', `${node.name}@${node.version}` ) - fs.mkdirSync(dir, { recursive: true }) + mkdirSync(dir, { recursive: true }) // TODO this approach feels wrong // and shouldn't be necessary for shrinkwraps await pacote.extract(node.resolved, dir, { @@ -212,7 +212,7 @@ module.exports = cls => class IsolatedReifier extends cls { return { edges, nodes } } - async [_createIsolatedTree] (idealTree) { + async [_createIsolatedTree] () { await this[_makeIdealGraph](this.options) const proxiedIdealTree = this.idealGraph diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/load-actual.js b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/load-actual.js index 3ab5f5983768df..81c1bd11327753 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/load-actual.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/load-actual.js @@ -336,8 +336,8 @@ module.exports = cls => class ActualLoader extends cls { await this.#loadFSChildren(node.target) return Promise.all( [...node.target.children.entries()] - .filter(([name, kid]) => !did.has(kid.realpath)) - .map(([name, kid]) => this.#loadFSTree(kid)) + .filter(([, kid]) => !did.has(kid.realpath)) + .map(([, kid]) => this.#loadFSTree(kid)) ) } } diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/load-virtual.js b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/load-virtual.js index 9b681a47a83587..d96d4adc88a702 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/load-virtual.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/load-virtual.js @@ -283,7 +283,7 @@ module.exports = cls => class VirtualLoader extends cls { return node } - #loadLink (location, targetLoc, target, meta) { + #loadLink (location, targetLoc, target) { const path = resolve(this.path, location) const link = new Link({ installLinks: this.installLinks, diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/rebuild.js b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/rebuild.js index 422819b2104b7e..4f8730cde7f382 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/rebuild.js +++ 
b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/rebuild.js @@ -9,11 +9,8 @@ const binLinks = require('bin-links') const runScript = require('@npmcli/run-script') const { callLimit: promiseCallLimit } = require('promise-call-limit') const { resolve } = require('path') -const { - isNodeGypPackage, - defaultGypInstallScript, -} = require('@npmcli/node-gyp') -const log = require('proc-log') +const { isNodeGypPackage, defaultGypInstallScript } = require('@npmcli/node-gyp') +const { log, time } = require('proc-log') const boolEnv = b => b ? '1' : '' const sortNodes = (a, b) => @@ -54,7 +51,7 @@ module.exports = cls => class Builder extends cls { // separates links nodes so that it can run // prepare scripts and link bins in the expected order - process.emit('time', 'build') + const timeEnd = time.start('build') const { depNodes, @@ -70,7 +67,7 @@ module.exports = cls => class Builder extends cls { await this.#build(linkNodes, { type: 'links' }) } - process.emit('timeEnd', 'build') + timeEnd() } // if we don't have a set of nodes, then just rebuild @@ -147,7 +144,7 @@ module.exports = cls => class Builder extends cls { } async #build (nodes, { type = 'deps' }) { - process.emit('time', `build:${type}`) + const timeEnd = time.start(`build:${type}`) await this.#buildQueues(nodes) @@ -168,11 +165,11 @@ module.exports = cls => class Builder extends cls { await this.#runScripts('postinstall') } - process.emit('timeEnd', `build:${type}`) + timeEnd() } async #buildQueues (nodes) { - process.emit('time', 'build:queue') + const timeEnd = time.start('build:queue') const set = new Set() const promises = [] @@ -210,7 +207,7 @@ module.exports = cls => class Builder extends cls { } } } - process.emit('timeEnd', 'build:queue') + timeEnd() } async [_checkBins] (node) { @@ -286,7 +283,7 @@ module.exports = cls => class Builder extends cls { return } - process.emit('time', `build:run:${event}`) + const timeEnd = time.start(`build:run:${event}`) const stdio = this.options.foregroundScripts ? 'inherit' : 'pipe' const limit = this.options.foregroundScripts ? 1 : undefined await promiseCallLimit(queue.map(node => async () => { @@ -309,8 +306,7 @@ module.exports = cls => class Builder extends cls { return } - const timer = `build:run:${event}:${location}` - process.emit('time', timer) + const timeEndLocation = time.start(`build:run:${event}:${location}`) log.info('run', pkg._id, event, location, pkg.scripts[event]) const env = { npm_package_resolved: resolved, @@ -356,9 +352,9 @@ module.exports = cls => class Builder extends cls { ? this[_handleOptionalFailure](node, p) : p) - process.emit('timeEnd', timer) + timeEndLocation() }), { limit }) - process.emit('timeEnd', `build:run:${event}`) + timeEnd() } async #linkAllBins () { @@ -367,7 +363,7 @@ module.exports = cls => class Builder extends cls { return } - process.emit('time', 'build:link') + const timeEnd = time.start('build:link') const promises = [] // sort the queue by node path, so that the module-local collision // detector in bin-links will always resolve the same way. 
@@ -377,7 +373,7 @@ module.exports = cls => class Builder extends cls { } await promiseAllRejectLate(promises) - process.emit('timeEnd', 'build:link') + timeEnd() } async #createBinLinks (node) { @@ -385,7 +381,7 @@ module.exports = cls => class Builder extends cls { return } - process.emit('time', `build:link:${node.location}`) + const timeEnd = time.start(`build:link:${node.location}`) const p = binLinks({ pkg: node.package, @@ -399,6 +395,6 @@ module.exports = cls => class Builder extends cls { ? this[_handleOptionalFailure](node, p) : p) - process.emit('timeEnd', `build:link:${node.location}`) + timeEnd() } } diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/reify.js b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/reify.js index a70e21821ecb86..96704f6556e0da 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/arborist/reify.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/arborist/reify.js @@ -7,7 +7,7 @@ const npa = require('npm-package-arg') const semver = require('semver') const debug = require('../debug.js') const { walkUp } = require('walk-up-path') -const log = require('proc-log') +const { log, time } = require('proc-log') const hgi = require('hosted-git-info') const rpj = require('read-package-json-fast') @@ -38,119 +38,96 @@ const { saveTypeMap, hasSubKey } = require('../add-rm-pkg-deps.js') const Shrinkwrap = require('../shrinkwrap.js') const { defaultLockfileVersion } = Shrinkwrap -const _retiredPaths = Symbol('retiredPaths') -const _retiredUnchanged = Symbol('retiredUnchanged') -const _sparseTreeDirs = Symbol('sparseTreeDirs') -const _sparseTreeRoots = Symbol('sparseTreeRoots') -const _savePrefix = Symbol('savePrefix') +// Part of steps (steps need refactoring before we can do anything about these) const _retireShallowNodes = Symbol.for('retireShallowNodes') -const _getBundlesByDepth = Symbol('getBundlesByDepth') -const _registryResolved = Symbol('registryResolved') -const _addNodeToTrashList = Symbol.for('addNodeToTrashList') +const _loadBundlesAndUpdateTrees = Symbol.for('loadBundlesAndUpdateTrees') +const _submitQuickAudit = Symbol('submitQuickAudit') +const _addOmitsToTrashList = Symbol('addOmitsToTrashList') +const _unpackNewModules = Symbol.for('unpackNewModules') +const _build = Symbol.for('build') // shared by rebuild mixin const _trashList = Symbol.for('trashList') const _handleOptionalFailure = Symbol.for('handleOptionalFailure') const _loadTrees = Symbol.for('loadTrees') +// defined by rebuild mixin +const _checkBins = Symbol.for('checkBins') // shared symbols for swapping out when testing +// TODO tests should not be this deep into internals const _diffTrees = Symbol.for('diffTrees') const _createSparseTree = Symbol.for('createSparseTree') const _loadShrinkwrapsAndUpdateTrees = Symbol.for('loadShrinkwrapsAndUpdateTrees') -const _shrinkwrapInflated = Symbol('shrinkwrapInflated') -const _bundleUnpacked = Symbol('bundleUnpacked') -const _bundleMissing = Symbol('bundleMissing') const _reifyNode = Symbol.for('reifyNode') -const _extractOrLink = Symbol('extractOrLink') const _updateAll = Symbol.for('updateAll') const _updateNames = Symbol.for('updateNames') -// defined by rebuild mixin -const _checkBins = Symbol.for('checkBins') -const _symlink = Symbol('symlink') -const _warnDeprecated = Symbol('warnDeprecated') -const _loadBundlesAndUpdateTrees = Symbol.for('loadBundlesAndUpdateTrees') -const _submitQuickAudit = Symbol('submitQuickAudit') -const _unpackNewModules = Symbol.for('unpackNewModules') const _moveContents = 
Symbol.for('moveContents') const _moveBackRetiredUnchanged = Symbol.for('moveBackRetiredUnchanged') -const _build = Symbol.for('build') const _removeTrash = Symbol.for('removeTrash') const _renamePath = Symbol.for('renamePath') const _rollbackRetireShallowNodes = Symbol.for('rollbackRetireShallowNodes') const _rollbackCreateSparseTree = Symbol.for('rollbackCreateSparseTree') const _rollbackMoveBackRetiredUnchanged = Symbol.for('rollbackMoveBackRetiredUnchanged') const _saveIdealTree = Symbol.for('saveIdealTree') -const _copyIdealToActual = Symbol('copyIdealToActual') -const _addOmitsToTrashList = Symbol('addOmitsToTrashList') -const _packageLockOnly = Symbol('packageLockOnly') -const _dryRun = Symbol('dryRun') -const _validateNodeModules = Symbol('validateNodeModules') -const _nmValidated = Symbol('nmValidated') -const _validatePath = Symbol('validatePath') const _reifyPackages = Symbol.for('reifyPackages') -const _omitDev = Symbol('omitDev') -const _omitOptional = Symbol('omitOptional') -const _omitPeer = Symbol('omitPeer') - -const _pruneBundledMetadeps = Symbol('pruneBundledMetadeps') - -// defined by Ideal mixin +// defined by build-ideal-tree mixin const _resolvedAdd = Symbol.for('resolvedAdd') const _usePackageLock = Symbol.for('usePackageLock') -const _formatPackageLock = Symbol.for('formatPackageLock') +// used by build-ideal-tree mixin +const _addNodeToTrashList = Symbol.for('addNodeToTrashList') const _createIsolatedTree = Symbol.for('createIsolatedTree') module.exports = cls => class Reifier extends cls { + #bundleMissing = new Set() // child nodes we'd EXPECT to be included in a bundle, but aren't + #bundleUnpacked = new Set() // the nodes we unpack to read their bundles + #dryRun + #nmValidated = new Set() + #omitDev + #omitPeer + #omitOptional + #retiredPaths = {} + #retiredUnchanged = {} + #savePrefix + #shrinkwrapInflated = new Set() + #sparseTreeDirs = new Set() + #sparseTreeRoots = new Set() + constructor (options) { super(options) - const { - savePrefix = '^', - packageLockOnly = false, - dryRun = false, - formatPackageLock = true, - } = options - - this[_dryRun] = !!dryRun - this[_packageLockOnly] = !!packageLockOnly - this[_savePrefix] = savePrefix - this[_formatPackageLock] = !!formatPackageLock - - this.diff = null - this[_retiredPaths] = {} - this[_shrinkwrapInflated] = new Set() - this[_retiredUnchanged] = {} - this[_sparseTreeDirs] = new Set() - this[_sparseTreeRoots] = new Set() this[_trashList] = new Set() - // the nodes we unpack to read their bundles - this[_bundleUnpacked] = new Set() - // child nodes we'd EXPECT to be included in a bundle, but aren't - this[_bundleMissing] = new Set() - this[_nmValidated] = new Set() } // public method async reify (options = {}) { const linked = (options.installStrategy || this.options.installStrategy) === 'linked' - if (this[_packageLockOnly] && this.options.global) { + if (this.options.packageLockOnly && this.options.global) { const er = new Error('cannot generate lockfile for global packages') er.code = 'ESHRINKWRAPGLOBAL' throw er } const omit = new Set(options.omit || []) - this[_omitDev] = omit.has('dev') - this[_omitOptional] = omit.has('optional') - this[_omitPeer] = omit.has('peer') + this.#omitDev = omit.has('dev') + this.#omitOptional = omit.has('optional') + this.#omitPeer = omit.has('peer') // start tracker block this.addTracker('reify') - process.emit('time', 'reify') - await this[_validatePath]() + const timeEnd = time.start('reify') + // don't create missing dirs on dry runs + if 
(!this.options.packageLockOnly && !this.options.dryRun) { + // we do NOT want to set ownership on this folder, especially + // recursively, because it can have other side effects to do that + // in a project directory. We just want to make it if it's missing. + await mkdir(resolve(this.path), { recursive: true }) + + // do not allow the top-level node_modules to be a symlink + await this.#validateNodeModules(resolve(this.path, 'node_modules')) + } await this[_loadTrees](options) const oldTree = this.idealTree @@ -159,7 +136,7 @@ module.exports = cls => class Reifier extends cls { // this is currently technical debt which will be resolved in a refactor // of Node/Link trees log.warn('reify', 'The "linked" install strategy is EXPERIMENTAL and may contain bugs.') - this.idealTree = await this[_createIsolatedTree](this.idealTree) + this.idealTree = await this[_createIsolatedTree]() } await this[_diffTrees]() await this[_reifyPackages]() @@ -169,37 +146,139 @@ module.exports = cls => class Reifier extends cls { this.idealTree = oldTree } await this[_saveIdealTree](options) - await this[_copyIdealToActual]() - // This is a very bad pattern and I can't wait to stop doing it - this.auditReport = await this.auditReport + // clean up any trash that is still in the tree + for (const path of this[_trashList]) { + const loc = relpath(this.idealTree.realpath, path) + const node = this.idealTree.inventory.get(loc) + if (node && node.root === this.idealTree) { + node.parent = null + } + } - this.finishTracker('reify') - process.emit('timeEnd', 'reify') - return treeCheck(this.actualTree) - } + // if we filtered to only certain nodes, then anything ELSE needs + // to be untouched in the resulting actual tree, even if it differs + // in the idealTree. Copy over anything that was in the actual and + // was not changed, delete anything in the ideal and not actual. + // Then we move the entire idealTree over to this.actualTree, and + // save the hidden lockfile. + if (this.diff && this.diff.filterSet.size) { + const reroot = new Set() - async [_validatePath] () { - // don't create missing dirs on dry runs - if (this[_packageLockOnly] || this[_dryRun]) { - return + const { filterSet } = this.diff + const seen = new Set() + for (const [loc, ideal] of this.idealTree.inventory.entries()) { + seen.add(loc) + + // if it's an ideal node from the filter set, then skip it + // because we already made whatever changes were necessary + if (filterSet.has(ideal)) { + continue + } + + // otherwise, if it's not in the actualTree, then it's not a thing + // that we actually added. And if it IS in the actualTree, then + // it's something that we left untouched, so we need to record + // that. + const actual = this.actualTree.inventory.get(loc) + if (!actual) { + ideal.root = null + } else { + if ([...actual.linksIn].some(link => filterSet.has(link))) { + seen.add(actual.location) + continue + } + const { realpath, isLink } = actual + if (isLink && ideal.isLink && ideal.realpath === realpath) { + continue + } else { + reroot.add(actual) + } + } + } + + // now find any actual nodes that may not be present in the ideal + // tree, but were left behind by virtue of not being in the filter + for (const [loc, actual] of this.actualTree.inventory.entries()) { + if (seen.has(loc)) { + continue + } + seen.add(loc) + + // we know that this is something that ISN'T in the idealTree, + // or else we will have addressed it in the previous loop. + // If it's in the filterSet, that means we intentionally removed + // it, so nothing to do here. 
+ if (filterSet.has(actual)) { + continue + } + + reroot.add(actual) + } + + // go through the rerooted actual nodes, and move them over. + for (const actual of reroot) { + actual.root = this.idealTree + } + + // prune out any tops that lack a linkIn, they are no longer relevant. + for (const top of this.idealTree.tops) { + if (top.linksIn.size === 0) { + top.root = null + } + } + + // need to calculate dep flags, since nodes may have been marked + // as extraneous or otherwise incorrect during transit. + calcDepFlags(this.idealTree) } - // we do NOT want to set ownership on this folder, especially - // recursively, because it can have other side effects to do that - // in a project directory. We just want to make it if it's missing. - await mkdir(resolve(this.path), { recursive: true }) + // save the ideal's meta as a hidden lockfile after we actualize it + this.idealTree.meta.filename = + this.idealTree.realpath + '/node_modules/.package-lock.json' + this.idealTree.meta.hiddenLockfile = true + this.idealTree.meta.lockfileVersion = defaultLockfileVersion + + this.actualTree = this.idealTree + this.idealTree = null + + if (!this.options.global) { + await this.actualTree.meta.save() + const ignoreScripts = !!this.options.ignoreScripts + // if we aren't doing a dry run or ignoring scripts and we actually made changes to the dep + // tree, then run the dependencies scripts + if (!this.options.dryRun && !ignoreScripts && this.diff && this.diff.children.length) { + const { path, package: pkg } = this.actualTree.target + const stdio = this.options.foregroundScripts ? 'inherit' : 'pipe' + const { scripts = {} } = pkg + for (const event of ['predependencies', 'dependencies', 'postdependencies']) { + if (Object.prototype.hasOwnProperty.call(scripts, event)) { + log.info('run', pkg._id, event, scripts[event]) + await time.start(`reify:run:${event}`, () => runScript({ + event, + path, + pkg, + stdio, + scriptShell: this.options.scriptShell, + })) + } + } + } + } + // This is a very bad pattern and I can't wait to stop doing it + this.auditReport = await this.auditReport - // do not allow the top-level node_modules to be a symlink - await this[_validateNodeModules](resolve(this.path, 'node_modules')) + this.finishTracker('reify') + timeEnd() + return treeCheck(this.actualTree) } async [_reifyPackages] () { // we don't submit the audit report or write to disk on dry runs - if (this[_dryRun]) { + if (this.options.dryRun) { return } - if (this[_packageLockOnly]) { + if (this.options.packageLockOnly) { // we already have the complete tree, so just audit it now, // and that's all we have to do here. return this[_submitQuickAudit]() @@ -248,6 +327,7 @@ module.exports = cls => class Reifier extends cls { throw reifyTerminated } } catch (er) { + // TODO rollbacks shouldn't be relied on to throw err await this[rollback](er) /* istanbul ignore next - rollback throws, should never hit this */ throw er @@ -269,16 +349,15 @@ module.exports = cls => class Reifier extends cls { // when doing a local install, we load everything and figure it all out. // when doing a global install, we *only* care about the explicit requests. 
[_loadTrees] (options) { - process.emit('time', 'reify:loadTrees') + const timeEnd = time.start('reify:loadTrees') const bitOpt = { ...options, - complete: this[_packageLockOnly] || this[_dryRun], + complete: this.options.packageLockOnly || this.options.dryRun, } // if we're only writing a package lock, then it doesn't matter what's here - if (this[_packageLockOnly]) { - return this.buildIdealTree(bitOpt) - .then(() => process.emit('timeEnd', 'reify:loadTrees')) + if (this.options.packageLockOnly) { + return this.buildIdealTree(bitOpt).then(timeEnd) } const actualOpt = this.options.global ? { @@ -312,7 +391,7 @@ module.exports = cls => class Reifier extends cls { return Promise.all([ this.loadActual(actualOpt), this.buildIdealTree(bitOpt), - ]).then(() => process.emit('timeEnd', 'reify:loadTrees')) + ]).then(timeEnd) } // the global install space tends to have a lot of stuff in it. don't @@ -322,15 +401,15 @@ module.exports = cls => class Reifier extends cls { // explicitRequests which is set during buildIdealTree return this.buildIdealTree(bitOpt) .then(() => this.loadActual(actualOpt)) - .then(() => process.emit('timeEnd', 'reify:loadTrees')) + .then(timeEnd) } [_diffTrees] () { - if (this[_packageLockOnly]) { + if (this.options.packageLockOnly) { return } - process.emit('time', 'reify:diffTrees') + const timeEnd = time.start('reify:diffTrees') // XXX if we have an existing diff already, there should be a way // to just invalidate the parts that changed, but avoid walking the // whole tree again. @@ -384,7 +463,7 @@ module.exports = cls => class Reifier extends cls { // find all the nodes that need to change between the actual // and ideal trees. this.diff = Diff.calculate({ - shrinkwrapInflated: this[_shrinkwrapInflated], + shrinkwrapInflated: this.#shrinkwrapInflated, filterNodes, actual: this.actualTree, ideal: this.idealTree, @@ -397,7 +476,7 @@ module.exports = cls => class Reifier extends cls { // because if we remove node_modules/FOO on case-insensitive systems, // it will remove the dep that we *want* at node_modules/foo. - process.emit('timeEnd', 'reify:diffTrees') + timeEnd() } // add the node and all its bins to the list of things to be @@ -406,7 +485,7 @@ module.exports = cls => class Reifier extends cls { // replace them when rolling back on failure. [_addNodeToTrashList] (node, retire = false) { const paths = [node.path, ...node.binPaths] - const moves = this[_retiredPaths] + const moves = this.#retiredPaths log.silly('reify', 'mark', retire ? 'retired' : 'deleted', paths) for (const path of paths) { if (retire) { @@ -422,8 +501,8 @@ module.exports = cls => class Reifier extends cls { // move aside the shallowest nodes in the tree that have to be // changed or removed, so that we can rollback if necessary. 
[_retireShallowNodes] () { - process.emit('time', 'reify:retireShallow') - const moves = this[_retiredPaths] = {} + const timeEnd = time.start('reify:retireShallow') + const moves = this.#retiredPaths = {} for (const diff of this.diff.children) { if (diff.action === 'CHANGE' || diff.action === 'REMOVE') { // we'll have to clean these up at the end, so add them to the list @@ -433,8 +512,7 @@ module.exports = cls => class Reifier extends cls { log.silly('reify', 'moves', moves) const movePromises = Object.entries(moves) .map(([from, to]) => this[_renamePath](from, to)) - return promiseAllRejectLate(movePromises) - .then(() => process.emit('timeEnd', 'reify:retireShallow')) + return promiseAllRejectLate(movePromises).then(timeEnd) } [_renamePath] (from, to, didMkdirp = false) { @@ -456,14 +534,14 @@ module.exports = cls => class Reifier extends cls { } [_rollbackRetireShallowNodes] (er) { - process.emit('time', 'reify:rollback:retireShallow') - const moves = this[_retiredPaths] + const timeEnd = time.start('reify:rollback:retireShallow') + const moves = this.#retiredPaths const movePromises = Object.entries(moves) .map(([from, to]) => this[_renamePath](to, from)) return promiseAllRejectLate(movePromises) // ignore subsequent rollback errors - .catch(er => {}) - .then(() => process.emit('timeEnd', 'reify:rollback:retireShallow')) + .catch(() => {}) + .then(timeEnd) .then(() => { throw er }) @@ -472,11 +550,11 @@ module.exports = cls => class Reifier extends cls { // adding to the trash list will skip reifying, and delete them // if they are currently in the tree and otherwise untouched. [_addOmitsToTrashList] () { - if (!this[_omitDev] && !this[_omitOptional] && !this[_omitPeer]) { + if (!this.#omitDev && !this.#omitOptional && !this.#omitPeer) { return } - process.emit('time', 'reify:trashOmits') + const timeEnd = time.start('reify:trashOmits') for (const node of this.idealTree.inventory.values()) { const { top } = node @@ -494,26 +572,26 @@ module.exports = cls => class Reifier extends cls { // omit node if the dep type matches any omit flags that were set if ( - node.peer && this[_omitPeer] || - node.dev && this[_omitDev] || - node.optional && this[_omitOptional] || - node.devOptional && this[_omitOptional] && this[_omitDev] + node.peer && this.#omitPeer || + node.dev && this.#omitDev || + node.optional && this.#omitOptional || + node.devOptional && this.#omitOptional && this.#omitDev ) { this[_addNodeToTrashList](node) } } - process.emit('timeEnd', 'reify:trashOmits') + timeEnd() } [_createSparseTree] () { - process.emit('time', 'reify:createSparse') + const timeEnd = time.start('reify:createSparse') // if we call this fn again, we look for the previous list // so that we can avoid making the same directory multiple times const leaves = this.diff.leaves .filter(diff => { return (diff.action === 'ADD' || diff.action === 'CHANGE') && - !this[_sparseTreeDirs].has(diff.ideal.path) && + !this.#sparseTreeDirs.has(diff.ideal.path) && !diff.ideal.isLink }) .map(diff => diff.ideal) @@ -530,37 +608,36 @@ module.exports = cls => class Reifier extends cls { continue } dirsChecked.add(d) - const st = await lstat(d).catch(er => null) + const st = await lstat(d).catch(() => null) // this can happen if we have a link to a package with a name // that the filesystem treats as if it is the same thing. // would be nice to have conditional istanbul ignores here... 
/* istanbul ignore next - defense in depth */ if (st && !st.isDirectory()) { const retired = retirePath(d) - this[_retiredPaths][d] = retired + this.#retiredPaths[d] = retired this[_trashList].add(retired) await this[_renamePath](d, retired) } } - this[_sparseTreeDirs].add(node.path) + this.#sparseTreeDirs.add(node.path) const made = await mkdir(node.path, { recursive: true }) // if the directory already exists, made will be undefined. if that's the case // we don't want to remove it because we aren't the ones who created it so we - // omit it from the _sparseTreeRoots + // omit it from the #sparseTreeRoots if (made) { - this[_sparseTreeRoots].add(made) + this.#sparseTreeRoots.add(made) } - })) - .then(() => process.emit('timeEnd', 'reify:createSparse')) + })).then(timeEnd) } [_rollbackCreateSparseTree] (er) { - process.emit('time', 'reify:rollback:createSparse') + const timeEnd = time.start('reify:rollback:createSparse') // cut the roots of the sparse tree that were created, not the leaves - const roots = this[_sparseTreeRoots] + const roots = this.#sparseTreeRoots // also delete the moves that we retired, so that we can move them back const failures = [] - const targets = [...roots, ...Object.keys(this[_retiredPaths])] + const targets = [...roots, ...Object.keys(this.#retiredPaths)] const unlinks = targets .map(path => rm(path, { recursive: true, force: true }).catch(er => failures.push([path, er]))) return promiseAllRejectLate(unlinks).then(() => { @@ -569,7 +646,7 @@ module.exports = cls => class Reifier extends cls { log.warn('cleanup', 'Failed to remove some directories', failures) } }) - .then(() => process.emit('timeEnd', 'reify:rollback:createSparse')) + .then(timeEnd) .then(() => this[_rollbackRetireShallowNodes](er)) } @@ -577,7 +654,7 @@ module.exports = cls => class Reifier extends cls { // we need to unpack them, read that shrinkwrap file, and then update // the tree by calling loadVirtual with the node as the root. [_loadShrinkwrapsAndUpdateTrees] () { - const seen = this[_shrinkwrapInflated] + const seen = this.#shrinkwrapInflated const shrinkwraps = this.diff.leaves .filter(d => (d.action === 'CHANGE' || d.action === 'ADD' || !d.action) && d.ideal.hasShrinkwrap && !seen.has(d.ideal) && @@ -587,7 +664,7 @@ module.exports = cls => class Reifier extends cls { return } - process.emit('time', 'reify:loadShrinkwraps') + const timeEnd = time.start('reify:loadShrinkwraps') const Arborist = this.constructor return promiseAllRejectLate(shrinkwraps.map(diff => { @@ -604,7 +681,7 @@ module.exports = cls => class Reifier extends cls { .then(() => this[_createSparseTree]()) .then(() => this[_addOmitsToTrashList]()) .then(() => this[_loadShrinkwrapsAndUpdateTrees]()) - .then(() => process.emit('timeEnd', 'reify:loadShrinkwraps')) + .then(timeEnd) } // create a symlink for Links, extract for Nodes @@ -619,8 +696,7 @@ module.exports = cls => class Reifier extends cls { return node } - const timer = `reifyNode:${node.location}` - process.emit('time', timer) + const timeEnd = time.start(`reifyNode:${node.location}`) this.addTracker('reify', node.name, node.location) const { npmVersion, nodeVersion, cpu, os, libc } = this.options @@ -636,35 +712,40 @@ module.exports = cls => class Reifier extends cls { checkPlatform(node.package, false, { cpu, os, libc }) } await this[_checkBins](node) - await this[_extractOrLink](node) - await this[_warnDeprecated](node) + await this.#extractOrLink(node) + const { _id, deprecated } = node.package + // The .catch is in _handleOptionalFailure. 
Not ideal, this should be cleaned up. + // eslint-disable-next-line promise/always-return + if (deprecated) { + log.warn('deprecated', `${_id}: ${deprecated}`) + } }) return this[_handleOptionalFailure](node, p) .then(() => { this.finishTracker('reify', node.name, node.location) - process.emit('timeEnd', timer) + timeEnd() return node }) } // do not allow node_modules to be a symlink - async [_validateNodeModules] (nm) { - if (this.options.force || this[_nmValidated].has(nm)) { + async #validateNodeModules (nm) { + if (this.options.force || this.#nmValidated.has(nm)) { return } const st = await lstat(nm).catch(() => null) if (!st || st.isDirectory()) { - this[_nmValidated].add(nm) + this.#nmValidated.add(nm) return } log.warn('reify', 'Removing non-directory', nm) await rm(nm, { recursive: true, force: true }) } - async [_extractOrLink] (node) { + async #extractOrLink (node) { const nm = resolve(node.parent.path, 'node_modules') - await this[_validateNodeModules](nm) + await this.#validateNodeModules(nm) if (!node.isLink) { // in normal cases, node.resolved should *always* be set by now. @@ -676,7 +757,7 @@ module.exports = cls => class Reifier extends cls { // entirely, since we can't possibly reify it. let res = null if (node.resolved) { - const registryResolved = this[_registryResolved](node.resolved) + const registryResolved = this.#registryResolved(node.resolved) if (registryResolved) { res = `${node.name}@${registryResolved}` } @@ -698,7 +779,7 @@ module.exports = cls => class Reifier extends cls { return } await debug(async () => { - const st = await lstat(node.path).catch(e => null) + const st = await lstat(node.path).catch(() => null) if (st && !st.isDirectory()) { debug.log('unpacking into a non-directory', node) throw Object.assign(new Error('ENOTDIR: not a directory'), { @@ -722,10 +803,8 @@ module.exports = cls => class Reifier extends cls { // node.isLink await rm(node.path, { recursive: true, force: true }) - await this[_symlink](node) - } - async [_symlink] (node) { + // symlink const dir = dirname(node.path) const target = node.realpath const rel = relative(dir, target) @@ -733,17 +812,10 @@ module.exports = cls => class Reifier extends cls { return symlink(rel, node.path, 'junction') } - [_warnDeprecated] (node) { - const { _id, deprecated } = node.package - if (deprecated) { - log.warn('deprecated', `${_id}: ${deprecated}`) - } - } - // if the node is optional, then the failure of the promise is nonfatal // just add it and its optional set to the trash list. [_handleOptionalFailure] (node, p) { - return (node.optional ? p.catch(er => { + return (node.optional ? p.catch(() => { const set = optionalSet(node) for (node of set) { log.verbose('reify', 'failed optional dependency', node.path) @@ -752,7 +824,7 @@ module.exports = cls => class Reifier extends cls { }) : p).then(() => node) } - [_registryResolved] (resolved) { + #registryResolved (resolved) { // the default registry url is a magic value meaning "the currently // configured registry". // `resolved` must never be falsey. @@ -782,21 +854,51 @@ module.exports = cls => class Reifier extends cls { // by the contents of the package. however, in their case, rather than // shipping a virtual tree that must be reified, they ship an entire // reified actual tree that must be unpacked and not modified. 
- [_loadBundlesAndUpdateTrees] ( - depth = 0, bundlesByDepth = this[_getBundlesByDepth]() - ) { + [_loadBundlesAndUpdateTrees] (depth = 0, bundlesByDepth) { + let maxBundleDepth + if (!bundlesByDepth) { + bundlesByDepth = new Map() + maxBundleDepth = -1 + dfwalk({ + tree: this.diff, + visit: diff => { + const node = diff.ideal + if (!node) { + return + } + if (node.isProjectRoot) { + return + } + + const { bundleDependencies } = node.package + if (bundleDependencies && bundleDependencies.length) { + maxBundleDepth = Math.max(maxBundleDepth, node.depth) + if (!bundlesByDepth.has(node.depth)) { + bundlesByDepth.set(node.depth, [node]) + } else { + bundlesByDepth.get(node.depth).push(node) + } + } + }, + getChildren: diff => diff.children, + }) + + bundlesByDepth.set('maxBundleDepth', maxBundleDepth) + } else { + maxBundleDepth = bundlesByDepth.get('maxBundleDepth') + } + if (depth === 0) { - process.emit('time', 'reify:loadBundles') + time.start('reify:loadBundles') } - const maxBundleDepth = bundlesByDepth.get('maxBundleDepth') if (depth > maxBundleDepth) { // if we did something, then prune the tree and update the diffs if (maxBundleDepth !== -1) { - this[_pruneBundledMetadeps](bundlesByDepth) + this.#pruneBundledMetadeps(bundlesByDepth) this[_diffTrees]() } - process.emit('timeEnd', 'reify:loadBundles') + time.end('reify:loadBundles') return } @@ -814,7 +916,7 @@ module.exports = cls => class Reifier extends cls { // extract all the nodes with bundles return promiseCallLimit(set.map(node => { return () => { - this[_bundleUnpacked].add(node) + this.#bundleUnpacked.add(node) return this[_reifyNode](node) } }), { rejectLate: true }) @@ -843,46 +945,15 @@ module.exports = cls => class Reifier extends cls { }, }) for (const name of notTransplanted) { - this[_bundleMissing].add(node.children.get(name)) + this.#bundleMissing.add(node.children.get(name)) } }))) // move onto the next level of bundled items .then(() => this[_loadBundlesAndUpdateTrees](depth + 1, bundlesByDepth)) } - [_getBundlesByDepth] () { - const bundlesByDepth = new Map() - let maxBundleDepth = -1 - dfwalk({ - tree: this.diff, - visit: diff => { - const node = diff.ideal - if (!node) { - return - } - if (node.isProjectRoot) { - return - } - - const { bundleDependencies } = node.package - if (bundleDependencies && bundleDependencies.length) { - maxBundleDepth = Math.max(maxBundleDepth, node.depth) - if (!bundlesByDepth.has(node.depth)) { - bundlesByDepth.set(node.depth, [node]) - } else { - bundlesByDepth.get(node.depth).push(node) - } - } - }, - getChildren: diff => diff.children, - }) - - bundlesByDepth.set('maxBundleDepth', maxBundleDepth) - return bundlesByDepth - } - // https://github.com/npm/cli/issues/1597#issuecomment-667639545 - [_pruneBundledMetadeps] (bundlesByDepth) { + #pruneBundledMetadeps (bundlesByDepth) { const bundleShadowed = new Set() // Example dep graph: @@ -981,7 +1052,7 @@ module.exports = cls => class Reifier extends cls { // before finishing the reify() and returning the tree. Thus, we do // NOT return the promise, as the intent is for this to run in parallel // with the reification, and be resolved at a later time. 
- process.emit('time', 'reify:audit') + const timeEnd = time.start('reify:audit') const options = { ...this.options } const tree = this.idealTree @@ -995,7 +1066,7 @@ module.exports = cls => class Reifier extends cls { } this.auditReport = AuditReport.load(tree, options).then(res => { - process.emit('timeEnd', 'reify:audit') + timeEnd() return res }) } @@ -1005,7 +1076,7 @@ module.exports = cls => class Reifier extends cls { // kicking off each unpack job. If any fail, we rm the sparse // tree entirely and try to put everything back where it was. [_unpackNewModules] () { - process.emit('time', 'reify:unpack') + const timeEnd = time.start('reify:unpack') const unpacks = [] dfwalk({ tree: this.diff, @@ -1016,9 +1087,9 @@ module.exports = cls => class Reifier extends cls { } const node = diff.ideal - const bd = this[_bundleUnpacked].has(node) - const sw = this[_shrinkwrapInflated].has(node) - const bundleMissing = this[_bundleMissing].has(node) + const bd = this.#bundleUnpacked.has(node) + const sw = this.#shrinkwrapInflated.has(node) + const bundleMissing = this.#bundleMissing.has(node) // check whether we still need to unpack this one. // test the inDepBundle last, since that's potentially a tree walk. @@ -1038,8 +1109,7 @@ module.exports = cls => class Reifier extends cls { }, getChildren: diff => diff.children, }) - return promiseAllRejectLate(unpacks) - .then(() => process.emit('timeEnd', 'reify:unpack')) + return promiseAllRejectLate(unpacks).then(timeEnd) } // This is the part where we move back the unchanging nodes that were @@ -1054,9 +1124,9 @@ module.exports = cls => class Reifier extends cls { // This is sort of an inverse diff tree, of all the nodes where // the actualTree and idealTree _don't_ differ, starting from the // shallowest nodes that we moved aside in the first place. - process.emit('time', 'reify:unretire') - const moves = this[_retiredPaths] - this[_retiredUnchanged] = {} + const timeEnd = time.start('reify:unretire') + const moves = this.#retiredPaths + this.#retiredUnchanged = {} return promiseAllRejectLate(this.diff.children.map(diff => { // skip if nothing was retired if (diff.action !== 'CHANGE' && diff.action !== 'REMOVE') { @@ -1079,7 +1149,7 @@ module.exports = cls => class Reifier extends cls { } }) - this[_retiredUnchanged][retireFolder] = [] + this.#retiredUnchanged[retireFolder] = [] return promiseAllRejectLate(diff.unchanged.map(node => { // no need to roll back links, since we'll just delete them anyway if (node.isLink) { @@ -1088,11 +1158,11 @@ module.exports = cls => class Reifier extends cls { } // will have been moved/unpacked along with bundler - if (node.inDepBundle && !this[_bundleMissing].has(node)) { + if (node.inDepBundle && !this.#bundleMissing.has(node)) { return } - this[_retiredUnchanged][retireFolder].push(node) + this.#retiredUnchanged[retireFolder].push(node) const rel = relative(realFolder, node.path) const fromPath = resolve(retireFolder, rel) @@ -1102,8 +1172,7 @@ module.exports = cls => class Reifier extends cls { const dir = bd && bd.length ? 
node.path + '/node_modules' : node.path return mkdir(dir, { recursive: true }).then(() => this[_moveContents](node, fromPath)) })) - })) - .then(() => process.emit('timeEnd', 'reify:unretire')) + })).then(timeEnd) } // move the contents from the fromPath to the node.path @@ -1120,10 +1189,10 @@ module.exports = cls => class Reifier extends cls { } [_rollbackMoveBackRetiredUnchanged] (er) { - const moves = this[_retiredPaths] + const moves = this.#retiredPaths // flip the mapping around to go back const realFolders = new Map(Object.entries(moves).map(([k, v]) => [v, k])) - const promises = Object.entries(this[_retiredUnchanged]) + const promises = Object.entries(this.#retiredUnchanged) .map(([retireFolder, nodes]) => promiseAllRejectLate(nodes.map(node => { const realFolder = realFolders.get(retireFolder) const rel = relative(realFolder, node.path) @@ -1135,7 +1204,7 @@ module.exports = cls => class Reifier extends cls { } [_build] () { - process.emit('time', 'reify:build') + const timeEnd = time.start('reify:build') // for all the things being installed, run their appropriate scripts // run in tip->root order, so as to be more likely to build a node's @@ -1167,8 +1236,7 @@ module.exports = cls => class Reifier extends cls { } } - return this.rebuild({ nodes, handleOptionalFailure: true }) - .then(() => process.emit('timeEnd', 'reify:build')) + return this.rebuild({ nodes, handleOptionalFailure: true }).then(timeEnd) } // the tree is pretty much built now, so it's cleanup time. @@ -1176,7 +1244,7 @@ module.exports = cls => class Reifier extends cls { // If this fails, there isn't much we can do but tell the user about it. // Thankfully, it's pretty unlikely that it'll fail, since rm is a node builtin. async [_removeTrash] () { - process.emit('time', 'reify:trash') + const timeEnd = time.start('reify:trash') const promises = [] const failures = [] const _rm = path => rm(path, { recursive: true, force: true }).catch(er => failures.push([path, er])) @@ -1189,7 +1257,8 @@ module.exports = cls => class Reifier extends cls { if (failures.length) { log.warn('cleanup', 'Failed to remove some directories', failures) } - process.emit('timeEnd', 'reify:trash') + + timeEnd() } // last but not least, we save the ideal tree metadata to the package-lock @@ -1215,14 +1284,14 @@ module.exports = cls => class Reifier extends cls { const saveIdealTree = !( (!save && !hasUpdates) || this.options.global - || this[_dryRun] + || this.options.dryRun ) if (!saveIdealTree) { return false } - process.emit('time', 'reify:save') + const timeEnd = time.start('reify:save') const updatedTrees = new Set() const updateNodes = nodes => { @@ -1251,7 +1320,7 @@ module.exports = cls => class Reifier extends cls { const isLocalDep = req.type === 'directory' || req.type === 'file' if (req.registry) { const version = child.version - const prefixRange = version ? this[_savePrefix] + version : '*' + const prefixRange = version ? this.options.savePrefix + version : '*' // if we installed a range, then we save the range specified // if it is not a subset of the ^x.y.z. eg, installing a range // of `1.x <1.2.3` will not be saved as `^1.2.0`, because that @@ -1286,7 +1355,7 @@ module.exports = cls => class Reifier extends cls { // using their relative path if (edge.type === 'workspace') { const { version } = edge.to.target - const prefixRange = version ? this[_savePrefix] + version : '*' + const prefixRange = version ? 
this.options.savePrefix + version : '*' newSpec = prefixRange } else { // save the relative path in package.json @@ -1455,154 +1524,12 @@ module.exports = cls => class Reifier extends cls { // TODO this ignores options.save await this.idealTree.meta.save({ - format: (this[_formatPackageLock] && format) ? format - : this[_formatPackageLock], + format: (this.options.formatPackageLock && format) ? format + : this.options.formatPackageLock, }) } - process.emit('timeEnd', 'reify:save') + timeEnd() return true } - - async [_copyIdealToActual] () { - // clean up any trash that is still in the tree - for (const path of this[_trashList]) { - const loc = relpath(this.idealTree.realpath, path) - const node = this.idealTree.inventory.get(loc) - if (node && node.root === this.idealTree) { - node.parent = null - } - } - - // if we filtered to only certain nodes, then anything ELSE needs - // to be untouched in the resulting actual tree, even if it differs - // in the idealTree. Copy over anything that was in the actual and - // was not changed, delete anything in the ideal and not actual. - // Then we move the entire idealTree over to this.actualTree, and - // save the hidden lockfile. - if (this.diff && this.diff.filterSet.size) { - const reroot = new Set() - - const { filterSet } = this.diff - const seen = new Set() - for (const [loc, ideal] of this.idealTree.inventory.entries()) { - seen.add(loc) - - // if it's an ideal node from the filter set, then skip it - // because we already made whatever changes were necessary - if (filterSet.has(ideal)) { - continue - } - - // otherwise, if it's not in the actualTree, then it's not a thing - // that we actually added. And if it IS in the actualTree, then - // it's something that we left untouched, so we need to record - // that. - const actual = this.actualTree.inventory.get(loc) - if (!actual) { - ideal.root = null - } else { - if ([...actual.linksIn].some(link => filterSet.has(link))) { - seen.add(actual.location) - continue - } - const { realpath, isLink } = actual - if (isLink && ideal.isLink && ideal.realpath === realpath) { - continue - } else { - reroot.add(actual) - } - } - } - - // now find any actual nodes that may not be present in the ideal - // tree, but were left behind by virtue of not being in the filter - for (const [loc, actual] of this.actualTree.inventory.entries()) { - if (seen.has(loc)) { - continue - } - seen.add(loc) - - // we know that this is something that ISN'T in the idealTree, - // or else we will have addressed it in the previous loop. - // If it's in the filterSet, that means we intentionally removed - // it, so nothing to do here. - if (filterSet.has(actual)) { - continue - } - - reroot.add(actual) - } - - // go through the rerooted actual nodes, and move them over. - for (const actual of reroot) { - actual.root = this.idealTree - } - - // prune out any tops that lack a linkIn, they are no longer relevant. - for (const top of this.idealTree.tops) { - if (top.linksIn.size === 0) { - top.root = null - } - } - - // need to calculate dep flags, since nodes may have been marked - // as extraneous or otherwise incorrect during transit. 
- calcDepFlags(this.idealTree) - } - - // save the ideal's meta as a hidden lockfile after we actualize it - this.idealTree.meta.filename = - this.idealTree.realpath + '/node_modules/.package-lock.json' - this.idealTree.meta.hiddenLockfile = true - this.idealTree.meta.lockfileVersion = defaultLockfileVersion - - this.actualTree = this.idealTree - this.idealTree = null - - if (!this.options.global) { - await this.actualTree.meta.save() - const ignoreScripts = !!this.options.ignoreScripts - // if we aren't doing a dry run or ignoring scripts and we actually made changes to the dep - // tree, then run the dependencies scripts - if (!this[_dryRun] && !ignoreScripts && this.diff && this.diff.children.length) { - const { path, package: pkg } = this.actualTree.target - const stdio = this.options.foregroundScripts ? 'inherit' : 'pipe' - const { scripts = {} } = pkg - for (const event of ['predependencies', 'dependencies', 'postdependencies']) { - if (Object.prototype.hasOwnProperty.call(scripts, event)) { - const timer = `reify:run:${event}` - process.emit('time', timer) - log.info('run', pkg._id, event, scripts[event]) - await runScript({ - event, - path, - pkg, - stdio, - scriptShell: this.options.scriptShell, - }) - process.emit('timeEnd', timer) - } - } - } - } - } - - async dedupe (options = {}) { - // allow the user to set options on the ctor as well. - // XXX: deprecate separate method options objects. - options = { ...this.options, ...options } - const tree = await this.loadVirtual().catch(() => this.loadActual()) - const names = [] - for (const name of tree.inventory.query('name')) { - if (tree.inventory.query('name', name).size > 1) { - names.push(name) - } - } - return this.reify({ - ...options, - preferDedupe: true, - update: { names }, - }) - } } diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/audit-report.js b/deps/npm/node_modules/@npmcli/arborist/lib/audit-report.js index 387919f610829e..f7700ce9119de3 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/audit-report.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/audit-report.js @@ -13,7 +13,7 @@ const _fixAvailable = Symbol('fixAvailable') const _checkTopNode = Symbol('checkTopNode') const _init = Symbol('init') const _omit = Symbol('omit') -const log = require('proc-log') +const { log, time } = require('proc-log') const fetch = require('npm-registry-fetch') @@ -117,7 +117,7 @@ class AuditReport extends Map { } async [_init] () { - process.emit('time', 'auditReport:init') + const timeEnd = time.start('auditReport:init') const promises = [] for (const [name, advisories] of Object.entries(this.report)) { @@ -210,7 +210,8 @@ class AuditReport extends Map { } } } - process.emit('timeEnd', 'auditReport:init') + + timeEnd() } [_checkTopNode] (topNode, vuln, spec) { @@ -306,7 +307,7 @@ class AuditReport extends Map { return null } - process.emit('time', 'auditReport:getReport') + const timeEnd = time.start('auditReport:getReport') try { try { // first try the super fast bulk advisory listing @@ -347,7 +348,7 @@ class AuditReport extends Map { this.error = er return null } finally { - process.emit('timeEnd', 'auditReport:getReport') + timeEnd() } } } diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/dep-valid.js b/deps/npm/node_modules/@npmcli/arborist/lib/dep-valid.js index 4afb5e47cf111f..58656e8dbbad29 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/dep-valid.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/dep-valid.js @@ -124,7 +124,7 @@ const linkValid = (child, requested, requestor) => { return 
isLink && relative(child.realpath, requested.fetchSpec) === '' } -const tarballValid = (child, requested, requestor) => { +const tarballValid = (child, requested) => { if (child.isLink) { return false } diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/inventory.js b/deps/npm/node_modules/@npmcli/arborist/lib/inventory.js index 0885034666b50a..7b3f294fdab2c3 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/inventory.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/inventory.js @@ -130,7 +130,7 @@ class Inventory extends Map { return super.get(node.location) === node } - set (k, v) { + set () { throw new Error('direct set() not supported, use inventory.add(node)') } } diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/place-dep.js b/deps/npm/node_modules/@npmcli/arborist/lib/place-dep.js index bf0fef6525343a..fca36eb6856137 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/place-dep.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/place-dep.js @@ -8,7 +8,7 @@ // a result. const localeCompare = require('@isaacs/string-locale-compare')('en') -const log = require('proc-log') +const { log } = require('proc-log') const { redact } = require('@npmcli/redact') const deepestNestingTarget = require('./deepest-nesting-target.js') const CanPlaceDep = require('./can-place-dep.js') diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/query-selector-all.js b/deps/npm/node_modules/@npmcli/arborist/lib/query-selector-all.js index c8ec866f0f9691..77640a3803d13b 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/query-selector-all.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/query-selector-all.js @@ -3,7 +3,7 @@ const { resolve } = require('path') const { parser, arrayDelimiter } = require('@npmcli/query') const localeCompare = require('@isaacs/string-locale-compare')('en') -const log = require('proc-log') +const { log } = require('proc-log') const { minimatch } = require('minimatch') const npa = require('npm-package-arg') const pacote = require('pacote') @@ -650,27 +650,27 @@ class Results { // operators for attribute selectors const attributeOperators = { // attribute value is equivalent - '=' ({ attr, value, insensitive }) { + '=' ({ attr, value }) { return attr === value }, // attribute value contains word - '~=' ({ attr, value, insensitive }) { + '~=' ({ attr, value }) { return (attr.match(/\w+/g) || []).includes(value) }, // attribute value contains string - '*=' ({ attr, value, insensitive }) { + '*=' ({ attr, value }) { return attr.includes(value) }, // attribute value is equal or starts with - '|=' ({ attr, value, insensitive }) { + '|=' ({ attr, value }) { return attr.startsWith(`${value}-`) }, // attribute value starts with - '^=' ({ attr, value, insensitive }) { + '^=' ({ attr, value }) { return attr.startsWith(value) }, // attribute value ends with - '$=' ({ attr, value, insensitive }) { + '$=' ({ attr, value }) { return attr.endsWith(value) }, } diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/shrinkwrap.js b/deps/npm/node_modules/@npmcli/arborist/lib/shrinkwrap.js index e6525ffe67b65d..026abc55ccba18 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/shrinkwrap.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/shrinkwrap.js @@ -33,7 +33,7 @@ const mismatch = (a, b) => a && b && a !== b // After calling this.commit(), any nodes not present in the tree will have // been removed from the shrinkwrap data as well. 
-const log = require('proc-log') +const { log } = require('proc-log') const YarnLock = require('./yarn-lock.js') const { readFile, @@ -1145,6 +1145,7 @@ class Shrinkwrap { throw new Error('run load() before saving data') } + // This must be called before the lockfile conversion check below since it sets properties as part of `commit()` const json = this.toString(options) if ( !this.hiddenLockfile @@ -1155,6 +1156,7 @@ class Shrinkwrap { `Converting lock file (${relative(process.cwd(), this.filename)}) from v${this.originalLockfileVersion} -> v${this.lockfileVersion}` ) } + return Promise.all([ writeFile(this.filename, json).catch(er => { if (this.hiddenLockfile) { diff --git a/deps/npm/node_modules/@npmcli/arborist/lib/tracker.js b/deps/npm/node_modules/@npmcli/arborist/lib/tracker.js index 5acb32a5a7cfd9..4a754d995dfcd2 100644 --- a/deps/npm/node_modules/@npmcli/arborist/lib/tracker.js +++ b/deps/npm/node_modules/@npmcli/arborist/lib/tracker.js @@ -1,12 +1,12 @@ -const npmlog = require('npmlog') +const proggy = require('proggy') module.exports = cls => class Tracker extends cls { #progress = new Map() - #setProgress - constructor (options = {}) { - super(options) - this.#setProgress = !!options.progress + #createTracker (key, name) { + const tracker = new proggy.Tracker(name ?? key) + tracker.on('done', () => this.#progress.delete(key)) + this.#progress.set(key, tracker) } addTracker (section, subsection = null, key = null) { @@ -26,22 +26,17 @@ module.exports = cls => class Tracker extends cls { this.#onError(`Tracker "${section}" already exists`) } else if (!hasTracker && subsection === null) { // 1. no existing tracker, no subsection - // Create a new tracker from npmlog - // starts progress bar - if (this.#setProgress && this.#progress.size === 0) { - npmlog.enableProgress() - } - - this.#progress.set(section, npmlog.newGroup(section)) + // Create a new progress tracker + this.#createTracker(section) } else if (!hasTracker && subsection !== null) { // 2. no parent tracker and subsection this.#onError(`Parent tracker "${section}" does not exist`) } else if (!hasTracker || !hasSubtracker) { // 3. existing parent tracker, no subsection tracker - // Create a new subtracker in this.#progress from parent tracker - this.#progress.set(`${section}:${key}`, - this.#progress.get(section).newGroup(`${section}:${subsection}`) - ) + // Create a new subtracker and update parents + const parentTracker = this.#progress.get(section) + parentTracker.update(parentTracker.value, parentTracker.total + 1) + this.#createTracker(`${section}:${key}`, `${section}:${subsection}`) } // 4. existing parent tracker, existing subsection tracker // skip it @@ -70,32 +65,22 @@ module.exports = cls => class Tracker extends cls { this.finishTracker(section, key) } } - // remove parent tracker this.#progress.get(section).finish() - this.#progress.delete(section) - - // remove progress bar if all - // trackers are finished - if (this.#setProgress && this.#progress.size === 0) { - npmlog.disableProgress() - } } else if (!hasTracker && subsection === null) { // 1. no existing parent tracker, no subsection this.#onError(`Tracker "${section}" does not exist`) } else if (!hasTracker || hasSubtracker) { // 2. subtracker exists // Finish subtracker and remove from this.#progress + const parentTracker = this.#progress.get(section) + parentTracker.update(parentTracker.value + 1) this.#progress.get(`${section}:${key}`).finish() - this.#progress.delete(`${section}:${key}`) } // 3. 
existing parent tracker, no subsection } #onError (msg) { - if (this.#setProgress) { - npmlog.disableProgress() - } throw new Error(msg) } } diff --git a/deps/npm/node_modules/@npmcli/arborist/package.json b/deps/npm/node_modules/@npmcli/arborist/package.json index 3a92e669d4bb68..11c0ab4df3b529 100644 --- a/deps/npm/node_modules/@npmcli/arborist/package.json +++ b/deps/npm/node_modules/@npmcli/arborist/package.json @@ -1,19 +1,19 @@ { "name": "@npmcli/arborist", - "version": "7.4.2", + "version": "7.5.1", "description": "Manage node_modules trees", "dependencies": { "@isaacs/string-locale-compare": "^1.1.0", "@npmcli/fs": "^3.1.0", - "@npmcli/installed-package-contents": "^2.0.2", + "@npmcli/installed-package-contents": "^2.1.0", "@npmcli/map-workspaces": "^3.0.2", - "@npmcli/metavuln-calculator": "^7.0.0", + "@npmcli/metavuln-calculator": "^7.1.0", "@npmcli/name-from-folder": "^2.0.0", "@npmcli/node-gyp": "^3.0.0", - "@npmcli/package-json": "^5.0.0", + "@npmcli/package-json": "^5.1.0", "@npmcli/query": "^3.1.0", - "@npmcli/redact": "^1.1.0", - "@npmcli/run-script": "^7.0.2", + "@npmcli/redact": "^2.0.0", + "@npmcli/run-script": "^8.1.0", "bin-links": "^4.0.1", "cacache": "^18.0.0", "common-ancestor-path": "^1.0.1", @@ -23,13 +23,13 @@ "minimatch": "^9.0.4", "nopt": "^7.0.0", "npm-install-checks": "^6.2.0", - "npm-package-arg": "^11.0.1", + "npm-package-arg": "^11.0.2", "npm-pick-manifest": "^9.0.0", - "npm-registry-fetch": "^16.2.0", - "npmlog": "^7.0.1", - "pacote": "^17.0.4", + "npm-registry-fetch": "^17.0.0", + "pacote": "^18.0.1", "parse-conflict-json": "^3.0.0", - "proc-log": "^3.0.0", + "proc-log": "^4.2.0", + "proggy": "^2.0.0", "promise-all-reject-late": "^1.0.0", "promise-call-limit": "^3.0.1", "read-package-json-fast": "^3.0.2", @@ -62,7 +62,7 @@ }, "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/arborist" }, "author": "GitHub Inc.", diff --git a/deps/npm/node_modules/@npmcli/config/lib/definitions/definitions.js b/deps/npm/node_modules/@npmcli/config/lib/definitions/definitions.js index 3565cdb4feb44b..57ab1716118386 100644 --- a/deps/npm/node_modules/@npmcli/config/lib/definitions/definitions.js +++ b/deps/npm/node_modules/@npmcli/config/lib/definitions/definitions.js @@ -7,10 +7,10 @@ const { join } = require('node:path') const isWindows = process.platform === 'win32' // used by cafile flattening to flatOptions.ca -const fs = require('fs') +const { readFileSync } = require('fs') const maybeReadFile = file => { try { - return fs.readFileSync(file, 'utf8') + return readFileSync(file, 'utf8') } catch (er) { if (er.code !== 'ENOENT') { throw er diff --git a/deps/npm/node_modules/@npmcli/config/lib/index.js b/deps/npm/node_modules/@npmcli/config/lib/index.js index 1ff19c128696ca..c99292db9afb40 100644 --- a/deps/npm/node_modules/@npmcli/config/lib/index.js +++ b/deps/npm/node_modules/@npmcli/config/lib/index.js @@ -2,7 +2,7 @@ const { walkUp } = require('walk-up-path') const ini = require('ini') const nopt = require('nopt') -const log = require('proc-log') +const { log, time } = require('proc-log') const { resolve, dirname, join } = require('node:path') const { homedir } = require('node:os') @@ -201,7 +201,7 @@ class Config { } // create the object for flat options passed to deps - process.emit('time', 'config:load:flatten') + const timeEnd = time.start('config:load:flatten') this.#flatOptions = {} // walk from least priority to highest for (const { data } of this.data.values()) { @@ 
-209,7 +209,7 @@ class Config { } this.#flatOptions.nodeBin = this.execPath this.#flatOptions.npmBin = this.npmBin - process.emit('timeEnd', 'config:load:flatten') + timeEnd() return this.#flatOptions } @@ -233,37 +233,24 @@ class Config { throw new Error('attempting to load npm config multiple times') } - process.emit('time', 'config:load') // first load the defaults, which sets the global prefix - process.emit('time', 'config:load:defaults') this.loadDefaults() - process.emit('timeEnd', 'config:load:defaults') // next load the builtin config, as this sets new effective defaults - process.emit('time', 'config:load:builtin') await this.loadBuiltinConfig() - process.emit('timeEnd', 'config:load:builtin') // cli and env are not async, and can set the prefix, relevant to project - process.emit('time', 'config:load:cli') this.loadCLI() - process.emit('timeEnd', 'config:load:cli') - process.emit('time', 'config:load:env') this.loadEnv() - process.emit('timeEnd', 'config:load:env') // next project config, which can affect userconfig location - process.emit('time', 'config:load:project') await this.loadProjectConfig() - process.emit('timeEnd', 'config:load:project') + // then user config, which can affect globalconfig location - process.emit('time', 'config:load:user') await this.loadUserConfig() - process.emit('timeEnd', 'config:load:user') + // last but not least, global config file - process.emit('time', 'config:load:global') await this.loadGlobalConfig() - process.emit('timeEnd', 'config:load:global') // set this before calling setEnvs, so that we don't have to share // private attributes, as that module also does a bunch of get operations @@ -272,11 +259,7 @@ class Config { // set proper globalPrefix now that everything is loaded this.globalPrefix = this.get('prefix') - process.emit('time', 'config:load:setEnvs') this.setEnvs() - process.emit('timeEnd', 'config:load:setEnvs') - - process.emit('timeEnd', 'config:load') } loadDefaults () { @@ -574,7 +557,7 @@ class Config { const k = envReplace(key, this.env) const v = this.parseField(value, k) if (where !== 'default') { - this.#checkDeprecated(k, where, obj, [key, value]) + this.#checkDeprecated(k) if (this.definitions[key]?.exclusive) { for (const exclusive of this.definitions[key].exclusive) { if (!this.isDefault(exclusive)) { @@ -588,7 +571,7 @@ class Config { } } - #checkDeprecated (key, where, obj, kv) { + #checkDeprecated (key) { // XXX(npm9+) make this throw an error if (this.deprecated[key]) { log.warn('config', key, this.deprecated[key]) @@ -601,8 +584,8 @@ class Config { } async #loadFile (file, type) { - process.emit('time', 'config:load:file:' + file) // only catch the error from readFile, not from the loadObject call + log.silly(`config:load:file:${file}`) await readFile(file, 'utf8').then( data => { const parsedConfig = ini.parse(data) @@ -615,7 +598,6 @@ class Config { }, er => this.#loadObject(null, type, file, er) ) - process.emit('timeEnd', 'config:load:file:' + file) } loadBuiltinConfig () { @@ -757,7 +739,7 @@ class Config { const iniData = ini.stringify(conf.raw).trim() + '\n' if (!iniData.trim()) { // ignore the unlink error (eg, if file doesn't exist) - await unlink(conf.source).catch(er => {}) + await unlink(conf.source).catch(() => {}) return } const dir = dirname(conf.source) @@ -792,12 +774,9 @@ class Config { this.delete(`${nerfed}:keyfile`, level) } - setCredentialsByURI (uri, { token, username, password, email, certfile, keyfile }) { + setCredentialsByURI (uri, { token, username, password, certfile, keyfile }) { 
const nerfed = nerfDart(uri) - // email is either provided, a top level key, or nothing - email = email || this.get('email', 'user') - // field that hasn't been used as documented for a LONG time, // and as of npm 7.10.0, isn't used at all. We just always // send auth if we have it, only to the URIs under the nerf dart. diff --git a/deps/npm/node_modules/@npmcli/config/package.json b/deps/npm/node_modules/@npmcli/config/package.json index 797c32f7ee4a63..8c59bc3ae3dff6 100644 --- a/deps/npm/node_modules/@npmcli/config/package.json +++ b/deps/npm/node_modules/@npmcli/config/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/config", - "version": "8.2.2", + "version": "8.3.1", "files": [ "bin/", "lib/" @@ -9,7 +9,7 @@ "description": "Configuration management for the npm cli", "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/config" }, "author": "GitHub Inc.", @@ -40,7 +40,7 @@ "ci-info": "^4.0.0", "ini": "^4.1.2", "nopt": "^7.0.0", - "proc-log": "^3.0.0", + "proc-log": "^4.2.0", "read-package-json-fast": "^3.0.2", "semver": "^7.3.5", "walk-up-path": "^3.0.1" diff --git a/deps/npm/node_modules/@npmcli/disparity-colors/lib/index.js b/deps/npm/node_modules/@npmcli/disparity-colors/lib/index.js deleted file mode 100644 index 3d2aa56be92536..00000000000000 --- a/deps/npm/node_modules/@npmcli/disparity-colors/lib/index.js +++ /dev/null @@ -1,34 +0,0 @@ -const ansi = require('ansi-styles') - -const colors = { - removed: ansi.red, - added: ansi.green, - header: ansi.yellow, - section: ansi.magenta, -} - -function colorize (str, opts) { - let headerLength = (opts || {}).headerLength - if (typeof headerLength !== 'number' || Number.isNaN(headerLength)) { - headerLength = 2 - } - - const color = (colorStr, colorId) => { - const { open, close } = colors[colorId] - // avoid highlighting the "\n" (would highlight till the end of the line) - return colorStr.replace(/[^\n\r]+/g, open + '$&' + close) - } - - // this RegExp will include all the `\n` chars into the lines, easier to join - const lines = ((typeof str === 'string' && str) || '').split(/^/m) - - const start = color(lines.slice(0, headerLength).join(''), 'header') - const end = lines.slice(headerLength).join('') - .replace(/^-.*/gm, color('$&', 'removed')) - .replace(/^\+.*/gm, color('$&', 'added')) - .replace(/^@@.+@@/gm, color('$&', 'section')) - - return start + end -} - -module.exports = colorize diff --git a/deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/index.js b/deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/index.js deleted file mode 100644 index 5d82581a13f990..00000000000000 --- a/deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/index.js +++ /dev/null @@ -1,163 +0,0 @@ -'use strict'; - -const wrapAnsi16 = (fn, offset) => (...args) => { - const code = fn(...args); - return `\u001B[${code + offset}m`; -}; - -const wrapAnsi256 = (fn, offset) => (...args) => { - const code = fn(...args); - return `\u001B[${38 + offset};5;${code}m`; -}; - -const wrapAnsi16m = (fn, offset) => (...args) => { - const rgb = fn(...args); - return `\u001B[${38 + offset};2;${rgb[0]};${rgb[1]};${rgb[2]}m`; -}; - -const ansi2ansi = n => n; -const rgb2rgb = (r, g, b) => [r, g, b]; - -const setLazyProperty = (object, property, get) => { - Object.defineProperty(object, property, { - get: () => { - const value = get(); - - Object.defineProperty(object, property, { - value, - enumerable: true, - configurable: 
true - }); - - return value; - }, - enumerable: true, - configurable: true - }); -}; - -/** @type {typeof import('color-convert')} */ -let colorConvert; -const makeDynamicStyles = (wrap, targetSpace, identity, isBackground) => { - if (colorConvert === undefined) { - colorConvert = require('color-convert'); - } - - const offset = isBackground ? 10 : 0; - const styles = {}; - - for (const [sourceSpace, suite] of Object.entries(colorConvert)) { - const name = sourceSpace === 'ansi16' ? 'ansi' : sourceSpace; - if (sourceSpace === targetSpace) { - styles[name] = wrap(identity, offset); - } else if (typeof suite === 'object') { - styles[name] = wrap(suite[targetSpace], offset); - } - } - - return styles; -}; - -function assembleStyles() { - const codes = new Map(); - const styles = { - modifier: { - reset: [0, 0], - // 21 isn't widely supported and 22 does the same thing - bold: [1, 22], - dim: [2, 22], - italic: [3, 23], - underline: [4, 24], - inverse: [7, 27], - hidden: [8, 28], - strikethrough: [9, 29] - }, - color: { - black: [30, 39], - red: [31, 39], - green: [32, 39], - yellow: [33, 39], - blue: [34, 39], - magenta: [35, 39], - cyan: [36, 39], - white: [37, 39], - - // Bright color - blackBright: [90, 39], - redBright: [91, 39], - greenBright: [92, 39], - yellowBright: [93, 39], - blueBright: [94, 39], - magentaBright: [95, 39], - cyanBright: [96, 39], - whiteBright: [97, 39] - }, - bgColor: { - bgBlack: [40, 49], - bgRed: [41, 49], - bgGreen: [42, 49], - bgYellow: [43, 49], - bgBlue: [44, 49], - bgMagenta: [45, 49], - bgCyan: [46, 49], - bgWhite: [47, 49], - - // Bright color - bgBlackBright: [100, 49], - bgRedBright: [101, 49], - bgGreenBright: [102, 49], - bgYellowBright: [103, 49], - bgBlueBright: [104, 49], - bgMagentaBright: [105, 49], - bgCyanBright: [106, 49], - bgWhiteBright: [107, 49] - } - }; - - // Alias bright black as gray (and grey) - styles.color.gray = styles.color.blackBright; - styles.bgColor.bgGray = styles.bgColor.bgBlackBright; - styles.color.grey = styles.color.blackBright; - styles.bgColor.bgGrey = styles.bgColor.bgBlackBright; - - for (const [groupName, group] of Object.entries(styles)) { - for (const [styleName, style] of Object.entries(group)) { - styles[styleName] = { - open: `\u001B[${style[0]}m`, - close: `\u001B[${style[1]}m` - }; - - group[styleName] = styles[styleName]; - - codes.set(style[0], style[1]); - } - - Object.defineProperty(styles, groupName, { - value: group, - enumerable: false - }); - } - - Object.defineProperty(styles, 'codes', { - value: codes, - enumerable: false - }); - - styles.color.close = '\u001B[39m'; - styles.bgColor.close = '\u001B[49m'; - - setLazyProperty(styles.color, 'ansi', () => makeDynamicStyles(wrapAnsi16, 'ansi16', ansi2ansi, false)); - setLazyProperty(styles.color, 'ansi256', () => makeDynamicStyles(wrapAnsi256, 'ansi256', ansi2ansi, false)); - setLazyProperty(styles.color, 'ansi16m', () => makeDynamicStyles(wrapAnsi16m, 'rgb', rgb2rgb, false)); - setLazyProperty(styles.bgColor, 'ansi', () => makeDynamicStyles(wrapAnsi16, 'ansi16', ansi2ansi, true)); - setLazyProperty(styles.bgColor, 'ansi256', () => makeDynamicStyles(wrapAnsi256, 'ansi256', ansi2ansi, true)); - setLazyProperty(styles.bgColor, 'ansi16m', () => makeDynamicStyles(wrapAnsi16m, 'rgb', rgb2rgb, true)); - - return styles; -} - -// Make the export immutable -Object.defineProperty(module, 'exports', { - enumerable: true, - get: assembleStyles -}); diff --git a/deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/license 
b/deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/license deleted file mode 100644 index e7af2f77107d73..00000000000000 --- a/deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/license +++ /dev/null @@ -1,9 +0,0 @@ -MIT License - -Copyright (c) Sindre Sorhus (sindresorhus.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/package.json b/deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/package.json deleted file mode 100644 index 75393284d7e474..00000000000000 --- a/deps/npm/node_modules/@npmcli/disparity-colors/node_modules/ansi-styles/package.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "name": "ansi-styles", - "version": "4.3.0", - "description": "ANSI escape codes for styling strings in the terminal", - "license": "MIT", - "repository": "chalk/ansi-styles", - "funding": "https://github.com/chalk/ansi-styles?sponsor=1", - "author": { - "name": "Sindre Sorhus", - "email": "sindresorhus@gmail.com", - "url": "sindresorhus.com" - }, - "engines": { - "node": ">=8" - }, - "scripts": { - "test": "xo && ava && tsd", - "screenshot": "svg-term --command='node screenshot' --out=screenshot.svg --padding=3 --width=55 --height=3 --at=1000 --no-cursor" - }, - "files": [ - "index.js", - "index.d.ts" - ], - "keywords": [ - "ansi", - "styles", - "color", - "colour", - "colors", - "terminal", - "console", - "cli", - "string", - "tty", - "escape", - "formatting", - "rgb", - "256", - "shell", - "xterm", - "log", - "logging", - "command-line", - "text" - ], - "dependencies": { - "color-convert": "^2.0.1" - }, - "devDependencies": { - "@types/color-convert": "^1.9.0", - "ava": "^2.3.0", - "svg-term-cli": "^2.1.1", - "tsd": "^0.11.0", - "xo": "^0.25.3" - } -} diff --git a/deps/npm/node_modules/@npmcli/disparity-colors/package.json b/deps/npm/node_modules/@npmcli/disparity-colors/package.json deleted file mode 100644 index 17eb4846c353c4..00000000000000 --- a/deps/npm/node_modules/@npmcli/disparity-colors/package.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "name": "@npmcli/disparity-colors", - "version": "3.0.0", - "main": "lib/index.js", - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "description": "Colorizes unified diff output", - "repository": { - "type": "git", - "url": "https://github.com/npm/disparity-colors.git" - }, - "keywords": [ - "disparity", - "npm", - "npmcli", - "diff", - "char", - "unified", - "multiline", - "string", - "color", - 
"ansi", - "terminal", - "cli", - "tty" - ], - "author": "GitHub Inc.", - "contributors": [ - { - "name": "Ruy Adorno", - "url": "https://ruyadorno.com", - "twitter": "ruyadorno" - } - ], - "license": "ISC", - "scripts": { - "lint": "eslint \"**/*.js\"", - "pretest": "npm run lint", - "test": "tap", - "snap": "tap", - "postlint": "template-oss-check", - "template-oss-apply": "template-oss-apply --force", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint" - }, - "tap": { - "check-coverage": true, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "devDependencies": { - "@npmcli/eslint-config": "^3.0.1", - "@npmcli/template-oss": "4.5.1", - "tap": "^16.0.1" - }, - "dependencies": { - "ansi-styles": "^4.3.0" - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.5.1" - } -} diff --git a/deps/npm/node_modules/@npmcli/git/lib/spawn.js b/deps/npm/node_modules/@npmcli/git/lib/spawn.js index 5e96eb5542b5a6..03c1cbde215477 100644 --- a/deps/npm/node_modules/@npmcli/git/lib/spawn.js +++ b/deps/npm/node_modules/@npmcli/git/lib/spawn.js @@ -1,6 +1,6 @@ const spawn = require('@npmcli/promise-spawn') const promiseRetry = require('promise-retry') -const log = require('proc-log') +const { log } = require('proc-log') const makeError = require('./make-error.js') const makeOpts = require('./opts.js') diff --git a/deps/npm/node_modules/@npmcli/git/package.json b/deps/npm/node_modules/@npmcli/git/package.json index 7493ec7fb0effb..f7117f13a9399c 100644 --- a/deps/npm/node_modules/@npmcli/git/package.json +++ b/deps/npm/node_modules/@npmcli/git/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/git", - "version": "5.0.5", + "version": "5.0.6", "main": "lib/index.js", "files": [ "bin/", @@ -40,7 +40,7 @@ "@npmcli/promise-spawn": "^7.0.0", "lru-cache": "^10.0.1", "npm-pick-manifest": "^9.0.0", - "proc-log": "^3.0.0", + "proc-log": "^4.0.0", "promise-inflight": "^1.0.1", "promise-retry": "^2.0.1", "semver": "^7.3.5", diff --git a/deps/npm/node_modules/@npmcli/installed-package-contents/bin/index.js b/deps/npm/node_modules/@npmcli/installed-package-contents/bin/index.js new file mode 100755 index 00000000000000..7b83b23bf168c0 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/installed-package-contents/bin/index.js @@ -0,0 +1,44 @@ +#! /usr/bin/env node + +const { relative } = require('path') +const pkgContents = require('../') + +const usage = `Usage: + installed-package-contents [-d --depth=] + +Lists the files installed for a package specified by . + +Options: + -d --depth= Provide a numeric value ("Infinity" is allowed) + to specify how deep in the file tree to traverse. 
+ Default=1 + -h --help Show this usage information` + +const options = {} + +process.argv.slice(2).forEach(arg => { + let match + if ((match = arg.match(/^(?:--depth=|-d)([0-9]+|Infinity)/))) { + options.depth = +match[1] + } else if (arg === '-h' || arg === '--help') { + console.log(usage) + process.exit(0) + } else { + options.path = arg + } +}) + +if (!options.path) { + console.error('ERROR: no path provided') + console.error(usage) + process.exit(1) +} + +const cwd = process.cwd() + +pkgContents(options) + .then(list => list.sort().forEach(p => console.log(relative(cwd, p)))) + .catch(/* istanbul ignore next - pretty unusual */ er => { + console.error(er) + process.exit(1) + }) diff --git a/deps/npm/node_modules/@npmcli/installed-package-contents/lib/index.js b/deps/npm/node_modules/@npmcli/installed-package-contents/lib/index.js old mode 100755 new mode 100644 index 20b25c4bc8437d..ab1486cd01d003 --- a/deps/npm/node_modules/@npmcli/installed-package-contents/lib/index.js +++ b/deps/npm/node_modules/@npmcli/installed-package-contents/lib/index.js @@ -1,5 +1,3 @@ -#! /usr/bin/env node - // to GET CONTENTS for folder at PATH (which may be a PACKAGE): // - if PACKAGE, read path/package.json // - if bins in ../node_modules/.bin, add those to result @@ -19,53 +17,46 @@ // - add GET CONTENTS of bundled deps, PACKAGE=true, depth + 1 const bundled = require('npm-bundled') -const { promisify } = require('util') -const fs = require('fs') -const readFile = promisify(fs.readFile) -const readdir = promisify(fs.readdir) -const stat = promisify(fs.stat) -const lstat = promisify(fs.lstat) -const { relative, resolve, basename, dirname } = require('path') +const { readFile, readdir, stat } = require('fs/promises') +const { resolve, basename, dirname } = require('path') const normalizePackageBin = require('npm-normalize-package-bin') -const readPackage = ({ path, packageJsonCache }) => - packageJsonCache.has(path) ? Promise.resolve(packageJsonCache.get(path)) +const readPackage = ({ path, packageJsonCache }) => packageJsonCache.has(path) + ? Promise.resolve(packageJsonCache.get(path)) : readFile(path).then(json => { const pkg = normalizePackageBin(JSON.parse(json)) packageJsonCache.set(path, pkg) return pkg - }) - .catch(er => null) + }).catch(() => null) // just normalize bundle deps and bin, that's all we care about here. 
const normalized = Symbol('package data has been normalized') -const rpj = ({ path, packageJsonCache }) => - readPackage({ path, packageJsonCache }) - .then(pkg => { - if (!pkg || pkg[normalized]) { - return pkg - } - if (pkg.bundledDependencies && !pkg.bundleDependencies) { - pkg.bundleDependencies = pkg.bundledDependencies - delete pkg.bundledDependencies - } - const bd = pkg.bundleDependencies - if (bd === true) { - pkg.bundleDependencies = [ - ...Object.keys(pkg.dependencies || {}), - ...Object.keys(pkg.optionalDependencies || {}), - ] - } - if (typeof bd === 'object' && !Array.isArray(bd)) { - pkg.bundleDependencies = Object.keys(bd) - } - pkg[normalized] = true +const rpj = ({ path, packageJsonCache }) => readPackage({ path, packageJsonCache }) + .then(pkg => { + if (!pkg || pkg[normalized]) { return pkg - }) + } + if (pkg.bundledDependencies && !pkg.bundleDependencies) { + pkg.bundleDependencies = pkg.bundledDependencies + delete pkg.bundledDependencies + } + const bd = pkg.bundleDependencies + if (bd === true) { + pkg.bundleDependencies = [ + ...Object.keys(pkg.dependencies || {}), + ...Object.keys(pkg.optionalDependencies || {}), + ] + } + if (typeof bd === 'object' && !Array.isArray(bd)) { + pkg.bundleDependencies = Object.keys(bd) + } + pkg[normalized] = true + return pkg + }) const pkgContents = async ({ path, - depth, + depth = 1, currentDepth = 0, pkg = null, result = null, @@ -105,7 +96,7 @@ const pkgContents = async ({ }) const bins = await Promise.all( - binFiles.map(b => stat(b).then(() => b).catch((er) => null)) + binFiles.map(b => stat(b).then(() => b).catch(() => null)) ) bins.filter(b => b).forEach(b => result.add(b)) } @@ -136,18 +127,6 @@ const pkgContents = async ({ const recursePromises = [] - // if we didn't get withFileTypes support, tack that on - if (typeof dirEntries[0] === 'string') { - // use a map so we can return a promise, but we mutate dirEntries in place - // this is much slower than getting the entries from the readdir call, - // but polyfills support for node versions before 10.10 - await Promise.all(dirEntries.map(async (name, index) => { - const p = resolve(path, name) - const st = await lstat(p) - dirEntries[index] = Object.assign(st, { name }) - })) - } - for (const entry of dirEntries) { const p = resolve(path, entry.name) if (entry.isDirectory() === false) { @@ -195,48 +174,8 @@ const pkgContents = async ({ return result } -module.exports = ({ path, depth = 1, packageJsonCache }) => pkgContents({ +module.exports = ({ path, ...opts }) => pkgContents({ path: resolve(path), - depth, + ...opts, pkg: true, - packageJsonCache, }).then(results => [...results]) - -if (require.main === module) { - const options = { path: null, depth: 1 } - const usage = `Usage: - installed-package-contents [-d --depth=] - -Lists the files installed for a package specified by . - -Options: - -d --depth= Provide a numeric value ("Infinity" is allowed) - to specify how deep in the file tree to traverse. 
- Default=1 - -h --help Show this usage information` - - process.argv.slice(2).forEach(arg => { - let match - if ((match = arg.match(/^--depth=([0-9]+|Infinity)/)) || - (match = arg.match(/^-d([0-9]+|Infinity)/))) { - options.depth = +match[1] - } else if (arg === '-h' || arg === '--help') { - console.log(usage) - process.exit(0) - } else { - options.path = arg - } - }) - if (!options.path) { - console.error('ERROR: no path provided') - console.error(usage) - process.exit(1) - } - const cwd = process.cwd() - module.exports(options) - .then(list => list.sort().forEach(p => console.log(relative(cwd, p)))) - .catch(/* istanbul ignore next - pretty unusual */ er => { - console.error(er) - process.exit(1) - }) -} diff --git a/deps/npm/node_modules/@npmcli/installed-package-contents/package.json b/deps/npm/node_modules/@npmcli/installed-package-contents/package.json index 3554754123e618..132256430a6c18 100644 --- a/deps/npm/node_modules/@npmcli/installed-package-contents/package.json +++ b/deps/npm/node_modules/@npmcli/installed-package-contents/package.json @@ -1,17 +1,17 @@ { "name": "@npmcli/installed-package-contents", - "version": "2.0.2", + "version": "2.1.0", "description": "Get the list of files installed in a package in node_modules, including bundled dependencies", "author": "GitHub Inc.", "main": "lib/index.js", "bin": { - "installed-package-contents": "lib/index.js" + "installed-package-contents": "bin/index.js" }, "license": "ISC", "scripts": { "test": "tap", "snap": "tap", - "lint": "eslint \"**/*.js\"", + "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "postlint": "template-oss-check", "template-oss-apply": "template-oss-apply --force", "lintfix": "npm run lint -- --fix", @@ -19,8 +19,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.11.4", - "require-inject": "^1.4.4", + "@npmcli/template-oss": "4.21.4", "tap": "^16.3.0" }, "dependencies": { @@ -40,7 +39,8 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.11.4" + "version": "4.21.4", + "publish": true }, "tap": { "nyc-arg": [ diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/lib/index.js b/deps/npm/node_modules/@npmcli/metavuln-calculator/lib/index.js index 668f55942c5065..2a4e08395e7edd 100644 --- a/deps/npm/node_modules/@npmcli/metavuln-calculator/lib/index.js +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/lib/index.js @@ -3,6 +3,7 @@ // class handles all the IO with the registry and cache. 
const pacote = require('pacote') const cacache = require('cacache') +const { time } = require('proc-log') const Advisory = require('./advisory.js') const { homedir } = require('os') const jsonParse = require('json-parse-even-better-errors') @@ -48,34 +49,33 @@ class Calculator { async [_calculate] (name, source) { const k = `security-advisory:${name}:${source.id}` - const t = `metavuln:calculate:${k}` - process.emit('time', t) + const timeEnd = time.start(`metavuln:calculate:${k}`) const advisory = new Advisory(name, source, this[_options]) // load packument and cached advisory const [cached, packument] = await Promise.all([ this[_cacheGet](advisory), this[_packument](name), ]) - process.emit('time', `metavuln:load:${k}`) + const timeEndLoad = time.start(`metavuln:load:${k}`) advisory.load(cached, packument) - process.emit('timeEnd', `metavuln:load:${k}`) + timeEndLoad() if (advisory.updated) { await this[_cachePut](advisory) } this[_advisories].set(k, advisory) - process.emit('timeEnd', t) + timeEnd() return advisory } async [_cachePut] (advisory) { const { name, id } = advisory const key = `security-advisory:${name}:${id}` - process.emit('time', `metavuln:cache:put:${key}`) + const timeEnd = time.start(`metavuln:cache:put:${key}`) const data = JSON.stringify(advisory) const options = { ...this[_options] } this[_cacheData].set(key, jsonParse(data)) await cacache.put(this[_cache], key, data, options).catch(() => {}) - process.emit('timeEnd', `metavuln:cache:put:${key}`) + timeEnd() } async [_cacheGet] (advisory) { @@ -87,12 +87,12 @@ class Calculator { return this[_cacheData].get(key) } - process.emit('time', `metavuln:cache:get:${key}`) + const timeEnd = time.start(`metavuln:cache:get:${key}`) const p = cacache.get(this[_cache], key, { ...this[_options] }) .catch(() => ({ data: '{}' })) .then(({ data }) => { data = jsonParse(data) - process.emit('timeEnd', `metavuln:cache:get:${key}`) + timeEnd() this[_cacheData].set(key, data) return data }) @@ -105,7 +105,7 @@ class Calculator { return this[_packuments].get(name) } - process.emit('time', `metavuln:packument:${name}`) + const timeEnd = time.start(`metavuln:packument:${name}`) const p = pacote.packument(name, { ...this[_options] }) .catch((er) => { // presumably not something from the registry. 
@@ -116,7 +116,7 @@ class Calculator { } }) .then(paku => { - process.emit('timeEnd', `metavuln:packument:${name}`) + timeEnd() this[_packuments].set(name, paku) return paku }) diff --git a/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json b/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json index 4d0af031d54148..74c23ad62bd7a5 100644 --- a/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json +++ b/deps/npm/node_modules/@npmcli/metavuln-calculator/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/metavuln-calculator", - "version": "7.0.0", + "version": "7.1.0", "main": "lib/index.js", "files": [ "bin/", @@ -19,7 +19,7 @@ "snap": "tap", "postsnap": "npm run lint", "eslint": "eslint", - "lint": "eslint \"**/*.js\"", + "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "lintfix": "npm run lint -- --fix", "postlint": "template-oss-check", "template-oss-apply": "template-oss-apply --force" @@ -34,14 +34,15 @@ }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.18.0", + "@npmcli/template-oss": "4.21.4", "require-inject": "^1.4.4", "tap": "^16.0.1" }, "dependencies": { "cacache": "^18.0.0", "json-parse-even-better-errors": "^3.0.0", - "pacote": "^17.0.0", + "pacote": "^18.0.0", + "proc-log": "^4.1.0", "semver": "^7.3.5" }, "engines": { @@ -49,7 +50,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.18.0", + "version": "4.21.4", "publish": "true", "ciVersions": [ "16.14.0", diff --git a/deps/npm/node_modules/@npmcli/package-json/lib/index.js b/deps/npm/node_modules/@npmcli/package-json/lib/index.js index 0cc41c685a39e7..6d1b760727ba60 100644 --- a/deps/npm/node_modules/@npmcli/package-json/lib/index.js +++ b/deps/npm/node_modules/@npmcli/package-json/lib/index.js @@ -167,6 +167,12 @@ class PackageJson { return this } + fromContent (data) { + this.#manifest = data + this.#canSave = false + return this + } + // Load data from a comment // /**package { "name": "foo", "version": "1.2.3", ... 
} **/ fromComment (data) { diff --git a/deps/npm/node_modules/@npmcli/package-json/lib/normalize.js b/deps/npm/node_modules/@npmcli/package-json/lib/normalize.js index 350b3f3d7cb8f0..e3b37984884279 100644 --- a/deps/npm/node_modules/@npmcli/package-json/lib/normalize.js +++ b/deps/npm/node_modules/@npmcli/package-json/lib/normalize.js @@ -2,7 +2,7 @@ const valid = require('semver/functions/valid') const clean = require('semver/functions/clean') const fs = require('fs/promises') const path = require('path') -const log = require('proc-log') +const { log } = require('proc-log') /** * @type {import('hosted-git-info')} diff --git a/deps/npm/node_modules/@npmcli/package-json/package.json b/deps/npm/node_modules/@npmcli/package-json/package.json index 4f7a29d2e4c597..98236f604ecbd7 100644 --- a/deps/npm/node_modules/@npmcli/package-json/package.json +++ b/deps/npm/node_modules/@npmcli/package-json/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/package-json", - "version": "5.0.2", + "version": "5.1.0", "description": "Programmatic API to update package.json", "main": "lib/index.js", "files": [ @@ -25,7 +25,7 @@ "license": "ISC", "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", + "@npmcli/template-oss": "4.21.4", "read-package-json": "^7.0.0", "read-package-json-fast": "^3.0.2", "tap": "^16.0.1" @@ -36,7 +36,7 @@ "hosted-git-info": "^7.0.0", "json-parse-even-better-errors": "^3.0.0", "normalize-package-data": "^6.0.0", - "proc-log": "^3.0.0", + "proc-log": "^4.0.0", "semver": "^7.5.3" }, "repository": { @@ -48,7 +48,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", + "version": "4.21.4", "publish": "true" }, "tap": { diff --git a/deps/npm/node_modules/@npmcli/redact/lib/deep-map.js b/deps/npm/node_modules/@npmcli/redact/lib/deep-map.js new file mode 100644 index 00000000000000..ad042dbdfc5341 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/redact/lib/deep-map.js @@ -0,0 +1,59 @@ +const deepMap = (input, handler = v => v, path = ['$'], seen = new Set([input])) => { + if (Array.isArray(input)) { + const result = [] + for (let i = 0; i < input.length; i++) { + const element = input[i] + const elementPath = [...path, i] + if (element instanceof Object) { + if (!seen.has(element)) { // avoid getting stuck in circular reference + seen.add(element) + result.push(deepMap(handler(element, elementPath), handler, elementPath, seen)) + } + } else { + result.push(handler(element, elementPath)) + } + } + return result + } + + if (input === null) { + return null + } else if (typeof input === 'object' || typeof input === 'function') { + const result = {} + + if (input instanceof Error) { + // `name` property is not included in `Object.getOwnPropertyNames(error)` + result.errorType = input.name + } + + for (const propertyName of Object.getOwnPropertyNames(input)) { + // skip logging internal properties + if (propertyName.startsWith('_')) { + continue + } + + try { + const property = input[propertyName] + const propertyPath = [...path, propertyName] + if (property instanceof Object) { + if (!seen.has(property)) { // avoid getting stuck in circular reference + seen.add(property) + result[propertyName] = deepMap( + handler(property, propertyPath), handler, propertyPath, seen + ) + } + } else { + result[propertyName] = handler(property, propertyPath) + } + } catch (err) { + // a getter may throw an error + result[propertyName] = `[error getting value: 
${err.message}]` + } + } + return result + } + + return handler(input, path) +} + +module.exports = { deepMap } diff --git a/deps/npm/node_modules/@npmcli/redact/lib/index.js b/deps/npm/node_modules/@npmcli/redact/lib/index.js index e5b5e74157c2a3..9b10c7f6a0081d 100644 --- a/deps/npm/node_modules/@npmcli/redact/lib/index.js +++ b/deps/npm/node_modules/@npmcli/redact/lib/index.js @@ -1,29 +1,15 @@ -const { URL } = require('url') +const matchers = require('./matchers') +const { redactUrlPassword } = require('./utils') const REPLACE = '***' -const TOKEN_REGEX = /\bnpm_[a-zA-Z0-9]{36}\b/g -const GUID_REGEX = /\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b/g const redact = (value) => { if (typeof value !== 'string' || !value) { return value } - - let urlValue - try { - urlValue = new URL(value) - } catch { - // If it's not a URL then we can ignore all errors - } - - if (urlValue?.password) { - urlValue.password = REPLACE - value = urlValue.toString() - } - - return value - .replace(TOKEN_REGEX, `npm_${REPLACE}`) - .replace(GUID_REGEX, REPLACE) + return redactUrlPassword(value, REPLACE) + .replace(matchers.NPM_SECRET.pattern, `npm_${REPLACE}`) + .replace(matchers.UUID.pattern, REPLACE) } // split on \s|= similar to how nopt parses options @@ -49,7 +35,6 @@ const redactLog = (arg) => { } else if (Array.isArray(arg)) { return arg.map((a) => typeof a === 'string' ? splitAndRedact(a) : a) } - return arg } diff --git a/deps/npm/node_modules/@npmcli/redact/lib/matchers.js b/deps/npm/node_modules/@npmcli/redact/lib/matchers.js new file mode 100644 index 00000000000000..fe9b9071de8a16 --- /dev/null +++ b/deps/npm/node_modules/@npmcli/redact/lib/matchers.js @@ -0,0 +1,81 @@ +const TYPE_REGEX = 'regex' +const TYPE_URL = 'url' +const TYPE_PATH = 'path' + +const NPM_SECRET = { + type: TYPE_REGEX, + pattern: /\b(npms?_)[a-zA-Z0-9]{36,48}\b/gi, + replacement: `[REDACTED_NPM_SECRET]`, +} + +const AUTH_HEADER = { + type: TYPE_REGEX, + pattern: /\b(Basic\s+|Bearer\s+)[\w+=\-.]+\b/gi, + replacement: `[REDACTED_AUTH_HEADER]`, +} + +const JSON_WEB_TOKEN = { + type: TYPE_REGEX, + pattern: /\b[A-Za-z0-9-_]{10,}(?!\.\d+\.)\.[A-Za-z0-9-_]{3,}\.[A-Za-z0-9-_]{20,}\b/gi, + replacement: `[REDACTED_JSON_WEB_TOKEN]`, +} + +const UUID = { + type: TYPE_REGEX, + pattern: /\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b/gi, + replacement: `[REDACTED_UUID]`, +} + +const URL_MATCHER = { + type: TYPE_REGEX, + pattern: /(?:https?|ftp):\/\/[^\s/"$.?#].[^\s"]*/gi, + replacement: '[REDACTED_URL]', +} + +const DEEP_HEADER_AUTHORIZATION = { + type: TYPE_PATH, + predicate: ({ path }) => path.endsWith('.headers.authorization'), + replacement: '[REDACTED_HEADER_AUTHORIZATION]', +} + +const DEEP_HEADER_SET_COOKIE = { + type: TYPE_PATH, + predicate: ({ path }) => path.endsWith('.headers.set-cookie'), + replacement: '[REDACTED_HEADER_SET_COOKIE]', +} + +const REWRITE_REQUEST = { + type: TYPE_PATH, + predicate: ({ path }) => path.endsWith('.request'), + replacement: (input) => ({ + method: input?.method, + path: input?.path, + headers: input?.headers, + url: input?.url, + }), +} + +const REWRITE_RESPONSE = { + type: TYPE_PATH, + predicate: ({ path }) => path.endsWith('.response'), + replacement: (input) => ({ + data: input?.data, + status: input?.status, + headers: input?.headers, + }), +} + +module.exports = { + TYPE_REGEX, + TYPE_URL, + TYPE_PATH, + NPM_SECRET, + AUTH_HEADER, + JSON_WEB_TOKEN, + UUID, + URL_MATCHER, + DEEP_HEADER_AUTHORIZATION, + DEEP_HEADER_SET_COOKIE, + REWRITE_REQUEST, + 
REWRITE_RESPONSE, +} diff --git a/deps/npm/node_modules/@npmcli/redact/lib/server.js b/deps/npm/node_modules/@npmcli/redact/lib/server.js new file mode 100644 index 00000000000000..669e834da6131d --- /dev/null +++ b/deps/npm/node_modules/@npmcli/redact/lib/server.js @@ -0,0 +1,34 @@ +const { + AUTH_HEADER, + JSON_WEB_TOKEN, + NPM_SECRET, + DEEP_HEADER_AUTHORIZATION, + DEEP_HEADER_SET_COOKIE, + REWRITE_REQUEST, + REWRITE_RESPONSE, +} = require('./matchers') + +const { + redactUrlMatcher, + redactUrlPasswordMatcher, + redactMatchers, +} = require('./utils') + +const { deepMap } = require('./deep-map') + +const _redact = redactMatchers( + NPM_SECRET, + AUTH_HEADER, + JSON_WEB_TOKEN, + DEEP_HEADER_AUTHORIZATION, + DEEP_HEADER_SET_COOKIE, + REWRITE_REQUEST, + REWRITE_RESPONSE, + redactUrlMatcher( + redactUrlPasswordMatcher() + ) +) + +const redact = (input) => deepMap(input, (value, path) => _redact(value, { path })) + +module.exports = { redact } diff --git a/deps/npm/node_modules/@npmcli/redact/lib/utils.js b/deps/npm/node_modules/@npmcli/redact/lib/utils.js new file mode 100644 index 00000000000000..8395ab25fc373e --- /dev/null +++ b/deps/npm/node_modules/@npmcli/redact/lib/utils.js @@ -0,0 +1,202 @@ +const { + URL_MATCHER, + TYPE_URL, + TYPE_REGEX, + TYPE_PATH, +} = require('./matchers') + +/** + * creates a string of asterisks, + * this forces a minimum asterisk for security purposes + */ +const asterisk = (length = 0) => { + length = typeof length === 'string' ? length.length : length + if (length < 8) { + return '*'.repeat(8) + } + return '*'.repeat(length) +} + +/** + * escapes all special regex chars + * @see https://stackoverflow.com/a/9310752 + * @see https://github.com/tc39/proposal-regex-escaping + */ +const escapeRegExp = (text) => { + return text.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, `\\$&`) +} + +/** + * provieds a regex "or" of the url versions of a string + */ +const urlEncodeRegexGroup = (value) => { + const decoded = decodeURIComponent(value) + const encoded = encodeURIComponent(value) + const union = [...new Set([encoded, decoded, value])].map(escapeRegExp).join('|') + return union +} + +/** + * a tagged template literal that returns a regex ensures all variables are excaped + */ +const urlEncodeRegexTag = (strings, ...values) => { + let pattern = '' + for (let i = 0; i < values.length; i++) { + pattern += strings[i] + `(${urlEncodeRegexGroup(values[i])})` + } + pattern += strings[strings.length - 1] + return new RegExp(pattern) +} + +/** + * creates a matcher for redacting url hostname + */ +const redactUrlHostnameMatcher = ({ hostname, replacement } = {}) => ({ + type: TYPE_URL, + predicate: ({ url }) => url.hostname === hostname, + pattern: ({ url }) => { + return urlEncodeRegexTag`(^${url.protocol}//${url.username}:.+@)?${url.hostname}` + }, + replacement: `$1${replacement || asterisk()}`, +}) + +/** + * creates a matcher for redacting url search / query parameter values + */ +const redactUrlSearchParamsMatcher = ({ param, replacement } = {}) => ({ + type: TYPE_URL, + predicate: ({ url }) => url.searchParams.has(param), + pattern: ({ url }) => urlEncodeRegexTag`(${param}=)${url.searchParams.get(param)}`, + replacement: `$1${replacement || asterisk()}`, +}) + +/** creates a matcher for redacting the url password */ +const redactUrlPasswordMatcher = ({ replacement } = {}) => ({ + type: TYPE_URL, + predicate: ({ url }) => url.password, + pattern: ({ url }) => urlEncodeRegexTag`(^${url.protocol}//${url.username}:)${url.password}`, + replacement: `$1${replacement || 
asterisk()}`, +}) + +const redactUrlReplacement = (...matchers) => (subValue) => { + try { + const url = new URL(subValue) + return redactMatchers(...matchers)(subValue, { url }) + } catch (err) { + return subValue + } +} + +/** + * creates a matcher / submatcher for urls, this function allows you to first + * collect all urls within a larger string and then pass those urls to a + * submatcher + * + * @example + * console.log("this will first match all urls, then pass those urls to the password patcher") + * redactMatchers(redactUrlMatcher(redactUrlPasswordMatcher())) + * + * @example + * console.log( + * "this will assume you are passing in a string that is a url, and will redact the password" + * ) + * redactMatchers(redactUrlPasswordMatcher()) + * + */ +const redactUrlMatcher = (...matchers) => { + return { + ...URL_MATCHER, + replacement: redactUrlReplacement(...matchers), + } +} + +const matcherFunctions = { + [TYPE_REGEX]: (matcher) => (value) => { + if (typeof value === 'string') { + value = value.replace(matcher.pattern, matcher.replacement) + } + return value + }, + [TYPE_URL]: (matcher) => (value, ctx) => { + if (typeof value === 'string') { + try { + const url = ctx?.url || new URL(value) + const { predicate, pattern } = matcher + const predicateValue = predicate({ url }) + if (predicateValue) { + value = value.replace(pattern({ url }), matcher.replacement) + } + } catch (_e) { + return value + } + } + return value + }, + [TYPE_PATH]: (matcher) => (value, ctx) => { + const rawPath = ctx?.path + const path = rawPath.join('.').toLowerCase() + const { predicate, replacement } = matcher + const replace = typeof replacement === 'function' ? replacement : () => replacement + const shouldRun = predicate({ rawPath, path }) + if (shouldRun) { + value = replace(value, { rawPath, path }) + } + return value + }, +} + +/** converts a matcher to a function */ +const redactMatcher = (matcher) => { + return matcherFunctions[matcher.type](matcher) +} + +/** converts a series of matchers to a function */ +const redactMatchers = (...matchers) => (value, ctx) => { + const flatMatchers = matchers.flat() + return flatMatchers.reduce((result, matcher) => { + const fn = (typeof matcher === 'function') ? 
matcher : redactMatcher(matcher) + return fn(result, ctx) + }, value) +} + +/** + * replacement handler, keeping $1 (if it exists) and replacing the + * rest of the string with asterisks, maintaining string length + */ +const redactDynamicReplacement = () => (value, start) => { + if (typeof start === 'number') { + return asterisk(value) + } + return start + asterisk(value.substring(start.length).length) +} + +/** + * replacement handler, keeping $1 (if it exists) and replacing the + * rest of the string with a fixed number of asterisks + */ +const redactFixedReplacement = (length) => (_value, start) => { + if (typeof start === 'number') { + return asterisk(length) + } + return start + asterisk(length) +} + +const redactUrlPassword = (value, replacement) => { + return redactMatchers(redactUrlPasswordMatcher({ replacement }))(value) +} + +module.exports = { + asterisk, + escapeRegExp, + urlEncodeRegexGroup, + urlEncodeRegexTag, + redactUrlHostnameMatcher, + redactUrlSearchParamsMatcher, + redactUrlPasswordMatcher, + redactUrlMatcher, + redactUrlReplacement, + redactDynamicReplacement, + redactFixedReplacement, + redactMatchers, + redactUrlPassword, +} diff --git a/deps/npm/node_modules/@npmcli/redact/package.json b/deps/npm/node_modules/@npmcli/redact/package.json index 1fc64a4c02f28e..2bcee9ea0884b4 100644 --- a/deps/npm/node_modules/@npmcli/redact/package.json +++ b/deps/npm/node_modules/@npmcli/redact/package.json @@ -1,8 +1,13 @@ { "name": "@npmcli/redact", - "version": "1.1.0", + "version": "2.0.0", "description": "Redact sensitive npm information from output", "main": "lib/index.js", + "exports": { + ".": "./lib/index.js", + "./server": "./lib/server.js", + "./package.json": "./package.json" + }, "scripts": { "test": "tap", "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", @@ -32,7 +37,8 @@ "nyc-arg": [ "--exclude", "tap-snapshots/**" - ] + ], + "timeout": 120 }, "devDependencies": { "@npmcli/eslint-config": "^4.0.2", diff --git a/deps/npm/node_modules/@npmcli/run-script/lib/run-script-pkg.js b/deps/npm/node_modules/@npmcli/run-script/lib/run-script-pkg.js index ea33db56298586..9900c96315f85f 100644 --- a/deps/npm/node_modules/@npmcli/run-script/lib/run-script-pkg.js +++ b/deps/npm/node_modules/@npmcli/run-script/lib/run-script-pkg.js @@ -5,19 +5,6 @@ const { isNodeGypPackage, defaultGypInstallScript } = require('@npmcli/node-gyp' const signalManager = require('./signal-manager.js') const isServerPackage = require('./is-server-package.js') -// you wouldn't like me when I'm angry... -const bruce = (id, event, cmd, args) => { - let banner = id - ? `\n> ${id} ${event}\n` - : `\n> ${event}\n` - banner += `> ${cmd.trim().replace(/\n/g, '\n> ')}` - if (args.length) { - banner += ` ${args.join(' ')}` - } - banner += '\n' - return banner -} - const runScriptPkg = async options => { const { event, @@ -29,8 +16,6 @@ const runScriptPkg = async options => { pkg, args = [], stdioString, - // note: only used when stdio:inherit - banner = true, // how long to wait for a process.kill signal // only exposed here so that we can make the test go a bit faster. 
signalTimeout = 500, @@ -59,9 +44,22 @@ const runScriptPkg = async options => { return { code: 0, signal: null } } - if (stdio === 'inherit' && banner !== false) { - // we're dumping to the parent's stdout, so print the banner - console.log(bruce(pkg._id, event, cmd, args)) + let inputEnd = () => {} + if (stdio === 'inherit') { + let banner + if (pkg._id) { + banner = `\n> ${pkg._id} ${event}\n` + } else { + banner = `\n> ${event}\n` + } + banner += `> ${cmd.trim().replace(/\n/g, '\n> ')}` + if (args.length) { + banner += ` ${args.join(' ')}` + } + banner += '\n' + const { output, input } = require('proc-log') + output.standard(banner) + inputEnd = input.start() } const [spawnShell, spawnArgs, spawnOpts] = makeSpawnArgs({ @@ -108,7 +106,7 @@ const runScriptPkg = async options => { } else { throw er } - }) + }).finally(inputEnd) } module.exports = runScriptPkg diff --git a/deps/npm/node_modules/@npmcli/run-script/package.json b/deps/npm/node_modules/@npmcli/run-script/package.json index 1c98b1b170e265..8a83e726fbeb2c 100644 --- a/deps/npm/node_modules/@npmcli/run-script/package.json +++ b/deps/npm/node_modules/@npmcli/run-script/package.json @@ -1,6 +1,6 @@ { "name": "@npmcli/run-script", - "version": "7.0.4", + "version": "8.1.0", "description": "Run a lifecycle script for a package (descendant of npm-lifecycle)", "author": "GitHub Inc.", "license": "ISC", @@ -16,7 +16,7 @@ }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", + "@npmcli/template-oss": "4.21.4", "spawk": "^1.8.1", "tap": "^16.0.1" }, @@ -25,6 +25,7 @@ "@npmcli/package-json": "^5.0.0", "@npmcli/promise-spawn": "^7.0.0", "node-gyp": "^10.0.0", + "proc-log": "^4.0.0", "which": "^4.0.0" }, "files": [ @@ -41,7 +42,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", + "version": "4.21.4", "publish": "true" }, "tap": { diff --git a/deps/npm/node_modules/are-we-there-yet/LICENSE.md b/deps/npm/node_modules/are-we-there-yet/LICENSE.md deleted file mode 100644 index 845be76f64e789..00000000000000 --- a/deps/npm/node_modules/are-we-there-yet/LICENSE.md +++ /dev/null @@ -1,18 +0,0 @@ -ISC License - -Copyright npm, Inc. - -Permission to use, copy, modify, and/or distribute this -software for any purpose with or without fee is hereby -granted, provided that the above copyright notice and this -permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND NPM DISCLAIMS ALL -WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO -EVENT SHALL NPM BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. 
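
The hunks above all follow the same migration: ad-hoc `process.emit('time', name)` / `process.emit('timeEnd', name)` pairs become the `proc-log@4` timing helper, where `time.start(name)` returns a callback that closes the timer, and direct `console.log` banners move to `output.standard()` from the same package. A minimal producer-side sketch of that pattern, assuming `proc-log` ^4 as pinned in the updated package.json files; the timer name, log text, and the timed work itself are hypothetical, not taken from the patch:

    // Sketch of the proc-log timing/output pattern these hunks adopt.
    // Assumes proc-log ^4 is installed; names below are illustrative only.
    const { log, time, output } = require('proc-log')

    const timedStep = async () => {
      // time.start() replaces the old process.emit('time', name) call and
      // returns the function that used to be process.emit('timeEnd', name).
      const timeEnd = time.start('example:step')
      try {
        log.silly('example', 'running a timed step')
        output.standard('> example step') // replaces direct console.log banners
        return await new Promise(resolve => setTimeout(resolve, 10))
      } finally {
        timeEnd()
      }
    }

    timedStep()

proc-log only emits these as events on `process`; a consumer such as the npm CLI display layer decides whether to render or record them, which is why the vendored modules no longer need npmlog or direct console access.
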
diff --git a/deps/npm/node_modules/are-we-there-yet/lib/index.js b/deps/npm/node_modules/are-we-there-yet/lib/index.js deleted file mode 100644 index 57d8743fdad177..00000000000000 --- a/deps/npm/node_modules/are-we-there-yet/lib/index.js +++ /dev/null @@ -1,4 +0,0 @@ -'use strict' -exports.TrackerGroup = require('./tracker-group.js') -exports.Tracker = require('./tracker.js') -exports.TrackerStream = require('./tracker-stream.js') diff --git a/deps/npm/node_modules/are-we-there-yet/lib/tracker-base.js b/deps/npm/node_modules/are-we-there-yet/lib/tracker-base.js deleted file mode 100644 index 1b5e0dc30c49bb..00000000000000 --- a/deps/npm/node_modules/are-we-there-yet/lib/tracker-base.js +++ /dev/null @@ -1,13 +0,0 @@ -'use strict' -const EventEmitter = require('events') - -let trackerId = 0 -class TrackerBase extends EventEmitter { - constructor (name) { - super() - this.id = ++trackerId - this.name = name - } -} - -module.exports = TrackerBase diff --git a/deps/npm/node_modules/are-we-there-yet/lib/tracker-group.js b/deps/npm/node_modules/are-we-there-yet/lib/tracker-group.js deleted file mode 100644 index 162c22584cdc53..00000000000000 --- a/deps/npm/node_modules/are-we-there-yet/lib/tracker-group.js +++ /dev/null @@ -1,112 +0,0 @@ -'use strict' -const TrackerBase = require('./tracker-base.js') -const Tracker = require('./tracker.js') -const TrackerStream = require('./tracker-stream.js') - -class TrackerGroup extends TrackerBase { - parentGroup = null - trackers = [] - completion = {} - weight = {} - totalWeight = 0 - finished = false - bubbleChange = bubbleChange(this) - - nameInTree () { - var names = [] - var from = this - while (from) { - names.unshift(from.name) - from = from.parentGroup - } - return names.join('/') - } - - addUnit (unit, weight) { - if (unit.addUnit) { - var toTest = this - while (toTest) { - if (unit === toTest) { - throw new Error( - 'Attempted to add tracker group ' + - unit.name + ' to tree that already includes it ' + - this.nameInTree(this)) - } - toTest = toTest.parentGroup - } - unit.parentGroup = this - } - this.weight[unit.id] = weight || 1 - this.totalWeight += this.weight[unit.id] - this.trackers.push(unit) - this.completion[unit.id] = unit.completed() - unit.on('change', this.bubbleChange) - if (!this.finished) { - this.emit('change', unit.name, this.completion[unit.id], unit) - } - return unit - } - - completed () { - if (this.trackers.length === 0) { - return 0 - } - var valPerWeight = 1 / this.totalWeight - var completed = 0 - for (var ii = 0; ii < this.trackers.length; ii++) { - var trackerId = this.trackers[ii].id - completed += - valPerWeight * this.weight[trackerId] * this.completion[trackerId] - } - return completed - } - - newGroup (name, weight) { - return this.addUnit(new TrackerGroup(name), weight) - } - - newItem (name, todo, weight) { - return this.addUnit(new Tracker(name, todo), weight) - } - - newStream (name, todo, weight) { - return this.addUnit(new TrackerStream(name, todo), weight) - } - - finish () { - this.finished = true - if (!this.trackers.length) { - this.addUnit(new Tracker(), 1, true) - } - for (var ii = 0; ii < this.trackers.length; ii++) { - var tracker = this.trackers[ii] - tracker.finish() - tracker.removeListener('change', this.bubbleChange) - } - this.emit('change', this.name, 1, this) - } - - debug (depth = 0) { - const indent = ' '.repeat(depth) - let output = `${indent}${this.name || 'top'}: ${this.completed()}\n` - - this.trackers.forEach(function (tracker) { - output += tracker instanceof TrackerGroup - ? 
tracker.debug(depth + 1) - : `${indent} ${tracker.name}: ${tracker.completed()}\n` - }) - return output - } -} - -function bubbleChange (trackerGroup) { - return function (name, completed, tracker) { - trackerGroup.completion[tracker.id] = completed - if (trackerGroup.finished) { - return - } - trackerGroup.emit('change', name || trackerGroup.name, trackerGroup.completed(), trackerGroup) - } -} - -module.exports = TrackerGroup diff --git a/deps/npm/node_modules/are-we-there-yet/lib/tracker-stream.js b/deps/npm/node_modules/are-we-there-yet/lib/tracker-stream.js deleted file mode 100644 index 75e44df309150f..00000000000000 --- a/deps/npm/node_modules/are-we-there-yet/lib/tracker-stream.js +++ /dev/null @@ -1,42 +0,0 @@ -'use strict' -const stream = require('stream') -const Tracker = require('./tracker.js') - -class TrackerStream extends stream.Transform { - constructor (name, size, options) { - super(options) - this.tracker = new Tracker(name, size) - this.name = name - this.id = this.tracker.id - this.tracker.on('change', this.trackerChange.bind(this)) - } - - trackerChange (name, completion) { - this.emit('change', name, completion, this) - } - - _transform (data, encoding, cb) { - this.tracker.completeWork(data.length ? data.length : 1) - this.push(data) - cb() - } - - _flush (cb) { - this.tracker.finish() - cb() - } - - completed () { - return this.tracker.completed() - } - - addWork (work) { - return this.tracker.addWork(work) - } - - finish () { - return this.tracker.finish() - } -} - -module.exports = TrackerStream diff --git a/deps/npm/node_modules/are-we-there-yet/lib/tracker.js b/deps/npm/node_modules/are-we-there-yet/lib/tracker.js deleted file mode 100644 index 02e879ce6e3e26..00000000000000 --- a/deps/npm/node_modules/are-we-there-yet/lib/tracker.js +++ /dev/null @@ -1,34 +0,0 @@ -'use strict' -const TrackerBase = require('./tracker-base.js') - -class Tracker extends TrackerBase { - constructor (name, todo) { - super(name) - this.workDone = 0 - this.workTodo = todo || 0 - } - - completed () { - return this.workTodo === 0 ? 0 : this.workDone / this.workTodo - } - - addWork (work) { - this.workTodo += work - this.emit('change', this.name, this.completed(), this) - } - - completeWork (work) { - this.workDone += work - if (this.workDone > this.workTodo) { - this.workDone = this.workTodo - } - this.emit('change', this.name, this.completed(), this) - } - - finish () { - this.workTodo = this.workDone = 1 - this.emit('change', this.name, 1, this) - } -} - -module.exports = Tracker diff --git a/deps/npm/node_modules/cli-table3/LICENSE b/deps/npm/node_modules/cli-table3/LICENSE deleted file mode 100644 index a09b7de012ac85..00000000000000 --- a/deps/npm/node_modules/cli-table3/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2014 James Talmage - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/deps/npm/node_modules/cli-table3/index.js b/deps/npm/node_modules/cli-table3/index.js deleted file mode 100644 index b49d920dd3ef69..00000000000000 --- a/deps/npm/node_modules/cli-table3/index.js +++ /dev/null @@ -1 +0,0 @@ -module.exports = require('./src/table'); \ No newline at end of file diff --git a/deps/npm/node_modules/cli-table3/package.json b/deps/npm/node_modules/cli-table3/package.json deleted file mode 100644 index 0bd5d31d102463..00000000000000 --- a/deps/npm/node_modules/cli-table3/package.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "name": "cli-table3", - "version": "0.6.4", - "description": "Pretty unicode tables for the command line. Based on the original cli-table.", - "main": "index.js", - "types": "index.d.ts", - "files": [ - "src/", - "index.d.ts", - "index.js" - ], - "directories": { - "test": "test" - }, - "dependencies": { - "string-width": "^4.2.0" - }, - "devDependencies": { - "cli-table": "^0.3.1", - "eslint": "^6.0.0", - "eslint-config-prettier": "^6.0.0", - "eslint-plugin-prettier": "^3.0.0", - "jest": "^25.2.4", - "jest-runner-eslint": "^0.7.0", - "lerna-changelog": "^1.0.1", - "prettier": "2.3.2" - }, - "optionalDependencies": { - "@colors/colors": "1.5.0" - }, - "scripts": { - "changelog": "lerna-changelog", - "docs": "node ./scripts/update-docs.js", - "prettier": "prettier --write '{examples,lib,scripts,src,test}/**/*.js'", - "test": "jest --color", - "test:watch": "jest --color --watchAll --notify" - }, - "repository": { - "type": "git", - "url": "https://github.com/cli-table/cli-table3.git" - }, - "keywords": [ - "node", - "command", - "line", - "cli", - "table", - "tables", - "tabular", - "unicode", - "colors", - "grid" - ], - "author": "James Talmage", - "license": "MIT", - "bugs": { - "url": "https://github.com/cli-table/cli-table3/issues" - }, - "homepage": "https://github.com/cli-table/cli-table3", - "engines": { - "node": "10.* || >= 12.*" - }, - "changelog": { - "repo": "cli-table/cli-table3", - "labels": { - "breaking": ":boom: Breaking Change", - "enhancement": ":rocket: Enhancement", - "bug": ":bug: Bug Fix", - "documentation": ":memo: Documentation", - "internal": ":house: Internal" - } - }, - "jest": { - "projects": [ - { - "displayName": "test", - "testMatch": [ - "/test/**/*.js" - ] - }, - { - "runner": "jest-runner-eslint", - "displayName": "lint", - "testMatch": [ - "/examples/**/*.js", - "/lib/**/*.js", - "/scripts/**/*.js", - "/src/**/*.js", - "/test/**/*.js" - ] - } - ] - }, - "prettier": { - "printWidth": 120, - "tabWidth": 2, - "singleQuote": true, - "trailingComma": "es5" - } -} diff --git a/deps/npm/node_modules/cli-table3/src/cell.js b/deps/npm/node_modules/cli-table3/src/cell.js deleted file mode 100644 index 8c3df35d194076..00000000000000 --- a/deps/npm/node_modules/cli-table3/src/cell.js +++ /dev/null @@ -1,409 +0,0 @@ -const { info, debug } = require('./debug'); -const utils = require('./utils'); - -class Cell { - /** - * A representation of a cell within the table. 
- * Implementations must have `init` and `draw` methods, - * as well as `colSpan`, `rowSpan`, `desiredHeight` and `desiredWidth` properties. - * @param options - * @constructor - */ - constructor(options) { - this.setOptions(options); - - /** - * Each cell will have it's `x` and `y` values set by the `layout-manager` prior to - * `init` being called; - * @type {Number} - */ - this.x = null; - this.y = null; - } - - setOptions(options) { - if (['boolean', 'number', 'string'].indexOf(typeof options) !== -1) { - options = { content: '' + options }; - } - options = options || {}; - this.options = options; - let content = options.content; - if (['boolean', 'number', 'string'].indexOf(typeof content) !== -1) { - this.content = String(content); - } else if (!content) { - this.content = this.options.href || ''; - } else { - throw new Error('Content needs to be a primitive, got: ' + typeof content); - } - this.colSpan = options.colSpan || 1; - this.rowSpan = options.rowSpan || 1; - if (this.options.href) { - Object.defineProperty(this, 'href', { - get() { - return this.options.href; - }, - }); - } - } - - mergeTableOptions(tableOptions, cells) { - this.cells = cells; - - let optionsChars = this.options.chars || {}; - let tableChars = tableOptions.chars; - let chars = (this.chars = {}); - CHAR_NAMES.forEach(function (name) { - setOption(optionsChars, tableChars, name, chars); - }); - - this.truncate = this.options.truncate || tableOptions.truncate; - - let style = (this.options.style = this.options.style || {}); - let tableStyle = tableOptions.style; - setOption(style, tableStyle, 'padding-left', this); - setOption(style, tableStyle, 'padding-right', this); - this.head = style.head || tableStyle.head; - this.border = style.border || tableStyle.border; - - this.fixedWidth = tableOptions.colWidths[this.x]; - this.lines = this.computeLines(tableOptions); - - this.desiredWidth = utils.strlen(this.content) + this.paddingLeft + this.paddingRight; - this.desiredHeight = this.lines.length; - } - - computeLines(tableOptions) { - const tableWordWrap = tableOptions.wordWrap || tableOptions.textWrap; - const { wordWrap = tableWordWrap } = this.options; - if (this.fixedWidth && wordWrap) { - this.fixedWidth -= this.paddingLeft + this.paddingRight; - if (this.colSpan) { - let i = 1; - while (i < this.colSpan) { - this.fixedWidth += tableOptions.colWidths[this.x + i]; - i++; - } - } - const { wrapOnWordBoundary: tableWrapOnWordBoundary = true } = tableOptions; - const { wrapOnWordBoundary = tableWrapOnWordBoundary } = this.options; - return this.wrapLines(utils.wordWrap(this.fixedWidth, this.content, wrapOnWordBoundary)); - } - return this.wrapLines(this.content.split('\n')); - } - - wrapLines(computedLines) { - const lines = utils.colorizeLines(computedLines); - if (this.href) { - return lines.map((line) => utils.hyperlink(this.href, line)); - } - return lines; - } - - /** - * Initializes the Cells data structure. - * - * @param tableOptions - A fully populated set of tableOptions. - * In addition to the standard default values, tableOptions must have fully populated the - * `colWidths` and `rowWidths` arrays. Those arrays must have lengths equal to the number - * of columns or rows (respectively) in this table, and each array item must be a Number. 
- * - */ - init(tableOptions) { - let x = this.x; - let y = this.y; - this.widths = tableOptions.colWidths.slice(x, x + this.colSpan); - this.heights = tableOptions.rowHeights.slice(y, y + this.rowSpan); - this.width = this.widths.reduce(sumPlusOne, -1); - this.height = this.heights.reduce(sumPlusOne, -1); - - this.hAlign = this.options.hAlign || tableOptions.colAligns[x]; - this.vAlign = this.options.vAlign || tableOptions.rowAligns[y]; - - this.drawRight = x + this.colSpan == tableOptions.colWidths.length; - } - - /** - * Draws the given line of the cell. - * This default implementation defers to methods `drawTop`, `drawBottom`, `drawLine` and `drawEmpty`. - * @param lineNum - can be `top`, `bottom` or a numerical line number. - * @param spanningCell - will be a number if being called from a RowSpanCell, and will represent how - * many rows below it's being called from. Otherwise it's undefined. - * @returns {String} The representation of this line. - */ - draw(lineNum, spanningCell) { - if (lineNum == 'top') return this.drawTop(this.drawRight); - if (lineNum == 'bottom') return this.drawBottom(this.drawRight); - let content = utils.truncate(this.content, 10, this.truncate); - if (!lineNum) { - info(`${this.y}-${this.x}: ${this.rowSpan - lineNum}x${this.colSpan} Cell ${content}`); - } else { - // debug(`${lineNum}-${this.x}: 1x${this.colSpan} RowSpanCell ${content}`); - } - let padLen = Math.max(this.height - this.lines.length, 0); - let padTop; - switch (this.vAlign) { - case 'center': - padTop = Math.ceil(padLen / 2); - break; - case 'bottom': - padTop = padLen; - break; - default: - padTop = 0; - } - if (lineNum < padTop || lineNum >= padTop + this.lines.length) { - return this.drawEmpty(this.drawRight, spanningCell); - } - let forceTruncation = this.lines.length > this.height && lineNum + 1 >= this.height; - return this.drawLine(lineNum - padTop, this.drawRight, forceTruncation, spanningCell); - } - - /** - * Renders the top line of the cell. - * @param drawRight - true if this method should render the right edge of the cell. - * @returns {String} - */ - drawTop(drawRight) { - let content = []; - if (this.cells) { - //TODO: cells should always exist - some tests don't fill it in though - this.widths.forEach(function (width, index) { - content.push(this._topLeftChar(index)); - content.push(utils.repeat(this.chars[this.y == 0 ? 'top' : 'mid'], width)); - }, this); - } else { - content.push(this._topLeftChar(0)); - content.push(utils.repeat(this.chars[this.y == 0 ? 'top' : 'mid'], this.width)); - } - if (drawRight) { - content.push(this.chars[this.y == 0 ? 'topRight' : 'rightMid']); - } - return this.wrapWithStyleColors('border', content.join('')); - } - - _topLeftChar(offset) { - let x = this.x + offset; - let leftChar; - if (this.y == 0) { - leftChar = x == 0 ? 'topLeft' : offset == 0 ? 'topMid' : 'top'; - } else { - if (x == 0) { - leftChar = 'leftMid'; - } else { - leftChar = offset == 0 ? 'midMid' : 'bottomMid'; - if (this.cells) { - //TODO: cells should always exist - some tests don't fill it in though - let spanAbove = this.cells[this.y - 1][x] instanceof Cell.ColSpanCell; - if (spanAbove) { - leftChar = offset == 0 ? 
'topMid' : 'mid'; - } - if (offset == 0) { - let i = 1; - while (this.cells[this.y][x - i] instanceof Cell.ColSpanCell) { - i++; - } - if (this.cells[this.y][x - i] instanceof Cell.RowSpanCell) { - leftChar = 'leftMid'; - } - } - } - } - } - return this.chars[leftChar]; - } - - wrapWithStyleColors(styleProperty, content) { - if (this[styleProperty] && this[styleProperty].length) { - try { - let colors = require('@colors/colors/safe'); - for (let i = this[styleProperty].length - 1; i >= 0; i--) { - colors = colors[this[styleProperty][i]]; - } - return colors(content); - } catch (e) { - return content; - } - } else { - return content; - } - } - - /** - * Renders a line of text. - * @param lineNum - Which line of text to render. This is not necessarily the line within the cell. - * There may be top-padding above the first line of text. - * @param drawRight - true if this method should render the right edge of the cell. - * @param forceTruncationSymbol - `true` if the rendered text should end with the truncation symbol even - * if the text fits. This is used when the cell is vertically truncated. If `false` the text should - * only include the truncation symbol if the text will not fit horizontally within the cell width. - * @param spanningCell - a number of if being called from a RowSpanCell. (how many rows below). otherwise undefined. - * @returns {String} - */ - drawLine(lineNum, drawRight, forceTruncationSymbol, spanningCell) { - let left = this.chars[this.x == 0 ? 'left' : 'middle']; - if (this.x && spanningCell && this.cells) { - let cellLeft = this.cells[this.y + spanningCell][this.x - 1]; - while (cellLeft instanceof ColSpanCell) { - cellLeft = this.cells[cellLeft.y][cellLeft.x - 1]; - } - if (!(cellLeft instanceof RowSpanCell)) { - left = this.chars['rightMid']; - } - } - let leftPadding = utils.repeat(' ', this.paddingLeft); - let right = drawRight ? this.chars['right'] : ''; - let rightPadding = utils.repeat(' ', this.paddingRight); - let line = this.lines[lineNum]; - let len = this.width - (this.paddingLeft + this.paddingRight); - if (forceTruncationSymbol) line += this.truncate || '…'; - let content = utils.truncate(line, len, this.truncate); - content = utils.pad(content, len, ' ', this.hAlign); - content = leftPadding + content + rightPadding; - return this.stylizeLine(left, content, right); - } - - stylizeLine(left, content, right) { - left = this.wrapWithStyleColors('border', left); - right = this.wrapWithStyleColors('border', right); - if (this.y === 0) { - content = this.wrapWithStyleColors('head', content); - } - return left + content + right; - } - - /** - * Renders the bottom line of the cell. - * @param drawRight - true if this method should render the right edge of the cell. - * @returns {String} - */ - drawBottom(drawRight) { - let left = this.chars[this.x == 0 ? 'bottomLeft' : 'bottomMid']; - let content = utils.repeat(this.chars.bottom, this.width); - let right = drawRight ? this.chars['bottomRight'] : ''; - return this.wrapWithStyleColors('border', left + content + right); - } - - /** - * Renders a blank line of text within the cell. Used for top and/or bottom padding. - * @param drawRight - true if this method should render the right edge of the cell. - * @param spanningCell - a number of if being called from a RowSpanCell. (how many rows below). otherwise undefined. - * @returns {String} - */ - drawEmpty(drawRight, spanningCell) { - let left = this.chars[this.x == 0 ? 
'left' : 'middle']; - if (this.x && spanningCell && this.cells) { - let cellLeft = this.cells[this.y + spanningCell][this.x - 1]; - while (cellLeft instanceof ColSpanCell) { - cellLeft = this.cells[cellLeft.y][cellLeft.x - 1]; - } - if (!(cellLeft instanceof RowSpanCell)) { - left = this.chars['rightMid']; - } - } - let right = drawRight ? this.chars['right'] : ''; - let content = utils.repeat(' ', this.width); - return this.stylizeLine(left, content, right); - } -} - -class ColSpanCell { - /** - * A Cell that doesn't do anything. It just draws empty lines. - * Used as a placeholder in column spanning. - * @constructor - */ - constructor() {} - - draw(lineNum) { - if (typeof lineNum === 'number') { - debug(`${this.y}-${this.x}: 1x1 ColSpanCell`); - } - return ''; - } - - init() {} - - mergeTableOptions() {} -} - -class RowSpanCell { - /** - * A placeholder Cell for a Cell that spans multiple rows. - * It delegates rendering to the original cell, but adds the appropriate offset. - * @param originalCell - * @constructor - */ - constructor(originalCell) { - this.originalCell = originalCell; - } - - init(tableOptions) { - let y = this.y; - let originalY = this.originalCell.y; - this.cellOffset = y - originalY; - this.offset = findDimension(tableOptions.rowHeights, originalY, this.cellOffset); - } - - draw(lineNum) { - if (lineNum == 'top') { - return this.originalCell.draw(this.offset, this.cellOffset); - } - if (lineNum == 'bottom') { - return this.originalCell.draw('bottom'); - } - debug(`${this.y}-${this.x}: 1x${this.colSpan} RowSpanCell for ${this.originalCell.content}`); - return this.originalCell.draw(this.offset + 1 + lineNum); - } - - mergeTableOptions() {} -} - -function firstDefined(...args) { - return args.filter((v) => v !== undefined && v !== null).shift(); -} - -// HELPER FUNCTIONS -function setOption(objA, objB, nameB, targetObj) { - let nameA = nameB.split('-'); - if (nameA.length > 1) { - nameA[1] = nameA[1].charAt(0).toUpperCase() + nameA[1].substr(1); - nameA = nameA.join(''); - targetObj[nameA] = firstDefined(objA[nameA], objA[nameB], objB[nameA], objB[nameB]); - } else { - targetObj[nameB] = firstDefined(objA[nameB], objB[nameB]); - } -} - -function findDimension(dimensionTable, startingIndex, span) { - let ret = dimensionTable[startingIndex]; - for (let i = 1; i < span; i++) { - ret += 1 + dimensionTable[startingIndex + i]; - } - return ret; -} - -function sumPlusOne(a, b) { - return a + b + 1; -} - -let CHAR_NAMES = [ - 'top', - 'top-mid', - 'top-left', - 'top-right', - 'bottom', - 'bottom-mid', - 'bottom-left', - 'bottom-right', - 'left', - 'left-mid', - 'mid', - 'mid-mid', - 'right', - 'right-mid', - 'middle', -]; - -module.exports = Cell; -module.exports.ColSpanCell = ColSpanCell; -module.exports.RowSpanCell = RowSpanCell; diff --git a/deps/npm/node_modules/cli-table3/src/debug.js b/deps/npm/node_modules/cli-table3/src/debug.js deleted file mode 100644 index 6acfb030321597..00000000000000 --- a/deps/npm/node_modules/cli-table3/src/debug.js +++ /dev/null @@ -1,28 +0,0 @@ -let messages = []; -let level = 0; - -const debug = (msg, min) => { - if (level >= min) { - messages.push(msg); - } -}; - -debug.WARN = 1; -debug.INFO = 2; -debug.DEBUG = 3; - -debug.reset = () => { - messages = []; -}; - -debug.setDebugLevel = (v) => { - level = v; -}; - -debug.warn = (msg) => debug(msg, debug.WARN); -debug.info = (msg) => debug(msg, debug.INFO); -debug.debug = (msg) => debug(msg, debug.DEBUG); - -debug.debugMessages = () => messages; - -module.exports = debug; diff --git 
a/deps/npm/node_modules/cli-table3/src/layout-manager.js b/deps/npm/node_modules/cli-table3/src/layout-manager.js deleted file mode 100644 index 3937452274d721..00000000000000 --- a/deps/npm/node_modules/cli-table3/src/layout-manager.js +++ /dev/null @@ -1,254 +0,0 @@ -const { warn, debug } = require('./debug'); -const Cell = require('./cell'); -const { ColSpanCell, RowSpanCell } = Cell; - -(function () { - function next(alloc, col) { - if (alloc[col] > 0) { - return next(alloc, col + 1); - } - return col; - } - - function layoutTable(table) { - let alloc = {}; - table.forEach(function (row, rowIndex) { - let col = 0; - row.forEach(function (cell) { - cell.y = rowIndex; - // Avoid erroneous call to next() on first row - cell.x = rowIndex ? next(alloc, col) : col; - const rowSpan = cell.rowSpan || 1; - const colSpan = cell.colSpan || 1; - if (rowSpan > 1) { - for (let cs = 0; cs < colSpan; cs++) { - alloc[cell.x + cs] = rowSpan; - } - } - col = cell.x + colSpan; - }); - Object.keys(alloc).forEach((idx) => { - alloc[idx]--; - if (alloc[idx] < 1) delete alloc[idx]; - }); - }); - } - - function maxWidth(table) { - let mw = 0; - table.forEach(function (row) { - row.forEach(function (cell) { - mw = Math.max(mw, cell.x + (cell.colSpan || 1)); - }); - }); - return mw; - } - - function maxHeight(table) { - return table.length; - } - - function cellsConflict(cell1, cell2) { - let yMin1 = cell1.y; - let yMax1 = cell1.y - 1 + (cell1.rowSpan || 1); - let yMin2 = cell2.y; - let yMax2 = cell2.y - 1 + (cell2.rowSpan || 1); - let yConflict = !(yMin1 > yMax2 || yMin2 > yMax1); - - let xMin1 = cell1.x; - let xMax1 = cell1.x - 1 + (cell1.colSpan || 1); - let xMin2 = cell2.x; - let xMax2 = cell2.x - 1 + (cell2.colSpan || 1); - let xConflict = !(xMin1 > xMax2 || xMin2 > xMax1); - - return yConflict && xConflict; - } - - function conflictExists(rows, x, y) { - let i_max = Math.min(rows.length - 1, y); - let cell = { x: x, y: y }; - for (let i = 0; i <= i_max; i++) { - let row = rows[i]; - for (let j = 0; j < row.length; j++) { - if (cellsConflict(cell, row[j])) { - return true; - } - } - } - return false; - } - - function allBlank(rows, y, xMin, xMax) { - for (let x = xMin; x < xMax; x++) { - if (conflictExists(rows, x, y)) { - return false; - } - } - return true; - } - - function addRowSpanCells(table) { - table.forEach(function (row, rowIndex) { - row.forEach(function (cell) { - for (let i = 1; i < cell.rowSpan; i++) { - let rowSpanCell = new RowSpanCell(cell); - rowSpanCell.x = cell.x; - rowSpanCell.y = cell.y + i; - rowSpanCell.colSpan = cell.colSpan; - insertCell(rowSpanCell, table[rowIndex + i]); - } - }); - }); - } - - function addColSpanCells(cellRows) { - for (let rowIndex = cellRows.length - 1; rowIndex >= 0; rowIndex--) { - let cellColumns = cellRows[rowIndex]; - for (let columnIndex = 0; columnIndex < cellColumns.length; columnIndex++) { - let cell = cellColumns[columnIndex]; - for (let k = 1; k < cell.colSpan; k++) { - let colSpanCell = new ColSpanCell(); - colSpanCell.x = cell.x + k; - colSpanCell.y = cell.y; - cellColumns.splice(columnIndex + 1, 0, colSpanCell); - } - } - } - } - - function insertCell(cell, row) { - let x = 0; - while (x < row.length && row[x].x < cell.x) { - x++; - } - row.splice(x, 0, cell); - } - - function fillInTable(table) { - let h_max = maxHeight(table); - let w_max = maxWidth(table); - debug(`Max rows: ${h_max}; Max cols: ${w_max}`); - for (let y = 0; y < h_max; y++) { - for (let x = 0; x < w_max; x++) { - if (!conflictExists(table, x, y)) { - let opts = { x: x, y: y, 
colSpan: 1, rowSpan: 1 }; - x++; - while (x < w_max && !conflictExists(table, x, y)) { - opts.colSpan++; - x++; - } - let y2 = y + 1; - while (y2 < h_max && allBlank(table, y2, opts.x, opts.x + opts.colSpan)) { - opts.rowSpan++; - y2++; - } - let cell = new Cell(opts); - cell.x = opts.x; - cell.y = opts.y; - warn(`Missing cell at ${cell.y}-${cell.x}.`); - insertCell(cell, table[y]); - } - } - } - } - - function generateCells(rows) { - return rows.map(function (row) { - if (!Array.isArray(row)) { - let key = Object.keys(row)[0]; - row = row[key]; - if (Array.isArray(row)) { - row = row.slice(); - row.unshift(key); - } else { - row = [key, row]; - } - } - return row.map(function (cell) { - return new Cell(cell); - }); - }); - } - - function makeTableLayout(rows) { - let cellRows = generateCells(rows); - layoutTable(cellRows); - fillInTable(cellRows); - addRowSpanCells(cellRows); - addColSpanCells(cellRows); - return cellRows; - } - - module.exports = { - makeTableLayout: makeTableLayout, - layoutTable: layoutTable, - addRowSpanCells: addRowSpanCells, - maxWidth: maxWidth, - fillInTable: fillInTable, - computeWidths: makeComputeWidths('colSpan', 'desiredWidth', 'x', 1), - computeHeights: makeComputeWidths('rowSpan', 'desiredHeight', 'y', 1), - }; -})(); - -function makeComputeWidths(colSpan, desiredWidth, x, forcedMin) { - return function (vals, table) { - let result = []; - let spanners = []; - let auto = {}; - table.forEach(function (row) { - row.forEach(function (cell) { - if ((cell[colSpan] || 1) > 1) { - spanners.push(cell); - } else { - result[cell[x]] = Math.max(result[cell[x]] || 0, cell[desiredWidth] || 0, forcedMin); - } - }); - }); - - vals.forEach(function (val, index) { - if (typeof val === 'number') { - result[index] = val; - } - }); - - //spanners.forEach(function(cell){ - for (let k = spanners.length - 1; k >= 0; k--) { - let cell = spanners[k]; - let span = cell[colSpan]; - let col = cell[x]; - let existingWidth = result[col]; - let editableCols = typeof vals[col] === 'number' ? 0 : 1; - if (typeof existingWidth === 'number') { - for (let i = 1; i < span; i++) { - existingWidth += 1 + result[col + i]; - if (typeof vals[col + i] !== 'number') { - editableCols++; - } - } - } else { - existingWidth = desiredWidth === 'desiredWidth' ? 
cell.desiredWidth - 1 : 1; - if (!auto[col] || auto[col] < existingWidth) { - auto[col] = existingWidth; - } - } - - if (cell[desiredWidth] > existingWidth) { - let i = 0; - while (editableCols > 0 && cell[desiredWidth] > existingWidth) { - if (typeof vals[col + i] !== 'number') { - let dif = Math.round((cell[desiredWidth] - existingWidth) / editableCols); - existingWidth += dif; - result[col + i] += dif; - editableCols--; - } - i++; - } - } - } - - Object.assign(vals, result, auto); - for (let j = 0; j < vals.length; j++) { - vals[j] = Math.max(forcedMin, vals[j] || 0); - } - }; -} diff --git a/deps/npm/node_modules/cli-table3/src/table.js b/deps/npm/node_modules/cli-table3/src/table.js deleted file mode 100644 index eb4a9bda9a3649..00000000000000 --- a/deps/npm/node_modules/cli-table3/src/table.js +++ /dev/null @@ -1,106 +0,0 @@ -const debug = require('./debug'); -const utils = require('./utils'); -const tableLayout = require('./layout-manager'); - -class Table extends Array { - constructor(opts) { - super(); - - const options = utils.mergeOptions(opts); - Object.defineProperty(this, 'options', { - value: options, - enumerable: options.debug, - }); - - if (options.debug) { - switch (typeof options.debug) { - case 'boolean': - debug.setDebugLevel(debug.WARN); - break; - case 'number': - debug.setDebugLevel(options.debug); - break; - case 'string': - debug.setDebugLevel(parseInt(options.debug, 10)); - break; - default: - debug.setDebugLevel(debug.WARN); - debug.warn(`Debug option is expected to be boolean, number, or string. Received a ${typeof options.debug}`); - } - Object.defineProperty(this, 'messages', { - get() { - return debug.debugMessages(); - }, - }); - } - } - - toString() { - let array = this; - let headersPresent = this.options.head && this.options.head.length; - if (headersPresent) { - array = [this.options.head]; - if (this.length) { - array.push.apply(array, this); - } - } else { - this.options.style.head = []; - } - - let cells = tableLayout.makeTableLayout(array); - - cells.forEach(function (row) { - row.forEach(function (cell) { - cell.mergeTableOptions(this.options, cells); - }, this); - }, this); - - tableLayout.computeWidths(this.options.colWidths, cells); - tableLayout.computeHeights(this.options.rowHeights, cells); - - cells.forEach(function (row) { - row.forEach(function (cell) { - cell.init(this.options); - }, this); - }, this); - - let result = []; - - for (let rowIndex = 0; rowIndex < cells.length; rowIndex++) { - let row = cells[rowIndex]; - let heightOfRow = this.options.rowHeights[rowIndex]; - - if (rowIndex === 0 || !this.options.style.compact || (rowIndex == 1 && headersPresent)) { - doDraw(row, 'top', result); - } - - for (let lineNum = 0; lineNum < heightOfRow; lineNum++) { - doDraw(row, lineNum, result); - } - - if (rowIndex + 1 == cells.length) { - doDraw(row, 'bottom', result); - } - } - - return result.join('\n'); - } - - get width() { - let str = this.toString().split('\n'); - return str[0].length; - } -} - -Table.reset = () => debug.reset(); - -function doDraw(row, lineNum, result) { - let line = []; - row.forEach(function (cell) { - line.push(cell.draw(lineNum)); - }); - let str = line.join(''); - if (str.length) result.push(str); -} - -module.exports = Table; diff --git a/deps/npm/node_modules/cli-table3/src/utils.js b/deps/npm/node_modules/cli-table3/src/utils.js deleted file mode 100644 index c922c5b9adb62c..00000000000000 --- a/deps/npm/node_modules/cli-table3/src/utils.js +++ /dev/null @@ -1,336 +0,0 @@ -const stringWidth = 
require('string-width'); - -function codeRegex(capture) { - return capture ? /\u001b\[((?:\d*;){0,5}\d*)m/g : /\u001b\[(?:\d*;){0,5}\d*m/g; -} - -function strlen(str) { - let code = codeRegex(); - let stripped = ('' + str).replace(code, ''); - let split = stripped.split('\n'); - return split.reduce(function (memo, s) { - return stringWidth(s) > memo ? stringWidth(s) : memo; - }, 0); -} - -function repeat(str, times) { - return Array(times + 1).join(str); -} - -function pad(str, len, pad, dir) { - let length = strlen(str); - if (len + 1 >= length) { - let padlen = len - length; - switch (dir) { - case 'right': { - str = repeat(pad, padlen) + str; - break; - } - case 'center': { - let right = Math.ceil(padlen / 2); - let left = padlen - right; - str = repeat(pad, left) + str + repeat(pad, right); - break; - } - default: { - str = str + repeat(pad, padlen); - break; - } - } - } - return str; -} - -let codeCache = {}; - -function addToCodeCache(name, on, off) { - on = '\u001b[' + on + 'm'; - off = '\u001b[' + off + 'm'; - codeCache[on] = { set: name, to: true }; - codeCache[off] = { set: name, to: false }; - codeCache[name] = { on: on, off: off }; -} - -//https://github.com/Marak/colors.js/blob/master/lib/styles.js -addToCodeCache('bold', 1, 22); -addToCodeCache('italics', 3, 23); -addToCodeCache('underline', 4, 24); -addToCodeCache('inverse', 7, 27); -addToCodeCache('strikethrough', 9, 29); - -function updateState(state, controlChars) { - let controlCode = controlChars[1] ? parseInt(controlChars[1].split(';')[0]) : 0; - if ((controlCode >= 30 && controlCode <= 39) || (controlCode >= 90 && controlCode <= 97)) { - state.lastForegroundAdded = controlChars[0]; - return; - } - if ((controlCode >= 40 && controlCode <= 49) || (controlCode >= 100 && controlCode <= 107)) { - state.lastBackgroundAdded = controlChars[0]; - return; - } - if (controlCode === 0) { - for (let i in state) { - /* istanbul ignore else */ - if (Object.prototype.hasOwnProperty.call(state, i)) { - delete state[i]; - } - } - return; - } - let info = codeCache[controlChars[0]]; - if (info) { - state[info.set] = info.to; - } -} - -function readState(line) { - let code = codeRegex(true); - let controlChars = code.exec(line); - let state = {}; - while (controlChars !== null) { - updateState(state, controlChars); - controlChars = code.exec(line); - } - return state; -} - -function unwindState(state, ret) { - let lastBackgroundAdded = state.lastBackgroundAdded; - let lastForegroundAdded = state.lastForegroundAdded; - - delete state.lastBackgroundAdded; - delete state.lastForegroundAdded; - - Object.keys(state).forEach(function (key) { - if (state[key]) { - ret += codeCache[key].off; - } - }); - - if (lastBackgroundAdded && lastBackgroundAdded != '\u001b[49m') { - ret += '\u001b[49m'; - } - if (lastForegroundAdded && lastForegroundAdded != '\u001b[39m') { - ret += '\u001b[39m'; - } - - return ret; -} - -function rewindState(state, ret) { - let lastBackgroundAdded = state.lastBackgroundAdded; - let lastForegroundAdded = state.lastForegroundAdded; - - delete state.lastBackgroundAdded; - delete state.lastForegroundAdded; - - Object.keys(state).forEach(function (key) { - if (state[key]) { - ret = codeCache[key].on + ret; - } - }); - - if (lastBackgroundAdded && lastBackgroundAdded != '\u001b[49m') { - ret = lastBackgroundAdded + ret; - } - if (lastForegroundAdded && lastForegroundAdded != '\u001b[39m') { - ret = lastForegroundAdded + ret; - } - - return ret; -} - -function truncateWidth(str, desiredLength) { - if (str.length === strlen(str)) 
{ - return str.substr(0, desiredLength); - } - - while (strlen(str) > desiredLength) { - str = str.slice(0, -1); - } - - return str; -} - -function truncateWidthWithAnsi(str, desiredLength) { - let code = codeRegex(true); - let split = str.split(codeRegex()); - let splitIndex = 0; - let retLen = 0; - let ret = ''; - let myArray; - let state = {}; - - while (retLen < desiredLength) { - myArray = code.exec(str); - let toAdd = split[splitIndex]; - splitIndex++; - if (retLen + strlen(toAdd) > desiredLength) { - toAdd = truncateWidth(toAdd, desiredLength - retLen); - } - ret += toAdd; - retLen += strlen(toAdd); - - if (retLen < desiredLength) { - if (!myArray) { - break; - } // full-width chars may cause a whitespace which cannot be filled - ret += myArray[0]; - updateState(state, myArray); - } - } - - return unwindState(state, ret); -} - -function truncate(str, desiredLength, truncateChar) { - truncateChar = truncateChar || '…'; - let lengthOfStr = strlen(str); - if (lengthOfStr <= desiredLength) { - return str; - } - desiredLength -= strlen(truncateChar); - - let ret = truncateWidthWithAnsi(str, desiredLength); - - return ret + truncateChar; -} - -function defaultOptions() { - return { - chars: { - top: '─', - 'top-mid': '┬', - 'top-left': '┌', - 'top-right': '┐', - bottom: '─', - 'bottom-mid': '┴', - 'bottom-left': '└', - 'bottom-right': '┘', - left: '│', - 'left-mid': '├', - mid: '─', - 'mid-mid': '┼', - right: '│', - 'right-mid': '┤', - middle: '│', - }, - truncate: '…', - colWidths: [], - rowHeights: [], - colAligns: [], - rowAligns: [], - style: { - 'padding-left': 1, - 'padding-right': 1, - head: ['red'], - border: ['grey'], - compact: false, - }, - head: [], - }; -} - -function mergeOptions(options, defaults) { - options = options || {}; - defaults = defaults || defaultOptions(); - let ret = Object.assign({}, defaults, options); - ret.chars = Object.assign({}, defaults.chars, options.chars); - ret.style = Object.assign({}, defaults.style, options.style); - return ret; -} - -// Wrap on word boundary -function wordWrap(maxLength, input) { - let lines = []; - let split = input.split(/(\s+)/g); - let line = []; - let lineLength = 0; - let whitespace; - for (let i = 0; i < split.length; i += 2) { - let word = split[i]; - let newLength = lineLength + strlen(word); - if (lineLength > 0 && whitespace) { - newLength += whitespace.length; - } - if (newLength > maxLength) { - if (lineLength !== 0) { - lines.push(line.join('')); - } - line = [word]; - lineLength = strlen(word); - } else { - line.push(whitespace || '', word); - lineLength = newLength; - } - whitespace = split[i + 1]; - } - if (lineLength) { - lines.push(line.join('')); - } - return lines; -} - -// Wrap text (ignoring word boundaries) -function textWrap(maxLength, input) { - let lines = []; - let line = ''; - function pushLine(str, ws) { - if (line.length && ws) line += ws; - line += str; - while (line.length > maxLength) { - lines.push(line.slice(0, maxLength)); - line = line.slice(maxLength); - } - } - let split = input.split(/(\s+)/g); - for (let i = 0; i < split.length; i += 2) { - pushLine(split[i], i && split[i - 1]); - } - if (line.length) lines.push(line); - return lines; -} - -function multiLineWordWrap(maxLength, input, wrapOnWordBoundary = true) { - let output = []; - input = input.split('\n'); - const handler = wrapOnWordBoundary ? 
wordWrap : textWrap; - for (let i = 0; i < input.length; i++) { - output.push.apply(output, handler(maxLength, input[i])); - } - return output; -} - -function colorizeLines(input) { - let state = {}; - let output = []; - for (let i = 0; i < input.length; i++) { - let line = rewindState(state, input[i]); - state = readState(line); - let temp = Object.assign({}, state); - output.push(unwindState(temp, line)); - } - return output; -} - -/** - * Credit: Matheus Sampaio https://github.com/matheussampaio - */ -function hyperlink(url, text) { - const OSC = '\u001B]'; - const BEL = '\u0007'; - const SEP = ';'; - - return [OSC, '8', SEP, SEP, url || text, BEL, text, OSC, '8', SEP, SEP, BEL].join(''); -} - -module.exports = { - strlen: strlen, - repeat: repeat, - pad: pad, - truncate: truncate, - mergeOptions: mergeOptions, - wordWrap: multiLineWordWrap, - colorizeLines: colorizeLines, - hyperlink, -}; diff --git a/deps/npm/node_modules/clone/LICENSE b/deps/npm/node_modules/clone/LICENSE deleted file mode 100644 index cc3c87bc3bfd85..00000000000000 --- a/deps/npm/node_modules/clone/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright © 2011-2015 Paul Vorbach - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the “Software”), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/npm/node_modules/clone/clone.iml b/deps/npm/node_modules/clone/clone.iml deleted file mode 100644 index 30de8aee9ba303..00000000000000 --- a/deps/npm/node_modules/clone/clone.iml +++ /dev/null @@ -1,10 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/deps/npm/node_modules/clone/clone.js b/deps/npm/node_modules/clone/clone.js deleted file mode 100644 index ba200c2f99288d..00000000000000 --- a/deps/npm/node_modules/clone/clone.js +++ /dev/null @@ -1,166 +0,0 @@ -var clone = (function() { -'use strict'; - -/** - * Clones (copies) an Object using deep copying. - * - * This function supports circular references by default, but if you are certain - * there are no circular references in your object, you can save some CPU time - * by calling clone(obj, false). - * - * Caution: if `circular` is false and `parent` contains circular references, - * your program may enter an infinite loop and crash. - * - * @param `parent` - the object to be cloned - * @param `circular` - set to true if the object to be cloned may contain - * circular references. (optional - true by default) - * @param `depth` - set to a number if the object is only to be cloned to - * a particular depth. (optional - defaults to Infinity) - * @param `prototype` - sets the prototype to be used when cloning an object. 
- * (optional - defaults to parent prototype). -*/ -function clone(parent, circular, depth, prototype) { - var filter; - if (typeof circular === 'object') { - depth = circular.depth; - prototype = circular.prototype; - filter = circular.filter; - circular = circular.circular - } - // maintain two arrays for circular references, where corresponding parents - // and children have the same index - var allParents = []; - var allChildren = []; - - var useBuffer = typeof Buffer != 'undefined'; - - if (typeof circular == 'undefined') - circular = true; - - if (typeof depth == 'undefined') - depth = Infinity; - - // recurse this function so we don't reset allParents and allChildren - function _clone(parent, depth) { - // cloning null always returns null - if (parent === null) - return null; - - if (depth == 0) - return parent; - - var child; - var proto; - if (typeof parent != 'object') { - return parent; - } - - if (clone.__isArray(parent)) { - child = []; - } else if (clone.__isRegExp(parent)) { - child = new RegExp(parent.source, __getRegExpFlags(parent)); - if (parent.lastIndex) child.lastIndex = parent.lastIndex; - } else if (clone.__isDate(parent)) { - child = new Date(parent.getTime()); - } else if (useBuffer && Buffer.isBuffer(parent)) { - if (Buffer.allocUnsafe) { - // Node.js >= 4.5.0 - child = Buffer.allocUnsafe(parent.length); - } else { - // Older Node.js versions - child = new Buffer(parent.length); - } - parent.copy(child); - return child; - } else { - if (typeof prototype == 'undefined') { - proto = Object.getPrototypeOf(parent); - child = Object.create(proto); - } - else { - child = Object.create(prototype); - proto = prototype; - } - } - - if (circular) { - var index = allParents.indexOf(parent); - - if (index != -1) { - return allChildren[index]; - } - allParents.push(parent); - allChildren.push(child); - } - - for (var i in parent) { - var attrs; - if (proto) { - attrs = Object.getOwnPropertyDescriptor(proto, i); - } - - if (attrs && attrs.set == null) { - continue; - } - child[i] = _clone(parent[i], depth - 1); - } - - return child; - } - - return _clone(parent, depth); -} - -/** - * Simple flat clone using prototype, accepts only objects, usefull for property - * override on FLAT configuration object (no nested props). - * - * USE WITH CAUTION! This may not behave as you wish if you do not know how this - * works. 
- */ -clone.clonePrototype = function clonePrototype(parent) { - if (parent === null) - return null; - - var c = function () {}; - c.prototype = parent; - return new c(); -}; - -// private utility functions - -function __objToStr(o) { - return Object.prototype.toString.call(o); -}; -clone.__objToStr = __objToStr; - -function __isDate(o) { - return typeof o === 'object' && __objToStr(o) === '[object Date]'; -}; -clone.__isDate = __isDate; - -function __isArray(o) { - return typeof o === 'object' && __objToStr(o) === '[object Array]'; -}; -clone.__isArray = __isArray; - -function __isRegExp(o) { - return typeof o === 'object' && __objToStr(o) === '[object RegExp]'; -}; -clone.__isRegExp = __isRegExp; - -function __getRegExpFlags(re) { - var flags = ''; - if (re.global) flags += 'g'; - if (re.ignoreCase) flags += 'i'; - if (re.multiline) flags += 'm'; - return flags; -}; -clone.__getRegExpFlags = __getRegExpFlags; - -return clone; -})(); - -if (typeof module === 'object' && module.exports) { - module.exports = clone; -} diff --git a/deps/npm/node_modules/clone/package.json b/deps/npm/node_modules/clone/package.json deleted file mode 100644 index 3ddd242f4a5108..00000000000000 --- a/deps/npm/node_modules/clone/package.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "name": "clone", - "description": "deep cloning of objects and arrays", - "tags": [ - "clone", - "object", - "array", - "function", - "date" - ], - "version": "1.0.4", - "repository": { - "type": "git", - "url": "git://github.com/pvorb/node-clone.git" - }, - "bugs": { - "url": "https://github.com/pvorb/node-clone/issues" - }, - "main": "clone.js", - "author": "Paul Vorbach (http://paul.vorba.ch/)", - "contributors": [ - "Blake Miner (http://www.blakeminer.com/)", - "Tian You (http://blog.axqd.net/)", - "George Stagas (http://stagas.com/)", - "Tobiasz Cudnik (https://github.com/TobiaszCudnik)", - "Pavel Lang (https://github.com/langpavel)", - "Dan MacTough (http://yabfog.com/)", - "w1nk (https://github.com/w1nk)", - "Hugh Kennedy (http://twitter.com/hughskennedy)", - "Dustin Diaz (http://dustindiaz.com)", - "Ilya Shaisultanov (https://github.com/diversario)", - "Nathan MacInnes (http://macinn.es/)", - "Benjamin E. Coe (https://twitter.com/benjamincoe)", - "Nathan Zadoks (https://github.com/nathan7)", - "Róbert Oroszi (https://github.com/oroce)", - "Aurélio A. Heckert (http://softwarelivre.org/aurium)", - "Guy Ellis (http://www.guyellisrocks.com/)" - ], - "license": "MIT", - "engines": { - "node": ">=0.8" - }, - "dependencies": {}, - "devDependencies": { - "nodeunit": "~0.9.0" - }, - "optionalDependencies": {}, - "scripts": { - "test": "nodeunit test.js" - } -} diff --git a/deps/npm/node_modules/color-support/README.md b/deps/npm/node_modules/color-support/README.md deleted file mode 100644 index f89aa17d3526a3..00000000000000 --- a/deps/npm/node_modules/color-support/README.md +++ /dev/null @@ -1,129 +0,0 @@ -# color-support - -A module which will endeavor to guess your terminal's level of color -support. - -[![Build Status](https://travis-ci.org/isaacs/color-support.svg?branch=master)](https://travis-ci.org/isaacs/color-support) [![Coverage Status](https://coveralls.io/repos/github/isaacs/color-support/badge.svg?branch=master)](https://coveralls.io/github/isaacs/color-support?branch=master) - -This is similar to `supports-color`, but it does not read -`process.argv`. - -1. If not in a node environment, not supported. - -2. If stdout is not a TTY, not supported, unless the `ignoreTTY` - option is set. - -3. 
If the `TERM` environ is `dumb`, not supported, unless the - `ignoreDumb` option is set. - -4. If on Windows, then support 16 colors. - -5. If using Tmux, then support 256 colors. - -7. Handle continuous-integration servers. If `CI` or - `TEAMCITY_VERSION` are set in the environment, and `TRAVIS` is not - set, then color is not supported, unless `ignoreCI` option is set. - -6. Guess based on the `TERM_PROGRAM` environ. These terminals support - 16m colors: - - - `iTerm.app` version 3.x supports 16m colors, below support 256 - - `MacTerm` supports 16m colors - - `Apple_Terminal` supports 256 colors - - Have more things that belong on this list? Send a PR! - -8. Make a guess based on the `TERM` environment variable. Any - `xterm-256color` will get 256 colors. Any screen, xterm, vt100, - color, ansi, cygwin, or linux `TERM` will get 16 colors. - -9. If `COLORTERM` environment variable is set, then support 16 colors. - -10. At this point, we assume that color is not supported. - -## USAGE - -```javascript -var testColorSupport = require('color-support') -var colorSupport = testColorSupport(/* options object */) - -if (!colorSupport) { - console.log('color is not supported') -} else if (colorSupport.has16m) { - console.log('\x1b[38;2;102;194;255m16m colors\x1b[0m') -} else if (colorSupport.has256) { - console.log('\x1b[38;5;119m256 colors\x1b[0m') -} else if (colorSupport.hasBasic) { - console.log('\x1b[31mbasic colors\x1b[0m') -} else { - console.log('this is impossible, but colors are not supported') -} -``` - -If you don't have any options to set, you can also just look at the -flags which will all be set on the test function itself. (Of course, -this doesn't return a falsey value when colors aren't supported, and -doesn't allow you to set options.) - -```javascript -var colorSupport = require('color-support') - -if (colorSupport.has16m) { - console.log('\x1b[38;2;102;194;255m16m colors\x1b[0m') -} else if (colorSupport.has256) { - console.log('\x1b[38;5;119m256 colors\x1b[0m') -} else if (colorSupport.hasBasic) { - console.log('\x1b[31mbasic colors\x1b[0m') -} else { - console.log('colors are not supported') -} -``` - -## Options - -You can pass in the following options. - -* ignoreTTY - default false. Ignore the `isTTY` check. -* ignoreDumb - default false. Ignore `TERM=dumb` environ check. -* ignoreCI - default false. Ignore `CI` environ check. -* env - Object for environment vars. Defaults to `process.env`. -* stream - Stream for `isTTY` check. Defaults to `process.stdout`. -* term - String for `TERM` checking. Defaults to `env.TERM`. -* alwaysReturn - default false. Return an object when colors aren't - supported (instead of returning `false`). -* level - A number from 0 to 3. This will return a result for the - specified level. This is useful if you want to be able to set the - color support level explicitly as a number in an environment - variable or config, but then use the object flags in your program. - Except for `alwaysReturn` to return an object for level 0, all other - options are ignored, since no checking is done if a level is - explicitly set. - -## Return Value - -If no color support is available, then `false` is returned by default, -unless the `alwaysReturn` flag is set to `true`. This is so that the -simple question of "can I use colors or not" can treat any truthy -return as "yes". 
- -Otherwise, the return object has the following fields: - -* `level` - A number from 0 to 3 - * `0` - No color support - * `1` - Basic (16) color support - * `2` - 256 color support - * `3` - 16 million (true) color support -* `hasBasic` - Boolean -* `has256` - Boolean -* `has16m` - Boolean - -## CLI - -You can run the `color-support` bin from the command line which will -just dump the values as this module calculates them in whatever env -it's run. It takes no command line arguments. - -## Credits - -This is a spiritual, if not actual, fork of -[supports-color](http://npm.im/supports-color) by the ever prolific -[Sindre Sorhus](http://npm.im/~sindresorhus). diff --git a/deps/npm/node_modules/color-support/bin.js b/deps/npm/node_modules/color-support/bin.js deleted file mode 100755 index 3c0a9672180835..00000000000000 --- a/deps/npm/node_modules/color-support/bin.js +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env node -var colorSupport = require('./')({alwaysReturn: true }) -console.log(JSON.stringify(colorSupport, null, 2)) diff --git a/deps/npm/node_modules/color-support/browser.js b/deps/npm/node_modules/color-support/browser.js deleted file mode 100644 index ab5c6631a35b80..00000000000000 --- a/deps/npm/node_modules/color-support/browser.js +++ /dev/null @@ -1,14 +0,0 @@ -module.exports = colorSupport({ alwaysReturn: true }, colorSupport) - -function colorSupport(options, obj) { - obj = obj || {} - options = options || {} - obj.level = 0 - obj.hasBasic = false - obj.has256 = false - obj.has16m = false - if (!options.alwaysReturn) { - return false - } - return obj -} diff --git a/deps/npm/node_modules/color-support/index.js b/deps/npm/node_modules/color-support/index.js deleted file mode 100644 index 6b6f3b28194243..00000000000000 --- a/deps/npm/node_modules/color-support/index.js +++ /dev/null @@ -1,134 +0,0 @@ -// call it on itself so we can test the export val for basic stuff -module.exports = colorSupport({ alwaysReturn: true }, colorSupport) - -function hasNone (obj, options) { - obj.level = 0 - obj.hasBasic = false - obj.has256 = false - obj.has16m = false - if (!options.alwaysReturn) { - return false - } - return obj -} - -function hasBasic (obj) { - obj.hasBasic = true - obj.has256 = false - obj.has16m = false - obj.level = 1 - return obj -} - -function has256 (obj) { - obj.hasBasic = true - obj.has256 = true - obj.has16m = false - obj.level = 2 - return obj -} - -function has16m (obj) { - obj.hasBasic = true - obj.has256 = true - obj.has16m = true - obj.level = 3 - return obj -} - -function colorSupport (options, obj) { - options = options || {} - - obj = obj || {} - - // if just requesting a specific level, then return that. 
- if (typeof options.level === 'number') { - switch (options.level) { - case 0: - return hasNone(obj, options) - case 1: - return hasBasic(obj) - case 2: - return has256(obj) - case 3: - return has16m(obj) - } - } - - obj.level = 0 - obj.hasBasic = false - obj.has256 = false - obj.has16m = false - - if (typeof process === 'undefined' || - !process || - !process.stdout || - !process.env || - !process.platform) { - return hasNone(obj, options) - } - - var env = options.env || process.env - var stream = options.stream || process.stdout - var term = options.term || env.TERM || '' - var platform = options.platform || process.platform - - if (!options.ignoreTTY && !stream.isTTY) { - return hasNone(obj, options) - } - - if (!options.ignoreDumb && term === 'dumb' && !env.COLORTERM) { - return hasNone(obj, options) - } - - if (platform === 'win32') { - return hasBasic(obj) - } - - if (env.TMUX) { - return has256(obj) - } - - if (!options.ignoreCI && (env.CI || env.TEAMCITY_VERSION)) { - if (env.TRAVIS) { - return has256(obj) - } else { - return hasNone(obj, options) - } - } - - // TODO: add more term programs - switch (env.TERM_PROGRAM) { - case 'iTerm.app': - var ver = env.TERM_PROGRAM_VERSION || '0.' - if (/^[0-2]\./.test(ver)) { - return has256(obj) - } else { - return has16m(obj) - } - - case 'HyperTerm': - case 'Hyper': - return has16m(obj) - - case 'MacTerm': - return has16m(obj) - - case 'Apple_Terminal': - return has256(obj) - } - - if (/^xterm-256/.test(term)) { - return has256(obj) - } - - if (/^screen|^xterm|^vt100|color|ansi|cygwin|linux/i.test(term)) { - return hasBasic(obj) - } - - if (env.COLORTERM) { - return hasBasic(obj) - } - - return hasNone(obj, options) -} diff --git a/deps/npm/node_modules/color-support/package.json b/deps/npm/node_modules/color-support/package.json deleted file mode 100644 index f3e3b77145d6ba..00000000000000 --- a/deps/npm/node_modules/color-support/package.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "name": "color-support", - "version": "1.1.3", - "description": "A module which will endeavor to guess your terminal's level of color support.", - "main": "index.js", - "browser": "browser.js", - "bin": "bin.js", - "devDependencies": { - "tap": "^10.3.3" - }, - "scripts": { - "test": "tap test/*.js --100 -J", - "preversion": "npm test", - "postversion": "npm publish", - "postpublish": "git push origin --all; git push origin --tags" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/isaacs/color-support.git" - }, - "keywords": [ - "terminal", - "color", - "support", - "xterm", - "truecolor", - "256" - ], - "author": "Isaac Z. 
Schlueter (http://blog.izs.me/)", - "license": "ISC", - "files": [ - "browser.js", - "index.js", - "bin.js" - ] -} diff --git a/deps/npm/node_modules/columnify/LICENSE b/deps/npm/node_modules/columnify/LICENSE deleted file mode 100644 index ed47678e61c408..00000000000000 --- a/deps/npm/node_modules/columnify/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Tim Oxley - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/deps/npm/node_modules/columnify/Makefile b/deps/npm/node_modules/columnify/Makefile deleted file mode 100644 index 3a67c57a3b1e08..00000000000000 --- a/deps/npm/node_modules/columnify/Makefile +++ /dev/null @@ -1,9 +0,0 @@ - -all: columnify.js - -prepublish: all - -columnify.js: index.js package.json - babel index.js > columnify.js - -.PHONY: all prepublish diff --git a/deps/npm/node_modules/columnify/columnify.js b/deps/npm/node_modules/columnify/columnify.js deleted file mode 100644 index dcef9236e18436..00000000000000 --- a/deps/npm/node_modules/columnify/columnify.js +++ /dev/null @@ -1,306 +0,0 @@ -"use strict"; - -var wcwidth = require('./width'); - -var _require = require('./utils'), - padRight = _require.padRight, - padCenter = _require.padCenter, - padLeft = _require.padLeft, - splitIntoLines = _require.splitIntoLines, - splitLongWords = _require.splitLongWords, - truncateString = _require.truncateString; - -var DEFAULT_HEADING_TRANSFORM = function DEFAULT_HEADING_TRANSFORM(key) { - return key.toUpperCase(); -}; - -var DEFAULT_DATA_TRANSFORM = function DEFAULT_DATA_TRANSFORM(cell, column, index) { - return cell; -}; - -var DEFAULTS = Object.freeze({ - maxWidth: Infinity, - minWidth: 0, - columnSplitter: ' ', - truncate: false, - truncateMarker: '…', - preserveNewLines: false, - paddingChr: ' ', - showHeaders: true, - headingTransform: DEFAULT_HEADING_TRANSFORM, - dataTransform: DEFAULT_DATA_TRANSFORM -}); - -module.exports = function (items) { - var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; - - - var columnConfigs = options.config || {}; - delete options.config; // remove config so doesn't appear on every column. 
- - var maxLineWidth = options.maxLineWidth || Infinity; - if (maxLineWidth === 'auto') maxLineWidth = process.stdout.columns || Infinity; - delete options.maxLineWidth; // this is a line control option, don't pass it to column - - // Option defaults inheritance: - // options.config[columnName] => options => DEFAULTS - options = mixin({}, DEFAULTS, options); - - options.config = options.config || Object.create(null); - - options.spacing = options.spacing || '\n'; // probably useless - options.preserveNewLines = !!options.preserveNewLines; - options.showHeaders = !!options.showHeaders; - options.columns = options.columns || options.include; // alias include/columns, prefer columns if supplied - var columnNames = options.columns || []; // optional user-supplied columns to include - - items = toArray(items, columnNames); - - // if not suppled column names, automatically determine columns from data keys - if (!columnNames.length) { - items.forEach(function (item) { - for (var columnName in item) { - if (columnNames.indexOf(columnName) === -1) columnNames.push(columnName); - } - }); - } - - // initialize column defaults (each column inherits from options.config) - var columns = columnNames.reduce(function (columns, columnName) { - var column = Object.create(options); - columns[columnName] = mixin(column, columnConfigs[columnName]); - return columns; - }, Object.create(null)); - - // sanitize column settings - columnNames.forEach(function (columnName) { - var column = columns[columnName]; - column.name = columnName; - column.maxWidth = Math.ceil(column.maxWidth); - column.minWidth = Math.ceil(column.minWidth); - column.truncate = !!column.truncate; - column.align = column.align || 'left'; - }); - - // sanitize data - items = items.map(function (item) { - var result = Object.create(null); - columnNames.forEach(function (columnName) { - // null/undefined -> '' - result[columnName] = item[columnName] != null ? 
item[columnName] : ''; - // toString everything - result[columnName] = '' + result[columnName]; - if (columns[columnName].preserveNewLines) { - // merge non-newline whitespace chars - result[columnName] = result[columnName].replace(/[^\S\n]/gmi, ' '); - } else { - // merge all whitespace chars - result[columnName] = result[columnName].replace(/\s/gmi, ' '); - } - }); - return result; - }); - - // transform data cells - columnNames.forEach(function (columnName) { - var column = columns[columnName]; - items = items.map(function (item, index) { - var col = Object.create(column); - item[columnName] = column.dataTransform(item[columnName], col, index); - - var changedKeys = Object.keys(col); - // disable default heading transform if we wrote to column.name - if (changedKeys.indexOf('name') !== -1) { - if (column.headingTransform !== DEFAULT_HEADING_TRANSFORM) return; - column.headingTransform = function (heading) { - return heading; - }; - } - changedKeys.forEach(function (key) { - return column[key] = col[key]; - }); - return item; - }); - }); - - // add headers - var headers = {}; - if (options.showHeaders) { - columnNames.forEach(function (columnName) { - var column = columns[columnName]; - - if (!column.showHeaders) { - headers[columnName] = ''; - return; - } - - headers[columnName] = column.headingTransform(column.name); - }); - items.unshift(headers); - } - // get actual max-width between min & max - // based on length of data in columns - columnNames.forEach(function (columnName) { - var column = columns[columnName]; - column.width = items.map(function (item) { - return item[columnName]; - }).reduce(function (min, cur) { - // if already at maxWidth don't bother testing - if (min >= column.maxWidth) return min; - return Math.max(min, Math.min(column.maxWidth, Math.max(column.minWidth, wcwidth(cur)))); - }, 0); - }); - - // split long words so they can break onto multiple lines - columnNames.forEach(function (columnName) { - var column = columns[columnName]; - items = items.map(function (item) { - item[columnName] = splitLongWords(item[columnName], column.width, column.truncateMarker); - return item; - }); - }); - - // wrap long lines. each item is now an array of lines. 
- columnNames.forEach(function (columnName) { - var column = columns[columnName]; - items = items.map(function (item, index) { - var cell = item[columnName]; - item[columnName] = splitIntoLines(cell, column.width); - - // if truncating required, only include first line + add truncation char - if (column.truncate && item[columnName].length > 1) { - item[columnName] = splitIntoLines(cell, column.width - wcwidth(column.truncateMarker)); - var firstLine = item[columnName][0]; - if (!endsWith(firstLine, column.truncateMarker)) item[columnName][0] += column.truncateMarker; - item[columnName] = item[columnName].slice(0, 1); - } - return item; - }); - }); - - // recalculate column widths from truncated output/lines - columnNames.forEach(function (columnName) { - var column = columns[columnName]; - column.width = items.map(function (item) { - return item[columnName].reduce(function (min, cur) { - if (min >= column.maxWidth) return min; - return Math.max(min, Math.min(column.maxWidth, Math.max(column.minWidth, wcwidth(cur)))); - }, 0); - }).reduce(function (min, cur) { - if (min >= column.maxWidth) return min; - return Math.max(min, Math.min(column.maxWidth, Math.max(column.minWidth, cur))); - }, 0); - }); - - var rows = createRows(items, columns, columnNames, options.paddingChr); // merge lines into rows - // conceive output - return rows.reduce(function (output, row) { - return output.concat(row.reduce(function (rowOut, line) { - return rowOut.concat(line.join(options.columnSplitter)); - }, [])); - }, []).map(function (line) { - return truncateString(line, maxLineWidth); - }).join(options.spacing); -}; - -/** - * Convert wrapped lines into rows with padded values. - * - * @param Array items data to process - * @param Array columns column width settings for wrapping - * @param Array columnNames column ordering - * @return Array items wrapped in arrays, corresponding to lines - */ - -function createRows(items, columns, columnNames, paddingChr) { - return items.map(function (item) { - var row = []; - var numLines = 0; - columnNames.forEach(function (columnName) { - numLines = Math.max(numLines, item[columnName].length); - }); - // combine matching lines of each rows - - var _loop = function _loop(i) { - row[i] = row[i] || []; - columnNames.forEach(function (columnName) { - var column = columns[columnName]; - var val = item[columnName][i] || ''; // || '' ensures empty columns get padded - if (column.align === 'right') row[i].push(padLeft(val, column.width, paddingChr));else if (column.align === 'center' || column.align === 'centre') row[i].push(padCenter(val, column.width, paddingChr));else row[i].push(padRight(val, column.width, paddingChr)); - }); - }; - - for (var i = 0; i < numLines; i++) { - _loop(i); - } - return row; - }); -} - -/** - * Object.assign - * - * @return Object Object with properties mixed in. 
- */ - -function mixin() { - if (Object.assign) return Object.assign.apply(Object, arguments); - return ObjectAssign.apply(undefined, arguments); -} - -function ObjectAssign(target, firstSource) { - "use strict"; - - if (target === undefined || target === null) throw new TypeError("Cannot convert first argument to object"); - - var to = Object(target); - - var hasPendingException = false; - var pendingException; - - for (var i = 1; i < arguments.length; i++) { - var nextSource = arguments[i]; - if (nextSource === undefined || nextSource === null) continue; - - var keysArray = Object.keys(Object(nextSource)); - for (var nextIndex = 0, len = keysArray.length; nextIndex < len; nextIndex++) { - var nextKey = keysArray[nextIndex]; - try { - var desc = Object.getOwnPropertyDescriptor(nextSource, nextKey); - if (desc !== undefined && desc.enumerable) to[nextKey] = nextSource[nextKey]; - } catch (e) { - if (!hasPendingException) { - hasPendingException = true; - pendingException = e; - } - } - } - - if (hasPendingException) throw pendingException; - } - return to; -} - -/** - * Adapted from String.prototype.endsWith polyfill. - */ - -function endsWith(target, searchString, position) { - position = position || target.length; - position = position - searchString.length; - var lastIndex = target.lastIndexOf(searchString); - return lastIndex !== -1 && lastIndex === position; -} - -function toArray(items, columnNames) { - if (Array.isArray(items)) return items; - var rows = []; - for (var key in items) { - var item = {}; - item[columnNames[0] || 'key'] = key; - item[columnNames[1] || 'value'] = items[key]; - rows.push(item); - } - return rows; -} - diff --git a/deps/npm/node_modules/columnify/index.js b/deps/npm/node_modules/columnify/index.js deleted file mode 100644 index 221269b3e76b72..00000000000000 --- a/deps/npm/node_modules/columnify/index.js +++ /dev/null @@ -1,297 +0,0 @@ -"use strict" - -const wcwidth = require('./width') -const { - padRight, - padCenter, - padLeft, - splitIntoLines, - splitLongWords, - truncateString -} = require('./utils') - -const DEFAULT_HEADING_TRANSFORM = key => key.toUpperCase() - -const DEFAULT_DATA_TRANSFORM = (cell, column, index) => cell - -const DEFAULTS = Object.freeze({ - maxWidth: Infinity, - minWidth: 0, - columnSplitter: ' ', - truncate: false, - truncateMarker: '…', - preserveNewLines: false, - paddingChr: ' ', - showHeaders: true, - headingTransform: DEFAULT_HEADING_TRANSFORM, - dataTransform: DEFAULT_DATA_TRANSFORM -}) - -module.exports = function(items, options = {}) { - - let columnConfigs = options.config || {} - delete options.config // remove config so doesn't appear on every column. 
- - let maxLineWidth = options.maxLineWidth || Infinity - if (maxLineWidth === 'auto') maxLineWidth = process.stdout.columns || Infinity - delete options.maxLineWidth // this is a line control option, don't pass it to column - - // Option defaults inheritance: - // options.config[columnName] => options => DEFAULTS - options = mixin({}, DEFAULTS, options) - - options.config = options.config || Object.create(null) - - options.spacing = options.spacing || '\n' // probably useless - options.preserveNewLines = !!options.preserveNewLines - options.showHeaders = !!options.showHeaders; - options.columns = options.columns || options.include // alias include/columns, prefer columns if supplied - let columnNames = options.columns || [] // optional user-supplied columns to include - - items = toArray(items, columnNames) - - // if not suppled column names, automatically determine columns from data keys - if (!columnNames.length) { - items.forEach(function(item) { - for (let columnName in item) { - if (columnNames.indexOf(columnName) === -1) columnNames.push(columnName) - } - }) - } - - // initialize column defaults (each column inherits from options.config) - let columns = columnNames.reduce((columns, columnName) => { - let column = Object.create(options) - columns[columnName] = mixin(column, columnConfigs[columnName]) - return columns - }, Object.create(null)) - - // sanitize column settings - columnNames.forEach(columnName => { - let column = columns[columnName] - column.name = columnName - column.maxWidth = Math.ceil(column.maxWidth) - column.minWidth = Math.ceil(column.minWidth) - column.truncate = !!column.truncate - column.align = column.align || 'left' - }) - - // sanitize data - items = items.map(item => { - let result = Object.create(null) - columnNames.forEach(columnName => { - // null/undefined -> '' - result[columnName] = item[columnName] != null ? 
item[columnName] : '' - // toString everything - result[columnName] = '' + result[columnName] - if (columns[columnName].preserveNewLines) { - // merge non-newline whitespace chars - result[columnName] = result[columnName].replace(/[^\S\n]/gmi, ' ') - } else { - // merge all whitespace chars - result[columnName] = result[columnName].replace(/\s/gmi, ' ') - } - }) - return result - }) - - // transform data cells - columnNames.forEach(columnName => { - let column = columns[columnName] - items = items.map((item, index) => { - let col = Object.create(column) - item[columnName] = column.dataTransform(item[columnName], col, index) - - let changedKeys = Object.keys(col) - // disable default heading transform if we wrote to column.name - if (changedKeys.indexOf('name') !== -1) { - if (column.headingTransform !== DEFAULT_HEADING_TRANSFORM) return - column.headingTransform = heading => heading - } - changedKeys.forEach(key => column[key] = col[key]) - return item - }) - }) - - // add headers - let headers = {} - if(options.showHeaders) { - columnNames.forEach(columnName => { - let column = columns[columnName] - - if(!column.showHeaders){ - headers[columnName] = ''; - return; - } - - headers[columnName] = column.headingTransform(column.name) - }) - items.unshift(headers) - } - // get actual max-width between min & max - // based on length of data in columns - columnNames.forEach(columnName => { - let column = columns[columnName] - column.width = items - .map(item => item[columnName]) - .reduce((min, cur) => { - // if already at maxWidth don't bother testing - if (min >= column.maxWidth) return min - return Math.max(min, Math.min(column.maxWidth, Math.max(column.minWidth, wcwidth(cur)))) - }, 0) - }) - - // split long words so they can break onto multiple lines - columnNames.forEach(columnName => { - let column = columns[columnName] - items = items.map(item => { - item[columnName] = splitLongWords(item[columnName], column.width, column.truncateMarker) - return item - }) - }) - - // wrap long lines. each item is now an array of lines. 
- columnNames.forEach(columnName => { - let column = columns[columnName] - items = items.map((item, index) => { - let cell = item[columnName] - item[columnName] = splitIntoLines(cell, column.width) - - // if truncating required, only include first line + add truncation char - if (column.truncate && item[columnName].length > 1) { - item[columnName] = splitIntoLines(cell, column.width - wcwidth(column.truncateMarker)) - let firstLine = item[columnName][0] - if (!endsWith(firstLine, column.truncateMarker)) item[columnName][0] += column.truncateMarker - item[columnName] = item[columnName].slice(0, 1) - } - return item - }) - }) - - // recalculate column widths from truncated output/lines - columnNames.forEach(columnName => { - let column = columns[columnName] - column.width = items.map(item => { - return item[columnName].reduce((min, cur) => { - if (min >= column.maxWidth) return min - return Math.max(min, Math.min(column.maxWidth, Math.max(column.minWidth, wcwidth(cur)))) - }, 0) - }).reduce((min, cur) => { - if (min >= column.maxWidth) return min - return Math.max(min, Math.min(column.maxWidth, Math.max(column.minWidth, cur))) - }, 0) - }) - - - let rows = createRows(items, columns, columnNames, options.paddingChr) // merge lines into rows - // conceive output - return rows.reduce((output, row) => { - return output.concat(row.reduce((rowOut, line) => { - return rowOut.concat(line.join(options.columnSplitter)) - }, [])) - }, []) - .map(line => truncateString(line, maxLineWidth)) - .join(options.spacing) -} - -/** - * Convert wrapped lines into rows with padded values. - * - * @param Array items data to process - * @param Array columns column width settings for wrapping - * @param Array columnNames column ordering - * @return Array items wrapped in arrays, corresponding to lines - */ - -function createRows(items, columns, columnNames, paddingChr) { - return items.map(item => { - let row = [] - let numLines = 0 - columnNames.forEach(columnName => { - numLines = Math.max(numLines, item[columnName].length) - }) - // combine matching lines of each rows - for (let i = 0; i < numLines; i++) { - row[i] = row[i] || [] - columnNames.forEach(columnName => { - let column = columns[columnName] - let val = item[columnName][i] || '' // || '' ensures empty columns get padded - if (column.align === 'right') row[i].push(padLeft(val, column.width, paddingChr)) - else if (column.align === 'center' || column.align === 'centre') row[i].push(padCenter(val, column.width, paddingChr)) - else row[i].push(padRight(val, column.width, paddingChr)) - }) - } - return row - }) -} - -/** - * Object.assign - * - * @return Object Object with properties mixed in. 
- */ - -function mixin(...args) { - if (Object.assign) return Object.assign(...args) - return ObjectAssign(...args) -} - -function ObjectAssign(target, firstSource) { - "use strict"; - if (target === undefined || target === null) - throw new TypeError("Cannot convert first argument to object"); - - var to = Object(target); - - var hasPendingException = false; - var pendingException; - - for (var i = 1; i < arguments.length; i++) { - var nextSource = arguments[i]; - if (nextSource === undefined || nextSource === null) - continue; - - var keysArray = Object.keys(Object(nextSource)); - for (var nextIndex = 0, len = keysArray.length; nextIndex < len; nextIndex++) { - var nextKey = keysArray[nextIndex]; - try { - var desc = Object.getOwnPropertyDescriptor(nextSource, nextKey); - if (desc !== undefined && desc.enumerable) - to[nextKey] = nextSource[nextKey]; - } catch (e) { - if (!hasPendingException) { - hasPendingException = true; - pendingException = e; - } - } - } - - if (hasPendingException) - throw pendingException; - } - return to; -} - -/** - * Adapted from String.prototype.endsWith polyfill. - */ - -function endsWith(target, searchString, position) { - position = position || target.length; - position = position - searchString.length; - let lastIndex = target.lastIndexOf(searchString); - return lastIndex !== -1 && lastIndex === position; -} - - -function toArray(items, columnNames) { - if (Array.isArray(items)) return items - let rows = [] - for (let key in items) { - let item = {} - item[columnNames[0] || 'key'] = key - item[columnNames[1] || 'value'] = items[key] - rows.push(item) - } - return rows -} diff --git a/deps/npm/node_modules/columnify/package.json b/deps/npm/node_modules/columnify/package.json deleted file mode 100644 index 29565407a8cd7b..00000000000000 --- a/deps/npm/node_modules/columnify/package.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "name": "columnify", - "version": "1.6.0", - "description": "Render data in text columns. Supports in-column text-wrap.", - "main": "columnify.js", - "scripts": { - "pretest": "npm prune", - "test": "make prepublish && tape test/*.js | tap-spec", - "bench": "npm test && node bench", - "prepublish": "make prepublish" - }, - "babel": { - "presets": [ - "es2015" - ] - }, - "author": "Tim Oxley", - "license": "MIT", - "devDependencies": { - "babel-cli": "^6.26.0", - "babel-preset-es2015": "^6.3.13", - "chalk": "^1.1.1", - "tap-spec": "^5.0.0", - "tape": "^4.4.0" - }, - "repository": { - "type": "git", - "url": "git://github.com/timoxley/columnify.git" - }, - "keywords": [ - "column", - "text", - "ansi", - "console", - "terminal", - "wrap", - "table" - ], - "bugs": { - "url": "https://github.com/timoxley/columnify/issues" - }, - "homepage": "https://github.com/timoxley/columnify", - "engines": { - "node": ">=8.0.0" - }, - "dependencies": { - "strip-ansi": "^6.0.1", - "wcwidth": "^1.0.0" - }, - "directories": { - "test": "test" - } -} diff --git a/deps/npm/node_modules/columnify/utils.js b/deps/npm/node_modules/columnify/utils.js deleted file mode 100644 index df3e6cc44e8561..00000000000000 --- a/deps/npm/node_modules/columnify/utils.js +++ /dev/null @@ -1,193 +0,0 @@ -"use strict" - -var wcwidth = require('./width') - -/** - * repeat string `str` up to total length of `len` - * - * @param String str string to repeat - * @param Number len total length of output string - */ - -function repeatString(str, len) { - return Array.apply(null, {length: len + 1}).join(str).slice(0, len) -} - -/** - * Pad `str` up to total length `max` with `chr`. 
- * If `str` is longer than `max`, padRight will return `str` unaltered. - * - * @param String str string to pad - * @param Number max total length of output string - * @param String chr optional. Character to pad with. default: ' ' - * @return String padded str - */ - -function padRight(str, max, chr) { - str = str != null ? str : '' - str = String(str) - var length = max - wcwidth(str) - if (length <= 0) return str - return str + repeatString(chr || ' ', length) -} - -/** - * Pad `str` up to total length `max` with `chr`. - * If `str` is longer than `max`, padCenter will return `str` unaltered. - * - * @param String str string to pad - * @param Number max total length of output string - * @param String chr optional. Character to pad with. default: ' ' - * @return String padded str - */ - -function padCenter(str, max, chr) { - str = str != null ? str : '' - str = String(str) - var length = max - wcwidth(str) - if (length <= 0) return str - var lengthLeft = Math.floor(length/2) - var lengthRight = length - lengthLeft - return repeatString(chr || ' ', lengthLeft) + str + repeatString(chr || ' ', lengthRight) -} - -/** - * Pad `str` up to total length `max` with `chr`, on the left. - * If `str` is longer than `max`, padRight will return `str` unaltered. - * - * @param String str string to pad - * @param Number max total length of output string - * @param String chr optional. Character to pad with. default: ' ' - * @return String padded str - */ - -function padLeft(str, max, chr) { - str = str != null ? str : '' - str = String(str) - var length = max - wcwidth(str) - if (length <= 0) return str - return repeatString(chr || ' ', length) + str -} - -/** - * Split a String `str` into lines of maxiumum length `max`. - * Splits on word boundaries. Preserves existing new lines. - * - * @param String str string to split - * @param Number max length of each line - * @return Array Array containing lines. - */ - -function splitIntoLines(str, max) { - function _splitIntoLines(str, max) { - return str.trim().split(' ').reduce(function(lines, word) { - var line = lines[lines.length - 1] - if (line && wcwidth(line.join(' ')) + wcwidth(word) < max) { - lines[lines.length - 1].push(word) // add to line - } - else lines.push([word]) // new line - return lines - }, []).map(function(l) { - return l.join(' ') - }) - } - return str.split('\n').map(function(str) { - return _splitIntoLines(str, max) - }).reduce(function(lines, line) { - return lines.concat(line) - }, []) -} - -/** - * Add spaces and `truncationChar` between words of - * `str` which are longer than `max`. 
- * - * @param String str string to split - * @param Number max length of each line - * @param Number truncationChar character to append to split words - * @return String - */ - -function splitLongWords(str, max, truncationChar) { - str = str.trim() - var result = [] - var words = str.split(' ') - var remainder = '' - - var truncationWidth = wcwidth(truncationChar) - - while (remainder || words.length) { - if (remainder) { - var word = remainder - remainder = '' - } else { - var word = words.shift() - } - - if (wcwidth(word) > max) { - // slice is based on length no wcwidth - var i = 0 - var wwidth = 0 - var limit = max - truncationWidth - while (i < word.length) { - var w = wcwidth(word.charAt(i)) - if (w + wwidth > limit) { - break - } - wwidth += w - ++i - } - - remainder = word.slice(i) // get remainder - // save remainder for next loop - - word = word.slice(0, i) // grab truncated word - word += truncationChar // add trailing … or whatever - } - result.push(word) - } - - return result.join(' ') -} - - -/** - * Truncate `str` into total width `max` - * If `str` is shorter than `max`, will return `str` unaltered. - * - * @param String str string to truncated - * @param Number max total wcwidth of output string - * @return String truncated str - */ - -function truncateString(str, max) { - - str = str != null ? str : '' - str = String(str) - - if(max == Infinity) return str - - var i = 0 - var wwidth = 0 - while (i < str.length) { - var w = wcwidth(str.charAt(i)) - if(w + wwidth > max) - break - wwidth += w - ++i - } - return str.slice(0, i) -} - - - -/** - * Exports - */ - -module.exports.padRight = padRight -module.exports.padCenter = padCenter -module.exports.padLeft = padLeft -module.exports.splitIntoLines = splitIntoLines -module.exports.splitLongWords = splitLongWords -module.exports.truncateString = truncateString diff --git a/deps/npm/node_modules/columnify/width.js b/deps/npm/node_modules/columnify/width.js deleted file mode 100644 index a9f5333b40b2fa..00000000000000 --- a/deps/npm/node_modules/columnify/width.js +++ /dev/null @@ -1,6 +0,0 @@ -var stripAnsi = require('strip-ansi') -var wcwidth = require('wcwidth') - -module.exports = function(str) { - return wcwidth(stripAnsi(str)) -} diff --git a/deps/npm/node_modules/console-control-strings/LICENSE b/deps/npm/node_modules/console-control-strings/LICENSE deleted file mode 100644 index e756052969b780..00000000000000 --- a/deps/npm/node_modules/console-control-strings/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2014, Rebecca Turner - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
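For context on what is being removed: the columnify sources above export a single function that lays row objects out into padded text columns. A minimal usage sketch, using only option names that the removed code actually reads (`columns`, `config`, per-column `align`, `maxWidth`, `truncate`); the row data is made up for illustration:

    var columnify = require('columnify')

    // Rows are plain objects; their keys become column names.
    var rows = [
      { name: 'mod1', version: '0.0.1', description: 'a module' },
      { name: 'module-two', version: '0.2.0', description: 'another module with a longer description' },
    ]

    var output = columnify(rows, {
      columns: ['name', 'version', 'description'],    // select and order columns
      config: {
        version: { align: 'right' },                  // per-column settings
        description: { maxWidth: 25, truncate: true },
      },
    })

    console.log(output)

Per-column `config` entries are merged over the top-level options by the `mixin` helper shown above, so any top-level option can also be overridden for a single column.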
diff --git a/deps/npm/node_modules/console-control-strings/index.js b/deps/npm/node_modules/console-control-strings/index.js deleted file mode 100644 index bf890348ec6e35..00000000000000 --- a/deps/npm/node_modules/console-control-strings/index.js +++ /dev/null @@ -1,125 +0,0 @@ -'use strict' - -// These tables borrowed from `ansi` - -var prefix = '\x1b[' - -exports.up = function up (num) { - return prefix + (num || '') + 'A' -} - -exports.down = function down (num) { - return prefix + (num || '') + 'B' -} - -exports.forward = function forward (num) { - return prefix + (num || '') + 'C' -} - -exports.back = function back (num) { - return prefix + (num || '') + 'D' -} - -exports.nextLine = function nextLine (num) { - return prefix + (num || '') + 'E' -} - -exports.previousLine = function previousLine (num) { - return prefix + (num || '') + 'F' -} - -exports.horizontalAbsolute = function horizontalAbsolute (num) { - if (num == null) throw new Error('horizontalAboslute requires a column to position to') - return prefix + num + 'G' -} - -exports.eraseData = function eraseData () { - return prefix + 'J' -} - -exports.eraseLine = function eraseLine () { - return prefix + 'K' -} - -exports.goto = function (x, y) { - return prefix + y + ';' + x + 'H' -} - -exports.gotoSOL = function () { - return '\r' -} - -exports.beep = function () { - return '\x07' -} - -exports.hideCursor = function hideCursor () { - return prefix + '?25l' -} - -exports.showCursor = function showCursor () { - return prefix + '?25h' -} - -var colors = { - reset: 0, -// styles - bold: 1, - italic: 3, - underline: 4, - inverse: 7, -// resets - stopBold: 22, - stopItalic: 23, - stopUnderline: 24, - stopInverse: 27, -// colors - white: 37, - black: 30, - blue: 34, - cyan: 36, - green: 32, - magenta: 35, - red: 31, - yellow: 33, - bgWhite: 47, - bgBlack: 40, - bgBlue: 44, - bgCyan: 46, - bgGreen: 42, - bgMagenta: 45, - bgRed: 41, - bgYellow: 43, - - grey: 90, - brightBlack: 90, - brightRed: 91, - brightGreen: 92, - brightYellow: 93, - brightBlue: 94, - brightMagenta: 95, - brightCyan: 96, - brightWhite: 97, - - bgGrey: 100, - bgBrightBlack: 100, - bgBrightRed: 101, - bgBrightGreen: 102, - bgBrightYellow: 103, - bgBrightBlue: 104, - bgBrightMagenta: 105, - bgBrightCyan: 106, - bgBrightWhite: 107 -} - -exports.color = function color (colorWith) { - if (arguments.length !== 1 || !Array.isArray(colorWith)) { - colorWith = Array.prototype.slice.call(arguments) - } - return prefix + colorWith.map(colorNameToCode).join(';') + 'm' -} - -function colorNameToCode (color) { - if (colors[color] != null) return colors[color] - throw new Error('Unknown color or style name: ' + color) -} diff --git a/deps/npm/node_modules/console-control-strings/package.json b/deps/npm/node_modules/console-control-strings/package.json deleted file mode 100644 index eb6c62ae2dac76..00000000000000 --- a/deps/npm/node_modules/console-control-strings/package.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "name": "console-control-strings", - "version": "1.1.0", - "description": "A library of cross-platform tested terminal/console command strings for doing things like color and cursor positioning. This is a subset of both ansi and vt100. 
All control codes included work on both Windows & Unix-like OSes, except where noted.", - "main": "index.js", - "directories": { - "test": "test" - }, - "scripts": { - "test": "standard && tap test/*.js" - }, - "repository": { - "type": "git", - "url": "https://github.com/iarna/console-control-strings" - }, - "keywords": [], - "author": "Rebecca Turner (http://re-becca.org/)", - "license": "ISC", - "files": [ - "LICENSE", - "index.js" - ], - "devDependencies": { - "standard": "^7.1.2", - "tap": "^5.7.2" - } -} diff --git a/deps/npm/node_modules/defaults/LICENSE b/deps/npm/node_modules/defaults/LICENSE deleted file mode 100644 index 11eb6fdebf3b63..00000000000000 --- a/deps/npm/node_modules/defaults/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2022 Sindre Sorhus -Copyright (c) 2015 Elijah Insua - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
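Similarly, the console-control-strings index.js removed above is just a table of ANSI escape sequences returned as strings; nothing is written to the terminal until the caller prints them. A small sketch of how a consumer uses it (the printed text is made up for illustration):

    var consoleControl = require('console-control-strings')

    // color() accepts one array or several style names and returns the escape prefix.
    process.stdout.write(
      consoleControl.color('brightWhite', 'bgBlue') +
      ' installing... ' +
      consoleControl.color('reset') + '\n'
    )

    // Redraw a status line in place: return to the start of the line and erase it first.
    process.stdout.write(consoleControl.gotoSOL() + consoleControl.eraseLine() + 'done')

This is the same pattern the gauge package below relies on in its plumbing layer (gotoSOL, eraseLine, color('reset')).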
diff --git a/deps/npm/node_modules/defaults/index.js b/deps/npm/node_modules/defaults/index.js deleted file mode 100644 index cb7d75c9c6beb0..00000000000000 --- a/deps/npm/node_modules/defaults/index.js +++ /dev/null @@ -1,13 +0,0 @@ -var clone = require('clone'); - -module.exports = function(options, defaults) { - options = options || {}; - - Object.keys(defaults).forEach(function(key) { - if (typeof options[key] === 'undefined') { - options[key] = clone(defaults[key]); - } - }); - - return options; -}; \ No newline at end of file diff --git a/deps/npm/node_modules/defaults/package.json b/deps/npm/node_modules/defaults/package.json deleted file mode 100644 index 44f72b1714ce4e..00000000000000 --- a/deps/npm/node_modules/defaults/package.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "name": "defaults", - "version": "1.0.4", - "description": "merge single level defaults over a config object", - "main": "index.js", - "funding": "https://github.com/sponsors/sindresorhus", - "scripts": { - "test": "node test.js" - }, - "repository": { - "type": "git", - "url": "git://github.com/sindresorhus/node-defaults.git" - }, - "keywords": [ - "config", - "defaults", - "options", - "object", - "merge", - "assign", - "properties", - "deep" - ], - "author": "Elijah Insua ", - "license": "MIT", - "readmeFilename": "README.md", - "dependencies": { - "clone": "^1.0.2" - }, - "devDependencies": { - "tap": "^2.0.0" - } -} diff --git a/deps/npm/node_modules/defaults/test.js b/deps/npm/node_modules/defaults/test.js deleted file mode 100644 index 60e0ffba8b4aab..00000000000000 --- a/deps/npm/node_modules/defaults/test.js +++ /dev/null @@ -1,34 +0,0 @@ -var defaults = require('./'), - test = require('tap').test; - -test("ensure options is an object", function(t) { - var options = defaults(false, { a : true }); - t.ok(options.a); - t.end() -}); - -test("ensure defaults override keys", function(t) { - var result = defaults({}, { a: false, b: true }); - t.ok(result.b, 'b merges over undefined'); - t.equal(result.a, false, 'a merges over undefined'); - t.end(); -}); - -test("ensure defined keys are not overwritten", function(t) { - var result = defaults({ b: false }, { a: false, b: true }); - t.equal(result.b, false, 'b not merged'); - t.equal(result.a, false, 'a merges over undefined'); - t.end(); -}); - -test("ensure defaults clone nested objects", function(t) { - var d = { a: [1,2,3], b: { hello : 'world' } }; - var result = defaults({}, d); - t.equal(result.a.length, 3, 'objects should be clones'); - t.ok(result.a !== d.a, 'objects should be clones'); - - t.equal(Object.keys(result.b).length, 1, 'objects should be clones'); - t.ok(result.b !== d.b, 'objects should be clones'); - t.end(); -}); - diff --git a/deps/npm/node_modules/gauge/LICENSE.md b/deps/npm/node_modules/gauge/LICENSE.md deleted file mode 100644 index 5fc208ff122e08..00000000000000 --- a/deps/npm/node_modules/gauge/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ - - -ISC License - -Copyright npm, Inc. - -Permission to use, copy, modify, and/or distribute this -software for any purpose with or without fee is hereby -granted, provided that the above copyright notice and this -permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND NPM DISCLAIMS ALL -WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO -EVENT SHALL NPM BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/gauge/lib/base-theme.js b/deps/npm/node_modules/gauge/lib/base-theme.js deleted file mode 100644 index 00bf5684cddab8..00000000000000 --- a/deps/npm/node_modules/gauge/lib/base-theme.js +++ /dev/null @@ -1,18 +0,0 @@ -'use strict' -var spin = require('./spin.js') -var progressBar = require('./progress-bar.js') - -module.exports = { - activityIndicator: function (values, theme, width) { - if (values.spun == null) { - return - } - return spin(theme, values.spun) - }, - progressbar: function (values, theme, width) { - if (values.completed == null) { - return - } - return progressBar(theme, width, values.completed) - }, -} diff --git a/deps/npm/node_modules/gauge/lib/error.js b/deps/npm/node_modules/gauge/lib/error.js deleted file mode 100644 index d9914ba5335d25..00000000000000 --- a/deps/npm/node_modules/gauge/lib/error.js +++ /dev/null @@ -1,24 +0,0 @@ -'use strict' -var util = require('util') - -var User = exports.User = function User (msg) { - var err = new Error(msg) - Error.captureStackTrace(err, User) - err.code = 'EGAUGE' - return err -} - -exports.MissingTemplateValue = function MissingTemplateValue (item, values) { - var err = new User(util.format('Missing template value "%s"', item.type)) - Error.captureStackTrace(err, MissingTemplateValue) - err.template = item - err.values = values - return err -} - -exports.Internal = function Internal (msg) { - var err = new Error(msg) - Error.captureStackTrace(err, Internal) - err.code = 'EGAUGEINTERNAL' - return err -} diff --git a/deps/npm/node_modules/gauge/lib/has-color.js b/deps/npm/node_modules/gauge/lib/has-color.js deleted file mode 100644 index 16cba0eb47d332..00000000000000 --- a/deps/npm/node_modules/gauge/lib/has-color.js +++ /dev/null @@ -1,4 +0,0 @@ -'use strict' -var colorSupport = require('color-support') - -module.exports = colorSupport().hasBasic diff --git a/deps/npm/node_modules/gauge/lib/index.js b/deps/npm/node_modules/gauge/lib/index.js deleted file mode 100644 index be94f53f3b5f46..00000000000000 --- a/deps/npm/node_modules/gauge/lib/index.js +++ /dev/null @@ -1,289 +0,0 @@ -'use strict' -var Plumbing = require('./plumbing.js') -var hasUnicode = require('has-unicode') -var hasColor = require('./has-color.js') -var onExit = require('signal-exit').onExit -var defaultThemes = require('./themes') -var setInterval = require('./set-interval.js') -var process = require('./process.js') -var setImmediate = require('./set-immediate') - -module.exports = Gauge - -function callWith (obj, method) { - return function () { - return method.call(obj) - } -} - -function Gauge (arg1, arg2) { - var options, writeTo - if (arg1 && arg1.write) { - writeTo = arg1 - options = arg2 || {} - } else if (arg2 && arg2.write) { - writeTo = arg2 - options = arg1 || {} - } else { - writeTo = process.stderr - options = arg1 || arg2 || {} - } - - this._status = { - spun: 0, - section: '', - subsection: '', - } - this._paused = false // are we paused for back pressure? - this._disabled = true // are all progress bar updates disabled? 
- this._showing = false // do we WANT the progress bar on screen - this._onScreen = false // IS the progress bar on screen - this._needsRedraw = false // should we print something at next tick? - this._hideCursor = options.hideCursor == null ? true : options.hideCursor - this._fixedFramerate = options.fixedFramerate == null - ? !(/^v0\.8\./.test(process.version)) - : options.fixedFramerate - this._lastUpdateAt = null - this._updateInterval = options.updateInterval == null ? 50 : options.updateInterval - - this._themes = options.themes || defaultThemes - this._theme = options.theme - var theme = this._computeTheme(options.theme) - var template = options.template || [ - { type: 'progressbar', length: 20 }, - { type: 'activityIndicator', kerning: 1, length: 1 }, - { type: 'section', kerning: 1, default: '' }, - { type: 'subsection', kerning: 1, default: '' }, - ] - this.setWriteTo(writeTo, options.tty) - var PlumbingClass = options.Plumbing || Plumbing - this._gauge = new PlumbingClass(theme, template, this.getWidth()) - - this._$$doRedraw = callWith(this, this._doRedraw) - this._$$handleSizeChange = callWith(this, this._handleSizeChange) - - this._cleanupOnExit = options.cleanupOnExit == null || options.cleanupOnExit - this._removeOnExit = null - - if (options.enabled || (options.enabled == null && this._tty && this._tty.isTTY)) { - this.enable() - } else { - this.disable() - } -} -Gauge.prototype = {} - -Gauge.prototype.isEnabled = function () { - return !this._disabled -} - -Gauge.prototype.setTemplate = function (template) { - this._gauge.setTemplate(template) - if (this._showing) { - this._requestRedraw() - } -} - -Gauge.prototype._computeTheme = function (theme) { - if (!theme) { - theme = {} - } - if (typeof theme === 'string') { - theme = this._themes.getTheme(theme) - } else if ( - Object.keys(theme).length === 0 || theme.hasUnicode != null || theme.hasColor != null - ) { - var useUnicode = theme.hasUnicode == null ? hasUnicode() : theme.hasUnicode - var useColor = theme.hasColor == null ? 
hasColor : theme.hasColor - theme = this._themes.getDefault({ - hasUnicode: useUnicode, - hasColor: useColor, - platform: theme.platform, - }) - } - return theme -} - -Gauge.prototype.setThemeset = function (themes) { - this._themes = themes - this.setTheme(this._theme) -} - -Gauge.prototype.setTheme = function (theme) { - this._gauge.setTheme(this._computeTheme(theme)) - if (this._showing) { - this._requestRedraw() - } - this._theme = theme -} - -Gauge.prototype._requestRedraw = function () { - this._needsRedraw = true - if (!this._fixedFramerate) { - this._doRedraw() - } -} - -Gauge.prototype.getWidth = function () { - return ((this._tty && this._tty.columns) || 80) - 1 -} - -Gauge.prototype.setWriteTo = function (writeTo, tty) { - var enabled = !this._disabled - if (enabled) { - this.disable() - } - this._writeTo = writeTo - this._tty = tty || - (writeTo === process.stderr && process.stdout.isTTY && process.stdout) || - (writeTo.isTTY && writeTo) || - this._tty - if (this._gauge) { - this._gauge.setWidth(this.getWidth()) - } - if (enabled) { - this.enable() - } -} - -Gauge.prototype.enable = function () { - if (!this._disabled) { - return - } - this._disabled = false - if (this._tty) { - this._enableEvents() - } - if (this._showing) { - this.show() - } -} - -Gauge.prototype.disable = function () { - if (this._disabled) { - return - } - if (this._showing) { - this._lastUpdateAt = null - this._showing = false - this._doRedraw() - this._showing = true - } - this._disabled = true - if (this._tty) { - this._disableEvents() - } -} - -Gauge.prototype._enableEvents = function () { - if (this._cleanupOnExit) { - this._removeOnExit = onExit(callWith(this, this.disable)) - } - this._tty.on('resize', this._$$handleSizeChange) - if (this._fixedFramerate) { - this.redrawTracker = setInterval(this._$$doRedraw, this._updateInterval) - if (this.redrawTracker.unref) { - this.redrawTracker.unref() - } - } -} - -Gauge.prototype._disableEvents = function () { - this._tty.removeListener('resize', this._$$handleSizeChange) - if (this._fixedFramerate) { - clearInterval(this.redrawTracker) - } - if (this._removeOnExit) { - this._removeOnExit() - } -} - -Gauge.prototype.hide = function (cb) { - if (this._disabled) { - return cb && process.nextTick(cb) - } - if (!this._showing) { - return cb && process.nextTick(cb) - } - this._showing = false - this._doRedraw() - cb && setImmediate(cb) -} - -Gauge.prototype.show = function (section, completed) { - this._showing = true - if (typeof section === 'string') { - this._status.section = section - } else if (typeof section === 'object') { - var sectionKeys = Object.keys(section) - for (var ii = 0; ii < sectionKeys.length; ++ii) { - var key = sectionKeys[ii] - this._status[key] = section[key] - } - } - if (completed != null) { - this._status.completed = completed - } - if (this._disabled) { - return - } - this._requestRedraw() -} - -Gauge.prototype.pulse = function (subsection) { - this._status.subsection = subsection || '' - this._status.spun++ - if (this._disabled) { - return - } - if (!this._showing) { - return - } - this._requestRedraw() -} - -Gauge.prototype._handleSizeChange = function () { - this._gauge.setWidth(this._tty.columns - 1) - this._requestRedraw() -} - -Gauge.prototype._doRedraw = function () { - if (this._disabled || this._paused) { - return - } - if (!this._fixedFramerate) { - var now = Date.now() - if (this._lastUpdateAt && now - this._lastUpdateAt < this._updateInterval) { - return - } - this._lastUpdateAt = now - } - if (!this._showing && 
this._onScreen) { - this._onScreen = false - var result = this._gauge.hide() - if (this._hideCursor) { - result += this._gauge.showCursor() - } - return this._writeTo.write(result) - } - if (!this._showing && !this._onScreen) { - return - } - if (this._showing && !this._onScreen) { - this._onScreen = true - this._needsRedraw = true - if (this._hideCursor) { - this._writeTo.write(this._gauge.hideCursor()) - } - } - if (!this._needsRedraw) { - return - } - if (!this._writeTo.write(this._gauge.show(this._status))) { - this._paused = true - this._writeTo.on('drain', callWith(this, function () { - this._paused = false - this._doRedraw() - })) - } -} diff --git a/deps/npm/node_modules/gauge/lib/plumbing.js b/deps/npm/node_modules/gauge/lib/plumbing.js deleted file mode 100644 index c4dc3e074b95e8..00000000000000 --- a/deps/npm/node_modules/gauge/lib/plumbing.js +++ /dev/null @@ -1,50 +0,0 @@ -'use strict' -var consoleControl = require('console-control-strings') -var renderTemplate = require('./render-template.js') -var validate = require('aproba') - -var Plumbing = module.exports = function (theme, template, width) { - if (!width) { - width = 80 - } - validate('OAN', [theme, template, width]) - this.showing = false - this.theme = theme - this.width = width - this.template = template -} -Plumbing.prototype = {} - -Plumbing.prototype.setTheme = function (theme) { - validate('O', [theme]) - this.theme = theme -} - -Plumbing.prototype.setTemplate = function (template) { - validate('A', [template]) - this.template = template -} - -Plumbing.prototype.setWidth = function (width) { - validate('N', [width]) - this.width = width -} - -Plumbing.prototype.hide = function () { - return consoleControl.gotoSOL() + consoleControl.eraseLine() -} - -Plumbing.prototype.hideCursor = consoleControl.hideCursor - -Plumbing.prototype.showCursor = consoleControl.showCursor - -Plumbing.prototype.show = function (status) { - var values = Object.create(this.theme) - for (var key in status) { - values[key] = status[key] - } - - return renderTemplate(this.width, this.template, values).trim() + - consoleControl.color('reset') + - consoleControl.eraseLine() + consoleControl.gotoSOL() -} diff --git a/deps/npm/node_modules/gauge/lib/process.js b/deps/npm/node_modules/gauge/lib/process.js deleted file mode 100644 index 05e85694d755b6..00000000000000 --- a/deps/npm/node_modules/gauge/lib/process.js +++ /dev/null @@ -1,3 +0,0 @@ -'use strict' -// this exists so we can replace it during testing -module.exports = process diff --git a/deps/npm/node_modules/gauge/lib/progress-bar.js b/deps/npm/node_modules/gauge/lib/progress-bar.js deleted file mode 100644 index 184ff2500aae4d..00000000000000 --- a/deps/npm/node_modules/gauge/lib/progress-bar.js +++ /dev/null @@ -1,41 +0,0 @@ -'use strict' -var validate = require('aproba') -var renderTemplate = require('./render-template.js') -var wideTruncate = require('./wide-truncate') -var stringWidth = require('string-width') - -module.exports = function (theme, width, completed) { - validate('ONN', [theme, width, completed]) - if (completed < 0) { - completed = 0 - } - if (completed > 1) { - completed = 1 - } - if (width <= 0) { - return '' - } - var sofar = Math.round(width * completed) - var rest = width - sofar - var template = [ - { type: 'complete', value: repeat(theme.complete, sofar), length: sofar }, - { type: 'remaining', value: repeat(theme.remaining, rest), length: rest }, - ] - return renderTemplate(width, template, theme) -} - -// lodash's way of repeating -function repeat (string, 
width) { - var result = '' - var n = width - do { - if (n % 2) { - result += string - } - n = Math.floor(n / 2) - /* eslint no-self-assign: 0 */ - string += string - } while (n && stringWidth(result) < width) - - return wideTruncate(result, width) -} diff --git a/deps/npm/node_modules/gauge/lib/render-template.js b/deps/npm/node_modules/gauge/lib/render-template.js deleted file mode 100644 index d1b52c0f48095a..00000000000000 --- a/deps/npm/node_modules/gauge/lib/render-template.js +++ /dev/null @@ -1,222 +0,0 @@ -'use strict' -var align = require('wide-align') -var validate = require('aproba') -var wideTruncate = require('./wide-truncate') -var error = require('./error') -var TemplateItem = require('./template-item') - -function renderValueWithValues (values) { - return function (item) { - return renderValue(item, values) - } -} - -var renderTemplate = module.exports = function (width, template, values) { - var items = prepareItems(width, template, values) - var rendered = items.map(renderValueWithValues(values)).join('') - return align.left(wideTruncate(rendered, width), width) -} - -function preType (item) { - var cappedTypeName = item.type[0].toUpperCase() + item.type.slice(1) - return 'pre' + cappedTypeName -} - -function postType (item) { - var cappedTypeName = item.type[0].toUpperCase() + item.type.slice(1) - return 'post' + cappedTypeName -} - -function hasPreOrPost (item, values) { - if (!item.type) { - return - } - return values[preType(item)] || values[postType(item)] -} - -function generatePreAndPost (baseItem, parentValues) { - var item = Object.assign({}, baseItem) - var values = Object.create(parentValues) - var template = [] - var pre = preType(item) - var post = postType(item) - if (values[pre]) { - template.push({ value: values[pre] }) - values[pre] = null - } - item.minLength = null - item.length = null - item.maxLength = null - template.push(item) - values[item.type] = values[item.type] - if (values[post]) { - template.push({ value: values[post] }) - values[post] = null - } - return function ($1, $2, length) { - return renderTemplate(length, template, values) - } -} - -function prepareItems (width, template, values) { - function cloneAndObjectify (item, index, arr) { - var cloned = new TemplateItem(item, width) - var type = cloned.type - if (cloned.value == null) { - if (!(type in values)) { - if (cloned.default == null) { - throw new error.MissingTemplateValue(cloned, values) - } else { - cloned.value = cloned.default - } - } else { - cloned.value = values[type] - } - } - if (cloned.value == null || cloned.value === '') { - return null - } - cloned.index = index - cloned.first = index === 0 - cloned.last = index === arr.length - 1 - if (hasPreOrPost(cloned, values)) { - cloned.value = generatePreAndPost(cloned, values) - } - return cloned - } - - var output = template.map(cloneAndObjectify).filter(function (item) { - return item != null - }) - - var remainingSpace = width - var variableCount = output.length - - function consumeSpace (length) { - if (length > remainingSpace) { - length = remainingSpace - } - remainingSpace -= length - } - - function finishSizing (item, length) { - if (item.finished) { - throw new error.Internal('Tried to finish template item that was already finished') - } - if (length === Infinity) { - throw new error.Internal('Length of template item cannot be infinity') - } - if (length != null) { - item.length = length - } - item.minLength = null - item.maxLength = null - --variableCount - item.finished = true - if (item.length == null) { - 
item.length = item.getBaseLength() - } - if (item.length == null) { - throw new error.Internal('Finished template items must have a length') - } - consumeSpace(item.getLength()) - } - - output.forEach(function (item) { - if (!item.kerning) { - return - } - var prevPadRight = item.first ? 0 : output[item.index - 1].padRight - if (!item.first && prevPadRight < item.kerning) { - item.padLeft = item.kerning - prevPadRight - } - if (!item.last) { - item.padRight = item.kerning - } - }) - - // Finish any that have a fixed (literal or intuited) length - output.forEach(function (item) { - if (item.getBaseLength() == null) { - return - } - finishSizing(item) - }) - - var resized = 0 - var resizing - var hunkSize - do { - resizing = false - hunkSize = Math.round(remainingSpace / variableCount) - output.forEach(function (item) { - if (item.finished) { - return - } - if (!item.maxLength) { - return - } - if (item.getMaxLength() < hunkSize) { - finishSizing(item, item.maxLength) - resizing = true - } - }) - } while (resizing && resized++ < output.length) - if (resizing) { - throw new error.Internal('Resize loop iterated too many times while determining maxLength') - } - - resized = 0 - do { - resizing = false - hunkSize = Math.round(remainingSpace / variableCount) - output.forEach(function (item) { - if (item.finished) { - return - } - if (!item.minLength) { - return - } - if (item.getMinLength() >= hunkSize) { - finishSizing(item, item.minLength) - resizing = true - } - }) - } while (resizing && resized++ < output.length) - if (resizing) { - throw new error.Internal('Resize loop iterated too many times while determining minLength') - } - - hunkSize = Math.round(remainingSpace / variableCount) - output.forEach(function (item) { - if (item.finished) { - return - } - finishSizing(item, hunkSize) - }) - - return output -} - -function renderFunction (item, values, length) { - validate('OON', arguments) - if (item.type) { - return item.value(values, values[item.type + 'Theme'] || {}, length) - } else { - return item.value(values, {}, length) - } -} - -function renderValue (item, values) { - var length = item.getBaseLength() - var value = typeof item.value === 'function' ? renderFunction(item, values, length) : item.value - if (value == null || value === '') { - return '' - } - var alignWith = align[item.align] || align.left - var leftPadding = item.padLeft ? align.left('', item.padLeft) : '' - var rightPadding = item.padRight ? 
align.right('', item.padRight) : '' - var truncated = wideTruncate(String(value), length) - var aligned = alignWith(truncated, length) - return leftPadding + aligned + rightPadding -} diff --git a/deps/npm/node_modules/gauge/lib/set-immediate.js b/deps/npm/node_modules/gauge/lib/set-immediate.js deleted file mode 100644 index 6650a485c49933..00000000000000 --- a/deps/npm/node_modules/gauge/lib/set-immediate.js +++ /dev/null @@ -1,7 +0,0 @@ -'use strict' -var process = require('./process') -try { - module.exports = setImmediate -} catch (ex) { - module.exports = process.nextTick -} diff --git a/deps/npm/node_modules/gauge/lib/set-interval.js b/deps/npm/node_modules/gauge/lib/set-interval.js deleted file mode 100644 index 576198793c5504..00000000000000 --- a/deps/npm/node_modules/gauge/lib/set-interval.js +++ /dev/null @@ -1,3 +0,0 @@ -'use strict' -// this exists so we can replace it during testing -module.exports = setInterval diff --git a/deps/npm/node_modules/gauge/lib/spin.js b/deps/npm/node_modules/gauge/lib/spin.js deleted file mode 100644 index 34142ee31acc7c..00000000000000 --- a/deps/npm/node_modules/gauge/lib/spin.js +++ /dev/null @@ -1,5 +0,0 @@ -'use strict' - -module.exports = function spin (spinstr, spun) { - return spinstr[spun % spinstr.length] -} diff --git a/deps/npm/node_modules/gauge/lib/template-item.js b/deps/npm/node_modules/gauge/lib/template-item.js deleted file mode 100644 index e307e9b7421e73..00000000000000 --- a/deps/npm/node_modules/gauge/lib/template-item.js +++ /dev/null @@ -1,87 +0,0 @@ -'use strict' -var stringWidth = require('string-width') - -module.exports = TemplateItem - -function isPercent (num) { - if (typeof num !== 'string') { - return false - } - return num.slice(-1) === '%' -} - -function percent (num) { - return Number(num.slice(0, -1)) / 100 -} - -function TemplateItem (values, outputLength) { - this.overallOutputLength = outputLength - this.finished = false - this.type = null - this.value = null - this.length = null - this.maxLength = null - this.minLength = null - this.kerning = null - this.align = 'left' - this.padLeft = 0 - this.padRight = 0 - this.index = null - this.first = null - this.last = null - if (typeof values === 'string') { - this.value = values - } else { - for (var prop in values) { - this[prop] = values[prop] - } - } - // Realize percents - if (isPercent(this.length)) { - this.length = Math.round(this.overallOutputLength * percent(this.length)) - } - if (isPercent(this.minLength)) { - this.minLength = Math.round(this.overallOutputLength * percent(this.minLength)) - } - if (isPercent(this.maxLength)) { - this.maxLength = Math.round(this.overallOutputLength * percent(this.maxLength)) - } - return this -} - -TemplateItem.prototype = {} - -TemplateItem.prototype.getBaseLength = function () { - var length = this.length - if ( - length == null && - typeof this.value === 'string' && - this.maxLength == null && - this.minLength == null - ) { - length = stringWidth(this.value) - } - return length -} - -TemplateItem.prototype.getLength = function () { - var length = this.getBaseLength() - if (length == null) { - return null - } - return length + this.padLeft + this.padRight -} - -TemplateItem.prototype.getMaxLength = function () { - if (this.maxLength == null) { - return null - } - return this.maxLength + this.padLeft + this.padRight -} - -TemplateItem.prototype.getMinLength = function () { - if (this.minLength == null) { - return null - } - return this.minLength + this.padLeft + this.padRight -} diff --git 
a/deps/npm/node_modules/gauge/lib/theme-set.js b/deps/npm/node_modules/gauge/lib/theme-set.js deleted file mode 100644 index 643d7dbb1da346..00000000000000 --- a/deps/npm/node_modules/gauge/lib/theme-set.js +++ /dev/null @@ -1,122 +0,0 @@ -'use strict' - -module.exports = function () { - return ThemeSetProto.newThemeSet() -} - -var ThemeSetProto = {} - -ThemeSetProto.baseTheme = require('./base-theme.js') - -ThemeSetProto.newTheme = function (parent, theme) { - if (!theme) { - theme = parent - parent = this.baseTheme - } - return Object.assign({}, parent, theme) -} - -ThemeSetProto.getThemeNames = function () { - return Object.keys(this.themes) -} - -ThemeSetProto.addTheme = function (name, parent, theme) { - this.themes[name] = this.newTheme(parent, theme) -} - -ThemeSetProto.addToAllThemes = function (theme) { - var themes = this.themes - Object.keys(themes).forEach(function (name) { - Object.assign(themes[name], theme) - }) - Object.assign(this.baseTheme, theme) -} - -ThemeSetProto.getTheme = function (name) { - if (!this.themes[name]) { - throw this.newMissingThemeError(name) - } - return this.themes[name] -} - -ThemeSetProto.setDefault = function (opts, name) { - if (name == null) { - name = opts - opts = {} - } - var platform = opts.platform == null ? 'fallback' : opts.platform - var hasUnicode = !!opts.hasUnicode - var hasColor = !!opts.hasColor - if (!this.defaults[platform]) { - this.defaults[platform] = { true: {}, false: {} } - } - this.defaults[platform][hasUnicode][hasColor] = name -} - -ThemeSetProto.getDefault = function (opts) { - if (!opts) { - opts = {} - } - var platformName = opts.platform || process.platform - var platform = this.defaults[platformName] || this.defaults.fallback - var hasUnicode = !!opts.hasUnicode - var hasColor = !!opts.hasColor - if (!platform) { - throw this.newMissingDefaultThemeError(platformName, hasUnicode, hasColor) - } - if (!platform[hasUnicode][hasColor]) { - if (hasUnicode && hasColor && platform[!hasUnicode][hasColor]) { - hasUnicode = false - } else if (hasUnicode && hasColor && platform[hasUnicode][!hasColor]) { - hasColor = false - } else if (hasUnicode && hasColor && platform[!hasUnicode][!hasColor]) { - hasUnicode = false - hasColor = false - } else if (hasUnicode && !hasColor && platform[!hasUnicode][hasColor]) { - hasUnicode = false - } else if (!hasUnicode && hasColor && platform[hasUnicode][!hasColor]) { - hasColor = false - } else if (platform === this.defaults.fallback) { - throw this.newMissingDefaultThemeError(platformName, hasUnicode, hasColor) - } - } - if (platform[hasUnicode][hasColor]) { - return this.getTheme(platform[hasUnicode][hasColor]) - } else { - return this.getDefault(Object.assign({}, opts, { platform: 'fallback' })) - } -} - -ThemeSetProto.newMissingThemeError = function newMissingThemeError (name) { - var err = new Error('Could not find a gauge theme named "' + name + '"') - Error.captureStackTrace.call(err, newMissingThemeError) - err.theme = name - err.code = 'EMISSINGTHEME' - return err -} - -ThemeSetProto.newMissingDefaultThemeError = - function newMissingDefaultThemeError (platformName, hasUnicode, hasColor) { - var err = new Error( - 'Could not find a gauge theme for your platform/unicode/color use combo:\n' + - ' platform = ' + platformName + '\n' + - ' hasUnicode = ' + hasUnicode + '\n' + - ' hasColor = ' + hasColor) - Error.captureStackTrace.call(err, newMissingDefaultThemeError) - err.platform = platformName - err.hasUnicode = hasUnicode - err.hasColor = hasColor - err.code = 'EMISSINGTHEME' - return 
err - } - -ThemeSetProto.newThemeSet = function () { - var themeset = function (opts) { - return themeset.getDefault(opts) - } - return Object.assign(themeset, ThemeSetProto, { - themes: Object.assign({}, this.themes), - baseTheme: Object.assign({}, this.baseTheme), - defaults: JSON.parse(JSON.stringify(this.defaults || {})), - }) -} diff --git a/deps/npm/node_modules/gauge/lib/themes.js b/deps/npm/node_modules/gauge/lib/themes.js deleted file mode 100644 index d2e62bbccb3d82..00000000000000 --- a/deps/npm/node_modules/gauge/lib/themes.js +++ /dev/null @@ -1,56 +0,0 @@ -'use strict' -var color = require('console-control-strings').color -var ThemeSet = require('./theme-set.js') - -var themes = module.exports = new ThemeSet() - -themes.addTheme('ASCII', { - preProgressbar: '[', - postProgressbar: ']', - progressbarTheme: { - complete: '#', - remaining: '.', - }, - activityIndicatorTheme: '-\\|/', - preSubsection: '>', -}) - -themes.addTheme('colorASCII', themes.getTheme('ASCII'), { - progressbarTheme: { - preComplete: color('bgBrightWhite', 'brightWhite'), - complete: '#', - postComplete: color('reset'), - preRemaining: color('bgBrightBlack', 'brightBlack'), - remaining: '.', - postRemaining: color('reset'), - }, -}) - -themes.addTheme('brailleSpinner', { - preProgressbar: '(', - postProgressbar: ')', - progressbarTheme: { - complete: '#', - remaining: '⠂', - }, - activityIndicatorTheme: '⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏', - preSubsection: '>', -}) - -themes.addTheme('colorBrailleSpinner', themes.getTheme('brailleSpinner'), { - progressbarTheme: { - preComplete: color('bgBrightWhite', 'brightWhite'), - complete: '#', - postComplete: color('reset'), - preRemaining: color('bgBrightBlack', 'brightBlack'), - remaining: '⠂', - postRemaining: color('reset'), - }, -}) - -themes.setDefault({}, 'ASCII') -themes.setDefault({ hasColor: true }, 'colorASCII') -themes.setDefault({ platform: 'darwin', hasUnicode: true }, 'brailleSpinner') -themes.setDefault({ platform: 'darwin', hasUnicode: true, hasColor: true }, 'colorBrailleSpinner') -themes.setDefault({ platform: 'linux', hasUnicode: true }, 'brailleSpinner') -themes.setDefault({ platform: 'linux', hasUnicode: true, hasColor: true }, 'colorBrailleSpinner') diff --git a/deps/npm/node_modules/gauge/lib/wide-truncate.js b/deps/npm/node_modules/gauge/lib/wide-truncate.js deleted file mode 100644 index 5284a699ac3fb5..00000000000000 --- a/deps/npm/node_modules/gauge/lib/wide-truncate.js +++ /dev/null @@ -1,31 +0,0 @@ -'use strict' -var stringWidth = require('string-width') -var stripAnsi = require('strip-ansi') - -module.exports = wideTruncate - -function wideTruncate (str, target) { - if (stringWidth(str) === 0) { - return str - } - if (target <= 0) { - return '' - } - if (stringWidth(str) <= target) { - return str - } - - // We compute the number of bytes of ansi sequences here and add - // that to our initial truncation to ensure that we don't slice one - // that we want to keep in half. - var noAnsi = stripAnsi(str) - var ansiSize = str.length + noAnsi.length - var truncated = str.slice(0, target + ansiSize) - - // we have to shrink the result to account for our ansi sequence buffer - // (if an ansi sequence was truncated) and double width characters. 
- while (stringWidth(truncated) > target) { - truncated = truncated.slice(0, -1) - } - return truncated -} diff --git a/deps/npm/node_modules/gauge/package.json b/deps/npm/node_modules/gauge/package.json deleted file mode 100644 index 449d9dd3ed3920..00000000000000 --- a/deps/npm/node_modules/gauge/package.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "name": "gauge", - "version": "5.0.1", - "description": "A terminal based horizontal gauge", - "main": "lib", - "scripts": { - "test": "tap", - "lint": "eslint \"**/*.js\"", - "postlint": "template-oss-check", - "lintfix": "npm run lint -- --fix", - "snap": "tap", - "posttest": "npm run lint", - "template-oss-apply": "template-oss-apply --force" - }, - "repository": { - "type": "git", - "url": "https://github.com/npm/gauge.git" - }, - "keywords": [ - "progressbar", - "progress", - "gauge" - ], - "author": "GitHub Inc.", - "license": "ISC", - "bugs": { - "url": "https://github.com/npm/gauge/issues" - }, - "homepage": "https://github.com/npm/gauge", - "dependencies": { - "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.3", - "console-control-strings": "^1.1.0", - "has-unicode": "^2.0.1", - "signal-exit": "^4.0.1", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "wide-align": "^1.1.5" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.14.1", - "readable-stream": "^4.0.0", - "tap": "^16.0.1" - }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, - "tap": { - "branches": 79, - "statements": 89, - "functions": 92, - "lines": 90, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.14.1", - "publish": "true" - } -} diff --git a/deps/npm/node_modules/has-unicode/LICENSE b/deps/npm/node_modules/has-unicode/LICENSE deleted file mode 100644 index d42e25e95655bb..00000000000000 --- a/deps/npm/node_modules/has-unicode/LICENSE +++ /dev/null @@ -1,14 +0,0 @@ -Copyright (c) 2014, Rebecca Turner - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - diff --git a/deps/npm/node_modules/has-unicode/index.js b/deps/npm/node_modules/has-unicode/index.js deleted file mode 100644 index 9b0fe445401311..00000000000000 --- a/deps/npm/node_modules/has-unicode/index.js +++ /dev/null @@ -1,16 +0,0 @@ -"use strict" -var os = require("os") - -var hasUnicode = module.exports = function () { - // Recent Win32 platforms (>XP) CAN support unicode in the console but - // don't have to, and in non-english locales often use traditional local - // code pages. There's no way, short of windows system calls or execing - // the chcp command line program to figure this out. As such, we default - // this to false and encourage your users to override it via config if - // appropriate. 
- if (os.type() == "Windows_NT") { return false } - - var isUTF8 = /UTF-?8$/i - var ctype = process.env.LC_ALL || process.env.LC_CTYPE || process.env.LANG - return isUTF8.test(ctype) -} diff --git a/deps/npm/node_modules/has-unicode/package.json b/deps/npm/node_modules/has-unicode/package.json deleted file mode 100644 index ebe9d76d621587..00000000000000 --- a/deps/npm/node_modules/has-unicode/package.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "name": "has-unicode", - "version": "2.0.1", - "description": "Try to guess if your terminal supports unicode", - "main": "index.js", - "scripts": { - "test": "tap test/*.js" - }, - "repository": { - "type": "git", - "url": "https://github.com/iarna/has-unicode" - }, - "keywords": [ - "unicode", - "terminal" - ], - "files": [ - "index.js" - ], - "author": "Rebecca Turner ", - "license": "ISC", - "bugs": { - "url": "https://github.com/iarna/has-unicode/issues" - }, - "homepage": "https://github.com/iarna/has-unicode", - "devDependencies": { - "require-inject": "^1.3.0", - "tap": "^2.3.1" - } -} diff --git a/deps/npm/node_modules/libnpmaccess/package.json b/deps/npm/node_modules/libnpmaccess/package.json index 81a87d6395455e..59b5afa7c7b5f6 100644 --- a/deps/npm/node_modules/libnpmaccess/package.json +++ b/deps/npm/node_modules/libnpmaccess/package.json @@ -1,6 +1,6 @@ { "name": "libnpmaccess", - "version": "8.0.3", + "version": "8.0.5", "description": "programmatic library for `npm access` commands", "author": "GitHub Inc.", "license": "ISC", @@ -23,14 +23,14 @@ }, "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/libnpmaccess" }, "bugs": "https://github.com/npm/libnpmaccess/issues", "homepage": "https://npmjs.com/package/libnpmaccess", "dependencies": { - "npm-package-arg": "^11.0.1", - "npm-registry-fetch": "^16.2.0" + "npm-package-arg": "^11.0.2", + "npm-registry-fetch": "^17.0.0" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/libnpmdiff/lib/format-diff.js b/deps/npm/node_modules/libnpmdiff/lib/format-diff.js index 211386cb5390ee..a6606d94b8b302 100644 --- a/deps/npm/node_modules/libnpmdiff/lib/format-diff.js +++ b/deps/npm/node_modules/libnpmdiff/lib/format-diff.js @@ -1,10 +1,24 @@ -const EOL = '\n' - -const colorizeDiff = require('@npmcli/disparity-colors') const jsDiff = require('diff') const shouldPrintPatch = require('./should-print-patch.js') +const colors = { + // red + removed: { open: '\x1B[31m', close: '\x1B[39m' }, + // green + added: { open: '\x1B[32m', close: '\x1B[39m' }, + // blue + header: { open: '\x1B[34m', close: '\x1B[39m' }, + // cyan + section: { open: '\x1B[36m', close: '\x1B[39m' }, +} + +const color = (colorStr, colorId) => { + const { open, close } = colors[colorId] + // avoid highlighting the "\n" (would highlight till the end of the line) + return colorStr.replace(/[^\n\r]+/g, open + '$&' + close) +} + const formatDiff = ({ files, opts = {}, refs, versions }) => { let res = '' const srcPrefix = opts.diffNoPrefix ? 
'' : opts.diffSrcPrefix || 'a/' @@ -35,7 +49,7 @@ const formatDiff = ({ files, opts = {}, refs, versions }) => { } if (opts.diffNameOnly) { - res += `${filename}${EOL}` + res += `${filename}\n` continue } @@ -43,7 +57,7 @@ const formatDiff = ({ files, opts = {}, refs, versions }) => { let headerLength = 0 const header = str => { headerLength++ - patch += `${str}${EOL}` + patch += `${str}\n` } // manually build a git diff-compatible header @@ -85,9 +99,17 @@ const formatDiff = ({ files, opts = {}, refs, versions }) => { header(`+++ ${names.b}`) } - res += (opts.color - ? colorizeDiff(patch, { headerLength }) - : patch) + if (opts.color) { + // this RegExp will include all the `\n` chars into the lines, easier to join + const lines = patch.split(/^/m) + res += color(lines.slice(0, headerLength).join(''), 'header') + res += lines.slice(headerLength).join('') + .replace(/^-.*/gm, color('$&', 'removed')) + .replace(/^\+.*/gm, color('$&', 'added')) + .replace(/^@@.+@@/gm, color('$&', 'section')) + } else { + res += patch + } } return res.trim() diff --git a/deps/npm/node_modules/libnpmdiff/lib/tarball.js b/deps/npm/node_modules/libnpmdiff/lib/tarball.js index 930d624f2d5b6a..41ea84a6885a5e 100644 --- a/deps/npm/node_modules/libnpmdiff/lib/tarball.js +++ b/deps/npm/node_modules/libnpmdiff/lib/tarball.js @@ -9,7 +9,7 @@ const tar = require('tar') // returns a simplified tarball when reading files from node_modules folder, // thus avoiding running the prepare scripts and the extra logic from packlist -const nodeModulesTarball = (manifest, opts) => +const nodeModulesTarball = (manifest) => pkgContents({ path: manifest._resolved, depth: 1 }) .then(files => files.map(file => relative(manifest._resolved, file)) diff --git a/deps/npm/node_modules/libnpmdiff/package.json b/deps/npm/node_modules/libnpmdiff/package.json index 98229e99bd5618..d601ff61ca0218 100644 --- a/deps/npm/node_modules/libnpmdiff/package.json +++ b/deps/npm/node_modules/libnpmdiff/package.json @@ -1,10 +1,10 @@ { "name": "libnpmdiff", - "version": "6.0.9", + "version": "6.1.1", "description": "The registry diff", "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/libnpmdiff" }, "main": "lib/index.js", @@ -47,13 +47,12 @@ }, "dependencies": { "@npmcli/arborist": "^7.2.1", - "@npmcli/disparity-colors": "^3.0.0", - "@npmcli/installed-package-contents": "^2.0.2", + "@npmcli/installed-package-contents": "^2.1.0", "binary-extensions": "^2.3.0", "diff": "^5.1.0", "minimatch": "^9.0.4", - "npm-package-arg": "^11.0.1", - "pacote": "^17.0.4", + "npm-package-arg": "^11.0.2", + "pacote": "^18.0.1", "tar": "^6.2.1" }, "templateOSS": { diff --git a/deps/npm/node_modules/libnpmexec/README.md b/deps/npm/node_modules/libnpmexec/README.md index fb4a1e32b18df7..acd037c110b4ba 100644 --- a/deps/npm/node_modules/libnpmexec/README.md +++ b/deps/npm/node_modules/libnpmexec/README.md @@ -35,7 +35,6 @@ await libexec({ - `localBin`: Location to the `node_modules/.bin` folder of the local project to start scanning for bin files **String**, defaults to `./node_modules/.bin`. **libexec** will walk up the directory structure looking for `node_modules/.bin` folders in parent folders that might satisfy the current `arg` and will use that bin if found. - `locationMsg`: Overrides "at location" message when entering interactive mode **String** - `globalBin`: Location to the global space bin folder, same as: `$(npm bin -g)` **String**, defaults to empty string. 
- - `output`: A function to print output to **Function** - `packages`: A list of packages to be used (possibly fetch from the registry) **Array**, defaults to `[]` - `path`: Location to where to read local project info (`package.json`) **String**, defaults to `.` - `runPath`: Location to where to execute the script **String**, defaults to `.` diff --git a/deps/npm/node_modules/libnpmexec/lib/index.js b/deps/npm/node_modules/libnpmexec/lib/index.js index 6f548b943e2e65..28cba79a7f227a 100644 --- a/deps/npm/node_modules/libnpmexec/lib/index.js +++ b/deps/npm/node_modules/libnpmexec/lib/index.js @@ -4,19 +4,16 @@ const { mkdir } = require('fs/promises') const Arborist = require('@npmcli/arborist') const ciInfo = require('ci-info') const crypto = require('crypto') -const log = require('proc-log') +const { log, input } = require('proc-log') const npa = require('npm-package-arg') -const npmlog = require('npmlog') const pacote = require('pacote') const { read } = require('read') const semver = require('semver') - const { fileExists, localFileExists } = require('./file-exists.js') const getBinFromManifest = require('./get-bin-from-manifest.js') const noTTY = require('./no-tty.js') const runScript = require('./run-script.js') const isWindows = require('./is-windows.js') - const { dirname, resolve } = require('path') const binPaths = [] @@ -84,7 +81,6 @@ const exec = async (opts) => { locationMsg = undefined, globalBin = '', globalPath, - output, // dereference values because we manipulate it later packages: [...packages] = [], path = '.', @@ -99,7 +95,6 @@ const exec = async (opts) => { call, flatOptions, locationMsg, - output, path, binPaths, runPath, @@ -245,27 +240,24 @@ const exec = async (opts) => { if (add.length) { if (!yes) { - const missingPackages = add.map(a => `${a.replace(/@$/, '')}`) + const addList = add.map(a => `${a.replace(/@$/, '')}`) + // set -n to always say no if (yes === false) { // Error message lists missing package(s) when process is canceled /* eslint-disable-next-line max-len */ - throw new Error(`npx canceled due to missing packages and no YES option: ${JSON.stringify(missingPackages)}`) + throw new Error(`npx canceled due to missing packages and no YES option: ${JSON.stringify(addList)}`) } if (noTTY() || ciInfo.isCI) { - log.warn('exec', `The following package${ - add.length === 1 ? ' was' : 's were' - } not found and will be installed: ${ - add.map((pkg) => pkg.replace(/@$/, '')).join(', ') - }`) + /* eslint-disable-next-line max-len */ + log.warn('exec', `The following package${add.length === 1 ? ' was' : 's were'} not found and will be installed: ${addList.join(', ')}`) } else { - const addList = missingPackages.join('\n') + '\n' - const prompt = `Need to install the following packages:\n${ - addList - }Ok to proceed? ` - npmlog.clearProgress() - const confirm = await read({ prompt, default: 'y' }) + const confirm = await input.read(() => read({ + /* eslint-disable-next-line max-len */ + prompt: `Need to install the following packages:\n${addList.join('\n')}\nOk to proceed? 
`, + default: 'y', + })) if (confirm.trim().toLowerCase().charAt(0) !== 'y') { throw new Error('canceled') } diff --git a/deps/npm/node_modules/libnpmexec/lib/run-script.js b/deps/npm/node_modules/libnpmexec/lib/run-script.js index 89dcf2e653036e..1f621edcbc9aa2 100644 --- a/deps/npm/node_modules/libnpmexec/lib/run-script.js +++ b/deps/npm/node_modules/libnpmexec/lib/run-script.js @@ -1,8 +1,7 @@ const ciInfo = require('ci-info') const runScript = require('@npmcli/run-script') const readPackageJson = require('read-package-json-fast') -const npmlog = require('npmlog') -const log = require('proc-log') +const { log, output } = require('proc-log') const noTTY = require('./no-tty.js') const run = async ({ @@ -10,7 +9,6 @@ const run = async ({ call, flatOptions, locationMsg, - output = () => {}, path, binPaths, runPath, @@ -21,8 +19,7 @@ const run = async ({ // do the fakey runScript dance // still should work if no package.json in cwd - const realPkg = await readPackageJson(`${path}/package.json`) - .catch(() => ({})) + const realPkg = await readPackageJson(`${path}/package.json`).catch(() => ({})) const pkg = { ...realPkg, scripts: { @@ -31,41 +28,34 @@ const run = async ({ }, } - npmlog.disableProgress() - - try { - if (script === scriptShell) { - if (!noTTY()) { - if (ciInfo.isCI) { - return log.warn('exec', 'Interactive mode disabled in CI environment') - } + if (script === scriptShell) { + if (!noTTY()) { + if (ciInfo.isCI) { + return log.warn('exec', 'Interactive mode disabled in CI environment') + } - locationMsg = locationMsg || ` at location:\n${flatOptions.chalk.dim(runPath)}` + const { chalk } = flatOptions - output(`${ - flatOptions.chalk.reset('\nEntering npm script environment') - }${ - flatOptions.chalk.reset(locationMsg) - }${ - flatOptions.chalk.bold('\nType \'exit\' or ^D when finished\n') - }`) - } + output.standard(`${ + chalk.reset('\nEntering npm script environment') + }${ + chalk.reset(locationMsg || ` at location:\n${chalk.dim(runPath)}`) + }${ + chalk.bold('\nType \'exit\' or ^D when finished\n') + }`) } - return await runScript({ - ...flatOptions, - pkg, - banner: false, - // we always run in cwd, not --prefix - path: runPath, - binPaths, - event: 'npx', - args, - stdio: 'inherit', - scriptShell, - }) - } finally { - npmlog.enableProgress() } + return runScript({ + ...flatOptions, + pkg, + // we always run in cwd, not --prefix + path: runPath, + binPaths, + event: 'npx', + args, + stdio: 'inherit', + scriptShell, + }) } module.exports = run diff --git a/deps/npm/node_modules/libnpmexec/package.json b/deps/npm/node_modules/libnpmexec/package.json index 39f12270e35a7e..fcb30087cb5d22 100644 --- a/deps/npm/node_modules/libnpmexec/package.json +++ b/deps/npm/node_modules/libnpmexec/package.json @@ -1,6 +1,6 @@ { "name": "libnpmexec", - "version": "7.0.10", + "version": "8.1.0", "files": [ "bin/", "lib/" @@ -12,7 +12,7 @@ "description": "npm exec (npx) programmatic API", "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/libnpmexec" }, "keywords": [ @@ -60,12 +60,11 @@ }, "dependencies": { "@npmcli/arborist": "^7.2.1", - "@npmcli/run-script": "^7.0.2", + "@npmcli/run-script": "^8.1.0", "ci-info": "^4.0.0", - "npm-package-arg": "^11.0.1", - "npmlog": "^7.0.1", - "pacote": "^17.0.4", - "proc-log": "^3.0.0", + "npm-package-arg": "^11.0.2", + "pacote": "^18.0.1", + "proc-log": "^4.2.0", "read": "^3.0.1", "read-package-json-fast": "^3.0.2", "semver": "^7.3.7", diff --git 
a/deps/npm/node_modules/libnpmfund/lib/index.js b/deps/npm/node_modules/libnpmfund/lib/index.js index a53893dc1cf87b..39b69afc0abcc5 100644 --- a/deps/npm/node_modules/libnpmfund/lib/index.js +++ b/deps/npm/node_modules/libnpmfund/lib/index.js @@ -133,7 +133,7 @@ function readTree (tree, opts) { }) return directDepsWithFunding.reduce( - (res, { node, fundingItem }, i) => { + (res, { node, fundingItem }) => { if (!fundingItem || fundingItem.length === 0 || !node) { diff --git a/deps/npm/node_modules/libnpmfund/package.json b/deps/npm/node_modules/libnpmfund/package.json index 978252999e92eb..70a53646910a57 100644 --- a/deps/npm/node_modules/libnpmfund/package.json +++ b/deps/npm/node_modules/libnpmfund/package.json @@ -1,6 +1,6 @@ { "name": "libnpmfund", - "version": "5.0.7", + "version": "5.0.9", "main": "lib/index.js", "files": [ "bin/", @@ -9,7 +9,7 @@ "description": "Programmatic API for npm fund", "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/libnpmfund" }, "keywords": [ diff --git a/deps/npm/node_modules/libnpmhook/package.json b/deps/npm/node_modules/libnpmhook/package.json index 7613c1c86fbc1c..2cfa16df091be3 100644 --- a/deps/npm/node_modules/libnpmhook/package.json +++ b/deps/npm/node_modules/libnpmhook/package.json @@ -1,6 +1,6 @@ { "name": "libnpmhook", - "version": "10.0.2", + "version": "10.0.4", "description": "programmatic API for managing npm registry hooks", "main": "lib/index.js", "files": [ @@ -18,7 +18,7 @@ }, "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/libnpmhook" }, "keywords": [ @@ -31,7 +31,7 @@ "license": "ISC", "dependencies": { "aproba": "^2.0.0", - "npm-registry-fetch": "^16.2.0" + "npm-registry-fetch": "^17.0.0" }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", diff --git a/deps/npm/node_modules/libnpmorg/package.json b/deps/npm/node_modules/libnpmorg/package.json index 49671afd46371a..1a7486bfc681d2 100644 --- a/deps/npm/node_modules/libnpmorg/package.json +++ b/deps/npm/node_modules/libnpmorg/package.json @@ -1,6 +1,6 @@ { "name": "libnpmorg", - "version": "6.0.3", + "version": "6.0.5", "description": "Programmatic api for `npm org` commands", "author": "GitHub Inc.", "main": "lib/index.js", @@ -35,14 +35,14 @@ }, "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/libnpmorg" }, "bugs": "https://github.com/npm/libnpmorg/issues", "homepage": "https://npmjs.com/package/libnpmorg", "dependencies": { "aproba": "^2.0.0", - "npm-registry-fetch": "^16.2.0" + "npm-registry-fetch": "^17.0.0" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/libnpmpack/lib/index.js b/deps/npm/node_modules/libnpmpack/lib/index.js index 70d67d360f0d9b..c71716cf544285 100644 --- a/deps/npm/node_modules/libnpmpack/lib/index.js +++ b/deps/npm/node_modules/libnpmpack/lib/index.js @@ -4,9 +4,8 @@ const pacote = require('pacote') const npa = require('npm-package-arg') const runScript = require('@npmcli/run-script') const path = require('path') -const util = require('util') const Arborist = require('@npmcli/arborist') -const writeFile = util.promisify(require('fs').writeFile) +const { writeFile } = require('fs/promises') module.exports = pack async function pack (spec = 'file:.', opts = {}) { @@ -15,10 +14,6 @@ async function pack (spec = 'file:.', opts = {}) { 
const manifest = await pacote.manifest(spec, opts) - // Default to true if no log options passed, set to false if we're in silent - // mode - const banner = !opts.silent - const stdio = opts.foregroundScripts ? 'inherit' : 'pipe' if (spec.type === 'directory' && !opts.ignoreScripts) { @@ -29,7 +24,6 @@ async function pack (spec = 'file:.', opts = {}) { path: spec.fetchSpec, stdio, pkg: manifest, - banner, }) } @@ -56,7 +50,6 @@ async function pack (spec = 'file:.', opts = {}) { path: spec.fetchSpec, stdio, pkg: manifest, - banner, env: { npm_package_from: tarball.from, npm_package_resolved: tarball.resolved, diff --git a/deps/npm/node_modules/libnpmpack/package.json b/deps/npm/node_modules/libnpmpack/package.json index 1782ab7143186a..9e40f294cc54f6 100644 --- a/deps/npm/node_modules/libnpmpack/package.json +++ b/deps/npm/node_modules/libnpmpack/package.json @@ -1,6 +1,6 @@ { "name": "libnpmpack", - "version": "6.0.9", + "version": "7.0.1", "description": "Programmatic API for the bits behind npm pack", "author": "GitHub Inc.", "main": "lib/index.js", @@ -30,16 +30,16 @@ }, "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/libnpmpack" }, "bugs": "https://github.com/npm/libnpmpack/issues", "homepage": "https://npmjs.com/package/libnpmpack", "dependencies": { "@npmcli/arborist": "^7.2.1", - "@npmcli/run-script": "^7.0.2", - "npm-package-arg": "^11.0.1", - "pacote": "^17.0.4" + "@npmcli/run-script": "^8.1.0", + "npm-package-arg": "^11.0.2", + "pacote": "^18.0.1" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/libnpmpublish/lib/publish.js b/deps/npm/node_modules/libnpmpublish/lib/publish.js index b0ef782a166c66..9ffbc934263099 100644 --- a/deps/npm/node_modules/libnpmpublish/lib/publish.js +++ b/deps/npm/node_modules/libnpmpublish/lib/publish.js @@ -1,7 +1,7 @@ const { fixer } = require('normalize-package-data') const npmFetch = require('npm-registry-fetch') const npa = require('npm-package-arg') -const log = require('proc-log') +const { log } = require('proc-log') const semver = require('semver') const { URL } = require('url') const ssri = require('ssri') diff --git a/deps/npm/node_modules/libnpmpublish/package.json b/deps/npm/node_modules/libnpmpublish/package.json index 34f642794af40f..31faaa7b59f266 100644 --- a/deps/npm/node_modules/libnpmpublish/package.json +++ b/deps/npm/node_modules/libnpmpublish/package.json @@ -1,6 +1,6 @@ { "name": "libnpmpublish", - "version": "9.0.5", + "version": "9.0.7", "description": "Programmatic API for the bits behind npm publish and unpublish", "author": "GitHub Inc.", "main": "lib/index.js", @@ -32,7 +32,7 @@ }, "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/libnpmpublish" }, "bugs": "https://github.com/npm/cli/issues", @@ -40,9 +40,9 @@ "dependencies": { "ci-info": "^4.0.0", "normalize-package-data": "^6.0.0", - "npm-package-arg": "^11.0.1", - "npm-registry-fetch": "^16.2.0", - "proc-log": "^3.0.0", + "npm-package-arg": "^11.0.2", + "npm-registry-fetch": "^17.0.0", + "proc-log": "^4.2.0", "semver": "^7.3.7", "sigstore": "^2.2.0", "ssri": "^10.0.5" diff --git a/deps/npm/node_modules/libnpmsearch/package.json b/deps/npm/node_modules/libnpmsearch/package.json index c27673d2202c06..cb21747310eaec 100644 --- a/deps/npm/node_modules/libnpmsearch/package.json +++ b/deps/npm/node_modules/libnpmsearch/package.json @@ -1,6 +1,6 @@ { 
"name": "libnpmsearch", - "version": "7.0.2", + "version": "7.0.4", "description": "Programmatic API for searching in npm and compatible registries.", "author": "GitHub Inc.", "main": "lib/index.js", @@ -32,13 +32,13 @@ }, "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/libnpmsearch" }, "bugs": "https://github.com/npm/libnpmsearch/issues", "homepage": "https://npmjs.com/package/libnpmsearch", "dependencies": { - "npm-registry-fetch": "^16.2.0" + "npm-registry-fetch": "^17.0.0" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/libnpmteam/package.json b/deps/npm/node_modules/libnpmteam/package.json index 110304fa0a156d..94f264bd93bf7c 100644 --- a/deps/npm/node_modules/libnpmteam/package.json +++ b/deps/npm/node_modules/libnpmteam/package.json @@ -1,7 +1,7 @@ { "name": "libnpmteam", "description": "npm Team management APIs", - "version": "6.0.2", + "version": "6.0.4", "author": "GitHub Inc.", "license": "ISC", "main": "lib/index.js", @@ -22,7 +22,7 @@ }, "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/libnpmteam" }, "files": [ @@ -32,7 +32,7 @@ "homepage": "https://npmjs.com/package/libnpmteam", "dependencies": { "aproba": "^2.0.0", - "npm-registry-fetch": "^16.2.0" + "npm-registry-fetch": "^17.0.0" }, "engines": { "node": "^16.14.0 || >=18.0.0" diff --git a/deps/npm/node_modules/libnpmversion/README.md b/deps/npm/node_modules/libnpmversion/README.md index ac9ee50ae35d91..857c4d52dc1831 100644 --- a/deps/npm/node_modules/libnpmversion/README.md +++ b/deps/npm/node_modules/libnpmversion/README.md @@ -31,7 +31,6 @@ npmVersion(arg, { ignoreScripts: false, // do not run pre/post/version lifecycle scripts scriptShell: '/bin/bash', // shell to run lifecycle scripts in message: 'v%s', // message for tag and commit, replace %s with the version - silent: false, // passed to @npmcli/run-script to control whether it logs }).then(newVersion => { console.error('version updated!', newVersion) }) diff --git a/deps/npm/node_modules/libnpmversion/lib/enforce-clean.js b/deps/npm/node_modules/libnpmversion/lib/enforce-clean.js index 721f146221c155..25ebb5590e9171 100644 --- a/deps/npm/node_modules/libnpmversion/lib/enforce-clean.js +++ b/deps/npm/node_modules/libnpmversion/lib/enforce-clean.js @@ -1,5 +1,5 @@ const git = require('@npmcli/git') -const log = require('proc-log') +const { log } = require('proc-log') // returns true if it's cool to do git stuff // throws if it's unclean, and not forced. 
diff --git a/deps/npm/node_modules/libnpmversion/lib/index.js b/deps/npm/node_modules/libnpmversion/lib/index.js index 95acd11b5e4331..4d2fb45945a7b9 100644 --- a/deps/npm/node_modules/libnpmversion/lib/index.js +++ b/deps/npm/node_modules/libnpmversion/lib/index.js @@ -15,7 +15,6 @@ module.exports = async (newversion, opts = {}) => { scriptShell = undefined, preid = null, message = 'v%s', - silent, } = opts const pkg = opts.pkg || await readJson(path + '/package.json') @@ -35,6 +34,5 @@ module.exports = async (newversion, opts = {}) => { preid, pkg, message, - silent, }) } diff --git a/deps/npm/node_modules/libnpmversion/lib/read-json.js b/deps/npm/node_modules/libnpmversion/lib/read-json.js index 2dd0f7aa4902e8..32c7289507697f 100644 --- a/deps/npm/node_modules/libnpmversion/lib/read-json.js +++ b/deps/npm/node_modules/libnpmversion/lib/read-json.js @@ -1,7 +1,6 @@ // can't use read-package-json-fast, because we want to ensure // that we make as few changes as possible, even for safety issues. -const { promisify } = require('util') -const readFile = promisify(require('fs').readFile) +const { readFile } = require('fs/promises') const parse = require('json-parse-even-better-errors') module.exports = async path => parse(await readFile(path)) diff --git a/deps/npm/node_modules/libnpmversion/lib/version.js b/deps/npm/node_modules/libnpmversion/lib/version.js index f14b95e3233f06..bfcd8a521496d5 100644 --- a/deps/npm/node_modules/libnpmversion/lib/version.js +++ b/deps/npm/node_modules/libnpmversion/lib/version.js @@ -8,7 +8,7 @@ const readJson = require('./read-json.js') const git = require('@npmcli/git') const commit = require('./commit.js') const tag = require('./tag.js') -const log = require('proc-log') +const { log } = require('proc-log') const runScript = require('@npmcli/run-script') @@ -20,7 +20,6 @@ module.exports = async (newversion, opts) => { ignoreScripts, preid, pkg, - silent, } = opts const { valid, clean, inc } = semver @@ -65,7 +64,6 @@ module.exports = async (newversion, opts) => { pkg, stdio: 'inherit', event: 'preversion', - banner: !silent, env: { npm_old_version: current, npm_new_version: newV, @@ -101,7 +99,6 @@ module.exports = async (newversion, opts) => { pkg, stdio: 'inherit', event: 'version', - banner: !silent, env: { npm_old_version: current, npm_new_version: newV, @@ -128,7 +125,6 @@ module.exports = async (newversion, opts) => { pkg, stdio: 'inherit', event: 'postversion', - banner: !silent, env: { npm_old_version: current, npm_new_version: newV, diff --git a/deps/npm/node_modules/libnpmversion/lib/write-json.js b/deps/npm/node_modules/libnpmversion/lib/write-json.js index f066d72c67e124..425be8e8e3efb2 100644 --- a/deps/npm/node_modules/libnpmversion/lib/write-json.js +++ b/deps/npm/node_modules/libnpmversion/lib/write-json.js @@ -1,6 +1,5 @@ // write the json back, preserving the line breaks and indent -const { promisify } = require('util') -const writeFile = promisify(require('fs').writeFile) +const { writeFile } = require('fs/promises') const kIndent = Symbol.for('indent') const kNewline = Symbol.for('newline') diff --git a/deps/npm/node_modules/libnpmversion/package.json b/deps/npm/node_modules/libnpmversion/package.json index 782eeca7d2b795..43b0d2ff825d7b 100644 --- a/deps/npm/node_modules/libnpmversion/package.json +++ b/deps/npm/node_modules/libnpmversion/package.json @@ -1,6 +1,6 @@ { "name": "libnpmversion", - "version": "5.0.2", + "version": "6.0.1", "main": "lib/index.js", "files": [ "bin/", @@ -9,7 +9,7 @@ "description": "library to do the things 
that 'npm version' does", "repository": { "type": "git", - "url": "https://github.com/npm/cli.git", + "url": "git+https://github.com/npm/cli.git", "directory": "workspaces/libnpmversion" }, "author": "GitHub Inc.", @@ -37,10 +37,10 @@ "tap": "^16.3.8" }, "dependencies": { - "@npmcli/git": "^5.0.3", - "@npmcli/run-script": "^7.0.2", + "@npmcli/git": "^5.0.6", + "@npmcli/run-script": "^8.1.0", "json-parse-even-better-errors": "^3.0.0", - "proc-log": "^3.0.0", + "proc-log": "^4.2.0", "semver": "^7.3.7" }, "engines": { diff --git a/deps/npm/node_modules/lru-cache/dist/commonjs/index.min.js b/deps/npm/node_modules/lru-cache/dist/commonjs/index.min.js new file mode 100644 index 00000000000000..b7149f5415a45f --- /dev/null +++ b/deps/npm/node_modules/lru-cache/dist/commonjs/index.min.js @@ -0,0 +1,2 @@ +"use strict";var G=(o,t,e)=>{if(!t.has(o))throw TypeError("Cannot "+e)};var j=(o,t,e)=>(G(o,t,"read from private field"),e?e.call(o):t.get(o)),I=(o,t,e)=>{if(t.has(o))throw TypeError("Cannot add the same private member more than once");t instanceof WeakSet?t.add(o):t.set(o,e)},D=(o,t,e,i)=>(G(o,t,"write to private field"),i?i.call(o,e):t.set(o,e),e);Object.defineProperty(exports,"__esModule",{value:!0});exports.LRUCache=void 0;var T=typeof performance=="object"&&performance&&typeof performance.now=="function"?performance:Date,N=new Set,L=typeof process=="object"&&process?process:{},P=(o,t,e,i)=>{typeof L.emitWarning=="function"?L.emitWarning(o,t,e,i):console.error(`[${e}] ${t}: ${o}`)},W=globalThis.AbortController,M=globalThis.AbortSignal;if(typeof W>"u"){M=class{onabort;_onabort=[];reason;aborted=!1;addEventListener(i,s){this._onabort.push(s)}},W=class{constructor(){t()}signal=new M;abort(i){if(!this.signal.aborted){this.signal.reason=i,this.signal.aborted=!0;for(let s of this.signal._onabort)s(i);this.signal.onabort?.(i)}}};let o=L.env?.LRU_CACHE_IGNORE_AC_WARNING!=="1",t=()=>{o&&(o=!1,P("AbortController is not defined. If using lru-cache in node 14, load an AbortController polyfill from the `node-abort-controller` package. A minimal polyfill is provided for use by LRUCache.fetch(), but it should not be relied upon in other contexts (eg, passing it to other APIs that use AbortController/AbortSignal might have undesirable effects). 
You may disable this with LRU_CACHE_IGNORE_AC_WARNING=1 in the env.","NO_ABORT_CONTROLLER","ENOTSUP",t))}}var V=o=>!N.has(o),Y=Symbol("type"),A=o=>o&&o===Math.floor(o)&&o>0&&isFinite(o),H=o=>A(o)?o<=Math.pow(2,8)?Uint8Array:o<=Math.pow(2,16)?Uint16Array:o<=Math.pow(2,32)?Uint32Array:o<=Number.MAX_SAFE_INTEGER?E:null:null,E=class extends Array{constructor(t){super(t),this.fill(0)}},v,z=class{heap;length;static create(t){let e=H(t);if(!e)return[];D(z,v,!0);let i=new z(t,e);return D(z,v,!1),i}constructor(t,e){if(!j(z,v))throw new TypeError("instantiate Stack using Stack.create(n)");this.heap=new e(t),this.length=0}push(t){this.heap[this.length++]=t}pop(){return this.heap[--this.length]}},R=z;v=new WeakMap,I(R,v,!1);var C=class{#g;#f;#p;#w;#C;ttl;ttlResolution;ttlAutopurge;updateAgeOnGet;updateAgeOnHas;allowStale;noDisposeOnSet;noUpdateTTL;maxEntrySize;sizeCalculation;noDeleteOnFetchRejection;noDeleteOnStaleGet;allowStaleOnFetchAbort;allowStaleOnFetchRejection;ignoreFetchAbort;#n;#S;#s;#i;#t;#l;#c;#o;#h;#_;#r;#b;#y;#u;#m;#T;#a;static unsafeExposeInternals(t){return{starts:t.#y,ttls:t.#u,sizes:t.#b,keyMap:t.#s,keyList:t.#i,valList:t.#t,next:t.#l,prev:t.#c,get head(){return t.#o},get tail(){return t.#h},free:t.#_,isBackgroundFetch:e=>t.#e(e),backgroundFetch:(e,i,s,n)=>t.#D(e,i,s,n),moveToTail:e=>t.#v(e),indexes:e=>t.#A(e),rindexes:e=>t.#F(e),isStale:e=>t.#d(e)}}get max(){return this.#g}get maxSize(){return this.#f}get calculatedSize(){return this.#S}get size(){return this.#n}get fetchMethod(){return this.#C}get dispose(){return this.#p}get disposeAfter(){return this.#w}constructor(t){let{max:e=0,ttl:i,ttlResolution:s=1,ttlAutopurge:n,updateAgeOnGet:h,updateAgeOnHas:l,allowStale:r,dispose:g,disposeAfter:b,noDisposeOnSet:f,noUpdateTTL:u,maxSize:c=0,maxEntrySize:F=0,sizeCalculation:d,fetchMethod:S,noDeleteOnFetchRejection:a,noDeleteOnStaleGet:w,allowStaleOnFetchRejection:y,allowStaleOnFetchAbort:p,ignoreFetchAbort:_}=t;if(e!==0&&!A(e))throw new TypeError("max option must be a nonnegative integer");let O=e?H(e):Array;if(!O)throw new Error("invalid max value: "+e);if(this.#g=e,this.#f=c,this.maxEntrySize=F||this.#f,this.sizeCalculation=d,this.sizeCalculation){if(!this.#f&&!this.maxEntrySize)throw new TypeError("cannot set sizeCalculation without setting maxSize or maxEntrySize");if(typeof this.sizeCalculation!="function")throw new TypeError("sizeCalculation set to non-function")}if(S!==void 0&&typeof S!="function")throw new TypeError("fetchMethod must be a function if specified");if(this.#C=S,this.#T=!!S,this.#s=new Map,this.#i=new Array(e).fill(void 0),this.#t=new Array(e).fill(void 0),this.#l=new O(e),this.#c=new O(e),this.#o=0,this.#h=0,this.#_=R.create(e),this.#n=0,this.#S=0,typeof g=="function"&&(this.#p=g),typeof b=="function"?(this.#w=b,this.#r=[]):(this.#w=void 0,this.#r=void 0),this.#m=!!this.#p,this.#a=!!this.#w,this.noDisposeOnSet=!!f,this.noUpdateTTL=!!u,this.noDeleteOnFetchRejection=!!a,this.allowStaleOnFetchRejection=!!y,this.allowStaleOnFetchAbort=!!p,this.ignoreFetchAbort=!!_,this.maxEntrySize!==0){if(this.#f!==0&&!A(this.#f))throw new TypeError("maxSize must be a positive integer if specified");if(!A(this.maxEntrySize))throw new TypeError("maxEntrySize must be a positive integer if specified");this.#I()}if(this.allowStale=!!r,this.noDeleteOnStaleGet=!!w,this.updateAgeOnGet=!!h,this.updateAgeOnHas=!!l,this.ttlResolution=A(s)||s===0?s:1,this.ttlAutopurge=!!n,this.ttl=i||0,this.ttl){if(!A(this.ttl))throw new TypeError("ttl must be a positive integer if 
specified");this.#L()}if(this.#g===0&&this.ttl===0&&this.#f===0)throw new TypeError("At least one of max, maxSize, or ttl is required");if(!this.ttlAutopurge&&!this.#g&&!this.#f){let m="LRU_CACHE_UNBOUNDED";V(m)&&(N.add(m),P("TTL caching without ttlAutopurge, max, or maxSize can result in unbounded memory consumption.","UnboundedCacheWarning",m,C))}}getRemainingTTL(t){return this.#s.has(t)?1/0:0}#L(){let t=new E(this.#g),e=new E(this.#g);this.#u=t,this.#y=e,this.#U=(n,h,l=T.now())=>{if(e[n]=h!==0?l:0,t[n]=h,h!==0&&this.ttlAutopurge){let r=setTimeout(()=>{this.#d(n)&&this.delete(this.#i[n])},h+1);r.unref&&r.unref()}},this.#z=n=>{e[n]=t[n]!==0?T.now():0},this.#O=(n,h)=>{if(t[h]){let l=t[h],r=e[h];if(!l||!r)return;n.ttl=l,n.start=r,n.now=i||s();let g=n.now-r;n.remainingTTL=l-g}};let i=0,s=()=>{let n=T.now();if(this.ttlResolution>0){i=n;let h=setTimeout(()=>i=0,this.ttlResolution);h.unref&&h.unref()}return n};this.getRemainingTTL=n=>{let h=this.#s.get(n);if(h===void 0)return 0;let l=t[h],r=e[h];if(!l||!r)return 1/0;let g=(i||s())-r;return l-g},this.#d=n=>{let h=e[n],l=t[n];return!!l&&!!h&&(i||s())-h>l}}#z=()=>{};#O=()=>{};#U=()=>{};#d=()=>!1;#I(){let t=new E(this.#g);this.#S=0,this.#b=t,this.#E=e=>{this.#S-=t[e],t[e]=0},this.#x=(e,i,s,n)=>{if(this.#e(i))return 0;if(!A(s))if(n){if(typeof n!="function")throw new TypeError("sizeCalculation must be a function");if(s=n(i,e),!A(s))throw new TypeError("sizeCalculation return invalid (expect positive integer)")}else throw new TypeError("invalid size value (must be positive integer). When maxSize or maxEntrySize is used, sizeCalculation or size must be set.");return s},this.#R=(e,i,s)=>{if(t[e]=i,this.#f){let n=this.#f-t[e];for(;this.#S>n;)this.#W(!0)}this.#S+=t[e],s&&(s.entrySize=i,s.totalCalculatedSize=this.#S)}}#E=t=>{};#R=(t,e,i)=>{};#x=(t,e,i,s)=>{if(i||s)throw new TypeError("cannot set size without setting maxSize or maxEntrySize on cache");return 0};*#A({allowStale:t=this.allowStale}={}){if(this.#n)for(let e=this.#h;!(!this.#G(e)||((t||!this.#d(e))&&(yield e),e===this.#o));)e=this.#c[e]}*#F({allowStale:t=this.allowStale}={}){if(this.#n)for(let e=this.#o;!(!this.#G(e)||((t||!this.#d(e))&&(yield e),e===this.#h));)e=this.#l[e]}#G(t){return t!==void 0&&this.#s.get(this.#i[t])===t}*entries(){for(let t of this.#A())this.#t[t]!==void 0&&this.#i[t]!==void 0&&!this.#e(this.#t[t])&&(yield[this.#i[t],this.#t[t]])}*rentries(){for(let t of this.#F())this.#t[t]!==void 0&&this.#i[t]!==void 0&&!this.#e(this.#t[t])&&(yield[this.#i[t],this.#t[t]])}*keys(){for(let t of this.#A()){let e=this.#i[t];e!==void 0&&!this.#e(this.#t[t])&&(yield e)}}*rkeys(){for(let t of this.#F()){let e=this.#i[t];e!==void 0&&!this.#e(this.#t[t])&&(yield e)}}*values(){for(let t of this.#A())this.#t[t]!==void 0&&!this.#e(this.#t[t])&&(yield this.#t[t])}*rvalues(){for(let t of this.#F())this.#t[t]!==void 0&&!this.#e(this.#t[t])&&(yield this.#t[t])}[Symbol.iterator](){return this.entries()}[Symbol.toStringTag]="LRUCache";find(t,e={}){for(let i of this.#A()){let s=this.#t[i],n=this.#e(s)?s.__staleWhileFetching:s;if(n!==void 0&&t(n,this.#i[i],this))return this.get(this.#i[i],e)}}forEach(t,e=this){for(let i of this.#A()){let s=this.#t[i],n=this.#e(s)?s.__staleWhileFetching:s;n!==void 0&&t.call(e,n,this.#i[i],this)}}rforEach(t,e=this){for(let i of this.#F()){let s=this.#t[i],n=this.#e(s)?s.__staleWhileFetching:s;n!==void 0&&t.call(e,n,this.#i[i],this)}}purgeStale(){let t=!1;for(let e of this.#F({allowStale:!0}))this.#d(e)&&(this.delete(this.#i[e]),t=!0);return t}info(t){let 
e=this.#s.get(t);if(e===void 0)return;let i=this.#t[e],s=this.#e(i)?i.__staleWhileFetching:i;if(s===void 0)return;let n={value:s};if(this.#u&&this.#y){let h=this.#u[e],l=this.#y[e];if(h&&l){let r=h-(T.now()-l);n.ttl=r,n.start=Date.now()}}return this.#b&&(n.size=this.#b[e]),n}dump(){let t=[];for(let e of this.#A({allowStale:!0})){let i=this.#i[e],s=this.#t[e],n=this.#e(s)?s.__staleWhileFetching:s;if(n===void 0||i===void 0)continue;let h={value:n};if(this.#u&&this.#y){h.ttl=this.#u[e];let l=T.now()-this.#y[e];h.start=Math.floor(Date.now()-l)}this.#b&&(h.size=this.#b[e]),t.unshift([i,h])}return t}load(t){this.clear();for(let[e,i]of t){if(i.start){let s=Date.now()-i.start;i.start=T.now()-s}this.set(e,i.value,i)}}set(t,e,i={}){if(e===void 0)return this.delete(t),this;let{ttl:s=this.ttl,start:n,noDisposeOnSet:h=this.noDisposeOnSet,sizeCalculation:l=this.sizeCalculation,status:r}=i,{noUpdateTTL:g=this.noUpdateTTL}=i,b=this.#x(t,e,i.size||0,l);if(this.maxEntrySize&&b>this.maxEntrySize)return r&&(r.set="miss",r.maxEntrySizeExceeded=!0),this.delete(t),this;let f=this.#n===0?void 0:this.#s.get(t);if(f===void 0)f=this.#n===0?this.#h:this.#_.length!==0?this.#_.pop():this.#n===this.#g?this.#W(!1):this.#n,this.#i[f]=t,this.#t[f]=e,this.#s.set(t,f),this.#l[this.#h]=f,this.#c[f]=this.#h,this.#h=f,this.#n++,this.#R(f,b,r),r&&(r.set="add"),g=!1;else{this.#v(f);let u=this.#t[f];if(e!==u){if(this.#T&&this.#e(u)){u.__abortController.abort(new Error("replaced"));let{__staleWhileFetching:c}=u;c!==void 0&&!h&&(this.#m&&this.#p?.(c,t,"set"),this.#a&&this.#r?.push([c,t,"set"]))}else h||(this.#m&&this.#p?.(u,t,"set"),this.#a&&this.#r?.push([u,t,"set"]));if(this.#E(f),this.#R(f,b,r),this.#t[f]=e,r){r.set="replace";let c=u&&this.#e(u)?u.__staleWhileFetching:u;c!==void 0&&(r.oldValue=c)}}else r&&(r.set="update")}if(s!==0&&!this.#u&&this.#L(),this.#u&&(g||this.#U(f,s,n),r&&this.#O(r,f)),!h&&this.#a&&this.#r){let u=this.#r,c;for(;c=u?.shift();)this.#w?.(...c)}return this}pop(){try{for(;this.#n;){let t=this.#t[this.#o];if(this.#W(!0),this.#e(t)){if(t.__staleWhileFetching)return t.__staleWhileFetching}else if(t!==void 0)return t}}finally{if(this.#a&&this.#r){let t=this.#r,e;for(;e=t?.shift();)this.#w?.(...e)}}}#W(t){let e=this.#o,i=this.#i[e],s=this.#t[e];return this.#T&&this.#e(s)?s.__abortController.abort(new Error("evicted")):(this.#m||this.#a)&&(this.#m&&this.#p?.(s,i,"evict"),this.#a&&this.#r?.push([s,i,"evict"])),this.#E(e),t&&(this.#i[e]=void 0,this.#t[e]=void 0,this.#_.push(e)),this.#n===1?(this.#o=this.#h=0,this.#_.length=0):this.#o=this.#l[e],this.#s.delete(i),this.#n--,e}has(t,e={}){let{updateAgeOnHas:i=this.updateAgeOnHas,status:s}=e,n=this.#s.get(t);if(n!==void 0){let h=this.#t[n];if(this.#e(h)&&h.__staleWhileFetching===void 0)return!1;if(this.#d(n))s&&(s.has="stale",this.#O(s,n));else return i&&this.#z(n),s&&(s.has="hit",this.#O(s,n)),!0}else s&&(s.has="miss");return!1}peek(t,e={}){let{allowStale:i=this.allowStale}=e,s=this.#s.get(t);if(s===void 0||!i&&this.#d(s))return;let n=this.#t[s];return this.#e(n)?n.__staleWhileFetching:n}#D(t,e,i,s){let n=e===void 0?void 0:this.#t[e];if(this.#e(n))return n;let h=new W,{signal:l}=i;l?.addEventListener("abort",()=>h.abort(l.reason),{signal:h.signal});let r={signal:h.signal,options:i,context:s},g=(d,S=!1)=>{let{aborted:a}=h.signal,w=i.ignoreFetchAbort&&d!==void 0;if(i.status&&(a&&!S?(i.status.fetchAborted=!0,i.status.fetchError=h.signal.reason,w&&(i.status.fetchAbortIgnored=!0)):i.status.fetchResolved=!0),a&&!w&&!S)return f(h.signal.reason);let y=c;return 
this.#t[e]===c&&(d===void 0?y.__staleWhileFetching?this.#t[e]=y.__staleWhileFetching:this.delete(t):(i.status&&(i.status.fetchUpdated=!0),this.set(t,d,r.options))),d},b=d=>(i.status&&(i.status.fetchRejected=!0,i.status.fetchError=d),f(d)),f=d=>{let{aborted:S}=h.signal,a=S&&i.allowStaleOnFetchAbort,w=a||i.allowStaleOnFetchRejection,y=w||i.noDeleteOnFetchRejection,p=c;if(this.#t[e]===c&&(!y||p.__staleWhileFetching===void 0?this.delete(t):a||(this.#t[e]=p.__staleWhileFetching)),w)return i.status&&p.__staleWhileFetching!==void 0&&(i.status.returnedStale=!0),p.__staleWhileFetching;if(p.__returned===p)throw d},u=(d,S)=>{let a=this.#C?.(t,n,r);a&&a instanceof Promise&&a.then(w=>d(w===void 0?void 0:w),S),h.signal.addEventListener("abort",()=>{(!i.ignoreFetchAbort||i.allowStaleOnFetchAbort)&&(d(void 0),i.allowStaleOnFetchAbort&&(d=w=>g(w,!0)))})};i.status&&(i.status.fetchDispatched=!0);let c=new Promise(u).then(g,b),F=Object.assign(c,{__abortController:h,__staleWhileFetching:n,__returned:void 0});return e===void 0?(this.set(t,F,{...r.options,status:void 0}),e=this.#s.get(t)):this.#t[e]=F,F}#e(t){if(!this.#T)return!1;let e=t;return!!e&&e instanceof Promise&&e.hasOwnProperty("__staleWhileFetching")&&e.__abortController instanceof W}async fetch(t,e={}){let{allowStale:i=this.allowStale,updateAgeOnGet:s=this.updateAgeOnGet,noDeleteOnStaleGet:n=this.noDeleteOnStaleGet,ttl:h=this.ttl,noDisposeOnSet:l=this.noDisposeOnSet,size:r=0,sizeCalculation:g=this.sizeCalculation,noUpdateTTL:b=this.noUpdateTTL,noDeleteOnFetchRejection:f=this.noDeleteOnFetchRejection,allowStaleOnFetchRejection:u=this.allowStaleOnFetchRejection,ignoreFetchAbort:c=this.ignoreFetchAbort,allowStaleOnFetchAbort:F=this.allowStaleOnFetchAbort,context:d,forceRefresh:S=!1,status:a,signal:w}=e;if(!this.#T)return a&&(a.fetch="get"),this.get(t,{allowStale:i,updateAgeOnGet:s,noDeleteOnStaleGet:n,status:a});let y={allowStale:i,updateAgeOnGet:s,noDeleteOnStaleGet:n,ttl:h,noDisposeOnSet:l,size:r,sizeCalculation:g,noUpdateTTL:b,noDeleteOnFetchRejection:f,allowStaleOnFetchRejection:u,allowStaleOnFetchAbort:F,ignoreFetchAbort:c,status:a,signal:w},p=this.#s.get(t);if(p===void 0){a&&(a.fetch="miss");let _=this.#D(t,p,y,d);return _.__returned=_}else{let _=this.#t[p];if(this.#e(_)){let x=i&&_.__staleWhileFetching!==void 0;return a&&(a.fetch="inflight",x&&(a.returnedStale=!0)),x?_.__staleWhileFetching:_.__returned=_}let O=this.#d(p);if(!S&&!O)return a&&(a.fetch="hit"),this.#v(p),s&&this.#z(p),a&&this.#O(a,p),_;let m=this.#D(t,p,y,d),U=m.__staleWhileFetching!==void 0&&i;return a&&(a.fetch=O?"stale":"refresh",U&&O&&(a.returnedStale=!0)),U?m.__staleWhileFetching:m.__returned=m}}get(t,e={}){let{allowStale:i=this.allowStale,updateAgeOnGet:s=this.updateAgeOnGet,noDeleteOnStaleGet:n=this.noDeleteOnStaleGet,status:h}=e,l=this.#s.get(t);if(l!==void 0){let r=this.#t[l],g=this.#e(r);return h&&this.#O(h,l),this.#d(l)?(h&&(h.get="stale"),g?(h&&i&&r.__staleWhileFetching!==void 0&&(h.returnedStale=!0),i?r.__staleWhileFetching:void 0):(n||this.delete(t),h&&i&&(h.returnedStale=!0),i?r:void 0)):(h&&(h.get="hit"),g?r.__staleWhileFetching:(this.#v(l),s&&this.#z(l),r))}else h&&(h.get="miss")}#j(t,e){this.#c[e]=t,this.#l[t]=e}#v(t){t!==this.#h&&(t===this.#o?this.#o=this.#l[t]:this.#j(this.#c[t],this.#l[t]),this.#j(this.#h,t),this.#h=t)}delete(t){let e=!1;if(this.#n!==0){let i=this.#s.get(t);if(i!==void 0)if(e=!0,this.#n===1)this.clear();else{this.#E(i);let s=this.#t[i];if(this.#e(s)?s.__abortController.abort(new 
Error("deleted")):(this.#m||this.#a)&&(this.#m&&this.#p?.(s,t,"delete"),this.#a&&this.#r?.push([s,t,"delete"])),this.#s.delete(t),this.#i[i]=void 0,this.#t[i]=void 0,i===this.#h)this.#h=this.#c[i];else if(i===this.#o)this.#o=this.#l[i];else{let n=this.#c[i];this.#l[n]=this.#l[i];let h=this.#l[i];this.#c[h]=this.#c[i]}this.#n--,this.#_.push(i)}}if(this.#a&&this.#r?.length){let i=this.#r,s;for(;s=i?.shift();)this.#w?.(...s)}return e}clear(){for(let t of this.#F({allowStale:!0})){let e=this.#t[t];if(this.#e(e))e.__abortController.abort(new Error("deleted"));else{let i=this.#i[t];this.#m&&this.#p?.(e,i,"delete"),this.#a&&this.#r?.push([e,i,"delete"])}}if(this.#s.clear(),this.#t.fill(void 0),this.#i.fill(void 0),this.#u&&this.#y&&(this.#u.fill(0),this.#y.fill(0)),this.#b&&this.#b.fill(0),this.#o=0,this.#h=0,this.#_.length=0,this.#S=0,this.#n=0,this.#a&&this.#r){let t=this.#r,e;for(;e=t?.shift();)this.#w?.(...e)}}};exports.LRUCache=C; +//# sourceMappingURL=index.min.js.map diff --git a/deps/npm/node_modules/lru-cache/dist/esm/index.min.js b/deps/npm/node_modules/lru-cache/dist/esm/index.min.js new file mode 100644 index 00000000000000..4285815f9abb17 --- /dev/null +++ b/deps/npm/node_modules/lru-cache/dist/esm/index.min.js @@ -0,0 +1,2 @@ +var G=(o,t,e)=>{if(!t.has(o))throw TypeError("Cannot "+e)};var I=(o,t,e)=>(G(o,t,"read from private field"),e?e.call(o):t.get(o)),j=(o,t,e)=>{if(t.has(o))throw TypeError("Cannot add the same private member more than once");t instanceof WeakSet?t.add(o):t.set(o,e)},D=(o,t,e,i)=>(G(o,t,"write to private field"),i?i.call(o,e):t.set(o,e),e);var O=typeof performance=="object"&&performance&&typeof performance.now=="function"?performance:Date,M=new Set,L=typeof process=="object"&&process?process:{},P=(o,t,e,i)=>{typeof L.emitWarning=="function"?L.emitWarning(o,t,e,i):console.error(`[${e}] ${t}: ${o}`)},W=globalThis.AbortController,N=globalThis.AbortSignal;if(typeof W>"u"){N=class{onabort;_onabort=[];reason;aborted=!1;addEventListener(i,s){this._onabort.push(s)}},W=class{constructor(){t()}signal=new N;abort(i){if(!this.signal.aborted){this.signal.reason=i,this.signal.aborted=!0;for(let s of this.signal._onabort)s(i);this.signal.onabort?.(i)}}};let o=L.env?.LRU_CACHE_IGNORE_AC_WARNING!=="1",t=()=>{o&&(o=!1,P("AbortController is not defined. If using lru-cache in node 14, load an AbortController polyfill from the `node-abort-controller` package. A minimal polyfill is provided for use by LRUCache.fetch(), but it should not be relied upon in other contexts (eg, passing it to other APIs that use AbortController/AbortSignal might have undesirable effects). 
You may disable this with LRU_CACHE_IGNORE_AC_WARNING=1 in the env.","NO_ABORT_CONTROLLER","ENOTSUP",t))}}var V=o=>!M.has(o),Y=Symbol("type"),A=o=>o&&o===Math.floor(o)&&o>0&&isFinite(o),H=o=>A(o)?o<=Math.pow(2,8)?Uint8Array:o<=Math.pow(2,16)?Uint16Array:o<=Math.pow(2,32)?Uint32Array:o<=Number.MAX_SAFE_INTEGER?E:null:null,E=class extends Array{constructor(t){super(t),this.fill(0)}},v,z=class{heap;length;static create(t){let e=H(t);if(!e)return[];D(z,v,!0);let i=new z(t,e);return D(z,v,!1),i}constructor(t,e){if(!I(z,v))throw new TypeError("instantiate Stack using Stack.create(n)");this.heap=new e(t),this.length=0}push(t){this.heap[this.length++]=t}pop(){return this.heap[--this.length]}},C=z;v=new WeakMap,j(C,v,!1);var R=class{#g;#f;#p;#w;#C;ttl;ttlResolution;ttlAutopurge;updateAgeOnGet;updateAgeOnHas;allowStale;noDisposeOnSet;noUpdateTTL;maxEntrySize;sizeCalculation;noDeleteOnFetchRejection;noDeleteOnStaleGet;allowStaleOnFetchAbort;allowStaleOnFetchRejection;ignoreFetchAbort;#n;#S;#s;#i;#t;#l;#c;#o;#h;#_;#r;#b;#y;#u;#m;#O;#a;static unsafeExposeInternals(t){return{starts:t.#y,ttls:t.#u,sizes:t.#b,keyMap:t.#s,keyList:t.#i,valList:t.#t,next:t.#l,prev:t.#c,get head(){return t.#o},get tail(){return t.#h},free:t.#_,isBackgroundFetch:e=>t.#e(e),backgroundFetch:(e,i,s,n)=>t.#D(e,i,s,n),moveToTail:e=>t.#v(e),indexes:e=>t.#A(e),rindexes:e=>t.#F(e),isStale:e=>t.#d(e)}}get max(){return this.#g}get maxSize(){return this.#f}get calculatedSize(){return this.#S}get size(){return this.#n}get fetchMethod(){return this.#C}get dispose(){return this.#p}get disposeAfter(){return this.#w}constructor(t){let{max:e=0,ttl:i,ttlResolution:s=1,ttlAutopurge:n,updateAgeOnGet:h,updateAgeOnHas:l,allowStale:r,dispose:g,disposeAfter:b,noDisposeOnSet:f,noUpdateTTL:u,maxSize:c=0,maxEntrySize:F=0,sizeCalculation:d,fetchMethod:S,noDeleteOnFetchRejection:a,noDeleteOnStaleGet:w,allowStaleOnFetchRejection:y,allowStaleOnFetchAbort:p,ignoreFetchAbort:_}=t;if(e!==0&&!A(e))throw new TypeError("max option must be a nonnegative integer");let T=e?H(e):Array;if(!T)throw new Error("invalid max value: "+e);if(this.#g=e,this.#f=c,this.maxEntrySize=F||this.#f,this.sizeCalculation=d,this.sizeCalculation){if(!this.#f&&!this.maxEntrySize)throw new TypeError("cannot set sizeCalculation without setting maxSize or maxEntrySize");if(typeof this.sizeCalculation!="function")throw new TypeError("sizeCalculation set to non-function")}if(S!==void 0&&typeof S!="function")throw new TypeError("fetchMethod must be a function if specified");if(this.#C=S,this.#O=!!S,this.#s=new Map,this.#i=new Array(e).fill(void 0),this.#t=new Array(e).fill(void 0),this.#l=new T(e),this.#c=new T(e),this.#o=0,this.#h=0,this.#_=C.create(e),this.#n=0,this.#S=0,typeof g=="function"&&(this.#p=g),typeof b=="function"?(this.#w=b,this.#r=[]):(this.#w=void 0,this.#r=void 0),this.#m=!!this.#p,this.#a=!!this.#w,this.noDisposeOnSet=!!f,this.noUpdateTTL=!!u,this.noDeleteOnFetchRejection=!!a,this.allowStaleOnFetchRejection=!!y,this.allowStaleOnFetchAbort=!!p,this.ignoreFetchAbort=!!_,this.maxEntrySize!==0){if(this.#f!==0&&!A(this.#f))throw new TypeError("maxSize must be a positive integer if specified");if(!A(this.maxEntrySize))throw new TypeError("maxEntrySize must be a positive integer if specified");this.#j()}if(this.allowStale=!!r,this.noDeleteOnStaleGet=!!w,this.updateAgeOnGet=!!h,this.updateAgeOnHas=!!l,this.ttlResolution=A(s)||s===0?s:1,this.ttlAutopurge=!!n,this.ttl=i||0,this.ttl){if(!A(this.ttl))throw new TypeError("ttl must be a positive integer if 
specified");this.#L()}if(this.#g===0&&this.ttl===0&&this.#f===0)throw new TypeError("At least one of max, maxSize, or ttl is required");if(!this.ttlAutopurge&&!this.#g&&!this.#f){let m="LRU_CACHE_UNBOUNDED";V(m)&&(M.add(m),P("TTL caching without ttlAutopurge, max, or maxSize can result in unbounded memory consumption.","UnboundedCacheWarning",m,R))}}getRemainingTTL(t){return this.#s.has(t)?1/0:0}#L(){let t=new E(this.#g),e=new E(this.#g);this.#u=t,this.#y=e,this.#x=(n,h,l=O.now())=>{if(e[n]=h!==0?l:0,t[n]=h,h!==0&&this.ttlAutopurge){let r=setTimeout(()=>{this.#d(n)&&this.delete(this.#i[n])},h+1);r.unref&&r.unref()}},this.#z=n=>{e[n]=t[n]!==0?O.now():0},this.#T=(n,h)=>{if(t[h]){let l=t[h],r=e[h];if(!l||!r)return;n.ttl=l,n.start=r,n.now=i||s();let g=n.now-r;n.remainingTTL=l-g}};let i=0,s=()=>{let n=O.now();if(this.ttlResolution>0){i=n;let h=setTimeout(()=>i=0,this.ttlResolution);h.unref&&h.unref()}return n};this.getRemainingTTL=n=>{let h=this.#s.get(n);if(h===void 0)return 0;let l=t[h],r=e[h];if(!l||!r)return 1/0;let g=(i||s())-r;return l-g},this.#d=n=>{let h=e[n],l=t[n];return!!l&&!!h&&(i||s())-h>l}}#z=()=>{};#T=()=>{};#x=()=>{};#d=()=>!1;#j(){let t=new E(this.#g);this.#S=0,this.#b=t,this.#E=e=>{this.#S-=t[e],t[e]=0},this.#U=(e,i,s,n)=>{if(this.#e(i))return 0;if(!A(s))if(n){if(typeof n!="function")throw new TypeError("sizeCalculation must be a function");if(s=n(i,e),!A(s))throw new TypeError("sizeCalculation return invalid (expect positive integer)")}else throw new TypeError("invalid size value (must be positive integer). When maxSize or maxEntrySize is used, sizeCalculation or size must be set.");return s},this.#W=(e,i,s)=>{if(t[e]=i,this.#f){let n=this.#f-t[e];for(;this.#S>n;)this.#R(!0)}this.#S+=t[e],s&&(s.entrySize=i,s.totalCalculatedSize=this.#S)}}#E=t=>{};#W=(t,e,i)=>{};#U=(t,e,i,s)=>{if(i||s)throw new TypeError("cannot set size without setting maxSize or maxEntrySize on cache");return 0};*#A({allowStale:t=this.allowStale}={}){if(this.#n)for(let e=this.#h;!(!this.#G(e)||((t||!this.#d(e))&&(yield e),e===this.#o));)e=this.#c[e]}*#F({allowStale:t=this.allowStale}={}){if(this.#n)for(let e=this.#o;!(!this.#G(e)||((t||!this.#d(e))&&(yield e),e===this.#h));)e=this.#l[e]}#G(t){return t!==void 0&&this.#s.get(this.#i[t])===t}*entries(){for(let t of this.#A())this.#t[t]!==void 0&&this.#i[t]!==void 0&&!this.#e(this.#t[t])&&(yield[this.#i[t],this.#t[t]])}*rentries(){for(let t of this.#F())this.#t[t]!==void 0&&this.#i[t]!==void 0&&!this.#e(this.#t[t])&&(yield[this.#i[t],this.#t[t]])}*keys(){for(let t of this.#A()){let e=this.#i[t];e!==void 0&&!this.#e(this.#t[t])&&(yield e)}}*rkeys(){for(let t of this.#F()){let e=this.#i[t];e!==void 0&&!this.#e(this.#t[t])&&(yield e)}}*values(){for(let t of this.#A())this.#t[t]!==void 0&&!this.#e(this.#t[t])&&(yield this.#t[t])}*rvalues(){for(let t of this.#F())this.#t[t]!==void 0&&!this.#e(this.#t[t])&&(yield this.#t[t])}[Symbol.iterator](){return this.entries()}[Symbol.toStringTag]="LRUCache";find(t,e={}){for(let i of this.#A()){let s=this.#t[i],n=this.#e(s)?s.__staleWhileFetching:s;if(n!==void 0&&t(n,this.#i[i],this))return this.get(this.#i[i],e)}}forEach(t,e=this){for(let i of this.#A()){let s=this.#t[i],n=this.#e(s)?s.__staleWhileFetching:s;n!==void 0&&t.call(e,n,this.#i[i],this)}}rforEach(t,e=this){for(let i of this.#F()){let s=this.#t[i],n=this.#e(s)?s.__staleWhileFetching:s;n!==void 0&&t.call(e,n,this.#i[i],this)}}purgeStale(){let t=!1;for(let e of this.#F({allowStale:!0}))this.#d(e)&&(this.delete(this.#i[e]),t=!0);return t}info(t){let 
e=this.#s.get(t);if(e===void 0)return;let i=this.#t[e],s=this.#e(i)?i.__staleWhileFetching:i;if(s===void 0)return;let n={value:s};if(this.#u&&this.#y){let h=this.#u[e],l=this.#y[e];if(h&&l){let r=h-(O.now()-l);n.ttl=r,n.start=Date.now()}}return this.#b&&(n.size=this.#b[e]),n}dump(){let t=[];for(let e of this.#A({allowStale:!0})){let i=this.#i[e],s=this.#t[e],n=this.#e(s)?s.__staleWhileFetching:s;if(n===void 0||i===void 0)continue;let h={value:n};if(this.#u&&this.#y){h.ttl=this.#u[e];let l=O.now()-this.#y[e];h.start=Math.floor(Date.now()-l)}this.#b&&(h.size=this.#b[e]),t.unshift([i,h])}return t}load(t){this.clear();for(let[e,i]of t){if(i.start){let s=Date.now()-i.start;i.start=O.now()-s}this.set(e,i.value,i)}}set(t,e,i={}){if(e===void 0)return this.delete(t),this;let{ttl:s=this.ttl,start:n,noDisposeOnSet:h=this.noDisposeOnSet,sizeCalculation:l=this.sizeCalculation,status:r}=i,{noUpdateTTL:g=this.noUpdateTTL}=i,b=this.#U(t,e,i.size||0,l);if(this.maxEntrySize&&b>this.maxEntrySize)return r&&(r.set="miss",r.maxEntrySizeExceeded=!0),this.delete(t),this;let f=this.#n===0?void 0:this.#s.get(t);if(f===void 0)f=this.#n===0?this.#h:this.#_.length!==0?this.#_.pop():this.#n===this.#g?this.#R(!1):this.#n,this.#i[f]=t,this.#t[f]=e,this.#s.set(t,f),this.#l[this.#h]=f,this.#c[f]=this.#h,this.#h=f,this.#n++,this.#W(f,b,r),r&&(r.set="add"),g=!1;else{this.#v(f);let u=this.#t[f];if(e!==u){if(this.#O&&this.#e(u)){u.__abortController.abort(new Error("replaced"));let{__staleWhileFetching:c}=u;c!==void 0&&!h&&(this.#m&&this.#p?.(c,t,"set"),this.#a&&this.#r?.push([c,t,"set"]))}else h||(this.#m&&this.#p?.(u,t,"set"),this.#a&&this.#r?.push([u,t,"set"]));if(this.#E(f),this.#W(f,b,r),this.#t[f]=e,r){r.set="replace";let c=u&&this.#e(u)?u.__staleWhileFetching:u;c!==void 0&&(r.oldValue=c)}}else r&&(r.set="update")}if(s!==0&&!this.#u&&this.#L(),this.#u&&(g||this.#x(f,s,n),r&&this.#T(r,f)),!h&&this.#a&&this.#r){let u=this.#r,c;for(;c=u?.shift();)this.#w?.(...c)}return this}pop(){try{for(;this.#n;){let t=this.#t[this.#o];if(this.#R(!0),this.#e(t)){if(t.__staleWhileFetching)return t.__staleWhileFetching}else if(t!==void 0)return t}}finally{if(this.#a&&this.#r){let t=this.#r,e;for(;e=t?.shift();)this.#w?.(...e)}}}#R(t){let e=this.#o,i=this.#i[e],s=this.#t[e];return this.#O&&this.#e(s)?s.__abortController.abort(new Error("evicted")):(this.#m||this.#a)&&(this.#m&&this.#p?.(s,i,"evict"),this.#a&&this.#r?.push([s,i,"evict"])),this.#E(e),t&&(this.#i[e]=void 0,this.#t[e]=void 0,this.#_.push(e)),this.#n===1?(this.#o=this.#h=0,this.#_.length=0):this.#o=this.#l[e],this.#s.delete(i),this.#n--,e}has(t,e={}){let{updateAgeOnHas:i=this.updateAgeOnHas,status:s}=e,n=this.#s.get(t);if(n!==void 0){let h=this.#t[n];if(this.#e(h)&&h.__staleWhileFetching===void 0)return!1;if(this.#d(n))s&&(s.has="stale",this.#T(s,n));else return i&&this.#z(n),s&&(s.has="hit",this.#T(s,n)),!0}else s&&(s.has="miss");return!1}peek(t,e={}){let{allowStale:i=this.allowStale}=e,s=this.#s.get(t);if(s===void 0||!i&&this.#d(s))return;let n=this.#t[s];return this.#e(n)?n.__staleWhileFetching:n}#D(t,e,i,s){let n=e===void 0?void 0:this.#t[e];if(this.#e(n))return n;let h=new W,{signal:l}=i;l?.addEventListener("abort",()=>h.abort(l.reason),{signal:h.signal});let r={signal:h.signal,options:i,context:s},g=(d,S=!1)=>{let{aborted:a}=h.signal,w=i.ignoreFetchAbort&&d!==void 0;if(i.status&&(a&&!S?(i.status.fetchAborted=!0,i.status.fetchError=h.signal.reason,w&&(i.status.fetchAbortIgnored=!0)):i.status.fetchResolved=!0),a&&!w&&!S)return f(h.signal.reason);let y=c;return 
this.#t[e]===c&&(d===void 0?y.__staleWhileFetching?this.#t[e]=y.__staleWhileFetching:this.delete(t):(i.status&&(i.status.fetchUpdated=!0),this.set(t,d,r.options))),d},b=d=>(i.status&&(i.status.fetchRejected=!0,i.status.fetchError=d),f(d)),f=d=>{let{aborted:S}=h.signal,a=S&&i.allowStaleOnFetchAbort,w=a||i.allowStaleOnFetchRejection,y=w||i.noDeleteOnFetchRejection,p=c;if(this.#t[e]===c&&(!y||p.__staleWhileFetching===void 0?this.delete(t):a||(this.#t[e]=p.__staleWhileFetching)),w)return i.status&&p.__staleWhileFetching!==void 0&&(i.status.returnedStale=!0),p.__staleWhileFetching;if(p.__returned===p)throw d},u=(d,S)=>{let a=this.#C?.(t,n,r);a&&a instanceof Promise&&a.then(w=>d(w===void 0?void 0:w),S),h.signal.addEventListener("abort",()=>{(!i.ignoreFetchAbort||i.allowStaleOnFetchAbort)&&(d(void 0),i.allowStaleOnFetchAbort&&(d=w=>g(w,!0)))})};i.status&&(i.status.fetchDispatched=!0);let c=new Promise(u).then(g,b),F=Object.assign(c,{__abortController:h,__staleWhileFetching:n,__returned:void 0});return e===void 0?(this.set(t,F,{...r.options,status:void 0}),e=this.#s.get(t)):this.#t[e]=F,F}#e(t){if(!this.#O)return!1;let e=t;return!!e&&e instanceof Promise&&e.hasOwnProperty("__staleWhileFetching")&&e.__abortController instanceof W}async fetch(t,e={}){let{allowStale:i=this.allowStale,updateAgeOnGet:s=this.updateAgeOnGet,noDeleteOnStaleGet:n=this.noDeleteOnStaleGet,ttl:h=this.ttl,noDisposeOnSet:l=this.noDisposeOnSet,size:r=0,sizeCalculation:g=this.sizeCalculation,noUpdateTTL:b=this.noUpdateTTL,noDeleteOnFetchRejection:f=this.noDeleteOnFetchRejection,allowStaleOnFetchRejection:u=this.allowStaleOnFetchRejection,ignoreFetchAbort:c=this.ignoreFetchAbort,allowStaleOnFetchAbort:F=this.allowStaleOnFetchAbort,context:d,forceRefresh:S=!1,status:a,signal:w}=e;if(!this.#O)return a&&(a.fetch="get"),this.get(t,{allowStale:i,updateAgeOnGet:s,noDeleteOnStaleGet:n,status:a});let y={allowStale:i,updateAgeOnGet:s,noDeleteOnStaleGet:n,ttl:h,noDisposeOnSet:l,size:r,sizeCalculation:g,noUpdateTTL:b,noDeleteOnFetchRejection:f,allowStaleOnFetchRejection:u,allowStaleOnFetchAbort:F,ignoreFetchAbort:c,status:a,signal:w},p=this.#s.get(t);if(p===void 0){a&&(a.fetch="miss");let _=this.#D(t,p,y,d);return _.__returned=_}else{let _=this.#t[p];if(this.#e(_)){let U=i&&_.__staleWhileFetching!==void 0;return a&&(a.fetch="inflight",U&&(a.returnedStale=!0)),U?_.__staleWhileFetching:_.__returned=_}let T=this.#d(p);if(!S&&!T)return a&&(a.fetch="hit"),this.#v(p),s&&this.#z(p),a&&this.#T(a,p),_;let m=this.#D(t,p,y,d),x=m.__staleWhileFetching!==void 0&&i;return a&&(a.fetch=T?"stale":"refresh",x&&T&&(a.returnedStale=!0)),x?m.__staleWhileFetching:m.__returned=m}}get(t,e={}){let{allowStale:i=this.allowStale,updateAgeOnGet:s=this.updateAgeOnGet,noDeleteOnStaleGet:n=this.noDeleteOnStaleGet,status:h}=e,l=this.#s.get(t);if(l!==void 0){let r=this.#t[l],g=this.#e(r);return h&&this.#T(h,l),this.#d(l)?(h&&(h.get="stale"),g?(h&&i&&r.__staleWhileFetching!==void 0&&(h.returnedStale=!0),i?r.__staleWhileFetching:void 0):(n||this.delete(t),h&&i&&(h.returnedStale=!0),i?r:void 0)):(h&&(h.get="hit"),g?r.__staleWhileFetching:(this.#v(l),s&&this.#z(l),r))}else h&&(h.get="miss")}#I(t,e){this.#c[e]=t,this.#l[t]=e}#v(t){t!==this.#h&&(t===this.#o?this.#o=this.#l[t]:this.#I(this.#c[t],this.#l[t]),this.#I(this.#h,t),this.#h=t)}delete(t){let e=!1;if(this.#n!==0){let i=this.#s.get(t);if(i!==void 0)if(e=!0,this.#n===1)this.clear();else{this.#E(i);let s=this.#t[i];if(this.#e(s)?s.__abortController.abort(new 
Error("deleted")):(this.#m||this.#a)&&(this.#m&&this.#p?.(s,t,"delete"),this.#a&&this.#r?.push([s,t,"delete"])),this.#s.delete(t),this.#i[i]=void 0,this.#t[i]=void 0,i===this.#h)this.#h=this.#c[i];else if(i===this.#o)this.#o=this.#l[i];else{let n=this.#c[i];this.#l[n]=this.#l[i];let h=this.#l[i];this.#c[h]=this.#c[i]}this.#n--,this.#_.push(i)}}if(this.#a&&this.#r?.length){let i=this.#r,s;for(;s=i?.shift();)this.#w?.(...s)}return e}clear(){for(let t of this.#F({allowStale:!0})){let e=this.#t[t];if(this.#e(e))e.__abortController.abort(new Error("deleted"));else{let i=this.#i[t];this.#m&&this.#p?.(e,i,"delete"),this.#a&&this.#r?.push([e,i,"delete"])}}if(this.#s.clear(),this.#t.fill(void 0),this.#i.fill(void 0),this.#u&&this.#y&&(this.#u.fill(0),this.#y.fill(0)),this.#b&&this.#b.fill(0),this.#o=0,this.#h=0,this.#_.length=0,this.#S=0,this.#n=0,this.#a&&this.#r){let t=this.#r,e;for(;e=t?.shift();)this.#w?.(...e)}}};export{R as LRUCache}; +//# sourceMappingURL=index.min.js.map diff --git a/deps/npm/node_modules/lru-cache/package.json b/deps/npm/node_modules/lru-cache/package.json index 348e2118a7c25c..ef118623196112 100644 --- a/deps/npm/node_modules/lru-cache/package.json +++ b/deps/npm/node_modules/lru-cache/package.json @@ -1,7 +1,7 @@ { "name": "lru-cache", "description": "A cache object that deletes the least-recently-used items.", - "version": "10.2.0", + "version": "10.2.2", "author": "Isaac Z. Schlueter ", "keywords": [ "mru", @@ -11,8 +11,7 @@ "sideEffects": false, "scripts": { "build": "npm run prepare", - "prepare": "tshy", - "postprepare": "bash fixup.sh", + "prepare": "tshy && bash fixup.sh", "pretest": "npm run prepare", "presnap": "npm run prepare", "test": "tap", @@ -35,8 +34,8 @@ ".": "./src/index.ts", "./min": { "import": { - "types": "./dist/mjs/index.d.ts", - "default": "./dist/mjs/index.min.js" + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.min.js" }, "require": { "types": "./dist/commonjs/index.d.ts", @@ -105,8 +104,8 @@ }, "./min": { "import": { - "types": "./dist/mjs/index.d.ts", - "default": "./dist/mjs/index.min.js" + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.min.js" }, "require": { "types": "./dist/commonjs/index.d.ts", diff --git a/deps/npm/node_modules/make-fetch-happen/lib/cache/entry.js b/deps/npm/node_modules/make-fetch-happen/lib/cache/entry.js index 45141095074ecb..bfcfacbcc95e18 100644 --- a/deps/npm/node_modules/make-fetch-happen/lib/cache/entry.js +++ b/deps/npm/node_modules/make-fetch-happen/lib/cache/entry.js @@ -274,6 +274,8 @@ class CacheEntry { const cacheWritePromise = new Promise((resolve, reject) => { cacheWriteResolve = resolve cacheWriteReject = reject + }).catch((err) => { + body.emit('error', err) }) body = new CachingMinipassPipeline({ events: ['integrity', 'size'] }, new MinipassFlush({ diff --git a/deps/npm/node_modules/make-fetch-happen/lib/remote.js b/deps/npm/node_modules/make-fetch-happen/lib/remote.js index 2aef9f8f969b00..8554564074de6e 100644 --- a/deps/npm/node_modules/make-fetch-happen/lib/remote.js +++ b/deps/npm/node_modules/make-fetch-happen/lib/remote.js @@ -2,6 +2,7 @@ const { Minipass } = require('minipass') const fetch = require('minipass-fetch') const promiseRetry = require('promise-retry') const ssri = require('ssri') +const { log } = require('proc-log') const CachingMinipassPipeline = require('./pipeline.js') const { getAgent } = require('@npmcli/agent') @@ -89,6 +90,8 @@ const remoteFetch = (request, options) => { options.onRetry(res) } + /* eslint-disable-next-line max-len */ + 
log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${res.status}`) return retryHandler(res) } @@ -112,6 +115,7 @@ const remoteFetch = (request, options) => { options.onRetry(err) } + log.http('fetch', `${req.method} ${req.url} attempt ${attemptNum} failed with ${err.code}`) return retryHandler(err) } }, options.retry).catch((err) => { diff --git a/deps/npm/node_modules/make-fetch-happen/package.json b/deps/npm/node_modules/make-fetch-happen/package.json index a874ace6d1d472..7adb4d1e7f9719 100644 --- a/deps/npm/node_modules/make-fetch-happen/package.json +++ b/deps/npm/node_modules/make-fetch-happen/package.json @@ -1,6 +1,6 @@ { "name": "make-fetch-happen", - "version": "13.0.0", + "version": "13.0.1", "description": "Opinionated, caching, retrying fetch client", "main": "lib/index.js", "files": [ @@ -11,7 +11,7 @@ "test": "tap", "posttest": "npm run lint", "eslint": "eslint", - "lint": "eslint \"**/*.js\"", + "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "lintfix": "npm run lint -- --fix", "postlint": "template-oss-check", "snap": "tap", @@ -42,12 +42,13 @@ "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", "negotiator": "^0.6.3", + "proc-log": "^4.2.0", "promise-retry": "^2.0.1", "ssri": "^10.0.0" }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.18.0", + "@npmcli/template-oss": "4.21.4", "nock": "^13.2.4", "safe-buffer": "^5.2.1", "standard-version": "^9.3.2", @@ -68,13 +69,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "ciVersions": [ - "16.14.0", - "16.x", - "18.0.0", - "18.x" - ], - "version": "4.18.0", + "version": "4.21.4", "publish": "true" } } diff --git a/deps/npm/node_modules/@npmcli/disparity-colors/LICENSE b/deps/npm/node_modules/node-gyp/node_modules/proc-log/LICENSE similarity index 96% rename from deps/npm/node_modules/@npmcli/disparity-colors/LICENSE rename to deps/npm/node_modules/node-gyp/node_modules/proc-log/LICENSE index dedcd7d2f9daec..83837797202b70 100644 --- a/deps/npm/node_modules/@npmcli/disparity-colors/LICENSE +++ b/deps/npm/node_modules/node-gyp/node_modules/proc-log/LICENSE @@ -1,6 +1,6 @@ The ISC License -Copyright (c) npm Inc. +Copyright (c) GitHub, Inc. 
Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/deps/npm/node_modules/node-gyp/node_modules/proc-log/lib/index.js b/deps/npm/node_modules/node-gyp/node_modules/proc-log/lib/index.js new file mode 100644 index 00000000000000..7c5dfad3b7ba3f --- /dev/null +++ b/deps/npm/node_modules/node-gyp/node_modules/proc-log/lib/index.js @@ -0,0 +1,23 @@ +// emits 'log' events on the process +const LEVELS = [ + 'notice', + 'error', + 'warn', + 'info', + 'verbose', + 'http', + 'silly', + 'pause', + 'resume', +] + +const log = level => (...args) => process.emit('log', level, ...args) + +const logger = {} +for (const level of LEVELS) { + logger[level] = log(level) +} + +logger.LEVELS = LEVELS + +module.exports = logger diff --git a/deps/npm/node_modules/npmlog/package.json b/deps/npm/node_modules/node-gyp/node_modules/proc-log/package.json similarity index 59% rename from deps/npm/node_modules/npmlog/package.json rename to deps/npm/node_modules/node-gyp/node_modules/proc-log/package.json index dbcc772d37ab7a..d335fa965ace51 100644 --- a/deps/npm/node_modules/npmlog/package.json +++ b/deps/npm/node_modules/node-gyp/node_modules/proc-log/package.json @@ -1,52 +1,44 @@ { - "author": "GitHub Inc.", - "name": "npmlog", - "description": "logger for npm", - "version": "7.0.1", - "repository": { - "type": "git", - "url": "https://github.com/npm/npmlog.git" - }, - "main": "lib/log.js", + "name": "proc-log", + "version": "3.0.0", "files": [ "bin/", "lib/" ], + "main": "lib/index.js", + "description": "just emit 'log' events on the process object", + "repository": { + "type": "git", + "url": "https://github.com/npm/proc-log.git" + }, + "author": "GitHub Inc.", + "license": "ISC", "scripts": { "test": "tap", - "npmclilint": "npmcli-lint", - "lint": "eslint \"**/*.js\"", - "lintfix": "npm run lint -- --fix", + "snap": "tap", "posttest": "npm run lint", - "postsnap": "npm run lintfix --", + "postsnap": "eslint index.js test/*.js --fix", + "lint": "eslint \"**/*.js\"", "postlint": "template-oss-check", - "snap": "tap", + "lintfix": "npm run lint -- --fix", "template-oss-apply": "template-oss-apply --force" }, - "dependencies": { - "are-we-there-yet": "^4.0.0", - "console-control-strings": "^1.1.0", - "gauge": "^5.0.0", - "set-blocking": "^2.0.0" - }, "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.6.1", + "@npmcli/eslint-config": "^3.0.1", + "@npmcli/template-oss": "4.5.1", "tap": "^16.0.1" }, - "license": "ISC", "engines": { "node": "^14.17.0 || ^16.13.0 || >=18.0.0" }, + "templateOSS": { + "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", + "version": "4.5.1" + }, "tap": { - "branches": 95, "nyc-arg": [ "--exclude", "tap-snapshots/**" ] - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.6.1" } } diff --git a/deps/npm/node_modules/npm-package-arg/lib/npa.js b/deps/npm/node_modules/npm-package-arg/lib/npa.js index 23bf68d2e04a39..6a3f07da929d87 100644 --- a/deps/npm/node_modules/npm-package-arg/lib/npa.js +++ b/deps/npm/node_modules/npm-package-arg/lib/npa.js @@ -10,7 +10,7 @@ const semver = require('semver') const path = global.FAKE_WINDOWS ? 
require('path').win32 : require('path') const validatePackageName = require('validate-npm-package-name') const { homedir } = require('os') -const log = require('proc-log') +const { log } = require('proc-log') const isWindows = process.platform === 'win32' || global.FAKE_WINDOWS const hasSlashes = isWindows ? /\\|[/]/ : /[/]/ diff --git a/deps/npm/node_modules/npm-package-arg/package.json b/deps/npm/node_modules/npm-package-arg/package.json index f7965d5a007c9d..c4cee1f928e89d 100644 --- a/deps/npm/node_modules/npm-package-arg/package.json +++ b/deps/npm/node_modules/npm-package-arg/package.json @@ -1,6 +1,6 @@ { "name": "npm-package-arg", - "version": "11.0.1", + "version": "11.0.2", "description": "Parse the things that can be arguments to `npm install`", "main": "./lib/npa.js", "directories": { @@ -12,20 +12,20 @@ ], "dependencies": { "hosted-git-info": "^7.0.0", - "proc-log": "^3.0.0", + "proc-log": "^4.0.0", "semver": "^7.3.5", "validate-npm-package-name": "^5.0.0" }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.18.0", + "@npmcli/template-oss": "4.21.3", "tap": "^16.0.1" }, "scripts": { "test": "tap", "snap": "tap", "npmclilint": "npmcli-lint", - "lint": "eslint \"**/*.js\"", + "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "lintfix": "npm run lint -- --fix", "posttest": "npm run lint", "postsnap": "npm run lintfix --", @@ -54,14 +54,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.18.0", - "publish": true, - "ciVersions": [ - "16.14.0", - "16.x", - "18.0.0", - "18.x" - ], - "npmSpec": "next-9" + "version": "4.21.3", + "publish": true } } diff --git a/deps/npm/node_modules/npm-profile/lib/index.js b/deps/npm/node_modules/npm-profile/lib/index.js index ce78882a55438d..e5b5dd046baf2e 100644 --- a/deps/npm/node_modules/npm-profile/lib/index.js +++ b/deps/npm/node_modules/npm-profile/lib/index.js @@ -5,7 +5,7 @@ const { HttpErrorBase } = require('npm-registry-fetch/lib/errors') const EventEmitter = require('events') const os = require('os') const { URL } = require('url') -const log = require('proc-log') +const { log } = require('proc-log') // try loginWeb, catch the "not supported" message and fall back to couch const login = (opener, prompter, opts = {}) => { @@ -276,8 +276,7 @@ class WebLoginNotSupported extends HttpErrorBase { } } -const sleep = (ms) => - new Promise((resolve, reject) => setTimeout(resolve, ms)) +const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms)) module.exports = { adduserCouch, diff --git a/deps/npm/node_modules/npm-profile/package.json b/deps/npm/node_modules/npm-profile/package.json index af57e9e73509c3..acdf4d6baf2ee3 100644 --- a/deps/npm/node_modules/npm-profile/package.json +++ b/deps/npm/node_modules/npm-profile/package.json @@ -1,13 +1,13 @@ { "name": "npm-profile", - "version": "9.0.0", + "version": "9.0.2", "description": "Library for updating an npmjs.com profile", "keywords": [], "author": "GitHub Inc.", "license": "ISC", "dependencies": { - "npm-registry-fetch": "^16.0.0", - "proc-log": "^3.0.0" + "npm-registry-fetch": "^17.0.0", + "proc-log": "^4.0.0" }, "main": "./lib/index.js", "repository": { @@ -20,7 +20,7 @@ ], "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.18.0", + "@npmcli/template-oss": "4.21.4", "nock": "^13.2.4", "tap": "^16.0.1" }, @@ -28,7 +28,7 @@ "posttest": "npm run lint", "test": "tap", "snap": "tap", - "lint": "eslint \"**/*.js\"", + 
"lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "postlint": "template-oss-check", "lintfix": "npm run lint -- --fix", "template-oss-apply": "template-oss-apply --force" @@ -45,13 +45,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.18.0", - "ciVersions": [ - "16.14.0", - "16.x", - "18.0.0", - "18.x" - ], + "version": "4.21.4", "publish": true } } diff --git a/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js b/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js index 183311d8403977..65eea2963b0b4c 100644 --- a/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js +++ b/deps/npm/node_modules/npm-registry-fetch/lib/check-response.js @@ -3,7 +3,7 @@ const errors = require('./errors.js') const { Response } = require('minipass-fetch') const defaultOpts = require('./default-opts.js') -const log = require('proc-log') +const { log } = require('proc-log') const { redact: cleanUrl } = require('@npmcli/redact') /* eslint-disable-next-line max-len */ diff --git a/deps/npm/node_modules/npm-registry-fetch/lib/index.js b/deps/npm/node_modules/npm-registry-fetch/lib/index.js index 1d77a77024bf50..bce6e6b1aae0ac 100644 --- a/deps/npm/node_modules/npm-registry-fetch/lib/index.js +++ b/deps/npm/node_modules/npm-registry-fetch/lib/index.js @@ -10,7 +10,6 @@ const qs = require('querystring') const url = require('url') const zlib = require('minizlib') const { Minipass } = require('minipass') -const { redact: cleanUrl } = require('@npmcli/redact') const defaultOpts = require('./default-opts.js') @@ -246,7 +245,3 @@ function getHeaders (uri, auth, opts) { return headers } - -// export cleanUrl to avoid a breaking change -// TODO: next semver major remove this. Consumers should use @npmcli/redact instead -module.exports.cleanUrl = cleanUrl diff --git a/deps/npm/node_modules/npm-registry-fetch/package.json b/deps/npm/node_modules/npm-registry-fetch/package.json index 88455a4971af0b..52820a6a206ecb 100644 --- a/deps/npm/node_modules/npm-registry-fetch/package.json +++ b/deps/npm/node_modules/npm-registry-fetch/package.json @@ -1,6 +1,6 @@ { "name": "npm-registry-fetch", - "version": "16.2.0", + "version": "17.0.0", "description": "Fetch-based http client for use with npm registry APIs", "main": "lib", "files": [ @@ -31,18 +31,18 @@ "author": "GitHub Inc.", "license": "ISC", "dependencies": { - "@npmcli/redact": "^1.1.0", + "@npmcli/redact": "^2.0.0", "make-fetch-happen": "^13.0.0", "minipass": "^7.0.2", "minipass-fetch": "^3.0.0", "minipass-json-stream": "^1.0.1", "minizlib": "^2.1.2", "npm-package-arg": "^11.0.0", - "proc-log": "^3.0.0" + "proc-log": "^4.0.0" }, "devDependencies": { "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", + "@npmcli/template-oss": "4.21.4", "cacache": "^18.0.0", "nock": "^13.2.4", "require-inject": "^1.4.4", @@ -62,7 +62,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", + "version": "4.21.4", "publish": "true" } } diff --git a/deps/npm/node_modules/npmlog/LICENSE.md b/deps/npm/node_modules/npmlog/LICENSE.md deleted file mode 100644 index 5fc208ff122e08..00000000000000 --- a/deps/npm/node_modules/npmlog/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ - - -ISC License - -Copyright npm, Inc. 
- -Permission to use, copy, modify, and/or distribute this -software for any purpose with or without fee is hereby -granted, provided that the above copyright notice and this -permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND NPM DISCLAIMS ALL -WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO -EVENT SHALL NPM BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/npmlog/lib/log.js b/deps/npm/node_modules/npmlog/lib/log.js deleted file mode 100644 index 38106ea34ae08d..00000000000000 --- a/deps/npm/node_modules/npmlog/lib/log.js +++ /dev/null @@ -1,400 +0,0 @@ -'use strict' -var Progress = require('are-we-there-yet') -var Gauge = require('gauge') -var EE = require('events').EventEmitter -var log = exports = module.exports = new EE() -var util = require('util') - -var setBlocking = require('set-blocking') -var consoleControl = require('console-control-strings') - -setBlocking(true) -var stream = process.stderr -Object.defineProperty(log, 'stream', { - set: function (newStream) { - stream = newStream - if (this.gauge) { - this.gauge.setWriteTo(stream, stream) - } - }, - get: function () { - return stream - }, -}) - -// by default, decide based on tty-ness. -var colorEnabled -log.useColor = function () { - return colorEnabled != null ? colorEnabled : stream.isTTY -} - -log.enableColor = function () { - colorEnabled = true - this.gauge.setTheme({ hasColor: colorEnabled, hasUnicode: unicodeEnabled }) -} -log.disableColor = function () { - colorEnabled = false - this.gauge.setTheme({ hasColor: colorEnabled, hasUnicode: unicodeEnabled }) -} - -// default level -log.level = 'info' - -log.gauge = new Gauge(stream, { - enabled: false, // no progress bars unless asked - theme: { hasColor: log.useColor() }, - template: [ - { type: 'progressbar', length: 20 }, - { type: 'activityIndicator', kerning: 1, length: 1 }, - { type: 'section', default: '' }, - ':', - { type: 'logline', kerning: 1, default: '' }, - ], -}) - -log.tracker = new Progress.TrackerGroup() - -// we track this separately as we may need to temporarily disable the -// display of the status bar for our own loggy purposes. 
-log.progressEnabled = log.gauge.isEnabled() - -var unicodeEnabled - -log.enableUnicode = function () { - unicodeEnabled = true - this.gauge.setTheme({ hasColor: this.useColor(), hasUnicode: unicodeEnabled }) -} - -log.disableUnicode = function () { - unicodeEnabled = false - this.gauge.setTheme({ hasColor: this.useColor(), hasUnicode: unicodeEnabled }) -} - -log.setGaugeThemeset = function (themes) { - this.gauge.setThemeset(themes) -} - -log.setGaugeTemplate = function (template) { - this.gauge.setTemplate(template) -} - -log.enableProgress = function () { - if (this.progressEnabled || this._paused) { - return - } - - this.progressEnabled = true - this.tracker.on('change', this.showProgress) - this.gauge.enable() -} - -log.disableProgress = function () { - if (!this.progressEnabled) { - return - } - this.progressEnabled = false - this.tracker.removeListener('change', this.showProgress) - this.gauge.disable() -} - -var trackerConstructors = ['newGroup', 'newItem', 'newStream'] - -var mixinLog = function (tracker) { - // mixin the public methods from log into the tracker - // (except: conflicts and one's we handle specially) - Object.keys(log).forEach(function (P) { - if (P[0] === '_') { - return - } - - if (trackerConstructors.filter(function (C) { - return C === P - }).length) { - return - } - - if (tracker[P]) { - return - } - - if (typeof log[P] !== 'function') { - return - } - - var func = log[P] - tracker[P] = function () { - return func.apply(log, arguments) - } - }) - // if the new tracker is a group, make sure any subtrackers get - // mixed in too - if (tracker instanceof Progress.TrackerGroup) { - trackerConstructors.forEach(function (C) { - var func = tracker[C] - tracker[C] = function () { - return mixinLog(func.apply(tracker, arguments)) - } - }) - } - return tracker -} - -// Add tracker constructors to the top level log object -trackerConstructors.forEach(function (C) { - log[C] = function () { - return mixinLog(this.tracker[C].apply(this.tracker, arguments)) - } -}) - -log.clearProgress = function (cb) { - if (!this.progressEnabled) { - return cb && process.nextTick(cb) - } - - this.gauge.hide(cb) -} - -log.showProgress = function (name, completed) { - if (!this.progressEnabled) { - return - } - - var values = {} - if (name) { - values.section = name - } - - var last = log.record[log.record.length - 1] - if (last) { - values.subsection = last.prefix - var disp = log.disp[last.level] || last.level - var logline = this._format(disp, log.style[last.level]) - if (last.prefix) { - logline += ' ' + this._format(last.prefix, this.prefixStyle) - } - - logline += ' ' + last.message.split(/\r?\n/)[0] - values.logline = logline - } - values.completed = completed || this.tracker.completed() - this.gauge.show(values) -}.bind(log) // bind for use in tracker's on-change listener - -// temporarily stop emitting, but don't drop -log.pause = function () { - this._paused = true - if (this.progressEnabled) { - this.gauge.disable() - } -} - -log.resume = function () { - if (!this._paused) { - return - } - - this._paused = false - - var b = this._buffer - this._buffer = [] - b.forEach(function (m) { - this.emitLog(m) - }, this) - if (this.progressEnabled) { - this.gauge.enable() - } -} - -log._buffer = [] - -var id = 0 -log.record = [] -log.maxRecordSize = 10000 -log.log = function (lvl, prefix, message) { - var l = this.levels[lvl] - if (l === undefined) { - return this.emit('error', new Error(util.format( - 'Undefined log level: %j', lvl))) - } - - var a = new Array(arguments.length - 2) - var 
stack = null - for (var i = 2; i < arguments.length; i++) { - var arg = a[i - 2] = arguments[i] - - // resolve stack traces to a plain string. - if (typeof arg === 'object' && arg instanceof Error && arg.stack) { - Object.defineProperty(arg, 'stack', { - value: stack = arg.stack + '', - enumerable: true, - writable: true, - }) - } - } - if (stack) { - a.unshift(stack + '\n') - } - message = util.format.apply(util, a) - - var m = { - id: id++, - level: lvl, - prefix: String(prefix || ''), - message: message, - messageRaw: a, - } - - this.emit('log', m) - this.emit('log.' + lvl, m) - if (m.prefix) { - this.emit(m.prefix, m) - } - - this.record.push(m) - var mrs = this.maxRecordSize - var n = this.record.length - mrs - if (n > mrs / 10) { - var newSize = Math.floor(mrs * 0.9) - this.record = this.record.slice(-1 * newSize) - } - - this.emitLog(m) -}.bind(log) - -log.emitLog = function (m) { - if (this._paused) { - this._buffer.push(m) - return - } - if (this.progressEnabled) { - this.gauge.pulse(m.prefix) - } - - var l = this.levels[m.level] - if (l === undefined) { - return - } - - if (l < this.levels[this.level]) { - return - } - - if (l > 0 && !isFinite(l)) { - return - } - - // If 'disp' is null or undefined, use the lvl as a default - // Allows: '', 0 as valid disp - var disp = log.disp[m.level] != null ? log.disp[m.level] : m.level - this.clearProgress() - m.message.split(/\r?\n/).forEach(function (line) { - var heading = this.heading - if (heading) { - this.write(heading, this.headingStyle) - this.write(' ') - } - this.write(disp, log.style[m.level]) - var p = m.prefix || '' - if (p) { - this.write(' ') - } - - this.write(p, this.prefixStyle) - this.write(' ' + line + '\n') - }, this) - this.showProgress() -} - -log._format = function (msg, style) { - if (!stream) { - return - } - - var output = '' - if (this.useColor()) { - style = style || {} - var settings = [] - if (style.fg) { - settings.push(style.fg) - } - - if (style.bg) { - settings.push('bg' + style.bg[0].toUpperCase() + style.bg.slice(1)) - } - - if (style.bold) { - settings.push('bold') - } - - if (style.underline) { - settings.push('underline') - } - - if (style.inverse) { - settings.push('inverse') - } - - if (settings.length) { - output += consoleControl.color(settings) - } - - if (style.beep) { - output += consoleControl.beep() - } - } - output += msg - if (this.useColor()) { - output += consoleControl.color('reset') - } - - return output -} - -log.write = function (msg, style) { - if (!stream) { - return - } - - stream.write(this._format(msg, style)) -} - -log.addLevel = function (lvl, n, style, disp) { - // If 'disp' is null or undefined, use the lvl as a default - if (disp == null) { - disp = lvl - } - - this.levels[lvl] = n - this.style[lvl] = style - if (!this[lvl]) { - this[lvl] = function () { - var a = new Array(arguments.length + 1) - a[0] = lvl - for (var i = 0; i < arguments.length; i++) { - a[i + 1] = arguments[i] - } - - return this.log.apply(this, a) - }.bind(this) - } - this.disp[lvl] = disp -} - -log.prefixStyle = { fg: 'magenta' } -log.headingStyle = { fg: 'white', bg: 'black' } - -log.style = {} -log.levels = {} -log.disp = {} -log.addLevel('silly', -Infinity, { inverse: true }, 'sill') -log.addLevel('verbose', 1000, { fg: 'cyan', bg: 'black' }, 'verb') -log.addLevel('info', 2000, { fg: 'green' }) -log.addLevel('timing', 2500, { fg: 'green', bg: 'black' }) -log.addLevel('http', 3000, { fg: 'green', bg: 'black' }) -log.addLevel('notice', 3500, { fg: 'cyan', bg: 'black' }) -log.addLevel('warn', 4000, { 
fg: 'black', bg: 'yellow' }, 'WARN') -log.addLevel('error', 5000, { fg: 'red', bg: 'black' }, 'ERR!') -log.addLevel('silent', Infinity) - -// allow 'error' prefix -log.on('error', function () {}) diff --git a/deps/npm/node_modules/pacote/README.md b/deps/npm/node_modules/pacote/README.md index 17c027dfa048f3..dbb0051de23a4d 100644 --- a/deps/npm/node_modules/pacote/README.md +++ b/deps/npm/node_modules/pacote/README.md @@ -166,8 +166,6 @@ resolved, and other properties, as they are determined. calls. This allows you to easily avoid hitting the registry multiple times (even just to validate the cache) for a given packument, since it is unlikely to change in the span of a single command. -* `silent` A boolean that determines whether the banner is displayed - when calling `@npmcli/run-script`. * `verifySignatures` A boolean that will make pacote verify the integrity signature of a manifest, if present. There must be a configured `_keys` entry in the config that is scoped to the diff --git a/deps/npm/node_modules/pacote/lib/dir.js b/deps/npm/node_modules/pacote/lib/dir.js index 420afc5802cb2f..135be8e6cba833 100644 --- a/deps/npm/node_modules/pacote/lib/dir.js +++ b/deps/npm/node_modules/pacote/lib/dir.js @@ -41,16 +41,11 @@ class DirFetcher extends Fetcher { // but this function is *also* run when installing git deps const stdio = this.opts.foregroundScripts ? 'inherit' : 'pipe' - // hide the banner if silent opt is passed in, or if prepare running - // in the background. - const banner = this.opts.silent ? false : stdio === 'inherit' - return runScript({ pkg: mani, event: 'prepare', path: this.resolved, stdio, - banner, env: { npm_package_resolved: this.resolved, npm_package_integrity: this.integrity, @@ -92,7 +87,7 @@ class DirFetcher extends Fetcher { return Promise.resolve(this.package) } - return this[_readPackageJson](this.resolved + '/package.json') + return this[_readPackageJson](this.resolved) .then(mani => this.package = { ...mani, _integrity: this.integrity && String(this.integrity), diff --git a/deps/npm/node_modules/pacote/lib/fetcher.js b/deps/npm/node_modules/pacote/lib/fetcher.js index f961a45c7d3461..c4a707e7ef81ef 100644 --- a/deps/npm/node_modules/pacote/lib/fetcher.js +++ b/deps/npm/node_modules/pacote/lib/fetcher.js @@ -5,10 +5,9 @@ const npa = require('npm-package-arg') const ssri = require('ssri') -const { promisify } = require('util') const { basename, dirname } = require('path') const tar = require('tar') -const log = require('proc-log') +const { log } = require('proc-log') const retry = require('promise-retry') const fs = require('fs/promises') const fsm = require('fs-minipass') @@ -16,12 +15,14 @@ const cacache = require('cacache') const isPackageBin = require('./util/is-package-bin.js') const removeTrailingSlashes = require('./util/trailing-slashes.js') const getContents = require('@npmcli/installed-package-contents') -const readPackageJsonFast = require('read-package-json-fast') -const readPackageJson = promisify(require('read-package-json')) +const PackageJson = require('@npmcli/package-json') const { Minipass } = require('minipass') - const cacheDir = require('./util/cache-dir.js') +// Pacote is only concerned with the package.json contents +const packageJsonPrepare = (p) => PackageJson.prepare(p).then(pkg => pkg.content) +const packageJsonNormalize = (p) => PackageJson.normalize(p).then(pkg => pkg.content) + // Private methods. // Child classes should not have to override these. // Users should never call them. 
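The fetcher.js hunk above swaps pacote's use of read-package-json / read-package-json-fast for @npmcli/package-json, reading manifests via the new packageJsonPrepare/packageJsonNormalize helpers. A minimal sketch of the assumed read pattern, built only from the calls visible in this diff (the directory path, function name, and fullReadJson flag are illustrative, not part of the patch):

const PackageJson = require('@npmcli/package-json')

// Read a manifest from a package directory and return only its contents,
// mirroring the helpers added in the hunk above: prepare() for the heavier
// normalization previously done by read-package-json, normalize() for the
// lighter pass previously done by read-package-json-fast.
async function readManifest (dir, fullReadJson = false) {
  const pkg = fullReadJson
    ? await PackageJson.prepare(dir)
    : await PackageJson.normalize(dir)
  return pkg.content
}

// usage (illustrative path):
// readManifest('./some-package').then(mani => console.log(mani.name, mani.version))
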
@@ -93,9 +94,9 @@ class FetcherBase { this.fullMetadata = this.before ? true : !!opts.fullMetadata this.fullReadJson = !!opts.fullReadJson if (this.fullReadJson) { - this[_readPackageJson] = readPackageJson + this[_readPackageJson] = packageJsonPrepare } else { - this[_readPackageJson] = readPackageJsonFast + this[_readPackageJson] = packageJsonNormalize } // rrh is a registry hostname or 'never' or 'always' diff --git a/deps/npm/node_modules/pacote/lib/file.js b/deps/npm/node_modules/pacote/lib/file.js index bf99bb86e359ed..95769de1374c97 100644 --- a/deps/npm/node_modules/pacote/lib/file.js +++ b/deps/npm/node_modules/pacote/lib/file.js @@ -1,10 +1,11 @@ -const Fetcher = require('./fetcher.js') const fsm = require('fs-minipass') const cacache = require('cacache') -const _tarballFromResolved = Symbol.for('pacote.Fetcher._tarballFromResolved') -const _exeBins = Symbol('_exeBins') const { resolve } = require('path') -const fs = require('fs') +const { stat, chmod } = require('fs/promises') +const Fetcher = require('./fetcher.js') + +const _exeBins = Symbol('_exeBins') +const _tarballFromResolved = Symbol.for('pacote.Fetcher._tarballFromResolved') const _readPackageJson = Symbol.for('package.Fetcher._readPackageJson') class FileFetcher extends Fetcher { @@ -26,7 +27,7 @@ class FileFetcher extends Fetcher { // have to unpack the tarball for this. return cacache.tmp.withTmp(this.cache, this.opts, dir => this.extract(dir) - .then(() => this[_readPackageJson](dir + '/package.json')) + .then(() => this[_readPackageJson](dir)) .then(mani => this.package = { ...mani, _integrity: this.integrity && String(this.integrity), @@ -40,23 +41,23 @@ class FileFetcher extends Fetcher { return Promise.resolve() } - return Promise.all(Object.keys(pkg.bin).map(k => new Promise(res => { + return Promise.all(Object.keys(pkg.bin).map(async k => { const script = resolve(dest, pkg.bin[k]) // Best effort. Ignore errors here, the only result is that // a bin script is not executable. But if it's missing or // something, we just leave it for a later stage to trip over // when we can provide a more useful contextual error. - fs.stat(script, (er, st) => { - if (er) { - return res() - } + try { + const st = await stat(script) const mode = st.mode | 0o111 if (mode === st.mode) { - return res() + return } - fs.chmod(script, mode, res) - }) - }))) + await chmod(script, mode) + } catch { + // Ignore errors here + } + })) } extract (dest) { @@ -64,7 +65,7 @@ class FileFetcher extends Fetcher { // but if not, read the unpacked manifest and chmod properly. return super.extract(dest) .then(result => this.package ? 
result - : this[_readPackageJson](dest + '/package.json').then(pkg => + : this[_readPackageJson](dest).then(pkg => this[_exeBins](pkg, dest)).then(() => result)) } diff --git a/deps/npm/node_modules/pacote/lib/git.js b/deps/npm/node_modules/pacote/lib/git.js index 5d24f72497ec9e..2cac44ae528e6e 100644 --- a/deps/npm/node_modules/pacote/lib/git.js +++ b/deps/npm/node_modules/pacote/lib/git.js @@ -8,7 +8,7 @@ const pickManifest = require('npm-pick-manifest') const npa = require('npm-package-arg') const { Minipass } = require('minipass') const cacache = require('cacache') -const log = require('proc-log') +const { log } = require('proc-log') const npm = require('./util/npm.js') const _resolvedFromRepo = Symbol('_resolvedFromRepo') @@ -156,11 +156,11 @@ class GitFetcher extends Fetcher { [_resolvedFromClone] () { // do a full or shallow clone, then look at the HEAD // kind of wasteful, but no other option, really - return this[_clone](dir => this.resolved) + return this[_clone](() => this.resolved) } [_prepareDir] (dir) { - return this[_readPackageJson](dir + '/package.json').then(mani => { + return this[_readPackageJson](dir).then(mani => { // no need if we aren't going to do any preparation. const scripts = mani.scripts if (!mani.workspaces && (!scripts || !( @@ -312,7 +312,7 @@ class GitFetcher extends Fetcher { return this.spec.hosted && this.resolved ? FileFetcher.prototype.manifest.apply(this) : this[_clone](dir => - this[_readPackageJson](dir + '/package.json') + this[_readPackageJson](dir) .then(mani => this.package = { ...mani, _resolved: this.resolved, diff --git a/deps/npm/node_modules/pacote/lib/registry.js b/deps/npm/node_modules/pacote/lib/registry.js index de25a11af46672..b6a8d49b84f321 100644 --- a/deps/npm/node_modules/pacote/lib/registry.js +++ b/deps/npm/node_modules/pacote/lib/registry.js @@ -3,7 +3,7 @@ const RemoteFetcher = require('./remote.js') const _tarballFromResolved = Symbol.for('pacote.Fetcher._tarballFromResolved') const pacoteVersion = require('../package.json').version const removeTrailingSlashes = require('./util/trailing-slashes.js') -const rpj = require('read-package-json-fast') +const PackageJson = require('@npmcli/package-json') const pickManifest = require('npm-pick-manifest') const ssri = require('ssri') const crypto = require('crypto') @@ -127,12 +127,13 @@ class RegistryFetcher extends Fetcher { } const packument = await this.packument() - let mani = await pickManifest(packument, this.spec.fetchSpec, { + const steps = PackageJson.normalizeSteps.filter(s => s !== '_attributes') + const mani = await new PackageJson().fromContent(pickManifest(packument, this.spec.fetchSpec, { ...this.opts, defaultTag: this.defaultTag, before: this.before, - }) - mani = rpj.normalize(mani) + })).normalize({ steps }).then(p => p.content) + /* XXX add ETARGET and E403 revalidation of cached packuments here */ // add _time from packument if fetched with fullMetadata diff --git a/deps/npm/node_modules/pacote/package.json b/deps/npm/node_modules/pacote/package.json index 8fc0bb707f1e52..4c1751644cd57a 100644 --- a/deps/npm/node_modules/pacote/package.json +++ b/deps/npm/node_modules/pacote/package.json @@ -1,6 +1,6 @@ { "name": "pacote", - "version": "17.0.6", + "version": "18.0.3", "description": "JavaScript package downloader", "author": "GitHub Inc.", "bin": { @@ -27,7 +27,7 @@ "devDependencies": { "@npmcli/arborist": "^7.1.0", "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", + "@npmcli/template-oss": "4.21.4", "hosted-git-info": "^7.0.0", "mutate-fs": 
"^2.1.1", "nock": "^13.2.4", @@ -46,19 +46,18 @@ "dependencies": { "@npmcli/git": "^5.0.0", "@npmcli/installed-package-contents": "^2.0.1", + "@npmcli/package-json": "^5.1.0", "@npmcli/promise-spawn": "^7.0.0", - "@npmcli/run-script": "^7.0.0", + "@npmcli/run-script": "^8.0.0", "cacache": "^18.0.0", "fs-minipass": "^3.0.0", "minipass": "^7.0.2", "npm-package-arg": "^11.0.0", "npm-packlist": "^8.0.0", "npm-pick-manifest": "^9.0.0", - "npm-registry-fetch": "^16.0.0", - "proc-log": "^3.0.0", + "npm-registry-fetch": "^17.0.0", + "proc-log": "^4.0.0", "promise-retry": "^2.0.1", - "read-package-json": "^7.0.0", - "read-package-json-fast": "^3.0.0", "sigstore": "^2.2.0", "ssri": "^10.0.0", "tar": "^6.1.11" @@ -72,7 +71,7 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", + "version": "4.21.4", "windowsCI": false, "publish": "true" } diff --git a/deps/npm/node_modules/proc-log/lib/index.js b/deps/npm/node_modules/proc-log/lib/index.js index 7c5dfad3b7ba3f..86d90861078dab 100644 --- a/deps/npm/node_modules/proc-log/lib/index.js +++ b/deps/npm/node_modules/proc-log/lib/index.js @@ -1,23 +1,153 @@ -// emits 'log' events on the process -const LEVELS = [ - 'notice', - 'error', - 'warn', - 'info', - 'verbose', - 'http', - 'silly', - 'pause', - 'resume', -] - -const log = level => (...args) => process.emit('log', level, ...args) - -const logger = {} -for (const level of LEVELS) { - logger[level] = log(level) +const META = Symbol('proc-log.meta') +module.exports = { + META: META, + output: { + LEVELS: [ + 'standard', + 'error', + 'buffer', + 'flush', + ], + KEYS: { + standard: 'standard', + error: 'error', + buffer: 'buffer', + flush: 'flush', + }, + standard: function (...args) { + return process.emit('output', 'standard', ...args) + }, + error: function (...args) { + return process.emit('output', 'error', ...args) + }, + buffer: function (...args) { + return process.emit('output', 'buffer', ...args) + }, + flush: function (...args) { + return process.emit('output', 'flush', ...args) + }, + }, + log: { + LEVELS: [ + 'notice', + 'error', + 'warn', + 'info', + 'verbose', + 'http', + 'silly', + 'timing', + 'pause', + 'resume', + ], + KEYS: { + notice: 'notice', + error: 'error', + warn: 'warn', + info: 'info', + verbose: 'verbose', + http: 'http', + silly: 'silly', + timing: 'timing', + pause: 'pause', + resume: 'resume', + }, + error: function (...args) { + return process.emit('log', 'error', ...args) + }, + notice: function (...args) { + return process.emit('log', 'notice', ...args) + }, + warn: function (...args) { + return process.emit('log', 'warn', ...args) + }, + info: function (...args) { + return process.emit('log', 'info', ...args) + }, + verbose: function (...args) { + return process.emit('log', 'verbose', ...args) + }, + http: function (...args) { + return process.emit('log', 'http', ...args) + }, + silly: function (...args) { + return process.emit('log', 'silly', ...args) + }, + timing: function (...args) { + return process.emit('log', 'timing', ...args) + }, + pause: function () { + return process.emit('log', 'pause') + }, + resume: function () { + return process.emit('log', 'resume') + }, + }, + time: { + LEVELS: [ + 'start', + 'end', + ], + KEYS: { + start: 'start', + end: 'end', + }, + start: function (name, fn) { + process.emit('time', 'start', name) + function end () { + return process.emit('time', 'end', name) + } + if (typeof fn === 'function') { + const res = fn() + if (res && 
res.finally) { + return res.finally(end) + } + end() + return res + } + return end + }, + end: function (name) { + return process.emit('time', 'end', name) + }, + }, + input: { + LEVELS: [ + 'start', + 'end', + 'read', + ], + KEYS: { + start: 'start', + end: 'end', + read: 'read', + }, + start: function (fn) { + process.emit('input', 'start') + function end () { + return process.emit('input', 'end') + } + if (typeof fn === 'function') { + const res = fn() + if (res && res.finally) { + return res.finally(end) + } + end() + return res + } + return end + }, + end: function () { + return process.emit('input', 'end') + }, + read: function (...args) { + let resolve, reject + const promise = new Promise((_resolve, _reject) => { + resolve = _resolve + reject = _reject + }) + process.emit('input', 'read', resolve, reject, ...args) + return promise + }, + }, } - -logger.LEVELS = LEVELS - -module.exports = logger diff --git a/deps/npm/node_modules/proc-log/package.json b/deps/npm/node_modules/proc-log/package.json index d335fa965ace51..4ab89102ecc9b5 100644 --- a/deps/npm/node_modules/proc-log/package.json +++ b/deps/npm/node_modules/proc-log/package.json @@ -1,6 +1,6 @@ { "name": "proc-log", - "version": "3.0.0", + "version": "4.2.0", "files": [ "bin/", "lib/" @@ -18,14 +18,14 @@ "snap": "tap", "posttest": "npm run lint", "postsnap": "eslint index.js test/*.js --fix", - "lint": "eslint \"**/*.js\"", + "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", "postlint": "template-oss-check", "lintfix": "npm run lint -- --fix", "template-oss-apply": "template-oss-apply --force" }, "devDependencies": { - "@npmcli/eslint-config": "^3.0.1", - "@npmcli/template-oss": "4.5.1", + "@npmcli/eslint-config": "^4.0.0", + "@npmcli/template-oss": "4.21.3", "tap": "^16.0.1" }, "engines": { @@ -33,7 +33,8 @@ }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.5.1" + "version": "4.21.3", + "publish": true }, "tap": { "nyc-arg": [ diff --git a/deps/npm/node_modules/color-support/LICENSE b/deps/npm/node_modules/proggy/LICENSE similarity index 93% rename from deps/npm/node_modules/color-support/LICENSE rename to deps/npm/node_modules/proggy/LICENSE index 19129e315fe593..83837797202b70 100644 --- a/deps/npm/node_modules/color-support/LICENSE +++ b/deps/npm/node_modules/proggy/LICENSE @@ -1,6 +1,6 @@ The ISC License -Copyright (c) Isaac Z. Schlueter and Contributors +Copyright (c) GitHub, Inc. 
Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/deps/npm/node_modules/proggy/lib/client.js b/deps/npm/node_modules/proggy/lib/client.js new file mode 100644 index 00000000000000..2eafb9c75addc2 --- /dev/null +++ b/deps/npm/node_modules/proggy/lib/client.js @@ -0,0 +1,114 @@ +const EE = require('events') +const onProgress = Symbol('onProgress') +const bars = Symbol('bars') +const listener = Symbol('listener') +const normData = Symbol('normData') +class Client extends EE { + constructor ({ normalize = false, stopOnDone = false } = {}) { + super() + this.normalize = !!normalize + this.stopOnDone = !!stopOnDone + this[bars] = new Map() + this[listener] = null + } + + get size () { + return this[bars].size + } + + get listening () { + return !!this[listener] + } + + addListener (...args) { + return this.on(...args) + } + + on (ev, ...args) { + if (ev === 'progress' && !this[listener]) { + this.start() + } + return super.on(ev, ...args) + } + + off (ev, ...args) { + return this.removeListener(ev, ...args) + } + + removeListener (ev, ...args) { + const ret = super.removeListener(ev, ...args) + if (ev === 'progress' && this.listeners(ev).length === 0) { + this.stop() + } + return ret + } + + stop () { + if (this[listener]) { + process.removeListener('progress', this[listener]) + this[listener] = null + } + } + + start () { + if (!this[listener]) { + this[listener] = (...args) => this[onProgress](...args) + process.on('progress', this[listener]) + } + } + + [onProgress] (key, data) { + data = this[normData](key, data) + if (!this[bars].has(key)) { + this.emit('bar', key, data) + } + this[bars].set(key, data) + this.emit('progress', key, data) + if (data.done) { + this[bars].delete(key) + this.emit('barDone', key, data) + if (this.size === 0) { + if (this.stopOnDone) { + this.stop() + } + this.emit('done') + } + } + } + + [normData] (key, data) { + const actualValue = data.value + const actualTotal = data.total + let value = actualValue + let total = actualTotal + const done = data.done || value >= total + if (this.normalize) { + const bar = this[bars].get(key) + total = 100 + if (done) { + value = 100 + } else { + // show value as a portion of 100 + const pct = 100 * actualValue / actualTotal + if (bar) { + // don't ever go backwards, and don't stand still + // move at least 1% of the remaining value if it wouldn't move. + value = (pct > bar.value) ? 
pct + : (100 - bar.value) / 100 + bar.value + } + } + } + // include the key + return { + ...data, + key, + name: data.name || key, + value, + total, + actualValue, + actualTotal, + done, + } + } +} +module.exports = Client diff --git a/deps/npm/node_modules/proggy/lib/index.js b/deps/npm/node_modules/proggy/lib/index.js new file mode 100644 index 00000000000000..834948b4ff8603 --- /dev/null +++ b/deps/npm/node_modules/proggy/lib/index.js @@ -0,0 +1,15 @@ +exports.Client = require('./client.js') +exports.Tracker = require('./tracker.js') + +const trackers = new Map() +exports.createTracker = (name, key, total) => { + const tracker = new exports.Tracker(name, key, total) + if (trackers.has(tracker.key)) { + const msg = `proggy: duplicate progress id ${JSON.stringify(tracker.key)}` + throw new Error(msg) + } + trackers.set(tracker.key, tracker) + tracker.on('done', () => trackers.delete(tracker.key)) + return tracker +} +exports.createClient = (options = {}) => new exports.Client(options) diff --git a/deps/npm/node_modules/proggy/lib/tracker.js b/deps/npm/node_modules/proggy/lib/tracker.js new file mode 100644 index 00000000000000..56c78d9434dc7c --- /dev/null +++ b/deps/npm/node_modules/proggy/lib/tracker.js @@ -0,0 +1,68 @@ +// The tracker class is intentionally as naive as possible. it is just +// an ergonomic wrapper around process.emit('progress', ...) +const EE = require('events') +class Tracker extends EE { + constructor (name, key, total) { + super() + if (!name) { + throw new Error('proggy: Tracker needs a name') + } + + if (typeof key === 'number' && !total) { + total = key + key = null + } + + if (!total) { + total = 100 + } + + if (!key) { + key = name + } + + this.done = false + this.name = name + this.key = key + this.value = 0 + this.total = total + } + + finish (metadata = {}) { + this.update(this.total, this.total, metadata) + } + + update (value, total, metadata) { + if (!metadata) { + if (total && typeof total === 'object') { + metadata = total + } else { + metadata = {} + } + } + if (typeof total !== 'number') { + total = this.total + } + + if (this.done) { + const msg = `proggy: updating completed tracker: ${JSON.stringify(this.key)}` + throw new Error(msg) + } + this.value = value + this.total = total + const done = this.value >= this.total + process.emit('progress', this.key, { + ...metadata, + name: this.name, + key: this.key, + value, + total, + done, + }) + if (done) { + this.done = true + this.emit('done') + } + } +} +module.exports = Tracker diff --git a/deps/npm/node_modules/are-we-there-yet/package.json b/deps/npm/node_modules/proggy/package.json similarity index 53% rename from deps/npm/node_modules/are-we-there-yet/package.json rename to deps/npm/node_modules/proggy/package.json index f072a21abb444b..4940fc9d002a64 100644 --- a/deps/npm/node_modules/are-we-there-yet/package.json +++ b/deps/npm/node_modules/proggy/package.json @@ -1,53 +1,48 @@ { - "name": "are-we-there-yet", - "version": "4.0.2", - "description": "Keep track of the overall completion of many disparate processes", + "name": "proggy", + "version": "2.0.0", + "files": [ + "bin/", + "lib/" + ], "main": "lib/index.js", + "description": "Progress bar updates at a distance", + "repository": { + "type": "git", + "url": "https://github.com/npm/proggy.git" + }, + "author": "GitHub Inc.", + "license": "ISC", "scripts": { "test": "tap", - "lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"", - "lintfix": "npm run lint -- --fix", "posttest": "npm run lint", - "postsnap": "npm run lintfix --", "snap": "tap", + 
"postsnap": "eslint lib test --fix", + "lint": "eslint \"**/*.js\"", "postlint": "template-oss-check", + "lintfix": "npm run lint -- --fix", "template-oss-apply": "template-oss-apply --force" }, - "repository": { - "type": "git", - "url": "https://github.com/npm/are-we-there-yet.git" - }, - "author": "GitHub Inc.", - "license": "ISC", - "bugs": { - "url": "https://github.com/npm/are-we-there-yet/issues" - }, - "homepage": "https://github.com/npm/are-we-there-yet", "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.21.3", + "@npmcli/eslint-config": "^3.0.1", + "@npmcli/template-oss": "4.5.1", + "chalk": "^4.1.2", + "cli-progress": "^3.10.0", + "npmlog": "^6.0.1", "tap": "^16.0.1" }, - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" - }, "tap": { - "branches": 68, - "statements": 92, - "functions": 86, - "lines": 92, + "coverage-map": "map.js", "nyc-arg": [ "--exclude", "tap-snapshots/**" ] }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + }, "templateOSS": { "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.21.3", - "publish": true + "version": "4.5.1" } } diff --git a/deps/npm/node_modules/read-package-json/LICENSE b/deps/npm/node_modules/read-package-json/LICENSE deleted file mode 100644 index 052085c436514a..00000000000000 --- a/deps/npm/node_modules/read-package-json/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -The ISC License - -Copyright (c) Isaac Z. Schlueter - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR -OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/npm/node_modules/read-package-json/lib/read-json.js b/deps/npm/node_modules/read-package-json/lib/read-json.js deleted file mode 100644 index d35f09ebd208f9..00000000000000 --- a/deps/npm/node_modules/read-package-json/lib/read-json.js +++ /dev/null @@ -1,589 +0,0 @@ -var fs = require('fs') - -var path = require('path') - -var { glob } = require('glob') -var normalizeData = require('normalize-package-data') -var safeJSON = require('json-parse-even-better-errors') -var util = require('util') -var normalizePackageBin = require('npm-normalize-package-bin') - -module.exports = readJson - -// put more stuff on here to customize. 
-readJson.extraSet = [ - bundleDependencies, - gypfile, - serverjs, - scriptpath, - authors, - readme, - mans, - bins, - githead, - fillTypes, -] - -var typoWarned = {} -var cache = {} - -function readJson (file, log_, strict_, cb_) { - var log, strict, cb - for (var i = 1; i < arguments.length - 1; i++) { - if (typeof arguments[i] === 'boolean') { - strict = arguments[i] - } else if (typeof arguments[i] === 'function') { - log = arguments[i] - } - } - - if (!log) { - log = function () {} - } - cb = arguments[arguments.length - 1] - - readJson_(file, log, strict, cb) -} - -function readJson_ (file, log, strict, cb) { - fs.readFile(file, 'utf8', function (er, d) { - parseJson(file, er, d, log, strict, cb) - }) -} - -function stripBOM (content) { - // Remove byte order marker. This catches EF BB BF (the UTF-8 BOM) - // because the buffer-to-string conversion in `fs.readFileSync()` - // translates it to FEFF, the UTF-16 BOM. - if (content.charCodeAt(0) === 0xFEFF) { - content = content.slice(1) - } - return content -} - -function jsonClone (obj) { - if (obj == null) { - return obj - } else if (Array.isArray(obj)) { - var newarr = new Array(obj.length) - for (var ii in obj) { - newarr[ii] = jsonClone(obj[ii]) - } - return newarr - } else if (typeof obj === 'object') { - var newobj = {} - for (var kk in obj) { - newobj[kk] = jsonClone(obj[kk]) - } - return newobj - } else { - return obj - } -} - -function parseJson (file, er, d, log, strict, cb) { - if (er && er.code === 'ENOENT') { - return fs.stat(path.dirname(file), function (err, stat) { - if (!err && stat && !stat.isDirectory()) { - // ENOTDIR isn't used on Windows, but npm expects it. - er = Object.create(er) - er.code = 'ENOTDIR' - return cb(er) - } else { - return indexjs(file, er, log, strict, cb) - } - }) - } - if (er) { - return cb(er) - } - - if (cache[d]) { - return cb(null, jsonClone(cache[d])) - } - - var data - - try { - data = safeJSON(stripBOM(d)) - for (var key in data) { - if (/^_/.test(key)) { - delete data[key] - } - } - } catch (jsonErr) { - data = parseIndex(d) - if (!data) { - return cb(parseError(jsonErr, file)) - } - } - extrasCached(file, d, data, log, strict, cb) -} - -function extrasCached (file, d, data, log, strict, cb) { - extras(file, data, log, strict, function (err, extrasData) { - if (!err) { - cache[d] = jsonClone(extrasData) - } - cb(err, extrasData) - }) -} - -function indexjs (file, er, log, strict, cb) { - if (path.basename(file) === 'index.js') { - return cb(er) - } - - var index = path.resolve(path.dirname(file), 'index.js') - fs.readFile(index, 'utf8', function (er2, d) { - if (er2) { - return cb(er) - } - - if (cache[d]) { - return cb(null, cache[d]) - } - - var data = parseIndex(d) - if (!data) { - return cb(er) - } - - extrasCached(file, d, data, log, strict, cb) - }) -} - -readJson.extras = extras -function extras (file, data, log_, strict_, cb_) { - var log, strict, cb - for (var i = 2; i < arguments.length - 1; i++) { - if (typeof arguments[i] === 'boolean') { - strict = arguments[i] - } else if (typeof arguments[i] === 'function') { - log = arguments[i] - } - } - - if (!log) { - log = function () {} - } - cb = arguments[i] - - var set = readJson.extraSet - var n = set.length - var errState = null - set.forEach(function (fn) { - fn(file, data, then) - }) - - function then (er) { - if (errState) { - return - } - if (er) { - return cb(errState = er) - } - if (--n > 0) { - return - } - final(file, data, log, strict, cb) - } -} - -function scriptpath (file, data, cb) { - if (!data.scripts) { - 
return cb(null, data) - } - var k = Object.keys(data.scripts) - k.forEach(scriptpath_, data.scripts) - cb(null, data) -} - -function scriptpath_ (key) { - var s = this[key] - // This is never allowed, and only causes problems - if (typeof s !== 'string') { - return delete this[key] - } - - var spre = /^(\.[/\\])?node_modules[/\\].bin[\\/]/ - if (s.match(spre)) { - this[key] = this[key].replace(spre, '') - } -} - -function gypfile (file, data, cb) { - var dir = path.dirname(file) - var s = data.scripts || {} - if (s.install || s.preinstall) { - return cb(null, data) - } - - if (data.gypfile === false) { - return cb(null, data) - } - glob('*.gyp', { cwd: dir }) - .then(files => gypfile_(file, data, files, cb)) - .catch(er => cb(er)) -} - -function gypfile_ (file, data, files, cb) { - if (!files.length) { - return cb(null, data) - } - var s = data.scripts || {} - s.install = 'node-gyp rebuild' - data.scripts = s - data.gypfile = true - return cb(null, data) -} - -function serverjs (file, data, cb) { - var dir = path.dirname(file) - var s = data.scripts || {} - if (s.start) { - return cb(null, data) - } - fs.access(path.join(dir, 'server.js'), (err) => { - if (!err) { - s.start = 'node server.js' - data.scripts = s - } - return cb(null, data) - }) -} - -function authors (file, data, cb) { - if (data.contributors) { - return cb(null, data) - } - var af = path.resolve(path.dirname(file), 'AUTHORS') - fs.readFile(af, 'utf8', function (er, ad) { - // ignore error. just checking it. - if (er) { - return cb(null, data) - } - authors_(file, data, ad, cb) - }) -} - -function authors_ (file, data, ad, cb) { - ad = ad.split(/\r?\n/g).map(function (line) { - return line.replace(/^\s*#.*$/, '').trim() - }).filter(function (line) { - return line - }) - data.contributors = ad - return cb(null, data) -} - -function readme (file, data, cb) { - if (data.readme) { - return cb(null, data) - } - var dir = path.dirname(file) - var globOpts = { cwd: dir, nocase: true, mark: true } - glob('{README,README.*}', globOpts) - .then(files => { - // don't accept directories. - files = files.filter(function (filtered) { - return !filtered.match(/\/$/) - }) - if (!files.length) { - return cb() - } - var fn = preferMarkdownReadme(files) - var rm = path.resolve(dir, fn) - return readme_(file, data, rm, cb) - }) - .catch(er => cb(er)) -} - -function preferMarkdownReadme (files) { - var fallback = 0 - var re = /\.m?a?r?k?d?o?w?n?$/i - for (var i = 0; i < files.length; i++) { - if (files[i].match(re)) { - return files[i] - } else if (files[i].match(/README$/)) { - fallback = i - } - } - // prefer README.md, followed by README; otherwise, return - // the first filename (which could be README) - return files[fallback] -} - -function readme_ (file, data, rm, cb) { - var rmfn = path.basename(rm) - fs.readFile(rm, 'utf8', function (er, rmData) { - // maybe not readable, or something. 
- if (er) { - return cb() - } - data.readme = rmData - data.readmeFilename = rmfn - return cb(er, data) - }) -} - -function mans (file, data, cb) { - let cwd = data.directories && data.directories.man - if (data.man || !cwd) { - return cb(null, data) - } - const dirname = path.dirname(file) - cwd = path.resolve(path.dirname(file), cwd) - glob('**/*.[0-9]', { cwd }) - .then(mansGlob => { - data.man = mansGlob.map(man => - path.relative(dirname, path.join(cwd, man)).split(path.sep).join('/') - ) - return cb(null, data) - }) - .catch(er => cb(er)) -} - -function bins (file, data, cb) { - data = normalizePackageBin(data) - - var m = data.directories && data.directories.bin - if (data.bin || !m) { - return cb(null, data) - } - - m = path.resolve(path.dirname(file), path.join('.', path.join('/', m))) - glob('**', { cwd: m }) - .then(binsGlob => bins_(file, data, binsGlob, cb)) - .catch(er => cb(er)) -} - -function bins_ (file, data, binsGlob, cb) { - var m = (data.directories && data.directories.bin) || '.' - data.bin = binsGlob.reduce(function (acc, mf) { - if (mf && mf.charAt(0) !== '.') { - var f = path.basename(mf) - acc[f] = path.join(m, mf) - } - return acc - }, {}) - return cb(null, normalizePackageBin(data)) -} - -function bundleDependencies (file, data, cb) { - var bd = 'bundleDependencies' - var bdd = 'bundledDependencies' - // normalize key name - if (data[bdd] !== undefined) { - if (data[bd] === undefined) { - data[bd] = data[bdd] - } - delete data[bdd] - } - if (data[bd] === false) { - delete data[bd] - } else if (data[bd] === true) { - data[bd] = Object.keys(data.dependencies || {}) - } else if (data[bd] !== undefined && !Array.isArray(data[bd])) { - delete data[bd] - } - return cb(null, data) -} - -function githead (file, data, cb) { - if (data.gitHead) { - return cb(null, data) - } - var dir = path.dirname(file) - var head = path.resolve(dir, '.git/HEAD') - fs.readFile(head, 'utf8', function (er, headData) { - if (er) { - var parent = path.dirname(dir) - if (parent === dir) { - return cb(null, data) - } - return githead(dir, data, cb) - } - githead_(data, dir, headData, cb) - }) -} - -function githead_ (data, dir, head, cb) { - if (!head.match(/^ref: /)) { - data.gitHead = head.trim() - return cb(null, data) - } - var headRef = head.replace(/^ref: /, '').trim() - var headFile = path.resolve(dir, '.git', headRef) - fs.readFile(headFile, 'utf8', function (er, headData) { - if (er || !headData) { - var packFile = path.resolve(dir, '.git/packed-refs') - return fs.readFile(packFile, 'utf8', function (readFileErr, refs) { - if (readFileErr || !refs) { - return cb(null, data) - } - refs = refs.split('\n') - for (var i = 0; i < refs.length; i++) { - var match = refs[i].match(/^([0-9a-f]{40}) (.+)$/) - if (match && match[2].trim() === headRef) { - data.gitHead = match[1] - break - } - } - return cb(null, data) - }) - } - headData = headData.replace(/^ref: /, '').trim() - data.gitHead = headData - return cb(null, data) - }) -} - -/** - * Warn if the bin references don't point to anything. This might be better in - * normalize-package-data if it had access to the file path. 
- */ -function checkBinReferences_ (file, data, warn, cb) { - if (!(data.bin instanceof Object)) { - return cb() - } - - var keys = Object.keys(data.bin) - var keysLeft = keys.length - if (!keysLeft) { - return cb() - } - - function handleExists (relName, result) { - keysLeft-- - if (!result) { - warn('No bin file found at ' + relName) - } - if (!keysLeft) { - cb() - } - } - - keys.forEach(function (key) { - var dirName = path.dirname(file) - var relName = data.bin[key] - /* istanbul ignore if - impossible, bins have been normalized */ - if (typeof relName !== 'string') { - var msg = 'Bin filename for ' + key + - ' is not a string: ' + util.inspect(relName) - warn(msg) - delete data.bin[key] - handleExists(relName, true) - return - } - var binPath = path.resolve(dirName, relName) - fs.stat(binPath, (err) => handleExists(relName, !err)) - }) -} - -function final (file, data, log, strict, cb) { - var pId = makePackageId(data) - - function warn (msg) { - if (typoWarned[pId]) { - return - } - if (log) { - log('package.json', pId, msg) - } - } - - try { - normalizeData(data, warn, strict) - } catch (error) { - return cb(error) - } - - checkBinReferences_(file, data, warn, function () { - typoWarned[pId] = true - cb(null, data) - }) -} - -function fillTypes (file, data, cb) { - var index = data.main || 'index.js' - - if (typeof index !== 'string') { - return cb(new TypeError('The "main" attribute must be of type string.')) - } - - // TODO exports is much more complicated than this in verbose format - // We need to support for instance - - // "exports": { - // ".": [ - // { - // "default": "./lib/npm.js" - // }, - // "./lib/npm.js" - // ], - // "./package.json": "./package.json" - // }, - // as well as conditional exports - - // if (data.exports && typeof data.exports === 'string') { - // index = data.exports - // } - - // if (data.exports && data.exports['.']) { - // index = data.exports['.'] - // if (typeof index !== 'string') { - // } - // } - - var extless = - path.join(path.dirname(index), path.basename(index, path.extname(index))) - var dts = `./${extless}.d.ts` - var dtsPath = path.join(path.dirname(file), dts) - var hasDTSFields = 'types' in data || 'typings' in data - if (!hasDTSFields && fs.existsSync(dtsPath)) { - data.types = dts.split(path.sep).join('/') - } - - cb(null, data) -} - -function makePackageId (data) { - var name = cleanString(data.name) - var ver = cleanString(data.version) - return name + '@' + ver -} - -function cleanString (str) { - return (!str || typeof (str) !== 'string') ? '' : str.trim() -} - -// /**package { "name": "foo", "version": "1.2.3", ... 
} **/ -function parseIndex (data) { - data = data.split(/^\/\*\*package(?:\s|$)/m) - - if (data.length < 2) { - return null - } - data = data[1] - data = data.split(/\*\*\/$/m) - - if (data.length < 2) { - return null - } - data = data[0] - data = data.replace(/^\s*\*/mg, '') - - try { - return safeJSON(data) - } catch (er) { - return null - } -} - -function parseError (ex, file) { - var e = new Error('Failed to parse json\n' + ex.message) - e.code = 'EJSONPARSE' - e.path = file - return e -} diff --git a/deps/npm/node_modules/read-package-json/package.json b/deps/npm/node_modules/read-package-json/package.json deleted file mode 100644 index 01061f2bc27921..00000000000000 --- a/deps/npm/node_modules/read-package-json/package.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "name": "read-package-json", - "version": "7.0.0", - "author": "GitHub Inc.", - "description": "The thing npm uses to read package.json files with semantics and defaults and validation", - "repository": { - "type": "git", - "url": "https://github.com/npm/read-package-json.git" - }, - "main": "lib/read-json.js", - "scripts": { - "prerelease": "npm t", - "postrelease": "npm publish && git push --follow-tags", - "release": "standard-version -s", - "test": "tap", - "npmclilint": "npmcli-lint", - "lint": "eslint \"**/*.js\"", - "lintfix": "npm run lint -- --fix", - "posttest": "npm run lint", - "postsnap": "npm run lintfix --", - "postlint": "template-oss-check", - "snap": "tap", - "template-oss-apply": "template-oss-apply --force" - }, - "dependencies": { - "glob": "^10.2.2", - "json-parse-even-better-errors": "^3.0.0", - "normalize-package-data": "^6.0.0", - "npm-normalize-package-bin": "^3.0.0" - }, - "devDependencies": { - "@npmcli/eslint-config": "^4.0.0", - "@npmcli/template-oss": "4.18.0", - "tap": "^16.0.1" - }, - "license": "ISC", - "files": [ - "bin/", - "lib/" - ], - "engines": { - "node": "^16.14.0 || >=18.0.0" - }, - "tap": { - "branches": 73, - "functions": 77, - "lines": 77, - "statements": 77, - "nyc-arg": [ - "--exclude", - "tap-snapshots/**" - ] - }, - "templateOSS": { - "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", - "version": "4.18.0", - "publish": "true", - "ciVersions": [ - "16.14.0", - "16.x", - "18.0.0", - "18.x" - ] - } -} diff --git a/deps/npm/node_modules/set-blocking/LICENSE.txt b/deps/npm/node_modules/set-blocking/LICENSE.txt deleted file mode 100644 index 836440bef7cf14..00000000000000 --- a/deps/npm/node_modules/set-blocking/LICENSE.txt +++ /dev/null @@ -1,14 +0,0 @@ -Copyright (c) 2016, Contributors - -Permission to use, copy, modify, and/or distribute this software -for any purpose with or without fee is hereby granted, provided -that the above copyright notice and this permission notice -appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE -LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES -OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, -ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/deps/npm/node_modules/set-blocking/index.js b/deps/npm/node_modules/set-blocking/index.js deleted file mode 100644 index 6f78774bb63ee6..00000000000000 --- a/deps/npm/node_modules/set-blocking/index.js +++ /dev/null @@ -1,7 +0,0 @@ -module.exports = function (blocking) { - [process.stdout, process.stderr].forEach(function (stream) { - if (stream._handle && stream.isTTY && typeof stream._handle.setBlocking === 'function') { - stream._handle.setBlocking(blocking) - } - }) -} diff --git a/deps/npm/node_modules/set-blocking/package.json b/deps/npm/node_modules/set-blocking/package.json deleted file mode 100644 index c082db72c6259d..00000000000000 --- a/deps/npm/node_modules/set-blocking/package.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "name": "set-blocking", - "version": "2.0.0", - "description": "set blocking stdio and stderr ensuring that terminal output does not truncate", - "main": "index.js", - "scripts": { - "pretest": "standard", - "test": "nyc mocha ./test/*.js", - "coverage": "nyc report --reporter=text-lcov | coveralls", - "version": "standard-version" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/yargs/set-blocking.git" - }, - "keywords": [ - "flush", - "terminal", - "blocking", - "shim", - "stdio", - "stderr" - ], - "author": "Ben Coe ", - "license": "ISC", - "bugs": { - "url": "https://github.com/yargs/set-blocking/issues" - }, - "homepage": "https://github.com/yargs/set-blocking#readme", - "devDependencies": { - "chai": "^3.5.0", - "coveralls": "^2.11.9", - "mocha": "^2.4.5", - "nyc": "^6.4.4", - "standard": "^7.0.1", - "standard-version": "^2.2.1" - }, - "files": [ - "index.js", - "LICENSE.txt" - ] -} \ No newline at end of file diff --git a/deps/npm/node_modules/ip-address/node_modules/sprintf-js/CONTRIBUTORS.md b/deps/npm/node_modules/sprintf-js/CONTRIBUTORS.md similarity index 100% rename from deps/npm/node_modules/ip-address/node_modules/sprintf-js/CONTRIBUTORS.md rename to deps/npm/node_modules/sprintf-js/CONTRIBUTORS.md diff --git a/deps/npm/node_modules/ip-address/node_modules/sprintf-js/LICENSE b/deps/npm/node_modules/sprintf-js/LICENSE similarity index 100% rename from deps/npm/node_modules/ip-address/node_modules/sprintf-js/LICENSE rename to deps/npm/node_modules/sprintf-js/LICENSE diff --git a/deps/npm/node_modules/sprintf-js/bower.json b/deps/npm/node_modules/sprintf-js/bower.json new file mode 100644 index 00000000000000..d90a75989f7b05 --- /dev/null +++ b/deps/npm/node_modules/sprintf-js/bower.json @@ -0,0 +1,14 @@ +{ + "name": "sprintf", + "description": "JavaScript sprintf implementation", + "version": "1.0.3", + "main": "src/sprintf.js", + "license": "BSD-3-Clause-Clear", + "keywords": ["sprintf", "string", "formatting"], + "authors": ["Alexandru Marasteanu (http://alexei.ro/)"], + "homepage": "https://github.com/alexei/sprintf.js", + "repository": { + "type": "git", + "url": "git://github.com/alexei/sprintf.js.git" + } +} diff --git a/deps/npm/node_modules/sprintf-js/demo/angular.html b/deps/npm/node_modules/sprintf-js/demo/angular.html new file mode 100644 index 00000000000000..3559efd7635634 --- /dev/null +++ b/deps/npm/node_modules/sprintf-js/demo/angular.html @@ -0,0 +1,20 @@ + + + + + + + + +
    {{ "%+010d"|sprintf:-123 }}
    +
    {{ "%+010d"|vsprintf:[-123] }}
    +
    {{ "%+010d"|fmt:-123 }}
    +
    {{ "%+010d"|vfmt:[-123] }}
    +
    {{ "I've got %2$d apples and %1$d oranges."|fmt:4:2 }}
    +
    {{ "I've got %(apples)d apples and %(oranges)d oranges."|fmt:{apples: 2, oranges: 4} }}
    + + + + diff --git a/deps/npm/node_modules/ip-address/node_modules/sprintf-js/dist/.gitattributes b/deps/npm/node_modules/sprintf-js/dist/.gitattributes similarity index 100% rename from deps/npm/node_modules/ip-address/node_modules/sprintf-js/dist/.gitattributes rename to deps/npm/node_modules/sprintf-js/dist/.gitattributes diff --git a/deps/npm/node_modules/ip-address/node_modules/sprintf-js/dist/angular-sprintf.min.js b/deps/npm/node_modules/sprintf-js/dist/angular-sprintf.min.js similarity index 100% rename from deps/npm/node_modules/ip-address/node_modules/sprintf-js/dist/angular-sprintf.min.js rename to deps/npm/node_modules/sprintf-js/dist/angular-sprintf.min.js diff --git a/deps/npm/node_modules/ip-address/node_modules/sprintf-js/dist/sprintf.min.js b/deps/npm/node_modules/sprintf-js/dist/sprintf.min.js similarity index 100% rename from deps/npm/node_modules/ip-address/node_modules/sprintf-js/dist/sprintf.min.js rename to deps/npm/node_modules/sprintf-js/dist/sprintf.min.js diff --git a/deps/npm/node_modules/sprintf-js/gruntfile.js b/deps/npm/node_modules/sprintf-js/gruntfile.js new file mode 100644 index 00000000000000..246e1c3b9801fc --- /dev/null +++ b/deps/npm/node_modules/sprintf-js/gruntfile.js @@ -0,0 +1,36 @@ +module.exports = function(grunt) { + grunt.initConfig({ + pkg: grunt.file.readJSON("package.json"), + + uglify: { + options: { + banner: "/*! <%= pkg.name %> | <%= pkg.author %> | <%= pkg.license %> */\n", + sourceMap: true + }, + build: { + files: [ + { + src: "src/sprintf.js", + dest: "dist/sprintf.min.js" + }, + { + src: "src/angular-sprintf.js", + dest: "dist/angular-sprintf.min.js" + } + ] + } + }, + + watch: { + js: { + files: "src/*.js", + tasks: ["uglify"] + } + } + }) + + grunt.loadNpmTasks("grunt-contrib-uglify") + grunt.loadNpmTasks("grunt-contrib-watch") + + grunt.registerTask("default", ["uglify", "watch"]) +} diff --git a/deps/npm/node_modules/ip-address/node_modules/sprintf-js/package.json b/deps/npm/node_modules/sprintf-js/package.json similarity index 100% rename from deps/npm/node_modules/ip-address/node_modules/sprintf-js/package.json rename to deps/npm/node_modules/sprintf-js/package.json diff --git a/deps/npm/node_modules/ip-address/node_modules/sprintf-js/src/angular-sprintf.js b/deps/npm/node_modules/sprintf-js/src/angular-sprintf.js similarity index 100% rename from deps/npm/node_modules/ip-address/node_modules/sprintf-js/src/angular-sprintf.js rename to deps/npm/node_modules/sprintf-js/src/angular-sprintf.js diff --git a/deps/npm/node_modules/ip-address/node_modules/sprintf-js/src/sprintf.js b/deps/npm/node_modules/sprintf-js/src/sprintf.js similarity index 100% rename from deps/npm/node_modules/ip-address/node_modules/sprintf-js/src/sprintf.js rename to deps/npm/node_modules/sprintf-js/src/sprintf.js diff --git a/deps/npm/node_modules/sprintf-js/test/test.js b/deps/npm/node_modules/sprintf-js/test/test.js new file mode 100644 index 00000000000000..6f57b2538c8522 --- /dev/null +++ b/deps/npm/node_modules/sprintf-js/test/test.js @@ -0,0 +1,82 @@ +var assert = require("assert"), + sprintfjs = require("../src/sprintf.js"), + sprintf = sprintfjs.sprintf, + vsprintf = sprintfjs.vsprintf + +describe("sprintfjs", function() { + var pi = 3.141592653589793 + + it("should return formated strings for simple placeholders", function() { + assert.equal("%", sprintf("%%")) + assert.equal("10", sprintf("%b", 2)) + assert.equal("A", sprintf("%c", 65)) + assert.equal("2", sprintf("%d", 2)) + assert.equal("2", sprintf("%i", 2)) + assert.equal("2", 
sprintf("%d", "2")) + assert.equal("2", sprintf("%i", "2")) + assert.equal('{"foo":"bar"}', sprintf("%j", {foo: "bar"})) + assert.equal('["foo","bar"]', sprintf("%j", ["foo", "bar"])) + assert.equal("2e+0", sprintf("%e", 2)) + assert.equal("2", sprintf("%u", 2)) + assert.equal("4294967294", sprintf("%u", -2)) + assert.equal("2.2", sprintf("%f", 2.2)) + assert.equal("3.141592653589793", sprintf("%g", pi)) + assert.equal("10", sprintf("%o", 8)) + assert.equal("%s", sprintf("%s", "%s")) + assert.equal("ff", sprintf("%x", 255)) + assert.equal("FF", sprintf("%X", 255)) + assert.equal("Polly wants a cracker", sprintf("%2$s %3$s a %1$s", "cracker", "Polly", "wants")) + assert.equal("Hello world!", sprintf("Hello %(who)s!", {"who": "world"})) + }) + + it("should return formated strings for complex placeholders", function() { + // sign + assert.equal("2", sprintf("%d", 2)) + assert.equal("-2", sprintf("%d", -2)) + assert.equal("+2", sprintf("%+d", 2)) + assert.equal("-2", sprintf("%+d", -2)) + assert.equal("2", sprintf("%i", 2)) + assert.equal("-2", sprintf("%i", -2)) + assert.equal("+2", sprintf("%+i", 2)) + assert.equal("-2", sprintf("%+i", -2)) + assert.equal("2.2", sprintf("%f", 2.2)) + assert.equal("-2.2", sprintf("%f", -2.2)) + assert.equal("+2.2", sprintf("%+f", 2.2)) + assert.equal("-2.2", sprintf("%+f", -2.2)) + assert.equal("-2.3", sprintf("%+.1f", -2.34)) + assert.equal("-0.0", sprintf("%+.1f", -0.01)) + assert.equal("3.14159", sprintf("%.6g", pi)) + assert.equal("3.14", sprintf("%.3g", pi)) + assert.equal("3", sprintf("%.1g", pi)) + assert.equal("-000000123", sprintf("%+010d", -123)) + assert.equal("______-123", sprintf("%+'_10d", -123)) + assert.equal("-234.34 123.2", sprintf("%f %f", -234.34, 123.2)) + + // padding + assert.equal("-0002", sprintf("%05d", -2)) + assert.equal("-0002", sprintf("%05i", -2)) + assert.equal(" <", sprintf("%5s", "<")) + assert.equal("0000<", sprintf("%05s", "<")) + assert.equal("____<", sprintf("%'_5s", "<")) + assert.equal("> ", sprintf("%-5s", ">")) + assert.equal(">0000", sprintf("%0-5s", ">")) + assert.equal(">____", sprintf("%'_-5s", ">")) + assert.equal("xxxxxx", sprintf("%5s", "xxxxxx")) + assert.equal("1234", sprintf("%02u", 1234)) + assert.equal(" -10.235", sprintf("%8.3f", -10.23456)) + assert.equal("-12.34 xxx", sprintf("%f %s", -12.34, "xxx")) + assert.equal('{\n "foo": "bar"\n}', sprintf("%2j", {foo: "bar"})) + assert.equal('[\n "foo",\n "bar"\n]', sprintf("%2j", ["foo", "bar"])) + + // precision + assert.equal("2.3", sprintf("%.1f", 2.345)) + assert.equal("xxxxx", sprintf("%5.5s", "xxxxxx")) + assert.equal(" x", sprintf("%5.1s", "xxxxxx")) + + }) + + it("should return formated strings for callbacks", function() { + assert.equal("foobar", sprintf("%s", function() { return "foobar" })) + assert.equal(Date.now(), sprintf("%s", Date.now)) // should pass... + }) +}) diff --git a/deps/npm/node_modules/wcwidth/LICENSE b/deps/npm/node_modules/wcwidth/LICENSE deleted file mode 100644 index 313ef1e888e41b..00000000000000 --- a/deps/npm/node_modules/wcwidth/LICENSE +++ /dev/null @@ -1,30 +0,0 @@ -wcwidth.js: JavaScript Portng of Markus Kuhn's wcwidth() Implementation -======================================================================= - -Copyright (C) 2012 by Jun Woong. - -This package is a JavaScript porting of `wcwidth()` implementation -[by Markus Kuhn](http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c). 
- -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - - -THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER -IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - diff --git a/deps/npm/node_modules/wcwidth/combining.js b/deps/npm/node_modules/wcwidth/combining.js deleted file mode 100644 index dac9789d35f0f1..00000000000000 --- a/deps/npm/node_modules/wcwidth/combining.js +++ /dev/null @@ -1,50 +0,0 @@ -module.exports = [ - [ 0x0300, 0x036F ], [ 0x0483, 0x0486 ], [ 0x0488, 0x0489 ], - [ 0x0591, 0x05BD ], [ 0x05BF, 0x05BF ], [ 0x05C1, 0x05C2 ], - [ 0x05C4, 0x05C5 ], [ 0x05C7, 0x05C7 ], [ 0x0600, 0x0603 ], - [ 0x0610, 0x0615 ], [ 0x064B, 0x065E ], [ 0x0670, 0x0670 ], - [ 0x06D6, 0x06E4 ], [ 0x06E7, 0x06E8 ], [ 0x06EA, 0x06ED ], - [ 0x070F, 0x070F ], [ 0x0711, 0x0711 ], [ 0x0730, 0x074A ], - [ 0x07A6, 0x07B0 ], [ 0x07EB, 0x07F3 ], [ 0x0901, 0x0902 ], - [ 0x093C, 0x093C ], [ 0x0941, 0x0948 ], [ 0x094D, 0x094D ], - [ 0x0951, 0x0954 ], [ 0x0962, 0x0963 ], [ 0x0981, 0x0981 ], - [ 0x09BC, 0x09BC ], [ 0x09C1, 0x09C4 ], [ 0x09CD, 0x09CD ], - [ 0x09E2, 0x09E3 ], [ 0x0A01, 0x0A02 ], [ 0x0A3C, 0x0A3C ], - [ 0x0A41, 0x0A42 ], [ 0x0A47, 0x0A48 ], [ 0x0A4B, 0x0A4D ], - [ 0x0A70, 0x0A71 ], [ 0x0A81, 0x0A82 ], [ 0x0ABC, 0x0ABC ], - [ 0x0AC1, 0x0AC5 ], [ 0x0AC7, 0x0AC8 ], [ 0x0ACD, 0x0ACD ], - [ 0x0AE2, 0x0AE3 ], [ 0x0B01, 0x0B01 ], [ 0x0B3C, 0x0B3C ], - [ 0x0B3F, 0x0B3F ], [ 0x0B41, 0x0B43 ], [ 0x0B4D, 0x0B4D ], - [ 0x0B56, 0x0B56 ], [ 0x0B82, 0x0B82 ], [ 0x0BC0, 0x0BC0 ], - [ 0x0BCD, 0x0BCD ], [ 0x0C3E, 0x0C40 ], [ 0x0C46, 0x0C48 ], - [ 0x0C4A, 0x0C4D ], [ 0x0C55, 0x0C56 ], [ 0x0CBC, 0x0CBC ], - [ 0x0CBF, 0x0CBF ], [ 0x0CC6, 0x0CC6 ], [ 0x0CCC, 0x0CCD ], - [ 0x0CE2, 0x0CE3 ], [ 0x0D41, 0x0D43 ], [ 0x0D4D, 0x0D4D ], - [ 0x0DCA, 0x0DCA ], [ 0x0DD2, 0x0DD4 ], [ 0x0DD6, 0x0DD6 ], - [ 0x0E31, 0x0E31 ], [ 0x0E34, 0x0E3A ], [ 0x0E47, 0x0E4E ], - [ 0x0EB1, 0x0EB1 ], [ 0x0EB4, 0x0EB9 ], [ 0x0EBB, 0x0EBC ], - [ 0x0EC8, 0x0ECD ], [ 0x0F18, 0x0F19 ], [ 0x0F35, 0x0F35 ], - [ 0x0F37, 0x0F37 ], [ 0x0F39, 0x0F39 ], [ 0x0F71, 0x0F7E ], - [ 0x0F80, 0x0F84 ], [ 0x0F86, 0x0F87 ], [ 0x0F90, 0x0F97 ], - [ 0x0F99, 0x0FBC ], [ 0x0FC6, 0x0FC6 ], [ 0x102D, 0x1030 ], - [ 0x1032, 0x1032 ], [ 0x1036, 0x1037 ], [ 0x1039, 0x1039 ], - [ 0x1058, 0x1059 ], [ 0x1160, 0x11FF ], [ 0x135F, 0x135F ], - [ 0x1712, 0x1714 ], [ 0x1732, 0x1734 ], [ 0x1752, 0x1753 ], - [ 0x1772, 0x1773 ], [ 0x17B4, 0x17B5 ], [ 0x17B7, 0x17BD ], - [ 0x17C6, 
0x17C6 ], [ 0x17C9, 0x17D3 ], [ 0x17DD, 0x17DD ], - [ 0x180B, 0x180D ], [ 0x18A9, 0x18A9 ], [ 0x1920, 0x1922 ], - [ 0x1927, 0x1928 ], [ 0x1932, 0x1932 ], [ 0x1939, 0x193B ], - [ 0x1A17, 0x1A18 ], [ 0x1B00, 0x1B03 ], [ 0x1B34, 0x1B34 ], - [ 0x1B36, 0x1B3A ], [ 0x1B3C, 0x1B3C ], [ 0x1B42, 0x1B42 ], - [ 0x1B6B, 0x1B73 ], [ 0x1DC0, 0x1DCA ], [ 0x1DFE, 0x1DFF ], - [ 0x200B, 0x200F ], [ 0x202A, 0x202E ], [ 0x2060, 0x2063 ], - [ 0x206A, 0x206F ], [ 0x20D0, 0x20EF ], [ 0x302A, 0x302F ], - [ 0x3099, 0x309A ], [ 0xA806, 0xA806 ], [ 0xA80B, 0xA80B ], - [ 0xA825, 0xA826 ], [ 0xFB1E, 0xFB1E ], [ 0xFE00, 0xFE0F ], - [ 0xFE20, 0xFE23 ], [ 0xFEFF, 0xFEFF ], [ 0xFFF9, 0xFFFB ], - [ 0x10A01, 0x10A03 ], [ 0x10A05, 0x10A06 ], [ 0x10A0C, 0x10A0F ], - [ 0x10A38, 0x10A3A ], [ 0x10A3F, 0x10A3F ], [ 0x1D167, 0x1D169 ], - [ 0x1D173, 0x1D182 ], [ 0x1D185, 0x1D18B ], [ 0x1D1AA, 0x1D1AD ], - [ 0x1D242, 0x1D244 ], [ 0xE0001, 0xE0001 ], [ 0xE0020, 0xE007F ], - [ 0xE0100, 0xE01EF ] -] diff --git a/deps/npm/node_modules/wcwidth/docs/index.md b/deps/npm/node_modules/wcwidth/docs/index.md deleted file mode 100644 index 5c5126d03287b4..00000000000000 --- a/deps/npm/node_modules/wcwidth/docs/index.md +++ /dev/null @@ -1,65 +0,0 @@ -### Javascript porting of Markus Kuhn's wcwidth() implementation - -The following explanation comes from the original C implementation: - -This is an implementation of wcwidth() and wcswidth() (defined in -IEEE Std 1002.1-2001) for Unicode. - -http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html -http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html - -In fixed-width output devices, Latin characters all occupy a single -"cell" position of equal width, whereas ideographic CJK characters -occupy two such cells. Interoperability between terminal-line -applications and (teletype-style) character terminals using the -UTF-8 encoding requires agreement on which character should advance -the cursor by how many cell positions. No established formal -standards exist at present on which Unicode character shall occupy -how many cell positions on character terminals. These routines are -a first attempt of defining such behavior based on simple rules -applied to data provided by the Unicode Consortium. - -For some graphical characters, the Unicode standard explicitly -defines a character-cell width via the definition of the East Asian -FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes. -In all these cases, there is no ambiguity about which width a -terminal shall use. For characters in the East Asian Ambiguous (A) -class, the width choice depends purely on a preference of backward -compatibility with either historic CJK or Western practice. -Choosing single-width for these characters is easy to justify as -the appropriate long-term solution, as the CJK practice of -displaying these characters as double-width comes from historic -implementation simplicity (8-bit encoded characters were displayed -single-width and 16-bit ones double-width, even for Greek, -Cyrillic, etc.) and not any typographic considerations. - -Much less clear is the choice of width for the Not East Asian -(Neutral) class. Existing practice does not dictate a width for any -of these characters. It would nevertheless make sense -typographically to allocate two character cells to characters such -as for instance EM SPACE or VOLUME INTEGRAL, which cannot be -represented adequately with a single-width glyph. 
The following -routines at present merely assign a single-cell width to all -neutral characters, in the interest of simplicity. This is not -entirely satisfactory and should be reconsidered before -establishing a formal standard in this area. At the moment, the -decision which Not East Asian (Neutral) characters should be -represented by double-width glyphs cannot yet be answered by -applying a simple rule from the Unicode database content. Setting -up a proper standard for the behavior of UTF-8 character terminals -will require a careful analysis not only of each Unicode character, -but also of each presentation form, something the author of these -routines has avoided to do so far. - -http://www.unicode.org/unicode/reports/tr11/ - -Markus Kuhn -- 2007-05-26 (Unicode 5.0) - -Permission to use, copy, modify, and distribute this software -for any purpose and without fee is hereby granted. The author -disclaims all warranties with regard to this software. - -Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c - - - diff --git a/deps/npm/node_modules/wcwidth/index.js b/deps/npm/node_modules/wcwidth/index.js deleted file mode 100644 index 48cbb6020aebe9..00000000000000 --- a/deps/npm/node_modules/wcwidth/index.js +++ /dev/null @@ -1,99 +0,0 @@ -"use strict" - -var defaults = require('defaults') -var combining = require('./combining') - -var DEFAULTS = { - nul: 0, - control: 0 -} - -module.exports = function wcwidth(str) { - return wcswidth(str, DEFAULTS) -} - -module.exports.config = function(opts) { - opts = defaults(opts || {}, DEFAULTS) - return function wcwidth(str) { - return wcswidth(str, opts) - } -} - -/* - * The following functions define the column width of an ISO 10646 - * character as follows: - * - The null character (U+0000) has a column width of 0. - * - Other C0/C1 control characters and DEL will lead to a return value - * of -1. - * - Non-spacing and enclosing combining characters (general category - * code Mn or Me in the - * Unicode database) have a column width of 0. - * - SOFT HYPHEN (U+00AD) has a column width of 1. - * - Other format characters (general category code Cf in the Unicode - * database) and ZERO WIDTH - * SPACE (U+200B) have a column width of 0. - * - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF) - * have a column width of 0. - * - Spacing characters in the East Asian Wide (W) or East Asian - * Full-width (F) category as - * defined in Unicode Technical Report #11 have a column width of 2. - * - All remaining characters (including all printable ISO 8859-1 and - * WGL4 characters, Unicode control characters, etc.) have a column - * width of 1. - * This implementation assumes that characters are encoded in ISO 10646. -*/ - -function wcswidth(str, opts) { - if (typeof str !== 'string') return wcwidth(str, opts) - - var s = 0 - for (var i = 0; i < str.length; i++) { - var n = wcwidth(str.charCodeAt(i), opts) - if (n < 0) return -1 - s += n - } - - return s -} - -function wcwidth(ucs, opts) { - // test for 8-bit control characters - if (ucs === 0) return opts.nul - if (ucs < 32 || (ucs >= 0x7f && ucs < 0xa0)) return opts.control - - // binary search in table of non-spacing characters - if (bisearch(ucs)) return 0 - - // if we arrive here, ucs is not a combining or C0/C1 control character - return 1 + - (ucs >= 0x1100 && - (ucs <= 0x115f || // Hangul Jamo init. consonants - ucs == 0x2329 || ucs == 0x232a || - (ucs >= 0x2e80 && ucs <= 0xa4cf && - ucs != 0x303f) || // CJK ... 
Yi - (ucs >= 0xac00 && ucs <= 0xd7a3) || // Hangul Syllables - (ucs >= 0xf900 && ucs <= 0xfaff) || // CJK Compatibility Ideographs - (ucs >= 0xfe10 && ucs <= 0xfe19) || // Vertical forms - (ucs >= 0xfe30 && ucs <= 0xfe6f) || // CJK Compatibility Forms - (ucs >= 0xff00 && ucs <= 0xff60) || // Fullwidth Forms - (ucs >= 0xffe0 && ucs <= 0xffe6) || - (ucs >= 0x20000 && ucs <= 0x2fffd) || - (ucs >= 0x30000 && ucs <= 0x3fffd))); -} - -function bisearch(ucs) { - var min = 0 - var max = combining.length - 1 - var mid - - if (ucs < combining[0][0] || ucs > combining[max][1]) return false - - while (max >= min) { - mid = Math.floor((min + max) / 2) - if (ucs > combining[mid][1]) min = mid + 1 - else if (ucs < combining[mid][0]) max = mid - 1 - else return true - } - - return false -} diff --git a/deps/npm/node_modules/wcwidth/package.json b/deps/npm/node_modules/wcwidth/package.json deleted file mode 100644 index eb2df9d0076d20..00000000000000 --- a/deps/npm/node_modules/wcwidth/package.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "name": "wcwidth", - "version": "1.0.1", - "description": "Port of C's wcwidth() and wcswidth()", - "author": "Tim Oxley", - "contributors": [ - "Woong Jun (http://code.woong.org/)" - ], - "main": "index.js", - "dependencies": { - "defaults": "^1.0.3" - }, - "devDependencies": { - "tape": "^4.5.1" - }, - "license": "MIT", - "keywords": [ - "wide character", - "wc", - "wide character string", - "wcs", - "terminal", - "width", - "wcwidth", - "wcswidth" - ], - "directories": { - "doc": "docs", - "test": "test" - }, - "scripts": { - "test": "tape test/*.js" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/timoxley/wcwidth.git" - }, - "bugs": { - "url": "https://github.com/timoxley/wcwidth/issues" - }, - "homepage": "https://github.com/timoxley/wcwidth#readme" -} diff --git a/deps/npm/node_modules/wcwidth/test/index.js b/deps/npm/node_modules/wcwidth/test/index.js deleted file mode 100644 index 5180599a2ff285..00000000000000 --- a/deps/npm/node_modules/wcwidth/test/index.js +++ /dev/null @@ -1,64 +0,0 @@ -"use strict" - -var wcwidth = require('../') -var test = require('tape') - -test('handles regular strings', function(t) { - t.strictEqual(wcwidth('abc'), 3) - t.end() -}) - -test('handles multibyte strings', function(t) { - t.strictEqual(wcwidth('字的模块'), 8) - t.end() -}) - -test('handles multibyte characters mixed with regular characters', function(t) { - t.strictEqual(wcwidth('abc 字的模块'), 12) - t.end() -}) - -test('ignores control characters e.g. 
\\n', function(t) { - t.strictEqual(wcwidth('abc\n字的模块\ndef'), 14) - t.end() -}) - -test('ignores bad input', function(t) { - t.strictEqual(wcwidth(''), 0) - t.strictEqual(wcwidth(3), 0) - t.strictEqual(wcwidth({}), 0) - t.strictEqual(wcwidth([]), 0) - t.strictEqual(wcwidth(), 0) - t.end() -}) - -test('ignores nul (charcode 0)', function(t) { - t.strictEqual(wcwidth(String.fromCharCode(0)), 0) - t.end() -}) - -test('ignores nul mixed with chars', function(t) { - t.strictEqual(wcwidth('a' + String.fromCharCode(0) + '\n字的'), 5) - t.end() -}) - -test('can have custom value for nul', function(t) { - t.strictEqual(wcwidth.config({ - nul: 10 - })(String.fromCharCode(0) + 'a字的'), 15) - t.end() -}) - -test('can have custom control char value', function(t) { - t.strictEqual(wcwidth.config({ - control: 1 - })('abc\n字的模块\ndef'), 16) - t.end() -}) - -test('negative custom control chars == -1', function(t) { - t.strictEqual(wcwidth.config({ - control: -1 - })('abc\n字的模块\ndef'), -1) - t.end() -}) diff --git a/deps/npm/node_modules/wide-align/LICENSE b/deps/npm/node_modules/wide-align/LICENSE deleted file mode 100755 index f4be44d881b2d9..00000000000000 --- a/deps/npm/node_modules/wide-align/LICENSE +++ /dev/null @@ -1,14 +0,0 @@ -Copyright (c) 2015, Rebecca Turner - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - diff --git a/deps/npm/node_modules/wide-align/align.js b/deps/npm/node_modules/wide-align/align.js deleted file mode 100755 index 4f94ca4cde19b5..00000000000000 --- a/deps/npm/node_modules/wide-align/align.js +++ /dev/null @@ -1,65 +0,0 @@ -'use strict' -var stringWidth = require('string-width') - -exports.center = alignCenter -exports.left = alignLeft -exports.right = alignRight - -// lodash's way of generating pad characters. 
- -function createPadding (width) { - var result = '' - var string = ' ' - var n = width - do { - if (n % 2) { - result += string; - } - n = Math.floor(n / 2); - string += string; - } while (n); - - return result; -} - -function alignLeft (str, width) { - var trimmed = str.trimRight() - if (trimmed.length === 0 && str.length >= width) return str - var padding = '' - var strWidth = stringWidth(trimmed) - - if (strWidth < width) { - padding = createPadding(width - strWidth) - } - - return trimmed + padding -} - -function alignRight (str, width) { - var trimmed = str.trimLeft() - if (trimmed.length === 0 && str.length >= width) return str - var padding = '' - var strWidth = stringWidth(trimmed) - - if (strWidth < width) { - padding = createPadding(width - strWidth) - } - - return padding + trimmed -} - -function alignCenter (str, width) { - var trimmed = str.trim() - if (trimmed.length === 0 && str.length >= width) return str - var padLeft = '' - var padRight = '' - var strWidth = stringWidth(trimmed) - - if (strWidth < width) { - var padLeftBy = parseInt((width - strWidth) / 2, 10) - padLeft = createPadding(padLeftBy) - padRight = createPadding(width - (strWidth + padLeftBy)) - } - - return padLeft + trimmed + padRight -} diff --git a/deps/npm/node_modules/wide-align/package.json b/deps/npm/node_modules/wide-align/package.json deleted file mode 100755 index 2dd27074c77770..00000000000000 --- a/deps/npm/node_modules/wide-align/package.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "name": "wide-align", - "version": "1.1.5", - "description": "A wide-character aware text alignment function for use on the console or with fixed width fonts.", - "main": "align.js", - "scripts": { - "test": "tap --coverage test/*.js" - }, - "keywords": [ - "wide", - "double", - "unicode", - "cjkv", - "pad", - "align" - ], - "author": "Rebecca Turner (http://re-becca.org/)", - "license": "ISC", - "repository": { - "type": "git", - "url": "https://github.com/iarna/wide-align" - }, - "//": "But not version 5 of string-width, as that's ESM only", - "dependencies": { - "string-width": "^1.0.2 || 2 || 3 || 4" - }, - "devDependencies": { - "tap": "*" - }, - "files": [ - "align.js" - ] -} diff --git a/deps/npm/package.json b/deps/npm/package.json index d157883a10bbc7..1aae41fbe75769 100644 --- a/deps/npm/package.json +++ b/deps/npm/package.json @@ -1,5 +1,5 @@ { - "version": "10.5.2", + "version": "10.7.0", "name": "npm", "description": "a package manager for JavaScript", "workspaces": [ @@ -27,7 +27,7 @@ "author": "GitHub Inc.", "repository": { "type": "git", - "url": "https://github.com/npm/cli.git" + "url": "git+https://github.com/npm/cli.git" }, "bugs": { "url": "https://github.com/npm/cli/issues" @@ -56,10 +56,10 @@ "@npmcli/config": "^8.0.2", "@npmcli/fs": "^3.1.0", "@npmcli/map-workspaces": "^3.0.6", - "@npmcli/package-json": "^5.0.2", + "@npmcli/package-json": "^5.1.0", "@npmcli/promise-spawn": "^7.0.1", - "@npmcli/redact": "^1.1.0", - "@npmcli/run-script": "^7.0.4", + "@npmcli/redact": "^2.0.0", + "@npmcli/run-script": "^8.1.0", "@sigstore/tuf": "^2.3.2", "abbrev": "^2.0.0", "archy": "~1.0.0", @@ -67,8 +67,6 @@ "chalk": "^5.3.0", "ci-info": "^4.0.0", "cli-columns": "^4.0.0", - "cli-table3": "^0.6.4", - "columnify": "^1.6.0", "fastest-levenshtein": "^1.0.16", "fs-minipass": "^3.0.3", "glob": "^10.3.12", @@ -80,16 +78,16 @@ "json-parse-even-better-errors": "^3.0.1", "libnpmaccess": "^8.0.1", "libnpmdiff": "^6.0.3", - "libnpmexec": "^7.0.4", + "libnpmexec": "^8.0.0", "libnpmfund": "^5.0.1", "libnpmhook": "^10.0.0", 
"libnpmorg": "^6.0.1", - "libnpmpack": "^6.0.3", + "libnpmpack": "^7.0.0", "libnpmpublish": "^9.0.2", "libnpmsearch": "^7.0.0", "libnpmteam": "^6.0.0", - "libnpmversion": "^5.0.1", - "make-fetch-happen": "^13.0.0", + "libnpmversion": "^6.0.0", + "make-fetch-happen": "^13.0.1", "minimatch": "^9.0.4", "minipass": "^7.0.4", "minipass-pipeline": "^1.2.4", @@ -99,16 +97,15 @@ "normalize-package-data": "^6.0.0", "npm-audit-report": "^5.0.0", "npm-install-checks": "^6.3.0", - "npm-package-arg": "^11.0.1", + "npm-package-arg": "^11.0.2", "npm-pick-manifest": "^9.0.0", - "npm-profile": "^9.0.0", - "npm-registry-fetch": "^16.2.0", + "npm-profile": "^9.0.2", + "npm-registry-fetch": "^17.0.0", "npm-user-validate": "^2.0.0", - "npmlog": "^7.0.1", "p-map": "^4.0.0", - "pacote": "^17.0.6", + "pacote": "^18.0.3", "parse-conflict-json": "^3.0.1", - "proc-log": "^3.0.0", + "proc-log": "^4.2.0", "qrcode-terminal": "^0.12.0", "read": "^3.0.1", "semver": "^7.6.0", @@ -140,8 +137,6 @@ "chalk", "ci-info", "cli-columns", - "cli-table3", - "columnify", "fastest-levenshtein", "fs-minipass", "glob", @@ -177,7 +172,6 @@ "npm-profile", "npm-registry-fetch", "npm-user-validate", - "npmlog", "p-map", "pacote", "parse-conflict-json", @@ -199,7 +193,7 @@ "devDependencies": { "@npmcli/docs": "^1.0.0", "@npmcli/eslint-config": "^4.0.2", - "@npmcli/git": "^5.0.5", + "@npmcli/git": "^5.0.6", "@npmcli/mock-globals": "^1.0.0", "@npmcli/mock-registry": "^1.0.0", "@npmcli/template-oss": "4.21.3", @@ -207,20 +201,21 @@ "ajv": "^8.12.0", "ajv-formats": "^2.1.1", "ajv-formats-draft2019": "^1.6.1", + "cli-table3": "^0.6.4", "diff": "^5.2.0", - "licensee": "^10.0.0", "nock": "^13.4.0", "npm-packlist": "^8.0.2", "remark": "^14.0.2", "remark-gfm": "^3.0.1", "remark-github": "^11.2.4", + "rimraf": "^5.0.5", "spawk": "^1.7.1", "tap": "^16.3.9" }, "scripts": { "dependencies": "node scripts/bundle-and-gitignore-deps.js && node scripts/dependency-graph.js", "dumpconf": "env | grep npm | sort | uniq", - "licenses": "licensee --production --errors-only", + "licenses": "npx licensee --production --errors-only", "test": "tap", "test:nocolor": "CI=true tap -Rclassic", "test-all": "node . run test -ws -iwr --if-present", diff --git a/deps/npm/tap-snapshots/test/lib/utils/exit-handler.js.test.cjs b/deps/npm/tap-snapshots/test/lib/cli/exit-handler.js.test.cjs similarity index 63% rename from deps/npm/tap-snapshots/test/lib/utils/exit-handler.js.test.cjs rename to deps/npm/tap-snapshots/test/lib/cli/exit-handler.js.test.cjs index 3e7bc4570dd4ad..cedb56642f26d7 100644 --- a/deps/npm/tap-snapshots/test/lib/utils/exit-handler.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/cli/exit-handler.js.test.cjs @@ -5,63 +5,62 @@ * Make sure to inspect the output below. Do not ignore changes! 
*/ 'use strict' -exports[`test/lib/utils/exit-handler.js TAP handles unknown error with logs and debug file > debug file contents 1`] = ` +exports[`test/lib/cli/exit-handler.js TAP handles unknown error with logs and debug file > debug file contents 1`] = ` XX timing npm:load:whichnode Completed in {TIME}ms -XX timing config:load Completed in {TIME}ms +XX silly config:load:file:{CWD}/npmrc +XX silly config:load:file:{CWD}/prefix/.npmrc +XX silly config:load:file:{CWD}/home/.npmrc +XX silly config:load:file:{CWD}/global/etc/npmrc XX timing npm:load:configload Completed in {TIME}ms XX timing npm:load:mkdirpcache Completed in {TIME}ms XX timing npm:load:mkdirplogs Completed in {TIME}ms XX verbose title npm -XX verbose argv "--fetch-retries" "0" "--cache" "{CWD}/cache" "--loglevel" "notice" +XX verbose argv "--fetch-retries" "0" "--cache" "{CWD}/cache" "--loglevel" "silly" "--color" "false" "--timing" "true" XX timing npm:load:setTitle Completed in {TIME}ms -XX timing npm:load:display Completed in {TIME}ms XX verbose logfile logs-max:10 dir:{CWD}/cache/_logs/{DATE}- XX verbose logfile {CWD}/cache/_logs/{DATE}-debug-0.log -XX timing npm:load:logFile Completed in {TIME}ms -XX timing npm:load:timers Completed in {TIME}ms -XX timing npm:load:configScope Completed in {TIME}ms XX timing npm:load Completed in {TIME}ms XX verbose stack Error: Unknown error XX verbose cwd {CWD}/prefix -XX verbose Foo 1.0.0 -XX verbose node v1.0.0 -XX verbose npm v1.0.0 +XX verbose {OS} +XX verbose {NODE-VERSION} +XX verbose npm {NPM-VERSION} XX error code ECODE XX error ERR SUMMARY Unknown error XX error ERR DETAIL Unknown error XX verbose exit 1 -XX timing npm Completed in {TIME}ms XX verbose code 1 +XX timing npm Completed in {TIME}ms +XX info timing Timing info written to: {CWD}/cache/_logs/{DATE}-timing.json XX error A complete log of this run can be found in: {CWD}/cache/_logs/{DATE}-debug-0.log ` -exports[`test/lib/utils/exit-handler.js TAP handles unknown error with logs and debug file > logs 1`] = ` +exports[`test/lib/cli/exit-handler.js TAP handles unknown error with logs and debug file > logs 1`] = ` timing npm:load:whichnode Completed in {TIME}ms -timing config:load Completed in {TIME}ms +silly config:load:file:{CWD}/npmrc +silly config:load:file:{CWD}/prefix/.npmrc +silly config:load:file:{CWD}/home/.npmrc +silly config:load:file:{CWD}/global/etc/npmrc timing npm:load:configload Completed in {TIME}ms timing npm:load:mkdirpcache Completed in {TIME}ms timing npm:load:mkdirplogs Completed in {TIME}ms verbose title npm -verbose argv "--fetch-retries" "0" "--cache" "{CWD}/cache" "--loglevel" "notice" +verbose argv "--fetch-retries" "0" "--cache" "{CWD}/cache" "--loglevel" "silly" "--color" "false" "--timing" "true" timing npm:load:setTitle Completed in {TIME}ms -timing npm:load:display Completed in {TIME}ms verbose logfile logs-max:10 dir:{CWD}/cache/_logs/{DATE}- verbose logfile {CWD}/cache/_logs/{DATE}-debug-0.log -timing npm:load:logFile Completed in {TIME}ms -timing npm:load:timers Completed in {TIME}ms -timing npm:load:configScope Completed in {TIME}ms timing npm:load Completed in {TIME}ms verbose stack Error: Unknown error verbose cwd {CWD}/prefix -verbose Foo 1.0.0 -verbose node v1.0.0 -verbose npm v1.0.0 +verbose {OS} +verbose {NODE-VERSION} +verbose npm {NPM-VERSION} error code ECODE error ERR SUMMARY Unknown error error ERR DETAIL Unknown error verbose exit 1 -timing npm Completed in {TIME}ms verbose code 1 -error A complete log of this run can be found in: {CWD}/cache/_logs/{DATE}-debug-0.log -silly 
logfile done cleaning log files +timing npm Completed in {TIME}ms +info timing Timing info written to: {CWD}/cache/_logs/{DATE}-timing.json +error A complete log of this run can be found in: {CWD}/cache/_logs/{DATE}-debug-0.log ` diff --git a/deps/npm/tap-snapshots/test/lib/cli/update-notifier.js.test.cjs b/deps/npm/tap-snapshots/test/lib/cli/update-notifier.js.test.cjs new file mode 100644 index 00000000000000..244d5216340f80 --- /dev/null +++ b/deps/npm/tap-snapshots/test/lib/cli/update-notifier.js.test.cjs @@ -0,0 +1,102 @@ +/* IMPORTANT + * This snapshot file is auto-generated, but designed for humans. + * It should be checked into source control and tracked carefully. + * Re-generate by setting TAP_SNAPSHOT=1 and running tests. + * Make sure to inspect the output below. Do not ignore changes! + */ +'use strict' +exports[`test/lib/cli/update-notifier.js TAP notification situations 122.420.69 - color=always > must match snapshot 1`] = ` + +New major version of npm available! 122.420.69 -> 123.420.69 +Changelog: https://github.com/npm/cli/releases/tag/v123.420.69 +To update run: npm install -g npm@123.420.69 + +` + +exports[`test/lib/cli/update-notifier.js TAP notification situations 122.420.69 - color=false > must match snapshot 1`] = ` + +New major version of npm available! 122.420.69 -> 123.420.69 +Changelog: https://github.com/npm/cli/releases/tag/v123.420.69 +To update run: npm install -g npm@123.420.69 + +` + +exports[`test/lib/cli/update-notifier.js TAP notification situations 123.419.69 - color=always > must match snapshot 1`] = ` + +New minor version of npm available! 123.419.69 -> 123.420.69 +Changelog: https://github.com/npm/cli/releases/tag/v123.420.69 +To update run: npm install -g npm@123.420.69 + +` + +exports[`test/lib/cli/update-notifier.js TAP notification situations 123.419.69 - color=false > must match snapshot 1`] = ` + +New minor version of npm available! 123.419.69 -> 123.420.69 +Changelog: https://github.com/npm/cli/releases/tag/v123.420.69 +To update run: npm install -g npm@123.420.69 + +` + +exports[`test/lib/cli/update-notifier.js TAP notification situations 123.420.68 - color=always > must match snapshot 1`] = ` + +New patch version of npm available! 123.420.68 -> 123.420.69 +Changelog: https://github.com/npm/cli/releases/tag/v123.420.69 +To update run: npm install -g npm@123.420.69 + +` + +exports[`test/lib/cli/update-notifier.js TAP notification situations 123.420.68 - color=false > must match snapshot 1`] = ` + +New patch version of npm available! 123.420.68 -> 123.420.69 +Changelog: https://github.com/npm/cli/releases/tag/v123.420.69 +To update run: npm install -g npm@123.420.69 + +` + +exports[`test/lib/cli/update-notifier.js TAP notification situations 123.420.70 - color=always > must match snapshot 1`] = ` + +New minor version of npm available! 123.420.70 -> 123.421.70 +Changelog: https://github.com/npm/cli/releases/tag/v123.421.70 +To update run: npm install -g npm@123.421.70 + +` + +exports[`test/lib/cli/update-notifier.js TAP notification situations 123.420.70 - color=false > must match snapshot 1`] = ` + +New minor version of npm available! 123.420.70 -> 123.421.70 +Changelog: https://github.com/npm/cli/releases/tag/v123.421.70 +To update run: npm install -g npm@123.421.70 + +` + +exports[`test/lib/cli/update-notifier.js TAP notification situations 123.421.69 - color=always > must match snapshot 1`] = ` + +New patch version of npm available! 
123.421.69 -> 123.421.70 +Changelog: https://github.com/npm/cli/releases/tag/v123.421.70 +To update run: npm install -g npm@123.421.70 + +` + +exports[`test/lib/cli/update-notifier.js TAP notification situations 123.421.69 - color=false > must match snapshot 1`] = ` + +New patch version of npm available! 123.421.69 -> 123.421.70 +Changelog: https://github.com/npm/cli/releases/tag/v123.421.70 +To update run: npm install -g npm@123.421.70 + +` + +exports[`test/lib/cli/update-notifier.js TAP notification situations 124.0.0-beta.0 - color=always > must match snapshot 1`] = ` + +New prerelease version of npm available! 124.0.0-beta.0 -> 124.0.0-beta.99999 +Changelog: https://github.com/npm/cli/releases/tag/v124.0.0-beta.99999 +To update run: npm install -g npm@124.0.0-beta.99999 + +` + +exports[`test/lib/cli/update-notifier.js TAP notification situations 124.0.0-beta.0 - color=false > must match snapshot 1`] = ` + +New prerelease version of npm available! 124.0.0-beta.0 -> 124.0.0-beta.99999 +Changelog: https://github.com/npm/cli/releases/tag/v124.0.0-beta.99999 +To update run: npm install -g npm@124.0.0-beta.99999 + +` diff --git a/deps/npm/tap-snapshots/test/lib/commands/audit.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/audit.js.test.cjs index 7611191688268c..21c22b26c12e6b 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/audit.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/audit.js.test.cjs @@ -45,7 +45,6 @@ exports[`test/lib/commands/audit.js TAP audit signatures ignores optional depend audited 1 package in xxx 1 package has a verified registry signature - ` exports[`test/lib/commands/audit.js TAP audit signatures json output with invalid and missing signatures > must match snapshot 1`] = ` @@ -131,14 +130,12 @@ exports[`test/lib/commands/audit.js TAP audit signatures multiple registries wit audited 2 packages in xxx 2 packages have verified registry signatures - ` exports[`test/lib/commands/audit.js TAP audit signatures omit dev dependencies with missing signature > must match snapshot 1`] = ` audited 1 package in xxx 1 package has a verified registry signature - ` exports[`test/lib/commands/audit.js TAP audit signatures output details about missing signatures > must match snapshot 1`] = ` @@ -157,7 +154,6 @@ audited 1 package in xxx @npmcli/arborist@1.0.14 (https://verdaccio-clone.org/) Someone might have tampered with this package since it was published on the registry! - ` exports[`test/lib/commands/audit.js TAP audit signatures third-party registry with keys and missing signatures errors > must match snapshot 1`] = ` @@ -172,21 +168,18 @@ exports[`test/lib/commands/audit.js TAP audit signatures third-party registry wi audited 1 package in xxx 1 package has a verified registry signature - ` exports[`test/lib/commands/audit.js TAP audit signatures third-party registry with sub-path (trailing slash) > must match snapshot 1`] = ` audited 1 package in xxx 1 package has a verified registry signature - ` exports[`test/lib/commands/audit.js TAP audit signatures third-party registry with sub-path > must match snapshot 1`] = ` audited 1 package in xxx 1 package has a verified registry signature - ` exports[`test/lib/commands/audit.js TAP audit signatures with both invalid and missing signatures > must match snapshot 1`] = ` @@ -201,14 +194,12 @@ async@1.1.1 (https://registry.npmjs.org/) kms-demo@1.0.0 (https://registry.npmjs.org/) Someone might have tampered with this package since it was published on the registry! 
- ` exports[`test/lib/commands/audit.js TAP audit signatures with bundled and peer deps and no signatures > must match snapshot 1`] = ` audited 1 package in xxx 1 package has a verified registry signature - ` exports[`test/lib/commands/audit.js TAP audit signatures with invalid attestations > must match snapshot 1`] = ` @@ -219,7 +210,6 @@ audited 1 package in xxx sigstore@1.0.0 (https://registry.npmjs.org/) Someone might have tampered with this package since it was published on the registry! - ` exports[`test/lib/commands/audit.js TAP audit signatures with invalid signatures > must match snapshot 1`] = ` @@ -230,25 +220,22 @@ audited 1 package in xxx kms-demo@1.0.0 (https://registry.npmjs.org/) Someone might have tampered with this package since it was published on the registry! - ` -exports[`test/lib/commands/audit.js TAP audit signatures with invalid signtaures and color output enabled > must match snapshot 1`] = ` +exports[`test/lib/commands/audit.js TAP audit signatures with invalid signatures and color output enabled > must match snapshot 1`] = ` audited 1 package in xxx -1 package has an invalid registry signature: +1 package has an invalid registry signature: kms-demo@1.0.0 (https://registry.npmjs.org/) Someone might have tampered with this package since it was published on the registry! - ` exports[`test/lib/commands/audit.js TAP audit signatures with key fallback to legacy API > must match snapshot 1`] = ` audited 1 package in xxx 1 package has a verified registry signature - ` exports[`test/lib/commands/audit.js TAP audit signatures with keys but missing signature > must match snapshot 1`] = ` @@ -268,7 +255,6 @@ sigstore@1.0.0 (https://registry.npmjs.org/) tuf-js@1.0.0 (https://registry.npmjs.org/) Someone might have tampered with these packages since they were published on the registry! - ` exports[`test/lib/commands/audit.js TAP audit signatures with multiple invalid signatures > must match snapshot 1`] = ` @@ -280,7 +266,6 @@ async@1.1.1 (https://registry.npmjs.org/) kms-demo@1.0.0 (https://registry.npmjs.org/) Someone might have tampered with these packages since they were published on the registry! - ` exports[`test/lib/commands/audit.js TAP audit signatures with multiple missing signatures > must match snapshot 1`] = ` @@ -302,7 +287,6 @@ audited 3 packages in xxx node-fetch@1.6.0 (https://registry.npmjs.org/) Someone might have tampered with this package since it was published on the registry! 
- ` exports[`test/lib/commands/audit.js TAP audit signatures with valid and missing signatures > must match snapshot 1`] = ` @@ -321,35 +305,30 @@ audited 1 package in xxx 1 package has a verified registry signature 1 package has a verified attestation - ` exports[`test/lib/commands/audit.js TAP audit signatures with valid signatures > must match snapshot 1`] = ` audited 1 package in xxx 1 package has a verified registry signature - ` exports[`test/lib/commands/audit.js TAP audit signatures with valid signatures using alias > must match snapshot 1`] = ` audited 1 package in xxx 1 package has a verified registry signature - ` exports[`test/lib/commands/audit.js TAP audit signatures workspaces verifies registry deps and ignores local workspace deps > must match snapshot 1`] = ` audited 3 packages in xxx 3 packages have verified registry signatures - ` exports[`test/lib/commands/audit.js TAP audit signatures workspaces verifies registry deps when filtering by workspace name > must match snapshot 1`] = ` audited 2 packages in xxx 2 packages have verified registry signatures - ` exports[`test/lib/commands/audit.js TAP fallback audit > must match snapshot 1`] = ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/completion.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/completion.js.test.cjs index 089d92440f653f..a538e3c0688633 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/completion.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/completion.js.test.cjs @@ -7,12 +7,10 @@ 'use strict' exports[`test/lib/commands/completion.js TAP completion --no- flags > flags 1`] = ` Array [ - Array [ - String( - --no-version - --no-versions - ), - ], + String( + --no-version + --no-versions + ), ] ` @@ -42,133 +40,131 @@ Array [] exports[`test/lib/commands/completion.js TAP completion double dashes escape from flag completion > full command list 1`] = ` Array [ - Array [ - String( - access - adduser - audit - bugs - cache - ci - completion - config - dedupe - deprecate - diff - dist-tag - docs - doctor - edit - exec - explain - explore - find-dupes - fund - get - help - help-search - hook - init - install - install-ci-test - install-test - link - ll - login - logout - ls - org - outdated - owner - pack - ping - pkg - prefix - profile - prune - publish - query - rebuild - repo - restart - root - run-script - sbom - search - set - shrinkwrap - star - stars - start - stop - team - test - token - uninstall - unpublish - unstar - update - version - view - whoami - author - home - issues - info - show - find - add - unlink - remove - rm - r - un - rb - list - ln - create - i - it - cit - up - c - s - se - tst - t - ddp - v - run - clean-install - clean-install-test - x - why - la - verison - ic - innit - in - ins - inst - insta - instal - isnt - isnta - isntal - isntall - install-clean - isntall-clean - hlep - dist-tags - upgrade - udpate - rum - sit - urn - ogr - add-user - ), - ], + String( + access + adduser + audit + bugs + cache + ci + completion + config + dedupe + deprecate + diff + dist-tag + docs + doctor + edit + exec + explain + explore + find-dupes + fund + get + help + help-search + hook + init + install + install-ci-test + install-test + link + ll + login + logout + ls + org + outdated + owner + pack + ping + pkg + prefix + profile + prune + publish + query + rebuild + repo + restart + root + run-script + sbom + search + set + shrinkwrap + star + stars + start + stop + team + test + token + uninstall + unpublish + unstar + update + version + view + whoami + author + home + 
issues + info + show + find + add + unlink + remove + rm + r + un + rb + list + ln + create + i + it + cit + up + c + s + se + tst + t + ddp + v + run + clean-install + clean-install-test + x + why + la + verison + ic + innit + in + ins + inst + insta + instal + isnt + isnta + isntal + isntall + install-clean + isntall-clean + hlep + dist-tags + upgrade + udpate + rum + sit + urn + ogr + add-user + ), ] ` @@ -178,52 +174,44 @@ Array [] exports[`test/lib/commands/completion.js TAP completion flags > flags 1`] = ` Array [ - Array [ - String( - --version - --versions - --viewer - --verbose - --v - ), - ], + String( + --version + --versions + --viewer + --verbose + --v + ), ] ` exports[`test/lib/commands/completion.js TAP completion multiple command names > multiple command names 1`] = ` Array [ - Array [ - String( - access - adduser - audit - author - add - add-user - ), - ], + String( + access + adduser + audit + author + add + add-user + ), ] ` exports[`test/lib/commands/completion.js TAP completion single command name > single command name 1`] = ` Array [ - Array [ - "config", - ], + "config", ] ` exports[`test/lib/commands/completion.js TAP completion subcommand completion > subcommands 1`] = ` Array [ - Array [ - String( - get - grant - list - revoke - set - ), - ], + String( + get + grant + list + revoke + set + ), ] ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/config.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/config.js.test.cjs index 9d67091f7a0d4e..c018a356f7d21c 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/config.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/config.js.test.cjs @@ -7,9 +7,7 @@ 'use strict' exports[`test/lib/commands/config.js TAP config list --json > output matches snapshot 1`] = ` { - "prefix": "{LOCALPREFIX}", - "userconfig": "{HOME}/.npmrc", - "cache": "{NPMDIR}/test/lib/commands/tap-testdir-config-config-list---json-sandbox/cache", + "cache": "{CACHE}", "json": true, "projectloaded": "yes", "userloaded": "yes", @@ -31,7 +29,7 @@ exports[`test/lib/commands/config.js TAP config list --json > output matches sna "call": "", "cert": null, "cidr": null, - "color": true, + "color": {COLOR}, "commit-hooks": true, "cpu": null, "depth": null, @@ -62,7 +60,7 @@ exports[`test/lib/commands/config.js TAP config list --json > output matches sna "git": "git", "git-tag-version": true, "global": false, - "globalconfig": "{GLOBALPREFIX}/npmrc", + "globalconfig": "{CWD}/global/etc/npmrc", "global-style": false, "heading": "npm", "https-proxy": null, @@ -75,13 +73,13 @@ exports[`test/lib/commands/config.js TAP config list --json > output matches sna "init-author-name": "", "init-author-url": "", "init-license": "ISC", - "init-module": "{HOME}/.npm-init.js", + "init-module": "{CWD}/home/.npm-init.js", "init-version": "1.0.0", "init.author.email": "", "init.author.name": "", "init.author.url": "", "init.license": "ISC", - "init.module": "{HOME}/.npm-init.js", + "init.module": "{CWD}/home/.npm-init.js", "init.version": "1.0.0", "install-links": false, "install-strategy": "hoisted", @@ -118,9 +116,10 @@ exports[`test/lib/commands/config.js TAP config list --json > output matches sna "prefer-dedupe": false, "prefer-offline": false, "prefer-online": false, + "prefix": "{CWD}/global", "preid": "", "production": null, - "progress": true, + "progress": {PROGRESS}, "provenance": false, "provenance-file": null, "proxy": null, @@ -158,6 +157,7 @@ exports[`test/lib/commands/config.js TAP config list --json > output matches sna "update-notifier": true, 
"usage": false, "user-agent": "npm/{npm-version} node/{node-version} {platform} {arch} workspaces/{workspaces} {ci}", + "userconfig": "{CWD}/home/.npmrc", "version": false, "versions": false, "viewer": "{VIEWER}", @@ -192,7 +192,7 @@ cafile = null call = "" cert = null cidr = null -color = true +color = {COLOR} commit-hooks = true cpu = null depth = null @@ -224,7 +224,7 @@ git = "git" git-tag-version = true global = false global-style = false -globalconfig = "{GLOBALPREFIX}/npmrc" +globalconfig = "{CWD}/global/etc/npmrc" heading = "npm" https-proxy = null if-present = false @@ -236,13 +236,13 @@ init-author-email = "" init-author-name = "" init-author-url = "" init-license = "ISC" -init-module = "{HOME}/.npm-init.js" +init-module = "{CWD}/home/.npm-init.js" init-version = "1.0.0" init.author.email = "" init.author.name = "" init.author.url = "" init.license = "ISC" -init.module = "{HOME}/.npm-init.js" +init.module = "{CWD}/home/.npm-init.js" init.version = "1.0.0" install-links = false install-strategy = "hoisted" @@ -279,10 +279,10 @@ parseable = false prefer-dedupe = false prefer-offline = false prefer-online = false -; prefix = "{REALGLOBALREFIX}" ; overridden by cli +prefix = "{CWD}/global" preid = "" production = null -progress = true +progress = {PROGRESS} provenance = false provenance-file = null proxy = null @@ -320,7 +320,7 @@ unicode = false update-notifier = true usage = false user-agent = "npm/{npm-version} node/{node-version} {platform} {arch} workspaces/{workspaces} {ci}" -; userconfig = "{HOME}/.npmrc" ; overridden by cli +userconfig = "{CWD}/home/.npmrc" version = false versions = false viewer = "{VIEWER}" @@ -330,98 +330,81 @@ workspaces = null workspaces-update = true yes = null -; "global" config from {GLOBALPREFIX}/npmrc +; "global" config from {CWD}/global/etc/npmrc globalloaded = "yes" -; "user" config from {HOME}/.npmrc +; "user" config from {CWD}/home/.npmrc userloaded = "yes" -; "project" config from {LOCALPREFIX}/.npmrc +; "project" config from {CWD}/prefix/.npmrc projectloaded = "yes" ; "cli" config from command line options -cache = "{NPMDIR}/test/lib/commands/tap-testdir-config-config-list---long-sandbox/cache" +cache = "{CACHE}" long = true -prefix = "{LOCALPREFIX}" -userconfig = "{HOME}/.npmrc" ` exports[`test/lib/commands/config.js TAP config list > output matches snapshot 1`] = ` -; "global" config from {GLOBALPREFIX}/npmrc +; "global" config from {CWD}/global/etc/npmrc globalloaded = "yes" -; "user" config from {HOME}/.npmrc +; "user" config from {CWD}/home/.npmrc userloaded = "yes" -; "project" config from {LOCALPREFIX}/.npmrc +; "project" config from {CWD}/prefix/.npmrc projectloaded = "yes" ; "cli" config from command line options -cache = "{NPMDIR}/test/lib/commands/tap-testdir-config-config-list-sandbox/cache" -prefix = "{LOCALPREFIX}" -userconfig = "{HOME}/.npmrc" +cache = "{CACHE}" -; node bin location = {EXECPATH} +; node bin location = {NODE-BIN-LOCATION} ; node version = {NODE-VERSION} -; npm local prefix = {LOCALPREFIX} +; npm local prefix = {CWD}/prefix ; npm version = {NPM-VERSION} -; cwd = {NPMDIR} -; HOME = {HOME} +; cwd = {CWD}/prefix +; HOME = {CWD}/home ; Run \`npm config ls -l\` to show all defaults. 
` -exports[`test/lib/commands/config.js TAP config list with publishConfig > output matches snapshot 1`] = ` +exports[`test/lib/commands/config.js TAP config list with publishConfig global > output matches snapshot 1`] = ` ; "cli" config from command line options -cache = "{NPMDIR}/test/lib/commands/tap-testdir-config-config-list-with-publishConfig-sandbox/cache" -prefix = "{LOCALPREFIX}" -userconfig = "{HOME}/.npmrc" +cache = "{CACHE}" +global = true -; node bin location = {EXECPATH} +; node bin location = {NODE-BIN-LOCATION} ; node version = {NODE-VERSION} -; npm local prefix = {LOCALPREFIX} +; npm local prefix = {CWD}/prefix ; npm version = {NPM-VERSION} -; cwd = {NPMDIR} -; HOME = {HOME} +; cwd = {CWD}/prefix +; HOME = {CWD}/home ; Run \`npm config ls -l\` to show all defaults. +` -; "publishConfig" from {LOCALPREFIX}/package.json -; This set of config values will be used at publish-time. - -_authToken = (protected) -registry = "https://some.registry" -; "env" config from environment - -; cache = "{NPMDIR}/test/lib/commands/tap-testdir-config-config-list-with-publishConfig-sandbox/cache" ; overridden by cli -global-prefix = "{LOCALPREFIX}" -globalconfig = "{GLOBALPREFIX}/npmrc" -init-module = "{HOME}/.npm-init.js" -local-prefix = "{LOCALPREFIX}" -npm-version = "{NPM-VERSION}" -; prefix = "{LOCALPREFIX}" ; overridden by cli -user-agent = "npm/{NPM-VERSION} node/{NODE-VERSION} {PLATFORM} {ARCH} workspaces/false" -; userconfig = "{HOME}/.npmrc" ; overridden by cli - +exports[`test/lib/commands/config.js TAP config list with publishConfig local > output matches snapshot 1`] = ` ; "cli" config from command line options -cache = "{NPMDIR}/test/lib/commands/tap-testdir-config-config-list-with-publishConfig-sandbox/cache" -global = true -prefix = "{LOCALPREFIX}" -userconfig = "{HOME}/.npmrc" +cache = "{CACHE}" -; node bin location = {EXECPATH} +; node bin location = {NODE-BIN-LOCATION} ; node version = {NODE-VERSION} -; npm local prefix = {LOCALPREFIX} +; npm local prefix = {CWD}/prefix ; npm version = {NPM-VERSION} -; cwd = {NPMDIR} -; HOME = {HOME} +; cwd = {CWD}/prefix +; HOME = {CWD}/home ; Run \`npm config ls -l\` to show all defaults. + +; "publishConfig" from {CWD}/prefix/package.json +; This set of config values will be used at publish-time. 
+ +_authToken = (protected) +registry = "https://some.registry" ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/dist-tag.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/dist-tag.js.test.cjs index ebc823e7e06bbd..854f93ff1e5f2c 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/dist-tag.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/dist-tag.js.test.cjs @@ -20,7 +20,22 @@ latest: 1.0.0 ` exports[`test/lib/commands/dist-tag.js TAP ls on missing package > should log no dist-tag found msg 1`] = ` -dist-tag ls Couldn't get dist-tag data for foo@* +dist-tag ls Couldn't get dist-tag data for Result { +dist-tag ls type: 'range', +dist-tag ls registry: true, +dist-tag ls where: undefined, +dist-tag ls raw: 'foo', +dist-tag ls name: 'foo', +dist-tag ls escapedName: 'foo', +dist-tag ls scope: undefined, +dist-tag ls rawSpec: '*', +dist-tag ls saveSpec: null, +dist-tag ls fetchSpec: '*', +dist-tag ls gitRange: undefined, +dist-tag ls gitCommittish: undefined, +dist-tag ls gitSubdir: undefined, +dist-tag ls hosted: undefined +dist-tag ls } ` exports[`test/lib/commands/dist-tag.js TAP ls on named package > should list tags for the specified package 1`] = ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/doctor.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/doctor.js.test.cjs index 98d10c2bb5d4bb..0481c6d86823e8 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/doctor.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/doctor.js.test.cjs @@ -9,123 +9,111 @@ exports[`test/lib/commands/doctor.js TAP all clear > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP all clear > output 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP all clear in color > everything is ok in color 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP all clear in color > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. 
Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "/u001b[94mdoctor/u001b[39m Running checkup", + "/u001b[94mdoctor/u001b[39m Pinging registry", + "/u001b[94mdoctor/u001b[39m Getting npm package information", + "/u001b[94mdoctor/u001b[39m Getting Node.js release information", + "/u001b[94mdoctor/u001b[39m Finding git in your PATH", + "/u001b[94mdoctor/u001b[39m getBinPath Finding npm global bin in your PATH", + "/u001b[94mdoctor/u001b[39m verifyCachedFiles Verifying the npm cache", + String( + /u001b[94mdoctor/u001b[39m verifyCachedFiles Verification complete. Stats: { + /u001b[94mdoctor/u001b[39m "badContentCount": 0, + /u001b[94mdoctor/u001b[39m "reclaimedCount": 0, + /u001b[94mdoctor/u001b[39m "missingContent": 0, + /u001b[94mdoctor/u001b[39m "verifiedContent": 0 + /u001b[94mdoctor/u001b[39m } + ), ], "warn": Array [], } @@ -135,133 +123,116 @@ exports[`test/lib/commands/doctor.js TAP bad proxy > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP bad proxy > output 1`] = ` -Check Value Recommendation/Notes -npm ping not ok Invalid protocol \`ssh:\` connecting to proxy \`npmjs.org\` -npm -v not ok Error: Invalid protocol \`ssh:\` connecting to proxy \`npmjs.org\` -node -v not ok Error: Invalid protocol \`ssh:\` connecting to proxy \`npmjs.org\` -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Not ok +Invalid protocol \`ssh:\` connecting to proxy \`npmjs.org\` +Checking npm version +Not ok +Error: Invalid protocol \`ssh:\` connecting to proxy \`npmjs.org\` +Checking node version +Not ok +Error: Invalid protocol \`ssh:\` connecting to proxy \`npmjs.org\` +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP cacache badContent > corrupted cache content 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 2 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 2 tarballs ` exports[`test/lib/commands/doctor.js TAP cacache badContent > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - 
"getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 1, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 2 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. Stats: { + doctor "badContentCount": 1, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 2 + doctor } + ), ], "warn": Array [ - Array [ - "verifyCachedFiles", - "Corrupted content removed: 1", - ], - Array [ - "verifyCachedFiles", - "Cache issues have been fixed", - ], + "doctor verifyCachedFiles Corrupted content removed: 1", + "doctor verifyCachedFiles Cache issues have been fixed", ], } ` @@ -270,142 +241,118 @@ exports[`test/lib/commands/doctor.js TAP cacache missingContent > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 1, - "verifiedContent": 2 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 1, + doctor "verifiedContent": 2 + doctor } + ), ], "warn": Array [ - Array [ - "verifyCachedFiles", - "Missing content: 1", - ], - Array [ - "verifyCachedFiles", - "Cache issues have been fixed", - ], + "doctor verifyCachedFiles Missing content: 1", + "doctor verifyCachedFiles Cache issues have been fixed", ], } ` exports[`test/lib/commands/doctor.js TAP cacache missingContent > missing content 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 2 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 2 tarballs ` exports[`test/lib/commands/doctor.js TAP cacache reclaimedCount > content garbage collected 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 2 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 2 tarballs ` exports[`test/lib/commands/doctor.js TAP cacache reclaimedCount > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - 
"getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 1, - "missingContent": 0, - "verifiedContent": 2 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 1, + doctor "missingContent": 0, + doctor "verifiedContent": 2 + doctor } + ), ], "warn": Array [ - Array [ - "verifyCachedFiles", - "Content garbage-collected: 1 (undefined bytes)", - ], - Array [ - "verifyCachedFiles", - "Cache issues have been fixed", - ], + "doctor verifyCachedFiles Content garbage-collected: 1 (undefined bytes)", + "doctor verifyCachedFiles Cache issues have been fixed", ], } ` @@ -414,414 +361,348 @@ exports[`test/lib/commands/doctor.js TAP discrete checks cache > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP discrete checks cache > output 1`] = ` -Check Value Recommendation/Notes -Perms check on cached files ok -Verify cache contents ok verified 0 tarballs +Checking permissions on cached files (this may take awhile) +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP discrete checks git > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], + "doctor Running checkup", ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP discrete checks git > output 1`] = ` -Check Value Recommendation/Notes + ` exports[`test/lib/commands/doctor.js TAP discrete checks invalid environment > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], + "doctor Running checkup", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP discrete checks invalid environment > output 1`] = ` -Check Value Recommendation/Notes -git executable in PATH ok /path/to/git -global bin folder in PATH not ok Error: Add {CWD}/global/bin to your $PATH +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Not ok +Error: Add {CWD}/global/bin to your $PATH ` exports[`test/lib/commands/doctor.js TAP discrete checks permissions - not windows > logs 1`] = ` 
Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], + "doctor Running checkup", ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP discrete checks permissions - not windows > output 1`] = ` -Check Value Recommendation/Notes -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok ` exports[`test/lib/commands/doctor.js TAP discrete checks permissions - windows > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], + "doctor Running checkup", ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP discrete checks permissions - windows > output 1`] = ` -Check Value Recommendation/Notes + ` exports[`test/lib/commands/doctor.js TAP discrete checks ping > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], + "doctor Running checkup", + "doctor Pinging registry", ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP discrete checks ping > output 1`] = ` -Check Value Recommendation/Notes -npm ping ok +Connecting to the registry +Ok ` exports[`test/lib/commands/doctor.js TAP discrete checks registry > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], + "doctor Running checkup", + "doctor Pinging registry", ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP discrete checks registry > output 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm config get registry ok using default registry (https://registry.npmjs.org/) +Connecting to the registry +Ok +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) ` exports[`test/lib/commands/doctor.js TAP discrete checks versions > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], + "doctor Running checkup", + "doctor Getting npm package information", + "doctor Getting Node.js release information", ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP discrete checks versions > output 1`] = ` -Check Value Recommendation/Notes -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 ` exports[`test/lib/commands/doctor.js TAP error reading directory > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the 
npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [ - Array [ - "checkFilesPermission", - "error reading directory {CWD}/cache", - ], - Array [ - "checkFilesPermission", - "error reading directory {CWD}/prefix/node_modules", - ], - Array [ - "checkFilesPermission", - "error reading directory {CWD}/global/node_modules", - ], - Array [ - "checkFilesPermission", - "error reading directory {CWD}/prefix/node_modules/.bin", - ], - Array [ - "checkFilesPermission", - "error reading directory {CWD}/global/bin", - ], + "doctor checkFilesPermission error reading directory {CWD}/cache", + "doctor checkFilesPermission error reading directory {CWD}/prefix/node_modules", + "doctor checkFilesPermission error reading directory {CWD}/global/node_modules", + "doctor checkFilesPermission error reading directory {CWD}/prefix/node_modules/.bin", + "doctor checkFilesPermission error reading directory {CWD}/global/bin", ], } ` exports[`test/lib/commands/doctor.js TAP error reading directory > readdir error 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files not ok Check the permissions of files in {CWD}/cache (should be owned by current user) -Perms check on local node_modules not ok Check the permissions of files in {CWD}/prefix/node_modules (should be owned by current user) -Perms check on global node_modules not ok Check the permissions of files in {CWD}/global/node_modules -Perms check on local bin folder not ok Check the permissions of files in {CWD}/prefix/node_modules/.bin -Perms check on global bin folder not ok Check the permissions of files in {CWD}/global/bin -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Not ok +Check the permissions of files in {CWD}/cache (should be owned by current user) +Checking permissions on local node_modules (this may take awhile) +Not ok +Check the permissions of files in {CWD}/prefix/node_modules (should be owned by current user) +Checking permissions on global node_modules (this may take awhile) +Not ok +Check the permissions of files in {CWD}/global/node_modules +Checking permissions on local bin folder +Not ok +Check the permissions of files in {CWD}/prefix/node_modules/.bin +Checking permissions on global 
bin folder +Not ok +Check the permissions of files in {CWD}/global/bin +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP incorrect owner > incorrect owner 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files not ok Check the permissions of files in {CWD}/cache (should be owned by current user) -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Not ok +Check the permissions of files in {CWD}/cache (should be owned by current user) +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP incorrect owner > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [ - Array [ - "checkFilesPermission", - "should be owner of {CWD}/cache/_cacache", - ], + "doctor checkFilesPermission should be owner of {CWD}/cache/_cacache", ], } ` exports[`test/lib/commands/doctor.js TAP incorrect permissions > incorrect owner 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files not ok Check the permissions of files in {CWD}/cache (should be owned by current user) -Perms check on local node_modules not ok Check the permissions of files in {CWD}/prefix/node_modules (should be owned by current user) -Perms check on global node_modules not ok Check the permissions of files in {CWD}/global/node_modules -Perms check on local bin folder not ok Check the permissions of files in {CWD}/prefix/node_modules/.bin -Perms check on global bin folder not ok Check the permissions of files in {CWD}/global/bin -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Not ok +Check the permissions of files in {CWD}/cache (should be owned by current user) +Checking permissions on local node_modules (this may take awhile) +Not ok +Check the permissions of files in {CWD}/prefix/node_modules (should be owned by current user) +Checking permissions on global node_modules (this may take awhile) +Not ok +Check the permissions of files in {CWD}/global/node_modules +Checking permissions on local bin folder +Not ok +Check the permissions of files in {CWD}/prefix/node_modules/.bin +Checking permissions on global bin folder +Not ok +Check the permissions of files in {CWD}/global/bin +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP incorrect permissions > logs 1`] = ` Object { "error": Array [ - Array [ - "checkFilesPermission", - "Missing permissions on {CWD}/cache (expect: readable)", - ], - Array [ - "checkFilesPermission", - "Missing permissions on {CWD}/prefix/node_modules (expect: readable, writable)", - ], - Array [ - "checkFilesPermission", - "Missing permissions on {CWD}/global/node_modules (expect: readable)", - ], - Array [ - "checkFilesPermission", - "Missing permissions on {CWD}/prefix/node_modules/.bin (expect: readable, writable, executable)", - ], - Array [ - "checkFilesPermission", - "Missing permissions on {CWD}/global/bin (expect: executable)", - ], + "doctor checkFilesPermission Missing permissions on {CWD}/cache (expect: readable)", + "doctor checkFilesPermission Missing permissions on {CWD}/prefix/node_modules (expect: readable, writable)", + "doctor checkFilesPermission Missing permissions on {CWD}/global/node_modules (expect: readable)", + "doctor checkFilesPermission Missing permissions on {CWD}/prefix/node_modules/.bin (expect: readable, writable, 
executable)", + "doctor checkFilesPermission Missing permissions on {CWD}/global/bin (expect: executable)", ], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } @@ -831,720 +712,651 @@ exports[`test/lib/commands/doctor.js TAP missing git > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [ - Array [ - Error: test error, - ], + String( + doctor getGitPath Error: test error + doctor at {STACK} + doctor at {STACK} + doctor at {STACK} + doctor at {STACK} + doctor at {STACK} + ), ], } ` exports[`test/lib/commands/doctor.js TAP missing git > missing git 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH not ok Error: Install git and ensure it's in your PATH. 
-global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Not ok +Error: Install git and ensure it's in your PATH. +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP missing global directories > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [ - Array [ - "checkFilesPermission", - "error getting info for {CWD}/global/node_modules", - ], - Array [ - "checkFilesPermission", - "error getting info for {CWD}/global/bin", - ], + "doctor checkFilesPermission error getting info for {CWD}/global/node_modules", + "doctor checkFilesPermission error getting info for {CWD}/global/bin", ], } ` exports[`test/lib/commands/doctor.js TAP missing global directories > missing global directories 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules not ok Check the permissions of files in {CWD}/global/node_modules -Perms check on local bin folder ok -Perms check on global bin folder not ok Check the permissions of files in {CWD}/global/bin -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Not ok +Check the permissions of files in {CWD}/global/node_modules +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Not ok +Check the permissions of files in {CWD}/global/bin +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP missing local node_modules > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP missing local node_modules > missing local node_modules 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP node out of date - current > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP node out of date - current > node is out of date 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v not ok Use node v2.0.1 (current: v2.0.0) -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Not ok +Use node v2.0.1 (current: v2.0.0) +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP node out of date - lts > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP node out of date - lts > node is out of date 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v not ok Use node v1.0.0 (current: v0.0.1) -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Not ok +Use node v1.0.0 (current: v0.0.1) +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP non-default registry > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP non-default registry > non default registry 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry not ok Try \`npm config set registry=https://registry.npmjs.org/\` -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Not ok +Try \`npm config set registry=https://registry.npmjs.org/\` +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP npm out of date > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP npm out of date > npm is out of date 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v not ok Use npm v2.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Ok +Checking npm version +Not ok +Use npm v2.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP ping 404 > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP ping 404 > ping 404 1`] = ` -Check Value Recommendation/Notes -npm ping not ok 404 404 Not Found - GET https://registry.npmjs.org/-/ping?write=true -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Not ok +404 404 Not Found - GET https://registry.npmjs.org/-/ping?write=true +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP ping 404 in color > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "/u001b[94mdoctor/u001b[39m Running checkup", + "/u001b[94mdoctor/u001b[39m Pinging registry", + "/u001b[94mdoctor/u001b[39m Getting npm package information", + "/u001b[94mdoctor/u001b[39m Getting Node.js release information", + "/u001b[94mdoctor/u001b[39m Finding git in your PATH", + "/u001b[94mdoctor/u001b[39m getBinPath Finding npm global bin in your PATH", + "/u001b[94mdoctor/u001b[39m verifyCachedFiles Verifying the npm cache", + String( + /u001b[94mdoctor/u001b[39m verifyCachedFiles Verification complete. 
Stats: { + /u001b[94mdoctor/u001b[39m "badContentCount": 0, + /u001b[94mdoctor/u001b[39m "reclaimedCount": 0, + /u001b[94mdoctor/u001b[39m "missingContent": 0, + /u001b[94mdoctor/u001b[39m "verifiedContent": 0 + /u001b[94mdoctor/u001b[39m } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP ping 404 in color > ping 404 in color 1`] = ` -Check Value Recommendation/Notes -npm ping not ok 404 404 Not Found - GET https://registry.npmjs.org/-/ping?write=true -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Not ok +404 404 Not Found - GET https://registry.npmjs.org/-/ping?write=true +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP ping exception with code > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP ping exception with code > ping failure 1`] = ` -Check Value Recommendation/Notes -npm ping not ok request to https://registry.npmjs.org/-/ping?write=true failed, reason: Test Error -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Not ok +request to https://registry.npmjs.org/-/ping?write=true failed, reason: Test Error +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP ping exception without code > logs 1`] = ` Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", + "doctor verifyCachedFiles Verifying the npm cache", + String( + doctor verifyCachedFiles Verification complete. 
Stats: { + doctor "badContentCount": 0, + doctor "reclaimedCount": 0, + doctor "missingContent": 0, + doctor "verifiedContent": 0 + doctor } + ), ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP ping exception without code > ping failure 1`] = ` -Check Value Recommendation/Notes -npm ping not ok request to https://registry.npmjs.org/-/ping?write=true failed, reason: Test Error -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global/bin -Perms check on cached files ok -Perms check on local node_modules ok -Perms check on global node_modules ok -Perms check on local bin folder ok -Perms check on global bin folder ok -Verify cache contents ok verified 0 tarballs +Connecting to the registry +Not ok +request to https://registry.npmjs.org/-/ping?write=true failed, reason: Test Error +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global/bin +Checking permissions on cached files (this may take awhile) +Ok +Checking permissions on local node_modules (this may take awhile) +Ok +Checking permissions on global node_modules (this may take awhile) +Ok +Checking permissions on local bin folder +Ok +Checking permissions on global bin folder +Ok +Verifying cache contents (this may take awhile) +Ok +verified 0 tarballs ` exports[`test/lib/commands/doctor.js TAP silent errors > logs 1`] = ` Object { "error": Array [], - "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - ], + "info": Array [], "warn": Array [], } ` @@ -1556,46 +1368,7 @@ exports[`test/lib/commands/doctor.js TAP silent errors > output 1`] = ` exports[`test/lib/commands/doctor.js TAP silent success > logs 1`] = ` Object { "error": Array [], - "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], - Array [ - "verifyCachedFiles", - "Verifying the npm cache", - ], - Array [ - "verifyCachedFiles", - String( - Verification complete. 
Stats: { - "badContentCount": 0, - "reclaimedCount": 0, - "missingContent": 0, - "verifiedContent": 0 - } - ), - ], - ], + "info": Array [], "warn": Array [], } ` @@ -1608,40 +1381,33 @@ exports[`test/lib/commands/doctor.js TAP windows skips permissions checks > logs Object { "error": Array [], "info": Array [ - Array [ - "Running checkup", - ], - Array [ - "checkPing", - "Pinging registry", - ], - Array [ - "getLatestNpmVersion", - "Getting npm package information", - ], - Array [ - "getLatestNodejsVersion", - "Getting Node.js release information", - ], - Array [ - "getGitPath", - "Finding git in your PATH", - ], - Array [ - "getBinPath", - "Finding npm global bin in your PATH", - ], + "doctor Running checkup", + "doctor Pinging registry", + "doctor Getting npm package information", + "doctor Getting Node.js release information", + "doctor Finding git in your PATH", + "doctor getBinPath Finding npm global bin in your PATH", ], "warn": Array [], } ` exports[`test/lib/commands/doctor.js TAP windows skips permissions checks > no permissions checks 1`] = ` -Check Value Recommendation/Notes -npm ping ok -npm -v ok current: v1.0.0, latest: v1.0.0 -node -v ok current: v1.0.0, recommended: v1.0.0 -npm config get registry ok using default registry (https://registry.npmjs.org/) -git executable in PATH ok /path/to/git -global bin folder in PATH ok {CWD}/global +Connecting to the registry +Ok +Checking npm version +Ok +current: v1.0.0, latest: v1.0.0 +Checking node version +Ok +current: v1.0.0, recommended: v1.0.0 +Checking configured npm registry +Ok +using default registry (https://registry.npmjs.org/) +Checking for git executable in PATH +Ok +/path/to/git +Checking for global bin folder in PATH +Ok +{CWD}/global ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/fund.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/fund.js.test.cjs index 011315a9211ef0..28ffd76d5c7360 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/fund.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/fund.js.test.cjs @@ -12,16 +12,15 @@ Run \`npm fund [] --which=1\`, for example, to open the first fund ` exports[`test/lib/commands/fund.js TAP fund colors > should print output with color info 1`] = ` -test-fund-colors@1.0.0 -+-- http://example.com/a -| \`-- a@1.0.0 -\`-- http://example.com/b - | \`-- b@1.0.0, c@1.0.0 - +-- http://example.com/d - | \`-- d@1.0.0 - \`-- http://example.com/e - \`-- e@1.0.0 - +test-fund-colors@1.0.0 ++-- http://example.com/a +| \`-- a@1.0.0 +\`-- http://example.com/b + | \`-- b@1.0.0, c@1.0.0 + +-- http://example.com/d + | \`-- d@1.0.0 + \`-- http://example.com/e + \`-- e@1.0.0 ` exports[`test/lib/commands/fund.js TAP fund containing multi-level nested deps with no funding > should omit dependencies with no funding declared 1`] = ` @@ -30,19 +29,16 @@ nested-no-funding-packages@1.0.0 | \`-- lorem@1.0.0 \`-- http://example.com/donate \`-- bar@1.0.0 - ` exports[`test/lib/commands/fund.js TAP fund in which same maintainer owns all its deps > should print stack packages together 1`] = ` http://example.com/donate \`-- maintainer-owns-all-deps@1.0.0, dep-foo@1.0.0, dep-sub-foo@1.0.0, dep-bar@1.0.0 - ` exports[`test/lib/commands/fund.js TAP fund pkg missing version number > should print name only 1`] = ` http://example.com/foo \`-- foo - ` exports[`test/lib/commands/fund.js TAP fund using bad which value: index too high > should print message about invalid which 1`] = ` @@ -61,7 +57,6 @@ Run \`npm fund [] --which=1\`, for example, to open the first fund 
exports[`test/lib/commands/fund.js TAP fund with no package containing funding > should print empty funding info 1`] = ` no-funding-package@0.0.0 - ` exports[`test/lib/commands/fund.js TAP sub dep with fund info and a parent with no funding info > should nest sub dep as child of root 1`] = ` @@ -70,7 +65,6 @@ test-multiple-funding-sources@1.0.0 | \`-- b@1.0.0 \`-- http://example.com/c \`-- c@1.0.0 - ` exports[`test/lib/commands/fund.js TAP workspaces filter funding info by a specific workspace name > should display only filtered workspace name and its deps 1`] = ` @@ -79,7 +73,6 @@ workspaces-support@1.0.0 | \`-- a@1.0.0 \`-- http://example.com/c \`-- c@1.0.0 - ` exports[`test/lib/commands/fund.js TAP workspaces filter funding info by a specific workspace path > should display only filtered workspace name and its deps 1`] = ` @@ -88,5 +81,4 @@ workspaces-support@1.0.0 | \`-- a@1.0.0 \`-- http://example.com/c \`-- c@1.0.0 - ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/init.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/init.js.test.cjs index 821193a55e1a98..eae04d77d2e82e 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/init.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/init.js.test.cjs @@ -20,5 +20,6 @@ Press ^C at any time to quit. exports[`test/lib/commands/init.js TAP workspaces no args -- yes > should print helper info 1`] = ` + added 1 package in {TIME} ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/ls.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/ls.js.test.cjs index a6e4472cae95a1..184259eafff836 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/ls.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/ls.js.test.cjs @@ -42,14 +42,12 @@ test-npm-ls-ignore-missing-optional@1.2.3 {CWD}/prefix +-- UNMET DEPENDENCY prod-missing@1 +-- prod-ok@1.2.3 \`-- prod-wrong@3.2.1 invalid: "1" from the root project - ` exports[`test/lib/commands/ls.js TAP ls --depth=0 > should output tree containing only top-level dependencies 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix +-- chai@1.0.0 \`-- foo@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls --depth=1 > should output tree containing top-level deps and their deps only 1`] = ` @@ -57,7 +55,6 @@ test-npm-ls@1.0.0 {CWD}/prefix +-- a@1.0.0 | \`-- b@1.0.0 \`-- e@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls --dev > should output tree containing dev deps 1`] = ` @@ -65,13 +62,11 @@ test-npm-ls@1.0.0 {CWD}/prefix \`-- dev-dep@1.0.0 \`-- foo@1.0.0 \`-- dog@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls --link > should output tree containing linked deps 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix \`-- linked-dep@1.0.0 -> ./linked-dep - ` exports[`test/lib/commands/ls.js TAP ls --long --depth=0 > should output tree containing top-level deps with descriptions 1`] = ` @@ -88,7 +83,6 @@ test-npm-ls@1.0.0 | Peer-dep description here \`-- prod-dep@1.0.0 A PROD dep kind of dep - ` exports[`test/lib/commands/ls.js TAP ls --long > should output tree info with descriptions 1`] = ` @@ -111,7 +105,6 @@ test-npm-ls@1.0.0 | A PROD dep kind of dep \`-- dog@2.0.0 A dep that bars - ` exports[`test/lib/commands/ls.js TAP ls --parseable --depth=0 > should output tree containing only top-level dependencies 1`] = ` @@ -318,21 +311,19 @@ test-npm-ls@1.0.0 {CWD}/prefix +-- optional-dep@1.0.0 \`-- prod-dep@1.0.0 \`-- dog@2.0.0 - ` exports[`test/lib/commands/ls.js TAP ls broken resolved field > should NOT print git refs in output tree 1`] = ` npm-broken-resolved-field-test@1.0.0 {CWD}/prefix \`-- a@1.0.1 - ` 
exports[`test/lib/commands/ls.js TAP ls colored output > should output tree containing color info 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix -+-- chai@1.0.0 extraneous -+-- foo@1.0.0 invalid: "^2.0.0" from the root project ++-- chai@1.0.0 extraneous ++-- foo@1.0.0 invalid: "^2.0.0" from the root project | \`-- dog@1.0.0 -\`-- UNMET DEPENDENCY ipsum@^1.0.0 +\`-- UNMET DEPENDENCY ipsum@^1.0.0  ` @@ -341,14 +332,13 @@ test-npm-ls@1.0.0 {CWD}/prefix \`-- a@1.0.0 \`-- b@1.0.0 \`-- a@1.0.0 deduped - ` exports[`test/lib/commands/ls.js TAP ls cycle deps with filter args > should print tree output containing deduped ref 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix -\`-- a@1.0.0 +\`-- a@1.0.0  \`-- b@1.0.0 - \`-- a@1.0.0 deduped + \`-- a@1.0.0 deduped  ` @@ -357,20 +347,17 @@ test-npm-ls@1.0.0 {CWD}/prefix +-- a@1.0.0 | \`-- UNMET DEPENDENCY b@^1.0.0 \`-- UNMET DEPENDENCY b@^1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls default --depth value should be 0 > should output tree containing only top-level dependencies 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix +-- chai@1.0.0 \`-- foo@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls empty location > should print empty result 1`] = ` {CWD}/prefix \`-- (empty) - ` exports[`test/lib/commands/ls.js TAP ls extraneous deps > should output containing problems info 1`] = ` @@ -378,19 +365,16 @@ test-npm-ls@1.0.0 {CWD}/prefix +-- chai@1.0.0 extraneous \`-- foo@1.0.0 \`-- dog@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls filter pkg arg using depth option should list a in top-level only > output 1`] = ` test-pkg-arg-filter-with-depth-opt@1.0.0 {CWD}/prefix \`-- a@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls filter pkg arg using depth option should print empty results msg > output 1`] = ` test-pkg-arg-filter-with-depth-opt@1.0.0 {CWD}/prefix \`-- (empty) - ` exports[`test/lib/commands/ls.js TAP ls filter pkg arg using depth option should print expected result > output 1`] = ` @@ -398,7 +382,6 @@ test-pkg-arg-filter-with-depth-opt@1.0.0 {CWD}/prefix \`-- b@1.0.0 \`-- c@1.0.0 \`-- d@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls filtering by child of missing dep > should print tree and not duplicate child of missing items 1`] = ` @@ -408,13 +391,11 @@ filter-by-child-of-missing-dep@1.0.0 {CWD}/prefix +-- c@1.0.0 extraneous \`-- d@1.0.0 extraneous \`-- c@2.0.0 extraneous - ` exports[`test/lib/commands/ls.js TAP ls from and resolved properties > should not be printed in tree output 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix \`-- simple-output@2.1.1 - ` exports[`test/lib/commands/ls.js TAP ls global > should print tree and not mark top-level items extraneous 1`] = ` @@ -422,14 +403,13 @@ exports[`test/lib/commands/ls.js TAP ls global > should print tree and not mark +-- a@1.0.0 \`-- b@1.0.0 \`-- c@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls invalid deduped dep > should output tree signaling mismatching peer dep in problems 1`] = ` invalid-deduped-dep@1.0.0 {CWD}/prefix +-- a@1.0.0 -| \`-- b@1.0.0 deduped invalid: "^2.0.0" from the root project, "^2.0.0" from node_modules/a -\`-- b@1.0.0 invalid: "^2.0.0" from the root project, "^2.0.0" from node_modules/a +| \`-- b@1.0.0 deduped invalid: "^2.0.0" from the root project, "^2.0.0" from node_modules/a +\`-- b@1.0.0 invalid: "^2.0.0" from the root project, "^2.0.0" from node_modules/a  ` @@ -443,20 +423,17 @@ test-npm-ls@1.0.0 {CWD}/prefix +-- peer-dep@1.0.0 invalid: "^2.0.0" from the root project \`-- prod-dep@1.0.0 \`-- dog@2.0.0 - ` exports[`test/lib/commands/ls.js TAP ls json read problems > should print empty result 
1`] = ` {CWD}/prefix \`-- (empty) - ` exports[`test/lib/commands/ls.js TAP ls loading a tree containing workspaces should filter by parent folder workspace config > output 1`] = ` workspaces-tree@1.0.0 {CWD}/prefix +-- e@1.0.0 -> ./group/e \`-- f@1.0.0 -> ./group/f - ` exports[`test/lib/commands/ls.js TAP ls loading a tree containing workspaces should filter single workspace > output 1`] = ` @@ -464,7 +441,6 @@ workspaces-tree@1.0.0 {CWD}/prefix +-- a@1.0.0 -> ./a | \`-- d@1.0.0 deduped -> ./d \`-- d@1.0.0 -> ./d - ` exports[`test/lib/commands/ls.js TAP ls loading a tree containing workspaces should filter using workspace config > output 1`] = ` @@ -475,7 +451,6 @@ workspaces-tree@1.0.0 {CWD}/prefix \`-- d@1.0.0 -> ./d \`-- foo@1.1.1 \`-- bar@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls loading a tree containing workspaces should inlude root and specified workspace > output 1`] = ` @@ -484,7 +459,6 @@ workspaces-tree@1.0.0 {CWD}/prefix | \`-- foo@1.1.1 | \`-- bar@1.0.0 \`-- pacote@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls loading a tree containing workspaces should list --all workspaces properly > output 1`] = ` @@ -500,7 +474,6 @@ workspaces-tree@1.0.0 {CWD}/prefix +-- e@1.0.0 -> ./group/e +-- f@1.0.0 -> ./group/f \`-- pacote@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls loading a tree containing workspaces should list only prod deps of workspaces > output 1`] = ` @@ -515,20 +488,19 @@ workspaces-tree@1.0.0 {CWD}/prefix +-- e@1.0.0 -> ./group/e +-- f@1.0.0 -> ./group/f \`-- pacote@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls loading a tree containing workspaces should list workspaces properly with default configs > output 1`] = ` workspaces-tree@1.0.0 {CWD}/prefix -+-- a@1.0.0 -> ./a ++-- a@1.0.0 -> ./a | +-- baz@1.0.0 | +-- c@1.0.0 -| \`-- d@1.0.0 deduped -> ./d -+-- b@1.0.0 -> ./b -+-- d@1.0.0 -> ./d +| \`-- d@1.0.0 deduped -> ./d ++-- b@1.0.0 -> ./b ++-- d@1.0.0 -> ./d | \`-- foo@1.1.1 -+-- e@1.0.0 -> ./group/e -+-- f@1.0.0 -> ./group/f ++-- e@1.0.0 -> ./group/e ++-- f@1.0.0 -> ./group/f \`-- pacote@1.0.0  ` @@ -544,7 +516,6 @@ workspaces-tree@1.0.0 {CWD}/prefix \`-- d@1.0.0 -> ./d \`-- foo@1.1.1 \`-- bar@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls missing package.json > should output tree missing name/version of top-level package 1`] = ` @@ -553,7 +524,6 @@ exports[`test/lib/commands/ls.js TAP ls missing package.json > should output tre +-- dog@1.0.0 extraneous \`-- foo@1.0.0 extraneous \`-- dog@1.0.0 deduped - ` exports[`test/lib/commands/ls.js TAP ls missing/invalid/extraneous > should output tree containing missing, invalid, extraneous labels 1`] = ` @@ -562,7 +532,6 @@ test-npm-ls@1.0.0 {CWD}/prefix +-- foo@1.0.0 invalid: "^2.0.0" from the root project | \`-- dog@1.0.0 \`-- UNMET DEPENDENCY ipsum@^1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls no args > should output tree representation of dependencies structure 1`] = ` @@ -570,20 +539,18 @@ test-npm-ls@1.0.0 {CWD}/prefix +-- chai@1.0.0 \`-- foo@1.0.0 \`-- dog@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls overridden dep > should contain overridden outout 1`] = ` test-overridden@1.0.0 {CWD}/prefix \`-- foo@1.0.0 \`-- bar@1.0.0 overridden - ` exports[`test/lib/commands/ls.js TAP ls overridden dep w/ color > should contain overridden outout 1`] = ` test-overridden@1.0.0 {CWD}/prefix \`-- foo@1.0.0 - \`-- bar@1.0.0 overridden + \`-- bar@1.0.0 overridden  ` @@ -592,13 +559,11 @@ print-deduped-symlinks@1.0.0 {CWD}/prefix +-- a@1.0.0 | \`-- b@1.0.0 deduped -> ./b \`-- b@1.0.0 -> ./b - ` 
exports[`test/lib/commands/ls.js TAP ls resolved points to git ref > should output tree containing git refs 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix \`-- abbrev@1.1.1 (git+ssh://git@github.com/isaacs/abbrev-js.git#b8f3a2fc0c3bb8ffd8b0d0072cc6b5a3667e963c) - ` exports[`test/lib/commands/ls.js TAP ls unmet optional dep > should output tree with empty entry for missing optional deps 1`] = ` @@ -607,8 +572,8 @@ exports[`test/lib/commands/ls.js TAP ls unmet optional dep > should output tree +-- dev-dep@1.0.0 | \`-- foo@1.0.0 | \`-- dog@1.0.0 -+-- UNMET OPTIONAL DEPENDENCY missing-optional-dep@^1.0.0 -+-- optional-dep@1.0.0 invalid: "^2.0.0" from the root project ++-- UNMET OPTIONAL DEPENDENCY missing-optional-dep@^1.0.0 ++-- optional-dep@1.0.0 invalid: "^2.0.0" from the root project +-- peer-dep@1.0.0 \`-- prod-dep@1.0.0  \`-- dog@2.0.0 @@ -618,22 +583,20 @@ exports[`test/lib/commands/ls.js TAP ls unmet optional dep > should output tree exports[`test/lib/commands/ls.js TAP ls unmet peer dep > should output tree signaling missing peer dep in problems 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix \`-- UNMET DEPENDENCY peer-dep@* - ` exports[`test/lib/commands/ls.js TAP ls using aliases > should output tree containing aliases 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix \`-- a@npm:b@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls with args and dedupe entries > should print tree output containing deduped ref 1`] = ` dedupe-entries@1.0.0 {CWD}/prefix +-- @npmcli/a@1.0.0 -| \`-- @npmcli/b@1.1.2 deduped -+-- @npmcli/b@1.1.2 +| \`-- @npmcli/b@1.1.2 deduped ++-- @npmcli/b@1.1.2 \`-- @npmcli/c@1.0.0 - \`-- @npmcli/b@1.1.2 deduped + \`-- @npmcli/b@1.1.2 deduped  ` @@ -644,18 +607,16 @@ dedupe-entries@1.0.0 {CWD}/prefix +-- @npmcli/b@1.1.2 | \`-- @npmcli/c@1.0.0 deduped \`-- @npmcli/c@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls with dot filter arg > should output tree contaning only occurrences of filtered by package and colored output 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix \`-- (empty) - ` exports[`test/lib/commands/ls.js TAP ls with filter arg > should output tree contaning only occurrences of filtered by package and colored output 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix -\`-- chai@1.0.0 +\`-- chai@1.0.0  ` @@ -663,13 +624,11 @@ exports[`test/lib/commands/ls.js TAP ls with filter arg nested dep > should outp test-npm-ls@1.0.0 {CWD}/prefix \`-- foo@1.0.0 \`-- dog@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls with missing filter arg > should output tree containing no dependencies info 1`] = ` test-npm-ls@1.0.0 {CWD}/prefix \`-- (empty) - ` exports[`test/lib/commands/ls.js TAP ls with multiple filter args > should output tree contaning only occurrences of multiple filtered packages and their ancestors 1`] = ` @@ -677,7 +636,6 @@ test-npm-ls@1.0.0 {CWD}/prefix +-- chai@1.0.0 \`-- foo@1.0.0 \`-- dog@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls with no args dedupe entries > should print tree output containing deduped ref 1`] = ` @@ -687,7 +645,6 @@ dedupe-entries@1.0.0 {CWD}/prefix +-- @npmcli/b@1.1.2 \`-- @npmcli/c@1.0.0 \`-- @npmcli/b@1.1.2 deduped - ` exports[`test/lib/commands/ls.js TAP ls with no args dedupe entries and not displaying all > should print tree output containing deduped ref 1`] = ` @@ -695,14 +652,12 @@ dedupe-entries@1.0.0 {CWD}/prefix +-- @npmcli/a@1.0.0 +-- @npmcli/b@1.1.2 \`-- @npmcli/c@1.0.0 - ` exports[`test/lib/commands/ls.js TAP ls workspace and missing optional dep > should omit missing optional dep 1`] = ` root@ {CWD}/prefix +-- baz@1.0.0 -> ./baz \`-- foo@1.0.0 - ` 
exports[`test/lib/commands/ls.js TAP show multiple invalid reasons > ls result 1`] = ` @@ -713,5 +668,4 @@ test-npm-ls@1.0.0 {CWD}/prefix | \`-- dog@1.0.0 deduped invalid: "^1.2.3" from the root project, "^2.0.0" from node_modules/cat, "2.x" from node_modules/chai \`-- dog@1.0.0 invalid: "^1.2.3" from the root project, "^2.0.0" from node_modules/cat, "2.x" from node_modules/chai \`-- cat@1.0.0 deduped invalid: "^2.0.0" from the root project - ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/outdated.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/outdated.js.test.cjs index a72338b0bacc56..ec0298fcf4fa7b 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/outdated.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/outdated.js.test.cjs @@ -93,25 +93,25 @@ theta MISSING 1.0.1 1.0.1 - prefix dependencies ` exports[`test/lib/commands/outdated.js TAP should display outdated deps outdated --omit=dev --omit=peer > must match snapshot 1`] = ` -Package Current Wanted Latest Location Depended by -cat 1.0.0 1.0.1 1.0.1 node_modules/cat prefix -dog 1.0.1 1.0.1 2.0.0 node_modules/dog prefix -theta MISSING 1.0.1 1.0.1 - prefix +Package Current Wanted Latest Location Depended by +cat 1.0.0 1.0.1 1.0.1 node_modules/cat prefix +dog 1.0.1 1.0.1 2.0.0 node_modules/dog prefix +theta MISSING 1.0.1 1.0.1 - prefix ` exports[`test/lib/commands/outdated.js TAP should display outdated deps outdated --omit=dev > must match snapshot 1`] = ` -Package Current Wanted Latest Location Depended by -cat 1.0.0 1.0.1 1.0.1 node_modules/cat prefix -chai 1.0.0 1.0.1 1.0.1 node_modules/chai prefix -dog 1.0.1 1.0.1 2.0.0 node_modules/dog prefix -theta MISSING 1.0.1 1.0.1 - prefix +Package Current Wanted Latest Location Depended by +cat 1.0.0 1.0.1 1.0.1 node_modules/cat prefix +chai 1.0.0 1.0.1 1.0.1 node_modules/chai prefix +dog 1.0.1 1.0.1 2.0.0 node_modules/dog prefix +theta MISSING 1.0.1 1.0.1 - prefix ` exports[`test/lib/commands/outdated.js TAP should display outdated deps outdated --omit=prod > must match snapshot 1`] = ` -Package Current Wanted Latest Location Depended by -cat 1.0.0 1.0.1 1.0.1 node_modules/cat prefix -chai 1.0.0 1.0.1 1.0.1 node_modules/chai prefix -dog 1.0.1 1.0.1 2.0.0 node_modules/dog prefix +Package Current Wanted Latest Location Depended by +cat 1.0.0 1.0.1 1.0.1 node_modules/cat prefix +chai 1.0.0 1.0.1 1.0.1 node_modules/chai prefix +dog 1.0.1 1.0.1 2.0.0 node_modules/dog prefix ` exports[`test/lib/commands/outdated.js TAP should display outdated deps outdated --parseable --long > must match snapshot 1`] = ` @@ -129,11 +129,11 @@ exports[`test/lib/commands/outdated.js TAP should display outdated deps outdated ` exports[`test/lib/commands/outdated.js TAP should display outdated deps outdated > must match snapshot 1`] = ` -Package Current Wanted Latest Location Depended by -cat 1.0.0 1.0.1 1.0.1 node_modules/cat prefix -chai 1.0.0 1.0.1 1.0.1 node_modules/chai prefix -dog 1.0.1 1.0.1 2.0.0 node_modules/dog prefix -theta MISSING 1.0.1 1.0.1 - prefix +Package Current Wanted Latest Location Depended by +cat 1.0.0 1.0.1 1.0.1 node_modules/cat prefix +chai 1.0.0 1.0.1 1.0.1 node_modules/chai prefix +dog 1.0.1 1.0.1 2.0.0 node_modules/dog prefix +theta MISSING 1.0.1 1.0.1 - prefix ` exports[`test/lib/commands/outdated.js TAP should display outdated deps outdated global > must match snapshot 1`] = ` @@ -232,8 +232,8 @@ exports[`test/lib/commands/outdated.js TAP workspaces should display ws outdated ` exports[`test/lib/commands/outdated.js TAP workspaces should highlight ws in 
dependend by section > output 1`] = ` -Package Current Wanted Latest Location Depended by -cat 1.0.0 1.0.1 1.0.1 node_modules/cat a@1.0.0 -dog 1.0.1 1.0.1 2.0.0 node_modules/dog prefix -theta MISSING 1.0.1 1.0.1 - c@1.0.0 +Package Current Wanted Latest Location Depended by +cat 1.0.0 1.0.1 1.0.1 node_modules/cat a@1.0.0 +dog 1.0.1 1.0.1 2.0.0 node_modules/dog prefix +theta MISSING 1.0.1 1.0.1 - c@1.0.0 ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/pack.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/pack.js.test.cjs index 5cdcdd9a8d08a5..f8c35e1c2c1fd2 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/pack.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/pack.js.test.cjs @@ -7,64 +7,52 @@ 'use strict' exports[`test/lib/commands/pack.js TAP dry run > logs pack contents 1`] = ` Array [ - undefined, "package: test-package@1.0.0", - undefined, + "Tarball Contents", "41B package.json", - undefined, - String( - name: test-package - version: 1.0.0 - filename: test-package-1.0.0.tgz - package size: {size} - unpacked size: 41 B - shasum: {sha} - integrity: {integrity} - total files: 1 - ), - "", + "Tarball Details", + "name: test-package", + "version: 1.0.0", + "filename: test-package-1.0.0.tgz", + "package size: {size}", + "unpacked size: 41 B", + "shasum: {sha}", + "integrity: {integrity} + "total files: 1", ] ` exports[`test/lib/commands/pack.js TAP foreground-scripts can still be set to false > logs pack contents 1`] = ` Array [ - undefined, "package: test-fg-scripts@0.0.0", - undefined, + "Tarball Contents", "110B package.json", - undefined, - String( - name: test-fg-scripts - version: 0.0.0 - filename: test-fg-scripts-0.0.0.tgz - package size: {size} - unpacked size: 110 B - shasum: {sha} - integrity: {integrity} - total files: 1 - ), - "", + "Tarball Details", + "name: test-fg-scripts", + "version: 0.0.0", + "filename: test-fg-scripts-0.0.0.tgz", + "package size: {size}", + "unpacked size: 110 B", + "shasum: {sha}", + "integrity: {integrity} + "total files: 1", ] ` exports[`test/lib/commands/pack.js TAP foreground-scripts defaults to true > logs pack contents 1`] = ` Array [ - undefined, "package: test-fg-scripts@0.0.0", - undefined, + "Tarball Contents", "110B package.json", - undefined, - String( - name: test-fg-scripts - version: 0.0.0 - filename: test-fg-scripts-0.0.0.tgz - package size: {size} - unpacked size: 110 B - shasum: {sha} - integrity: {integrity} - total files: 1 - ), - "", + "Tarball Details", + "name: test-fg-scripts", + "version: 0.0.0", + "filename: test-fg-scripts-0.0.0.tgz", + "package size: {size}", + "unpacked size: 110 B", + "shasum: {sha}", + "integrity: {integrity} + "total files: 1", ] ` @@ -121,30 +109,37 @@ Array [ "name": "@myscope/test-package", "shasum": "{sha}", "size": "{size}", - "unpackedSize": 50, + "unpackedSize": 88, "version": "1.0.0", }, ], ] ` +exports[`test/lib/commands/pack.js TAP should log scoped package output as valid json > stderr has banners 1`] = ` +Array [ + String( + + > @myscope/test-package@1.0.0 prepack + > echo prepack! 
+ + ), +] +` + exports[`test/lib/commands/pack.js TAP should pack current directory with no arguments > logs pack contents 1`] = ` Array [ - undefined, "package: test-package@1.0.0", - undefined, + "Tarball Contents", "41B package.json", - undefined, - String( - name: test-package - version: 1.0.0 - filename: test-package-1.0.0.tgz - package size: {size} - unpacked size: 41 B - shasum: {sha} - integrity: {integrity} - total files: 1 - ), - "", + "Tarball Details", + "name: test-package", + "version: 1.0.0", + "filename: test-package-1.0.0.tgz", + "package size: {size}", + "unpacked size: 41 B", + "shasum: {sha}", + "integrity: {integrity} + "total files: 1", ] ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/profile.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/profile.js.test.cjs index 4530dbf95cec24..1fbb09de29f3ca 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/profile.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/profile.js.test.cjs @@ -31,19 +31,6 @@ exports[`test/lib/commands/profile.js TAP profile get multiple args default outp foo foo@github.com (verified) https://github.com/npm ` -exports[`test/lib/commands/profile.js TAP profile get no args --color > should output all profile info with color result 1`] = ` -name: foo -email: foo@github.com (verified) -two-factor auth: auth-and-writes -fullname: Foo Bar -homepage: https://github.com -freenode: foobar -twitter: https://twitter.com/npmjs -github: https://github.com/npm -created: 2015-02-26T01:26:37.384Z -updated: 2020-08-12T16:19:35.326Z -` - exports[`test/lib/commands/profile.js TAP profile get no args --parseable > should output all profile info as parseable result 1`] = ` tfa auth-and-writes name foo diff --git a/deps/npm/tap-snapshots/test/lib/commands/publish.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/publish.js.test.cjs index 4f947be5cf5dec..c13834d5d694cc 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/publish.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/publish.js.test.cjs @@ -15,130 +15,55 @@ exports[`test/lib/commands/publish.js TAP bare _auth and registry config > new p exports[`test/lib/commands/publish.js TAP dry-run > must match snapshot 1`] = ` Array [ - Array [ - "", - ], - Array [ - "", - "package: test-package@1.0.0", - ], - Array [ - "=== Tarball Contents ===", - ], - Array [ - "", - "87B package.json", - ], - Array [ - "=== Tarball Details ===", - ], - Array [ - "", - String( - name: test-package - version: 1.0.0 - filename: test-package-1.0.0.tgz - package size: {size} - unpacked size: 87 B - shasum: {sha} - integrity: {integrity} - total files: 1 - ), - ], - Array [ - "", - "", - ], - Array [ - "", - "Publishing to https://registry.npmjs.org/ with tag latest and default access (dry-run)", - ], + "package: test-package@1.0.0", + "Tarball Contents", + "87B package.json", + "Tarball Details", + "name: test-package", + "version: 1.0.0", + "filename: test-package-1.0.0.tgz", + "package size: {size}", + "unpacked size: 87 B", + "shasum: {sha}", + "integrity: {integrity} + "total files: 1", + "Publishing to https://registry.npmjs.org/ with tag latest and default access (dry-run)", ] ` exports[`test/lib/commands/publish.js TAP foreground-scripts can still be set to false > must match snapshot 1`] = ` Array [ - Array [ - "", - ], - Array [ - "", - "package: test-fg-scripts@0.0.0", - ], - Array [ - "=== Tarball Contents ===", - ], - Array [ - "", - "110B package.json", - ], - Array [ - "=== Tarball Details ===", - ], - Array [ - "", - String( - name: 
test-fg-scripts - version: 0.0.0 - filename: test-fg-scripts-0.0.0.tgz - package size: {size} - unpacked size: 110 B - shasum: {sha} - integrity: {integrity} - total files: 1 - ), - ], - Array [ - "", - "", - ], - Array [ - "", - "Publishing to https://registry.npmjs.org/ with tag latest and default access (dry-run)", - ], + "package: test-fg-scripts@0.0.0", + "Tarball Contents", + "110B package.json", + "Tarball Details", + "name: test-fg-scripts", + "version: 0.0.0", + "filename: test-fg-scripts-0.0.0.tgz", + "package size: {size}", + "unpacked size: 110 B", + "shasum: {sha}", + "integrity: {integrity} + "total files: 1", + "Publishing to https://registry.npmjs.org/ with tag latest and default access (dry-run)", ] ` exports[`test/lib/commands/publish.js TAP foreground-scripts defaults to true > must match snapshot 1`] = ` Array [ - Array [ - "", - ], - Array [ - "", - "package: test-fg-scripts@0.0.0", - ], - Array [ - "=== Tarball Contents ===", - ], - Array [ - "", - "110B package.json", - ], - Array [ - "=== Tarball Details ===", - ], - Array [ - "", - String( - name: test-fg-scripts - version: 0.0.0 - filename: test-fg-scripts-0.0.0.tgz - package size: {size} - unpacked size: 110 B - shasum: {sha} - integrity: {integrity} - total files: 1 - ), - ], - Array [ - "", - "", - ], - Array [ - "", - "Publishing to https://registry.npmjs.org/ with tag latest and default access (dry-run)", - ], + "package: test-fg-scripts@0.0.0", + "Tarball Contents", + "110B package.json", + "Tarball Details", + "name: test-fg-scripts", + "version: 0.0.0", + "filename: test-fg-scripts-0.0.0.tgz", + "package size: {size}", + "unpacked size: 110 B", + "shasum: {sha}", + "integrity: {integrity} + "total files: 1", + "Publishing to https://registry.npmjs.org/ with tag latest and default access (dry-run)", ] ` @@ -156,10 +81,7 @@ exports[`test/lib/commands/publish.js TAP ignore-scripts > new package version 1 exports[`test/lib/commands/publish.js TAP json > must match snapshot 1`] = ` Array [ - Array [ - "", - "Publishing to https://registry.npmjs.org/ with tag latest and default access", - ], + "Publishing to https://registry.npmjs.org/ with tag latest and default access", ] ` @@ -332,21 +254,12 @@ exports[`test/lib/commands/publish.js TAP no auth dry-run > must match snapshot exports[`test/lib/commands/publish.js TAP no auth dry-run > warns about auth being needed 1`] = ` Array [ - Array [ - "publish", - "npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", - ], - Array [ - "publish", - String( - errors corrected: - Removed invalid "scripts" - ), - ], - Array [ - "", - "This command requires you to be logged in to https://registry.npmjs.org/ (dry-run)", - ], + "publish npm auto-corrected some errors in your package.json when publishing. 
Please run \\"npm pkg fix\\" to address these errors.", + String( + publish errors corrected: + publish Removed invalid "scripts" + ), + "This command requires you to be logged in to https://registry.npmjs.org/ (dry-run)", ] ` @@ -356,44 +269,19 @@ exports[`test/lib/commands/publish.js TAP prioritize CLI flags over publishConfi exports[`test/lib/commands/publish.js TAP public access > must match snapshot 1`] = ` Array [ - Array [ - "", - ], - Array [ - "", - "package: @npm/test-package@1.0.0", - ], - Array [ - "=== Tarball Contents ===", - ], - Array [ - "", - "55B package.json", - ], - Array [ - "=== Tarball Details ===", - ], - Array [ - "", - String( - name: @npm/test-package - version: 1.0.0 - filename: npm-test-package-1.0.0.tgz - package size: {size} - unpacked size: 55 B - shasum: {sha} - integrity: {integrity} - total files: 1 - ), - ], - Array [ - "", - "", - ], - Array [ - "", - "Publishing to https://registry.npmjs.org/ with tag latest and public access", - ], + "package: @npm/test-package@1.0.0", + "Tarball Contents", + "55B package.json", + "Tarball Details", + "name: @npm/test-package", + "version: 1.0.0", + "filename: npm-test-package-1.0.0.tgz", + "package size: {size}", + "unpacked size: 55 B", + "shasum: {sha}", + "integrity: {integrity} + "total files: 1", + "Publishing to https://registry.npmjs.org/ with tag latest and public access", ] ` @@ -411,44 +299,19 @@ exports[`test/lib/commands/publish.js TAP respects publishConfig.registry, runs exports[`test/lib/commands/publish.js TAP restricted access > must match snapshot 1`] = ` Array [ - Array [ - "", - ], - Array [ - "", - "package: @npm/test-package@1.0.0", - ], - Array [ - "=== Tarball Contents ===", - ], - Array [ - "", - "55B package.json", - ], - Array [ - "=== Tarball Details ===", - ], - Array [ - "", - String( - name: @npm/test-package - version: 1.0.0 - filename: npm-test-package-1.0.0.tgz - package size: {size} - unpacked size: 55 B - shasum: {sha} - integrity: {integrity} - total files: 1 - ), - ], - Array [ - "", - "", - ], - Array [ - "", - "Publishing to https://registry.npmjs.org/ with tag latest and restricted access", - ], + "package: @npm/test-package@1.0.0", + "Tarball Contents", + "55B package.json", + "Tarball Details", + "name: @npm/test-package", + "version: 1.0.0", + "filename: npm-test-package-1.0.0.tgz", + "package size: {size}", + "unpacked size: 55 B", + "shasum: {sha}", + "integrity: {integrity} + "total files: 1", + "Publishing to https://registry.npmjs.org/ with tag latest and restricted access", ] ` @@ -462,47 +325,22 @@ exports[`test/lib/commands/publish.js TAP scoped _auth config scoped registry > exports[`test/lib/commands/publish.js TAP tarball > must match snapshot 1`] = ` Array [ - Array [ - "", - ], - Array [ - "", - "package: test-tar-package@1.0.0", - ], - Array [ - "=== Tarball Contents ===", - ], - Array [ - "", - String( - 26B index.js - 98B package.json - ), - ], - Array [ - "=== Tarball Details ===", - ], - Array [ - "", - String( - name: test-tar-package - version: 1.0.0 - filename: test-tar-package-1.0.0.tgz - package size: {size} - unpacked size: 124 B - shasum: {sha} - integrity: {integrity} - total files: 2 - ), - ], - Array [ - "", - "", - ], - Array [ - "", - "Publishing to https://registry.npmjs.org/ with tag latest and default access", - ], + "package: test-tar-package@1.0.0", + "Tarball Contents", + String( + 26B index.js + 98B package.json + ), + "Tarball Details", + "name: test-tar-package", + "version: 1.0.0", + "filename: test-tar-package-1.0.0.tgz", + "package 
size: {size}", + "unpacked size: 124 B", + "shasum: {sha}", + "integrity: {integrity} + "total files: 2", + "Publishing to https://registry.npmjs.org/ with tag latest and default access", ] ` @@ -518,57 +356,30 @@ exports[`test/lib/commands/publish.js TAP workspaces all workspaces - color > al exports[`test/lib/commands/publish.js TAP workspaces all workspaces - color > warns about skipped private workspace in color 1`] = ` Array [ - Array [ - "publish", - "npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", - ], - Array [ - "publish", - String( - errors corrected: - Removed invalid "scripts" - "repository" was changed from a string to an object - ), - ], - Array [ - "publish", - "npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", - ], - Array [ - "publish", - String( - errors corrected: - Removed invalid "scripts" - "repository" was changed from a string to an object - "repository.url" was normalized to "git+https://github.com/npm/workspace-b.git" - ), - ], - Array [ - "publish", - "npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", - ], - Array [ - "publish", - String( - errors corrected: - Removed invalid "scripts" - ), - ], - Array [ - "publish", - "npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", - ], - Array [ - "publish", - String( - errors corrected: - Removed invalid "scripts" - ), - ], - Array [ - "publish", - "Skipping workspace \\u001b[32mworkspace-p\\u001b[39m, marked as \\u001b[1mprivate\\u001b[22m", - ], + "\\u001b[94mpublish\\u001b[39m npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", + String( + \\u001b[94mpublish\\u001b[39m errors corrected: + \\u001b[94mpublish\\u001b[39m Removed invalid "scripts" + \\u001b[94mpublish\\u001b[39m "repository" was changed from a string to an object + ), + "\\u001b[94mpublish\\u001b[39m npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", + String( + \\u001b[94mpublish\\u001b[39m errors corrected: + \\u001b[94mpublish\\u001b[39m Removed invalid "scripts" + \\u001b[94mpublish\\u001b[39m "repository" was changed from a string to an object + \\u001b[94mpublish\\u001b[39m "repository.url" was normalized to "git+https://github.com/npm/workspace-b.git" + ), + "\\u001b[94mpublish\\u001b[39m npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", + String( + \\u001b[94mpublish\\u001b[39m errors corrected: + \\u001b[94mpublish\\u001b[39m Removed invalid "scripts" + ), + "\\u001b[94mpublish\\u001b[39m npm auto-corrected some errors in your package.json when publishing. 
Please run \\"npm pkg fix\\" to address these errors.", + String( + \\u001b[94mpublish\\u001b[39m errors corrected: + \\u001b[94mpublish\\u001b[39m Removed invalid "scripts" + ), + "\\u001b[94mpublish\\u001b[39m Skipping workspace \\u001b[36mworkspace-p\\u001b[39m, marked as \\u001b[1mprivate\\u001b[22m", ] ` @@ -580,57 +391,30 @@ exports[`test/lib/commands/publish.js TAP workspaces all workspaces - no color > exports[`test/lib/commands/publish.js TAP workspaces all workspaces - no color > warns about skipped private workspace 1`] = ` Array [ - Array [ - "publish", - "npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", - ], - Array [ - "publish", - String( - errors corrected: - Removed invalid "scripts" - "repository" was changed from a string to an object - ), - ], - Array [ - "publish", - "npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", - ], - Array [ - "publish", - String( - errors corrected: - Removed invalid "scripts" - "repository" was changed from a string to an object - "repository.url" was normalized to "git+https://github.com/npm/workspace-b.git" - ), - ], - Array [ - "publish", - "npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", - ], - Array [ - "publish", - String( - errors corrected: - Removed invalid "scripts" - ), - ], - Array [ - "publish", - "npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", - ], - Array [ - "publish", - String( - errors corrected: - Removed invalid "scripts" - ), - ], - Array [ - "publish", - "Skipping workspace workspace-p, marked as private", - ], + "publish npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", + String( + publish errors corrected: + publish Removed invalid "scripts" + publish "repository" was changed from a string to an object + ), + "publish npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", + String( + publish errors corrected: + publish Removed invalid "scripts" + publish "repository" was changed from a string to an object + publish "repository.url" was normalized to "git+https://github.com/npm/workspace-b.git" + ), + "publish npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", + String( + publish errors corrected: + publish Removed invalid "scripts" + ), + "publish npm auto-corrected some errors in your package.json when publishing. Please run \\"npm pkg fix\\" to address these errors.", + String( + publish errors corrected: + publish Removed invalid "scripts" + ), + "publish Skipping workspace workspace-p, marked as private", ] ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/search.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/search.js.test.cjs index 3fd12d699bd77f..d5485853545882 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/search.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/search.js.test.cjs @@ -5,142 +5,1022 @@ * Make sure to inspect the output below. Do not ignore changes! 
*/ 'use strict' -exports[`test/lib/commands/search.js TAP empty search results > should have expected search results 1`] = ` -No matches found for "foo" -` - exports[`test/lib/commands/search.js TAP search //--color > should have expected search results with color 1`] = ` -NAME | DESCRIPTION | AUTHOR | DATE | VERSION | KEYWORDS -libnpm | Collection of… | =nlf… | 2019-07-16 | 3.0.1 | npm api package manager lib -libnpmaccess | programmatic… | =nlf… | 2020-11-03 | 4.0.1 | libnpmaccess -@evocateur/libnpmaccess | programmatic… | =evocateur | 2019-07-16 | 3.1.2 | -@evocateur/libnpmpublish | Programmatic API… | =evocateur | 2019-07-16 | 1.2.2 | -libnpmorg | Programmatic api… | =nlf… | 2020-11-03 | 2.0.1 | libnpm npm package manager api orgs teams -libnpmsearch | Programmatic API… | =nlf… | 2020-12-08 | 3.1.0 | npm search api libnpm -libnpmteam | npm Team management… | =nlf… | 2020-11-03 | 2.0.2 | -libnpmhook | programmatic API… | =nlf… | 2020-11-03 | 6.0.1 | npm hooks registry npm api -libnpmpublish | Programmatic API… | =nlf… | 2020-11-03 | 4.0.0 | -libnpmfund | Programmatic API… | =nlf… | 2020-12-08 | 1.0.2 | npm npmcli libnpm cli git fund gitfund -@npmcli/map-workspaces | Retrieves a… | =nlf… | 2020-09-30 | 1.0.1 | npm bad map npmcli libnpm cli workspaces map-workspaces -libnpmversion | library to do the… | =nlf… | 2020-11-04 | 1.0.7 | -@types/libnpmsearch | TypeScript… | =types | 2019-09-26 | 2.0.1 | -pkg-no-desc | | =lukekarrys | 2019-09-26 | 1.0.0 | +libnpm +Collection of programmatic APIs for the npm CLI +Version 3.0.1 published 2019-07-16 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm api package manager lib +https://npm.im/libnpm +libnpmaccess +programmatic library for \`npm access\` commands +Version 4.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpmaccess +https://npm.im/libnpmaccess +@evocateur/libnpmaccess +programmatic library for \`npm access\` commands +Version 3.1.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmaccess +@evocateur/libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 1.2.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmpublish +libnpmorg +Programmatic api for \`npm org\` commands +Version 2.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpm npm package manager api orgs teams +https://npm.im/libnpmorg +libnpmsearch +Programmatic API for searching in npm and compatible registries. 
+Version 3.1.0 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm search api libnpm +https://npm.im/libnpmsearch +libnpmteam +npm Team management APIs +Version 2.0.2 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmteam +libnpmhook +programmatic API for managing npm registry hooks +Version 6.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm hooks registry npm api +https://npm.im/libnpmhook +libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 4.0.0 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmpublish +libnpmfund +Programmatic API for npm fund +Version 1.0.2 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm npmcli libnpm cli git fund gitfund +https://npm.im/libnpmfund +@npmcli/map-workspaces +Retrieves a name:pathname Map for a given workspaces config +Version 1.0.1 published 2020-09-30 by ruyadorno +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm bad map npmcli libnpm cli workspaces map-workspaces +https://npm.im/@npmcli/map-workspaces +libnpmversion +library to do the things that 'npm version' does +Version 1.0.7 published 2020-11-04 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmversion +@types/libnpmsearch +TypeScript definitions for libnpmsearch +Version 2.0.1 published 2019-09-26 by types +Maintainers: types +https://npm.im/@types/libnpmsearch +pkg-no-desc +Version 1.0.0 published 2019-09-26 by lukekarrys +Maintainers: lukekarrys +https://npm.im/pkg-no-desc ` exports[`test/lib/commands/search.js TAP search --color > should have expected search results with color 1`] = ` -NAME | DESCRIPTION | AUTHOR | DATE | VERSION | KEYWORDS -libnpm | Collection of… | =nlf… | 2019-07-16 | 3.0.1 | npm api package manager lib -libnpmaccess | programmatic… | =nlf… | 2020-11-03 | 4.0.1 | libnpmaccess -@evocateur/libnpmaccess | programmatic… | =evocateur | 2019-07-16 | 3.1.2 |  -@evocateur/libnpmpublish | Programmatic API… | =evocateur | 2019-07-16 | 1.2.2 |  -libnpmorg | Programmatic api… | =nlf… | 2020-11-03 | 2.0.1 | libnpm npm package manager api orgs teams -libnpmsearch | Programmatic API… | =nlf… | 2020-12-08 | 3.1.0 | npm search api libnpm -libnpmteam | npm Team management… | =nlf… | 2020-11-03 | 2.0.2 |  -libnpmhook | programmatic API… | =nlf… | 2020-11-03 | 6.0.1 | npm hooks registry npm api -libnpmpublish | Programmatic API… | =nlf… | 2020-11-03 | 4.0.0 |  -libnpmfund | Programmatic API… | =nlf… | 2020-12-08 | 1.0.2 | npm npmcli libnpm cli git fund gitfund -@npmcli/map-workspaces | Retrieves a… | =nlf… | 2020-09-30 | 1.0.1 | npm bad map npmcli libnpm cli workspaces map-workspaces -libnpmversion | library to do the… | =nlf… | 2020-11-04 | 1.0.7 |  -@types/libnpmsearch | TypeScript… | =types | 2019-09-26 | 2.0.1 |  -pkg-no-desc | | =lukekarrys | 2019-09-26 | 1.0.0 |  +libnpm +Collection of programmatic APIs for the npm CLI +Version 3.0.1 published 2019-07-16 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm api package manager lib +https://npm.im/libnpm +libnpmaccess +programmatic library for \`npm access\` commands +Version 4.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpmaccess +https://npm.im/libnpmaccess +@evocateur/libnpmaccess +programmatic library for \`npm access\` commands +Version 3.1.2 published 2019-07-16 by 
evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmaccess +@evocateur/libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 1.2.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmpublish +libnpmorg +Programmatic api for \`npm org\` commands +Version 2.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpm npm package manager api orgs teams +https://npm.im/libnpmorg +libnpmsearch +Programmatic API for searching in npm and compatible registries. +Version 3.1.0 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm search api libnpm +https://npm.im/libnpmsearch +libnpmteam +npm Team management APIs +Version 2.0.2 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmteam +libnpmhook +programmatic API for managing npm registry hooks +Version 6.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm hooks registry npm api +https://npm.im/libnpmhook +libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 4.0.0 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmpublish +libnpmfund +Programmatic API for npm fund +Version 1.0.2 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm npmcli libnpm cli git fund gitfund +https://npm.im/libnpmfund +@npmcli/map-workspaces +Retrieves a name:pathname Map for a given workspaces config +Version 1.0.1 published 2020-09-30 by ruyadorno +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm bad map npmcli libnpm cli workspaces map-workspaces +https://npm.im/@npmcli/map-workspaces +libnpmversion +library to do the things that 'npm version' does +Version 1.0.7 published 2020-11-04 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmversion +@types/libnpmsearch +TypeScript definitions for libnpmsearch +Version 2.0.1 published 2019-09-26 by types +Maintainers: types +https://npm.im/@types/libnpmsearch +pkg-no-desc +Version 1.0.0 published 2019-09-26 by lukekarrys +Maintainers: lukekarrys +https://npm.im/pkg-no-desc ` exports[`test/lib/commands/search.js TAP search --parseable > should have expected search results as parseable 1`] = ` -libnpm Collection of programmatic APIs for the npm CLI =nlf =ruyadorno =darcyclarke =isaacs 2019-07-16 3.0.1 npm api package manager lib -libnpmaccess programmatic library for \`npm access\` commands =nlf =ruyadorno =darcyclarke =isaacs 2020-11-03 4.0.1 libnpmaccess -@evocateur/libnpmaccess programmatic library for \`npm access\` commands =evocateur 2019-07-16 3.1.2 -@evocateur/libnpmpublish Programmatic API for the bits behind npm publish and unpublish =evocateur 2019-07-16 1.2.2 -libnpmorg Programmatic api for \`npm org\` commands =nlf =ruyadorno =darcyclarke =isaacs 2020-11-03 2.0.1 libnpm npm package manager api orgs teams -libnpmsearch Programmatic API for searching in npm and compatible registries. 
=nlf =ruyadorno =darcyclarke =isaacs 2020-12-08 3.1.0 npm search api libnpm -libnpmteam npm Team management APIs =nlf =ruyadorno =darcyclarke =isaacs 2020-11-03 2.0.2 -libnpmhook programmatic API for managing npm registry hooks =nlf =ruyadorno =darcyclarke =isaacs 2020-11-03 6.0.1 npm hooks registry npm api -libnpmpublish Programmatic API for the bits behind npm publish and unpublish =nlf =ruyadorno =darcyclarke =isaacs 2020-11-03 4.0.0 -libnpmfund Programmatic API for npm fund =nlf =ruyadorno =darcyclarke =isaacs 2020-12-08 1.0.2 npm npmcli libnpm cli git fund gitfund -@npmcli/map-workspaces Retrieves a name:pathname Map for a given workspaces config =nlf =ruyadorno =darcyclarke =isaacs 2020-09-30 1.0.1 npm bad map npmcli libnpm cli workspaces map-workspaces -libnpmversion library to do the things that 'npm version' does =nlf =ruyadorno =darcyclarke =isaacs 2020-11-04 1.0.7 -@types/libnpmsearch TypeScript definitions for libnpmsearch =types 2019-09-26 2.0.1 -pkg-no-desc =lukekarrys 2019-09-26 1.0.0 +libnpm Collection of programmatic APIs for the npm CLI 2019-07-16 3.0.1 npm,api,package manager,lib +libnpmaccess programmatic library for \`npm access\` commands 2020-11-03 4.0.1 libnpmaccess +@evocateur/libnpmaccess programmatic library for \`npm access\` commands 2019-07-16 3.1.2 +@evocateur/libnpmpublish Programmatic API for the bits behind npm publish and unpublish 2019-07-16 1.2.2 +libnpmorg Programmatic api for \`npm org\` commands 2020-11-03 2.0.1 libnpm,npm,package manager,api,orgs,teams +libnpmsearch Programmatic API for searching in npm and compatible registries. 2020-12-08 3.1.0 npm,search,api,libnpm +libnpmteam npm Team management APIs 2020-11-03 2.0.2 +libnpmhook programmatic API for managing npm registry hooks 2020-11-03 6.0.1 npm,hooks,registry,npm api +libnpmpublish Programmatic API for the bits behind npm publish and unpublish 2020-11-03 4.0.0 +libnpmfund Programmatic API for npm fund 2020-12-08 1.0.2 npm,npmcli,libnpm,cli,git,fund,gitfund +@npmcli/map-workspaces Retrieves a name:pathname Map for a given workspaces config 2020-09-30 1.0.1 npm,,bad map,npmcli,libnpm,cli,workspaces,map-workspaces +libnpmversion library to do the things that 'npm version' does 2020-11-04 1.0.7 +@types/libnpmsearch TypeScript definitions for libnpmsearch 2019-09-26 2.0.1 +pkg-no-desc 2019-09-26 1.0.0 ` exports[`test/lib/commands/search.js TAP search > should have filtered expected search results 1`] = ` -NAME | DESCRIPTION | AUTHOR | DATE | VERSION | KEYWORDS -foo | | =foo | prehistoric | 1.0.0 | -libnpmversion | | =foo | prehistoric | 1.0.0 | +foo +Version 1.0.0 published prehistoric by foo +Maintainers: foo +https://npm.im/foo +custom-registry +Version 1.0.0 published prehistoric by ??? 
+Maintainers: foo +https://npm.im/custom-registry +libnpmversion +Version 1.0.0 published prehistoric by foo +Maintainers: foo +https://npm.im/libnpmversion ` exports[`test/lib/commands/search.js TAP search text > should have expected search results 1`] = ` -NAME | DESCRIPTION | AUTHOR | DATE | VERSION | KEYWORDS -libnpm | Collection of… | =nlf… | 2019-07-16 | 3.0.1 | npm api package manager lib -libnpmaccess | programmatic… | =nlf… | 2020-11-03 | 4.0.1 | libnpmaccess -@evocateur/libnpmaccess | programmatic… | =evocateur | 2019-07-16 | 3.1.2 | -@evocateur/libnpmpublish | Programmatic API… | =evocateur | 2019-07-16 | 1.2.2 | -libnpmorg | Programmatic api… | =nlf… | 2020-11-03 | 2.0.1 | libnpm npm package manager api orgs teams -libnpmsearch | Programmatic API… | =nlf… | 2020-12-08 | 3.1.0 | npm search api libnpm -libnpmteam | npm Team management… | =nlf… | 2020-11-03 | 2.0.2 | -libnpmhook | programmatic API… | =nlf… | 2020-11-03 | 6.0.1 | npm hooks registry npm api -libnpmpublish | Programmatic API… | =nlf… | 2020-11-03 | 4.0.0 | -libnpmfund | Programmatic API… | =nlf… | 2020-12-08 | 1.0.2 | npm npmcli libnpm cli git fund gitfund -@npmcli/map-workspaces | Retrieves a… | =nlf… | 2020-09-30 | 1.0.1 | npm bad map npmcli libnpm cli workspaces map-workspaces -libnpmversion | library to do the… | =nlf… | 2020-11-04 | 1.0.7 | -@types/libnpmsearch | TypeScript… | =types | 2019-09-26 | 2.0.1 | -pkg-no-desc | | =lukekarrys | 2019-09-26 | 1.0.0 | +libnpm +Collection of programmatic APIs for the npm CLI +Version 3.0.1 published 2019-07-16 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm api package manager lib +https://npm.im/libnpm +libnpmaccess +programmatic library for \`npm access\` commands +Version 4.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpmaccess +https://npm.im/libnpmaccess +@evocateur/libnpmaccess +programmatic library for \`npm access\` commands +Version 3.1.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmaccess +@evocateur/libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 1.2.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmpublish +libnpmorg +Programmatic api for \`npm org\` commands +Version 2.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpm npm package manager api orgs teams +https://npm.im/libnpmorg +libnpmsearch +Programmatic API for searching in npm and compatible registries. 
+Version 3.1.0 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm search api libnpm +https://npm.im/libnpmsearch +libnpmteam +npm Team management APIs +Version 2.0.2 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmteam +libnpmhook +programmatic API for managing npm registry hooks +Version 6.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm hooks registry npm api +https://npm.im/libnpmhook +libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 4.0.0 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmpublish +libnpmfund +Programmatic API for npm fund +Version 1.0.2 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm npmcli libnpm cli git fund gitfund +https://npm.im/libnpmfund +@npmcli/map-workspaces +Retrieves a name:pathname Map for a given workspaces config +Version 1.0.1 published 2020-09-30 by ruyadorno +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm bad map npmcli libnpm cli workspaces map-workspaces +https://npm.im/@npmcli/map-workspaces +libnpmversion +library to do the things that 'npm version' does +Version 1.0.7 published 2020-11-04 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmversion +@types/libnpmsearch +TypeScript definitions for libnpmsearch +Version 2.0.1 published 2019-09-26 by types +Maintainers: types +https://npm.im/@types/libnpmsearch +pkg-no-desc +Version 1.0.0 published 2019-09-26 by lukekarrys +Maintainers: lukekarrys +https://npm.im/pkg-no-desc +` + +exports[`test/lib/commands/search.js TAP search empty search results > should have expected search results 1`] = ` +No matches found for "foo" ` exports[`test/lib/commands/search.js TAP search exclude forward slash > results should not have libnpmversion 1`] = ` -NAME | DESCRIPTION | AUTHOR | DATE | VERSION | KEYWORDS -libnpm | Collection of… | =nlf… | 2019-07-16 | 3.0.1 | npm api package manager lib -libnpmaccess | programmatic… | =nlf… | 2020-11-03 | 4.0.1 | libnpmaccess -@evocateur/libnpmaccess | programmatic… | =evocateur | 2019-07-16 | 3.1.2 | -@evocateur/libnpmpublish | Programmatic API… | =evocateur | 2019-07-16 | 1.2.2 | -libnpmorg | Programmatic api… | =nlf… | 2020-11-03 | 2.0.1 | libnpm npm package manager api orgs teams -libnpmsearch | Programmatic API… | =nlf… | 2020-12-08 | 3.1.0 | npm search api libnpm -libnpmteam | npm Team management… | =nlf… | 2020-11-03 | 2.0.2 | -libnpmhook | programmatic API… | =nlf… | 2020-11-03 | 6.0.1 | npm hooks registry npm api -libnpmpublish | Programmatic API… | =nlf… | 2020-11-03 | 4.0.0 | -libnpmfund | Programmatic API… | =nlf… | 2020-12-08 | 1.0.2 | npm npmcli libnpm cli git fund gitfund -@npmcli/map-workspaces | Retrieves a… | =nlf… | 2020-09-30 | 1.0.1 | npm bad map npmcli libnpm cli workspaces map-workspaces -@types/libnpmsearch | TypeScript… | =types | 2019-09-26 | 2.0.1 | -pkg-no-desc | | =lukekarrys | 2019-09-26 | 1.0.0 | +libnpm +Collection of programmatic APIs for the npm CLI +Version 3.0.1 published 2019-07-16 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm api package manager lib +https://npm.im/libnpm +libnpmaccess +programmatic library for \`npm access\` commands +Version 4.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpmaccess +https://npm.im/libnpmaccess +@evocateur/libnpmaccess +programmatic 
library for \`npm access\` commands +Version 3.1.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmaccess +@evocateur/libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 1.2.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmpublish +libnpmorg +Programmatic api for \`npm org\` commands +Version 2.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpm npm package manager api orgs teams +https://npm.im/libnpmorg +libnpmsearch +Programmatic API for searching in npm and compatible registries. +Version 3.1.0 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm search api libnpm +https://npm.im/libnpmsearch +libnpmteam +npm Team management APIs +Version 2.0.2 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmteam +libnpmhook +programmatic API for managing npm registry hooks +Version 6.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm hooks registry npm api +https://npm.im/libnpmhook +libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 4.0.0 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmpublish +libnpmfund +Programmatic API for npm fund +Version 1.0.2 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm npmcli libnpm cli git fund gitfund +https://npm.im/libnpmfund +@npmcli/map-workspaces +Retrieves a name:pathname Map for a given workspaces config +Version 1.0.1 published 2020-09-30 by ruyadorno +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm bad map npmcli libnpm cli workspaces map-workspaces +https://npm.im/@npmcli/map-workspaces +@types/libnpmsearch +TypeScript definitions for libnpmsearch +Version 2.0.1 published 2019-09-26 by types +Maintainers: types +https://npm.im/@types/libnpmsearch +pkg-no-desc +Version 1.0.0 published 2019-09-26 by lukekarrys +Maintainers: lukekarrys +https://npm.im/pkg-no-desc ` exports[`test/lib/commands/search.js TAP search exclude regex > results should not have libnpmversion 1`] = ` -NAME | DESCRIPTION | AUTHOR | DATE | VERSION | KEYWORDS -libnpm | Collection of… | =nlf… | 2019-07-16 | 3.0.1 | npm api package manager lib -libnpmaccess | programmatic… | =nlf… | 2020-11-03 | 4.0.1 | libnpmaccess -@evocateur/libnpmaccess | programmatic… | =evocateur | 2019-07-16 | 3.1.2 | -@evocateur/libnpmpublish | Programmatic API… | =evocateur | 2019-07-16 | 1.2.2 | -libnpmorg | Programmatic api… | =nlf… | 2020-11-03 | 2.0.1 | libnpm npm package manager api orgs teams -libnpmsearch | Programmatic API… | =nlf… | 2020-12-08 | 3.1.0 | npm search api libnpm -libnpmteam | npm Team management… | =nlf… | 2020-11-03 | 2.0.2 | -libnpmhook | programmatic API… | =nlf… | 2020-11-03 | 6.0.1 | npm hooks registry npm api -libnpmpublish | Programmatic API… | =nlf… | 2020-11-03 | 4.0.0 | -libnpmfund | Programmatic API… | =nlf… | 2020-12-08 | 1.0.2 | npm npmcli libnpm cli git fund gitfund -@npmcli/map-workspaces | Retrieves a… | =nlf… | 2020-09-30 | 1.0.1 | npm bad map npmcli libnpm cli workspaces map-workspaces -@types/libnpmsearch | TypeScript… | =types | 2019-09-26 | 2.0.1 | -pkg-no-desc | | =lukekarrys | 2019-09-26 | 1.0.0 | +libnpm +Collection of programmatic APIs for the npm CLI +Version 3.0.1 published 2019-07-16 by isaacs +Maintainers: nlf ruyadorno darcyclarke 
isaacs +Keywords: npm api package manager lib +https://npm.im/libnpm +libnpmaccess +programmatic library for \`npm access\` commands +Version 4.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpmaccess +https://npm.im/libnpmaccess +@evocateur/libnpmaccess +programmatic library for \`npm access\` commands +Version 3.1.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmaccess +@evocateur/libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 1.2.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmpublish +libnpmorg +Programmatic api for \`npm org\` commands +Version 2.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpm npm package manager api orgs teams +https://npm.im/libnpmorg +libnpmsearch +Programmatic API for searching in npm and compatible registries. +Version 3.1.0 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm search api libnpm +https://npm.im/libnpmsearch +libnpmteam +npm Team management APIs +Version 2.0.2 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmteam +libnpmhook +programmatic API for managing npm registry hooks +Version 6.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm hooks registry npm api +https://npm.im/libnpmhook +libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 4.0.0 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmpublish +libnpmfund +Programmatic API for npm fund +Version 1.0.2 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm npmcli libnpm cli git fund gitfund +https://npm.im/libnpmfund +@npmcli/map-workspaces +Retrieves a name:pathname Map for a given workspaces config +Version 1.0.1 published 2020-09-30 by ruyadorno +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm bad map npmcli libnpm cli workspaces map-workspaces +https://npm.im/@npmcli/map-workspaces +@types/libnpmsearch +TypeScript definitions for libnpmsearch +Version 2.0.1 published 2019-09-26 by types +Maintainers: types +https://npm.im/@types/libnpmsearch +pkg-no-desc +Version 1.0.0 published 2019-09-26 by lukekarrys +Maintainers: lukekarrys +https://npm.im/pkg-no-desc ` exports[`test/lib/commands/search.js TAP search exclude string > results should not have libnpmversion 1`] = ` -NAME | DESCRIPTION | AUTHOR | DATE | VERSION | KEYWORDS -libnpm | Collection of… | =nlf… | 2019-07-16 | 3.0.1 | npm api package manager lib -libnpmaccess | programmatic… | =nlf… | 2020-11-03 | 4.0.1 | libnpmaccess -@evocateur/libnpmaccess | programmatic… | =evocateur | 2019-07-16 | 3.1.2 | -@evocateur/libnpmpublish | Programmatic API… | =evocateur | 2019-07-16 | 1.2.2 | -libnpmorg | Programmatic api… | =nlf… | 2020-11-03 | 2.0.1 | libnpm npm package manager api orgs teams -libnpmsearch | Programmatic API… | =nlf… | 2020-12-08 | 3.1.0 | npm search api libnpm -libnpmteam | npm Team management… | =nlf… | 2020-11-03 | 2.0.2 | -libnpmhook | programmatic API… | =nlf… | 2020-11-03 | 6.0.1 | npm hooks registry npm api -libnpmpublish | Programmatic API… | =nlf… | 2020-11-03 | 4.0.0 | -libnpmfund | Programmatic API… | =nlf… | 2020-12-08 | 1.0.2 | npm npmcli libnpm cli git fund gitfund -@npmcli/map-workspaces | Retrieves a… | =nlf… | 2020-09-30 | 1.0.1 | npm 
bad map npmcli libnpm cli workspaces map-workspaces -@types/libnpmsearch | TypeScript… | =types | 2019-09-26 | 2.0.1 | -pkg-no-desc | | =lukekarrys | 2019-09-26 | 1.0.0 | +libnpm +Collection of programmatic APIs for the npm CLI +Version 3.0.1 published 2019-07-16 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm api package manager lib +https://npm.im/libnpm +libnpmaccess +programmatic library for \`npm access\` commands +Version 4.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpmaccess +https://npm.im/libnpmaccess +@evocateur/libnpmaccess +programmatic library for \`npm access\` commands +Version 3.1.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmaccess +@evocateur/libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 1.2.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmpublish +libnpmorg +Programmatic api for \`npm org\` commands +Version 2.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: libnpm npm package manager api orgs teams +https://npm.im/libnpmorg +libnpmsearch +Programmatic API for searching in npm and compatible registries. +Version 3.1.0 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm search api libnpm +https://npm.im/libnpmsearch +libnpmteam +npm Team management APIs +Version 2.0.2 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmteam +libnpmhook +programmatic API for managing npm registry hooks +Version 6.0.1 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm hooks registry npm api +https://npm.im/libnpmhook +libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 4.0.0 published 2020-11-03 by nlf +Maintainers: nlf ruyadorno darcyclarke isaacs +https://npm.im/libnpmpublish +libnpmfund +Programmatic API for npm fund +Version 1.0.2 published 2020-12-08 by isaacs +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm npmcli libnpm cli git fund gitfund +https://npm.im/libnpmfund +@npmcli/map-workspaces +Retrieves a name:pathname Map for a given workspaces config +Version 1.0.1 published 2020-09-30 by ruyadorno +Maintainers: nlf ruyadorno darcyclarke isaacs +Keywords: npm bad map npmcli libnpm cli workspaces map-workspaces +https://npm.im/@npmcli/map-workspaces +@types/libnpmsearch +TypeScript definitions for libnpmsearch +Version 2.0.1 published 2019-09-26 by types +Maintainers: types +https://npm.im/@types/libnpmsearch +pkg-no-desc +Version 1.0.0 published 2019-09-26 by lukekarrys +Maintainers: lukekarrys +https://npm.im/pkg-no-desc +` + +exports[`test/lib/commands/search.js TAP search exclude string json > results should not have libnpmversion 1`] = ` +Array [ + Object { + "author": Object { + "email": "kzm@zkat.tech", + "name": "Kat Marchán", + }, + "date": "2019-07-16T17:50:00.572Z", + "description": "Collection of programmatic APIs for the npm CLI", + "keywords": Array [ + "npm", + "api", + "package manager", + "lib", + ], + "links": Object { + "bugs": "https://github.com/npm/libnpm/issues", + "homepage": "https://github.com/npm/libnpm#readme", + "npm": "https://www.npmjs.com/package/libnpm", + "repository": "https://github.com/npm/libnpm", + }, + "maintainers": Array [ + Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + Object { + "email": 
"ruyadorno@hotmail.com", + "username": "ruyadorno", + }, + Object { + "email": "darcy@darcyclarke.me", + "username": "darcyclarke", + }, + Object { + "email": "i@izs.me", + "username": "isaacs", + }, + ], + "name": "libnpm", + "publisher": Object { + "email": "i@izs.me", + "username": "isaacs", + }, + "scope": "unscoped", + "version": "3.0.1", + }, + Object { + "author": Object { + "email": "kzm@sykosomatic.org", + "name": "Kat Marchán", + }, + "date": "2020-11-03T19:19:00.526Z", + "description": "programmatic library for \`npm access\` commands", + "keywords": "libnpmaccess", + "links": Object { + "bugs": "https://github.com/npm/libnpmaccess/issues", + "homepage": "https://npmjs.com/package/libnpmaccess", + "npm": "https://www.npmjs.com/package/libnpmaccess", + "repository": "https://github.com/npm/libnpmaccess", + }, + "maintainers": Array [ + Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + Object { + "email": "ruyadorno@hotmail.com", + "username": "ruyadorno", + }, + Object { + "email": "darcy@darcyclarke.me", + "username": "darcyclarke", + }, + Object { + "email": "i@izs.me", + "username": "isaacs", + }, + ], + "name": "libnpmaccess", + "publisher": Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + "scope": "unscoped", + "version": "4.0.1", + }, + Object { + "author": Object { + "email": "kzm@zkat.tech", + "name": "Kat Marchán", + }, + "date": "2019-07-16T19:43:33.959Z", + "description": "programmatic library for \`npm access\` commands", + "links": Object { + "bugs": "https://github.com/evocateur/libnpmaccess/issues", + "homepage": "https://npmjs.com/package/@evocateur/libnpmaccess", + "npm": "https://www.npmjs.com/package/%40evocateur%2Flibnpmaccess", + "repository": "https://github.com/evocateur/libnpmaccess", + }, + "maintainers": Array [ + Object { + "email": "daniel.stockman@gmail.com", + "username": "evocateur", + }, + ], + "name": "@evocateur/libnpmaccess", + "publisher": Object { + "email": "daniel.stockman@gmail.com", + "username": "evocateur", + }, + "scope": "evocateur", + "version": "3.1.2", + }, + Object { + "author": Object { + "email": "kzm@zkat.tech", + "name": "Kat Marchán", + }, + "date": "2019-07-16T19:40:40.850Z", + "description": "Programmatic API for the bits behind npm publish and unpublish", + "links": Object { + "bugs": "https://github.com/evocateur/libnpmpublish/issues", + "homepage": "https://npmjs.com/package/@evocateur/libnpmpublish", + "npm": "https://www.npmjs.com/package/%40evocateur%2Flibnpmpublish", + "repository": "https://github.com/evocateur/libnpmpublish", + }, + "maintainers": Array [ + Object { + "email": "daniel.stockman@gmail.com", + "username": "evocateur", + }, + ], + "name": "@evocateur/libnpmpublish", + "publisher": Object { + "email": "daniel.stockman@gmail.com", + "username": "evocateur", + }, + "scope": "evocateur", + "version": "1.2.2", + }, + Object { + "author": Object { + "email": "kzm@sykosomatic.org", + "name": "Kat Marchán", + }, + "date": "2020-11-03T19:21:57.757Z", + "description": "Programmatic api for \`npm org\` commands", + "keywords": Array [ + "libnpm", + "npm", + "package manager", + "api", + "orgs", + "teams", + ], + "links": Object { + "bugs": "https://github.com/npm/libnpmorg/issues", + "homepage": "https://npmjs.com/package/libnpmorg", + "npm": "https://www.npmjs.com/package/libnpmorg", + "repository": "https://github.com/npm/libnpmorg", + }, + "maintainers": Array [ + Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + Object { + "email": 
"ruyadorno@hotmail.com", + "username": "ruyadorno", + }, + Object { + "email": "darcy@darcyclarke.me", + "username": "darcyclarke", + }, + Object { + "email": "i@izs.me", + "username": "isaacs", + }, + ], + "name": "libnpmorg", + "publisher": Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + "scope": "unscoped", + "version": "2.0.1", + }, + Object { + "author": Object { + "email": "kzm@sykosomatic.org", + "name": "Kat Marchán", + }, + "date": "2020-12-08T23:54:18.374Z", + "description": "Programmatic API for searching in npm and compatible registries.", + "keywords": Array [ + "npm", + "search", + "api", + "libnpm", + ], + "links": Object { + "bugs": "https://github.com/npm/libnpmsearch/issues", + "homepage": "https://npmjs.com/package/libnpmsearch", + "npm": "https://www.npmjs.com/package/libnpmsearch", + "repository": "https://github.com/npm/libnpmsearch", + }, + "maintainers": Array [ + Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + Object { + "email": "ruyadorno@hotmail.com", + "username": "ruyadorno", + }, + Object { + "email": "darcy@darcyclarke.me", + "username": "darcyclarke", + }, + Object { + "email": "i@izs.me", + "username": "isaacs", + }, + ], + "name": "libnpmsearch", + "publisher": Object { + "email": "i@izs.me", + "username": "isaacs", + }, + "scope": "unscoped", + "version": "3.1.0", + }, + Object { + "author": Object { + "email": "kzm@zkat.tech", + "name": "Kat Marchán", + }, + "date": "2020-11-03T19:24:42.380Z", + "description": "npm Team management APIs", + "links": Object { + "bugs": "https://github.com/npm/libnpmteam/issues", + "homepage": "https://npmjs.com/package/libnpmteam", + "npm": "https://www.npmjs.com/package/libnpmteam", + "repository": "https://github.com/npm/libnpmteam", + }, + "maintainers": Array [ + Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + Object { + "email": "ruyadorno@hotmail.com", + "username": "ruyadorno", + }, + Object { + "email": "darcy@darcyclarke.me", + "username": "darcyclarke", + }, + Object { + "email": "i@izs.me", + "username": "isaacs", + }, + ], + "name": "libnpmteam", + "publisher": Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + "scope": "unscoped", + "version": "2.0.2", + }, + Object { + "author": Object { + "email": "kzm@sykosomatic.org", + "name": "Kat Marchán", + }, + "date": "2020-11-03T19:20:45.818Z", + "description": "programmatic API for managing npm registry hooks", + "keywords": Array [ + "npm", + "hooks", + "registry", + "npm api", + ], + "links": Object { + "bugs": "https://github.com/npm/libnpmhook/issues", + "homepage": "https://github.com/npm/libnpmhook#readme", + "npm": "https://www.npmjs.com/package/libnpmhook", + "repository": "https://github.com/npm/libnpmhook", + }, + "maintainers": Array [ + Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + Object { + "email": "ruyadorno@hotmail.com", + "username": "ruyadorno", + }, + Object { + "email": "darcy@darcyclarke.me", + "username": "darcyclarke", + }, + Object { + "email": "i@izs.me", + "username": "isaacs", + }, + ], + "name": "libnpmhook", + "publisher": Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + "scope": "unscoped", + "version": "6.0.1", + }, + Object { + "author": Object { + "email": "support@npmjs.com", + "name": "npm Inc.", + }, + "date": "2020-11-03T19:13:43.780Z", + "description": "Programmatic API for the bits behind npm publish and unpublish", + "links": Object { + "bugs": "https://github.com/npm/libnpmpublish/issues", 
+ "homepage": "https://npmjs.com/package/libnpmpublish", + "npm": "https://www.npmjs.com/package/libnpmpublish", + "repository": "https://github.com/npm/libnpmpublish", + }, + "maintainers": Array [ + Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + Object { + "email": "ruyadorno@hotmail.com", + "username": "ruyadorno", + }, + Object { + "email": "darcy@darcyclarke.me", + "username": "darcyclarke", + }, + Object { + "email": "i@izs.me", + "username": "isaacs", + }, + ], + "name": "libnpmpublish", + "publisher": Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + "scope": "unscoped", + "version": "4.0.0", + }, + Object { + "author": Object { + "email": "support@npmjs.com", + "name": "npm Inc.", + }, + "date": "2020-12-08T23:22:00.213Z", + "description": "Programmatic API for npm fund", + "keywords": Array [ + "npm", + "npmcli", + "libnpm", + "cli", + "git", + "fund", + "gitfund", + ], + "links": Object { + "bugs": "https://github.com/npm/libnpmfund/issues", + "homepage": "https://github.com/npm/libnpmfund#readme", + "npm": "https://www.npmjs.com/package/libnpmfund", + "repository": "https://github.com/npm/libnpmfund", + }, + "maintainers": Array [ + Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + Object { + "email": "ruyadorno@hotmail.com", + "username": "ruyadorno", + }, + Object { + "email": "darcy@darcyclarke.me", + "username": "darcyclarke", + }, + Object { + "email": "i@izs.me", + "username": "isaacs", + }, + ], + "name": "libnpmfund", + "publisher": Object { + "email": "i@izs.me", + "username": "isaacs", + }, + "scope": "unscoped", + "version": "1.0.2", + }, + Object { + "author": Object { + "email": "support@npmjs.com", + "name": "npm Inc.", + }, + "date": "2020-09-30T15:16:29.017Z", + "description": "Retrieves a name:pathname Map for a given workspaces config", + "keywords": Array [ + "\\u001b[33mnpm\\u001b[39m", + "\\u001b]4;0;?\\u0007", + "\\u001b[Hbad map", + "npmcli", + "libnpm", + "cli", + "workspaces", + "map-workspaces", + ], + "links": Object { + "bugs": "https://github.com/npm/map-workspaces/issues", + "homepage": "https://github.com/npm/map-workspaces#readme", + "npm": "https://www.npmjs.com/package/%40npmcli%2Fmap-workspaces", + "repository": "https://github.com/npm/map-workspaces", + }, + "maintainers": Array [ + Object { + "email": "quitlahok@gmail.com", + "username": "nlf", + }, + Object { + "email": "ruyadorno@hotmail.com", + "username": "ruyadorno", + }, + Object { + "email": "darcy@darcyclarke.me", + "username": "darcyclarke", + }, + Object { + "email": "i@izs.me", + "username": "isaacs", + }, + ], + "name": "@npmcli/map-workspaces", + "publisher": Object { + "email": "ruyadorno@hotmail.com", + "username": "ruyadorno", + }, + "scope": "npmcli", + "version": "1.0.1", + }, + Object { + "date": "2019-09-26T22:24:28.713Z", + "description": "TypeScript definitions for libnpmsearch", + "links": Object { + "npm": "https://www.npmjs.com/package/%40types%2Flibnpmsearch", + }, + "maintainers": Array [ + Object { + "email": "ts-npm-types@microsoft.com", + "username": "types", + }, + ], + "name": "@types/libnpmsearch", + "publisher": Object { + "email": "ts-npm-types@microsoft.com", + "username": "types", + }, + "scope": "types", + "version": "2.0.1", + }, + Object { + "date": "2019-09-26T22:24:28.713Z", + "maintainers": Array [ + Object { + "email": "lukekarrys", + "username": "lukekarrys", + }, + ], + "name": "pkg-no-desc", + "publisher": Object { + "email": "lukekarrys", + "username": "lukekarrys", + }, + 
"scope": "unscoped", + "version": "1.0.0", + }, +] ` exports[`test/lib/commands/search.js TAP search exclude username with upper case letters > results should not have nlf 1`] = ` -NAME | DESCRIPTION | AUTHOR | DATE | VERSION | KEYWORDS -@evocateur/libnpmaccess | programmatic… | =evocateur | 2019-07-16 | 3.1.2 | -@evocateur/libnpmpublish | Programmatic API… | =evocateur | 2019-07-16 | 1.2.2 | -@types/libnpmsearch | TypeScript… | =types | 2019-09-26 | 2.0.1 | -pkg-no-desc | | =lukekarrys | 2019-09-26 | 1.0.0 | +@evocateur/libnpmaccess +programmatic library for \`npm access\` commands +Version 3.1.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmaccess +@evocateur/libnpmpublish +Programmatic API for the bits behind npm publish and unpublish +Version 1.2.2 published 2019-07-16 by evocateur +Maintainers: evocateur +https://npm.im/@evocateur/libnpmpublish +@types/libnpmsearch +TypeScript definitions for libnpmsearch +Version 2.0.1 published 2019-09-26 by types +Maintainers: types +https://npm.im/@types/libnpmsearch +pkg-no-desc +Version 1.0.0 published 2019-09-26 by lukekarrys +Maintainers: lukekarrys +https://npm.im/pkg-no-desc +` + +exports[`test/lib/commands/search.js TAP search no publisher > should have filtered expected search results 1`] = ` +custom-registry +Version 1.0.0 published prehistoric by ??? +Maintainers: foo +https://npm.im/custom-registry +libnpmversion +Version 1.0.0 published prehistoric by foo +Maintainers: foo +https://npm.im/libnpmversion ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/shrinkwrap.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/shrinkwrap.js.test.cjs index d97f13d2ed85c4..f0ac314925b28f 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/shrinkwrap.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/shrinkwrap.js.test.cjs @@ -22,7 +22,8 @@ exports[`test/lib/commands/shrinkwrap.js TAP with hidden lockfile ancient > must }, "logs": [ "created a lockfile as npm-shrinkwrap.json" - ] + ], + "warn": [] } ` @@ -46,6 +47,9 @@ exports[`test/lib/commands/shrinkwrap.js TAP with hidden lockfile ancient upgrad }, "logs": [ "created a lockfile as npm-shrinkwrap.json with version 3" + ], + "warn": [ + "Converting lock file (npm-shrinkwrap.json) from v1 -> v3" ] } ` @@ -68,7 +72,8 @@ exports[`test/lib/commands/shrinkwrap.js TAP with hidden lockfile existing > mus }, "logs": [ "created a lockfile as npm-shrinkwrap.json" - ] + ], + "warn": [] } ` @@ -91,6 +96,9 @@ exports[`test/lib/commands/shrinkwrap.js TAP with hidden lockfile existing downg }, "logs": [ "created a lockfile as npm-shrinkwrap.json with version 1" + ], + "warn": [ + "Converting lock file (npm-shrinkwrap.json) from v2 -> v1" ] } ` @@ -115,6 +123,9 @@ exports[`test/lib/commands/shrinkwrap.js TAP with hidden lockfile existing upgra }, "logs": [ "created a lockfile as npm-shrinkwrap.json with version 3" + ], + "warn": [ + "Converting lock file (npm-shrinkwrap.json) from v2 -> v3" ] } ` @@ -131,7 +142,8 @@ exports[`test/lib/commands/shrinkwrap.js TAP with nothing ancient > must match s }, "logs": [ "created a lockfile as npm-shrinkwrap.json with version 3" - ] + ], + "warn": [] } ` @@ -149,7 +161,8 @@ exports[`test/lib/commands/shrinkwrap.js TAP with nothing ancient upgrade > must }, "logs": [ "created a lockfile as npm-shrinkwrap.json with version 3" - ] + ], + "warn": [] } ` @@ -173,6 +186,9 @@ exports[`test/lib/commands/shrinkwrap.js TAP with npm-shrinkwrap.json ancient > }, "logs": [ "npm-shrinkwrap.json updated to version 3" + ], + "warn": 
[ + "Converting lock file (npm-shrinkwrap.json) from v1 -> v3" ] } ` @@ -199,6 +215,9 @@ exports[`test/lib/commands/shrinkwrap.js TAP with npm-shrinkwrap.json ancient up }, "logs": [ "npm-shrinkwrap.json updated to version 3" + ], + "warn": [ + "Converting lock file (npm-shrinkwrap.json) from v1 -> v3" ] } ` @@ -223,7 +242,8 @@ exports[`test/lib/commands/shrinkwrap.js TAP with npm-shrinkwrap.json existing > }, "logs": [ "npm-shrinkwrap.json up to date" - ] + ], + "warn": [] } ` @@ -244,6 +264,9 @@ exports[`test/lib/commands/shrinkwrap.js TAP with npm-shrinkwrap.json existing d }, "logs": [ "npm-shrinkwrap.json updated to version 1" + ], + "warn": [ + "Converting lock file (npm-shrinkwrap.json) from v2 -> v1" ] } ` @@ -270,6 +293,9 @@ exports[`test/lib/commands/shrinkwrap.js TAP with npm-shrinkwrap.json existing u }, "logs": [ "npm-shrinkwrap.json updated to version 3" + ], + "warn": [ + "Converting lock file (npm-shrinkwrap.json) from v2 -> v3" ] } ` @@ -294,6 +320,9 @@ exports[`test/lib/commands/shrinkwrap.js TAP with package-lock.json ancient > mu }, "logs": [ "package-lock.json has been renamed to npm-shrinkwrap.json and updated to version 3" + ], + "warn": [ + "Converting lock file (npm-shrinkwrap.json) from v1 -> v3" ] } ` @@ -320,6 +349,9 @@ exports[`test/lib/commands/shrinkwrap.js TAP with package-lock.json ancient upgr }, "logs": [ "package-lock.json has been renamed to npm-shrinkwrap.json and updated to version 3" + ], + "warn": [ + "Converting lock file (npm-shrinkwrap.json) from v1 -> v3" ] } ` @@ -344,7 +376,8 @@ exports[`test/lib/commands/shrinkwrap.js TAP with package-lock.json existing > m }, "logs": [ "package-lock.json has been renamed to npm-shrinkwrap.json" - ] + ], + "warn": [] } ` @@ -365,6 +398,9 @@ exports[`test/lib/commands/shrinkwrap.js TAP with package-lock.json existing dow }, "logs": [ "package-lock.json has been renamed to npm-shrinkwrap.json and updated to version 1" + ], + "warn": [ + "Converting lock file (npm-shrinkwrap.json) from v2 -> v1" ] } ` @@ -391,6 +427,9 @@ exports[`test/lib/commands/shrinkwrap.js TAP with package-lock.json existing upg }, "logs": [ "package-lock.json has been renamed to npm-shrinkwrap.json and updated to version 3" + ], + "warn": [ + "Converting lock file (npm-shrinkwrap.json) from v2 -> v3" ] } ` diff --git a/deps/npm/tap-snapshots/test/lib/commands/view.js.test.cjs b/deps/npm/tap-snapshots/test/lib/commands/view.js.test.cjs index 3bda4e7de28531..3e06ecf5d054ee 100644 --- a/deps/npm/tap-snapshots/test/lib/commands/view.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/commands/view.js.test.cjs @@ -7,135 +7,129 @@ 'use strict' exports[`test/lib/commands/view.js TAP deprecated package with license, bugs, repository and other fields > must match snapshot 1`] = ` -green@1.0.0 | ACME | deps: 2 | versions: 2 +green@1.0.0 | ACME | deps: 2 | versions: 2 green is a very important color -DEPRECATED!! - true +DEPRECATED!! 
- true -keywords: colors, green, crayola +keywords: colors, green, crayola -bin: green +bin: green dist -.tarball: http://hm.green.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1.0 GB +.tarball: http://hm.green.com/1.0.0.tgz +.shasum: 123 +.integrity: --- +.unpackedSize: 1.0 GB dependencies: -red: 1.0.0 -yellow: 1.0.0 +red: 1.0.0 +yellow: 1.0.0 maintainers: -- claudia <c@yellow.com> -- isaacs <i@yellow.com> +- claudia <c@yellow.com> +- isaacs <i@yellow.com> dist-tags: -latest: 1.0.0 +latest: 1.0.0 ` exports[`test/lib/commands/view.js TAP deprecated package with unicode > must match snapshot 1`] = ` -green@1.0.0 | ACME | deps: 2 | versions: 2 +green@1.0.0 | ACME | deps: 2 | versions: 2 green is a very important color -DEPRECATED ⚠️ - true +DEPRECATED ⚠️ - true -keywords: colors, green, crayola +keywords: colors, green, crayola -bin: green +bin: green dist -.tarball: http://hm.green.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1.0 GB +.tarball: http://hm.green.com/1.0.0.tgz +.shasum: 123 +.integrity: --- +.unpackedSize: 1.0 GB dependencies: -red: 1.0.0 -yellow: 1.0.0 +red: 1.0.0 +yellow: 1.0.0 maintainers: -- claudia <c@yellow.com> -- isaacs <i@yellow.com> +- claudia <c@yellow.com> +- isaacs <i@yellow.com> dist-tags: -latest: 1.0.0 +latest: 1.0.0 ` exports[`test/lib/commands/view.js TAP package from git > must match snapshot 1`] = ` -green@1.0.0 | ACME | deps: 2 | versions: 2 +green@1.0.0 | ACME | deps: 2 | versions: 2 green is a very important color -DEPRECATED!! - true +DEPRECATED!! - true -keywords: colors, green, crayola +keywords: colors, green, crayola -bin: green +bin: green dist -.tarball: http://hm.green.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1.0 GB +.tarball: http://hm.green.com/1.0.0.tgz +.shasum: 123 +.integrity: --- +.unpackedSize: 1.0 GB dependencies: -red: 1.0.0 -yellow: 1.0.0 +red: 1.0.0 +yellow: 1.0.0 maintainers: -- claudia <c@yellow.com> -- isaacs <i@yellow.com> +- claudia <c@yellow.com> +- isaacs <i@yellow.com> dist-tags: -latest: 1.0.0 +latest: 1.0.0 ` exports[`test/lib/commands/view.js TAP package in cwd directory > must match snapshot 1`] = ` -blue@1.0.0 | Proprietary | deps: none | versions: 2 +blue@1.0.0 | Proprietary | deps: none | versions: 2 dist -.tarball: http://hm.blue.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1 B +.tarball: http://hm.blue.com/1.0.0.tgz +.shasum: 123 dist-tags: -latest: 1.0.0 +latest: 1.0.0 published {TIME} ago ` exports[`test/lib/commands/view.js TAP package in cwd non-specific version > must match snapshot 1`] = ` -blue@1.0.0 | Proprietary | deps: none | versions: 2 +blue@1.0.0 | Proprietary | deps: none | versions: 2 dist -.tarball: http://hm.blue.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1 B +.tarball: http://hm.blue.com/1.0.0.tgz +.shasum: 123 dist-tags: -latest: 1.0.0 +latest: 1.0.0 published {TIME} ago ` exports[`test/lib/commands/view.js TAP package in cwd specific version > must match snapshot 1`] = ` -blue@1.0.0 | Proprietary | deps: none | versions: 2 +blue@1.0.0 | Proprietary | deps: none | versions: 2 dist -.tarball: http://hm.blue.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1 B +.tarball: http://hm.blue.com/1.0.0.tgz +.shasum: 123 dist-tags: -latest: 1.0.0 +latest: 1.0.0 published {TIME} ago ` @@ -177,17 +171,17 @@ exports[`test/lib/commands/view.js TAP package with --json and semver range > mu exports[`test/lib/commands/view.js TAP package with homepage > must match snapshot 1`] = ` -orange@1.0.0 | Proprietary | deps: none 
| versions: 2 -http://hm.orange.com +orange@1.0.0 | Proprietary | deps: none | versions: 2 +http://hm.orange.com dist -.tarball: http://hm.orange.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1 B +.tarball: http://hm.orange.com/1.0.0.tgz +.shasum: 123 +.integrity: --- +.unpackedSize: 1 B dist-tags: -latest: 1.0.0 +latest: 1.0.0 ` exports[`test/lib/commands/view.js TAP package with invalid version > must match snapshot 1`] = ` @@ -196,118 +190,114 @@ exports[`test/lib/commands/view.js TAP package with invalid version > must match exports[`test/lib/commands/view.js TAP package with maintainers info as object > must match snapshot 1`] = ` -pink@1.0.0 | Proprietary | deps: none | versions: 2 +pink@1.0.0 | Proprietary | deps: none | versions: 2 dist -.tarball: http://hm.pink.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1 B +.tarball: http://hm.pink.com/1.0.0.tgz +.shasum: 123 +.integrity: --- +.unpackedSize: 1 B dist-tags: -latest: 1.0.0 +latest: 1.0.0 ` exports[`test/lib/commands/view.js TAP package with more than 25 deps > must match snapshot 1`] = ` -black@1.0.0 | Proprietary | deps: 25 | versions: 2 +black@1.0.0 | Proprietary | deps: 25 | versions: 2 dist -.tarball: http://hm.black.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1 B +.tarball: http://hm.black.com/1.0.0.tgz +.shasum: 123 +.integrity: --- +.unpackedSize: 1 B dependencies: -0: 1.0.0 -10: 1.0.0 -11: 1.0.0 -12: 1.0.0 -13: 1.0.0 -14: 1.0.0 -15: 1.0.0 -16: 1.0.0 -17: 1.0.0 -18: 1.0.0 -19: 1.0.0 -1: 1.0.0 -20: 1.0.0 -21: 1.0.0 -22: 1.0.0 -23: 1.0.0 -2: 1.0.0 -3: 1.0.0 -4: 1.0.0 -5: 1.0.0 -6: 1.0.0 -7: 1.0.0 -8: 1.0.0 -9: 1.0.0 -(...and 1 more.) +0: 1.0.0 +10: 1.0.0 +11: 1.0.0 +12: 1.0.0 +13: 1.0.0 +14: 1.0.0 +15: 1.0.0 +16: 1.0.0 +17: 1.0.0 +18: 1.0.0 +19: 1.0.0 +1: 1.0.0 +20: 1.0.0 +21: 1.0.0 +22: 1.0.0 +23: 1.0.0 +2: 1.0.0 +3: 1.0.0 +4: 1.0.0 +5: 1.0.0 +6: 1.0.0 +7: 1.0.0 +8: 1.0.0 +9: 1.0.0 +(...and 1 more.) 
dist-tags: -latest: 1.0.0 +latest: 1.0.0 ` exports[`test/lib/commands/view.js TAP package with no modified time > must match snapshot 1`] = ` -cyan@1.0.0 | Proprietary | deps: none | versions: 2 +cyan@1.0.0 | Proprietary | deps: none | versions: 2 dist -.tarball: http://hm.cyan.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1.0 MB +.tarball: http://hm.cyan.com/1.0.0.tgz +.shasum: 123 +.integrity: --- +.unpackedSize: 1.0 MB dist-tags: -latest: 1.0.0 +latest: 1.0.0 -published by claudia <claudia@cyan.com> +published by claudia <claudia@cyan.com> ` exports[`test/lib/commands/view.js TAP package with no repo or homepage > must match snapshot 1`] = ` -blue@1.0.0 | Proprietary | deps: none | versions: 2 +blue@1.0.0 | Proprietary | deps: none | versions: 2 dist -.tarball: http://hm.blue.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1 B +.tarball: http://hm.blue.com/1.0.0.tgz +.shasum: 123 dist-tags: -latest: 1.0.0 +latest: 1.0.0 published {TIME} ago ` exports[`test/lib/commands/view.js TAP package with semver range > must match snapshot 1`] = ` -blue@1.0.0 | Proprietary | deps: none | versions: 2 +blue@1.0.0 | Proprietary | deps: none | versions: 2 dist -.tarball: http://hm.blue.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1 B +.tarball: http://hm.blue.com/1.0.0.tgz +.shasum: 123 dist-tags: -latest: 1.0.0 +latest: 1.0.0 published {TIME} ago -blue@1.0.1 | Proprietary | deps: none | versions: 2 +blue@1.0.1 | Proprietary | deps: none | versions: 2 dist -.tarball: http://hm.blue.com/1.0.1.tgz -.shasum: 124 -.integrity: --- -.unpackedSize: 1.0 kB +.tarball: http://hm.blue.com/1.0.1.tgz +.shasum: 124 +.integrity: --- +.unpackedSize: 1.0 kB dist-tags: -latest: 1.0.0 +latest: 1.0.0 -published over a year from now +published over a year from now ` exports[`test/lib/commands/view.js TAP specific field names array field - 1 element > must match snapshot 1`] = ` @@ -435,43 +425,43 @@ exports[`test/lib/commands/view.js TAP workspaces all workspaces --json > must m exports[`test/lib/commands/view.js TAP workspaces all workspaces > must match snapshot 1`] = ` -green@1.0.0 | ACME | deps: 2 | versions: 2 +green@1.0.0 | ACME | deps: 2 | versions: 2 green is a very important color -DEPRECATED!! - true +DEPRECATED!! 
- true -keywords: colors, green, crayola +keywords: colors, green, crayola -bin: green +bin: green dist -.tarball: http://hm.green.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1.0 GB +.tarball: http://hm.green.com/1.0.0.tgz +.shasum: 123 +.integrity: --- +.unpackedSize: 1.0 GB dependencies: -red: 1.0.0 -yellow: 1.0.0 +red: 1.0.0 +yellow: 1.0.0 maintainers: -- claudia <c@yellow.com> -- isaacs <i@yellow.com> +- claudia <c@yellow.com> +- isaacs <i@yellow.com> dist-tags: -latest: 1.0.0 +latest: 1.0.0 -orange@1.0.0 | Proprietary | deps: none | versions: 2 -http://hm.orange.com +orange@1.0.0 | Proprietary | deps: none | versions: 2 +http://hm.orange.com dist -.tarball: http://hm.orange.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1 B +.tarball: http://hm.orange.com/1.0.0.tgz +.shasum: 123 +.integrity: --- +.unpackedSize: 1 B dist-tags: -latest: 1.0.0 +latest: 1.0.0 ` exports[`test/lib/commands/view.js TAP workspaces all workspaces nonexistent field --json > must match snapshot 1`] = ` @@ -499,52 +489,50 @@ orange exports[`test/lib/commands/view.js TAP workspaces one specific workspace > must match snapshot 1`] = ` -green@1.0.0 | ACME | deps: 2 | versions: 2 +green@1.0.0 | ACME | deps: 2 | versions: 2 green is a very important color -DEPRECATED!! - true +DEPRECATED!! - true -keywords: colors, green, crayola +keywords: colors, green, crayola -bin: green +bin: green dist -.tarball: http://hm.green.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1.0 GB +.tarball: http://hm.green.com/1.0.0.tgz +.shasum: 123 +.integrity: --- +.unpackedSize: 1.0 GB dependencies: -red: 1.0.0 -yellow: 1.0.0 +red: 1.0.0 +yellow: 1.0.0 maintainers: -- claudia <c@yellow.com> -- isaacs <i@yellow.com> +- claudia <c@yellow.com> +- isaacs <i@yellow.com> dist-tags: -latest: 1.0.0 +latest: 1.0.0 ` exports[`test/lib/commands/view.js TAP workspaces remote package name > must match snapshot 1`] = ` -pink@1.0.0 | Proprietary | deps: none | versions: 2 +pink@1.0.0 | Proprietary | deps: none | versions: 2 dist -.tarball: http://hm.pink.com/1.0.0.tgz -.shasum: 123 -.integrity: --- -.unpackedSize: 1 B +.tarball: http://hm.pink.com/1.0.0.tgz +.shasum: 123 +.integrity: --- +.unpackedSize: 1 B dist-tags: -latest: 1.0.0 +latest: 1.0.0 ` exports[`test/lib/commands/view.js TAP workspaces remote package name > should have warning of ignoring workspaces 1`] = ` Array [ - Array [ - "Ignoring workspaces for specified package(s)", - ], + "\\u001b[94mIgnoring workspaces for specified package(s)\\u001b[39m", ] ` diff --git a/deps/npm/tap-snapshots/test/lib/docs.js.test.cjs b/deps/npm/tap-snapshots/test/lib/docs.js.test.cjs index 64dad96903ec32..5ae16bc2d2c729 100644 --- a/deps/npm/tap-snapshots/test/lib/docs.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/docs.js.test.cjs @@ -2552,7 +2552,7 @@ exports[`test/lib/docs.js TAP usage access > must match snapshot 1`] = ` Set access level on published packages Usage: -npm access list packages [|| [] +npm access list packages [||] [] npm access list collaborators [ []] npm access get status [] npm access set status=public|private [] @@ -2566,7 +2566,7 @@ Options: Run "npm help access" for more info \`\`\`bash -npm access list packages [|| [] +npm access list packages [||] [] npm access list collaborators [ []] npm access get status [] npm access set status=public|private [] @@ -2958,7 +2958,7 @@ exports[`test/lib/docs.js TAP usage doctor > must match snapshot 1`] = ` Check the health of your npm environment Usage: -npm doctor [ping] [registry] [versions] 
[environment] [permissions] [cache] +npm doctor [connection] [registry] [versions] [environment] [permissions] [cache] Options: [--registry ] @@ -2966,7 +2966,7 @@ Options: Run "npm help doctor" for more info \`\`\`bash -npm doctor [ping] [registry] [versions] [environment] [permissions] [cache] +npm doctor [connection] [registry] [versions] [environment] [permissions] [cache] \`\`\` Note: This command is unaware of workspaces. @@ -4065,11 +4065,11 @@ exports[`test/lib/docs.js TAP usage search > must match snapshot 1`] = ` Search for packages Usage: -npm search [search terms ...] +npm search [ ...] Options: -[-l|--long] [--json] [--color|--no-color|--color always] [-p|--parseable] -[--no-description] [--searchlimit ] [--searchopts ] +[--json] [--color|--no-color|--color always] [-p|--parseable] [--no-description] +[--searchlimit ] [--searchopts ] [--searchexclude ] [--registry ] [--prefer-online] [--prefer-offline] [--offline] @@ -4078,14 +4078,13 @@ aliases: find, s, se Run "npm help search" for more info \`\`\`bash -npm search [search terms ...] +npm search [ ...] aliases: find, s, se \`\`\` Note: This command is unaware of workspaces. -#### \`long\` #### \`json\` #### \`color\` #### \`parseable\` diff --git a/deps/npm/tap-snapshots/test/lib/npm.js.test.cjs b/deps/npm/tap-snapshots/test/lib/npm.js.test.cjs index e29061291137eb..32ab47ef06b18e 100644 --- a/deps/npm/tap-snapshots/test/lib/npm.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/npm.js.test.cjs @@ -5,6 +5,16 @@ * Make sure to inspect the output below. Do not ignore changes! */ 'use strict' +exports[`test/lib/npm.js TAP npm.load workspace-aware configs and commands > should exec workspaces version of commands 1`] = ` +Lifecycle scripts included in a@1.0.0: + test + echo test a + +Lifecycle scripts included in b@1.0.0: + test + echo test b +` + exports[`test/lib/npm.js TAP usage set process.stdout.columns column width 0 > must match snapshot 1`] = ` npm diff --git a/deps/npm/tap-snapshots/test/lib/utils/error-message.js.test.cjs b/deps/npm/tap-snapshots/test/lib/utils/error-message.js.test.cjs index fe581fb6beb291..e2301a60386582 100644 --- a/deps/npm/tap-snapshots/test/lib/utils/error-message.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/utils/error-message.js.test.cjs @@ -190,7 +190,7 @@ Object { "r", "g", "s", - "https://evil:***@npmjs.org/", + "https://evil:***@npmjs.org", ], Array [ "", @@ -519,22 +519,10 @@ Object { exports[`test/lib/utils/error-message.js TAP eacces/eperm {"windows":false,"loaded":true,"cachePath":false,"cacheDest":false} > must match snapshot 2`] = ` Array [ - Array [ - "title", - "npm", - ], - Array [ - "argv", - "/"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/"", - ], - Array [ - "logfile", - "logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", - ], - Array [ - "logfile", - "{CWD}/cache/_logs/{DATE}-debug-0.log", - ], + "title npm", + "argv /"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/" /"--loglevel/" /"silly/" /"--color/" /"false/"", + "logfile logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", + "logfile {CWD}/cache/_logs/{DATE}-debug-0.log", ] ` @@ -559,22 +547,11 @@ Object { exports[`test/lib/utils/error-message.js TAP eacces/eperm {"windows":false,"loaded":true,"cachePath":false,"cacheDest":true} > must match snapshot 2`] = ` Array [ - Array [ - "title", - "npm", - ], - Array [ - "argv", - "/"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/"", - ], - Array [ - "logfile", - "logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", - ], - Array [ - "logfile", - "{CWD}/cache/_logs/{DATE}-debug-0.log", - ], + 
"title npm", + "argv /"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/" /"--loglevel/" /"silly/" /"--color/" /"false/"", + "logfile logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", + "logfile {CWD}/cache/_logs/{DATE}-debug-0.log", + "dummy stack trace", ] ` @@ -599,22 +576,11 @@ Object { exports[`test/lib/utils/error-message.js TAP eacces/eperm {"windows":false,"loaded":true,"cachePath":true,"cacheDest":false} > must match snapshot 2`] = ` Array [ - Array [ - "title", - "npm", - ], - Array [ - "argv", - "/"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/"", - ], - Array [ - "logfile", - "logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", - ], - Array [ - "logfile", - "{CWD}/cache/_logs/{DATE}-debug-0.log", - ], + "title npm", + "argv /"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/" /"--loglevel/" /"silly/" /"--color/" /"false/"", + "logfile logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", + "logfile {CWD}/cache/_logs/{DATE}-debug-0.log", + "dummy stack trace", ] ` @@ -639,22 +605,11 @@ Object { exports[`test/lib/utils/error-message.js TAP eacces/eperm {"windows":false,"loaded":true,"cachePath":true,"cacheDest":true} > must match snapshot 2`] = ` Array [ - Array [ - "title", - "npm", - ], - Array [ - "argv", - "/"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/"", - ], - Array [ - "logfile", - "logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", - ], - Array [ - "logfile", - "{CWD}/cache/_logs/{DATE}-debug-0.log", - ], + "title npm", + "argv /"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/" /"--loglevel/" /"silly/" /"--color/" /"false/"", + "logfile logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", + "logfile {CWD}/cache/_logs/{DATE}-debug-0.log", + "dummy stack trace", ] ` @@ -826,22 +781,10 @@ Object { exports[`test/lib/utils/error-message.js TAP eacces/eperm {"windows":true,"loaded":true,"cachePath":false,"cacheDest":false} > must match snapshot 2`] = ` Array [ - Array [ - "title", - "npm", - ], - Array [ - "argv", - "/"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/"", - ], - Array [ - "logfile", - "logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", - ], - Array [ - "logfile", - "{CWD}/cache/_logs/{DATE}-debug-0.log", - ], + "title npm", + "argv /"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/" /"--loglevel/" /"silly/" /"--color/" /"false/"", + "logfile logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", + "logfile {CWD}/cache/_logs/{DATE}-debug-0.log", ] ` @@ -877,22 +820,10 @@ Object { exports[`test/lib/utils/error-message.js TAP eacces/eperm {"windows":true,"loaded":true,"cachePath":false,"cacheDest":true} > must match snapshot 2`] = ` Array [ - Array [ - "title", - "npm", - ], - Array [ - "argv", - "/"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/"", - ], - Array [ - "logfile", - "logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", - ], - Array [ - "logfile", - "{CWD}/cache/_logs/{DATE}-debug-0.log", - ], + "title npm", + "argv /"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/" /"--loglevel/" /"silly/" /"--color/" /"false/"", + "logfile logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", + "logfile {CWD}/cache/_logs/{DATE}-debug-0.log", ] ` @@ -928,22 +859,10 @@ Object { exports[`test/lib/utils/error-message.js TAP eacces/eperm {"windows":true,"loaded":true,"cachePath":true,"cacheDest":false} > must match snapshot 2`] = ` Array [ - Array [ - "title", - "npm", - ], - Array [ - "argv", - "/"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/"", - ], - Array [ - "logfile", - "logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", - ], - Array [ - "logfile", - "{CWD}/cache/_logs/{DATE}-debug-0.log", - ], + "title npm", + "argv 
/"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/" /"--loglevel/" /"silly/" /"--color/" /"false/"", + "logfile logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", + "logfile {CWD}/cache/_logs/{DATE}-debug-0.log", ] ` @@ -979,22 +898,10 @@ Object { exports[`test/lib/utils/error-message.js TAP eacces/eperm {"windows":true,"loaded":true,"cachePath":true,"cacheDest":true} > must match snapshot 2`] = ` Array [ - Array [ - "title", - "npm", - ], - Array [ - "argv", - "/"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/"", - ], - Array [ - "logfile", - "logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", - ], - Array [ - "logfile", - "{CWD}/cache/_logs/{DATE}-debug-0.log", - ], + "title npm", + "argv /"--fetch-retries/" /"0/" /"--cache/" /"{CWD}/cache/" /"--loglevel/" /"silly/" /"--color/" /"false/"", + "logfile logs-max:10 dir:{CWD}/cache/_logs/{DATE}-", + "logfile {CWD}/cache/_logs/{DATE}-debug-0.log", ] ` diff --git a/deps/npm/tap-snapshots/test/lib/utils/explain-dep.js.test.cjs b/deps/npm/tap-snapshots/test/lib/utils/explain-dep.js.test.cjs index 876cc6552b7605..34620d5c749bc0 100644 --- a/deps/npm/tap-snapshots/test/lib/utils/explain-dep.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/utils/explain-dep.js.test.cjs @@ -22,9 +22,9 @@ manydep@1.0.0 ` exports[`test/lib/utils/explain-dep.js TAP basic bundled > explain color deep 1`] = ` -bundle-of-joy@1.0.0 bundled +bundle-of-joy@1.0.0 bundled node_modules/bundle-of-joy - bundled prod-dep@"1.x" from the root project + bundled prod-dep@"1.x" from the root project ` exports[`test/lib/utils/explain-dep.js TAP basic bundled > explain nocolor shallow 1`] = ` @@ -34,7 +34,7 @@ node_modules/bundle-of-joy ` exports[`test/lib/utils/explain-dep.js TAP basic bundled > print color 1`] = ` -bundle-of-joy@1.0.0 bundled +bundle-of-joy@1.0.0 bundled node_modules/bundle-of-joy ` @@ -44,13 +44,13 @@ node_modules/bundle-of-joy ` exports[`test/lib/utils/explain-dep.js TAP basic deepDev > explain color deep 1`] = ` -deep-dev@2.3.4 dev +deep-dev@2.3.4 dev node_modules/deep-dev - deep-dev@"2.x" from metadev@3.4.5 + deep-dev@"2.x" from metadev@3.4.5 node_modules/dev/node_modules/metadev - metadev@"3.x" from topdev@4.5.6 + metadev@"3.x" from topdev@4.5.6 node_modules/topdev - dev topdev@"4.x" from the root project + dev topdev@"4.x" from the root project ` exports[`test/lib/utils/explain-dep.js TAP basic deepDev > explain nocolor shallow 1`] = ` @@ -63,7 +63,7 @@ node_modules/deep-dev ` exports[`test/lib/utils/explain-dep.js TAP basic deepDev > print color 1`] = ` -deep-dev@2.3.4 dev +deep-dev@2.3.4 dev node_modules/deep-dev ` @@ -73,7 +73,7 @@ node_modules/deep-dev ` exports[`test/lib/utils/explain-dep.js TAP basic extraneous > explain color deep 1`] = ` -extra-neos@1337.420.69-lol extraneous +extra-neos@1337.420.69-lol extraneous node_modules/extra-neos ` @@ -83,7 +83,7 @@ node_modules/extra-neos ` exports[`test/lib/utils/explain-dep.js TAP basic extraneous > print color 1`] = ` -extra-neos@1337.420.69-lol extraneous +extra-neos@1337.420.69-lol extraneous node_modules/extra-neos ` @@ -93,29 +93,29 @@ node_modules/extra-neos ` exports[`test/lib/utils/explain-dep.js TAP basic manyDeps > explain color deep 1`] = ` -manydep@1.0.0 - manydep@"1.0.0" from prod-dep@1.2.3 +manydep@1.0.0 + manydep@"1.0.0" from prod-dep@1.2.3 node_modules/prod-dep - prod-dep@"1.x" from the root project - optional manydep@"1.x" from optdep@1.0.0 optional + prod-dep@"1.x" from the root project + optional manydep@"1.x" from optdep@1.0.0 optional node_modules/optdep - optional optdep@"1.0.0" from the root 
project - manydep@"1.0.x" from extra-neos@1337.420.69-lol extraneous + optional optdep@"1.0.0" from the root project + manydep@"1.0.x" from extra-neos@1337.420.69-lol extraneous node_modules/extra-neos - dev manydep@"*" from deep-dev@2.3.4 dev + dev manydep@"*" from deep-dev@2.3.4 dev node_modules/deep-dev - deep-dev@"2.x" from metadev@3.4.5 + deep-dev@"2.x" from metadev@3.4.5 node_modules/dev/node_modules/metadev - metadev@"3.x" from topdev@4.5.6 + metadev@"3.x" from topdev@4.5.6 node_modules/topdev - dev topdev@"4.x" from the root project - peer manydep@">1.0.0-beta <1.0.1" from peer@1.0.0 peer + dev topdev@"4.x" from the root project + peer manydep@">1.0.0-beta <1.0.1" from peer@1.0.0 peer node_modules/peer - peer peer@"1.0.0" from the root project - manydep@">1.0.0-beta <1.0.1" from the root project - manydep@"1" from a package with a pretty long name@1.2.3 - manydep@"1" from another package with a pretty long name@1.2.3 - manydep@"1" from yet another a package with a pretty long name@1.2.3 + peer peer@"1.0.0" from the root project + manydep@">1.0.0-beta <1.0.1" from the root project + manydep@"1" from a package with a pretty long name@1.2.3 + manydep@"1" from another package with a pretty long name@1.2.3 + manydep@"1" from yet another a package with a pretty long name@1.2.3 ` exports[`test/lib/utils/explain-dep.js TAP basic manyDeps > explain nocolor shallow 1`] = ` @@ -127,7 +127,7 @@ manydep@1.0.0 ` exports[`test/lib/utils/explain-dep.js TAP basic manyDeps > print color 1`] = ` -manydep@1.0.0 +manydep@1.0.0 ` exports[`test/lib/utils/explain-dep.js TAP basic manyDeps > print nocolor 1`] = ` @@ -135,9 +135,9 @@ manydep@1.0.0 ` exports[`test/lib/utils/explain-dep.js TAP basic optional > explain color deep 1`] = ` -optdep@1.0.0 optional +optdep@1.0.0 optional node_modules/optdep - optional optdep@"1.0.0" from the root project + optional optdep@"1.0.0" from the root project ` exports[`test/lib/utils/explain-dep.js TAP basic optional > explain nocolor shallow 1`] = ` @@ -147,7 +147,7 @@ node_modules/optdep ` exports[`test/lib/utils/explain-dep.js TAP basic optional > print color 1`] = ` -optdep@1.0.0 optional +optdep@1.0.0 optional node_modules/optdep ` @@ -157,9 +157,9 @@ node_modules/optdep ` exports[`test/lib/utils/explain-dep.js TAP basic overridden > explain color deep 1`] = ` -overridden-root@1.0.0 overridden +overridden-root@1.0.0 overridden node_modules/overridden-root - overridden overridden-dep@"1.0.0" (was "^2.0.0") from the root project + overridden overridden-dep@"1.0.0" (was "^2.0.0") from the root project ` exports[`test/lib/utils/explain-dep.js TAP basic overridden > explain nocolor shallow 1`] = ` @@ -169,7 +169,7 @@ node_modules/overridden-root ` exports[`test/lib/utils/explain-dep.js TAP basic overridden > print color 1`] = ` -overridden-root@1.0.0 overridden +overridden-root@1.0.0 overridden node_modules/overridden-root ` @@ -179,9 +179,9 @@ node_modules/overridden-root ` exports[`test/lib/utils/explain-dep.js TAP basic peer > explain color deep 1`] = ` -peer@1.0.0 peer +peer@1.0.0 peer node_modules/peer - peer peer@"1.0.0" from the root project + peer peer@"1.0.0" from the root project ` exports[`test/lib/utils/explain-dep.js TAP basic peer > explain nocolor shallow 1`] = ` @@ -191,7 +191,7 @@ node_modules/peer ` exports[`test/lib/utils/explain-dep.js TAP basic peer > print color 1`] = ` -peer@1.0.0 peer +peer@1.0.0 peer node_modules/peer ` @@ -201,9 +201,9 @@ node_modules/peer ` exports[`test/lib/utils/explain-dep.js TAP basic prodDep > explain color deep 1`] = ` 
-prod-dep@1.2.3 +prod-dep@1.2.3 node_modules/prod-dep - prod-dep@"1.x" from the root project + prod-dep@"1.x" from the root project ` exports[`test/lib/utils/explain-dep.js TAP basic prodDep > explain nocolor shallow 1`] = ` @@ -213,7 +213,7 @@ node_modules/prod-dep ` exports[`test/lib/utils/explain-dep.js TAP basic prodDep > print color 1`] = ` -prod-dep@1.2.3 +prod-dep@1.2.3 node_modules/prod-dep ` @@ -223,11 +223,11 @@ node_modules/prod-dep ` exports[`test/lib/utils/explain-dep.js TAP basic workspaces > explain color deep 1`] = ` -a@1.0.0 +a@1.0.0 a - a@1.0.0 + a@1.0.0 node_modules/a - workspace a from the root project + workspace a from the root project ` exports[`test/lib/utils/explain-dep.js TAP basic workspaces > explain nocolor shallow 1`] = ` @@ -239,7 +239,7 @@ a ` exports[`test/lib/utils/explain-dep.js TAP basic workspaces > print color 1`] = ` -a@1.0.0 +a@1.0.0 a ` diff --git a/deps/npm/tap-snapshots/test/lib/utils/explain-eresolve.js.test.cjs b/deps/npm/tap-snapshots/test/lib/utils/explain-eresolve.js.test.cjs index 3d73019d3e45b9..5190ead244697c 100644 --- a/deps/npm/tap-snapshots/test/lib/utils/explain-eresolve.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/utils/explain-eresolve.js.test.cjs @@ -6,15 +6,15 @@ */ 'use strict' exports[`test/lib/utils/explain-eresolve.js TAP basic chain-conflict > explain with color, depth of 2 1`] = ` -While resolving: project@1.2.3 -Found: @isaacs/testing-peer-dep-conflict-chain-d@2.0.0 +While resolving: project@1.2.3 +Found: @isaacs/testing-peer-dep-conflict-chain-d@2.0.0 node_modules/@isaacs/testing-peer-dep-conflict-chain-d - @isaacs/testing-peer-dep-conflict-chain-d@"2" from the root project + @isaacs/testing-peer-dep-conflict-chain-d@"2" from the root project Could not resolve dependency: -peer @isaacs/testing-peer-dep-conflict-chain-d@"1" from @isaacs/testing-peer-dep-conflict-chain-c@1.0.0 +peer @isaacs/testing-peer-dep-conflict-chain-d@"1" from @isaacs/testing-peer-dep-conflict-chain-c@1.0.0 node_modules/@isaacs/testing-peer-dep-conflict-chain-c - @isaacs/testing-peer-dep-conflict-chain-c@"1" from the root project + @isaacs/testing-peer-dep-conflict-chain-c@"1" from the root project ` exports[`test/lib/utils/explain-eresolve.js TAP basic chain-conflict > explain with no color, depth of 6 1`] = ` @@ -48,15 +48,15 @@ to accept an incorrect (and potentially broken) dependency resolution. ` exports[`test/lib/utils/explain-eresolve.js TAP basic chain-conflict > report with color 1`] = ` -While resolving: project@1.2.3 -Found: @isaacs/testing-peer-dep-conflict-chain-d@2.0.0 +While resolving: project@1.2.3 +Found: @isaacs/testing-peer-dep-conflict-chain-d@2.0.0 node_modules/@isaacs/testing-peer-dep-conflict-chain-d - @isaacs/testing-peer-dep-conflict-chain-d@"2" from the root project + @isaacs/testing-peer-dep-conflict-chain-d@"2" from the root project Could not resolve dependency: -peer @isaacs/testing-peer-dep-conflict-chain-d@"1" from @isaacs/testing-peer-dep-conflict-chain-c@1.0.0 +peer @isaacs/testing-peer-dep-conflict-chain-d@"1" from @isaacs/testing-peer-dep-conflict-chain-c@1.0.0 node_modules/@isaacs/testing-peer-dep-conflict-chain-c - @isaacs/testing-peer-dep-conflict-chain-c@"1" from the root project + @isaacs/testing-peer-dep-conflict-chain-c@"1" from the root project Fix the upstream dependency conflict, or retry this command with --force or --legacy-peer-deps @@ -80,20 +80,20 @@ to accept an incorrect (and potentially broken) dependency resolution. 
` exports[`test/lib/utils/explain-eresolve.js TAP basic cycleNested > explain with color, depth of 2 1`] = ` -Found: @isaacs/peer-dep-cycle-c@2.0.0 +Found: @isaacs/peer-dep-cycle-c@2.0.0 node_modules/@isaacs/peer-dep-cycle-c - @isaacs/peer-dep-cycle-c@"2.x" from the root project + @isaacs/peer-dep-cycle-c@"2.x" from the root project Could not resolve dependency: -peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 +peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 node_modules/@isaacs/peer-dep-cycle-a - @isaacs/peer-dep-cycle-a@"1.x" from the root project + @isaacs/peer-dep-cycle-a@"1.x" from the root project -Conflicting peer dependency: @isaacs/peer-dep-cycle-c@1.0.0 +Conflicting peer dependency: @isaacs/peer-dep-cycle-c@1.0.0 node_modules/@isaacs/peer-dep-cycle-c - peer @isaacs/peer-dep-cycle-c@"1" from @isaacs/peer-dep-cycle-b@1.0.0 + peer @isaacs/peer-dep-cycle-c@"1" from @isaacs/peer-dep-cycle-b@1.0.0 node_modules/@isaacs/peer-dep-cycle-b - peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 + peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 node_modules/@isaacs/peer-dep-cycle-a ` @@ -142,22 +142,22 @@ to accept an incorrect (and potentially broken) dependency resolution. ` exports[`test/lib/utils/explain-eresolve.js TAP basic cycleNested > report with color 1`] = ` -Found: @isaacs/peer-dep-cycle-c@2.0.0 +Found: @isaacs/peer-dep-cycle-c@2.0.0 node_modules/@isaacs/peer-dep-cycle-c - @isaacs/peer-dep-cycle-c@"2.x" from the root project + @isaacs/peer-dep-cycle-c@"2.x" from the root project Could not resolve dependency: -peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 +peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 node_modules/@isaacs/peer-dep-cycle-a - @isaacs/peer-dep-cycle-a@"1.x" from the root project + @isaacs/peer-dep-cycle-a@"1.x" from the root project -Conflicting peer dependency: @isaacs/peer-dep-cycle-c@1.0.0 +Conflicting peer dependency: @isaacs/peer-dep-cycle-c@1.0.0 node_modules/@isaacs/peer-dep-cycle-c - peer @isaacs/peer-dep-cycle-c@"1" from @isaacs/peer-dep-cycle-b@1.0.0 + peer @isaacs/peer-dep-cycle-c@"1" from @isaacs/peer-dep-cycle-b@1.0.0 node_modules/@isaacs/peer-dep-cycle-b - peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 + peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 node_modules/@isaacs/peer-dep-cycle-a - @isaacs/peer-dep-cycle-a@"1.x" from the root project + @isaacs/peer-dep-cycle-a@"1.x" from the root project Fix the upstream dependency conflict, or retry this command with --no-strict-peer-deps, --force, or --legacy-peer-deps @@ -188,20 +188,20 @@ to accept an incorrect (and potentially broken) dependency resolution. ` exports[`test/lib/utils/explain-eresolve.js TAP basic eslint-plugin case > explain with color, depth of 2 1`] = ` -While resolving: eslint-plugin-react@7.24.0 -Found: eslint@6.8.0 +While resolving: eslint-plugin-react@7.24.0 +Found: eslint@6.8.0 node_modules/eslint - dev eslint@"^3 || ^4 || ^5 || ^6 || ^7" from the root project + dev eslint@"^3 || ^4 || ^5 || ^6 || ^7" from the root project 3 more (@typescript-eslint/parser, ...) 
Could not resolve dependency: -dev eslint-plugin-eslint-plugin@"^3.1.0" from the root project +dev eslint-plugin-eslint-plugin@"^3.1.0" from the root project -Conflicting peer dependency: eslint@7.31.0 +Conflicting peer dependency: eslint@7.31.0 node_modules/eslint - peer eslint@"^7.0.0" from eslint-plugin-eslint-plugin@3.5.1 + peer eslint@"^7.0.0" from eslint-plugin-eslint-plugin@3.5.1 node_modules/eslint-plugin-eslint-plugin - dev eslint-plugin-eslint-plugin@"^3.1.0" from the root project + dev eslint-plugin-eslint-plugin@"^3.1.0" from the root project ` exports[`test/lib/utils/explain-eresolve.js TAP basic eslint-plugin case > explain with no color, depth of 6 1`] = ` @@ -262,23 +262,23 @@ to accept an incorrect (and potentially broken) dependency resolution. ` exports[`test/lib/utils/explain-eresolve.js TAP basic eslint-plugin case > report with color 1`] = ` -While resolving: eslint-plugin-react@7.24.0 -Found: eslint@6.8.0 +While resolving: eslint-plugin-react@7.24.0 +Found: eslint@6.8.0 node_modules/eslint - dev eslint@"^3 || ^4 || ^5 || ^6 || ^7" from the root project - peer eslint@"^5.0.0 || ^6.0.0" from @typescript-eslint/parser@2.34.0 + dev eslint@"^3 || ^4 || ^5 || ^6 || ^7" from the root project + peer eslint@"^5.0.0 || ^6.0.0" from @typescript-eslint/parser@2.34.0 node_modules/@typescript-eslint/parser - dev @typescript-eslint/parser@"^2.34.0" from the root project + dev @typescript-eslint/parser@"^2.34.0" from the root project 2 more (eslint-config-airbnb-base, eslint-plugin-import) Could not resolve dependency: -dev eslint-plugin-eslint-plugin@"^3.1.0" from the root project +dev eslint-plugin-eslint-plugin@"^3.1.0" from the root project -Conflicting peer dependency: eslint@7.31.0 +Conflicting peer dependency: eslint@7.31.0 node_modules/eslint - peer eslint@"^7.0.0" from eslint-plugin-eslint-plugin@3.5.1 + peer eslint@"^7.0.0" from eslint-plugin-eslint-plugin@3.5.1 node_modules/eslint-plugin-eslint-plugin - dev eslint-plugin-eslint-plugin@"^3.1.0" from the root project + dev eslint-plugin-eslint-plugin@"^3.1.0" from the root project Fix the upstream dependency conflict, or retry this command with --force or --legacy-peer-deps @@ -310,18 +310,18 @@ to accept an incorrect (and potentially broken) dependency resolution. ` exports[`test/lib/utils/explain-eresolve.js TAP basic gatsby > explain with color, depth of 2 1`] = ` -While resolving: gatsby-recipes@0.2.31 -Found: ink@3.0.0-7 +While resolving: gatsby-recipes@0.2.31 +Found: ink@3.0.0-7 node_modules/ink - dev ink@"next" from gatsby-recipes@0.2.31 + dev ink@"next" from gatsby-recipes@0.2.31 node_modules/gatsby-recipes - gatsby-recipes@"^0.2.31" from gatsby-cli@2.12.107 + gatsby-recipes@"^0.2.31" from gatsby-cli@2.12.107 node_modules/gatsby-cli Could not resolve dependency: -peer ink@">=2.0.0" from ink-box@1.0.0 +peer ink@">=2.0.0" from ink-box@1.0.0 node_modules/ink-box - ink-box@"^1.0.0" from gatsby-recipes@0.2.31 + ink-box@"^1.0.0" from gatsby-recipes@0.2.31 node_modules/gatsby-recipes ` @@ -380,25 +380,25 @@ to accept an incorrect (and potentially broken) dependency resolution. 
` exports[`test/lib/utils/explain-eresolve.js TAP basic gatsby > report with color 1`] = ` -While resolving: gatsby-recipes@0.2.31 -Found: ink@3.0.0-7 +While resolving: gatsby-recipes@0.2.31 +Found: ink@3.0.0-7 node_modules/ink - dev ink@"next" from gatsby-recipes@0.2.31 + dev ink@"next" from gatsby-recipes@0.2.31 node_modules/gatsby-recipes - gatsby-recipes@"^0.2.31" from gatsby-cli@2.12.107 + gatsby-recipes@"^0.2.31" from gatsby-cli@2.12.107 node_modules/gatsby-cli - gatsby-cli@"^2.12.107" from gatsby@2.24.74 + gatsby-cli@"^2.12.107" from gatsby@2.24.74 node_modules/gatsby - gatsby@"" from the root project + gatsby@"" from the root project Could not resolve dependency: -peer ink@">=2.0.0" from ink-box@1.0.0 +peer ink@">=2.0.0" from ink-box@1.0.0 node_modules/ink-box - ink-box@"^1.0.0" from gatsby-recipes@0.2.31 + ink-box@"^1.0.0" from gatsby-recipes@0.2.31 node_modules/gatsby-recipes - gatsby-recipes@"^0.2.31" from gatsby-cli@2.12.107 + gatsby-recipes@"^0.2.31" from gatsby-cli@2.12.107 node_modules/gatsby-cli - gatsby-cli@"^2.12.107" from gatsby@2.24.74 + gatsby-cli@"^2.12.107" from gatsby@2.24.74 node_modules/gatsby Fix the upstream dependency conflict, or retry @@ -434,13 +434,13 @@ to accept an incorrect (and potentially broken) dependency resolution. ` exports[`test/lib/utils/explain-eresolve.js TAP basic no current node, but has current edge > explain with color, depth of 2 1`] = ` -While resolving: eslint@7.22.0 -Found: dev eslint@"file:." from the root project +While resolving: eslint@7.22.0 +Found: dev eslint@"file:." from the root project Could not resolve dependency: -peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 +peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 node_modules/eslint-plugin-jsdoc - dev eslint-plugin-jsdoc@"^22.1.0" from the root project + dev eslint-plugin-jsdoc@"^22.1.0" from the root project ` exports[`test/lib/utils/explain-eresolve.js TAP basic no current node, but has current edge > explain with no color, depth of 6 1`] = ` @@ -470,13 +470,13 @@ to accept an incorrect (and potentially broken) dependency resolution. ` exports[`test/lib/utils/explain-eresolve.js TAP basic no current node, but has current edge > report with color 1`] = ` -While resolving: eslint@7.22.0 -Found: dev eslint@"file:." from the root project +While resolving: eslint@7.22.0 +Found: dev eslint@"file:." from the root project Could not resolve dependency: -peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 +peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 node_modules/eslint-plugin-jsdoc - dev eslint-plugin-jsdoc@"^22.1.0" from the root project + dev eslint-plugin-jsdoc@"^22.1.0" from the root project Fix the upstream dependency conflict, or retry this command with --force or --legacy-peer-deps @@ -498,15 +498,15 @@ to accept an incorrect (and potentially broken) dependency resolution. 
` exports[`test/lib/utils/explain-eresolve.js TAP basic no current node, no current edge, idk > explain with color, depth of 2 1`] = ` -While resolving: eslint@7.22.0 -Found: peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 +While resolving: eslint@7.22.0 +Found: peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 node_modules/eslint-plugin-jsdoc - dev eslint-plugin-jsdoc@"^22.1.0" from the root project + dev eslint-plugin-jsdoc@"^22.1.0" from the root project Could not resolve dependency: -peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 +peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 node_modules/eslint-plugin-jsdoc - dev eslint-plugin-jsdoc@"^22.1.0" from the root project + dev eslint-plugin-jsdoc@"^22.1.0" from the root project ` exports[`test/lib/utils/explain-eresolve.js TAP basic no current node, no current edge, idk > explain with no color, depth of 6 1`] = ` @@ -540,15 +540,15 @@ to accept an incorrect (and potentially broken) dependency resolution. ` exports[`test/lib/utils/explain-eresolve.js TAP basic no current node, no current edge, idk > report with color 1`] = ` -While resolving: eslint@7.22.0 -Found: peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 +While resolving: eslint@7.22.0 +Found: peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 node_modules/eslint-plugin-jsdoc - dev eslint-plugin-jsdoc@"^22.1.0" from the root project + dev eslint-plugin-jsdoc@"^22.1.0" from the root project Could not resolve dependency: -peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 +peer eslint@"^6.0.0" from eslint-plugin-jsdoc@22.2.0 node_modules/eslint-plugin-jsdoc - dev eslint-plugin-jsdoc@"^22.1.0" from the root project + dev eslint-plugin-jsdoc@"^22.1.0" from the root project Fix the upstream dependency conflict, or retry this command with --force or --legacy-peer-deps @@ -572,15 +572,15 @@ to accept an incorrect (and potentially broken) dependency resolution. ` exports[`test/lib/utils/explain-eresolve.js TAP basic withShrinkwrap > explain with color, depth of 2 1`] = ` -While resolving: @isaacs/peer-dep-cycle-b@1.0.0 -Found: @isaacs/peer-dep-cycle-c@2.0.0 +While resolving: @isaacs/peer-dep-cycle-b@1.0.0 +Found: @isaacs/peer-dep-cycle-c@2.0.0 node_modules/@isaacs/peer-dep-cycle-c - @isaacs/peer-dep-cycle-c@"2.x" from the root project + @isaacs/peer-dep-cycle-c@"2.x" from the root project Could not resolve dependency: -peer @isaacs/peer-dep-cycle-c@"1" from @isaacs/peer-dep-cycle-b@1.0.0 +peer @isaacs/peer-dep-cycle-c@"1" from @isaacs/peer-dep-cycle-b@1.0.0 node_modules/@isaacs/peer-dep-cycle-b - peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 + peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 node_modules/@isaacs/peer-dep-cycle-a ` @@ -619,17 +619,17 @@ to accept an incorrect (and potentially broken) dependency resolution. 
` exports[`test/lib/utils/explain-eresolve.js TAP basic withShrinkwrap > report with color 1`] = ` -While resolving: @isaacs/peer-dep-cycle-b@1.0.0 -Found: @isaacs/peer-dep-cycle-c@2.0.0 +While resolving: @isaacs/peer-dep-cycle-b@1.0.0 +Found: @isaacs/peer-dep-cycle-c@2.0.0 node_modules/@isaacs/peer-dep-cycle-c - @isaacs/peer-dep-cycle-c@"2.x" from the root project + @isaacs/peer-dep-cycle-c@"2.x" from the root project Could not resolve dependency: -peer @isaacs/peer-dep-cycle-c@"1" from @isaacs/peer-dep-cycle-b@1.0.0 +peer @isaacs/peer-dep-cycle-c@"1" from @isaacs/peer-dep-cycle-b@1.0.0 node_modules/@isaacs/peer-dep-cycle-b - peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 + peer @isaacs/peer-dep-cycle-b@"1" from @isaacs/peer-dep-cycle-a@1.0.0 node_modules/@isaacs/peer-dep-cycle-a - @isaacs/peer-dep-cycle-a@"1.x" from the root project + @isaacs/peer-dep-cycle-a@"1.x" from the root project Fix the upstream dependency conflict, or retry this command with --no-strict-peer-deps, --force, or --legacy-peer-deps diff --git a/deps/npm/tap-snapshots/test/lib/utils/log-file.js.test.cjs b/deps/npm/tap-snapshots/test/lib/utils/log-file.js.test.cjs index 0a4af7cadf0607..34002b8133e229 100644 --- a/deps/npm/tap-snapshots/test/lib/utils/log-file.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/utils/log-file.js.test.cjs @@ -7,64 +7,65 @@ 'use strict' exports[`test/lib/utils/log-file.js TAP snapshot > must match snapshot 1`] = ` 0 verbose logfile logs-max:10 dir:{CWD}/{DATE}- -1 silly logfile done cleaning log files -2 error no prefix -3 error prefix with prefix -4 error prefix 1 2 3 -5 verbose { obj: { with: { many: [Object] } } } -6 verbose {"obj":{"with":{"many":{"props":1}}}} -7 verbose { -7 verbose "obj": { -7 verbose "with": { -7 verbose "many": { -7 verbose "props": 1 -7 verbose } -7 verbose } -7 verbose } -7 verbose } -8 verbose [ 'test', 'with', 'an', 'array' ] -9 verbose ["test","with","an","array"] -10 verbose [ -10 verbose "test", -10 verbose "with", -10 verbose "an", -10 verbose "array" -10 verbose ] -11 verbose [ 'test', [ 'with', [ 'an', [Array] ] ] ] -12 verbose ["test",["with",["an",["array"]]]] -13 verbose [ -13 verbose "test", -13 verbose [ -13 verbose "with", -13 verbose [ -13 verbose "an", -13 verbose [ -13 verbose "array" -13 verbose ] -13 verbose ] -13 verbose ] -13 verbose ] -14 error pre has many errors Error: message -14 error pre at stack trace line 0 -14 error pre at stack trace line 1 -14 error pre at stack trace line 2 -14 error pre at stack trace line 3 -14 error pre at stack trace line 4 -14 error pre at stack trace line 5 -14 error pre at stack trace line 6 -14 error pre at stack trace line 7 -14 error pre at stack trace line 8 -14 error pre at stack trace line 9 Error: message2 -14 error pre at stack trace line 0 -14 error pre at stack trace line 1 -14 error pre at stack trace line 2 -14 error pre at stack trace line 3 -14 error pre at stack trace line 4 -14 error pre at stack trace line 5 -14 error pre at stack trace line 6 -14 error pre at stack trace line 7 -14 error pre at stack trace line 8 -14 error pre at stack trace line 9 -15 error nostack [Error: message] +1 verbose logfile {CWD}/{DATE}-debug-0.log +2 silly logfile done cleaning log files +3 error no prefix +4 error prefix with prefix +5 error prefix 1 2 3 +6 verbose { obj: { with: { many: [Object] } } } +7 verbose {"obj":{"with":{"many":{"props":1}}}} +8 verbose { +8 verbose "obj": { +8 verbose "with": { +8 verbose "many": { +8 verbose "props": 1 +8 verbose } +8 verbose } +8 verbose } +8 
verbose } +9 verbose [ 'test', 'with', 'an', 'array' ] +10 verbose ["test","with","an","array"] +11 verbose [ +11 verbose "test", +11 verbose "with", +11 verbose "an", +11 verbose "array" +11 verbose ] +12 verbose [ 'test', [ 'with', [ 'an', [Array] ] ] ] +13 verbose ["test",["with",["an",["array"]]]] +14 verbose [ +14 verbose "test", +14 verbose [ +14 verbose "with", +14 verbose [ +14 verbose "an", +14 verbose [ +14 verbose "array" +14 verbose ] +14 verbose ] +14 verbose ] +14 verbose ] +15 error pre has many errors Error: message +15 error pre at stack trace line 0 +15 error pre at stack trace line 1 +15 error pre at stack trace line 2 +15 error pre at stack trace line 3 +15 error pre at stack trace line 4 +15 error pre at stack trace line 5 +15 error pre at stack trace line 6 +15 error pre at stack trace line 7 +15 error pre at stack trace line 8 +15 error pre at stack trace line 9 Error: message2 +15 error pre at stack trace line 0 +15 error pre at stack trace line 1 +15 error pre at stack trace line 2 +15 error pre at stack trace line 3 +15 error pre at stack trace line 4 +15 error pre at stack trace line 5 +15 error pre at stack trace line 6 +15 error pre at stack trace line 7 +15 error pre at stack trace line 8 +15 error pre at stack trace line 9 +16 error nostack [Error: message] ` diff --git a/deps/npm/tap-snapshots/test/lib/utils/open-url-prompt.js.test.cjs b/deps/npm/tap-snapshots/test/lib/utils/open-url-prompt.js.test.cjs index f31ec8e041f517..cf5feed44cc373 100644 --- a/deps/npm/tap-snapshots/test/lib/utils/open-url-prompt.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/utils/open-url-prompt.js.test.cjs @@ -10,7 +10,6 @@ npm home: https://www.npmjs.com Browser unavailable. Please open the URL manually: https://www.npmjs.com - ` exports[`test/lib/utils/open-url-prompt.js TAP opens a url > must match snapshot 1`] = ` diff --git a/deps/npm/tap-snapshots/test/lib/utils/open-url.js.test.cjs b/deps/npm/tap-snapshots/test/lib/utils/open-url.js.test.cjs index 8c8159ebcfc04c..f1560db686cde0 100644 --- a/deps/npm/tap-snapshots/test/lib/utils/open-url.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/utils/open-url.js.test.cjs @@ -8,7 +8,6 @@ exports[`test/lib/utils/open-url.js TAP prints where to go when browser is disabled > printed expected message 1`] = ` npm home: https://www.npmjs.com - ` exports[`test/lib/utils/open-url.js TAP prints where to go when browser is disabled and json is enabled > printed expected message 1`] = ` @@ -21,5 +20,4 @@ exports[`test/lib/utils/open-url.js TAP prints where to go when browser is disab exports[`test/lib/utils/open-url.js TAP prints where to go when given browser does not exist > printed expected message 1`] = ` npm home: https://www.npmjs.com - ` diff --git a/deps/npm/tap-snapshots/test/lib/utils/reify-output.js.test.cjs b/deps/npm/tap-snapshots/test/lib/utils/reify-output.js.test.cjs index 3e3df9039efb91..d653d4c1fadc0d 100644 --- a/deps/npm/tap-snapshots/test/lib/utils/reify-output.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/utils/reify-output.js.test.cjs @@ -1634,19 +1634,17 @@ exports[`test/lib/utils/reify-output.js TAP packages changed message > {"added" ` exports[`test/lib/utils/reify-output.js TAP prints dedupe difference on dry-run > diff table 1`] = ` - -change bar 1.0.0 -> 2.1.0 -remove bar 1.0.0 -add foo 1.0.0 +change bar 1.0.0 => 2.1.0 +remove bar 1.0.0 +add foo 1.0.0 removed 1 package, and changed 1 package in {TIME} ` exports[`test/lib/utils/reify-output.js TAP prints dedupe difference on long > diff table 1`] = ` - -change 
bar 1.0.0 -> 2.1.0 -remove bar 1.0.0 -add foo 1.0.0 +change bar 1.0.0 => 2.1.0 +remove bar 1.0.0 +add foo 1.0.0 removed 1 package, and changed 1 package in {TIME} ` diff --git a/deps/npm/tap-snapshots/test/lib/utils/tar.js.test.cjs b/deps/npm/tap-snapshots/test/lib/utils/tar.js.test.cjs index e92314b57025eb..fda9577c843901 100644 --- a/deps/npm/tap-snapshots/test/lib/utils/tar.js.test.cjs +++ b/deps/npm/tap-snapshots/test/lib/utils/tar.js.test.cjs @@ -9,28 +9,38 @@ exports[`test/lib/utils/tar.js TAP should log tarball contents > must match snap package: my-cool-pkg@1.0.0 -=== Tarball Contents === +Tarball Contents -4B cat -4B chai -4B dog +4B cat +4B chai +4B dog 114B package.json -=== Bundled Dependencies === +Bundled Dependencies bundle-dep -=== Tarball Details === +Tarball Details + +name: my-cool-pkg + +version: 1.0.0 + +filename: my-cool-pkg-1.0.0.tgz + +package size: {size} -name: my-cool-pkg -version: 1.0.0 -filename: my-cool-pkg-1.0.0.tgz -package size: {size} unpacked size: 126 B -shasum: {sha} -integrity: {integrity} -bundled deps: 1 + +shasum: {sha} + +integrity: {integrity} + +bundled deps: 1 + bundled files: 0 -own files: 5 -total files: 5 + +own files: 5 + +total files: 5 ` @@ -39,28 +49,38 @@ exports[`test/lib/utils/tar.js TAP should log tarball contents of a scoped packa package: @myscope/my-cool-pkg@1.0.0 -=== Tarball Contents === +Tarball Contents -4B cat -4B chai -4B dog +4B cat +4B chai +4B dog 123B package.json -=== Bundled Dependencies === +Bundled Dependencies bundle-dep -=== Tarball Details === +Tarball Details + +name: @myscope/my-cool-pkg + +version: 1.0.0 + +filename: myscope-my-cool-pkg-1.0.0.tgz + +package size: {size} -name: @myscope/my-cool-pkg -version: 1.0.0 -filename: myscope-my-cool-pkg-1.0.0.tgz -package size: {size} unpacked size: 135 B -shasum: {sha} -integrity: {integrity} -bundled deps: 1 -bundled files: 0 -own files: 5 -total files: 5 + +shasum: {sha} + +integrity: {integrity} + +bundled deps: 1 + +bundled files: 0 + +own files: 5 + +total files: 5 ` diff --git a/deps/npm/tap-snapshots/test/lib/utils/update-notifier.js.test.cjs b/deps/npm/tap-snapshots/test/lib/utils/update-notifier.js.test.cjs deleted file mode 100644 index e5e9dd77569e00..00000000000000 --- a/deps/npm/tap-snapshots/test/lib/utils/update-notifier.js.test.cjs +++ /dev/null @@ -1,102 +0,0 @@ -/* IMPORTANT - * This snapshot file is auto-generated, but designed for humans. - * It should be checked into source control and tracked carefully. - * Re-generate by setting TAP_SNAPSHOT=1 and running tests. - * Make sure to inspect the output below. Do not ignore changes! - */ -'use strict' -exports[`test/lib/utils/update-notifier.js TAP notification situations 122.420.69 - color=always > must match snapshot 1`] = ` - -New major version of npm available! 122.420.69 -> 123.420.69 -Changelog: https://github.com/npm/cli/releases/tag/v123.420.69 -Run npm install -g npm@123.420.69 to update! - -` - -exports[`test/lib/utils/update-notifier.js TAP notification situations 122.420.69 - color=false > must match snapshot 1`] = ` - -New major version of npm available! 122.420.69 -> 123.420.69 -Changelog: -Run \`npm install -g npm@123.420.69\` to update! - -` - -exports[`test/lib/utils/update-notifier.js TAP notification situations 123.419.69 - color=always > must match snapshot 1`] = ` - -New minor version of npm available! 123.419.69 -> 123.420.69 -Changelog: https://github.com/npm/cli/releases/tag/v123.420.69 -Run npm install -g npm@123.420.69 to update! 
- -` - -exports[`test/lib/utils/update-notifier.js TAP notification situations 123.419.69 - color=false > must match snapshot 1`] = ` - -New minor version of npm available! 123.419.69 -> 123.420.69 -Changelog: -Run \`npm install -g npm@123.420.69\` to update! - -` - -exports[`test/lib/utils/update-notifier.js TAP notification situations 123.420.68 - color=always > must match snapshot 1`] = ` - -New patch version of npm available! 123.420.68 -> 123.420.69 -Changelog: https://github.com/npm/cli/releases/tag/v123.420.69 -Run npm install -g npm@123.420.69 to update! - -` - -exports[`test/lib/utils/update-notifier.js TAP notification situations 123.420.68 - color=false > must match snapshot 1`] = ` - -New patch version of npm available! 123.420.68 -> 123.420.69 -Changelog: -Run \`npm install -g npm@123.420.69\` to update! - -` - -exports[`test/lib/utils/update-notifier.js TAP notification situations 123.420.70 - color=always > must match snapshot 1`] = ` - -New minor version of npm available! 123.420.70 -> 123.421.70 -Changelog: https://github.com/npm/cli/releases/tag/v123.421.70 -Run npm install -g npm@123.421.70 to update! - -` - -exports[`test/lib/utils/update-notifier.js TAP notification situations 123.420.70 - color=false > must match snapshot 1`] = ` - -New minor version of npm available! 123.420.70 -> 123.421.70 -Changelog: -Run \`npm install -g npm@123.421.70\` to update! - -` - -exports[`test/lib/utils/update-notifier.js TAP notification situations 123.421.69 - color=always > must match snapshot 1`] = ` - -New patch version of npm available! 123.421.69 -> 123.421.70 -Changelog: https://github.com/npm/cli/releases/tag/v123.421.70 -Run npm install -g npm@123.421.70 to update! - -` - -exports[`test/lib/utils/update-notifier.js TAP notification situations 123.421.69 - color=false > must match snapshot 1`] = ` - -New patch version of npm available! 123.421.69 -> 123.421.70 -Changelog: -Run \`npm install -g npm@123.421.70\` to update! - -` - -exports[`test/lib/utils/update-notifier.js TAP notification situations 124.0.0-beta.0 - color=always > must match snapshot 1`] = ` - -New prerelease version of npm available! 124.0.0-beta.0 -> 124.0.0-beta.99999 -Changelog: https://github.com/npm/cli/releases/tag/v124.0.0-beta.99999 -Run npm install -g npm@124.0.0-beta.99999 to update! - -` - -exports[`test/lib/utils/update-notifier.js TAP notification situations 124.0.0-beta.0 - color=false > must match snapshot 1`] = ` - -New prerelease version of npm available! 124.0.0-beta.0 -> 124.0.0-beta.99999 -Changelog: -Run \`npm install -g npm@124.0.0-beta.99999\` to update! 
- -` diff --git a/deps/npm/test/bin/windows-shims.js b/deps/npm/test/bin/windows-shims.js index 2abe5013bf10bd..71f12dc8e1cdda 100644 --- a/deps/npm/test/bin/windows-shims.js +++ b/deps/npm/test/bin/windows-shims.js @@ -1,8 +1,9 @@ const t = require('tap') const { spawnSync } = require('child_process') -const { resolve, join, extname, basename, sep } = require('path') -const { copyFileSync, readFileSync, chmodSync, readdirSync, rmSync, statSync } = require('fs') +const { resolve, join, extname, basename } = require('path') +const { readFileSync, chmodSync, readdirSync, statSync } = require('fs') const Diff = require('diff') +const { moveRemove } = require('rimraf') const { sync: which } = require('which') const { version } = require('../../package.json') @@ -20,12 +21,6 @@ const SHIMS = readNonJsFiles(BIN) const NODE_GYP = readNonJsFiles(join(BIN, 'node-gyp-bin')) const SHIM_EXTS = [...new Set(Object.keys(SHIMS).map(p => extname(p)))] -// windows requires each segment of a command path to be quoted when using shell: true -const quotePath = (cmd) => cmd - .split(sep) - .map(p => p.includes(' ') ? `"${p}"` : p) - .join(sep) - t.test('shim contents', t => { // these scripts should be kept in sync so this tests the contents of each // and does a diff to ensure the only differences between them are necessary @@ -82,6 +77,7 @@ t.test('node-gyp', t => { t.test('run shims', t => { const path = t.testdir({ ...SHIMS, + 'node.exe': readFileSync(process.execPath), // simulate the state where one version of npm is installed // with node, but we should load the globally installed one 'global-prefix': { @@ -105,26 +101,23 @@ t.test('run shims', t => { }, }) - // hacky fix to decrease flakes of this test from `NOTEMPTY: directory not empty, rmdir` - // this should get better in tap@18 and we can try removing it then - copyFileSync(process.execPath, join(path, 'node.exe')) - t.teardown(async () => { - rmSync(join(path, 'node.exe')) - await new Promise(res => setTimeout(res, 100)) - // this is superstition - rmSync(join(path, 'node.exe'), { force: true }) - }) + // The removal of this fixture causes this test to fail when done with + // the default tap removal. Using rimraf's `moveRemove` seems to make this + // work reliably. Don't remove this line in the future without making sure + // this test passes the full windows suite at least 3 consecutive times. 
+ t.teardown(() => moveRemove(join(path, 'node.exe'))) const spawnPath = (cmd, args, { log, stdioString = true, ...opts } = {}) => { if (cmd.endsWith('bash.exe')) { // only cygwin *requires* the -l, but the others are ok with it args.unshift('-l') } - const result = spawnSync(cmd, args, { + const result = spawnSync(`"${cmd}"`, args, { // don't hit the registry for the update check env: { PATH: path, npm_config_update_notifier: 'false' }, cwd: path, windowsHide: true, + shell: true, ...opts, }) if (stdioString) { @@ -235,9 +228,7 @@ t.test('run shims', t => { args.push(bin) break case 'pwsh.exe': - cmd = quotePath(cmd) args.push(`${bin}.ps1`) - opts.shell = true break default: throw new Error('unknown shell') diff --git a/deps/npm/test/fixtures/libnpmsearch-stream-result.js b/deps/npm/test/fixtures/libnpmsearch-stream-result.js index 68245beff6e46f..872a7940340d47 100644 --- a/deps/npm/test/fixtures/libnpmsearch-stream-result.js +++ b/deps/npm/test/fixtures/libnpmsearch-stream-result.js @@ -283,6 +283,7 @@ module.exports = [ scope: 'unscoped', version: '1.0.0', date: '2019-09-26T22:24:28.713Z', + publisher: { username: 'lukekarrys', email: 'lukekarrys' }, maintainers: [{ username: 'lukekarrys', email: 'lukekarrys' }], }, ] diff --git a/deps/npm/test/fixtures/mock-logs.js b/deps/npm/test/fixtures/mock-logs.js index c75de5e509463d..ce4c189219467d 100644 --- a/deps/npm/test/fixtures/mock-logs.js +++ b/deps/npm/test/fixtures/mock-logs.js @@ -1,131 +1,101 @@ +const { log: { LEVELS } } = require('proc-log') +const { stripVTControlCharacters: stripAnsi } = require('util') -const NPMLOG = require('npmlog') -const { LEVELS } = require('proc-log') +const logPrefix = new RegExp(`^npm (${LEVELS.join('|')})\\s`) +const isLog = (str) => logPrefix.test(stripAnsi(str)) -const npmEmitLog = NPMLOG.emitLog.bind(NPMLOG) -const npmLog = NPMLOG.log.bind(NPMLOG) +// We only strip trailing newlines since some output will +// have significant tabs and spaces +const trimTrailingNewline = (str) => str.replace(/\n$/, '') -const merge = (...objs) => objs.reduce((acc, obj) => ({ ...acc, ...obj })) +const joinAndTrimTrailingNewlines = (arr) => + trimTrailingNewline(arr.map(trimTrailingNewline).join('\n')) -const mockLogs = (otherMocks = {}) => { - // Return mocks as an array with getters for each level - // that return an array of logged properties with the - // level removed. This is for convenience throughout tests - const logs = Object.defineProperties( - [], - ['timing', ...LEVELS].reduce((acc, level) => { - acc[level] = { - get () { - return this - .filter(([l]) => level === l) - .map(([l, ...args]) => args) - }, - } - return acc - }, {}) - ) +const logsByTitle = (logs) => ({ + byTitle: { + value: (title) => { + return logs + .filter((l) => stripAnsi(l.message).startsWith(`${title} `)) + .map((l) => l.message) + }, + }, +}) - // the above logs array is anything logged and it not filtered by level. 
- // this display array is filtered and will not include items that - // would not be shown in the terminal - const display = Object.defineProperties( - [], - ['timing', ...LEVELS].reduce((acc, level) => { +module.exports = () => { + const outputs = [] + const outputErrors = [] + + const levelLogs = [] + const logs = Object.defineProperties([], { + ...logsByTitle(levelLogs), + ...LEVELS.reduce((acc, level) => { acc[level] = { get () { - return this - .filter(([l]) => level === l) - .map(([l, ...args]) => args) + const byLevel = levelLogs.filter((l) => l.level === level) + return Object.defineProperties(byLevel.map((l) => l.message), logsByTitle(byLevel)) }, } return acc - }, {}) - ) + }, {}), + }) - const npmLogBuffer = [] + const streams = { + stderr: { + cursorTo: () => {}, + clearLine: () => {}, + write: (str) => { + str = trimTrailingNewline(str) - // This returns an object with mocked versions of all necessary - // logging modules. It mocks them with methods that add logs - // to an array which it also returns. The reason it also returns - // the mocks is that in tests the same instance of these mocks - // should be passed to multiple calls to t.mock. - // XXX: this is messy and fragile and should be removed in favor - // of some other way to collect and filter logs across all tests - const logMocks = { - 'proc-log': merge( - { LEVELS }, - LEVELS.reduce((acc, l) => { - acc[l] = (...args) => { - // Re-emit log item for since the log file listens on these - process.emit('log', l, ...args) - // Dont add pause/resume events to the logs. Those aren't displayed - // and emitting them is tested in the display layer - if (l !== 'pause' && l !== 'resume') { - logs.push([l, ...args]) - } + // Use the beginning of each line to determine if its a log + // or an output error since we write both of those to stderr. + // This couples logging format to this test but we only need + // to do it in a single place so hopefully its easy to change + // in the future if/when we refactor what logs look like. + if (!isLog(str)) { + outputErrors.push(str) + return } - return acc - }, {}), - otherMocks['proc-log'] - ), - // Object.assign is important here because we need to assign - // mocked properties directly to npmlog and then mock with that - // object. This is necessary so tests can still directly set - // `log.level = 'silent'` anywhere in the test and have that - // that reflected in the npmlog singleton. - // XXX: remove with npmlog - npmlog: Object.assign(NPMLOG, merge( - { - log: (level, ...args) => { - // timing does not exist on proclog, so if it got logged - // with npmlog we need to push it to our logs - if (level === 'timing') { - logs.push([level, ...args]) - } - npmLog(level, ...args) - }, - write: (msg) => { - // npmlog.write is what outputs to the terminal. 
- // it writes in chunks so we push each chunk to an - // array that we will log and zero out - npmLogBuffer.push(msg) - }, - emitLog: (m) => { - // this calls the original emitLog method - // which will filter based on loglevel - npmEmitLog(m) - // if anything was logged then we push to our display - // array which we can assert against in tests - if (npmLogBuffer.length) { - // first two parts are 'npm' and a single space - display.push(npmLogBuffer.slice(2)) - } - npmLogBuffer.length = 0 - }, - newItem: () => { - return { - info: (...p) => { - logs.push(['info', ...p]) - }, - warn: (...p) => { - logs.push(['warn', ...p]) - }, - error: (...p) => { - logs.push(['error', ...p]) - }, - silly: (...p) => { - logs.push(['silly', ...p]) - }, - completeWork: () => {}, - finish: () => {}, - } - }, + + // Split on spaces for the heading and level/label. We know that + // none of those have spaces but could be colorized so there's no + // other good way to get each of those including control chars + const [rawHeading, rawLevel] = str.split(' ') + const rawPrefix = `${rawHeading} ${rawLevel} ` + // If message is colorized we can just replaceAll with the string since + // it will be unique due to control chars. Otherwise we create a regex + // that will only match the beginning of each line. + const prefix = stripAnsi(str) !== str ? rawPrefix : new RegExp(`^${rawPrefix}`, 'gm') + + // The level needs color stripped always because we use it to filter logs + const level = stripAnsi(rawLevel) + + logs.push(str.replaceAll(prefix, `${level} `)) + levelLogs.push({ level, message: str.replaceAll(prefix, '') }) + }, + }, + stdout: { + write: (str) => { + outputs.push(trimTrailingNewline(str)) }, - otherMocks.npmlog - )), + }, } - return { logs, logMocks, display } + return { + streams, + logs: { + outputs, + joinedOutput: () => joinAndTrimTrailingNewlines(outputs), + clearOutput: () => { + outputs.length = 0 + outputErrors.length = 0 + }, + outputErrors, + joinedOutputError: () => joinAndTrimTrailingNewlines(outputs), + logs, + clearLogs: () => { + levelLogs.length = 0 + logs.length = 0 + }, + }, + } } - -module.exports = mockLogs diff --git a/deps/npm/test/fixtures/mock-npm.js b/deps/npm/test/fixtures/mock-npm.js index 4646e79146e86e..d8a4834a9abfff 100644 --- a/deps/npm/test/fixtures/mock-npm.js +++ b/deps/npm/test/fixtures/mock-npm.js @@ -2,8 +2,9 @@ const os = require('os') const fs = require('fs').promises const path = require('path') const tap = require('tap') +const { output, META } = require('proc-log') const errorMessage = require('../../lib/utils/error-message') -const mockLogs = require('./mock-logs') +const mockLogs = require('./mock-logs.js') const mockGlobals = require('@npmcli/mock-globals') const tmock = require('./tmock') const defExitCode = process.exitCode @@ -48,29 +49,41 @@ const setGlobalNodeModules = (globalDir) => { } const buildMocks = (t, mocks) => { - const allMocks = { - '{LIB}/utils/update-notifier.js': async () => {}, - ...mocks, - } + const allMocks = { ...mocks } // The definitions must be mocked since they are a singleton that reads from // process and environs to build defaults in order to break the requiure // cache. We also need to mock them with any mocks that were passed in for the // test in case those mocks are for things like ci-info which is used there. 
const definitions = '@npmcli/config/lib/definitions' allMocks[definitions] = tmock(t, definitions, allMocks) - return allMocks } const getMockNpm = async (t, { mocks, init, load, npm: npmOpts }) => { - const { logMocks, logs, display } = mockLogs(mocks) - const allMocks = buildMocks(t, { ...mocks, ...logMocks }) + const { streams, logs } = mockLogs() + const allMocks = buildMocks(t, mocks) const Npm = tmock(t, '{LIB}/npm.js', allMocks) - const outputs = [] - const outputErrors = [] - class MockNpm extends Npm { + constructor (opts) { + super({ + ...opts, + ...streams, + ...npmOpts, + }) + } + + async load () { + const res = await super.load() + // Wait for any promises (currently only log file cleaning) to be + // done before returning from load in tests. This helps create more + // deterministic testing behavior because in reality that promise + // is left hanging on purpose as a best-effort and the process gets + // closed regardless of if it has finished or not. + await Promise.all(this.unrefPromises) + return res + } + async exec (...args) { const [res, err] = await super.exec(...args).then((r) => [r]).catch(e => [null, e]) // This mimics how the exit handler flushes output for commands that have @@ -78,32 +91,16 @@ const getMockNpm = async (t, { mocks, init, load, npm: npmOpts }) => { // error message fn. This is necessary for commands with buffered output // to read the output after exec is called. This is not *exactly* how it // works in practice, but it is close enough for now. - this.flushOutput(err ? errorMessage(err, this).json : null) + const jsonError = err && errorMessage(err, this).json + output.flush({ [META]: true, jsonError }) if (err) { throw err } return res } - - // lib/npm.js tests needs this to actually test the function! - originalOutput (...args) { - super.output(...args) - } - - originalOutputError (...args) { - super.outputError(...args) - } - - output (...args) { - outputs.push(args) - } - - outputError (...args) { - outputErrors.push(args) - } } - const npm = init ? new MockNpm(npmOpts) : null + const npm = init ? new MockNpm() : null if (npm && load) { await npm.load() } @@ -111,12 +108,7 @@ const getMockNpm = async (t, { mocks, init, load, npm: npmOpts }) => { return { Npm: MockNpm, npm, - outputs, - outputErrors, - joinedOutput: () => outputs.map(o => o.join(' ')).join('\n'), - logMocks, - logs, - display, + ...logs, } } @@ -128,7 +120,6 @@ const setupMockNpm = async (t, { // preload a command command = null, // string name of the command exec = null, // optionally exec the command before returning - setCmd = false, // test dirs prefixDir = {}, homeDir = {}, @@ -142,7 +133,6 @@ const setupMockNpm = async (t, { globals = {}, npm: npmOpts = {}, argv: rawArgv = [], - ...r } = {}) => { // easy to accidentally forget to pass in tap if (!(t instanceof tap.Test)) { @@ -213,6 +203,11 @@ const setupMockNpm = async (t, { // explicitly set in a test. 'fetch-retries': 0, cache: dirs.cache, + // This will give us all the loglevels including timing in a non-colorized way + // so we can easily assert their contents. Individual tests can overwrite these + // with my passing in configs if they need to test other forms of output. 
+ loglevel: 'silly', + color: false, } const { argv, env, config } = Object.entries({ ...defaultConfigs, ...withDirs(_config) }) @@ -221,11 +216,13 @@ const setupMockNpm = async (t, { // and quoted with `"` so mock globals will ignore that it contains dots if (key.startsWith('//')) { acc.env[`process.env."npm_config_${key}"`] = value - } else { + } else if (value !== undefined) { const values = [].concat(value) - acc.argv.push(...values.flatMap(v => `--${key}=${v.toString()}`)) + acc.argv.push(...values.flatMap(v => v === '' ? `--${key}` : `--${key}=${v.toString()}`)) + } + if (value !== undefined) { + acc.config[key] = value } - acc.config[key] = value return acc }, { argv: [...rawArgv], env: {}, config: {} }) @@ -244,7 +241,11 @@ const setupMockNpm = async (t, { init, load, mocks: withDirs(mocks), - npm: { argv, excludeNpmCwd: true, ...withDirs(npmOpts) }, + npm: { + argv: command ? [command, ...argv] : argv, + excludeNpmCwd: true, + ...withDirs(npmOpts), + }, }) if (config.omit?.includes('prod')) { @@ -269,16 +270,6 @@ const setupMockNpm = async (t, { const mockCommand = {} if (command) { const Cmd = mockNpm.Npm.cmd(command) - if (setCmd) { - // XXX(hack): This is a hack to allow fake-ish tests to set the currently - // running npm command without running exec. Generally, we should rely on - // actually exec-ing the command to asserting the state of the world - // through what is printed/on disk/etc. This is a stop-gap to allow tests - // that are time intensive to convert to continue setting the npm command - // this way. TODO: remove setCmd from all tests and remove the setCmd - // method from `lib/npm.js` - npm.setCmd(command) - } mockCommand.cmd = new Cmd(npm) mockCommand[command] = { usage: Cmd.describeUsage, @@ -308,7 +299,7 @@ const setupMockNpm = async (t, { .join('\n') }, timingFile: async () => { - const data = await fs.readFile(npm.timingFile, 'utf8') + const data = await fs.readFile(npm.logPath + 'timing.json', 'utf8') return JSON.parse(data) }, } diff --git a/deps/npm/test/fixtures/sandbox.js b/deps/npm/test/fixtures/sandbox.js deleted file mode 100644 index 5be02fcf80c1eb..00000000000000 --- a/deps/npm/test/fixtures/sandbox.js +++ /dev/null @@ -1,336 +0,0 @@ -const { createHook, executionAsyncId } = require('async_hooks') -const { EventEmitter } = require('events') -const { homedir, tmpdir } = require('os') -const { dirname, join } = require('path') -const { mkdir, rm } = require('fs/promises') -const mockLogs = require('./mock-logs') -const pkg = require('../../package.json') - -const chain = new Map() -const sandboxes = new Map() - -// keep a reference to the real process -const _process = process - -createHook({ - init: (asyncId, type, triggerAsyncId, resource) => { - // track parentage of asyncIds - chain.set(asyncId, triggerAsyncId) - }, - before: (asyncId) => { - // find the nearest parent id that has a sandbox - let parent = asyncId - while (chain.has(parent) && !sandboxes.has(parent)) { - parent = chain.get(parent) - } - - process = sandboxes.has(parent) - ? 
sandboxes.get(parent) - : _process - }, -}).enable() - -const _data = Symbol('sandbox.data') -const _dirs = Symbol('sandbox.dirs') -const _test = Symbol('sandbox.test') -const _mocks = Symbol('sandbox.mocks') -const _npm = Symbol('sandbox.npm') -const _parent = Symbol('sandbox.parent') -const _output = Symbol('sandbox.output') -const _proxy = Symbol('sandbox.proxy') -const _get = Symbol('sandbox.proxy.get') -const _set = Symbol('sandbox.proxy.set') -const _logs = Symbol('sandbox.logs') - -// we can't just replace these values everywhere because they're known to be -// very short strings that could be present all over the place, so we only -// replace them if they're located within quotes for now -const vagueRedactedDefaults = [ - 'editor', - 'shell', -] - -const normalize = (str) => str - .replace(/\r\n/g, '\n') // normalize line endings (for ini) - .replace(/[A-z]:\\/g, '\\') // turn windows roots to posix ones - .replace(/\\+/g, '/') // replace \ with / - -class Sandbox extends EventEmitter { - constructor (test, options = {}) { - super() - - this[_test] = test - this[_mocks] = options.mocks || {} - this[_data] = new Map() - this[_output] = [] - const tempDir = `${test.testdirName}-sandbox` - this[_dirs] = { - temp: tempDir, - global: options.global || join(tempDir, 'global'), - home: options.home || join(tempDir, 'home'), - project: options.project || join(tempDir, 'project'), - cache: options.cache || join(tempDir, 'cache'), - } - - this[_proxy] = new Proxy(_process, { - get: this[_get].bind(this), - set: this[_set].bind(this), - }) - this[_proxy].env = { ...options.env } - this[_proxy].argv = [] - - test.cleanSnapshot = this.cleanSnapshot.bind(this) - test.afterEach(() => this.reset()) - test.teardown(() => this.teardown()) - } - - get config () { - return this[_npm] && this[_npm].config - } - - get logs () { - return this[_logs] - } - - get global () { - return this[_dirs].global - } - - get home () { - return this[_dirs].home - } - - get project () { - return this[_dirs].project - } - - get cache () { - return this[_dirs].cache - } - - get process () { - return this[_proxy] - } - - get output () { - return this[_output].map((line) => line.join(' ')).join('\n') - } - - cleanSnapshot (snapshot) { - let clean = normalize(snapshot) - - const viewer = _process.platform === 'win32' - ? /"browser"([^:]+|$)/g - : /"man"([^:]+|$)/g - - // the global prefix is platform dependent - const realGlobalPrefix = _process.platform === 'win32' - ? dirname(_process.execPath) - : dirname(dirname(_process.execPath)) - - const cache = _process.platform === 'win32' - ? 
/\{HOME\}\/npm-cache(\r?\n|"|\/|$)/g - : /\{HOME\}\/\.npm(\n|"|\/|$)/g - - // and finally replace some paths we know could be present - clean = clean - .replace(viewer, '"{VIEWER}"$1') - .split(normalize(this[_proxy].execPath)).join('{EXECPATH}') - .split(normalize(_process.execPath)).join('{REALEXECPATH}') - .split(normalize(this.global)).join('{GLOBALPREFIX}') - .split(normalize(realGlobalPrefix)).join('{REALGLOBALREFIX}') - .split(normalize(this.project)).join('{LOCALPREFIX}') - .split(normalize(this.home)).join('{HOME}') - .replace(cache, '{CACHE}$1') - .split(normalize(dirname(dirname(__dirname)))).join('{NPMDIR}') - .split(normalize(tmpdir())).join('{TMP}') - .split(normalize(homedir())).join('{REALHOME}') - .split(this[_proxy].platform).join('{PLATFORM}') - .split(this[_proxy].arch).join('{ARCH}') - .replace(new RegExp(process.version, 'g'), '{NODE-VERSION}') - .replace(new RegExp(pkg.version, 'g'), '{NPM-VERSION}') - - // We do the defaults after everything else so that they don't cause the - // other cleaners to miss values we would have clobbered here. For - // instance if execPath is /home/user/.nvm/versions/node/1.0.0/bin/node, - // and we replaced the node version first, the real execPath we're trying - // to replace would no longer be represented, and be missed. - if (this[_npm]) { - // replace vague default config values that are present within quotes - // with placeholders - for (const name of vagueRedactedDefaults) { - const value = this[_npm].config.defaults[name] - clean = clean.split(`"${normalize(value)}"`).join(`"{${name.toUpperCase()}}"`) - } - } - - return clean - } - - // test.afterEach hook - reset () { - this.removeAllListeners() - this[_parent] = undefined - this[_output] = [] - this[_data].clear() - this[_proxy].env = {} - this[_proxy].argv = [] - this[_npm] = undefined - } - - // test.teardown hook - teardown () { - if (this[_parent]) { - const sandboxProcess = sandboxes.get(this[_parent]) - sandboxProcess.removeAllListeners('log') - sandboxes.delete(this[_parent]) - } - if (this[_npm]) { - this[_npm].unload() - } - return rm(this[_dirs].temp, { recursive: true, force: true }).catch(() => null) - } - - // proxy get handler - [_get] (target, prop, receiver) { - if (this[_data].has(prop)) { - return this[_data].get(prop) - } - - if (this[prop] !== undefined) { - return Reflect.get(this, prop, this) - } - - return Reflect.get(target, prop, receiver) - } - - // proxy set handler - [_set] (target, prop, value) { - if (prop === 'env') { - value = { - ...value, - HOME: this.home, - } - } - - if (prop === 'argv') { - value = [ - process.execPath, - join(dirname(process.execPath), 'npm'), - ...value, - ] - } - - return this[_data].set(prop, value) - } - - async run (command, argv = []) { - await Promise.all([ - mkdir(this.project, { recursive: true }), - mkdir(this.home, { recursive: true }), - mkdir(this.global, { recursive: true }), - ]) - - // attach the sandbox process now, doing it after the promise above is - // necessary to make sure that only async calls spawned as part of this - // call to run will receive the sandbox. 
if we attach it too early, we - // end up interfering with tap - this[_parent] = executionAsyncId() - this[_data].set('_asyncId', this[_parent]) - sandboxes.set(this[_parent], this[_proxy]) - process = this[_proxy] - - this[_proxy].argv = [ - '--prefix', this.project, - '--userconfig', join(this.home, '.npmrc'), - '--globalconfig', join(this.global, 'npmrc'), - '--cache', this.cache, - command, - ...argv, - ] - - const mockedLogs = mockLogs(this[_mocks]) - this[_logs] = mockedLogs.logs - const definitions = this[_test].mock('@npmcli/config/lib/definitions') - const Npm = this[_test].mock('../../lib/npm.js', { - '@npmcli/config/lib/definitions': definitions, - '../../lib/utils/update-notifier.js': async () => {}, - ...this[_mocks], - ...mockedLogs.logMocks, - }) - this.process.on('log', (l, ...args) => { - if (l !== 'pause' && l !== 'resume') { - this[_logs].push([l, ...args]) - } - }) - - this[_npm] = new Npm() - this[_npm].output = (...args) => this[_output].push(args) - await this[_npm].load() - - const cmd = this[_npm].argv.shift() - return this[_npm].exec(cmd, this[_npm].argv) - } - - async complete (command, argv, partial) { - if (!Array.isArray(argv)) { - partial = argv - argv = [] - } - - await Promise.all([ - mkdir(this.project, { recursive: true }), - mkdir(this.home, { recursive: true }), - mkdir(this.global, { recursive: true }), - ]) - - // attach the sandbox process now, doing it after the promise above is - // necessary to make sure that only async calls spawned as part of this - // call to run will receive the sandbox. if we attach it too early, we - // end up interfering with tap - this[_parent] = executionAsyncId() - this[_data].set('_asyncId', this[_parent]) - sandboxes.set(this[_parent], this[_proxy]) - process = this[_proxy] - - this[_proxy].argv = [ - '--prefix', this.project, - '--userconfig', join(this.home, '.npmrc'), - '--globalconfig', join(this.global, 'npmrc'), - '--cache', this.cache, - command, - ...argv, - ] - - const mockedLogs = mockLogs(this[_mocks]) - this[_logs] = mockedLogs.logs - const definitions = this[_test].mock('@npmcli/config/lib/definitions') - const Npm = this[_test].mock('../../lib/npm.js', { - '@npmcli/config/lib/definitions': definitions, - '../../lib/utils/update-notifier.js': async () => {}, - ...this[_mocks], - ...mockedLogs.logMocks, - }) - this.process.on('log', (l, ...args) => { - if (l !== 'pause' && l !== 'resume') { - this[_logs].push([l, ...args]) - } - }) - - this[_npm] = new Npm() - this[_npm].output = (...args) => this[_output].push(args) - await this[_npm].load() - - const Cmd = Npm.cmd(command) - return Cmd.completion({ - partialWord: partial, - conf: { - argv: { - remain: ['npm', command, ...argv], - }, - }, - }) - } -} - -module.exports = Sandbox diff --git a/deps/npm/test/lib/fixtures/sigstore/valid-sigstore-attestations.json b/deps/npm/test/fixtures/sigstore/valid-sigstore-attestations.json similarity index 100% rename from deps/npm/test/lib/fixtures/sigstore/valid-sigstore-attestations.json rename to deps/npm/test/fixtures/sigstore/valid-sigstore-attestations.json diff --git a/deps/npm/test/lib/fixtures/sigstore/valid-tuf-js-attestations.json b/deps/npm/test/fixtures/sigstore/valid-tuf-js-attestations.json similarity index 100% rename from deps/npm/test/lib/fixtures/sigstore/valid-tuf-js-attestations.json rename to deps/npm/test/fixtures/sigstore/valid-tuf-js-attestations.json diff --git a/deps/npm/test/lib/arborist-cmd.js b/deps/npm/test/lib/arborist-cmd.js index 44afe9763f620c..dd90d47b9a0003 100644 --- 
a/deps/npm/test/lib/arborist-cmd.js +++ b/deps/npm/test/lib/arborist-cmd.js @@ -117,7 +117,8 @@ t.test('arborist-cmd', async t => { chdir: (dirs) => dirs.testdir, }) - npm.localPrefix = prefix + // TODO there has to be a better way to do this + npm.config.localPrefix = prefix await cmd.execWorkspaces([]) t.same(cmd.workspaceNames, ['a', 'c'], 'should set array with single ws name') @@ -127,7 +128,7 @@ t.test('arborist-cmd', async t => { t.test('handle getWorkspaces raising an error', async t => { const { cmd } = await mockArboristCmd(t, null, 'a', { mocks: { - '{LIB}/workspaces/get-workspaces.js': async () => { + '{LIB}/utils/get-workspaces.js': async () => { throw new Error('oopsie') }, }, @@ -212,7 +213,7 @@ t.test('location detection and audit', async (t) => { }) t.equal(npm.config.get('location'), 'user') t.equal(npm.config.get('audit'), true) - t.equal(logs.warn[0][0], 'config') - t.equal(logs.warn[0][1], 'includes both --global and --audit, which is currently unsupported.') + t.equal(logs.warn[0], + 'config includes both --global and --audit, which is currently unsupported.') }) }) diff --git a/deps/npm/test/lib/cli.js b/deps/npm/test/lib/cli.js index a6cb576e886ee9..6c3079b8efe31e 100644 --- a/deps/npm/test/lib/cli.js +++ b/deps/npm/test/lib/cli.js @@ -3,7 +3,7 @@ const tmock = require('../fixtures/tmock') t.test('returns cli-entry function', async t => { const cli = tmock(t, '{LIB}/cli.js', { - '{LIB}/cli-entry.js': () => 'ENTRY', + '{LIB}/cli/entry.js': () => 'ENTRY', }) t.equal(cli(process), 'ENTRY') diff --git a/deps/npm/test/lib/cli-entry.js b/deps/npm/test/lib/cli/entry.js similarity index 52% rename from deps/npm/test/lib/cli-entry.js rename to deps/npm/test/lib/cli/entry.js index 22dca32f1a934e..60a38b70abf895 100644 --- a/deps/npm/test/lib/cli-entry.js +++ b/deps/npm/test/lib/cli/entry.js @@ -1,7 +1,9 @@ const t = require('tap') -const { load: loadMockNpm } = require('../fixtures/mock-npm.js') -const tmock = require('../fixtures/tmock.js') -const validateEngines = require('../../lib/es6/validate-engines.js') +const { readdirSync } = require('fs') +const { dirname } = require('path') +const { load: loadMockNpm } = require('../../fixtures/mock-npm.js') +const tmock = require('../../fixtures/tmock.js') +const validateEngines = require('../../../lib/cli/validate-engines.js') const cliMock = async (t, opts) => { let exitHandlerArgs = null @@ -12,62 +14,56 @@ const cliMock = async (t, opts) => { } exitHandlerMock.setNpm = _npm => npm = _npm - const { Npm, outputs, logMocks, logs } = await loadMockNpm(t, { ...opts, init: false }) - const cli = tmock(t, '{LIB}/cli-entry.js', { + const { Npm, ...mock } = await loadMockNpm(t, { ...opts, init: false }) + const cli = tmock(t, '{LIB}/cli/entry.js', { '{LIB}/npm.js': Npm, - '{LIB}/utils/exit-handler.js': exitHandlerMock, - ...logMocks, + '{LIB}/cli/exit-handler.js': exitHandlerMock, }) return { + ...mock, Npm, cli: (p) => validateEngines(p, () => cli), - outputs, exitHandlerCalled: () => exitHandlerArgs, exitHandlerNpm: () => npm, - logs, - logsBy: (title) => logs.verbose.filter(([p]) => p === title).map(([p, ...rest]) => rest), } } t.test('print the version, and treat npm_g as npm -g', async t => { - const { logsBy, logs, cli, Npm, outputs, exitHandlerCalled } = await cliMock(t, { - globals: { 'process.argv': ['node', 'npm_g', '-v'] }, + const { logs, cli, Npm, outputs, exitHandlerCalled } = await cliMock(t, { + globals: { 'process.argv': ['node', 'npm_g', 'root'] }, }) await cli(process) - t.strictSame(process.argv, ['node', 'npm', 
'-g', '-v'], 'system process.argv was rewritten') - t.strictSame(logsBy('cli'), [['node npm']]) - t.strictSame(logsBy('title'), [['npm']]) - t.match(logsBy('argv'), [['"--global" "--version"']]) - t.strictSame(logs.info, [ - ['using', 'npm@%s', Npm.version], - ['using', 'node@%s', process.version], - ]) + t.strictSame(process.argv, ['node', 'npm', '-g', 'root'], 'system process.argv was rewritten') + t.strictSame(logs.verbose.byTitle('cli'), ['cli node npm']) + t.strictSame(logs.verbose.byTitle('title'), ['title npm root']) + t.match(logs.verbose.byTitle('argv'), ['argv "--global" "root"']) + t.strictSame(logs.info, [`using npm@${Npm.version}`, `using node@${process.version}`]) t.equal(outputs.length, 1) - t.strictSame(outputs, [[Npm.version]]) + t.match(outputs[0], dirname(process.cwd())) t.strictSame(exitHandlerCalled(), []) }) t.test('calling with --versions calls npm version with no args', async t => { - const { logsBy, cli, outputs, exitHandlerCalled } = await cliMock(t, { + const { logs, cli, outputs, exitHandlerCalled } = await cliMock(t, { globals: { - 'process.argv': ['node', 'npm', 'install', 'or', 'whatever', '--versions'], + 'process.argv': ['node', 'npm', 'install', 'or', 'whatever', '--versions', '--json'], }, }) await cli(process) t.equal(process.title, 'npm install or whatever') - t.strictSame(logsBy('cli'), [['node npm']]) - t.strictSame(logsBy('title'), [['npm install or whatever']]) - t.match(logsBy('argv'), [['"install" "or" "whatever" "--versions"']]) + t.strictSame(logs.verbose.byTitle('cli'), ['cli node npm']) + t.strictSame(logs.verbose.byTitle('title'), ['title npm install or whatever']) + t.match(logs.verbose.byTitle('argv'), ['argv "install" "or" "whatever" "--versions"']) t.equal(outputs.length, 1) - t.match(outputs[0][0], { npm: String, node: String, v8: String }) + t.match(JSON.parse(outputs[0]), { npm: String, node: String, v8: String }) t.strictSame(exitHandlerCalled(), []) }) t.test('logged argv is sanitized', async t => { - const { logsBy, cli } = await cliMock(t, { + const { logs, cli } = await cliMock(t, { globals: { 'process.argv': [ 'node', @@ -81,13 +77,14 @@ t.test('logged argv is sanitized', async t => { await cli(process) t.equal(process.title, 'npm version') - t.strictSame(logsBy('cli'), [['node npm']]) - t.strictSame(logsBy('title'), [['npm version']]) - t.match(logsBy('argv'), [['"version" "--registry" "https://u:***@npmjs.org/password"']]) + t.strictSame(logs.verbose.byTitle('cli'), ['cli node npm']) + t.strictSame(logs.verbose.byTitle('title'), ['title npm version']) + t.match(logs.verbose.byTitle('argv'), + ['argv "version" "--registry" "https://u:***@npmjs.org/password"']) }) t.test('logged argv is sanitized with equals', async t => { - const { logsBy, cli } = await cliMock(t, { + const { logs, cli } = await cliMock(t, { globals: { 'process.argv': [ 'node', @@ -99,7 +96,7 @@ t.test('logged argv is sanitized with equals', async t => { }) await cli(process) - t.match(logsBy('argv'), [['"version" "--registry" "https://u:***@npmjs.org/"']]) + t.match(logs.verbose.byTitle('argv'), ['argv "version" "--registry" "https://u:***@npmjs.org"']) }) t.test('print usage if no params provided', async t => { @@ -110,7 +107,7 @@ t.test('print usage if no params provided', async t => { }) await cli(process) - t.match(outputs[0][0], 'Usage:', 'outputs npm usage') + t.match(outputs[0], 'Usage:', 'outputs npm usage') t.match(exitHandlerCalled(), [], 'should call exitHandler with no args') t.ok(exitHandlerNpm(), 'exitHandler npm is set') t.match(process.exitCode, 
1) @@ -124,8 +121,8 @@ t.test('print usage if non-command param provided', async t => { }) await cli(process) - t.match(outputs[0][0], 'Unknown command: "tset"') - t.match(outputs[0][0], 'Did you mean this?') + t.match(outputs[0], 'Unknown command: "tset"') + t.match(outputs[0], 'Did you mean this?') t.match(exitHandlerCalled(), [], 'should call exitHandler with no args') t.ok(exitHandlerNpm(), 'exitHandler npm is set') t.match(process.exitCode, 1) @@ -157,7 +154,31 @@ t.test('unsupported node version', async t => { }) await cli(process) t.match( - logs.warn[0][1], + logs.warn[0], /npm v.* does not support Node\.js 12\.6\.0\./ ) }) + +t.test('non-ascii dash', async t => { + const { cli, logs } = await cliMock(t, { + globals: { + 'process.argv': ['node', 'npm', 'scope', '\u2010not-a-dash'], + }, + }) + await cli(process) + t.equal( + logs.error[0], + 'arg Argument starts with non-ascii dash, this is probably invalid: \u2010not-a-dash' + ) +}) + +t.test('exit early for --version', async t => { + const { cli, outputs, Npm, cache } = await cliMock(t, { + globals: { + 'process.argv': ['node', 'npm', '-v'], + }, + }) + await cli(process) + t.strictSame(readdirSync(cache), [], 'nothing created in cache') + t.equal(outputs[0], Npm.version) +}) diff --git a/deps/npm/test/lib/utils/exit-handler.js b/deps/npm/test/lib/cli/exit-handler.js similarity index 69% rename from deps/npm/test/lib/utils/exit-handler.js rename to deps/npm/test/lib/cli/exit-handler.js index b48f96d581775a..7b465643504c76 100644 --- a/deps/npm/test/lib/utils/exit-handler.js +++ b/deps/npm/test/lib/cli/exit-handler.js @@ -1,14 +1,15 @@ +const fs = require('node:fs') +const { join, resolve } = require('node:path') +const EventEmitter = require('node:events') +const os = require('node:os') const t = require('tap') -const os = require('os') -const fs = require('fs') const fsMiniPass = require('fs-minipass') -const { join, resolve } = require('path') -const EventEmitter = require('events') -const { format } = require('../../../lib/utils/log-file') +const { output, time } = require('proc-log') const { load: loadMockNpm } = require('../../fixtures/mock-npm') const mockGlobals = require('@npmcli/mock-globals') const { cleanCwd, cleanDate } = require('../../fixtures/clean-snapshot') const tmock = require('../../fixtures/tmock') +const { version: NPM_VERSION } = require('../../../package.json') const pick = (obj, ...keys) => keys.reduce((acc, key) => { acc[key] = obj[key] @@ -32,6 +33,9 @@ t.cleanSnapshot = (path) => cleanDate(cleanCwd(path)) .replace(/.*silly logfile.*cleaning.*\n/gm, '') .replace(/(Completed in )\d+(ms)/g, '$1{TIME}$2') .replace(/(removing )\d+( files)/g, '$1${NUM}2') + .replaceAll(`node ${process.version}`, '{NODE-VERSION}') + .replaceAll(`${os.type()} ${os.release()}`, '{OS}') + .replaceAll(`v${NPM_VERSION}`, '{NPM-VERSION}') // cut off process from script so that it won't quit the test runner // while trying to run through the myriad of cases. 
need to make it @@ -40,9 +44,9 @@ t.cleanSnapshot = (path) => cleanDate(cleanCwd(path)) mockGlobals(t, { process: Object.assign(new EventEmitter(), { // these are process properties that are needed in the running code and tests - ...pick(process, 'execPath', 'stdout', 'stderr', 'stdin', 'cwd', 'chdir', 'env', 'umask'), + // eslint-disable-next-line max-len + ...pick(process, 'version', 'execPath', 'stdout', 'stderr', 'stdin', 'cwd', 'chdir', 'env', 'umask'), argv: ['/node', ...process.argv.slice(1)], - version: 'v1.0.0', kill: () => {}, reallyExit: (code) => process.exit(code), pid: 123456, @@ -56,14 +60,9 @@ mockGlobals(t, { const mockExitHandler = async (t, { config, mocks, files, ...opts } = {}) => { const errors = [] - const { npm, logMocks, ...rest } = await loadMockNpm(t, { + const { npm, ...rest } = await loadMockNpm(t, { ...opts, - mocks: { - '{ROOT}/package.json': { - version: '1.0.0', - }, - ...mocks, - }, + mocks, config: (dirs) => ({ loglevel: 'notice', ...(typeof config === 'function' ? config(dirs) : config), @@ -73,24 +72,17 @@ const mockExitHandler = async (t, { config, mocks, files, ...opts } = {}) => { }, }) - const exitHandler = tmock(t, '{LIB}/utils/exit-handler.js', { + const exitHandler = tmock(t, '{LIB}/cli/exit-handler.js', { '{LIB}/utils/error-message.js': (err) => ({ summary: [['ERR SUMMARY', err.message]], detail: [['ERR DETAIL', err.message]], ...(files ? { files } : {}), json: { - error: { - code: err.code, - summary: err.message, - detail: err.message, - }, + code: err.code, + summary: err.message, + detail: err.message, }, }), - os: { - type: () => 'Foo', - release: () => '1.0.0', - }, - ...logMocks, ...mocks, }) @@ -104,7 +96,10 @@ const mockExitHandler = async (t, { config, mocks, files, ...opts } = {}) => { return { ...rest, - errors, + errors: () => [ + ...rest.outputErrors, + ...errors, + ], npm, // Make it async to make testing ergonomics a little easier so we dont need // to t.plan() every test to make sure we get process.exit called. 
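The async wrapper referred to above follows the same shape as this minimal, self-contained sketch (fakeProcess, realExitHandler and promisifyExitHandler are illustrative names, not part of the npm fixtures): the wrapped call settles once the fake process emits 'exit', so a test can simply await it rather than wiring up t.plan() around process.exit.

const EventEmitter = require('node:events')

// A stand-in process; the only behaviour the wrapper relies on is the 'exit' event.
const fakeProcess = Object.assign(new EventEmitter(), { exitCode: 0 })

// Turn a callback-style exit handler into one that can be awaited in tests.
const promisifyExitHandler = (realExitHandler) => (...args) =>
  new Promise((resolve) => {
    fakeProcess.once('exit', resolve) // settle once the handler triggers exit
    realExitHandler(...args)
  })

// Usage (hypothetical):
//   const exitHandler = promisifyExitHandler(loadedExitHandler)
//   await exitHandler(new Error('boom')) // resolves after fakeProcess emits 'exit'
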
@@ -129,11 +124,11 @@ const err = (message = '', options = {}, noStack = false) => { } t.test('handles unknown error with logs and debug file', async (t) => { - const { exitHandler, debugFile, logs } = await mockExitHandler(t) + const { exitHandler, debugFile, logs } = await mockExitHandler(t, { + config: { loglevel: 'silly', timing: true }, + }) await exitHandler(err('Unknown error', 'ECODE')) - // force logfile cleaning logs to happen since those are purposefully not awaited - await require('timers/promises').setTimeout(200) const fileLogs = await debugFile() const fileLines = fileLogs.split('\n') @@ -143,23 +138,18 @@ t.test('handles unknown error with logs and debug file', async (t) => { t.equal(process.exitCode, 1) - let skippedLogs = 0 logs.forEach((logItem, i) => { - const logLines = format(i, ...logItem).trim().split(os.EOL) + const logLines = logItem.split('\n').map(l => `${i} ${l}`) for (const line of logLines) { - if (line.includes('logfile') && line.includes('cleaning')) { - skippedLogs++ - continue - } t.match(fileLogs.trim(), line, 'log appears in debug file') } }) - t.equal(logs.length - skippedLogs, parseInt(lastLog) + 1) + t.equal(logs.length, parseInt(lastLog) + 1) t.match(logs.error, [ - ['code', 'ECODE'], - ['ERR SUMMARY', 'Unknown error'], - ['ERR DETAIL', 'Unknown error'], + 'code ECODE', + 'ERR SUMMARY Unknown error', + 'ERR DETAIL Unknown error', ]) t.match(fileLogs, /\d+ error code ECODE/) t.match(fileLogs, /\d+ error ERR SUMMARY Unknown error/) @@ -173,11 +163,8 @@ t.test('exit handler never called - loglevel silent', async (t) => { config: { loglevel: 'silent' }, }) process.emit('exit', 1) - t.match(logs.error, [ - ['', /Exit handler never called/], - ['', /error with npm itself/], - ]) - t.strictSame(errors, [''], 'logs one empty string to console.error') + t.strictSame(logs.error, []) + t.strictSame(errors(), [''], 'one empty string') }) t.test('exit handler never called - loglevel notice', async (t) => { @@ -185,45 +172,45 @@ t.test('exit handler never called - loglevel notice', async (t) => { process.emit('exit', 1) t.equal(process.exitCode, 1) t.match(logs.error, [ - ['', /Exit handler never called/], - ['', /error with npm itself/], + 'Exit handler never called!', + /error with npm itself/, ]) - t.strictSame(errors, ['', ''], 'logs two empty strings to console.error') + t.strictSame(errors(), ['', ''], 'two empty string on output') }) t.test('exit handler never called - no npm', async (t) => { const { logs, errors } = await mockExitHandler(t, { init: false }) process.emit('exit', 1) t.equal(process.exitCode, 1) - t.match(logs.error, [ - ['', /Exit handler never called/], - ['', /error with npm itself/], - ]) - t.strictSame(errors, [''], 'logs one empty string to console.error') + t.strictSame(logs.error, []) + t.strictSame(errors(), [''], 'one empty string') }) t.test('exit handler called - no npm', async (t) => { const { exitHandler, errors } = await mockExitHandler(t, { init: false }) await exitHandler() t.equal(process.exitCode, 1) - t.match(errors, [/Error: Exit prior to setting npm in exit handler/]) + t.equal(errors().length, 1) + t.match(errors(), [/Error: Exit prior to setting npm in exit handler/]) }) t.test('exit handler called - no npm with error', async (t) => { const { exitHandler, errors } = await mockExitHandler(t, { init: false }) await exitHandler(err('something happened')) t.equal(process.exitCode, 1) - t.match(errors, [/Error: something happened/]) + t.equal(errors().length, 1) + t.match(errors(), [/Error: something happened/]) }) 
t.test('exit handler called - no npm with error without stack', async (t) => { const { exitHandler, errors } = await mockExitHandler(t, { init: false }) await exitHandler(err('something happened', {}, true)) t.equal(process.exitCode, 1) - t.match(errors, [/something happened/]) + t.equal(errors().length, 1) + t.match(errors(), [/something happened/]) }) -t.test('console.log output using --json', async (t) => { +t.test('standard output using --json', async (t) => { const { exitHandler, outputs } = await mockExitHandler(t, { config: { json: true }, }) @@ -245,13 +232,13 @@ t.test('console.log output using --json', async (t) => { }) t.test('merges output buffers errors with --json', async (t) => { - const { exitHandler, outputs, npm } = await mockExitHandler(t, { + const { exitHandler, outputs } = await mockExitHandler(t, { config: { json: true }, }) - npm.outputBuffer({ output_data: 1 }) - npm.outputBuffer(JSON.stringify({ more_data: 2 })) - npm.outputBuffer('not json, will be ignored') + output.buffer({ output_data: 1 }) + output.buffer(JSON.stringify({ more_data: 2 })) + output.buffer('not json, will be ignored') await exitHandler(err('Error: EBADTHING Something happened')) @@ -272,20 +259,20 @@ t.test('merges output buffers errors with --json', async (t) => { }) t.test('output buffer without json', async (t) => { - const { exitHandler, outputs, npm, logs } = await mockExitHandler(t) + const { exitHandler, outputs, logs } = await mockExitHandler(t) - npm.outputBuffer('output_data') - npm.outputBuffer('more_data') + output.buffer('output_data') + output.buffer('more_data') await exitHandler(err('Error: EBADTHING Something happened')) t.equal(process.exitCode, 1) t.same( outputs, - [['output_data'], ['more_data']], + ['output_data', 'more_data'], 'should output expected output' ) - t.match(logs.error, [['code', 'EBADTHING']]) + t.match(logs.error, ['code EBADTHING']) }) t.test('throw a non-error obj', async (t) => { @@ -298,7 +285,7 @@ t.test('throw a non-error obj', async (t) => { t.equal(process.exitCode, 1) t.match(logs.error, [ - ['weird error', { code: 'ESOMETHING', message: 'foo bar' }], + "weird error { code: 'ESOMETHING', message: 'foo bar' }", ]) }) @@ -309,21 +296,34 @@ t.test('throw a string error', async (t) => { t.equal(process.exitCode, 1) t.match(logs.error, [ - ['', 'foo bar'], + 'foo bar', ]) }) -t.test('update notification', async (t) => { - const { exitHandler, logs, npm } = await mockExitHandler(t) +t.test('update notification - shows even with loglevel error', async (t) => { + const { exitHandler, logs, npm } = await mockExitHandler(t, { + config: { loglevel: 'error' }, + }) npm.updateNotification = 'you should update npm!' await exitHandler() t.match(logs.notice, [ - ['', 'you should update npm!'], + 'you should update npm!', ]) }) +t.test('update notification - hidden with silent', async (t) => { + const { exitHandler, logs, npm } = await mockExitHandler(t, { + config: { loglevel: 'silent' }, + }) + npm.updateNotification = 'you should update npm!' 
+ + await exitHandler() + + t.strictSame(logs.notice, []) +}) + t.test('npm.config not ready', async (t) => { const { exitHandler, logs, errors } = await mockExitHandler(t, { load: false, @@ -332,12 +332,11 @@ t.test('npm.config not ready', async (t) => { await exitHandler() t.equal(process.exitCode, 1) - t.match(errors, [ + t.equal(errors().length, 1) + t.match(errors(), [ /Error: Exit prior to config file resolving./, ], 'should exit with config error msg') - t.match(logs.verbose, [ - ['stack', /Error: Exit prior to config file resolving./], - ], 'should exit with config error msg') + t.strictSame(logs, [], 'no logs if it doesnt load') }) t.test('no logs dir', async (t) => { @@ -346,69 +345,62 @@ t.test('no logs dir', async (t) => { }) await exitHandler(new Error()) - t.match(logs.error.filter(([t]) => t === ''), [ - ['', 'Log files were not written due to the config logs-max=0'], - ]) - t.match(logs.filter(([_, task]) => task === 'npm.load.mkdirplogs'), []) + t.match(logs.error[2], + 'Log files were not written due to the config logs-max=0') + t.match(logs.filter((l) => l.includes('npm.load.mkdirplogs')), []) }) t.test('timers fail to write', async (t) => { - // we want the fs.writeFileSync in the Timers class to fail - const mockTimers = tmock(t, '{LIB}/utils/timers.js', { - fs: { - ...fs, - writeFileSync: (file, ...rest) => { - if (file.includes('LOGS_DIR')) { - throw new Error('err') - } - - return fs.writeFileSync(file, ...rest) - }, - }, - }) - const { exitHandler, logs } = await mockExitHandler(t, { config: (dirs) => ({ 'logs-dir': resolve(dirs.prefix, 'LOGS_DIR'), timing: true, }), mocks: { - // note, this is relative to test/fixtures/mock-npm.js not this file - '{LIB}/utils/timers.js': mockTimers, + // we want the fs.writeFileSync in the Timers class to fail + '{LIB}/utils/timers.js': tmock(t, '{LIB}/utils/timers.js', { + 'node:fs': { + ...fs, + writeFileSync: (file, ...rest) => { + if (file.includes('LOGS_DIR')) { + throw new Error('err') + } + + return fs.writeFileSync(file, ...rest) + }, + }, + }), }, }) await exitHandler(new Error()) - t.match(logs.error.filter(([t]) => t === ''), [['', `error writing to the directory`]]) + t.match(logs.warn[0], `timing could not write timing file: Error: err`) }) t.test('log files fail to write', async (t) => { - // we want the fsMiniPass.WriteStreamSync in the LogFile class to fail - const mockLogFile = tmock(t, '{LIB}/utils/log-file.js', { - 'fs-minipass': { - ...fsMiniPass, - WriteStreamSync: (file, ...rest) => { - if (file.includes('LOGS_DIR')) { - throw new Error('err') - } - }, - }, - }) - const { exitHandler, logs } = await mockExitHandler(t, { config: (dirs) => ({ 'logs-dir': resolve(dirs.prefix, 'LOGS_DIR'), }), mocks: { - // note, this is relative to test/fixtures/mock-npm.js not this file - '{LIB}/utils/log-file.js': mockLogFile, + // we want the fsMiniPass.WriteStreamSync in the LogFile class to fail + '{LIB}/utils/log-file.js': tmock(t, '{LIB}/utils/log-file.js', { + 'fs-minipass': { + ...fsMiniPass, + WriteStreamSync: (file) => { + if (file.includes('LOGS_DIR')) { + throw new Error('err') + } + }, + }, + }), }, }) await exitHandler(new Error()) - t.match(logs.error.filter(([t]) => t === ''), [['', `error writing to the directory`]]) + t.match(logs.error[2], `error writing to the directory`) }) t.test('files from error message', async (t) => { @@ -424,9 +416,7 @@ t.test('files from error message', async (t) => { const errorFileName = logFiles.find(f => f.endsWith('error-file.txt')) const errorFile = fs.readFileSync(join(cache, 
'_logs', errorFileName)).toString() - const [log] = logs.error.filter(([t]) => t === '') - - t.match(log[1], /For a full report see:\n.*-error-file\.txt/) + t.match(logs[2], /For a full report see:\n.*-error-file\.txt/) t.match(errorFile, '# error file content') t.match(errorFile, 'Log files:') }) @@ -440,7 +430,7 @@ t.test('files from error message with error', async (t) => { ['error-file.txt', '# error file content'], ], mocks: { - fs: { + 'node:fs': { ...fs, writeFileSync: (dir) => { if (dir.includes('LOGS_DIR') && dir.endsWith('error-file.txt')) { @@ -453,14 +443,12 @@ t.test('files from error message with error', async (t) => { await exitHandler(err('Error message')) - const [log] = logs.warn.filter(([t]) => t === '') - - t.match(log[1], /Could not write error message to.*error-file\.txt.*err/) + t.match(logs.warn[0], /Could not write error message to.*error-file\.txt.*err/) }) t.test('timing with no error', async (t) => { const { exitHandler, timingFile, npm, logs } = await mockExitHandler(t, { - config: { timing: true }, + config: { timing: true, loglevel: 'silly' }, }) await exitHandler() @@ -468,47 +456,50 @@ t.test('timing with no error', async (t) => { t.equal(process.exitCode, 0) - const msg = logs.info.filter(([t]) => t === '')[0][1] - t.match(msg, /A complete log of this run can be found in:/) + const msg = logs.info.byTitle('timing')[0] t.match(msg, /Timing info written to:/) - t.match( - timingFileData.timers, - Object.keys(npm.finishedTimers).reduce((acc, k) => { - acc[k] = Number - return acc - }, {}) - ) - t.strictSame(npm.unfinishedTimers, new Map()) t.match(timingFileData, { metadata: { command: [], - version: '1.0.0', + version: npm.version, logfiles: [String], }, timers: { + 'npm:load': Number, npm: Number, }, }) }) +t.test('timing message hidden by loglevel', async (t) => { + const { exitHandler, logs } = await mockExitHandler(t, { + config: { timing: true, loglevel: 'notice' }, + }) + + await exitHandler() + + t.equal(process.exitCode, 0) + + t.strictSame(logs.info, [], 'no log message') +}) + t.test('unfinished timers', async (t) => { const { exitHandler, timingFile, npm } = await mockExitHandler(t, { config: { timing: true }, }) - process.emit('time', 'foo') - process.emit('time', 'bar') + time.start('foo') + time.start('bar') await exitHandler() const timingFileData = await timingFile() t.equal(process.exitCode, 0) - t.match(npm.unfinishedTimers, new Map([['foo', Number], ['bar', Number]])) t.match(timingFileData, { metadata: { command: [], - version: '1.0.0', + version: npm.version, logfiles: [String], }, timers: { @@ -526,7 +517,7 @@ t.test('uses code from errno', async (t) => { await exitHandler(err('Error with errno', { errno: 127 })) t.equal(process.exitCode, 127) - t.match(logs.error, [['errno', 127]]) + t.match(logs.error, ['errno 127']) }) t.test('uses code from number', async (t) => { @@ -534,7 +525,7 @@ t.test('uses code from number', async (t) => { await exitHandler(err('Error with code type number', 404)) t.equal(process.exitCode, 404) - t.match(logs.error, [['code', 404]]) + t.match(logs.error, ['code 404']) }) t.test('uses all err special properties', async t => { @@ -548,11 +539,13 @@ t.test('uses all err special properties', async t => { await exitHandler(err('Error with code type number', properties)) t.equal(process.exitCode, 1) - t.match(logs.error, keys.map((k) => [k, `${k}-hey`]), 'all special keys get logged') + t.match(logs.error, keys.map((k) => `${k} ${k}-hey`), 'all special keys get logged') }) t.test('verbose logs replace info on err 
props', async t => { - const { exitHandler, logs } = await mockExitHandler(t) + const { exitHandler, logs } = await mockExitHandler(t, { + config: { loglevel: 'verbose' }, + }) const keys = ['type', 'stack', 'pkgid'] const properties = keys.reduce((acc, k) => { @@ -563,8 +556,8 @@ t.test('verbose logs replace info on err props', async t => { await exitHandler(err('Error with code type number', properties)) t.equal(process.exitCode, 1) t.match( - logs.verbose.filter(([p]) => !['logfile', 'title', 'argv'].includes(p)), - keys.map((k) => [k, `${k}-https://user:***@registry.npmjs.org/`]), + logs.verbose.filter(l => !/^(logfile|title|argv)/.test(l)), + keys.map((k) => `${k} ${k}-https://user:***@registry.npmjs.org/`), 'all special keys get replaced' ) }) @@ -583,11 +576,8 @@ t.test('defaults to log error msg if stack is missing when unloaded', async (t) await exitHandler(err('Error with no stack', { code: 'ENOSTACK', errno: 127 }, true)) t.equal(process.exitCode, 127) - t.same(errors, ['Error with no stack'], 'should use error msg') - t.match(logs.error, [ - ['code', 'ENOSTACK'], - ['errno', 127], - ]) + t.strictSame(errors(), ['Error with no stack'], 'should use error msg') + t.strictSame(logs.error, []) }) t.test('exits uncleanly when only emitting exit event', async (t) => { @@ -595,36 +585,43 @@ t.test('exits uncleanly when only emitting exit event', async (t) => { process.emit('exit') - t.match(logs.error, [['', 'Exit handler never called!']]) + t.match(logs.error, ['Exit handler never called!']) t.equal(process.exitCode, 1, 'exitCode coerced to 1') }) t.test('do no fancy handling for shellouts', async t => { - const { exitHandler, logs } = await mockExitHandler(t, { + const mockShelloutExit = (t) => mockExitHandler(t, { command: 'exec', exec: true, argv: ['-c', 'exit'], + config: { + timing: false, + }, }) - const loudNoises = () => - logs.filter(([level]) => ['warn', 'error'].includes(level)) - t.test('shellout with a numeric error code', async t => { + const { exitHandler, logs, errors } = await mockShelloutExit(t) await exitHandler(err('', 5)) t.equal(process.exitCode, 5, 'got expected exit code') - t.strictSame(loudNoises(), [], 'no noisy warnings') + t.strictSame(logs.error, [], 'no noisy warnings') + t.strictSame(logs.warn, [], 'no noisy warnings') + t.strictSame(errors(), []) }) t.test('shellout without a numeric error code (something in npm)', async t => { + const { exitHandler, logs, errors } = await mockShelloutExit(t) await exitHandler(err('', 'banana stand')) t.equal(process.exitCode, 1, 'got expected exit code') // should log some warnings and errors, because something weird happened - t.strictNotSame(loudNoises(), [], 'bring the noise') + t.strictNotSame(logs.error, [], 'bring the noise') + t.strictSame(errors(), ['']) }) t.test('shellout with code=0 (extra weird?)', async t => { + const { exitHandler, logs, errors } = await mockShelloutExit(t) await exitHandler(Object.assign(new Error(), { code: 0 })) t.equal(process.exitCode, 1, 'got expected exit code') - t.strictNotSame(loudNoises(), [], 'bring the noise') + t.strictNotSame(logs.error, [], 'bring the noise') + t.strictSame(errors(), ['']) }) }) diff --git a/deps/npm/test/lib/utils/update-notifier.js b/deps/npm/test/lib/cli/update-notifier.js similarity index 99% rename from deps/npm/test/lib/utils/update-notifier.js rename to deps/npm/test/lib/cli/update-notifier.js index 052367c60dadb1..2d29868b792a17 100644 --- a/deps/npm/test/lib/utils/update-notifier.js +++ b/deps/npm/test/lib/cli/update-notifier.js @@ -83,7 +83,7 @@ 
const runUpdateNotifier = async (t, { prefixDir, argv, }) - const updateNotifier = tmock(t, '{LIB}/utils/update-notifier.js', mocks) + const updateNotifier = tmock(t, '{LIB}/cli/update-notifier.js', mocks) const result = await updateNotifier(mock.npm) diff --git a/deps/npm/test/lib/es6/validate-engines.js b/deps/npm/test/lib/cli/validate-engines.js similarity index 94% rename from deps/npm/test/lib/es6/validate-engines.js rename to deps/npm/test/lib/cli/validate-engines.js index 0e6bce726af966..1c0b59700a7738 100644 --- a/deps/npm/test/lib/es6/validate-engines.js +++ b/deps/npm/test/lib/cli/validate-engines.js @@ -3,7 +3,7 @@ const mockGlobals = require('@npmcli/mock-globals') const tmock = require('../../fixtures/tmock') const mockValidateEngines = (t) => { - const validateEngines = tmock(t, '{LIB}/es6/validate-engines.js', { + const validateEngines = tmock(t, '{LIB}/cli/validate-engines.js', { '{ROOT}/package.json': { version: '1.2.3', engines: { node: '>=0' } }, }) mockGlobals(t, { 'process.version': 'v4.5.6' }) diff --git a/deps/npm/test/lib/commands/access.js b/deps/npm/test/lib/commands/access.js index 7aec33701297ca..96f1bcd074282e 100644 --- a/deps/npm/test/lib/commands/access.js +++ b/deps/npm/test/lib/commands/access.js @@ -130,8 +130,8 @@ t.test('list', t => { registry.getPackages({ team: '@npm:test-team', packages }) await npm.exec('access', ['list', 'packages', '@npm:test-team']) t.same(outputs, [ - ['@npmcli/other-package: read-write'], - ['@npmcli/test-package: read-only'], + '@npmcli/other-package: read-write', + '@npmcli/test-package: read-only', ]) }) @@ -146,8 +146,8 @@ t.test('list', t => { registry.getPackages({ team: 'npm', packages }) await npm.exec('access', ['list', 'packages']) t.same(outputs, [ - ['@npmcli/other-package: read-write'], - ['@npmcli/test-package: read-only'], + '@npmcli/other-package: read-write', + '@npmcli/test-package: read-only', ]) }) @@ -174,8 +174,8 @@ t.test('list', t => { registry.getCollaborators({ spec: '@npmcli/test-package', collaborators }) await npm.exec('access', ['list', 'collaborators', '@npmcli/test-package']) t.same(outputs, [ - ['github: read-only'], - ['npm: read-write'], + 'github: read-only', + 'npm: read-write', ]) }) @@ -188,7 +188,7 @@ t.test('list', t => { registry.getCollaborators({ spec: '@npmcli/test-package', collaborators }) await npm.exec('access', ['list', 'collaborators', '@npmcli/test-package', 'npm']) t.same(outputs, [ - ['npm: read-write'], + 'npm: read-write', ]) }) t.end() @@ -208,7 +208,7 @@ t.test('get', t => { }) registry.getVisibility({ spec: '@npmcli/test-package', visibility: { public: true } }) await npm.exec('access', ['get', 'status', '@npmcli/test-package']) - t.same(outputs, [['@npmcli/test-package: public']]) + t.same(outputs, ['@npmcli/test-package: public']) }) t.test('status implicit package', async t => { const { npm, outputs } = await loadMockNpm(t, { @@ -222,7 +222,7 @@ t.test('get', t => { }) registry.getVisibility({ spec: '@npmcli/test-package', visibility: { public: true } }) await npm.exec('access', ['get', 'status']) - t.same(outputs, [['@npmcli/test-package: public']]) + t.same(outputs, ['@npmcli/test-package: public']) }) t.test('status no package', async t => { const { npm } = await loadMockNpm(t) @@ -263,7 +263,7 @@ t.test('set', t => { registry.setAccess({ spec: '@npmcli/test-package', body: { access: 'public' } }) registry.getVisibility({ spec: '@npmcli/test-package', visibility: { public: true } }) await npm.exec('access', ['set', 'status=public', '@npmcli/test-package']) - 
t.same(outputs, [['@npmcli/test-package: public']]) + t.same(outputs, ['@npmcli/test-package: public']) }) t.test('status=private', async t => { const { npm, outputs } = await loadMockNpm(t) @@ -274,7 +274,7 @@ t.test('set', t => { registry.setAccess({ spec: '@npmcli/test-package', body: { access: 'restricted' } }) registry.getVisibility({ spec: '@npmcli/test-package', visibility: { public: false } }) await npm.exec('access', ['set', 'status=private', '@npmcli/test-package']) - t.same(outputs, [['@npmcli/test-package: private']]) + t.same(outputs, ['@npmcli/test-package: private']) }) t.test('status=invalid', async t => { const { npm } = await loadMockNpm(t) diff --git a/deps/npm/test/lib/commands/audit.js b/deps/npm/test/lib/commands/audit.js index 9a57a02184ea18..701d374ade9850 100644 --- a/deps/npm/test/lib/commands/audit.js +++ b/deps/npm/test/lib/commands/audit.js @@ -1860,7 +1860,7 @@ t.test('audit signatures', async t => { ) }) - t.test('with invalid signtaures and color output enabled', async t => { + t.test('with invalid signatures and color output enabled', async t => { const { npm, joinedOutput } = await loadMockNpm(t, { prefixDir: installWithValidSigs, config: { color: 'always' }, @@ -1875,7 +1875,7 @@ t.test('audit signatures', async t => { t.match( joinedOutput(), // eslint-disable-next-line no-control-regex - /\u001b\[1m\u001b\[31minvalid\u001b\[39m\u001b\[22m registry signature/ + /\u001b\[91minvalid\u001b\[39m registry signature/ ) t.matchSnapshot(joinedOutput()) }) @@ -1892,7 +1892,7 @@ t.test('audit signatures', async t => { const registry = new MockRegistry({ tap: t, registry: npm.config.get('registry') }) await manifestWithValidAttestations({ registry }) const fixture = fs.readFileSync( - path.join(__dirname, '..', 'fixtures', 'sigstore/valid-sigstore-attestations.json'), + path.resolve(__dirname, '../../fixtures/sigstore/valid-sigstore-attestations.json'), 'utf8' ) registry.nock.get('/-/npm/v1/attestations/sigstore@1.0.0').reply(200, fixture) @@ -1918,11 +1918,11 @@ t.test('audit signatures', async t => { await manifestWithValidAttestations({ registry }) await manifestWithMultipleValidAttestations({ registry }) const fixture1 = fs.readFileSync( - path.join(__dirname, '..', 'fixtures', 'sigstore/valid-sigstore-attestations.json'), + path.join(__dirname, '../../fixtures/sigstore/valid-sigstore-attestations.json'), 'utf8' ) const fixture2 = fs.readFileSync( - path.join(__dirname, '..', 'fixtures', 'sigstore/valid-tuf-js-attestations.json'), + path.join(__dirname, '../../fixtures/sigstore/valid-tuf-js-attestations.json'), 'utf8' ) registry.nock.get('/-/npm/v1/attestations/sigstore@1.0.0').reply(200, fixture1) @@ -1951,7 +1951,7 @@ t.test('audit signatures', async t => { const registry = new MockRegistry({ tap: t, registry: npm.config.get('registry') }) await manifestWithValidAttestations({ registry }) const fixture = fs.readFileSync( - path.join(__dirname, '..', 'fixtures', 'sigstore/valid-sigstore-attestations.json'), + path.join(__dirname, '../../fixtures/sigstore/valid-sigstore-attestations.json'), 'utf8' ) registry.nock.get('/-/npm/v1/attestations/sigstore@1.0.0').reply(200, fixture) @@ -1986,7 +1986,7 @@ t.test('audit signatures', async t => { const registry = new MockRegistry({ tap: t, registry: npm.config.get('registry') }) await manifestWithValidAttestations({ registry }) const fixture = fs.readFileSync( - path.join(__dirname, '..', 'fixtures', 'sigstore/valid-sigstore-attestations.json'), + path.join(__dirname, 
'../../fixtures/sigstore/valid-sigstore-attestations.json'), 'utf8' ) registry.nock.get('/-/npm/v1/attestations/sigstore@1.0.0').reply(200, fixture) @@ -2016,11 +2016,11 @@ t.test('audit signatures', async t => { await manifestWithValidAttestations({ registry }) await manifestWithMultipleValidAttestations({ registry }) const fixture1 = fs.readFileSync( - path.join(__dirname, '..', 'fixtures', 'sigstore/valid-sigstore-attestations.json'), + path.join(__dirname, '../../fixtures/sigstore/valid-sigstore-attestations.json'), 'utf8' ) const fixture2 = fs.readFileSync( - path.join(__dirname, '..', 'fixtures', 'sigstore/valid-tuf-js-attestations.json'), + path.join(__dirname, '../../fixtures/sigstore/valid-tuf-js-attestations.json'), 'utf8' ) registry.nock.get('/-/npm/v1/attestations/sigstore@1.0.0').reply(200, fixture1) diff --git a/deps/npm/test/lib/commands/ci.js b/deps/npm/test/lib/commands/ci.js index 681ccad7d87a75..c4b855932a9ed5 100644 --- a/deps/npm/test/lib/commands/ci.js +++ b/deps/npm/test/lib/commands/ci.js @@ -142,7 +142,7 @@ t.test('--no-audit and --ignore-scripts', async t => { 'package-lock.json': JSON.stringify(packageLock), }, }) - require('nock').emitter.on('no match', req => { + require('nock').emitter.on('no match', () => { t.fail('Should not audit') }) const manifest = registry.manifest({ name: 'abbrev' }) @@ -164,7 +164,6 @@ t.test('lifecycle scripts', async t => { }, mocks: { '@npmcli/run-script': (opts) => { - t.ok(opts.banner) scripts.push(opts.event) }, }, diff --git a/deps/npm/test/lib/commands/config.js b/deps/npm/test/lib/commands/config.js index b54096fd216f27..0806326e2e8e42 100644 --- a/deps/npm/test/lib/commands/config.js +++ b/deps/npm/test/lib/commands/config.js @@ -3,16 +3,49 @@ const fs = require('fs/promises') const ini = require('ini') const tspawk = require('../../fixtures/tspawk') const t = require('tap') +const { load: _loadMockNpm } = require('../../fixtures/mock-npm') +const { cleanCwd } = require('../../fixtures/clean-snapshot.js') const spawk = tspawk(t) -const Sandbox = require('../../fixtures/sandbox.js') +const replaceJsonOrIni = (key) => [ + new RegExp(`(\\s(?:${key} = |"${key}": )"?)[^"\\n,]+`, 'g'), + `$1{${key.toUpperCase()}}`, +] + +const replaceIniComment = (key) => [ + new RegExp(`(; ${key} = ).*`, 'g'), + `$1{${key.replaceAll(' ', '-').toUpperCase()}}`, +] + +t.cleanSnapshot = (s) => cleanCwd(s) + .replaceAll(...replaceIniComment('node version')) + .replaceAll(...replaceIniComment('npm version')) + .replaceAll(...replaceIniComment('node bin location')) + .replaceAll(...replaceJsonOrIni('npm-version')) + .replaceAll(...replaceJsonOrIni('viewer')) + .replaceAll(...replaceJsonOrIni('shell')) + .replaceAll(...replaceJsonOrIni('editor')) + .replaceAll(...replaceJsonOrIni('progress')) + .replaceAll(...replaceJsonOrIni('color')) + .replaceAll(...replaceJsonOrIni('cache')) + +const loadMockNpm = (t, opts = {}) => _loadMockNpm(t, { + ...opts, + config: { + ...opts.config, + // Reset configs that mock npm sets by default + 'fetch-retries': undefined, + loglevel: undefined, + color: undefined, + }, +}) t.test('config no args', async t => { - const sandbox = new Sandbox(t) + const { npm } = await loadMockNpm(t) await t.rejects( - sandbox.run('config', []), + npm.exec('config', []), { code: 'EUSAGE', }, @@ -21,10 +54,14 @@ t.test('config no args', async t => { }) t.test('config ignores workspaces', async t => { - const sandbox = new Sandbox(t) + const { npm } = await loadMockNpm(t, { + config: { + workspaces: true, + }, + }) await t.rejects( - 
sandbox.run('config', ['--workspaces']), + npm.exec('config'), { code: 'ENOWORKSPACES', }, @@ -33,74 +70,92 @@ t.test('config ignores workspaces', async t => { }) t.test('config list', async t => { - const temp = t.testdir({ - global: { - npmrc: 'globalloaded=yes', - }, - project: { + const { npm, joinedOutput } = await loadMockNpm(t, { + prefixDir: { '.npmrc': 'projectloaded=yes', }, - home: { + globalPrefixDir: { + etc: { + npmrc: 'globalloaded=yes', + }, + }, + homeDir: { '.npmrc': 'userloaded=yes', }, }) - const global = join(temp, 'global') - const project = join(temp, 'project') - const home = join(temp, 'home') - const sandbox = new Sandbox(t, { global, project, home }) - await sandbox.run('config', ['list']) + await npm.exec('config', ['list']) + + const output = joinedOutput() + + t.match(output, 'projectloaded = "yes"') + t.match(output, 'globalloaded = "yes"') + t.match(output, 'userloaded = "yes"') - t.matchSnapshot(sandbox.output, 'output matches snapshot') + t.matchSnapshot(output, 'output matches snapshot') }) t.test('config list --long', async t => { - const temp = t.testdir({ - global: { - npmrc: 'globalloaded=yes', - }, - project: { + const { npm, joinedOutput } = await loadMockNpm(t, { + prefixDir: { '.npmrc': 'projectloaded=yes', }, - home: { + globalPrefixDir: { + etc: { + npmrc: 'globalloaded=yes', + }, + }, + homeDir: { '.npmrc': 'userloaded=yes', }, + config: { + long: true, + }, }) - const global = join(temp, 'global') - const project = join(temp, 'project') - const home = join(temp, 'home') - const sandbox = new Sandbox(t, { global, project, home }) - await sandbox.run('config', ['list', '--long']) + await npm.exec('config', ['list']) + + const output = joinedOutput() - t.matchSnapshot(sandbox.output, 'output matches snapshot') + t.match(output, 'projectloaded = "yes"') + t.match(output, 'globalloaded = "yes"') + t.match(output, 'userloaded = "yes"') + + t.matchSnapshot(output, 'output matches snapshot') }) t.test('config list --json', async t => { - const temp = t.testdir({ - global: { - npmrc: 'globalloaded=yes', - }, - project: { + const { npm, joinedOutput } = await loadMockNpm(t, { + prefixDir: { '.npmrc': 'projectloaded=yes', }, - home: { + globalPrefixDir: { + etc: { + npmrc: 'globalloaded=yes', + }, + }, + homeDir: { '.npmrc': 'userloaded=yes', }, + config: { + json: true, + }, }) - const global = join(temp, 'global') - const project = join(temp, 'project') - const home = join(temp, 'home') - const sandbox = new Sandbox(t, { global, project, home }) - await sandbox.run('config', ['list', '--json']) + await npm.exec('config', ['list']) + + const output = joinedOutput() + + t.match(output, '"projectloaded": "yes",') + t.match(output, '"globalloaded": "yes",') + t.match(output, '"userloaded": "yes",') - t.matchSnapshot(sandbox.output, 'output matches snapshot') + t.matchSnapshot(output, 'output matches snapshot') }) t.test('config list with publishConfig', async t => { - const temp = t.testdir({ - project: { + const loadMockNpmWithPublishConfig = (t, opts) => loadMockNpm(t, { + prefixDir: { 'package.json': JSON.stringify({ publishConfig: { registry: 'https://some.registry', @@ -108,21 +163,43 @@ t.test('config list with publishConfig', async t => { }, }), }, + ...opts, }) - const project = join(temp, 'project') - const sandbox = new Sandbox(t, { project }) - await sandbox.run('config', ['list', '']) - await sandbox.run('config', ['list', '--global']) + t.test('local', async t => { + const { npm, joinedOutput } = await loadMockNpmWithPublishConfig(t) - 
t.matchSnapshot(sandbox.output, 'output matches snapshot') + await npm.exec('config', ['list']) + + const output = joinedOutput() + + t.match(output, 'registry = "https://some.registry"') + + t.matchSnapshot(output, 'output matches snapshot') + }) + + t.test('global', async t => { + const { npm, joinedOutput } = await loadMockNpmWithPublishConfig(t, { + config: { + global: true, + }, + }) + + await npm.exec('config', ['list']) + + const output = joinedOutput() + + t.notMatch(output, 'registry = "https://some.registry"') + + t.matchSnapshot(output, 'output matches snapshot') + }) }) t.test('config delete no args', async t => { - const sandbox = new Sandbox(t) + const { npm } = await loadMockNpm(t) await t.rejects( - sandbox.run('config', ['delete']), + npm.exec('config', ['delete']), { code: 'EUSAGE', }, @@ -132,14 +209,15 @@ t.test('config delete no args', async t => { t.test('config delete single key', async t => { // location defaults to user, so we work with a userconfig - const home = t.testdir({ - '.npmrc': 'access=public\nall=true', + const { npm, home } = await loadMockNpm(t, { + homeDir: { + '.npmrc': 'access=public\nall=true', + }, }) - const sandbox = new Sandbox(t, { home }) - await sandbox.run('config', ['delete', 'access']) + await npm.exec('config', ['delete', 'access']) - t.equal(sandbox.config.get('access'), null, 'acces should be defaulted') + t.equal(npm.config.get('access'), null, 'acces should be defaulted') const contents = await fs.readFile(join(home, '.npmrc'), { encoding: 'utf8' }) const rc = ini.parse(contents) @@ -147,15 +225,16 @@ t.test('config delete single key', async t => { }) t.test('config delete multiple keys', async t => { - const home = t.testdir({ - '.npmrc': 'access=public\nall=true\naudit=false', + const { npm, home } = await loadMockNpm(t, { + homeDir: { + '.npmrc': 'access=public\nall=true\naudit=false', + }, }) - const sandbox = new Sandbox(t, { home }) - await sandbox.run('config', ['delete', 'access', 'all']) + await npm.exec('config', ['delete', 'access', 'all']) - t.equal(sandbox.config.get('access'), null, 'access should be defaulted') - t.equal(sandbox.config.get('all'), false, 'all should be defaulted') + t.equal(npm.config.get('access'), null, 'access should be defaulted') + t.equal(npm.config.get('all'), false, 'all should be defaulted') const contents = await fs.readFile(join(home, '.npmrc'), { encoding: 'utf8' }) const rc = ini.parse(contents) @@ -164,76 +243,87 @@ t.test('config delete multiple keys', async t => { }) t.test('config delete key --location=global', async t => { - const global = t.testdir({ - npmrc: 'access=public\nall=true', + const { npm, globalPrefix } = await loadMockNpm(t, { + globalPrefixDir: { + etc: { + npmrc: 'access=public\nall=true', + }, + }, + config: { + location: 'global', + }, }) + await npm.exec('config', ['delete', 'access']) - const sandbox = new Sandbox(t, { global }) - await sandbox.run('config', ['delete', 'access', '--location=global']) + t.equal(npm.config.get('access', 'global'), undefined, 'access should be defaulted') - t.equal(sandbox.config.get('access', 'global'), undefined, 'access should be defaulted') - - const contents = await fs.readFile(join(global, 'npmrc'), { encoding: 'utf8' }) + const contents = await fs.readFile(join(globalPrefix, 'etc/npmrc'), { encoding: 'utf8' }) const rc = ini.parse(contents) t.not(rc.access, 'access is not set') }) t.test('config delete key --global', async t => { - const global = t.testdir({ - npmrc: 'access=public\nall=true', + const { npm, globalPrefix } = 
await loadMockNpm(t, { + globalPrefixDir: { + etc: { + npmrc: 'access=public\nall=true', + }, + }, + config: { + global: true, + }, }) - const sandbox = new Sandbox(t, { global }) - await sandbox.run('config', ['delete', 'access', '--global']) + await npm.exec('config', ['delete', 'access']) - t.equal(sandbox.config.get('access', 'global'), undefined, 'access should no longer be set') + t.equal(npm.config.get('access', 'global'), undefined, 'access should no longer be set') - const contents = await fs.readFile(join(global, 'npmrc'), { encoding: 'utf8' }) + const contents = await fs.readFile(join(globalPrefix, 'etc/npmrc'), { encoding: 'utf8' }) const rc = ini.parse(contents) t.not(rc.access, 'access is not set') }) t.test('config set invalid option', async t => { - const sandbox = new Sandbox(t) + const { npm } = await loadMockNpm(t) await t.rejects( - sandbox.run('config', ['set', 'nonexistantconfigoption', 'something']), + npm.exec('config', ['set', 'nonexistantconfigoption', 'something']), /not a valid npm option/ ) }) t.test('config set deprecated option', async t => { - const sandbox = new Sandbox(t) + const { npm } = await loadMockNpm(t) await t.rejects( - sandbox.run('config', ['set', 'shrinkwrap', 'true']), + npm.exec('config', ['set', 'shrinkwrap', 'true']), /deprecated/ ) }) t.test('config set nerf-darted option', async t => { - const sandbox = new Sandbox(t) - await sandbox.run('config', ['set', '//npm.pkg.github.com/:_authToken', '0xdeadbeef']) + const { npm } = await loadMockNpm(t) + await npm.exec('config', ['set', '//npm.pkg.github.com/:_authToken', '0xdeadbeef']) t.equal( - sandbox.config.get('//npm.pkg.github.com/:_authToken'), + npm.config.get('//npm.pkg.github.com/:_authToken'), '0xdeadbeef', 'nerf-darted config is set' ) }) t.test('config set scoped optoin', async t => { - const sandbox = new Sandbox(t) - await sandbox.run('config', ['set', '@npm:registry', 'https://registry.npmjs.org']) + const { npm } = await loadMockNpm(t) + await npm.exec('config', ['set', '@npm:registry', 'https://registry.npmjs.org']) t.equal( - sandbox.config.get('@npm:registry'), + npm.config.get('@npm:registry'), 'https://registry.npmjs.org', 'scoped config is set' ) }) t.test('config set no args', async t => { - const sandbox = new Sandbox(t) + const { npm } = await loadMockNpm(t) await t.rejects( - sandbox.run('config', ['set']), + npm.exec('config', ['set']), { code: 'EUSAGE', }, @@ -242,45 +332,45 @@ t.test('config set no args', async t => { }) t.test('config set key', async t => { - const home = t.testdir({ - '.npmrc': 'access=public', + const { npm, home } = await loadMockNpm(t, { + homeDir: { + '.npmrc': 'access=public', + }, }) - const sandbox = new Sandbox(t, { home }) + await npm.exec('config', ['set', 'access']) - await sandbox.run('config', ['set', 'access']) - - t.equal(sandbox.config.get('access'), null, 'set the value for access') + t.equal(npm.config.get('access'), null, 'set the value for access') await t.rejects(fs.stat(join(home, '.npmrc'), { encoding: 'utf8' }), 'removed empty config') }) t.test('config set key value', async t => { - const home = t.testdir({ - '.npmrc': 'access=public', + const { npm, home } = await loadMockNpm(t, { + homeDir: { + '.npmrc': 'access=public', + }, }) - const sandbox = new Sandbox(t, { home }) + await npm.exec('config', ['set', 'access', 'restricted']) - await sandbox.run('config', ['set', 'access', 'restricted']) - - t.equal(sandbox.config.get('access'), 'restricted', 'set the value for access') + t.equal(npm.config.get('access'), 
'restricted', 'set the value for access') const contents = await fs.readFile(join(home, '.npmrc'), { encoding: 'utf8' }) const rc = ini.parse(contents) t.equal(rc.access, 'restricted', 'access is set to restricted') }) -t.test('config set key=value', async t => { - const home = t.testdir({ - '.npmrc': 'access=public', +t.test('config set key value with equals', async t => { + const { npm, home } = await loadMockNpm(t, { + homeDir: { + '.npmrc': 'access=public', + }, }) - const sandbox = new Sandbox(t, { home }) - - await sandbox.run('config', ['set', 'access=restricted']) + await npm.exec('config', ['set', 'access=restricted']) - t.equal(sandbox.config.get('access'), 'restricted', 'set the value for access') + t.equal(npm.config.get('access'), 'restricted', 'set the value for access') const contents = await fs.readFile(join(home, '.npmrc'), { encoding: 'utf8' }) const rc = ini.parse(contents) @@ -288,16 +378,17 @@ t.test('config set key=value', async t => { }) t.test('config set key1 value1 key2=value2 key3', async t => { - const home = t.testdir({ - '.npmrc': 'access=public\nall=true\naudit=true', + const { npm, home } = await loadMockNpm(t, { + homeDir: { + '.npmrc': 'access=public\nall=true\naudit=true', + }, }) - const sandbox = new Sandbox(t, { home }) - await sandbox.run('config', ['set', 'access', 'restricted', 'all=false', 'audit']) + await npm.exec('config', ['set', 'access', 'restricted', 'all=false', 'audit']) - t.equal(sandbox.config.get('access'), 'restricted', 'access was set') - t.equal(sandbox.config.get('all'), false, 'all was set') - t.equal(sandbox.config.get('audit'), true, 'audit was unset and restored to its default') + t.equal(npm.config.get('access'), 'restricted', 'access was set') + t.equal(npm.config.get('all'), false, 'all was set') + t.equal(npm.config.get('audit'), true, 'audit was unset and restored to its default') const contents = await fs.readFile(join(home, '.npmrc'), { encoding: 'utf8' }) const rc = ini.parse(contents) @@ -307,113 +398,120 @@ t.test('config set key1 value1 key2=value2 key3', async t => { }) t.test('config set invalid key logs warning', async t => { - const sandbox = new Sandbox(t) + const { npm, logs, home } = await loadMockNpm(t) // this doesn't reject, it only logs a warning - await sandbox.run('config', ['set', 'access=foo']) - t.match( - sandbox.logs.warn, - [['invalid config', 'access="foo"', `set in ${join(sandbox.home, '.npmrc')}`]], + await npm.exec('config', ['set', 'access=foo']) + t.equal(logs.warn[0], + `invalid config access="foo" set in ${join(home, '.npmrc')}`, 'logged warning' ) }) t.test('config set key=value --location=global', async t => { - const global = t.testdir({ - npmrc: 'access=public\nall=true', + const { npm, globalPrefix } = await loadMockNpm(t, { + globalPrefixDir: { + etc: { + npmrc: 'access=public\nall=true', + }, + }, + config: { + location: 'global', + }, }) - const sandbox = new Sandbox(t, { global }) - await sandbox.run('config', ['set', 'access=restricted', '--location=global']) + await npm.exec('config', ['set', 'access=restricted']) - t.equal(sandbox.config.get('access', 'global'), 'restricted', 'foo should be set') + t.equal(npm.config.get('access', 'global'), 'restricted', 'foo should be set') - const contents = await fs.readFile(join(global, 'npmrc'), { encoding: 'utf8' }) + const contents = await fs.readFile(join(globalPrefix, 'etc/npmrc'), { encoding: 'utf8' }) const rc = ini.parse(contents) t.equal(rc.access, 'restricted', 'access is set to restricted') }) t.test('config set key=value 
--global', async t => { - const global = t.testdir({ - npmrc: 'access=public\nall=true', + const { npm, globalPrefix } = await loadMockNpm(t, { + globalPrefixDir: { + etc: { + npmrc: 'access=public\nall=true', + }, + }, + config: { + global: true, + }, }) - const sandbox = new Sandbox(t, { global }) - await sandbox.run('config', ['set', 'access=restricted', '--global']) + await npm.exec('config', ['set', 'access=restricted']) - t.equal(sandbox.config.get('access', 'global'), 'restricted', 'access should be set') + t.equal(npm.config.get('access', 'global'), 'restricted', 'access should be set') - const contents = await fs.readFile(join(global, 'npmrc'), { encoding: 'utf8' }) + const contents = await fs.readFile(join(globalPrefix, 'etc/npmrc'), { encoding: 'utf8' }) const rc = ini.parse(contents) t.equal(rc.access, 'restricted', 'access is set to restricted') }) t.test('config get no args', async t => { - const sandbox = new Sandbox(t) + const { npm, joinedOutput, clearOutput } = await loadMockNpm(t) - await sandbox.run('config', ['get']) - const getOutput = sandbox.output + await npm.exec('config', ['get']) + const getOutput = joinedOutput() - sandbox.reset() - - await sandbox.run('config', ['list']) - const listOutput = sandbox.output + clearOutput() + await npm.exec('config', ['list']) + const listOutput = joinedOutput() t.equal(listOutput, getOutput, 'get with no args outputs list') }) t.test('config get single key', async t => { - const sandbox = new Sandbox(t) + const { npm, joinedOutput } = await loadMockNpm(t) - await sandbox.run('config', ['get', 'all']) - t.equal(sandbox.output, `${sandbox.config.get('all')}`, 'should get the value') + await npm.exec('config', ['get', 'all']) + t.equal(joinedOutput(), `${npm.config.get('all')}`, 'should get the value') }) t.test('config get multiple keys', async t => { - const sandbox = new Sandbox(t) + const { npm, joinedOutput } = await loadMockNpm(t) - await sandbox.run('config', ['get', 'yes', 'all']) - t.ok( - sandbox.output.includes(`yes=${sandbox.config.get('yes')}`), - 'outputs yes' - ) - t.ok( - sandbox.output.includes(`all=${sandbox.config.get('all')}`), - 'outputs all' - ) + await npm.exec('config', ['get', 'yes', 'all']) + t.equal(joinedOutput(), `yes=${npm.config.get('yes')}\nall=${npm.config.get('all')}`) }) t.test('config get private key', async t => { - const sandbox = new Sandbox(t) + const { npm } = await loadMockNpm(t) await t.rejects( - sandbox.run('config', ['get', '_authToken']), + npm.exec('config', ['get', '_authToken']), /_authToken option is protected/, 'rejects with protected string' ) await t.rejects( - sandbox.run('config', ['get', '//localhost:8080/:_password']), + npm.exec('config', ['get', '//localhost:8080/:_password']), /_password option is protected/, 'rejects with protected string' ) }) t.test('config edit', async t => { - const home = t.testdir({ - '.npmrc': 'foo=bar\nbar=baz', - }) - const EDITOR = 'vim' const editor = spawk.spawn(EDITOR).exit(0) - const sandbox = new Sandbox(t, { home, env: { EDITOR } }) - await sandbox.run('config', ['edit']) + const { npm, home } = await loadMockNpm(t, { + homeDir: { + '.npmrc': 'foo=bar\nbar=baz', + }, + config: { + editor: EDITOR, + }, + }) + + await npm.exec('config', ['edit']) t.ok(editor.called, 'editor was spawned') t.same( editor.calledWith.args, - [join(sandbox.home, '.npmrc')], + [join(home, '.npmrc')], 'editor opened the user config file' ) @@ -427,10 +525,14 @@ t.test('config edit - editor exits non-0', async t => { const EDITOR = 'vim' const editor = 
spawk.spawn(EDITOR).exit(1) - const sandbox = new Sandbox(t) - sandbox.process.env.EDITOR = EDITOR + const { npm, home } = await loadMockNpm(t, { + config: { + editor: EDITOR, + }, + }) + await t.rejects( - sandbox.run('config', ['edit']), + npm.exec('config', ['edit']), { message: 'editor process exited with code: 1', }, @@ -440,101 +542,102 @@ t.test('config edit - editor exits non-0', async t => { t.ok(editor.called, 'editor was spawned') t.same( editor.calledWith.args, - [join(sandbox.home, '.npmrc')], + [join(home, '.npmrc')], 'editor opened the user config file' ) }) t.test('config fix', (t) => { t.test('no problems', async (t) => { - const home = t.testdir({ - '.npmrc': '', + const { npm, joinedOutput } = await loadMockNpm(t, { + homeDir: { + '.npmrc': '', + }, }) - const sandbox = new Sandbox(t, { home }) - await sandbox.run('config', ['fix']) - t.equal(sandbox.output, '', 'printed nothing') + await npm.exec('config', ['fix']) + t.equal(joinedOutput(), '', 'printed nothing') }) t.test('repairs all configs by default', async (t) => { - const root = t.testdir({ - global: { - npmrc: '_authtoken=notatoken\n_authToken=afaketoken', + const { npm, home, globalPrefix, joinedOutput } = await loadMockNpm(t, { + globalPrefixDir: { + etc: { + npmrc: '_authtoken=notatoken\n_authToken=afaketoken', + }, }, - home: { + homeDir: { '.npmrc': '_authtoken=thisisinvalid\n_auth=beef', }, }) + const registry = `//registry.npmjs.org/` - const sandbox = new Sandbox(t, { - global: join(root, 'global'), - home: join(root, 'home'), - }) - await sandbox.run('config', ['fix']) + await npm.exec('config', ['fix']) // global config fixes - t.match(sandbox.output, '`_authtoken` deleted from global config', + t.match(joinedOutput(), '`_authtoken` deleted from global config', 'output has deleted global _authtoken') - t.match(sandbox.output, `\`_authToken\` renamed to \`${registry}:_authToken\` in global config`, + t.match(joinedOutput(), `\`_authToken\` renamed to \`${registry}:_authToken\` in global config`, 'output has renamed global _authToken') - t.not(sandbox.config.get('_authtoken', 'global'), '_authtoken is not set globally') - t.not(sandbox.config.get('_authToken', 'global'), '_authToken is not set globally') - t.equal(sandbox.config.get(`${registry}:_authToken`, 'global'), 'afaketoken', + t.not(npm.config.get('_authtoken', 'global'), '_authtoken is not set globally') + t.not(npm.config.get('_authToken', 'global'), '_authToken is not set globally') + t.equal(npm.config.get(`${registry}:_authToken`, 'global'), 'afaketoken', 'global _authToken was scoped') - const globalConfig = await fs.readFile(join(root, 'global', 'npmrc'), { encoding: 'utf8' }) + const globalConfig = await fs.readFile(join(globalPrefix, 'etc/npmrc'), { encoding: 'utf8' }) t.equal(globalConfig, `${registry}:_authToken=afaketoken\n`, 'global config was written') // user config fixes - t.match(sandbox.output, '`_authtoken` deleted from user config', + t.match(joinedOutput(), '`_authtoken` deleted from user config', 'output has deleted user _authtoken') - t.match(sandbox.output, `\`_auth\` renamed to \`${registry}:_auth\` in user config`, + t.match(joinedOutput(), `\`_auth\` renamed to \`${registry}:_auth\` in user config`, 'output has renamed user _auth') - t.not(sandbox.config.get('_authtoken', 'user'), '_authtoken is not set in user config') - t.not(sandbox.config.get('_auth'), '_auth is not set in user config') - t.equal(sandbox.config.get(`${registry}:_auth`, 'user'), 'beef', 'user _auth was scoped') - const userConfig = await 
fs.readFile(join(root, 'home', '.npmrc'), { encoding: 'utf8' }) + t.not(npm.config.get('_authtoken', 'user'), '_authtoken is not set in user config') + t.not(npm.config.get('_auth'), '_auth is not set in user config') + t.equal(npm.config.get(`${registry}:_auth`, 'user'), 'beef', 'user _auth was scoped') + const userConfig = await fs.readFile(join(home, '.npmrc'), { encoding: 'utf8' }) t.equal(userConfig, `${registry}:_auth=beef\n`, 'user config was written') }) t.test('repairs only the config specified by --location if asked', async (t) => { - const root = t.testdir({ - global: { - npmrc: '_authtoken=notatoken\n_authToken=afaketoken', + const { npm, home, globalPrefix, joinedOutput } = await loadMockNpm(t, { + globalPrefixDir: { + etc: { + npmrc: '_authtoken=notatoken\n_authToken=afaketoken', + }, }, - home: { + homeDir: { '.npmrc': '_authtoken=thisisinvalid\n_auth=beef', }, + config: { + location: 'user', + }, }) const registry = `//registry.npmjs.org/` - const sandbox = new Sandbox(t, { - global: join(root, 'global'), - home: join(root, 'home'), - }) - await sandbox.run('config', ['fix', '--location=user']) + await npm.exec('config', ['fix']) // global config should be untouched - t.notMatch(sandbox.output, '`_authtoken` deleted from global', + t.notMatch(joinedOutput(), '`_authtoken` deleted from global', 'output has deleted global _authtoken') - t.notMatch(sandbox.output, `\`_authToken\` renamed to \`${registry}:_authToken\` in global`, + t.notMatch(joinedOutput(), `\`_authToken\` renamed to \`${registry}:_authToken\` in global`, 'output has renamed global _authToken') - t.equal(sandbox.config.get('_authtoken', 'global'), 'notatoken', 'global _authtoken untouched') - t.equal(sandbox.config.get('_authToken', 'global'), 'afaketoken', 'global _authToken untouched') - t.not(sandbox.config.get(`${registry}:_authToken`, 'global'), 'global _authToken not scoped') - const globalConfig = await fs.readFile(join(root, 'global', 'npmrc'), { encoding: 'utf8' }) + t.equal(npm.config.get('_authtoken', 'global'), 'notatoken', 'global _authtoken untouched') + t.equal(npm.config.get('_authToken', 'global'), 'afaketoken', 'global _authToken untouched') + t.not(npm.config.get(`${registry}:_authToken`, 'global'), 'global _authToken not scoped') + const globalConfig = await fs.readFile(join(globalPrefix, 'etc/npmrc'), { encoding: 'utf8' }) t.equal(globalConfig, '_authtoken=notatoken\n_authToken=afaketoken', 'global config was not written') // user config fixes - t.match(sandbox.output, '`_authtoken` deleted from user', + t.match(joinedOutput(), '`_authtoken` deleted from user', 'output has deleted user _authtoken') - t.match(sandbox.output, `\`_auth\` renamed to \`${registry}:_auth\` in user`, + t.match(joinedOutput(), `\`_auth\` renamed to \`${registry}:_auth\` in user`, 'output has renamed user _auth') - t.not(sandbox.config.get('_authtoken', 'user'), '_authtoken is not set in user config') - t.not(sandbox.config.get('_auth', 'user'), '_auth is not set in user config') - t.equal(sandbox.config.get(`${registry}:_auth`, 'user'), 'beef', 'user _auth was scoped') - const userConfig = await fs.readFile(join(root, 'home', '.npmrc'), { encoding: 'utf8' }) + t.not(npm.config.get('_authtoken', 'user'), '_authtoken is not set in user config') + t.not(npm.config.get('_auth', 'user'), '_auth is not set in user config') + t.equal(npm.config.get(`${registry}:_auth`, 'user'), 'beef', 'user _auth was scoped') + const userConfig = await fs.readFile(join(home, '.npmrc'), { encoding: 'utf8' }) t.equal(userConfig, 
`${registry}:_auth=beef\n`, 'user config was written') }) @@ -542,15 +645,21 @@ t.test('config fix', (t) => { }) t.test('completion', async t => { - const sandbox = new Sandbox(t) - - let allKeys - const testComp = async (argv, expect) => { - t.match(await sandbox.complete('config', argv), expect, argv.join(' ')) - if (!allKeys) { - allKeys = Object.keys(sandbox.config.definitions) - } - sandbox.reset() + const { config, npm } = await loadMockNpm(t, { command: 'config' }) + + const allKeys = Object.keys(npm.config.definitions) + + const testComp = async (argv, expect, msg) => { + const options = Array.isArray(argv) ? { + conf: { + argv: { + remain: ['config', ...argv], + }, + }, + } : argv + options.conf.argv.remain.unshift('npm') + const res = await config.completion(options) + t.strictSame(res, expect, msg ?? argv.join(' ')) } await testComp([], ['get', 'set', 'delete', 'ls', 'rm', 'edit', 'fix', 'list']) @@ -564,10 +673,12 @@ t.test('completion', async t => { await testComp(['list'], []) await testComp(['ls'], []) - const getCommand = await sandbox.complete('get') - t.match(getCommand, allKeys, 'also works for just npm get') - sandbox.reset() + await testComp({ + conf: { argv: { remain: ['get'] } }, + }, allKeys, 'also works for just npm get') - const partial = await sandbox.complete('config', 'l') - t.match(partial, ['get', 'set', 'delete', 'ls', 'rm', 'edit'], 'and works on partials') + await testComp({ + partialWord: 'l', + conf: { argv: { remain: ['config'] } }, + }, ['get', 'set', 'delete', 'ls', 'rm', 'edit', 'fix'], 'and works on partials') }) diff --git a/deps/npm/test/lib/commands/dist-tag.js b/deps/npm/test/lib/commands/dist-tag.js index 918f658c6462aa..5de9acb1da81f5 100644 --- a/deps/npm/test/lib/commands/dist-tag.js +++ b/deps/npm/test/lib/commands/dist-tag.js @@ -88,12 +88,7 @@ const mockDist = async (t, { ...npmOpts } = {}) => { distTag: mock['dist-tag'], fetchOpts: () => fetchOpts, result: () => mock.joinedOutput(), - logs: () => { - const distLogs = mock.logs.filter(l => l[1].startsWith('dist-tag')) - return distLogs.map(([, ...parts]) => { - return parts.map(p => p.toString()).join(' ').trim() - }).join('\n').trim() - }, + joinedLogs: () => mock.logs.byTitle('dist-tag').join('\n').trim(), } } @@ -159,13 +154,13 @@ t.test('ls on named package', async t => { }) t.test('ls on missing package', async t => { - const { distTag, logs } = await mockDist(t) + const { distTag, joinedLogs } = await mockDist(t) await t.rejects( distTag.exec(['ls', 'foo']), distTag.usage ) t.matchSnapshot( - logs(), + joinedLogs(), 'should log no dist-tag found msg' ) }) @@ -245,8 +240,8 @@ t.test('workspaces', async t => { }) t.test('two args -- list, @scoped/pkg, logs a warning and ignores workspaces', async t => { - const { result, logs } = await mockWorkspaces(t, ['list', '@scoped/pkg']) - t.match(logs(), 'Ignoring workspaces for specified package', 'logs a warning') + const { result, joinedLogs } = await mockWorkspaces(t, ['list', '@scoped/pkg']) + t.match(joinedLogs(), 'Ignoring workspaces for specified package', 'logs a warning') t.matchSnapshot(result(), 'printed the expected output') }) @@ -266,7 +261,10 @@ t.test('workspaces', async t => { }, }) - t.match(logs(), 'dist-tag ls Couldn\'t get dist-tag data for workspace-d@*', 'logs the error') + const error = logs.error.byTitle('dist-tag ls')[0] + + t.match(error, 'Couldn\'t get dist-tag data for Result {') + t.match(error, `name: 'workspace-d',`) t.matchSnapshot(result(), 'printed the expected output') }) }) @@ -284,14 +282,14 @@ 
t.test('add new tag', async t => { }) t.test('add using valid semver range as name', async t => { - const { distTag, logs } = await mockDist(t) + const { distTag, joinedLogs } = await mockDist(t) await t.rejects( distTag.exec(['add', '@scoped/another@7.7.7', '1.0.0']), /Tag name must not be a valid SemVer range: 1.0.0/, 'should exit with semver range error' ) t.matchSnapshot( - logs(), + joinedLogs(), 'should return success msg' ) }) @@ -328,31 +326,31 @@ t.test('add invalid tag', async t => { }) t.test('set existing version', async t => { - const { distTag, logs } = await mockDist(t) + const { distTag, joinedLogs } = await mockDist(t) await distTag.exec(['set', '@scoped/another@0.6.0', 'b']) t.matchSnapshot( - logs(), + joinedLogs(), 'should log warn msg' ) }) t.test('remove existing tag', async t => { - const { distTag, result, logs, fetchOpts } = await mockDist(t) + const { distTag, result, joinedLogs, fetchOpts } = await mockDist(t) await distTag.exec(['rm', '@scoped/another', 'c']) const opts = fetchOpts() t.equal(opts.method, 'DELETE', 'should trigger request to remove tag') - t.matchSnapshot(logs(), 'should log remove info') + t.matchSnapshot(joinedLogs(), 'should log remove info') t.matchSnapshot(result(), 'should return success msg') }) t.test('remove non-existing tag', async t => { - const { distTag, logs } = await mockDist(t) + const { distTag, joinedLogs } = await mockDist(t) await t.rejects( distTag.exec(['rm', '@scoped/another', 'nonexistent']), /nonexistent is not a dist-tag on @scoped\/another/, 'should exit with error' ) - t.matchSnapshot(logs(), 'should log error msg') + t.matchSnapshot(joinedLogs(), 'should log error msg') }) t.test('remove missing pkg name', async t => { diff --git a/deps/npm/test/lib/commands/doctor.js b/deps/npm/test/lib/commands/doctor.js index 1682a6cccfa483..bf4ea46a918a17 100644 --- a/deps/npm/test/lib/commands/doctor.js +++ b/deps/npm/test/lib/commands/doctor.js @@ -1,5 +1,5 @@ const t = require('tap') -const fs = require('fs') +const fs = require('fs/promises') const path = require('path') const { load: loadMockNpm } = require('../../fixtures/mock-npm') @@ -11,6 +11,7 @@ const cleanCacheSha = (str) => str.replace(/content-v2\/sha512\/[^"]+/g, 'content-v2/sha512/{sha}') t.cleanSnapshot = p => cleanCacheSha(cleanDate(cleanCwd(p))) + .replace(/(doctor\s+at\s).*$/gm, '$1{STACK}') const npmManifest = (version) => { return { @@ -389,15 +390,15 @@ t.test('incorrect owner', async t => { const { joinedOutput, logs, npm } = await loadMockNpm(t, { mocks: { ...mocks, - fs: { + 'fs/promises': { ...fs, - lstat: (p, cb) => { - const stat = fs.lstatSync(p) + lstat: async (p) => { + const stat = await fs.lstat(p) if (p.endsWith('_cacache')) { stat.uid += 1 stat.gid += 1 } - return cb(null, stat) + return stat }, }, }, @@ -418,9 +419,9 @@ t.test('incorrect permissions', async t => { const { joinedOutput, logs, npm } = await loadMockNpm(t, { mocks: { ...mocks, - fs: { + 'fs/promises': { ...fs, - access: () => { + access: async () => { throw new Error('Test Error') }, }, @@ -442,9 +443,13 @@ t.test('error reading directory', async t => { const { joinedOutput, logs, npm } = await loadMockNpm(t, { mocks: { ...mocks, - fs: { + 'fs/promises': { ...fs, - readdir: () => { + readdir: async (s, ...args) => { + if (s.endsWith('_logs')) { + return fs.readdir(s, ...args) + } + // if (s.endsWith) throw new Error('Test Error') }, }, diff --git a/deps/npm/test/lib/commands/exec.js b/deps/npm/test/lib/commands/exec.js index 094cb7113d07a3..d0aa5f9a33974c 100644 --- 
a/deps/npm/test/lib/commands/exec.js +++ b/deps/npm/test/lib/commands/exec.js @@ -76,7 +76,7 @@ t.test('--prefix', async t => { }) // This is what `--prefix` does - npm.globalPrefix = npm.localPrefix + npm.config.globalPrefix = npm.config.localPrefix await registry.package({ manifest, diff --git a/deps/npm/test/lib/commands/help-search.js b/deps/npm/test/lib/commands/help-search.js index 8da725fad76924..354fb0291eb2f2 100644 --- a/deps/npm/test/lib/commands/help-search.js +++ b/deps/npm/test/lib/commands/help-search.js @@ -71,7 +71,7 @@ t.test('npm help-search long output with color', async t => { const chalk = await import('chalk').then(v => v.default) - const highlightedText = chalk.bgBlack.red('help-search') + const highlightedText = chalk.blue('help-search') t.equal( output.split('\n').some(line => line.includes(highlightedText)), true, diff --git a/deps/npm/test/lib/commands/hook.js b/deps/npm/test/lib/commands/hook.js index 382bc177e7001a..003dae647a35a2 100644 --- a/deps/npm/test/lib/commands/hook.js +++ b/deps/npm/test/lib/commands/hook.js @@ -1,6 +1,5 @@ const t = require('tap') const mockNpm = require('../../fixtures/mock-npm') -const { stripVTControlCharacters } = require('node:util') const mockHook = async (t, { hookResponse, ...npmOpts } = {}) => { const now = Date.now() @@ -31,6 +30,7 @@ const mockHook = async (t, { hookResponse, ...npmOpts } = {}) => { type: pkgTypes[name], endpoint: 'https://google.com', last_delivery: id % 2 === 0 ? now : undefined, + response_code: 200, })) }, rm: async (id, opts) => { @@ -85,7 +85,7 @@ t.test('npm hook add', async t => { }, 'provided the correct arguments to libnpmhook' ) - t.strictSame(outputs[0], ['+ semver -> https://google.com'], 'prints the correct output') + t.strictSame(outputs[0], '+ semver -> https://google.com', 'prints the correct output') }) t.test('npm hook add - correct owner hook output', async t => { @@ -102,7 +102,7 @@ t.test('npm hook add - correct owner hook output', async t => { }, 'provided the correct arguments to libnpmhook' ) - t.strictSame(outputs[0], ['+ ~npm -> https://google.com'], 'prints the correct output') + t.strictSame(outputs[0], '+ ~npm -> https://google.com', 'prints the correct output') }) t.test('npm hook add - correct scope hook output', async t => { @@ -119,7 +119,7 @@ t.test('npm hook add - correct scope hook output', async t => { }, 'provided the correct arguments to libnpmhook' ) - t.strictSame(outputs[0], ['+ @npmcli -> https://google.com'], 'prints the correct output') + t.strictSame(outputs[0], '+ @npmcli -> https://google.com', 'prints the correct output') }) t.test('npm hook add - unicode output', async t => { @@ -142,7 +142,7 @@ t.test('npm hook add - unicode output', async t => { }, 'provided the correct arguments to libnpmhook' ) - t.strictSame(outputs[0], ['+ semver ➜ https://google.com'], 'prints the correct output') + t.strictSame(outputs[0], '+ semver ➜ https://google.com', 'prints the correct output') }) t.test('npm hook add - json output', async t => { @@ -166,7 +166,7 @@ t.test('npm hook add - json output', async t => { 'provided the correct arguments to libnpmhook' ) t.strictSame( - JSON.parse(outputs[0][0]), + JSON.parse(outputs[0]), { id: 1, name: '@npmcli', @@ -199,12 +199,12 @@ t.test('npm hook add - parseable output', async t => { ) t.strictSame( - outputs[0][0].split(/\t/), + outputs[0].split(/\t/), ['id', 'name', 'type', 'endpoint'], 'prints the correct parseable output headers' ) t.strictSame( - outputs[1][0].split(/\t/), + outputs[1].split(/\t/), ['1', '@npmcli', 
'scope', 'https://google.com'], 'prints the correct parseable values' ) @@ -243,11 +243,18 @@ t.test('npm hook ls', async t => { }, 'received the correct arguments' ) - t.equal(outputs[0][0], 'You have 3 hooks configured.', 'prints the correct header') - const out = stripVTControlCharacters(outputs[1][0]) - t.match(out, /semver.*https:\/\/google.com.*\n.*\n.*never triggered/, 'prints package hook') - t.match(out, /@npmcli.*https:\/\/google.com.*\n.*\n.*triggered just now/, 'prints scope hook') - t.match(out, /~npm.*https:\/\/google.com.*\n.*\n.*never triggered/, 'prints owner hook') + t.strictSame(outputs, [ + 'You have 3 hooks configured.', + 'Hook 1: semver', + 'Endpoint: https://google.com', + 'Never triggered\n', + 'Hook 2: @npmcli', + 'Endpoint: https://google.com', + 'Triggered just now, response code was "200"\n', + 'Hook 3: ~npm', + 'Endpoint: https://google.com', + 'Never triggered\n', + ]) }) t.test('npm hook ls, no results', async t => { @@ -266,7 +273,7 @@ t.test('npm hook ls, no results', async t => { }, 'received the correct arguments' ) - t.equal(outputs[0][0], "You don't have any hooks configured yet.", 'prints the correct result') + t.strictSame(outputs, [`You don't have any hooks configured yet.`]) }) t.test('npm hook ls, single result', async t => { @@ -292,9 +299,12 @@ t.test('npm hook ls, single result', async t => { }, 'received the correct arguments' ) - t.equal(outputs[0][0], 'You have one hook configured.', 'prints the correct header') - const out = stripVTControlCharacters(outputs[1][0]) - t.match(out, /semver.*https:\/\/google.com.*\n.*\n.*never triggered/, 'prints package hook') + t.strictSame(outputs, [ + 'You have 1 hook configured.', + 'Hook 1: semver', + 'Endpoint: https://google.com', + 'Never triggered\n', + ]) }) t.test('npm hook ls - json output', async t => { @@ -361,12 +371,12 @@ t.test('npm hook ls - parseable output', async t => { 'received the correct arguments' ) t.strictSame( - outputs.map(line => line[0].split(/\t/)), + outputs.map(line => line.split(/\t/)), [ - ['id', 'name', 'type', 'endpoint', 'last_delivery'], - ['1', 'semver', 'package', 'https://google.com', ''], - ['2', '@npmcli', 'scope', 'https://google.com', `${now}`], - ['3', 'npm', 'owner', 'https://google.com', ''], + ['id', 'name', 'type', 'endpoint', 'last_delivery', 'response_code'], + ['1', 'semver', 'package', 'https://google.com', '', '200'], + ['2', '@npmcli', 'scope', 'https://google.com', `${now}`, '200'], + ['3', 'npm', 'owner', 'https://google.com', '', '200'], ], 'prints the correct result' ) @@ -404,7 +414,7 @@ t.test('npm hook rm', async t => { }, 'received the correct arguments' ) - t.strictSame(outputs[0], ['- semver X https://google.com'], 'printed the correct output') + t.strictSame(outputs[0], '- semver X https://google.com', 'printed the correct output') }) t.test('npm hook rm - unicode output', async t => { @@ -425,7 +435,7 @@ t.test('npm hook rm - unicode output', async t => { }, 'received the correct arguments' ) - t.strictSame(outputs[0], ['- semver ✘ https://google.com'], 'printed the correct output') + t.strictSame(outputs[0], '- semver ✘ https://google.com', 'printed the correct output') }) t.test('npm hook rm - silent output', async t => { @@ -496,7 +506,7 @@ t.test('npm hook rm - parseable output', async t => { 'received the correct arguments' ) t.strictSame( - outputs.map(line => line[0].split(/\t/)), + outputs.map(line => line.split(/\t/)), [ ['id', 'name', 'type', 'endpoint'], ['1', 'semver', 'package', 'https://google.com'], @@ -520,7 +530,7 @@ 
t.test('npm hook update', async t => { }, 'received the correct arguments' ) - t.strictSame(outputs[0], ['+ semver -> https://google.com'], 'printed the correct output') + t.strictSame(outputs[0], '+ semver -> https://google.com', 'printed the correct output') }) t.test('npm hook update - unicode', async t => { @@ -543,7 +553,7 @@ t.test('npm hook update - unicode', async t => { }, 'received the correct arguments' ) - t.strictSame(outputs[0], ['+ semver ➜ https://google.com'], 'printed the correct output') + t.strictSame(outputs[0], '+ semver ➜ https://google.com', 'printed the correct output') }) t.test('npm hook update - json output', async t => { @@ -599,7 +609,7 @@ t.test('npm hook update - parseable output', async t => { 'received the correct arguments' ) t.strictSame( - outputs.map(line => line[0].split(/\t/)), + outputs.map(line => line.split(/\t/)), [ ['id', 'name', 'type', 'endpoint'], ['1', 'semver', 'package', 'https://google.com'], diff --git a/deps/npm/test/lib/commands/init.js b/deps/npm/test/lib/commands/init.js index cb708303f405a4..6dd23560bf8fa3 100644 --- a/deps/npm/test/lib/commands/init.js +++ b/deps/npm/test/lib/commands/init.js @@ -238,8 +238,7 @@ t.test('npm init cancel', async t => { await npm.exec('init', []) - t.equal(logs.warn[0][0], 'init', 'should have init title') - t.equal(logs.warn[0][1], 'canceled', 'should log canceled') + t.equal(logs.warn[0], 'init canceled', 'should have init title and canceled') }) t.test('npm init error', async t => { @@ -335,7 +334,7 @@ t.test('workspaces', async t => { 'should exit with missing package.json file error' ) - t.equal(logs.warn[0][0], 'Missing package.json. Try with `--include-workspace-root`.') + t.equal(logs.warn[0], 'init Missing package.json. Try with `--include-workspace-root`.') }) await t.test('bad package.json when settting workspace', async t => { diff --git a/deps/npm/test/lib/commands/login.js b/deps/npm/test/lib/commands/login.js index b42d3001ebb903..a55637f9e00e2f 100644 --- a/deps/npm/test/lib/commands/login.js +++ b/deps/npm/test/lib/commands/login.js @@ -1,12 +1,12 @@ const t = require('tap') -const fs = require('fs') -const path = require('path') +const fs = require('node:fs') +const path = require('node:path') +const stream = require('node:stream') const ini = require('ini') const { load: loadMockNpm } = require('../../fixtures/mock-npm.js') const mockGlobals = require('@npmcli/mock-globals') const MockRegistry = require('@npmcli/mock-registry') -const stream = require('stream') const mockLogin = async (t, { stdin: stdinLines, registry: registryUrl, ...options } = {}) => { let stdin diff --git a/deps/npm/test/lib/commands/logout.js b/deps/npm/test/lib/commands/logout.js index 881003729ab4a1..3087c8bb1e61d1 100644 --- a/deps/npm/test/lib/commands/logout.js +++ b/deps/npm/test/lib/commands/logout.js @@ -18,8 +18,8 @@ t.test('token logout - user config', async t => { mockRegistry.logout('@foo/') await npm.exec('logout', []) t.equal( - logs.verbose.find(l => l[0] === 'logout')[1], - 'clearing token for https://registry.npmjs.org/', + logs.verbose.byTitle('logout')[0], + 'logout clearing token for https://registry.npmjs.org/', 'should log message with correct registry' ) const userRc = await fs.readFile(join(home, '.npmrc'), 'utf-8') @@ -45,8 +45,8 @@ t.test('token scoped logout - user config', async t => { mockRegistry.logout('@bar/') await npm.exec('logout', []) t.equal( - logs.verbose.find(l => l[0] === 'logout')[1], - 'clearing token for https://diff-registry.npmjs.com/', + 
logs.verbose.byTitle('logout')[0], + 'logout clearing token for https://diff-registry.npmjs.com/', 'should log message with correct registry' ) @@ -67,8 +67,8 @@ t.test('user/pass logout - user config', async t => { await npm.exec('logout', []) t.equal( - logs.verbose.find(l => l[0] === 'logout')[1], - 'clearing user credentials for https://registry.npmjs.org/', + logs.verbose.byTitle('logout')[0], + 'logout clearing user credentials for https://registry.npmjs.org/', 'should log message with correct registry' ) @@ -106,8 +106,8 @@ t.test('ignore invalid scoped registry config', async t => { await npm.exec('logout', []) t.equal( - logs.verbose.find(l => l[0] === 'logout')[1], - 'clearing token for https://registry.npmjs.org/', + logs.verbose.byTitle('logout')[0], + 'logout clearing token for https://registry.npmjs.org/', 'should log message with correct registry' ) const userRc = await fs.readFile(join(home, '.npmrc'), 'utf-8') @@ -135,8 +135,8 @@ t.test('token logout - project config', async t => { await npm.exec('logout', []) t.equal( - logs.verbose.find(l => l[0] === 'logout')[1], - 'clearing token for https://registry.npmjs.org/', + logs.verbose.byTitle('logout')[0], + 'logout clearing token for https://registry.npmjs.org/', 'should log message with correct registry' ) const userRc = await fs.readFile(join(home, '.npmrc'), 'utf-8') @@ -145,8 +145,8 @@ t.test('token logout - project config', async t => { 'other-config=true', ].join('\n'), 'leaves user config alone') t.equal( - logs.verbose.find(l => l[0] === 'logout')[1], - 'clearing token for https://registry.npmjs.org/', + logs.verbose.byTitle('logout')[0], + 'logout clearing token for https://registry.npmjs.org/', 'should log message with correct registry' ) const projectRc = await fs.readFile(join(prefix, '.npmrc'), 'utf-8') diff --git a/deps/npm/test/lib/commands/org.js b/deps/npm/test/lib/commands/org.js index 0c343f028d6dcf..7a1538d9c69e4a 100644 --- a/deps/npm/test/lib/commands/org.js +++ b/deps/npm/test/lib/commands/org.js @@ -1,6 +1,5 @@ const t = require('tap') const mockNpm = require('../../fixtures/mock-npm') -const { stripVTControlCharacters } = require('node:util') const mockOrg = async (t, { orgSize = 1, orgList = {}, ...npmOpts } = {}) => { let setArgs = null @@ -92,7 +91,7 @@ t.test('npm org add', async t => { 'received the correct arguments' ) t.equal( - outputs[0][0], + outputs[0], 'Added username as developer to orgname. You now have 1 member in this org.', 'printed the correct output' ) @@ -142,7 +141,7 @@ t.test('npm org add - more users', async t => { 'received the correct arguments' ) t.equal( - outputs[0][0], + outputs[0], 'Added username as developer to orgname. You now have 5 members in this org.', 'printed the correct output' ) @@ -198,7 +197,7 @@ t.test('npm org add - parseable output', async t => { 'received the correct arguments' ) t.strictSame( - outputs.map(line => line[0].split(/\t/)), + outputs.map(line => line.split(/\t/)), [ ['org', 'orgsize', 'user', 'role'], ['orgname', '1', 'username', 'developer'], @@ -251,7 +250,7 @@ t.test('npm org rm', async t => { 'libnpmorg.ls received the correct args' ) t.equal( - outputs[0][0], + outputs[0], 'Successfully removed username from orgname. You now have 0 members in this org.', 'printed the correct output' ) @@ -301,7 +300,7 @@ t.test('npm org rm - one user left', async t => { 'libnpmorg.ls received the correct args' ) t.equal( - outputs[0][0], + outputs[0], 'Successfully removed username from orgname. 
You now have 1 member in this org.', 'printed the correct output' ) @@ -370,7 +369,7 @@ t.test('npm org rm - parseable output', async t => { 'libnpmorg.ls received the correct args' ) t.strictSame( - outputs.map(line => line[0].split(/\t/)), + outputs.map(line => line.split(/\t/)), [ ['user', 'org', 'userCount', 'deleted'], ['username', 'orgname', '0', 'true'], @@ -427,10 +426,11 @@ t.test('npm org ls', async t => { }, 'receieved the correct args' ) - const out = stripVTControlCharacters(outputs[0][0]) - t.match(out, /one.*developer/, 'contains the developer member') - t.match(out, /two.*admin/, 'contains the admin member') - t.match(out, /three.*owner/, 'contains the owner member') + t.strictSame(outputs, [ + 'one - developer', + 'three - owner', + 'two - admin', + ]) }) t.test('npm org ls - user filter', async t => { @@ -452,9 +452,9 @@ t.test('npm org ls - user filter', async t => { }, 'receieved the correct args' ) - const out = stripVTControlCharacters(outputs[0][0]) - t.match(out, /username.*admin/, 'contains the filtered member') - t.notMatch(out, /missing.*admin/, 'does not contain other members') + t.strictSame(outputs, [ + 'username - admin', + ]) }) t.test('npm org ls - user filter, missing user', async t => { @@ -475,9 +475,7 @@ t.test('npm org ls - user filter, missing user', async t => { }, 'receieved the correct args' ) - const out = stripVTControlCharacters(outputs[0][0]) - t.notMatch(out, /username/, 'does not contain the requested member') - t.notMatch(out, /missing.*admin/, 'does not contain other members') + t.strictSame(outputs, []) }) t.test('npm org ls - no org', async t => { @@ -533,7 +531,7 @@ t.test('npm org ls - parseable output', async t => { 'receieved the correct args' ) t.strictSame( - outputs.map(line => line[0].split(/\t/)), + outputs.map(line => line.split(/\t/)), [ ['user', 'role'], ['one', 'developer'], diff --git a/deps/npm/test/lib/commands/owner.js b/deps/npm/test/lib/commands/owner.js index 9329e8985e60c0..ec774d16470484 100644 --- a/deps/npm/test/lib/commands/owner.js +++ b/deps/npm/test/lib/commands/owner.js @@ -123,7 +123,7 @@ t.test('owner ls fails to retrieve packument', async t => { }) registry.nock.get(`/${spec.escapedName}`).reply(404) await t.rejects(npm.exec('owner', ['ls'])) - t.match(logs.error, [['owner ls', "Couldn't get owner data", '@npmcli/test-package']]) + t.match(logs.error.byTitle('owner ls'), [`owner ls Couldn't get owner data @npmcli/test-package`]) }) t.test('owner ls ', async t => { @@ -240,8 +240,8 @@ t.test('owner add already an owner', async t => { await npm.exec('owner', ['add', username, packageName]) t.equal(joinedOutput(), '') t.match( - logs.info, - [['owner add', 'Already a package owner: test-user-a ']] + logs.info.byTitle('owner add'), + [`Already a package owner: test-user-a `] ) }) @@ -256,7 +256,7 @@ t.test('owner add fails to retrieve user', async t => { }) registry.couchuser({ username, responseCode: 404, body: {} }) await t.rejects(npm.exec('owner', ['add', username, packageName])) - t.match(logs.error, [['owner mutate', `Error getting user data for ${username}`]]) + t.match(logs.error.byTitle('owner mutate'), [`Error getting user data for ${username}`]) }) t.test('owner add fails to PUT updates', async t => { @@ -380,7 +380,7 @@ t.test('owner rm not a current owner', async t => { registry.couchuser({ username }) await registry.package({ manifest }) await npm.exec('owner', ['rm', username, packageName]) - t.match(logs.info, [['owner rm', `Not a package owner: ${username}`]]) + t.match(logs.info.byTitle('owner 
rm'), [`Not a package owner: ${username}`]) }) t.test('owner rm cwd package', async t => { diff --git a/deps/npm/test/lib/commands/pack.js b/deps/npm/test/lib/commands/pack.js index baec163c7b34d0..3ea67c78d996a3 100644 --- a/deps/npm/test/lib/commands/pack.js +++ b/deps/npm/test/lib/commands/pack.js @@ -17,8 +17,8 @@ t.test('should pack current directory with no arguments', async t => { }) await npm.exec('pack', []) const filename = 'test-package-1.0.0.tgz' - t.strictSame(outputs, [[filename]]) - t.matchSnapshot(logs.notice.map(([, m]) => m), 'logs pack contents') + t.strictSame(outputs, [filename]) + t.matchSnapshot(logs.notice, 'logs pack contents') t.ok(fs.statSync(path.resolve(npm.prefix, filename))) }) @@ -35,7 +35,7 @@ t.test('follows pack-destination config', async t => { }) await npm.exec('pack', []) const filename = 'test-package-1.0.0.tgz' - t.strictSame(outputs, [[filename]]) + t.strictSame(outputs, [filename]) t.ok(fs.statSync(path.resolve(npm.prefix, 'tar-destination', filename))) }) @@ -50,7 +50,7 @@ t.test('should pack given directory for scoped package', async t => { }) await npm.exec('pack', []) const filename = 'npm-test-package-1.0.0.tgz' - t.strictSame(outputs, [[filename]]) + t.strictSame(outputs, [filename]) t.ok(fs.statSync(path.resolve(npm.prefix, filename))) }) @@ -67,16 +67,19 @@ t.test('should log output as valid json', async t => { await npm.exec('pack', []) const filename = 'test-package-1.0.0.tgz' t.matchSnapshot(outputs.map(JSON.parse), 'outputs as json') - t.matchSnapshot(logs.notice.map(([, m]) => m), 'logs pack contents') + t.matchSnapshot(logs.notice, 'logs pack contents') t.ok(fs.statSync(path.resolve(npm.prefix, filename))) }) t.test('should log scoped package output as valid json', async t => { - const { npm, outputs, logs } = await loadMockNpm(t, { + const { npm, outputs, outputErrors, logs } = await loadMockNpm(t, { prefixDir: { 'package.json': JSON.stringify({ name: '@myscope/test-package', version: '1.0.0', + scripts: { + prepack: 'echo prepack!', + }, }), }, config: { json: true }, @@ -84,7 +87,8 @@ t.test('should log scoped package output as valid json', async t => { await npm.exec('pack', []) const filename = 'myscope-test-package-1.0.0.tgz' t.matchSnapshot(outputs.map(JSON.parse), 'outputs as json') - t.matchSnapshot(logs.notice.map(([, m]) => m), 'logs pack contents') + t.matchSnapshot(outputErrors, 'stderr has banners') + t.matchSnapshot(logs.notice, 'logs pack contents') t.ok(fs.statSync(path.resolve(npm.prefix, filename))) }) @@ -100,8 +104,8 @@ t.test('dry run', async t => { }) await npm.exec('pack', []) const filename = 'test-package-1.0.0.tgz' - t.strictSame(outputs, [[filename]]) - t.matchSnapshot(logs.notice.map(([, m]) => m), 'logs pack contents') + t.strictSame(outputs, [filename]) + t.matchSnapshot(logs.notice, 'logs pack contents') t.throws(() => fs.statSync(path.resolve(npm.prefix, filename))) }) @@ -121,29 +125,18 @@ t.test('foreground-scripts defaults to true', async t => { config: { 'dry-run': true }, }) - /* eslint no-console: 0 */ - // TODO: replace this with `const results = t.intercept(console, 'log')` - const log = console.log - t.teardown(() => { - console.log = log - }) - const caughtLogs = [] - console.log = (...args) => { - caughtLogs.push(args) - } - // end TODO - await npm.exec('pack', []) const filename = 'test-fg-scripts-0.0.0.tgz' - t.same( - caughtLogs, + t.strictSame( + outputs, [ - ['\n> test-fg-scripts@0.0.0 prepack\n> echo prepack!\n'], - ['\n> test-fg-scripts@0.0.0 postpack\n> echo postpack!\n'], + '\n> 
test-fg-scripts@0.0.0 prepack\n> echo prepack!\n', + '\n> test-fg-scripts@0.0.0 postpack\n> echo postpack!\n', + filename, ], - 'prepack and postpack log to stdout') - t.strictSame(outputs, [[filename]]) - t.matchSnapshot(logs.notice.map(([, m]) => m), 'logs pack contents') + 'prepack and postpack log to stdout' + ) + t.matchSnapshot(logs.notice, 'logs pack contents') t.throws(() => fs.statSync(path.resolve(npm.prefix, filename))) }) @@ -163,26 +156,11 @@ t.test('foreground-scripts can still be set to false', async t => { config: { 'dry-run': true, 'foreground-scripts': false }, }) - /* eslint no-console: 0 */ - // TODO: replace this with `const results = t.intercept(console, 'log')` - const log = console.log - t.teardown(() => { - console.log = log - }) - const caughtLogs = [] - console.log = (...args) => { - caughtLogs.push(args) - } - // end TODO - await npm.exec('pack', []) const filename = 'test-fg-scripts-0.0.0.tgz' - t.same( - caughtLogs, - [], - 'prepack and postpack do not log to stdout') - t.strictSame(outputs, [[filename]]) - t.matchSnapshot(logs.notice.map(([, m]) => m), 'logs pack contents') + + t.strictSame(outputs, [filename], 'prepack and postpack do not log to stdout') + t.matchSnapshot(logs.notice, 'logs pack contents') t.throws(() => fs.statSync(path.resolve(npm.prefix, filename))) }) @@ -235,24 +213,24 @@ t.test('workspaces', async t => { t.test('all workspaces', async t => { const { npm, outputs } = await loadWorkspaces(t) await npm.exec('pack', []) - t.strictSame(outputs, [['workspace-a-1.0.0.tgz'], ['workspace-b-1.0.0.tgz']]) + t.strictSame(outputs, ['workspace-a-1.0.0.tgz', 'workspace-b-1.0.0.tgz']) }) t.test('all workspaces, `.` first arg', async t => { const { npm, outputs } = await loadWorkspaces(t) await npm.exec('pack', ['.']) - t.strictSame(outputs, [['workspace-a-1.0.0.tgz'], ['workspace-b-1.0.0.tgz']]) + t.strictSame(outputs, ['workspace-a-1.0.0.tgz', 'workspace-b-1.0.0.tgz']) }) t.test('one workspace', async t => { const { npm, outputs } = await loadWorkspaces(t) await npm.exec('pack', ['workspace-a']) - t.strictSame(outputs, [['workspace-a-1.0.0.tgz']]) + t.strictSame(outputs, ['workspace-a-1.0.0.tgz']) }) t.test('specific package', async t => { const { npm, outputs } = await loadWorkspaces(t) await npm.exec('pack', [npm.prefix]) - t.strictSame(outputs, [['workspaces-test-1.0.0.tgz']]) + t.strictSame(outputs, ['workspaces-test-1.0.0.tgz']) }) }) diff --git a/deps/npm/test/lib/commands/ping.js b/deps/npm/test/lib/commands/ping.js index 77201955ff2a8b..7f90ea394f9aeb 100644 --- a/deps/npm/test/lib/commands/ping.js +++ b/deps/npm/test/lib/commands/ping.js @@ -10,7 +10,10 @@ t.test('no details', async t => { }) registry.ping() await npm.exec('ping', []) - t.match(logs.notice, [['PING', 'https://registry.npmjs.org/'], ['PONG', /[0-9]+ms/]]) + t.match(logs.notice, [ + 'PING https://registry.npmjs.org/', + /PONG [0-9]+ms/, + ]) t.equal(joinedOutput(), '') }) @@ -20,12 +23,12 @@ t.test('with details', async t => { tap: t, registry: npm.config.get('registry'), }) - registry.ping({ body: { test: true } }) + registry.ping({ body: { test: true, test2: true } }) await npm.exec('ping', []) t.match(logs.notice, [ - ['PING', 'https://registry.npmjs.org/'], - ['PONG', /[0-9]+ms/], - ['PONG', '{\n "test": true\n}'], + `PING https://registry.npmjs.org/`, + /PONG [0-9]+ms/, + `PONG {\nPONG "test": true,\nPONG "test2": true\nPONG }`, ]) t.match(joinedOutput(), '') }) @@ -40,7 +43,10 @@ t.test('valid json', async t => { }) registry.ping() await npm.exec('ping', []) - 
t.match(logs.notice, [['PING', 'https://registry.npmjs.org/'], ['PONG', /[0-9]+ms/]]) + t.match(logs.notice, [ + 'PING https://registry.npmjs.org/', + /PONG [0-9]+ms/, + ]) t.match(JSON.parse(joinedOutput()), { registry: npm.config.get('registry'), time: /[0-9]+/, @@ -58,7 +64,10 @@ t.test('invalid json', async t => { }) registry.ping({ body: '{not: real"json]' }) await npm.exec('ping', []) - t.match(logs.notice, [['PING', 'https://registry.npmjs.org/'], ['PONG', /[0-9]+ms/]]) + t.match(logs.notice, [ + 'PING https://registry.npmjs.org/', + /PONG [0-9]+ms/, + ]) t.match(JSON.parse(joinedOutput()), { registry: npm.config.get('registry'), time: /[0-9]+/, diff --git a/deps/npm/test/lib/commands/profile.js b/deps/npm/test/lib/commands/profile.js index 784523f7ccd8ad..8bbffd1675d07f 100644 --- a/deps/npm/test/lib/commands/profile.js +++ b/deps/npm/test/lib/commands/profile.js @@ -9,13 +9,6 @@ const mockProfile = async (t, { npmProfile, readUserInfo, qrcode, config, ...opt async createToken () {}, }, 'qrcode-terminal': qrcode || { generate: (url, cb) => cb() }, - 'cli-table3': class extends Array { - toString () { - return this.filter(Boolean) - .map(i => [...Object.entries(i)].map(v => v.join(': '))) - .join('\n') - } - }, '{LIB}/utils/read-user-info.js': readUserInfo || { async password () {}, async otp () {}, @@ -96,16 +89,6 @@ t.test('profile get no args', async t => { t.matchSnapshot(result(), 'should output all profile info as parseable result') }) - t.test('--color', async t => { - const { profile, result } = await mockProfile(t, { - npmProfile: defaultNpmProfile, - config: { color: 'always' }, - }) - - await profile.exec(['get']) - t.matchSnapshot(result(), 'should output all profile info with color result') - }) - t.test('no tfa enabled', async t => { const npmProfile = { async get () { @@ -473,8 +456,8 @@ t.test('profile set ', async t => { await profile.exec(['set', 'password']) t.equal( - logs.warn[0][1], - 'Passwords do not match, please try again.', + logs.warn.byTitle('profile')[0], + 'profile Passwords do not match, please try again.', 'should log password mismatch message' ) @@ -557,7 +540,7 @@ t.test('enable-2fa', async t => { t.test('from basic username/password auth', async t => { const npmProfile = { - async createToken (pass) { + async createToken () { return {} }, } @@ -604,7 +587,7 @@ t.test('enable-2fa', async t => { async get () { return userProfile }, - async set (newProfile, conf) { + async set (newProfile) { t.match( newProfile, { @@ -676,7 +659,7 @@ t.test('enable-2fa', async t => { }, } }, - async set (newProfile, conf) { + async set (newProfile) { setCount++ // when profile response shows that 2fa is pending the @@ -764,7 +747,7 @@ t.test('enable-2fa', async t => { }, } }, - async set (newProfile, conf) { + async set () { return { ...userProfile, tfa: 'http://foo?secret=1234', @@ -776,7 +759,7 @@ t.test('enable-2fa', async t => { async password () { return 'password1234' }, - async otp (label) { + async otp () { return '123456' }, } @@ -803,7 +786,7 @@ t.test('enable-2fa', async t => { async get () { return userProfile }, - async set (newProfile, conf) { + async set () { return { ...userProfile, tfa: null, @@ -826,7 +809,7 @@ t.test('enable-2fa', async t => { config: { otp: '123456' }, }) - npm.config.getCredentialsByURI = reg => { + npm.config.getCredentialsByURI = () => { return { token: 'token' } } @@ -847,7 +830,7 @@ t.test('enable-2fa', async t => { tfa: undefined, } }, - async set (newProfile, conf) { + async set () { return { ...userProfile, tfa: null, @@ 
-869,7 +852,7 @@ t.test('enable-2fa', async t => { readUserInfo, }) - npm.config.getCredentialsByURI = reg => { + npm.config.getCredentialsByURI = () => { return { token: 'token' } } @@ -890,7 +873,7 @@ t.test('enable-2fa', async t => { tfa: undefined, } }, - async set (newProfile, conf) { + async set () { return { ...userProfile, tfa: null, @@ -912,7 +895,7 @@ t.test('enable-2fa', async t => { readUserInfo, }) - npm.config.getCredentialsByURI = reg => { + npm.config.getCredentialsByURI = () => { return { token: 'token' } } @@ -950,7 +933,7 @@ t.test('disable-2fa', async t => { async get () { return userProfile }, - async set (newProfile, conf) { + async set (newProfile) { t.same( newProfile, { @@ -1031,7 +1014,7 @@ t.test('disable-2fa', async t => { async get () { return userProfile }, - async set (newProfile, conf) { + async set (newProfile) { t.same( newProfile, { @@ -1049,7 +1032,7 @@ t.test('disable-2fa', async t => { async password () { return 'password1234' }, - async otp (label) { + async otp () { throw new Error('should not ask for otp') }, } diff --git a/deps/npm/test/lib/commands/publish.js b/deps/npm/test/lib/commands/publish.js index 751cd97d8acf6e..85a66d88b8b340 100644 --- a/deps/npm/test/lib/commands/publish.js +++ b/deps/npm/test/lib/commands/publish.js @@ -83,6 +83,8 @@ t.test('re-loads publishConfig.registry if added during script process', async t const { joinedOutput, npm } = await loadMockNpm(t, { config: { [`${alternateRegistry.slice(6)}/:_authToken`]: 'test-other-token', + // Keep output from leaking into tap logs for readability + 'foreground-scripts': false, }, prefixDir: { 'package.json': JSON.stringify({ @@ -136,6 +138,8 @@ t.test('prioritize CLI flags over publishConfig', async t => { const { joinedOutput, npm } = await loadMockNpm(t, { config: { [`${alternateRegistry.slice(6)}/:_authToken`]: 'test-other-token', + // Keep output from leaking into tap logs for readability + 'foreground-scripts': false, }, prefixDir: { 'package.json': JSON.stringify({ @@ -220,7 +224,7 @@ t.test('dry-run', async t => { }) t.test('foreground-scripts defaults to true', async t => { - const { joinedOutput, npm, logs } = await loadMockNpm(t, { + const { outputs, npm, logs } = await loadMockNpm(t, { config: { 'dry-run': true, ...auth, @@ -238,33 +242,22 @@ t.test('foreground-scripts defaults to true', async t => { }, }) - /* eslint no-console: 0 */ - // TODO: replace this with `const results = t.intercept(console, 'log')` - const log = console.log - t.teardown(() => { - console.log = log - }) - const caughtLogs = [] - console.log = (...args) => { - caughtLogs.push(args) - } - // end TODO - await npm.exec('publish', []) - t.equal(joinedOutput(), `+ test-fg-scripts@0.0.0`) + t.matchSnapshot(logs.notice) - t.same( - caughtLogs, + t.strictSame( + outputs, [ - ['\n> test-fg-scripts@0.0.0 prepack\n> echo prepack!\n'], - ['\n> test-fg-scripts@0.0.0 postpack\n> echo postpack!\n'], + '\n> test-fg-scripts@0.0.0 prepack\n> echo prepack!\n', + '\n> test-fg-scripts@0.0.0 postpack\n> echo postpack!\n', + `+ test-fg-scripts@0.0.0`, ], 'prepack and postpack log to stdout') }) t.test('foreground-scripts can still be set to false', async t => { - const { joinedOutput, npm, logs } = await loadMockNpm(t, { + const { outputs, npm, logs } = await loadMockNpm(t, { config: { 'dry-run': true, 'foreground-scripts': false, @@ -283,25 +276,13 @@ t.test('foreground-scripts can still be set to false', async t => { }, }) - /* eslint no-console: 0 */ - // TODO: replace this with `const results = t.intercept(console, 
'log')` - const log = console.log - t.teardown(() => { - console.log = log - }) - const caughtLogs = [] - console.log = (...args) => { - caughtLogs.push(args) - } - // end TODO - await npm.exec('publish', []) - t.equal(joinedOutput(), `+ test-fg-scripts@0.0.0`) + t.matchSnapshot(logs.notice) - t.same( - caughtLogs, - [], + t.strictSame( + outputs, + [`+ test-fg-scripts@0.0.0`], 'prepack and postpack do not log to stdout') }) @@ -871,6 +852,7 @@ t.test('manifest', async t => { const { npm } = await loadMockNpm(t, { config: { ...auth, + 'foreground-scripts': false, }, chdir: () => root, mocks: { diff --git a/deps/npm/test/lib/commands/query.js b/deps/npm/test/lib/commands/query.js index 5292c50e1d365b..0907a9d0f4206f 100644 --- a/deps/npm/test/lib/commands/query.js +++ b/deps/npm/test/lib/commands/query.js @@ -258,7 +258,7 @@ t.test('expect entries', t => { npm.config.set('expect-results', false) await npm.exec('query', ['#a']) t.not(joinedOutput(), '[]', 'has entries') - t.same(logs.warn, [['query', 'Expected no results, got 1']]) + t.same(logs.warn.byTitle('query'), ['query Expected no results, got 1']) t.ok(process.exitCode, 'exits with code') }) t.test('false, no entries', async t => { @@ -286,7 +286,7 @@ t.test('expect entries', t => { npm.config.set('expect-results', true) await npm.exec('query', ['#b']) t.equal(joinedOutput(), '[]', 'does not have entries') - t.same(logs.warn, [['query', 'Expected results, got 0']]) + t.same(logs.warn.byTitle('query'), ['query Expected results, got 0']) t.ok(process.exitCode, 'exits with code') }) t.test('count, matches', async t => { @@ -305,7 +305,7 @@ t.test('expect entries', t => { npm.config.set('expect-result-count', 1) await npm.exec('query', ['#b']) t.equal(joinedOutput(), '[]', 'does not have entries') - t.same(logs.warn, [['query', 'Expected 1 result, got 0']]) + t.same(logs.warn.byTitle('query'), ['query Expected 1 result, got 0']) t.ok(process.exitCode, 'exits with code') }) t.test('count 3, does not match', async t => { @@ -315,7 +315,7 @@ t.test('expect entries', t => { npm.config.set('expect-result-count', 3) await npm.exec('query', ['#b']) t.equal(joinedOutput(), '[]', 'does not have entries') - t.same(logs.warn, [['query', 'Expected 3 results, got 0']]) + t.same(logs.warn.byTitle('query'), ['query Expected 3 results, got 0']) t.ok(process.exitCode, 'exits with code') }) t.end() diff --git a/deps/npm/test/lib/commands/run-script.js b/deps/npm/test/lib/commands/run-script.js index 24f51400e8dfc3..c5bb2b488c053d 100644 --- a/deps/npm/test/lib/commands/run-script.js +++ b/deps/npm/test/lib/commands/run-script.js @@ -30,7 +30,7 @@ const mockRs = async (t, { windows = false, runScript, ...opts } = {}) => { ...mock, RUN_SCRIPTS: () => RUN_SCRIPTS, runScript: mock['run-script'], - cleanLogs: () => mock.logs.error.flat().map(v => v.toString()).map(cleanCwd), + cleanLogs: () => mock.logs.error.map(cleanCwd), } } @@ -347,7 +347,6 @@ t.test('skip pre/post hooks when using ignoreScripts', async t => { env: 'env', }, }, - banner: true, event: 'env', }, ]) @@ -388,7 +387,6 @@ t.test('run silent', async t => { }, }, event: 'env', - banner: false, }, { event: 'postenv', @@ -428,14 +426,14 @@ t.test('list scripts', async t => { t.strictSame( output, [ - ['Lifecycle scripts included in x@1.2.3:'], - [' test\n exit 2'], - [' start\n node server.js'], - [' stop\n node kill-server.js'], - ['\navailable via `npm run-script`:'], - [' preenv\n echo before the env'], - [' postenv\n echo after the env'], - [''], + 'Lifecycle scripts included in x@1.2.3:', + ' 
test\n exit 2', + ' start\n node server.js', + ' stop\n node kill-server.js', + '\navailable via `npm run-script`:', + ' preenv\n echo before the env', + ' postenv\n echo after the env', + '', ], 'basic report' ) @@ -447,17 +445,17 @@ t.test('list scripts', async t => { }) t.test('warn json', async t => { const outputs = await mockList(t, { json: true }) - t.strictSame(outputs, [[JSON.stringify(scripts, 0, 2)]], 'json report') + t.strictSame(outputs, [JSON.stringify(scripts, 0, 2)], 'json report') }) t.test('parseable', async t => { const outputs = await mockList(t, { parseable: true }) t.strictSame(outputs, [ - ['test:exit 2'], - ['start:node server.js'], - ['stop:node kill-server.js'], - ['preenv:echo before the env'], - ['postenv:echo after the env'], + 'test:exit 2', + 'start:node server.js', + 'stop:node kill-server.js', + 'preenv:echo before the env', + 'postenv:echo after the env', ]) }) }) @@ -489,9 +487,9 @@ t.test('list scripts, only commands', async t => { await runScript.exec([]) t.strictSame(outputs, [ - ['Lifecycle scripts included in x@1.2.3:'], - [' preversion\n echo doing the version dance'], - [''], + 'Lifecycle scripts included in x@1.2.3:', + ' preversion\n echo doing the version dance', + '', ]) }) @@ -508,9 +506,9 @@ t.test('list scripts, only non-commands', async t => { await runScript.exec([]) t.strictSame(outputs, [ - ['Scripts available in x@1.2.3 via `npm run-script`:'], - [' glorp\n echo doing the glerp glop'], - [''], + 'Scripts available in x@1.2.3 via `npm run-script`:', + ' glorp\n echo doing the glerp glop', + '', ]) }) @@ -594,113 +592,109 @@ t.test('workspaces', async t => { t.test('list all scripts', async t => { const { outputs } = await mockWorkspaces(t) t.strictSame(outputs, [ - ['Scripts available in a@1.0.0 via `npm run-script`:'], - [' glorp\n echo a doing the glerp glop'], - [''], - ['Scripts available in b@2.0.0 via `npm run-script`:'], - [' glorp\n echo b doing the glerp glop'], - [''], - ['Lifecycle scripts included in c@1.0.0:'], - [' test\n exit 0'], - [' posttest\n echo posttest'], - ['\navailable via `npm run-script`:'], - [' lorem\n echo c lorem'], - [''], - ['Lifecycle scripts included in d@1.0.0:'], - [' test\n exit 0'], - [' posttest\n echo posttest'], - [''], - ['Lifecycle scripts included in e:'], - [' test\n exit 0'], - [' start\n echo start something'], - [''], + 'Scripts available in a@1.0.0 via `npm run-script`:', + ' glorp\n echo a doing the glerp glop', + '', + 'Scripts available in b@2.0.0 via `npm run-script`:', + ' glorp\n echo b doing the glerp glop', + '', + 'Lifecycle scripts included in c@1.0.0:', + ' test\n exit 0', + ' posttest\n echo posttest', + '\navailable via `npm run-script`:', + ' lorem\n echo c lorem', + '', + 'Lifecycle scripts included in d@1.0.0:', + ' test\n exit 0', + ' posttest\n echo posttest', + '', + 'Lifecycle scripts included in e:', + ' test\n exit 0', + ' start\n echo start something', + '', ]) }) t.test('list regular scripts, filtered by name', async t => { const { outputs } = await mockWorkspaces(t, { workspaces: ['a', 'b'] }) t.strictSame(outputs, [ - ['Scripts available in a@1.0.0 via `npm run-script`:'], - [' glorp\n echo a doing the glerp glop'], - [''], - ['Scripts available in b@2.0.0 via `npm run-script`:'], - [' glorp\n echo b doing the glerp glop'], - [''], + 'Scripts available in a@1.0.0 via `npm run-script`:', + ' glorp\n echo a doing the glerp glop', + '', + 'Scripts available in b@2.0.0 via `npm run-script`:', + ' glorp\n echo b doing the glerp glop', + '', ]) }) t.test('list regular 
scripts, filtered by path', async t => { const { outputs } = await mockWorkspaces(t, { workspaces: ['./packages/a'] }) t.strictSame(outputs, [ - ['Scripts available in a@1.0.0 via `npm run-script`:'], - [' glorp\n echo a doing the glerp glop'], - [''], + 'Scripts available in a@1.0.0 via `npm run-script`:', + ' glorp\n echo a doing the glerp glop', + '', ]) }) t.test('list regular scripts, filtered by parent folder', async t => { const { outputs } = await mockWorkspaces(t, { workspaces: ['./packages'] }) t.strictSame(outputs, [ - ['Scripts available in a@1.0.0 via `npm run-script`:'], - [' glorp\n echo a doing the glerp glop'], - [''], - ['Scripts available in b@2.0.0 via `npm run-script`:'], - [' glorp\n echo b doing the glerp glop'], - [''], - ['Lifecycle scripts included in c@1.0.0:'], - [' test\n exit 0'], - [' posttest\n echo posttest'], - ['\navailable via `npm run-script`:'], - [' lorem\n echo c lorem'], - [''], - ['Lifecycle scripts included in d@1.0.0:'], - [' test\n exit 0'], - [' posttest\n echo posttest'], - [''], - ['Lifecycle scripts included in e:'], - [' test\n exit 0'], - [' start\n echo start something'], - [''], + 'Scripts available in a@1.0.0 via `npm run-script`:', + ' glorp\n echo a doing the glerp glop', + '', + 'Scripts available in b@2.0.0 via `npm run-script`:', + ' glorp\n echo b doing the glerp glop', + '', + 'Lifecycle scripts included in c@1.0.0:', + ' test\n exit 0', + ' posttest\n echo posttest', + '\navailable via `npm run-script`:', + ' lorem\n echo c lorem', + '', + 'Lifecycle scripts included in d@1.0.0:', + ' test\n exit 0', + ' posttest\n echo posttest', + '', + 'Lifecycle scripts included in e:', + ' test\n exit 0', + ' start\n echo start something', + '', ]) }) t.test('list all scripts with colors', async t => { const { outputs } = await mockWorkspaces(t, { color: 'always' }) t.strictSame(outputs, [ - [ - /* eslint-disable-next-line max-len */ - '\u001b[1mScripts\u001b[22m available in \x1B[32ma@1.0.0\x1B[39m via `\x1B[34mnpm run-script\x1B[39m`:', - ], - [' glorp\n \x1B[2mecho a doing the glerp glop\x1B[22m'], - [''], - [ - /* eslint-disable-next-line max-len */ - '\u001b[1mScripts\u001b[22m available in \x1B[32mb@2.0.0\x1B[39m via `\x1B[34mnpm run-script\x1B[39m`:', - ], - [' glorp\n \x1B[2mecho b doing the glerp glop\x1B[22m'], - [''], - ['\x1B[0m\x1B[1mLifecycle scripts\x1B[22m\x1B[0m included in \x1B[32mc@1.0.0\x1B[39m:'], - [' test\n \x1B[2mexit 0\x1B[22m'], - [' posttest\n \x1B[2mecho posttest\x1B[22m'], - ['\navailable via `\x1B[34mnpm run-script\x1B[39m`:'], - [' lorem\n \x1B[2mecho c lorem\x1B[22m'], - [''], - ['\x1B[0m\x1B[1mLifecycle scripts\x1B[22m\x1B[0m included in \x1B[32md@1.0.0\x1B[39m:'], - [' test\n \x1B[2mexit 0\x1B[22m'], - [' posttest\n \x1B[2mecho posttest\x1B[22m'], - [''], - ['\x1B[0m\x1B[1mLifecycle scripts\x1B[22m\x1B[0m included in \x1B[32me\x1B[39m:'], - [' test\n \x1B[2mexit 0\x1B[22m'], - [' start\n \x1B[2mecho start something\x1B[22m'], - [''], + /* eslint-disable-next-line max-len */ + '\u001b[1mScripts\u001b[22m available in \x1B[32ma@1.0.0\x1B[39m via `\x1B[34mnpm run-script\x1B[39m`:', + ' glorp\n \x1B[2mecho a doing the glerp glop\x1B[22m', + '', + /* eslint-disable-next-line max-len */ + '\u001b[1mScripts\u001b[22m available in \x1B[32mb@2.0.0\x1B[39m via `\x1B[34mnpm run-script\x1B[39m`:', + ' glorp\n \x1B[2mecho b doing the glerp glop\x1B[22m', + '', + '\x1B[0m\x1B[1mLifecycle scripts\x1B[22m\x1B[0m included in \x1B[32mc@1.0.0\x1B[39m:', + ' test\n \x1B[2mexit 0\x1B[22m', + ' posttest\n \x1B[2mecho 
posttest\x1B[22m', + '\navailable via `\x1B[34mnpm run-script\x1B[39m`:', + ' lorem\n \x1B[2mecho c lorem\x1B[22m', + '', + '\x1B[0m\x1B[1mLifecycle scripts\x1B[22m\x1B[0m included in \x1B[32md@1.0.0\x1B[39m:', + ' test\n \x1B[2mexit 0\x1B[22m', + ' posttest\n \x1B[2mecho posttest\x1B[22m', + '', + '\x1B[0m\x1B[1mLifecycle scripts\x1B[22m\x1B[0m included in \x1B[32me\x1B[39m:', + ' test\n \x1B[2mexit 0\x1B[22m', + ' start\n \x1B[2mecho start something\x1B[22m', + '', ]) }) t.test('list all scripts --json', async t => { const { outputs } = await mockWorkspaces(t, { json: true }) t.strictSame(outputs, [ - [ - '{\n' + + + '{\n' + ' "a": {\n' + ' "glorp": "echo a doing the glerp glop"\n' + ' },\n' + @@ -722,22 +716,22 @@ t.test('workspaces', async t => { ' },\n' + ' "noscripts": {}\n' + '}', - ], + ]) }) t.test('list all scripts --parseable', async t => { const { outputs } = await mockWorkspaces(t, { parseable: true }) t.strictSame(outputs, [ - ['a:glorp:echo a doing the glerp glop'], - ['b:glorp:echo b doing the glerp glop'], - ['c:test:exit 0'], - ['c:posttest:echo posttest'], - ['c:lorem:echo c lorem'], - ['d:test:exit 0'], - ['d:posttest:echo posttest'], - ['e:test:exit 0'], - ['e:start:echo start something'], + 'a:glorp:echo a doing the glerp glop', + 'b:glorp:echo b doing the glerp glop', + 'c:test:exit 0', + 'c:posttest:echo posttest', + 'c:lorem:echo c lorem', + 'd:test:exit 0', + 'd:posttest:echo posttest', + 'e:test:exit 0', + 'e:start:echo start something', ]) }) diff --git a/deps/npm/test/lib/commands/search.js b/deps/npm/test/lib/commands/search.js index 596c8499092291..de4a58ca78a8f5 100644 --- a/deps/npm/test/lib/commands/search.js +++ b/deps/npm/test/lib/commands/search.js @@ -4,206 +4,277 @@ const MockRegistry = require('@npmcli/mock-registry') const libnpmsearchResultFixture = require('../../fixtures/libnpmsearch-stream-result.js') -t.test('no args', async t => { - const { npm } = await loadMockNpm(t) - await t.rejects( - npm.exec('search', []), - /search must be called with arguments/, - 'should throw usage instructions' - ) -}) - -t.test('search text', async t => { - const { npm, joinedOutput } = await loadMockNpm(t) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), +t.test('search', t => { + t.test('no args', async t => { + const { npm } = await loadMockNpm(t) + await t.rejects( + npm.exec('search', []), + /search must be called with arguments/, + 'should throw usage instructions' + ) }) - registry.search({ results: libnpmsearchResultFixture }) - await npm.exec('search', ['libnpm']) - t.matchSnapshot(joinedOutput(), 'should have expected search results') -}) + t.test(' text', async t => { + const { npm, joinedOutput } = await loadMockNpm(t) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) -t.test('search --json', async t => { - const { npm, joinedOutput } = await loadMockNpm(t, { config: { json: true } }) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), + registry.search({ results: libnpmsearchResultFixture }) + await npm.exec('search', ['libnpm']) + t.matchSnapshot(joinedOutput(), 'should have expected search results') }) - registry.search({ results: libnpmsearchResultFixture }) + t.test(' --json', async t => { + const { npm, joinedOutput } = await loadMockNpm(t, { config: { json: true } }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) - await npm.exec('search', ['libnpm']) + registry.search({ results: 
libnpmsearchResultFixture }) - t.same( - JSON.parse(joinedOutput()), - libnpmsearchResultFixture, - 'should have expected search results as json' - ) -}) + await npm.exec('search', ['libnpm']) -t.test('search --parseable', async t => { - const { npm, joinedOutput } = await loadMockNpm(t, { config: { parseable: true } }) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), + t.same( + JSON.parse(joinedOutput()), + libnpmsearchResultFixture, + 'should have expected search results as json' + ) }) - registry.search({ results: libnpmsearchResultFixture }) - await npm.exec('search', ['libnpm']) - t.matchSnapshot(joinedOutput(), 'should have expected search results as parseable') -}) + t.test(' --parseable', async t => { + const { npm, joinedOutput } = await loadMockNpm(t, { config: { parseable: true } }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) -t.test('search --color', async t => { - const { npm, joinedOutput } = await loadMockNpm(t, { config: { color: 'always' } }) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), + registry.search({ results: libnpmsearchResultFixture }) + await npm.exec('search', ['libnpm']) + t.matchSnapshot(joinedOutput(), 'should have expected search results as parseable') }) - registry.search({ results: libnpmsearchResultFixture }) - await npm.exec('search', ['libnpm']) - t.matchSnapshot(joinedOutput(), 'should have expected search results with color') -}) + t.test(' --color', async t => { + const { npm, joinedOutput } = await loadMockNpm(t, { config: { color: 'always' } }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) -t.test('search //--color', async t => { - const { npm, joinedOutput } = await loadMockNpm(t, { config: { color: 'always' } }) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), + registry.search({ results: libnpmsearchResultFixture }) + await npm.exec('search', ['libnpm']) + t.matchSnapshot(joinedOutput(), 'should have expected search results with color') }) - registry.search({ results: libnpmsearchResultFixture }) - await npm.exec('search', ['/libnpm/']) - t.matchSnapshot(joinedOutput(), 'should have expected search results with color') -}) + t.test('//--color', async t => { + const { npm, joinedOutput } = await loadMockNpm(t, { config: { color: 'always' } }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) -t.test('search ', async t => { - const { npm, joinedOutput } = await loadMockNpm(t) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), - }) - - registry.search({ results: [{ - name: 'foo', - scope: 'unscoped', - version: '1.0.0', - description: '', - keywords: [], - date: null, - author: { name: 'Foo', email: 'foo@npmjs.com' }, - publisher: { name: 'Foo', email: 'foo@npmjs.com' }, - maintainers: [ - { username: 'foo', email: 'foo@npmjs.com' }, - ], - }, { - name: 'libnpmversion', - scope: 'unscoped', - version: '1.0.0', - description: '', - keywords: [], - date: null, - author: { name: 'Foo', email: 'foo@npmjs.com' }, - publisher: { name: 'Foo', email: 'foo@npmjs.com' }, - maintainers: [ - { username: 'foo', email: 'foo@npmjs.com' }, - ], - }] }) - - await npm.exec('search', ['foo']) - - t.matchSnapshot(joinedOutput(), 'should have filtered expected search results') -}) + registry.search({ results: libnpmsearchResultFixture }) + await npm.exec('search', ['/libnpm/']) 
+ t.matchSnapshot(joinedOutput(), 'should have expected search results with color') + }) -t.test('empty search results', async t => { - const { npm, joinedOutput } = await loadMockNpm(t) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), + t.test('', async t => { + const { npm, joinedOutput } = await loadMockNpm(t) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) + + registry.search({ results: [{ + name: 'foo', + scope: 'unscoped', + version: '1.0.0', + description: '', + keywords: [], + date: null, + author: { name: 'Foo', email: 'foo@npmjs.com' }, + publisher: { username: 'foo', email: 'foo@npmjs.com' }, + maintainers: [ + { username: 'foo', email: 'foo@npmjs.com' }, + ], + }, { + name: 'custom-registry', + scope: 'unscoped', + version: '1.0.0', + description: '', + keywords: [], + date: null, + author: { name: 'Foo', email: 'foo@npmjs.com' }, + maintainers: [ + { username: 'foo', email: 'foo@npmjs.com' }, + ], + }, { + name: 'libnpmversion', + scope: 'unscoped', + version: '1.0.0', + description: '', + keywords: [], + date: null, + author: { name: 'Foo', email: 'foo@npmjs.com' }, + publisher: { username: 'foo', email: 'foo@npmjs.com' }, + maintainers: [ + { username: 'foo', email: 'foo@npmjs.com' }, + ], + }] }) + + await npm.exec('search', ['foo']) + + t.matchSnapshot(joinedOutput(), 'should have filtered expected search results') }) - registry.search({ results: [] }) - await npm.exec('search', ['foo']) + t.test('no publisher', async t => { + const { npm, joinedOutput } = await loadMockNpm(t) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) + + registry.search({ results: [{ + name: 'custom-registry', + scope: 'unscoped', + version: '1.0.0', + description: '', + keywords: [], + date: null, + author: { name: 'Foo', email: 'foo@npmjs.com' }, + maintainers: [ + { username: 'foo', email: 'foo@npmjs.com' }, + ], + }, { + name: 'libnpmversion', + scope: 'unscoped', + version: '1.0.0', + description: '', + keywords: [], + date: null, + author: { name: 'Foo', email: 'foo@npmjs.com' }, + publisher: { username: 'foo', email: 'foo@npmjs.com' }, + maintainers: [ + { username: 'foo', email: 'foo@npmjs.com' }, + ], + }] }) + + await npm.exec('search', ['custom']) + + t.matchSnapshot(joinedOutput(), 'should have filtered expected search results') + }) - t.matchSnapshot(joinedOutput(), 'should have expected search results') -}) + t.test('empty search results', async t => { + const { npm, joinedOutput } = await loadMockNpm(t) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) -t.test('empty search results --json', async t => { - const { npm, joinedOutput } = await loadMockNpm(t, { config: { json: true } }) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), - }) + registry.search({ results: [] }) + await npm.exec('search', ['foo']) - registry.search({ results: [] }) + t.matchSnapshot(joinedOutput(), 'should have expected search results') + }) - await npm.exec('search', ['foo']) - t.equal(joinedOutput(), '\n[]\n', 'should have expected empty square brackets') -}) + t.test('empty search results --json', async t => { + const { npm, joinedOutput } = await loadMockNpm(t, { config: { json: true } }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) -t.test('search api response error', async t => { - const { npm } = await loadMockNpm(t) + registry.search({ results: [] 
}) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), + await npm.exec('search', ['foo']) + t.equal(joinedOutput(), '\n[]', 'should have expected empty square brackets') }) - registry.search({ error: 'ERR' }) + t.test('api response error', async t => { + const { npm } = await loadMockNpm(t) - await t.rejects( - npm.exec('search', ['foo']), - /ERR/, - 'should throw response error' - ) -}) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) -t.test('search exclude string', async t => { - const { npm, joinedOutput } = await loadMockNpm(t, { config: { searchexclude: 'libnpmversion' } }) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), - }) + registry.search({ error: 'ERR' }) - registry.search({ results: libnpmsearchResultFixture }) - await npm.exec('search', ['libnpm']) - t.matchSnapshot(joinedOutput(), 'results should not have libnpmversion') -}) + await t.rejects( + npm.exec('search', ['foo']), + /ERR/, + 'should throw response error' + ) + }) -t.test('search exclude username with upper case letters', async t => { - const { npm, joinedOutput } = await loadMockNpm(t, { config: { searchexclude: 'NLF' } }) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), + t.test('exclude string', async t => { + const { npm, joinedOutput } = await loadMockNpm(t, { + config: { + searchexclude: 'libnpmversion', + }, + }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) + + registry.search({ results: libnpmsearchResultFixture }) + await npm.exec('search', ['libnpm']) + t.matchSnapshot(joinedOutput(), 'results should not have libnpmversion') + }) + t.test('exclude string json', async t => { + const { npm, joinedOutput } = await loadMockNpm(t, { + config: { + json: true, + searchexclude: 'libnpmversion', + }, + }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) + + registry.search({ results: libnpmsearchResultFixture }) + await npm.exec('search', ['libnpm']) + t.matchSnapshot(JSON.parse(joinedOutput()), 'results should not have libnpmversion') }) - registry.search({ results: libnpmsearchResultFixture }) - await npm.exec('search', ['libnpm']) - t.matchSnapshot(joinedOutput(), 'results should not have nlf') -}) + t.test('exclude username with upper case letters', async t => { + const { npm, joinedOutput } = await loadMockNpm(t, { config: { searchexclude: 'NLF' } }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) -t.test('search exclude regex', async t => { - const { npm, joinedOutput } = await loadMockNpm(t, { config: { searchexclude: '/version/' } }) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), + registry.search({ results: libnpmsearchResultFixture }) + await npm.exec('search', ['libnpm']) + t.matchSnapshot(joinedOutput(), 'results should not have nlf') }) - registry.search({ results: libnpmsearchResultFixture }) - await npm.exec('search', ['libnpm']) - t.matchSnapshot(joinedOutput(), 'results should not have libnpmversion') -}) + t.test('exclude regex', async t => { + const { npm, joinedOutput } = await loadMockNpm(t, { config: { searchexclude: '/version/' } }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) -t.test('search exclude forward slash', async t => { - const { npm, joinedOutput } = await loadMockNpm(t, { config: { searchexclude: 
'/version' } }) - const registry = new MockRegistry({ - tap: t, - registry: npm.config.get('registry'), + registry.search({ results: libnpmsearchResultFixture }) + await npm.exec('search', ['libnpm']) + t.matchSnapshot(joinedOutput(), 'results should not have libnpmversion') }) - registry.search({ results: libnpmsearchResultFixture }) - await npm.exec('search', ['libnpm']) - t.matchSnapshot(joinedOutput(), 'results should not have libnpmversion') + t.test('exclude forward slash', async t => { + const { npm, joinedOutput } = await loadMockNpm(t, { config: { searchexclude: '/version' } }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + }) + + registry.search({ results: libnpmsearchResultFixture }) + await npm.exec('search', ['libnpm']) + t.matchSnapshot(joinedOutput(), 'results should not have libnpmversion') + }) + t.end() }) diff --git a/deps/npm/test/lib/commands/shrinkwrap.js b/deps/npm/test/lib/commands/shrinkwrap.js index 604a7db7a0b353..c5909a3ceaeac2 100644 --- a/deps/npm/test/lib/commands/shrinkwrap.js +++ b/deps/npm/test/lib/commands/shrinkwrap.js @@ -24,9 +24,8 @@ t.formatSnapshot = obj => // Run shrinkwrap against a specified prefixDir with config items // and make some assertions that should always be true. Sets // the results on t.context for use in child tests -const shrinkwrap = async (t, prefixDir = {}, config = {}, mocks = {}) => { +const shrinkwrap = async (t, prefixDir = {}, config = {}) => { const { npm, logs } = await loadMockNpm(t, { - mocks, config, prefixDir, }) @@ -37,13 +36,13 @@ const shrinkwrap = async (t, prefixDir = {}, config = {}, mocks = {}) => { const oldFile = resolve(npm.prefix, 'package-lock.json') t.notOk(fs.existsSync(oldFile), 'package-lock is always deleted') - t.same(logs.warn, [], 'no warnings') t.teardown(() => delete t.context) t.context = { localPrefix: prefixDir, config, shrinkwrap: JSON.parse(fs.readFileSync(newFile)), - logs: logs.notice.map(([, m]) => m), + logs: logs.notice, + warn: logs.warn, } } @@ -107,6 +106,8 @@ const NOTICES = { ], UPDATED: (v = '') => [`npm-shrinkwrap.json updated to version ${v}`], SAME: () => [`npm-shrinkwrap.json up to date`], + CONVERTING: (current, next) => + [`Converting lock file (npm-shrinkwrap.json) from v${current} -> v${next}`], } t.test('with nothing', t => @@ -114,10 +115,12 @@ t.test('with nothing', t => ancient: { shrinkwrap: { lockfileVersion: 3 }, logs: NOTICES.CREATED(3), + warn: [], }, ancientUpgrade: { shrinkwrap: { lockfileVersion: 3 }, logs: NOTICES.CREATED(3), + warn: [], }, }) ) @@ -127,22 +130,27 @@ t.test('with package-lock.json', t => ancient: { shrinkwrap: { lockfileVersion: 3 }, logs: NOTICES.RENAMED(3), + warn: NOTICES.CONVERTING(1, 3), }, ancientUpgrade: { shrinkwrap: { lockfileVersion: 3 }, logs: NOTICES.RENAMED(3), + warn: NOTICES.CONVERTING(1, 3), }, existing: { shrinkwrap: { lockfileVersion: 2 }, logs: NOTICES.RENAMED(), + warn: [], }, existingUpgrade: { shrinkwrap: { lockfileVersion: 3 }, logs: NOTICES.RENAMED(3), + warn: NOTICES.CONVERTING(2, 3), }, existingDowngrade: { shrinkwrap: { lockfileVersion: 1 }, logs: NOTICES.RENAMED(1), + warn: NOTICES.CONVERTING(2, 1), }, }) ) @@ -152,22 +160,27 @@ t.test('with npm-shrinkwrap.json', t => ancient: { shrinkwrap: { lockfileVersion: 3 }, logs: NOTICES.UPDATED(3), + warn: NOTICES.CONVERTING(1, 3), }, ancientUpgrade: { shrinkwrap: { lockfileVersion: 3 }, logs: NOTICES.UPDATED(3), + warn: NOTICES.CONVERTING(1, 3), }, existing: { shrinkwrap: { lockfileVersion: 2 }, logs: NOTICES.SAME(), + warn: [], }, 
existingUpgrade: { shrinkwrap: { lockfileVersion: 3 }, logs: NOTICES.UPDATED(3), + warn: NOTICES.CONVERTING(2, 3), }, existingDowngrade: { shrinkwrap: { lockfileVersion: 1 }, logs: NOTICES.UPDATED(1), + warn: NOTICES.CONVERTING(2, 1), }, }) ) @@ -177,22 +190,27 @@ t.test('with hidden lockfile', t => ancient: { shrinkwrap: { lockfileVersion: 1 }, logs: NOTICES.CREATED(), + warn: [], }, ancientUpgrade: { shrinkwrap: { lockfileVersion: 3 }, logs: NOTICES.CREATED(), + warn: NOTICES.CONVERTING(1, 3), }, existing: { shrinkwrap: { lockfileVersion: 2 }, logs: NOTICES.CREATED(), + warn: [], }, existingUpgrade: { shrinkwrap: { lockfileVersion: 3 }, logs: NOTICES.CREATED(3), + warn: NOTICES.CONVERTING(2, 3), }, existingDowngrade: { shrinkwrap: { lockfileVersion: 1 }, logs: NOTICES.CREATED(1), + warn: NOTICES.CONVERTING(2, 1), }, }) ) diff --git a/deps/npm/test/lib/commands/stars.js b/deps/npm/test/lib/commands/stars.js index d92ced950291f5..fc38ca77ac7810 100644 --- a/deps/npm/test/lib/commands/stars.js +++ b/deps/npm/test/lib/commands/stars.js @@ -18,7 +18,7 @@ const mockStars = async (t, { npmFetch = noop, exec = true, ...opts }) => { return { ...mock, result: mock.stars.output, - logs: () => mock.logs.filter(l => l[1] === 'stars').map(l => l[2]), + logs: () => mock.logs.byTitle('stars'), } } @@ -87,7 +87,7 @@ t.test('unauthorized request', async t => { t.strictSame( logs(), - ['auth is required to look up your username'], + ['stars auth is required to look up your username'], 'should warn auth required msg' ) @@ -121,7 +121,7 @@ t.test('no pkg starred', async t => { t.strictSame( logs(), - ['user has not starred any packages'], + ['stars user has not starred any packages'], 'should warn no starred packages msg' ) }) diff --git a/deps/npm/test/lib/commands/token.js b/deps/npm/test/lib/commands/token.js index 2bc4af4a81a3d1..f60a938b5b34b3 100644 --- a/deps/npm/test/lib/commands/token.js +++ b/deps/npm/test/lib/commands/token.js @@ -1,33 +1,38 @@ const t = require('tap') -const mockNpm = require('../../fixtures/mock-npm') +const { load: loadMockNpm } = require('../../fixtures/mock-npm.js') +const MockRegistry = require('@npmcli/mock-registry') +const mockGlobals = require('@npmcli/mock-globals') +const stream = require('node:stream') -const mockToken = async (t, { profile, getCredentialsByURI, readUserInfo, ...opts } = {}) => { - const mocks = {} +const authToken = 'abcd1234' +const password = 'this is not really a password' - if (profile) { - mocks['npm-profile'] = profile - } - - if (readUserInfo) { - mocks['{LIB}/utils/read-user-info.js'] = readUserInfo - } - - const mock = await mockNpm(t, { - ...opts, - command: 'token', - mocks, - }) - - // XXX: replace with mock registry - if (getCredentialsByURI) { - mock.npm.config.getCredentialsByURI = getCredentialsByURI - } - - return mock +const auth = { + '//registry.npmjs.org/:_authToken': authToken, } +const now = new Date().toISOString() +const tokens = [ + { + key: 'abcd1234abcd1234', + token: 'efgh5678efgh5678', + cidr_whitelist: null, + readonly: false, + created: now, + updated: now, + }, + { + key: 'abcd1256', + token: 'hgfe8765', + cidr_whitelist: ['192.168.1.1/32'], + readonly: true, + created: now, + updated: now, + }, +] + t.test('completion', async t => { - const { token } = await mockToken(t) + const { token } = await loadMockNpm(t, { command: 'token' }) const testComp = (argv, expect) => { t.resolveMatch(token.completion({ conf: { argv: { remain: argv } } }), expect, argv.join(' ')) @@ -44,476 +49,334 @@ t.test('completion', async t => { 
}) t.test('token foobar', async t => { - const { token } = await mockToken(t) + const { npm } = await loadMockNpm(t) - await t.rejects(token.exec(['foobar']), /foobar is not a recognized subcommand/) + await t.rejects(npm.exec('token', ['foobar']), /foobar is not a recognized subcommand/) }) t.test('token list', async t => { - const now = new Date().toISOString() - const tokens = [ - { - key: 'abcd1234abcd1234', - token: 'efgh5678efgh5678', - cidr_whitelist: null, - readonly: false, - created: now, - updated: now, - }, - { - key: 'abcd1256', - token: 'hgfe8765', - cidr_whitelist: ['192.168.1.1/32'], - readonly: true, - created: now, - updated: now, - }, - ] - - const { token, joinedOutput } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org', otp: '123456' }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - profile: { - listTokens: conf => { - t.same(conf.auth, { token: 'thisisnotarealtoken', otp: '123456' }) - return tokens - }, - }, + const { npm, outputs } = await loadMockNpm(t, { + config: { ...auth }, }) - - await token.exec([]) - - const lines = joinedOutput().split(/\r?\n/) - t.match(lines[3], ' abcd123 ', 'includes the trimmed key') - t.match(lines[3], ' efgh56… ', 'includes the trimmed token') - t.match(lines[3], ` ${now.slice(0, 10)} `, 'includes the trimmed creation timestamp') - t.match(lines[3], ' no ', 'includes the "no" string for readonly state') - t.match(lines[5], ' abcd125 ', 'includes the trimmed key') - t.match(lines[5], ' hgfe87… ', 'includes the trimmed token') - t.match(lines[5], ` ${now.slice(0, 10)} `, 'includes the trimmed creation timestamp') - t.match(lines[5], ' yes ', 'includes the "no" string for readonly state') - t.match(lines[5], ` ${tokens[1].cidr_whitelist.join(',')} `, 'includes the cidr whitelist') + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, + }) + registry.getTokens(tokens) + await npm.exec('token', []) + t.strictSame(outputs, [ + `Publish token efgh5678efgh5678… with id abcd123 created ${now.slice(0, 10)}`, + '', + `Read only token hgfe8765… with id abcd125 created ${now.slice(0, 10)}`, + 'with IP whitelist: 192.168.1.1/32', + '', + ]) }) t.test('token list json output', async t => { - const now = new Date().toISOString() - const tokens = [ - { - key: 'abcd1234abcd1234', - token: 'efgh5678efgh5678', - cidr_whitelist: null, - readonly: false, - created: now, - updated: now, - }, - ] - - const { token, joinedOutput } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org', json: true }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { username: 'foo', password: 'bar' } - }, - profile: { - listTokens: conf => { - t.same( - conf.auth, - { basic: { username: 'foo', password: 'bar' } }, - 'passes the correct auth' - ) - return tokens - }, + const { npm, joinedOutput } = await loadMockNpm(t, { + config: { + ...auth, + json: true, }, - }) - - await token.exec(['list']) - + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, + }) + registry.getTokens(tokens) + await npm.exec('token', ['list']) const parsed = JSON.parse(joinedOutput()) t.match(parsed, tokens, 'prints the json parsed tokens') }) t.test('token list parseable output', async t => { - const now = new Date().toISOString() - const tokens = [ - { - key: 
'abcd1234abcd1234', - token: 'efgh5678efgh5678', - cidr_whitelist: null, - readonly: false, - created: now, - updated: now, - }, - { - key: 'efgh5678ijkl9101', - token: 'hgfe8765', - cidr_whitelist: ['192.168.1.1/32'], - readonly: true, - created: now, - updated: now, - }, - ] - - const { token, joinedOutput } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org', parseable: true }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { auth: Buffer.from('foo:bar').toString('base64') } - }, - profile: { - listTokens: conf => { - t.same( - conf.auth, - { basic: { username: 'foo', password: 'bar' } }, - 'passes the correct auth' - ) - return tokens - }, + const { npm, outputs } = await loadMockNpm(t, { + config: { + ...auth, + parseable: true, }, }) - - await token.exec(['list']) - - const lines = joinedOutput().split(/\r?\n/) - - t.equal( - lines[0], - ['key', 'token', 'created', 'readonly', 'CIDR whitelist'].join('\t'), - 'prints header' - ) - - t.equal( - lines[1], - [tokens[0].key, tokens[0].token, tokens[0].created, tokens[0].readonly, ''].join('\t'), - 'prints token info' - ) - - t.equal( - lines[2], - [ - tokens[1].key, - tokens[1].token, - tokens[1].created, - tokens[1].readonly, - tokens[1].cidr_whitelist.join(','), - ].join('\t'), - 'prints token info' - ) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, + }) + registry.getTokens(tokens) + await npm.exec('token', []) + t.strictSame(outputs, [ + 'key\ttoken\tcreated\treadonly\tCIDR whitelist', + `abcd1234abcd1234\tefgh5678efgh5678\t${now}\tfalse\t`, + `abcd1256\thgfe8765\t${now}\ttrue\t192.168.1.1/32`, + ]) }) t.test('token revoke', async t => { - const { token, joinedOutput } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org' }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return {} - }, - profile: { - listTokens: conf => { - t.same(conf.auth, {}, 'passes the correct empty auth') - return Promise.resolve([{ key: 'abcd1234' }]) - }, - removeToken: key => { - t.equal(key, 'abcd1234', 'deletes the correct token') - }, - }, + const { npm, joinedOutput } = await loadMockNpm(t, { + config: { ...auth }, + }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, }) - await token.exec(['rm', 'abcd']) + registry.getTokens(tokens) + registry.nock.delete(`/-/npm/v1/tokens/token/${tokens[0].key}`).reply(200) + await npm.exec('token', ['rm', tokens[0].key.slice(0, 8)]) t.equal(joinedOutput(), 'Removed 1 token') }) t.test('token revoke multiple tokens', async t => { - const { token, joinedOutput } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org' }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - profile: { - listTokens: () => Promise.resolve([{ key: 'abcd1234' }, { key: 'efgh5678' }]), - removeToken: key => { - // this will run twice - t.ok(['abcd1234', 'efgh5678'].includes(key), 'deletes the correct token') - }, - }, + const { npm, joinedOutput } = await loadMockNpm(t, { + config: { ...auth }, + }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, }) - await token.exec(['revoke', 'abcd', 'efgh']) + registry.getTokens(tokens) + 
registry.nock.delete(`/-/npm/v1/tokens/token/${tokens[0].key}`).reply(200) + registry.nock.delete(`/-/npm/v1/tokens/token/${tokens[1].key}`).reply(200) + await npm.exec('token', ['rm', tokens[0].key.slice(0, 8), tokens[1].key.slice(0, 8)]) t.equal(joinedOutput(), 'Removed 2 tokens') }) t.test('token revoke json output', async t => { - const { token, joinedOutput } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org', json: true }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - profile: { - listTokens: () => Promise.resolve([{ key: 'abcd1234' }]), - removeToken: key => { - t.equal(key, 'abcd1234', 'deletes the correct token') - }, + const { npm, joinedOutput } = await loadMockNpm(t, { + config: { + ...auth, + json: true, }, - + }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, }) - await token.exec(['delete', 'abcd']) + registry.getTokens(tokens) + registry.nock.delete(`/-/npm/v1/tokens/token/${tokens[0].key}`).reply(200) + await npm.exec('token', ['rm', tokens[0].key.slice(0, 8)]) const parsed = JSON.parse(joinedOutput()) - t.same(parsed, ['abcd1234'], 'logs the token as json') + t.same(parsed, [tokens[0].key], 'logs the token as json') }) t.test('token revoke parseable output', async t => { - const { token, joinedOutput } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org', parseable: true }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - profile: { - listTokens: () => Promise.resolve([{ key: 'abcd1234' }]), - removeToken: key => { - t.equal(key, 'abcd1234', 'deletes the correct token') - }, + const { npm, joinedOutput } = await loadMockNpm(t, { + config: { + ...auth, + parseable: true, }, }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, + }) - await token.exec(['remove', 'abcd']) - - t.equal(joinedOutput(), 'abcd1234', 'logs the token as a string') + registry.getTokens(tokens) + registry.nock.delete(`/-/npm/v1/tokens/token/${tokens[0].key}`).reply(200) + await npm.exec('token', ['rm', tokens[0].key.slice(0, 8)]) + t.equal(joinedOutput(), tokens[0].key, 'logs the token as a string') }) t.test('token revoke by token', async t => { - const { token, joinedOutput } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org' }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - profile: { - listTokens: () => Promise.resolve([{ key: 'abcd1234', token: 'efgh5678' }]), - removeToken: key => { - t.equal(key, 'efgh5678', 'passes through user input') - }, - }, + const { npm, joinedOutput } = await loadMockNpm(t, { + config: { ...auth }, }) - - await token.exec(['rm', 'efgh5678']) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, + }) + registry.getTokens(tokens) + registry.nock.delete(`/-/npm/v1/tokens/token/${tokens[0].token}`).reply(200) + await npm.exec('token', ['rm', tokens[0].token]) t.equal(joinedOutput(), 'Removed 1 token') }) t.test('token revoke requires an id', async t => { - const { token } = await mockToken(t) + const { npm } = await loadMockNpm(t) - await t.rejects(token.exec(['rm']), /`` argument is required/) 
+ await t.rejects(npm.exec('token', ['rm']), { + code: 'EUSAGE', + message: '`` argument is required', + }) }) t.test('token revoke ambiguous id errors', async t => { - const { token } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org' }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - profile: { - listTokens: () => Promise.resolve([{ key: 'abcd1234' }, { key: 'abcd5678' }]), - }, + const { npm } = await loadMockNpm(t, { + config: { ...auth }, + }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, + }) + registry.getTokens(tokens) + await t.rejects(npm.exec('token', ['rm', 'abcd']), { + message: /Token ID "abcd" was ambiguous/, }) - - await t.rejects(token.exec(['rm', 'abcd']), /Token ID "abcd" was ambiguous/) }) -t.test('token revoke unknown id errors', async t => { - const { token } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org' }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - profile: { - listTokens: () => Promise.resolve([{ key: 'abcd1234' }]), - }, +t.test('token revoke unknown token', async t => { + const { npm } = await loadMockNpm(t, { + config: { ...auth }, + }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, }) - await t.rejects(token.exec(['rm', 'efgh']), /Unknown token id or value "efgh"./) + registry.getTokens(tokens) + await t.rejects(npm.exec('token', ['rm', '0xnotreal']), + 'Unknown token id or value 0xnotreal' + ) }) t.test('token create', async t => { - const now = new Date().toISOString() - const password = 'thisisnotreallyapassword' - - const { token, joinedOutput } = await mockToken(t, { + const cidr = ['10.0.0.0/8', '192.168.1.0/24'] + const { npm, outputs } = await loadMockNpm(t, { config: { - registry: 'https://registry.npmjs.org', - cidr: ['10.0.0.0/8', '192.168.1.0/24'], - }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - readUserInfo: { - password: () => Promise.resolve(password), + ...auth, + cidr, }, - profile: { - createToken: (pw, readonly, cidr) => { - t.equal(pw, password) - t.equal(readonly, false) - t.same(cidr, ['10.0.0.0/8', '192.168.1.0/24'], 'defaults to empty array') - return { - key: 'abcd1234', - token: 'efgh5678', - created: now, - updated: now, - readonly: false, - cidr_whitelist: [], - } - }, - }, - }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, + }) + const stdin = new stream.PassThrough() + stdin.write(`${password}\n`) + mockGlobals(t, { + 'process.stdin': stdin, + 'process.stdout': new stream.PassThrough(), // to quiet readline + }, { replace: true }) + registry.createToken({ password, cidr }) + await npm.exec('token', ['create']) + t.strictSame(outputs, [ + '', + 'Created publish token n3wt0k3n', + 'with IP whitelist: 10.0.0.0/8,192.168.1.0/24', + ]) +}) - await token.exec(['create']) - - const lines = joinedOutput().split(/\r?\n/) - t.match(lines[1], 'token') - t.match(lines[1], 'efgh5678', 'prints the whole token') - t.match(lines[3], 'created') - t.match(lines[3], now, 'prints the correct timestamp') - t.match(lines[5], 'readonly') - t.match(lines[5], 'false', 
'prints the readonly flag') - t.match(lines[7], 'cidr_whitelist') +t.test('token create read only', async t => { + const { npm, outputs } = await loadMockNpm(t, { + config: { + ...auth, + 'read-only': true, + }, + }) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, + }) + const stdin = new stream.PassThrough() + stdin.write(`${password}\n`) + mockGlobals(t, { + 'process.stdin': stdin, + 'process.stdout': new stream.PassThrough(), // to quiet readline + }, { replace: true }) + registry.createToken({ readonly: true, password }) + await npm.exec('token', ['create']) + t.strictSame(outputs, [ + '', + 'Created read only token n3wt0k3n', + ]) }) t.test('token create json output', async t => { - const now = new Date().toISOString() - const password = 'thisisnotreallyapassword' - - const { token } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org', json: true }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - readUserInfo: { - password: () => Promise.resolve(password), - }, - profile: { - createToken: (pw, readonly, cidr) => { - t.equal(pw, password) - t.equal(readonly, false) - t.same(cidr, [], 'defaults to empty array') - return { - key: 'abcd1234', - token: 'efgh5678', - created: now, - updated: now, - readonly: false, - cidr_whitelist: [], - } - }, - }, - output: spec => { - t.type(spec, 'string', 'outputs a string') - const parsed = JSON.parse(spec) - t.same( - parsed, - { token: 'efgh5678', created: now, readonly: false, cidr_whitelist: [] }, - 'outputs the correct object' - ) + const cidr = ['10.0.0.0/8', '192.168.1.0/24'] + const { npm, joinedOutput } = await loadMockNpm(t, { + config: { + ...auth, + json: true, + cidr, }, }) - - await token.exec(['create']) + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, + }) + const stdin = new stream.PassThrough() + stdin.write(`${password}\n`) + mockGlobals(t, { + 'process.stdin': stdin, + 'process.stdout': new stream.PassThrough(), // to quiet readline + }, { replace: true }) + registry.createToken({ password, cidr }) + await npm.exec('token', ['create']) + const parsed = JSON.parse(joinedOutput()) + t.match( + parsed, + { token: 'n3wt0k3n', readonly: false, cidr_whitelist: cidr } + ) + t.ok(parsed.created, 'also returns created') }) t.test('token create parseable output', async t => { - const now = new Date().toISOString() - const password = 'thisisnotreallyapassword' - - const { token, joinedOutput } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org', parseable: true }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - readUserInfo: { - password: () => Promise.resolve(password), - }, - profile: { - createToken: (pw, readonly, cidr) => { - t.equal(pw, password) - t.equal(readonly, false) - t.same(cidr, [], 'defaults to empty array') - return { - key: 'abcd1234', - token: 'efgh5678', - created: now, - updated: now, - readonly: false, - cidr_whitelist: [], - } - }, + const cidr = ['10.0.0.0/8', '192.168.1.0/24'] + const { npm, outputs } = await loadMockNpm(t, { + config: { + ...auth, + parseable: true, + cidr, }, }) - - await token.exec(['create']) - - const spec = joinedOutput().split(/\r?\n/) - - t.match(spec[0], 'token\tefgh5678', 'prints the token') - 
t.match(spec[1], `created\t${now}`, 'prints the created timestamp') - t.match(spec[2], 'readonly\tfalse', 'prints the readonly flag') - t.match(spec[3], 'cidr_whitelist\t', 'prints the cidr whitelist') + const registry = new MockRegistry({ + tap: t, + registry: npm.config.get('registry'), + authorization: authToken, + }) + const stdin = new stream.PassThrough() + stdin.write(`${password}\n`) + mockGlobals(t, { + 'process.stdin': stdin, + 'process.stdout': new stream.PassThrough(), // to quiet readline + }, { replace: true }) + registry.createToken({ password, cidr }) + await npm.exec('token', ['create']) + t.equal(outputs[1], 'token\tn3wt0k3n') + t.ok(outputs[2].startsWith('created\t')) + t.equal(outputs[3], 'readonly\tfalse') + t.equal(outputs[4], 'cidr_whitelist\t10.0.0.0/8,192.168.1.0/24') }) t.test('token create ipv6 cidr', async t => { - const password = 'thisisnotreallyapassword' - - const { token } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org', cidr: '::1/128' }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - readUserInfo: { - password: () => Promise.resolve(password), + const { npm } = await loadMockNpm(t, { + config: { + ...auth, + cidr: '::1/128', }, }) - - await t.rejects( - token.exec(['create']), - { - code: 'EINVALIDCIDR', - message: /CIDR whitelist can only contain IPv4 addresses, ::1\/128 is IPv6/, - }, - 'returns correct error' - ) + await t.rejects(npm.exec('token', ['create'], { + code: 'EINVALIDCIDR', + message: /CIDR whitelist can only contain IPv4 addresses, ::1\/128 is IPv6/, + })) }) t.test('token create invalid cidr', async t => { - const password = 'thisisnotreallyapassword' - - const { token } = await mockToken(t, { - config: { registry: 'https://registry.npmjs.org', cidr: 'apple/cider' }, - getCredentialsByURI: uri => { - t.equal(uri, 'https://registry.npmjs.org/', 'requests correct registry') - return { token: 'thisisnotarealtoken' } - }, - readUserInfo: { - password: () => Promise.resolve(password), + const { npm } = await loadMockNpm(t, { + config: { + ...auth, + cidr: 'apple/cider', }, }) - - await t.rejects( - token.exec(['create']), - { code: 'EINVALIDCIDR', message: /CIDR whitelist contains invalid CIDR entry: apple\/cider/ }, - 'returns correct error' - ) + await t.rejects(npm.exec('token', ['create'], { + code: 'EINVALIDCIDR', + message: 'CIDR whitelist contains invalid CIDR entry: apple/cider', + })) }) diff --git a/deps/npm/test/lib/commands/update.js b/deps/npm/test/lib/commands/update.js index f42fb8a4146b02..e84e2c3142141b 100644 --- a/deps/npm/test/lib/commands/update.js +++ b/deps/npm/test/lib/commands/update.js @@ -64,10 +64,8 @@ t.test('update --depth=', async t => { config: { depth: 1 }, }) - const [title, msg] = logs.warn[0] - t.equal(title, 'update', 'should print expected title') t.match( - msg, + logs.warn.byTitle('update')[0], /The --depth option no longer has any effect/, 'should print expected warning message' ) diff --git a/deps/npm/test/lib/commands/version.js b/deps/npm/test/lib/commands/version.js index 8aa6c088bfc9b5..1f02f368f67bc0 100644 --- a/deps/npm/test/lib/commands/version.js +++ b/deps/npm/test/lib/commands/version.js @@ -36,11 +36,7 @@ t.test('node@1', async t => { t.strictSame( result(), - [{ - 'test-version-no-args': '3.2.1', - node: '1.0.0', - npm: '1.0.0', - }], + "{ 'test-version-no-args': '3.2.1', npm: '1.0.0', node: '1.0.0' }", 'should output expected values for various versions in 
npm' ) }) @@ -75,10 +71,7 @@ t.test('node@1', async t => { t.strictSame( result(), - [{ - npm: '1.0.0', - node: '1.0.0', - }], + `{ npm: '1.0.0', node: '1.0.0' }`, 'should not have package name on returning object' ) }) @@ -93,7 +86,7 @@ t.test('empty versions', async t => { }) await version.exec([]) - t.same(result(), ['{\n "npm": "1.0.0"\n}'], 'should return json stringified result') + t.same(result(), '{\n "npm": "1.0.0"\n}', 'should return json stringified result') }) t.test('with one arg', async t => { @@ -104,7 +97,7 @@ t.test('empty versions', async t => { }) await version.exec(['major']) - t.same(result(), ['v4.0.0'], 'outputs the new version prefixed by the tagVersionPrefix') + t.same(result(), 'v4.0.0', 'outputs the new version prefixed by the tagVersionPrefix') }) t.test('workspaces', async t => { @@ -139,14 +132,12 @@ t.test('empty versions', async t => { await version.exec([]) t.same( result(), - [ - { - 'workspaces-test': '1.0.0', - 'workspace-a': '1.0.0', - 'workspace-b': '1.0.0', - npm: '1.0.0', - }, - ], + `{ + 'workspace-a': '1.0.0', + 'workspace-b': '1.0.0', + 'workspaces-test': '1.0.0', + npm: '1.0.0' +}`, 'outputs includes main package and workspace versions' ) }) @@ -184,13 +175,7 @@ t.test('empty versions', async t => { await version.exec([]) t.same( result(), - [ - { - 'workspaces-test': '1.0.0', - 'workspace-a': '1.0.0', - npm: '1.0.0', - }, - ], + "{ 'workspace-a': '1.0.0', 'workspaces-test': '1.0.0', npm: '1.0.0' }", 'outputs includes main package and requested workspace versions' ) }) @@ -230,13 +215,7 @@ t.test('empty versions', async t => { await version.exec([]) t.same( result(), - [ - { - 'workspaces-test': '1.0.0', - 'workspace-a': '1.0.0', - npm: '1.0.0', - }, - ], + "{ 'workspace-a': '1.0.0', 'workspaces-test': '1.0.0', npm: '1.0.0' }", 'outputs includes main package and valid workspace versions' ) }) @@ -271,7 +250,7 @@ t.test('empty versions', async t => { await version.exec(['major']) t.same( - outputs.map(o => o[0]).slice(0, 4), + outputs.slice(0, 4), ['workspace-a', 'v2.0.0', 'workspace-b', 'v2.0.0'], 'outputs the new version for only the workspaces prefixed by the tagVersionPrefix' ) @@ -316,7 +295,7 @@ t.test('empty versions', async t => { await version.exec(['major']) t.same( - outputs.map(o => o[0]).slice(0, 4), + outputs.slice(0, 4), ['workspace-a', 'v2.0.0', 'workspace-b', 'v2.0.0'], 'outputs the new version for only the workspaces prefixed by the tagVersionPrefix' ) @@ -360,7 +339,7 @@ t.test('empty versions', async t => { }, }, mocks: { - libnpmversion: (arg, opts) => { + libnpmversion: () => { return '2.0.0' }, }, @@ -372,7 +351,7 @@ t.test('empty versions', async t => { await version.exec(['major']) t.same( - outputs.map(o => o[0]).slice(0, 4), + outputs.slice(0, 4), ['workspace-a', 'v2.0.0', 'workspace-b', 'v2.0.0'], 'outputs the new version for only the workspaces prefixed by the tagVersionPrefix' ) diff --git a/deps/npm/test/lib/commands/view.js b/deps/npm/test/lib/commands/view.js index 92c7fe47bda062..c50668791bbe59 100644 --- a/deps/npm/test/lib/commands/view.js +++ b/deps/npm/test/lib/commands/view.js @@ -48,9 +48,7 @@ const packument = (nv, opts) => { dist: { shasum: '123', tarball: 'http://hm.blue.com/1.0.0.tgz', - integrity: '---', fileCount: 1, - unpackedSize: 1, }, }, '1.0.1': { @@ -282,81 +280,81 @@ const loadMockNpm = async function (t, opts = {}) { } t.test('package from git', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { unicode: false } }) + const { view, joinedOutput } = await loadMockNpm(t, { 
config: { unicode: false } }) await view.exec(['https://github.com/npm/green']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('deprecated package with license, bugs, repository and other fields', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { unicode: false } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { unicode: false } }) await view.exec(['green@1.0.0']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('deprecated package with unicode', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { unicode: true } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { unicode: true } }) await view.exec(['green@1.0.0']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('package with more than 25 deps', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { unicode: false } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { unicode: false } }) await view.exec(['black@1.0.0']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('package with maintainers info as object', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { unicode: false } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { unicode: false } }) await view.exec(['pink@1.0.0']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('package with homepage', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { unicode: false } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { unicode: false } }) await view.exec(['orange@1.0.0']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('package with invalid version', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { unicode: false } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { unicode: false } }) await view.exec(['orange', 'versions']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('package with no versions', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { unicode: false } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { unicode: false } }) await view.exec(['brown']) - t.equal(outputs.join('\n'), '', 'no info to display') + t.equal(joinedOutput(), '', 'no info to display') }) t.test('package with no repo or homepage', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { unicode: false } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { unicode: false } }) await view.exec(['blue@1.0.0']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('package with semver range', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { unicode: false } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { unicode: false } }) await view.exec(['blue@^1.0.0']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('package with no modified time', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { unicode: false } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { unicode: false } }) await view.exec(['cyan@1.0.0']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('package with --json and semver range', 
async t => { - const { view, outputs } = await loadMockNpm(t, { config: { json: true } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { json: true } }) await view.exec(['cyan@^1.0.0']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('package with --json and no versions', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { json: true } }) + const { view, joinedOutput } = await loadMockNpm(t, { config: { json: true } }) await view.exec(['brown']) - t.equal(outputs.join('\n'), '', 'no info to display') + t.equal(joinedOutput(), '', 'no info to display') }) t.test('package in cwd', async t => { @@ -368,76 +366,76 @@ t.test('package in cwd', async t => { } t.test('specific version', async t => { - const { view, outputs } = await loadMockNpm(t, { prefixDir }) + const { view, joinedOutput } = await loadMockNpm(t, { prefixDir }) await view.exec(['.@1.0.0']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('non-specific version', async t => { - const { view, outputs } = await loadMockNpm(t, { prefixDir }) + const { view, joinedOutput } = await loadMockNpm(t, { prefixDir }) await view.exec(['.']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('directory', async t => { - const { view, outputs } = await loadMockNpm(t, { prefixDir }) + const { view, joinedOutput } = await loadMockNpm(t, { prefixDir }) await view.exec(['./blue']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) }) t.test('specific field names', async t => { - const { view, outputs } = await loadMockNpm(t, { config: { color: false } }) - t.afterEach(() => outputs.length = 0) + const { view, joinedOutput, clearOutput } = await loadMockNpm(t, { config: { color: false } }) + t.afterEach(() => clearOutput()) t.test('readme', async t => { await view.exec(['yellow@1.0.0', 'readme']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('several fields', async t => { await view.exec(['yellow@1.0.0', 'name', 'version', 'foo[bar]']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('several fields with several versions', async t => { await view.exec(['yellow@1.x.x', 'author']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('nested field with brackets', async t => { await view.exec(['orange@1.0.0', 'dist[shasum]']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('maintainers with email', async t => { await view.exec(['yellow@1.0.0', 'maintainers', 'name']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('maintainers with url', async t => { await view.exec(['pink@1.0.0', 'maintainers']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('unknown nested field ', async t => { await view.exec(['yellow@1.0.0', 'dist.foobar']) - t.equal(outputs.join('\n'), '', 'no info to display') + t.equal(joinedOutput(), '', 'no info to display') }) t.test('array field - 1 element', async t => { await view.exec(['purple@1.0.0', 'maintainers.name']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('array field - 2 elements', async t => { await view.exec(['yellow@1.x.x', 'maintainers.name']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('fields with empty values', async t => { await view.exec(['yellow', 'empty']) - 
t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) }) @@ -507,84 +505,84 @@ t.test('workspaces', async t => { } t.test('all workspaces', async t => { - const { view, outputs } = await loadMockNpm(t, { + const { view, joinedOutput } = await loadMockNpm(t, { prefixDir, config: { unicode: false, workspaces: true }, }) await view.exec([]) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('one specific workspace', async t => { - const { view, outputs } = await loadMockNpm(t, { + const { view, joinedOutput } = await loadMockNpm(t, { prefixDir, config: { unicode: false, workspace: ['green'] }, }) await view.exec([]) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('all workspaces --json', async t => { - const { view, outputs } = await loadMockNpm(t, { + const { view, joinedOutput } = await loadMockNpm(t, { prefixDir, config: { unicode: false, workspaces: true, json: true }, }) await view.exec([]) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('all workspaces single field', async t => { - const { view, outputs } = await loadMockNpm(t, { + const { view, joinedOutput } = await loadMockNpm(t, { prefixDir, config: { unicode: false, workspaces: true }, }) await view.exec(['.', 'name']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('all workspaces nonexistent field', async t => { - const { view, outputs } = await loadMockNpm(t, { + const { view, joinedOutput } = await loadMockNpm(t, { prefixDir, config: { unicode: false, workspaces: true }, }) await view.exec(['.', 'foo']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('all workspaces nonexistent field --json', async t => { - const { view, outputs } = await loadMockNpm(t, { + const { view, joinedOutput } = await loadMockNpm(t, { prefixDir, config: { unicode: false, workspaces: true, json: true }, }) await view.exec(['.', 'foo']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('all workspaces single field --json', async t => { - const { view, outputs } = await loadMockNpm(t, { + const { view, joinedOutput } = await loadMockNpm(t, { prefixDir, config: { unicode: false, workspaces: true, json: true }, }) await view.exec(['.', 'name']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('single workspace --json', async t => { - const { view, outputs } = await loadMockNpm(t, { + const { view, joinedOutput } = await loadMockNpm(t, { prefixDir, config: { unicode: false, workspace: ['green'], json: true }, }) await view.exec([]) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) }) t.test('remote package name', async t => { - const { view, logs, outputs } = await loadMockNpm(t, { + const { view, logs, joinedOutput } = await loadMockNpm(t, { prefixDir, config: { unicode: false, workspaces: true }, }) await view.exec(['pink']) - t.matchSnapshot(outputs.join('\n')) + t.matchSnapshot(joinedOutput()) t.matchSnapshot(logs.warn, 'should have warning of ignoring workspaces') }) }) diff --git a/deps/npm/test/lib/docs.js b/deps/npm/test/lib/docs.js index 7ace9b99957019..67afd1b54d91e6 100644 --- a/deps/npm/test/lib/docs.js +++ b/deps/npm/test/lib/docs.js @@ -22,7 +22,7 @@ t.test('shorthands', async t => { t.test('config', async t => { const keys = Object.keys(definitions) - const flat = Object.entries(definitions).filter(([_, d]) => d.flatten).map(([k]) => k) + const flat = 
Object.entries(definitions).filter(([, d]) => d.flatten).map(([k]) => k) const notFlat = keys.filter(k => !flat.includes(k)) t.matchSnapshot(keys, 'all keys') t.matchSnapshot(flat, 'keys that are flattened') @@ -74,7 +74,7 @@ t.test('basic usage', async t => { // are generated in the following test const { npm } = await loadMockNpm(t, { mocks: { - '{LIB}/utils/cmd-list.js': { commands: [] }, + '{LIB}/utils/cmd-list.js': { ...cmdList, commands: [] }, }, config: { userconfig: '/some/config/file/.npmrc' }, globals: { process: { platform: 'posix' } }, diff --git a/deps/npm/test/lib/load-all-commands.js b/deps/npm/test/lib/load-all-commands.js index d3846434489cee..c00da37eff3fc3 100644 --- a/deps/npm/test/lib/load-all-commands.js +++ b/deps/npm/test/lib/load-all-commands.js @@ -6,7 +6,7 @@ const t = require('tap') const util = require('util') const { load: loadMockNpm } = require('../fixtures/mock-npm.js') const { commands } = require('../../lib/utils/cmd-list.js') -const BaseCommand = require('../../lib/base-command.js') +const BaseCommand = require('../../lib/base-cmd.js') const isAsyncFn = (v) => typeof v === 'function' && /^\[AsyncFunction:/.test(util.inspect(v)) @@ -72,8 +72,8 @@ t.test('load each command', async t => { // usage t.match(impl.usage, cmd, 'usage contains the command') await npm.exec(cmd, []) - t.match(outputs[0][0], impl.usage, 'usage is what is output') - t.match(outputs[0][0], ctor.describeUsage, 'usage is what is output') + t.match(outputs[0], impl.usage, 'usage is what is output') + t.match(outputs[0], ctor.describeUsage, 'usage is what is output') t.notOk(impl.describeUsage, 'describe usage is only static') }) } diff --git a/deps/npm/test/lib/npm.js b/deps/npm/test/lib/npm.js index e9300ecfa6bd10..a965a79a3f528d 100644 --- a/deps/npm/test/lib/npm.js +++ b/deps/npm/test/lib/npm.js @@ -1,6 +1,7 @@ const t = require('tap') const { resolve, dirname, join } = require('path') const fs = require('fs') +const { time } = require('proc-log') const { load: loadMockNpm } = require('../fixtures/mock-npm.js') const mockGlobals = require('@npmcli/mock-globals') const { commands } = require('../../lib/utils/cmd-list.js') @@ -25,32 +26,20 @@ t.test('not yet loaded', async t => { t.test('npm.load', async t => { await t.test('load error', async t => { const { npm } = await loadMockNpm(t, { load: false }) - const loadError = new Error('load error') npm.config.load = async () => { - throw loadError + throw new Error('load error') } await t.rejects( () => npm.load(), /load error/ ) - - t.equal(npm.loadErr, loadError) - npm.config.load = async () => { - throw new Error('different error') - } - await t.rejects( - () => npm.load(), - /load error/, - 'loading again returns the original error' - ) - t.equal(npm.loadErr, loadError) }) await t.test('basic loading', async t => { - const { npm, logs, prefix: dir, cache, other } = await loadMockNpm(t, { + const { npm, logs, cache } = await loadMockNpm(t, { prefixDir: { node_modules: {} }, - otherDirs: { - newCache: {}, + config: { + timing: true, }, }) @@ -62,31 +51,18 @@ t.test('npm.load', async t => { t.match(npm, { flatOptions: {}, }) - t.match(logs.timing.filter(([p]) => p === 'npm:load'), [ - ['npm:load', /Completed in [0-9.]+ms/], + + t.match(logs.timing.filter((p) => /^npm:load/.test(p)), [ + /npm:load.* Completed in [0-9.]+ms/, ]) mockGlobals(t, { process: { platform: 'posix' } }) t.equal(resolve(npm.cache), resolve(cache), 'cache is cache') - npm.cache = other.newCache - t.equal(npm.config.get('cache'), other.newCache, 'cache setter sets 
config') - t.equal(npm.cache, other.newCache, 'cache getter gets new config') t.equal(npm.lockfileVersion, 2, 'lockfileVersion getter') t.equal(npm.prefix, npm.localPrefix, 'prefix is local prefix') t.not(npm.prefix, npm.globalPrefix, 'prefix is not global prefix') - npm.globalPrefix = npm.prefix - t.equal(npm.prefix, npm.globalPrefix, 'globalPrefix setter') - npm.localPrefix = dir + '/extra/prefix' - t.equal(npm.prefix, npm.localPrefix, 'prefix is local prefix after localPrefix setter') - t.not(npm.prefix, npm.globalPrefix, 'prefix is not global prefix after localPrefix setter') - - npm.prefix = dir + '/some/prefix' - t.equal(npm.prefix, npm.localPrefix, 'prefix is local prefix after prefix setter') - t.not(npm.prefix, npm.globalPrefix, 'prefix is not global prefix after prefix setter') - t.equal(npm.bin, npm.localBin, 'bin is local bin after prefix setter') - t.not(npm.bin, npm.globalBin, 'bin is not global bin after prefix setter') - t.equal(npm.dir, npm.localDir, 'dir is local dir after prefix setter') - t.not(npm.dir, npm.globalDir, 'dir is not global dir after prefix setter') + t.equal(npm.bin, npm.localBin, 'bin is local bin') + t.not(npm.bin, npm.globalBin, 'bin is not global bin') npm.config.set('global', true) t.equal(npm.prefix, npm.globalPrefix, 'prefix is global prefix after setting global') @@ -96,12 +72,6 @@ t.test('npm.load', async t => { t.equal(npm.dir, npm.globalDir, 'dir is global dir after setting global') t.not(npm.dir, npm.localDir, 'dir is not local dir after setting global') - npm.prefix = dir + '/new/global/prefix' - t.equal(npm.prefix, npm.globalPrefix, 'prefix is global prefix after prefix setter') - t.not(npm.prefix, npm.localPrefix, 'prefix is not local prefix after prefix setter') - t.equal(npm.bin, npm.globalBin, 'bin is global bin after prefix setter') - t.not(npm.bin, npm.localBin, 'bin is not local bin after prefix setter') - mockGlobals(t, { process: { platform: 'win32' } }) t.equal(npm.bin, npm.globalBin, 'bin is global bin in windows mode') t.equal(npm.dir, npm.globalDir, 'dir is global dir in windows mode') @@ -109,15 +79,12 @@ t.test('npm.load', async t => { await t.test('forceful loading', async t => { const { logs } = await loadMockNpm(t, { - globals: { - 'process.argv': [...process.argv, '--force', '--color', 'always'], + config: { + force: true, }, }) t.match(logs.warn, [ - [ - 'using --force', - 'Recommended protections disabled.', - ], + 'using --force Recommended protections disabled.', ]) }) @@ -127,33 +94,39 @@ t.test('npm.load', async t => { prefixDir: { bin: t.fixture('symlink', dirname(process.execPath)), }, + config: { + timing: true, + usage: '', + scope: 'foo', + }, + argv: [ + 'token', + 'revoke', + 'blergggg', + ], globals: (dirs) => ({ 'process.env.PATH': resolve(dirs.prefix, 'bin'), 'process.argv': [ node, process.argv[1], - '--usage', - '--scope=foo', - 'token', - 'revoke', - 'blergggg', ], }), }) t.equal(npm.config.get('scope'), '@foo', 'added the @ sign to scope') + t.match([ - ...logs.timing.filter(([p]) => p === 'npm:load:whichnode'), + ...logs.timing.filter((p) => p.startsWith('npm:load:whichnode')), ...logs.verbose, - ...logs.timing.filter(([p]) => p === 'npm:load'), + ...logs.timing.filter((p) => p.startsWith('npm:load')), ], [ - ['npm:load:whichnode', /Completed in [0-9.]+ms/], - ['node symlink', resolve(prefix, 'bin', node)], - ['title', 'npm token revoke blergggg'], - ['argv', '"--usage" "--scope" "foo" "token" "revoke" "blergggg"'], - ['logfile', /logs-max:\d+ dir:.*/], - ['logfile', /.*-debug-0.log/], - ['npm:load', 
/Completed in [0-9.]+ms/], + /npm:load:whichnode Completed in [0-9.]+ms/, + `node symlink ${resolve(prefix, 'bin', node)}`, + /title npm token revoke blergggg/, + /argv "token" "revoke" "blergggg".*"--usage" "--scope" "foo"/, + /logfile logs-max:\d+ dir:.*/, + /logfile .*-debug-0.log/, + /npm:load:.* Completed in [0-9.]+ms/, ]) t.equal(process.execPath, resolve(prefix, 'bin', node)) @@ -165,35 +138,21 @@ t.test('npm.load', async t => { t.equal(npm.flatOptions.npmCommand, 'll', 'npmCommand flatOption set') const ll = Npm.cmd('ll') - t.same(outputs, [[ll.describeUsage]], 'print usage') + t.same(outputs, [ll.describeUsage], 'print usage') npm.config.set('usage', false) outputs.length = 0 logs.length = 0 - await npm.exec('get', ['scope', '\u2010not-a-dash']) + await npm.exec('get', ['scope', 'usage']) t.strictSame([npm.command, npm.flatOptions.npmCommand], ['ll', 'll'], 'does not change npm.command when another command is called') t.match(logs, [ - [ - 'error', - 'arg', - 'Argument starts with non-ascii dash, this is probably invalid:', - '\u2010not-a-dash', - ], - [ - 'timing', - 'command:config', - /Completed in [0-9.]+ms/, - ], - [ - 'timing', - 'command:get', - /Completed in [0-9.]+ms/, - ], + /timing command:config Completed in [0-9.]+ms/, + /timing command:get Completed in [0-9.]+ms/, ]) - t.same(outputs, [['scope=@foo\n\u2010not-a-dash=undefined']]) + t.same(outputs, ['scope=@foo\nusage=false']) }) await t.test('--no-workspaces with --workspace', async t => { @@ -214,14 +173,9 @@ t.test('npm.load', async t => { workspaces: ['./packages/*'], }), }, - globals: { - 'process.argv': [ - process.execPath, - process.argv[1], - '--color', 'false', - '--workspaces', 'false', - '--workspace', 'a', - ], + config: { + workspaces: false, + workspace: 'a', }, }) await t.rejects( @@ -231,7 +185,7 @@ t.test('npm.load', async t => { }) await t.test('workspace-aware configs and commands', async t => { - const { npm, outputs } = await loadMockNpm(t, { + const { npm, joinedOutput } = await loadMockNpm(t, { prefixDir: { packages: { a: { @@ -255,13 +209,8 @@ t.test('npm.load', async t => { workspaces: ['./packages/*'], }), }, - globals: { - 'process.argv': [ - process.execPath, - process.argv[1], - '--color', 'false', - '--workspaces', 'true', - ], + config: { + workspaces: true, }, }) @@ -269,18 +218,7 @@ t.test('npm.load', async t => { t.equal(npm.command, 'run-script', 'npm.command set to canonical name') - t.match( - outputs, - [ - ['Lifecycle scripts included in a@1.0.0:'], - [' test\n echo test a'], - [''], - ['Lifecycle scripts included in b@1.0.0:'], - [' test\n echo test b'], - [''], - ], - 'should exec workspaces version of commands' - ) + t.matchSnapshot(joinedOutput(), 'should exec workspaces version of commands') }) await t.test('workspaces in global mode', async t => { @@ -308,16 +246,9 @@ t.test('npm.load', async t => { workspaces: ['./packages/*'], }), }, - globals: { - 'process.argv': [ - process.execPath, - process.argv[1], - '--color', - 'false', - '--workspaces', - '--global', - 'true', - ], + config: { + workspaces: true, + global: true, }, }) @@ -331,15 +262,11 @@ t.test('npm.load', async t => { t.test('set process.title', async t => { t.test('basic title setting', async t => { const { npm } = await loadMockNpm(t, { - globals: { - 'process.argv': [ - process.execPath, - process.argv[1], - '--usage', - '--scope=foo', - 'ls', - ], + config: { + usage: true, + scope: 'foo', }, + argv: ['ls'], }) t.equal(npm.title, 'npm ls') t.equal(process.title, 'npm ls') @@ -347,17 +274,11 @@ t.test('set 
process.title', async t => { t.test('do not expose token being revoked', async t => { const { npm } = await loadMockNpm(t, { - globals: { - 'process.argv': [ - process.execPath, - process.argv[1], - '--usage', - '--scope=foo', - 'token', - 'revoke', - `npm_${'a'.repeat(36)}`, - ], + config: { + usage: true, + scope: 'foo', }, + argv: ['token', 'revoke', `npm_${'a'.repeat(36)}`], }) t.equal(npm.title, 'npm token revoke npm_***') t.equal(process.title, 'npm token revoke npm_***') @@ -365,17 +286,11 @@ t.test('set process.title', async t => { t.test('do show *** unless a token is actually being revoked', async t => { const { npm } = await loadMockNpm(t, { - globals: { - 'process.argv': [ - process.execPath, - process.argv[1], - '--usage', - '--scope=foo', - 'token', - 'revoke', - 'notatoken', - ], + config: { + usage: true, + scope: 'foo', }, + argv: ['token', 'revoke', 'notatoken'], }) t.equal(npm.title, 'npm token revoke notatoken') t.equal(process.title, 'npm token revoke notatoken') @@ -442,42 +357,14 @@ t.test('cache dir', async t => { }) t.test('timings', async t => { - t.test('gets/sets timers', async t => { - const { npm, logs } = await loadMockNpm(t, { load: false }) - process.emit('time', 'foo') - process.emit('time', 'bar') - t.match(npm.unfinishedTimers.get('foo'), Number, 'foo timer is a number') - t.match(npm.unfinishedTimers.get('bar'), Number, 'foo timer is a number') - process.emit('timeEnd', 'foo') - process.emit('timeEnd', 'bar') - process.emit('timeEnd', 'baz') - // npm timer is started by default - process.emit('timeEnd', 'npm') - t.match(logs.timing, [ - ['foo', /Completed in [0-9]+ms/], - ['bar', /Completed in [0-9]+ms/], - ['npm', /Completed in [0-9]+ms/], - ]) - t.match(logs.silly, [[ - 'timing', - "Tried to end timer that doesn't exist:", - 'baz', - ]]) - t.notOk(npm.unfinishedTimers.has('foo'), 'foo timer is gone') - t.notOk(npm.unfinishedTimers.has('bar'), 'bar timer is gone') - t.match(npm.finishedTimers, { foo: Number, bar: Number, npm: Number }) - }) - t.test('writes timings file', async t => { - const { npm, cache, timingFile } = await loadMockNpm(t, { + const { npm, timingFile } = await loadMockNpm(t, { config: { timing: true }, }) - process.emit('time', 'foo') - process.emit('timeEnd', 'foo') - process.emit('time', 'bar') - npm.writeTimingFile() - t.match(npm.timingFile, cache) - t.match(npm.timingFile, /-timing.json$/) + time.start('foo') + time.end('foo') + time.start('bar') + npm.finish() const timings = await timingFile() t.match(timings, { metadata: { @@ -487,7 +374,6 @@ t.test('timings', async t => { }, unfinishedTimers: { bar: [Number, Number], - npm: [Number, Number], }, timers: { foo: Number, @@ -500,7 +386,7 @@ t.test('timings', async t => { const { npm, timingFile } = await loadMockNpm(t, { config: { timing: false }, }) - npm.writeTimingFile() + npm.finish() await t.rejects(() => timingFile()) }) @@ -513,44 +399,13 @@ t.test('timings', async t => { for (const [config, expectedDisplay, expectedTiming] of timingDisplay) { const msg = `${JSON.stringify(config)}, display:${expectedDisplay}, timing:${expectedTiming}` await t.test(`timing display: ${msg}`, async t => { - const { display } = await loadMockNpm(t, { config }) - t.equal(!!display.length, expectedDisplay, 'display') - t.equal(!!display.timing.length, expectedTiming, 'timing display') + const { logs } = await loadMockNpm(t, { config }) + t.equal(!!logs.length, expectedDisplay, 'display') + t.equal(!!logs.timing.length, expectedTiming, 'timing display') }) } }) -t.test('output clears progress 
and console.logs cleaned messages', async t => { - t.plan(4) - let showingProgress = true - const logs = [] - const errors = [] - const { npm } = await loadMockNpm(t, { - load: false, - mocks: { - npmlog: { - clearProgress: () => showingProgress = false, - showProgress: () => showingProgress = true, - }, - }, - globals: { - 'console.log': (...args) => { - t.equal(showingProgress, false, 'should not be showing progress right now') - logs.push(args) - }, - 'console.error': (...args) => { - t.equal(showingProgress, false, 'should not be showing progress right now') - errors.push(args) - }, - }, - }) - npm.originalOutput('hello\x00world') - npm.originalOutputError('error\x00world') - - t.match(logs, [['hello^@world']]) - t.match(errors, [['error^@world']]) -}) - t.test('aliases and typos', async t => { const { Npm } = await loadMockNpm(t, { init: false }) t.throws(() => Npm.cmd('thisisnotacommand'), { code: 'EUNKNOWNCOMMAND' }) @@ -580,13 +435,8 @@ t.test('explicit workspace rejection', async t => { workspaces: ['./packages/a'], }), }, - globals: { - 'process.argv': [ - process.execPath, - process.argv[1], - '--color', 'false', - '--workspace', './packages/a', - ], + config: { + workspace: './packages/a', }, }) await t.rejects( @@ -614,13 +464,8 @@ t.test('implicit workspace rejection', async t => { }), }, chdir: ({ prefix }) => join(prefix, 'packages', 'a'), - globals: { - 'process.argv': [ - process.execPath, - process.argv[1], - '--color', 'false', - '--workspace', './packages/a', - ], + config: { + workspace: './packages/a', }, }) await t.rejects( @@ -648,13 +493,6 @@ t.test('implicit workspace accept', async t => { }), }, chdir: ({ prefix }) => join(prefix, 'packages', 'a'), - globals: { - 'process.argv': [ - process.execPath, - process.argv[1], - '--color', 'false', - ], - }, }) await t.rejects(mock.npm.exec('org', []), /.*Usage/) }) diff --git a/deps/npm/test/lib/utils/audit-error.js b/deps/npm/test/lib/utils/audit-error.js index f6be56a152f710..9d6192fbc31be2 100644 --- a/deps/npm/test/lib/utils/audit-error.js +++ b/deps/npm/test/lib/utils/audit-error.js @@ -1,11 +1,9 @@ const t = require('tap') -const mockLogs = require('../../fixtures/mock-logs') const mockNpm = require('../../fixtures/mock-npm') const tmock = require('../../fixtures/tmock') const auditError = async (t, { command, error, ...config } = {}) => { - const { logs, logMocks } = mockLogs() - const mockAuditError = tmock(t, '{LIB}/utils/audit-error', logMocks) + const mockAuditError = tmock(t, '{LIB}/utils/audit-error') const mock = await mockNpm(t, { command, @@ -23,7 +21,7 @@ const auditError = async (t, { command, error, ...config } = {}) => { return { ...res, - logs: logs.warn.filter((l) => l[0] === 'audit'), + logs: mock.logs.warn.byTitle('audit'), output: mock.joinedOutput(), } } @@ -80,7 +78,7 @@ t.test('error, audit command, not json', async t => { t.ok(error, 'throws error') t.match(output, 'body error text', 'some output') - t.strictSame(logs, [['audit', 'message']], 'some warnings') + t.strictSame(logs, ['audit message'], 'some warnings') }) t.test('error, audit command, json', async t => { @@ -117,5 +115,5 @@ t.test('error, audit command, json', async t => { ' }\n' + '}' , 'some output') - t.strictSame(logs, [['audit', 'message']], 'some warnings') + t.strictSame(logs, ['audit message'], 'some warnings') }) diff --git a/deps/npm/test/lib/utils/display.js b/deps/npm/test/lib/utils/display.js index 2b9db0e6725100..33f9360e5728cf 100644 --- a/deps/npm/test/lib/utils/display.js +++ 
b/deps/npm/test/lib/utils/display.js @@ -1,161 +1,199 @@ const t = require('tap') -const log = require('../../../lib/utils/log-shim') +const timers = require('node:timers/promises') +const tmock = require('../../fixtures/tmock') const mockLogs = require('../../fixtures/mock-logs') const mockGlobals = require('@npmcli/mock-globals') -const tmock = require('../../fixtures/tmock') -const util = require('util') +const { inspect } = require('util') + +const mockDisplay = async (t, { mocks, load } = {}) => { + const procLog = require('proc-log') -const mockDisplay = (t, mocks) => { - const { logs, logMocks } = mockLogs(mocks) - const Display = tmock(t, '{LIB}/utils/display', { - ...mocks, - ...logMocks, + const logs = mockLogs() + + const Display = tmock(t, '{LIB}/utils/display', mocks) + const display = new Display(logs.streams) + const displayLoad = async (opts) => display.load({ + loglevel: 'silly', + stderrColor: false, + stdoutColot: false, + heading: 'npm', + ...opts, }) - const display = new Display() + + if (load !== false) { + await displayLoad(load) + } + t.teardown(() => display.off()) - return { display, logs } + return { + ...procLog, + display, + displayLoad, + ...logs.logs, + } } -t.test('setup', async (t) => { - const { display } = mockDisplay(t) +t.test('can log cleanly', async (t) => { + const { log, logs } = await mockDisplay(t) - display.load({ timing: true, loglevel: 'notice' }) - t.equal(log.level, 'notice') + log.error('', 'test\x00message') + t.match(logs.error, ['test^@message']) +}) - display.load({ timing: false, loglevel: 'notice' }) - t.equal(log.level, 'notice') +t.test('can handle special eresolves', async (t) => { + const explains = [] + const { log, logs } = await mockDisplay(t, { + mocks: { + '{LIB}/utils/explain-eresolve.js': { + explain: (...args) => { + explains.push(args) + return 'EXPLAIN' + }, + }, + }, + }) - display.load({ color: true }) - t.equal(log.useColor(), true) + log.warn('ERESOLVE', 'hello', { some: 'object' }) + t.strictSame(logs.warn, ['ERESOLVE hello', 'EXPLAIN']) + t.match(explains, [[{ some: 'object' }, Function, 2]]) +}) - display.load({ unicode: true }) - t.equal(log.gauge._theme.hasUnicode, true) +t.test('can buffer output when paused', async t => { + const { displayLoad, outputs, output } = await mockDisplay(t, { + load: false, + }) - display.load({ unicode: false }) - t.equal(log.gauge._theme.hasUnicode, false) + output.buffer('Message 1') + output.standard('Message 2') - mockGlobals(t, { 'process.stderr.isTTY': true }) - display.load({ progress: true }) - t.equal(log.progressEnabled, true) + t.strictSame(outputs, []) + await displayLoad() + t.strictSame(outputs, ['Message 1', 'Message 2']) }) -t.test('can log cleanly', async (t) => { - const explains = [] - const { display, logs } = mockDisplay(t, { - npmlog: { - error: (...args) => logs.push(['error', ...args]), - warn: (...args) => logs.push(['warn', ...args]), - }, - '{LIB}/utils/explain-eresolve.js': { - explain: (...args) => { - explains.push(args) - return 'explanation' - }, +t.test('can do progress', async (t) => { + const { log, logs, outputs, outputErrors, output, input } = await mockDisplay(t, { + load: { + progress: true, }, }) - display.log('error', 'test\x00message') - t.match(logs.error, [['test^@message']]) + // wait for initial timer interval to load + await timers.setTimeout(200) + + log.error('', 'before input') + output.standard('before input') - display.log('warn', 'ERESOLVE', 'hello', { some: 'object' }) - t.match(logs.warn, [['ERESOLVE', 'hello']]) - 
t.match(explains, [[{ some: 'object' }, null, 2]]) + const end = input.start() + log.error('', 'during input') + output.standard('during input') + end() + + // wait long enough for all spinner frames to render + await timers.setTimeout(800) + log.error('', 'after input') + output.standard('after input') + + t.strictSame([...new Set(outputErrors)].sort(), ['-', '/', '\\', '|']) + t.strictSame(logs, ['error before input', 'error during input', 'error after input']) + t.strictSame(outputs, ['before input', 'during input', 'after input']) }) t.test('handles log throwing', async (t) => { + class ThrowInspect { + #crashes = 0; + + [inspect.custom] () { + throw new Error(`Crashed ${++this.#crashes}`) + } + } + const errors = [] - mockGlobals(t, { - 'console.error': (...args) => errors.push(args), - }) - const { display } = mockDisplay(t, { - npmlog: { - verbose: () => { - throw new Error('verbose') - }, - }, - '{LIB}/utils/explain-eresolve.js': { - explain: () => { - throw new Error('explain') - }, - }, - }) + mockGlobals(t, { 'console.error': (...msg) => errors.push(msg) }) - display.log('warn', 'ERESOLVE', 'hello', { some: 'object' }) - t.match(errors, [ - [/attempt to log .* crashed/, Error('explain'), Error('verbose')], + const { log, logs } = await mockDisplay(t) + + log.error('woah', new ThrowInspect()) + + t.strictSame(logs.error, []) + t.equal(errors.length, 1) + t.match(errors[0], [ + 'attempt to log crashed', + new Error('Crashed 1'), + new Error('Crashed 2'), ]) }) -class CustomObj { - [util.inspect.custom] () { - return this.inspected - } -} +t.test('incorrect levels', async t => { + const { outputs } = await mockDisplay(t) + process.emit('output', 'not a real level') + t.strictSame(outputs, [], 'output is ignored') +}) t.test('Display.clean', async (t) => { - const Display = require('../../../lib/utils/display') - const customNaN = new CustomObj() - const customNull = new CustomObj() - const customNumber = new CustomObj() - const customObject = new CustomObj() - const customString = new CustomObj() - const customUndefined = new CustomObj() - customNaN.inspected = NaN - customNull.inspected = null - customNumber.inspected = 477 - customObject.inspected = { custom: 'rend\x00ering' } - customString.inspected = 'custom\x00rendering' - customUndefined.inspected = undefined - t.test('strings', async (t) => { - const tests = [ - [477, '477'], - [null, 'null'], - [NaN, 'NaN'], - [true, 'true'], - [undefined, 'undefined'], - ['🚀', '🚀'], - // Cover the bounds of each range and a few characters from inside each range - // \x00 through \x1f - ['hello\x00world', 'hello^@world'], - ['hello\x07world', 'hello^Gworld'], - ['hello\x1bworld', 'hello^[world'], - ['hello\x1eworld', 'hello^^world'], - ['hello\x1fworld', 'hello^_world'], - // \x7f is C0 - ['hello\x7fworld', 'hello^?world'], - // \x80 through \x9f - ['hello\x80world', 'hello^@world'], - ['hello\x87world', 'hello^Gworld'], - ['hello\x9eworld', 'hello^^world'], - ['hello\x9fworld', 'hello^_world'], - // Allowed C0 - ['hello\tworld', 'hello\tworld'], - ['hello\nworld', 'hello\nworld'], - ['hello\vworld', 'hello\vworld'], - ['hello\rworld', 'hello\rworld'], - // Allowed SGR - ['hello\x1b[38;5;254mworld', 'hello\x1b[38;5;254mworld'], - ['hello\x1b[mworld', 'hello\x1b[mworld'], - // Unallowed CSI / OSC - ['hello\x1b[2Aworld', 'hello^[[2Aworld'], - ['hello\x9b[2Aworld', 'hello^[[2Aworld'], - ['hello\x9decho goodbye\x9cworld', 'hello^]echo goodbye^\\world'], - // This is done twice to ensure we define inspect.custom as writable - [{ test: 'object' 
}, "{ test: 'object' }"], - [{ test: 'object' }, "{ test: 'object' }"], - // Make sure custom util.inspect doesn't bypass our cleaning - [customNaN, 'NaN'], - [customNull, 'null'], - [customNumber, '477'], - [customObject, "{ custom: 'rend\\x00ering' }"], - [customString, 'custom^@rendering'], - [customUndefined, 'undefined'], - // UTF-16 form of 8-bit C1 - ['hello\xc2\x9bworld', 'hello\xc2^[world'], - ] - for (const [dirty, clean] of tests) { - const cleaned = Display.clean(dirty) - t.equal(util.format(cleaned), clean) + const { output, outputs, clearOutput } = await mockDisplay(t) + + class CustomObj { + #inspected + + constructor (val) { + this.#inspected = val } - }) + + [inspect.custom] () { + return this.#inspected + } + } + + const tests = [ + [477, '477'], + [null, 'null'], + [NaN, 'NaN'], + [true, 'true'], + [undefined, 'undefined'], + ['🚀', '🚀'], + // Cover the bounds of each range and a few characters from inside each range + // \x00 through \x1f + ['hello\x00world', 'hello^@world'], + ['hello\x07world', 'hello^Gworld'], + ['hello\x1bworld', 'hello^[world'], + ['hello\x1eworld', 'hello^^world'], + ['hello\x1fworld', 'hello^_world'], + // \x7f is C0 + ['hello\x7fworld', 'hello^?world'], + // \x80 through \x9f + ['hello\x80world', 'hello^@world'], + ['hello\x87world', 'hello^Gworld'], + ['hello\x9eworld', 'hello^^world'], + ['hello\x9fworld', 'hello^_world'], + // Allowed C0 + ['hello\tworld', 'hello\tworld'], + ['hello\nworld', 'hello\nworld'], + ['hello\vworld', 'hello\vworld'], + ['hello\rworld', 'hello\rworld'], + // Allowed SGR + ['hello\x1b[38;5;254mworld', 'hello\x1b[38;5;254mworld'], + ['hello\x1b[mworld', 'hello\x1b[mworld'], + // Unallowed CSI / OSC + ['hello\x1b[2Aworld', 'hello^[[2Aworld'], + ['hello\x9b[2Aworld', 'hello^[[2Aworld'], + ['hello\x9decho goodbye\x9cworld', 'hello^]echo goodbye^\\world'], + // This is done twice to ensure we define inspect.custom as writable + [{ test: 'object' }, "{ test: 'object' }"], + // Make sure custom util.inspect doesn't bypass our cleaning + [new CustomObj(NaN), 'NaN'], + [new CustomObj(null), 'null'], + [new CustomObj(477), '477'], + [new CustomObj({ custom: 'rend\x00ering' }), "{ custom: 'rend\\x00ering' }"], + [new CustomObj('custom\x00rendering'), 'custom^@rendering'], + [new CustomObj(undefined), 'undefined'], + // UTF-16 form of 8-bit C1 + ['hello\xc2\x9bworld', 'hello\xc2^[world'], + ] + + for (const [dirty, clean] of tests) { + output.standard(dirty) + t.equal(outputs[0], clean) + clearOutput() + } }) diff --git a/deps/npm/test/lib/workspaces/get-workspaces.js b/deps/npm/test/lib/utils/get-workspaces.js similarity index 98% rename from deps/npm/test/lib/workspaces/get-workspaces.js rename to deps/npm/test/lib/utils/get-workspaces.js index 4e48b1c4b7707e..931f90d13046fd 100644 --- a/deps/npm/test/lib/workspaces/get-workspaces.js +++ b/deps/npm/test/lib/utils/get-workspaces.js @@ -1,6 +1,6 @@ const { resolve } = require('path') const t = require('tap') -const getWorkspaces = require('../../../lib/workspaces/get-workspaces.js') +const getWorkspaces = require('../../../lib/utils/get-workspaces.js') const normalizePath = p => p .replace(/\\+/g, '/') diff --git a/deps/npm/test/lib/utils/completion/installed-deep.js b/deps/npm/test/lib/utils/installed-deep.js similarity index 96% rename from deps/npm/test/lib/utils/completion/installed-deep.js rename to deps/npm/test/lib/utils/installed-deep.js index 0af26861ff83a5..20e001aaec751e 100644 --- a/deps/npm/test/lib/utils/completion/installed-deep.js +++ 
b/deps/npm/test/lib/utils/installed-deep.js @@ -1,6 +1,6 @@ const t = require('tap') -const installedDeep = require('../../../../lib/utils/completion/installed-deep.js') -const mockNpm = require('../../../fixtures/mock-npm') +const installedDeep = require('../../../lib/utils/installed-deep.js') +const mockNpm = require('../../fixtures/mock-npm') const fixture = { 'package.json': JSON.stringify({ diff --git a/deps/npm/test/lib/utils/completion/installed-shallow.js b/deps/npm/test/lib/utils/installed-shallow.js similarity index 89% rename from deps/npm/test/lib/utils/completion/installed-shallow.js rename to deps/npm/test/lib/utils/installed-shallow.js index 3666803979cb38..67b49292a64c78 100644 --- a/deps/npm/test/lib/utils/completion/installed-shallow.js +++ b/deps/npm/test/lib/utils/installed-shallow.js @@ -1,6 +1,6 @@ const t = require('tap') -const installed = require('../../../../lib/utils/completion/installed-shallow.js') -const mockNpm = require('../../../fixtures/mock-npm') +const installed = require('../../../lib/utils/installed-shallow.js') +const mockNpm = require('../../fixtures/mock-npm') const mockShallow = async (t, config) => { const res = await mockNpm(t, { diff --git a/deps/npm/test/lib/utils/log-file.js b/deps/npm/test/lib/utils/log-file.js index f34dda8f524337..8e07da8671b6ad 100644 --- a/deps/npm/test/lib/utils/log-file.js +++ b/deps/npm/test/lib/utils/log-file.js @@ -46,6 +46,8 @@ const loadLogFile = async (t, { buffer = [], mocks, testdir = {}, ...options } = const MockLogFile = tmock(t, '{LIB}/utils/log-file.js', mocks) const logFile = new MockLogFile(Object.keys(options).length ? options : undefined) + // Create a fake public method since there is not one on logFile anymore + logFile.log = (...b) => process.emit('log', ...b) buffer.forEach((b) => logFile.log(...b)) const id = getId() @@ -165,7 +167,7 @@ t.test('initial stream error', async t => { mocks: { 'fs-minipass': { WriteStreamSync: class { - constructor (...args) { + constructor () { throw new Error('no stream') } }, diff --git a/deps/npm/test/lib/utils/log-shim.js b/deps/npm/test/lib/utils/log-shim.js deleted file mode 100644 index 7c8fb7ce3c9569..00000000000000 --- a/deps/npm/test/lib/utils/log-shim.js +++ /dev/null @@ -1,101 +0,0 @@ -const t = require('tap') -const tmock = require('../../fixtures/tmock') - -const makeShim = (mocks) => tmock(t, '{LIB}/utils/log-shim.js', mocks) - -const loggers = [ - 'notice', - 'error', - 'warn', - 'info', - 'verbose', - 'http', - 'silly', - 'pause', - 'resume', -] - -t.test('has properties', (t) => { - const shim = makeShim() - - t.match(shim, { - level: String, - levels: {}, - gauge: {}, - stream: {}, - heading: undefined, - enableColor: Function, - disableColor: Function, - enableUnicode: Function, - disableUnicode: Function, - enableProgress: Function, - disableProgress: Function, - ...loggers.reduce((acc, l) => { - acc[l] = Function - return acc - }, {}), - }) - - t.match(Object.keys(shim).sort(), [ - 'level', - 'heading', - 'levels', - 'gauge', - 'stream', - 'tracker', - 'useColor', - 'enableColor', - 'disableColor', - 'enableUnicode', - 'disableUnicode', - 'enableProgress', - 'disableProgress', - 'progressEnabled', - 'clearProgress', - 'showProgress', - 'newItem', - 'newGroup', - ...loggers, - ].sort()) - - t.end() -}) - -t.test('works with npmlog/proclog proxy', t => { - const procLog = { silly: () => 'SILLY' } - const npmlog = { level: 'woo', enableColor: () => true } - const shim = makeShim({ npmlog, 'proc-log': procLog }) - - t.equal(shim.level, 'woo', 'can get 
a property') - - npmlog.level = 'hey' - t.strictSame( - [shim.level, npmlog.level], - ['hey', 'hey'], - 'can get a property after update on npmlog' - ) - - shim.level = 'test' - t.strictSame( - [shim.level, npmlog.level], - ['test', 'test'], - 'can get a property after update on shim' - ) - - t.ok(shim.enableColor(), 'can call method on shim to call npmlog') - t.equal(shim.silly(), 'SILLY', 'can call method on proclog') - t.notOk(shim.LEVELS, 'only includes levels from npmlog') - t.throws(() => shim.gauge = 100, 'cant set getters properies') - - t.end() -}) - -t.test('works with npmlog/proclog proxy', t => { - const shim = makeShim() - - loggers.forEach((k) => { - t.doesNotThrow(() => shim[k]('test')) - }) - - t.end() -}) diff --git a/deps/npm/test/lib/utils/otplease.js b/deps/npm/test/lib/utils/otplease.js index d788c39da842c5..6dc3ee0f0b069d 100644 --- a/deps/npm/test/lib/utils/otplease.js +++ b/deps/npm/test/lib/utils/otplease.js @@ -119,7 +119,7 @@ t.test('prompts for otp for 401', async (t) => { }) t.test('does not prompt for non-otp errors', async (t) => { - const fn = async (opts) => { + const fn = async () => { throw new Error('nope') } @@ -132,7 +132,7 @@ t.test('does not prompt for non-otp errors', async (t) => { }) t.test('does not prompt if stdin or stdout is not a tty', async (t) => { - const fn = async (opts) => { + const fn = async () => { throw Object.assign(new Error('nope'), { code: 'EOTP' }) } diff --git a/deps/npm/test/lib/utils/pulse-till-done.js b/deps/npm/test/lib/utils/pulse-till-done.js deleted file mode 100644 index 3b3f4b2f2253ef..00000000000000 --- a/deps/npm/test/lib/utils/pulse-till-done.js +++ /dev/null @@ -1,35 +0,0 @@ -const t = require('tap') -const tmock = require('../../fixtures/tmock') - -let pulseStarted = null - -const pulseTillDone = tmock(t, '{LIB}/utils/pulse-till-done.js', { - npmlog: { - gauge: { - pulse: () => { - if (pulseStarted) { - pulseStarted() - } - }, - }, - }, -}) - -t.test('pulses (with promise)', async (t) => { - t.teardown(() => { - pulseStarted = null - }) - - let resolver - const promise = new Promise(resolve => { - resolver = resolve - }) - - const result = pulseTillDone.withPromise(promise) - // wait until the gauge has fired at least once - await new Promise(resolve => { - pulseStarted = resolve - }) - resolver('value') - t.resolveMatch(result, 'value', 'returned the resolved promise') -}) diff --git a/deps/npm/test/lib/utils/read-user-info.js b/deps/npm/test/lib/utils/read-user-info.js index a1c2f980cf745b..35628f7f2faac5 100644 --- a/deps/npm/test/lib/utils/read-user-info.js +++ b/deps/npm/test/lib/utils/read-user-info.js @@ -1,41 +1,45 @@ const t = require('tap') +const procLog = require('proc-log') const tmock = require('../../fixtures/tmock') let readOpts = null let readResult = null -const read = { read: async (opts) => { - readOpts = opts - return readResult -} } - -const npmUserValidate = { - username: (username) => { - if (username === 'invalid') { - return new Error('invalid username') - } - - return null - }, - email: (email) => { - if (email.startsWith('invalid')) { - return new Error('invalid email') - } - - return null - }, -} - let logMsg = null + const readUserInfo = tmock(t, '{LIB}/utils/read-user-info.js', { - read, - npmlog: { - clearProgress: () => {}, - showProgress: () => {}, + read: { + read: async (opts) => { + readOpts = opts + return readResult + }, }, 'proc-log': { - warn: (msg) => logMsg = msg, + ...procLog, + log: { + ...procLog.log, + warn: (msg) => logMsg = msg, + }, + input: { + ...procLog.input, 
+ read: (fn) => fn(), + }, + }, + 'npm-user-validate': { + username: (username) => { + if (username === 'invalid') { + return new Error('invalid username') + } + + return null + }, + email: (email) => { + if (email.startsWith('invalid')) { + return new Error('invalid email') + } + + return null + }, }, - 'npm-user-validate': npmUserValidate, }) t.beforeEach(() => { diff --git a/deps/npm/test/lib/utils/reify-output.js b/deps/npm/test/lib/utils/reify-output.js index fd15e25a749842..205b7baf421f79 100644 --- a/deps/npm/test/lib/utils/reify-output.js +++ b/deps/npm/test/lib/utils/reify-output.js @@ -8,7 +8,18 @@ const mockReify = async (t, reify, { command, ...config } = {}) => { const mock = await mockNpm(t, { command, config, - setCmd: true, + }) + + // Hack to adapt existing fake test. Make npm.command + // return whatever was passed in to this function. + // What it should be doing is npm.exec(command) but that + // breaks most of these tests because they dont expect + // a command to actually run. + Object.defineProperty(mock.npm, 'command', { + get () { + return command + }, + enumerable: true, }) reifyOutput(mock.npm, reify) diff --git a/deps/npm/test/lib/utils/tar.js b/deps/npm/test/lib/utils/tar.js index 274bad95c0af3f..45ba720ac54edb 100644 --- a/deps/npm/test/lib/utils/tar.js +++ b/deps/npm/test/lib/utils/tar.js @@ -9,14 +9,16 @@ t.cleanSnapshot = data => cleanZlib(data) const mockTar = ({ notice }) => tmock(t, '{LIB}/utils/tar.js', { 'proc-log': { - notice, + log: { + notice, + }, }, }) const printLogs = (tarball, options) => { const logs = [] const { logTar } = mockTar({ - notice: (...args) => args.map(el => logs.push(el)), + notice: (...args) => logs.push(...args), }) logTar(tarball, options) return logs.join('\n') diff --git a/deps/npm/test/lib/utils/timers.js b/deps/npm/test/lib/utils/timers.js index 74df6c28cd361c..4e5bfb104db975 100644 --- a/deps/npm/test/lib/utils/timers.js +++ b/deps/npm/test/lib/utils/timers.js @@ -1,86 +1,58 @@ const t = require('tap') const { resolve, join } = require('path') const fs = require('graceful-fs') -const mockLogs = require('../../fixtures/mock-logs') +const { log, time } = require('proc-log') const tmock = require('../../fixtures/tmock') -const mockTimers = (t, options) => { - const { logs, logMocks } = mockLogs() - const Timers = tmock(t, '{LIB}/utils/timers', { - ...logMocks, +const mockTimers = (t) => { + const logs = log.LEVELS.reduce((acc, l) => { + acc[l] = [] + return acc + }, {}) + const logHandler = (level, ...args) => { + logs[level].push(args.join(' ')) + } + process.on('log', logHandler) + const Timers = tmock(t, '{LIB}/utils/timers') + const timers = new Timers() + t.teardown(() => { + timers.off() + process.off('log', logHandler) }) - const timers = new Timers(options) - t.teardown(() => timers.off()) return { timers, logs } } -t.test('getters', async (t) => { - const { timers } = mockTimers(t) - t.match(timers.unfinished, new Map()) - t.match(timers.finished, {}) -}) - -t.test('listens/stops on process', async (t) => { - const { timers } = mockTimers(t) - process.emit('time', 'foo') - process.emit('time', 'bar') - process.emit('timeEnd', 'bar') - t.match(timers.unfinished, new Map([['foo', Number]])) - t.match(timers.finished, { bar: Number }) +t.test('logs timing events', async (t) => { + const { timers, logs } = mockTimers(t) + time.start('foo') + time.start('bar') + time.end('bar') timers.off() - process.emit('time', 'baz') - t.notOk(timers.unfinished.get('baz')) -}) - -t.test('convenience time method', async (t) => { - 
const { timers } = mockTimers(t) - - const end = timers.time('later') - timers.time('sync', () => {}) - await timers.time('async', () => new Promise(r => setTimeout(r, 10))) - end() - - t.match(timers.finished, { later: Number, sync: Number, async: Number }) -}) - -t.test('initial timer', async (t) => { - const { timers } = mockTimers(t, { start: 'foo' }) - process.emit('timeEnd', 'foo') - t.match(timers.finished, { foo: Number }) -}) - -t.test('initial listener', async (t) => { - const events = [] - const listener = (...args) => events.push(args) - const { timers } = mockTimers(t, { listener }) - process.emit('time', 'foo') - process.emit('time', 'bar') - process.emit('timeEnd', 'bar') - timers.off(listener) - process.emit('timeEnd', 'foo') - t.equal(events.length, 1) - t.match(events, [['bar', Number]]) + time.end('foo') + t.equal(logs.timing.length, 1) + t.match(logs.timing[0], /^bar Completed in [0-9]ms/) }) t.test('finish unstarted timer', async (t) => { const { logs } = mockTimers(t) - process.emit('timeEnd', 'foo') - t.match(logs.silly, [['timing', /^Tried to end timer/, 'foo']]) + time.end('foo') + t.match(logs.silly, ["timing Tried to end timer that doesn't exist: foo"]) }) t.test('writes file', async (t) => { const { timers } = mockTimers(t) const dir = t.testdir() - process.emit('time', 'foo') - process.emit('timeEnd', 'foo') - timers.load({ path: resolve(dir, `TIMING_FILE-`) }) - timers.writeFile({ some: 'data' }) + time.start('foo') + time.end('foo') + time.start('ohno') + timers.load({ timing: true, path: resolve(dir, `TIMING_FILE-`) }) + timers.finish({ some: 'data' }) const data = JSON.parse(fs.readFileSync(resolve(dir, 'TIMING_FILE-timing.json'))) t.match(data, { metadata: { some: 'data' }, - timers: { foo: Number }, + timers: { foo: Number, npm: Number }, unfinishedTimers: { - npm: [Number, Number], + ohno: [Number, Number], }, }) }) @@ -89,19 +61,18 @@ t.test('fails to write file', async (t) => { const { logs, timers } = mockTimers(t) const dir = t.testdir() - timers.load({ path: join(dir, 'does', 'not', 'exist') }) - timers.writeFile() + timers.load({ timing: true, path: join(dir, 'does', 'not', 'exist') }) + timers.finish() - t.match(logs.warn, [['timing', 'could not write timing file']]) - t.equal(timers.file, null) + t.match(logs.warn, ['timing could not write timing file:']) }) t.test('no dir and no file', async (t) => { const { logs, timers } = mockTimers(t) timers.load() - timers.writeFile() + timers.finish() - t.strictSame(logs, []) - t.equal(timers.file, null) + t.strictSame(logs.warn, []) + t.strictSame(logs.silly, []) }) diff --git a/deps/npm/test/lib/utils/web-auth.js b/deps/npm/test/lib/utils/web-auth.js index a4e8f4bbc755dc..ec8c1d17e9fa1f 100644 --- a/deps/npm/test/lib/utils/web-auth.js +++ b/deps/npm/test/lib/utils/web-auth.js @@ -21,8 +21,8 @@ t.test('returns token on success', async (t) => { t.test('closes opener when auth check finishes', async (t) => { const opener = (_url, emitter) => { - return new Promise((resolve, reject) => { - // the only way to finish this promise is to emit aboter on the emitter + return new Promise((resolve) => { + // the only way to finish this promise is to emit abort on the emitter emitter.addListener('abort', () => { resolve() }) From 14e857bea293667d35eea91f9dc51f385d0b5f91 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Tue, 23 Apr 2024 03:48:14 +0300 Subject: [PATCH 31/41] deps: update corepack to 0.28.0 PR-URL: https://github.com/nodejs/node/pull/52616 Reviewed-By: Antoine du Hamel Reviewed-By: Marco 
Ippolito Reviewed-By: James M Snell Reviewed-By: Rafael Gonzaga --- deps/corepack/CHANGELOG.md | 35 + deps/corepack/README.md | 7 +- deps/corepack/dist/lib/corepack.cjs | 15867 +++++++++++++------------- deps/corepack/package.json | 9 +- 4 files changed, 7911 insertions(+), 8007 deletions(-) diff --git a/deps/corepack/CHANGELOG.md b/deps/corepack/CHANGELOG.md index b17f502afde01f..f5cef27cc047f9 100644 --- a/deps/corepack/CHANGELOG.md +++ b/deps/corepack/CHANGELOG.md @@ -1,5 +1,40 @@ # Changelog +## [0.28.0](https://github.com/nodejs/corepack/compare/v0.27.0...v0.28.0) (2024-04-20) + + +### ⚠ BREAKING CHANGES + +* call `executePackageManagerRequest` directly ([#430](https://github.com/nodejs/corepack/issues/430)) + +### Bug Fixes + +* call `executePackageManagerRequest` directly ([#430](https://github.com/nodejs/corepack/issues/430)) ([0f9b748](https://github.com/nodejs/corepack/commit/0f9b74864048d5dc150a63cc582966af0c5f363f)) + +## [0.27.0](https://github.com/nodejs/corepack/compare/v0.26.0...v0.27.0) (2024-04-19) + + +### ⚠ BREAKING CHANGES + +* attempting to download a version from the npm registry (or a mirror) that was published using the now deprecated PGP signature without providing a hash will trigger an error. Users can disable the signature verification using a environment variable. + +### Features + +* separate read and write operations on lastKnownGood.json ([#446](https://github.com/nodejs/corepack/issues/446)) ([c449adc](https://github.com/nodejs/corepack/commit/c449adc81822a604ee8f00ae2b53fc411535f96d)) +* update package manager versions ([#425](https://github.com/nodejs/corepack/issues/425)) ([1423190](https://github.com/nodejs/corepack/commit/142319056424b1e0da2bdbe801c52c5910023707)) +* update package manager versions ([#462](https://github.com/nodejs/corepack/issues/462)) ([56816c2](https://github.com/nodejs/corepack/commit/56816c2b7ebc9926f07048b0ec4ff6025bb4e293)) +* verify integrity signature when downloading from npm registry ([#432](https://github.com/nodejs/corepack/issues/432)) ([e561dd0](https://github.com/nodejs/corepack/commit/e561dd00bbacc5bc15a492fc36574fa0e37bff7b)) + + +### Bug Fixes + +* add path to `package.json` in error message ([#456](https://github.com/nodejs/corepack/issues/456)) ([32a93ea](https://github.com/nodejs/corepack/commit/32a93ea4f51eb7db7dc95a16c5719695edf4b53e)) +* correctly set `Dispatcher` prototype for `ProxyAgent` ([#451](https://github.com/nodejs/corepack/issues/451)) ([73d9a1e](https://github.com/nodejs/corepack/commit/73d9a1e2d2f84906bf01952f1dca8adab576b7bf)) +* download fewer metadata from npm registry ([#436](https://github.com/nodejs/corepack/issues/436)) ([082fabf](https://github.com/nodejs/corepack/commit/082fabf8b15658e69e4fb62bb854fe9aace78b70)) +* hash check when downloading Yarn Berry from npm ([#439](https://github.com/nodejs/corepack/issues/439)) ([4672162](https://github.com/nodejs/corepack/commit/467216281e1719a739d0eeea370b335adfb37b8d)) +* Incorrect authorization prefix for basic auth, and undocumented env var ([#454](https://github.com/nodejs/corepack/issues/454)) ([2d63536](https://github.com/nodejs/corepack/commit/2d63536413971d43f570deb035845aa0bd5202f0)) +* re-add support for custom registries with auth ([#397](https://github.com/nodejs/corepack/issues/397)) ([d267753](https://github.com/nodejs/corepack/commit/d2677538cdb613fcab6d2a45bb07f349bdc65c2b)) + ## [0.26.0](https://github.com/nodejs/corepack/compare/v0.25.2...v0.26.0) (2024-03-08) diff --git a/deps/corepack/README.md b/deps/corepack/README.md index 
4d65c2b1a6c3f3..42dc8c19085e88 100644 --- a/deps/corepack/README.md +++ b/deps/corepack/README.md @@ -1,5 +1,7 @@ # corepack +[![Join us on OpenJS slack (channel #nodejs-corepack)](https://img.shields.io/badge/OpenJS%20Slack-%23nodejs--corepack-blue)](https://slack-invite.openjsf.org/) + Corepack is a zero-runtime-dependency Node.js script that acts as a bridge between Node.js projects and the package managers they are intended to be used with during development. In practical terms, **Corepack lets you use Yarn, npm, @@ -54,7 +56,7 @@ projects, `pnpm install` in pnpm projects, and `npm` in npm projects. Corepack will catch these calls, and depending on the situation: - **If the local project is configured for the package manager you're using**, - Corepack will silently download and cache the latest compatible version. + Corepack will download and cache the latest compatible version. - **If the local project is configured for a different package manager**, Corepack will request you to run the command again using the right package @@ -294,6 +296,9 @@ same major line. Should you need to upgrade to a new major, use an explicit - `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` are supported through [`node-proxy-agent`](https://github.com/TooTallNate/node-proxy-agent). +- `COREPACK_INTEGRITY_KEYS` can be set to an empty string to instruct Corepack + to skip integrity checks, or a JSON string containing custom keys. + ## Troubleshooting ### Networking diff --git a/deps/corepack/dist/lib/corepack.cjs b/deps/corepack/dist/lib/corepack.cjs index 7b378339d0577f..f81b561ce54d87 100644 --- a/deps/corepack/dist/lib/corepack.cjs +++ b/deps/corepack/dist/lib/corepack.cjs @@ -1037,9 +1037,9 @@ var init_lib = __esm({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/internal/constants.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/internal/constants.js var require_constants = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/internal/constants.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/internal/constants.js"(exports, module2) { var SEMVER_SPEC_VERSION = "2.0.0"; var MAX_LENGTH = 256; var MAX_SAFE_INTEGER = Number.MAX_SAFE_INTEGER || /* istanbul ignore next */ @@ -1068,18 +1068,18 @@ var require_constants = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/internal/debug.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/internal/debug.js var require_debug = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/internal/debug.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/internal/debug.js"(exports, module2) { var debug2 = typeof process === "object" && process.env && process.env.NODE_DEBUG && /\bsemver\b/i.test(process.env.NODE_DEBUG) ? 
(...args) => console.error("SEMVER", ...args) : () => { }; module2.exports = debug2; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/internal/re.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/internal/re.js var require_re = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/internal/re.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/internal/re.js"(exports, module2) { var { MAX_SAFE_COMPONENT_LENGTH, MAX_SAFE_BUILD_LENGTH, @@ -1135,8 +1135,11 @@ var require_re = __commonJS({ createToken("XRANGEPLAINLOOSE", `[v=\\s]*(${src[t.XRANGEIDENTIFIERLOOSE]})(?:\\.(${src[t.XRANGEIDENTIFIERLOOSE]})(?:\\.(${src[t.XRANGEIDENTIFIERLOOSE]})(?:${src[t.PRERELEASELOOSE]})?${src[t.BUILD]}?)?)?`); createToken("XRANGE", `^${src[t.GTLT]}\\s*${src[t.XRANGEPLAIN]}$`); createToken("XRANGELOOSE", `^${src[t.GTLT]}\\s*${src[t.XRANGEPLAINLOOSE]}$`); - createToken("COERCE", `${"(^|[^\\d])(\\d{1,"}${MAX_SAFE_COMPONENT_LENGTH}})(?:\\.(\\d{1,${MAX_SAFE_COMPONENT_LENGTH}}))?(?:\\.(\\d{1,${MAX_SAFE_COMPONENT_LENGTH}}))?(?:$|[^\\d])`); + createToken("COERCEPLAIN", `${"(^|[^\\d])(\\d{1,"}${MAX_SAFE_COMPONENT_LENGTH}})(?:\\.(\\d{1,${MAX_SAFE_COMPONENT_LENGTH}}))?(?:\\.(\\d{1,${MAX_SAFE_COMPONENT_LENGTH}}))?`); + createToken("COERCE", `${src[t.COERCEPLAIN]}(?:$|[^\\d])`); + createToken("COERCEFULL", src[t.COERCEPLAIN] + `(?:${src[t.PRERELEASE]})?(?:${src[t.BUILD]})?(?:$|[^\\d])`); createToken("COERCERTL", src[t.COERCE], true); + createToken("COERCERTLFULL", src[t.COERCEFULL], true); createToken("LONETILDE", "(?:~>?)"); createToken("TILDETRIM", `(\\s*)${src[t.LONETILDE]}\\s+`, true); exports.tildeTrimReplace = "$1~"; @@ -1159,9 +1162,9 @@ var require_re = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/internal/parse-options.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/internal/parse-options.js var require_parse_options = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/internal/parse-options.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/internal/parse-options.js"(exports, module2) { var looseOption = Object.freeze({ loose: true }); var emptyOpts = Object.freeze({}); var parseOptions = (options) => { @@ -1177,9 +1180,9 @@ var require_parse_options = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/internal/identifiers.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/internal/identifiers.js var require_identifiers = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/internal/identifiers.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/internal/identifiers.js"(exports, module2) { var numeric = /^[0-9]+$/; var compareIdentifiers = (a, b) => { const anum = numeric.test(a); @@ -1198,9 +1201,9 @@ var require_identifiers = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/classes/semver.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/classes/semver.js var require_semver = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/classes/semver.js"(exports, module2) { + 
".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/classes/semver.js"(exports, module2) { var debug2 = require_debug(); var { MAX_LENGTH, MAX_SAFE_INTEGER } = require_constants(); var { safeRe: re, t } = require_re(); @@ -1440,9 +1443,9 @@ var require_semver = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/parse.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/parse.js var require_parse = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/parse.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/parse.js"(exports, module2) { var SemVer = require_semver(); var parse = (version2, options, throwErrors = false) => { if (version2 instanceof SemVer) { @@ -1461,9 +1464,9 @@ var require_parse = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/valid.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/valid.js var require_valid = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/valid.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/valid.js"(exports, module2) { var parse = require_parse(); var valid = (version2, options) => { const v = parse(version2, options); @@ -1473,9 +1476,9 @@ var require_valid = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/clean.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/clean.js var require_clean = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/clean.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/clean.js"(exports, module2) { var parse = require_parse(); var clean = (version2, options) => { const s = parse(version2.trim().replace(/^[=v]+/, ""), options); @@ -1485,9 +1488,9 @@ var require_clean = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/inc.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/inc.js var require_inc = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/inc.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/inc.js"(exports, module2) { var SemVer = require_semver(); var inc = (version2, release, options, identifier, identifierBase) => { if (typeof options === "string") { @@ -1508,9 +1511,9 @@ var require_inc = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/diff.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/diff.js var require_diff = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/diff.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/diff.js"(exports, module2) { var parse = require_parse(); var diff = (version1, version2) => { const v1 = parse(version1, null, true); @@ -1552,36 +1555,36 @@ var require_diff = __commonJS({ } }); -// 
.yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/major.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/major.js var require_major = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/major.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/major.js"(exports, module2) { var SemVer = require_semver(); var major = (a, loose) => new SemVer(a, loose).major; module2.exports = major; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/minor.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/minor.js var require_minor = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/minor.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/minor.js"(exports, module2) { var SemVer = require_semver(); var minor = (a, loose) => new SemVer(a, loose).minor; module2.exports = minor; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/patch.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/patch.js var require_patch = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/patch.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/patch.js"(exports, module2) { var SemVer = require_semver(); var patch = (a, loose) => new SemVer(a, loose).patch; module2.exports = patch; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/prerelease.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/prerelease.js var require_prerelease = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/prerelease.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/prerelease.js"(exports, module2) { var parse = require_parse(); var prerelease = (version2, options) => { const parsed = parse(version2, options); @@ -1591,36 +1594,36 @@ var require_prerelease = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/compare.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/compare.js var require_compare = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/compare.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/compare.js"(exports, module2) { var SemVer = require_semver(); var compare = (a, b, loose) => new SemVer(a, loose).compare(new SemVer(b, loose)); module2.exports = compare; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/rcompare.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/rcompare.js var require_rcompare = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/rcompare.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/rcompare.js"(exports, module2) { var compare = require_compare(); var 
rcompare = (a, b, loose) => compare(b, a, loose); module2.exports = rcompare; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/compare-loose.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/compare-loose.js var require_compare_loose = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/compare-loose.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/compare-loose.js"(exports, module2) { var compare = require_compare(); var compareLoose = (a, b) => compare(a, b, true); module2.exports = compareLoose; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/compare-build.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/compare-build.js var require_compare_build = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/compare-build.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/compare-build.js"(exports, module2) { var SemVer = require_semver(); var compareBuild = (a, b, loose) => { const versionA = new SemVer(a, loose); @@ -1631,81 +1634,81 @@ var require_compare_build = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/sort.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/sort.js var require_sort = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/sort.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/sort.js"(exports, module2) { var compareBuild = require_compare_build(); var sort = (list, loose) => list.sort((a, b) => compareBuild(a, b, loose)); module2.exports = sort; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/rsort.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/rsort.js var require_rsort = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/rsort.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/rsort.js"(exports, module2) { var compareBuild = require_compare_build(); var rsort = (list, loose) => list.sort((a, b) => compareBuild(b, a, loose)); module2.exports = rsort; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/gt.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/gt.js var require_gt = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/gt.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/gt.js"(exports, module2) { var compare = require_compare(); var gt = (a, b, loose) => compare(a, b, loose) > 0; module2.exports = gt; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/lt.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/lt.js var require_lt = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/lt.js"(exports, module2) { + 
".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/lt.js"(exports, module2) { var compare = require_compare(); var lt = (a, b, loose) => compare(a, b, loose) < 0; module2.exports = lt; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/eq.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/eq.js var require_eq = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/eq.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/eq.js"(exports, module2) { var compare = require_compare(); var eq = (a, b, loose) => compare(a, b, loose) === 0; module2.exports = eq; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/neq.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/neq.js var require_neq = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/neq.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/neq.js"(exports, module2) { var compare = require_compare(); var neq = (a, b, loose) => compare(a, b, loose) !== 0; module2.exports = neq; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/gte.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/gte.js var require_gte = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/gte.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/gte.js"(exports, module2) { var compare = require_compare(); var gte = (a, b, loose) => compare(a, b, loose) >= 0; module2.exports = gte; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/lte.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/lte.js var require_lte = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/lte.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/lte.js"(exports, module2) { var compare = require_compare(); var lte = (a, b, loose) => compare(a, b, loose) <= 0; module2.exports = lte; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/cmp.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/cmp.js var require_cmp = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/cmp.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/cmp.js"(exports, module2) { var eq = require_eq(); var neq = require_neq(); var gt = require_gt(); @@ -1752,9 +1755,9 @@ var require_cmp = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/coerce.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/coerce.js var require_coerce = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/coerce.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/coerce.js"(exports, 
module2) { var SemVer = require_semver(); var parse = require_parse(); var { safeRe: re, t } = require_re(); @@ -1771,21 +1774,27 @@ var require_coerce = __commonJS({ options = options || {}; let match = null; if (!options.rtl) { - match = version2.match(re[t.COERCE]); + match = version2.match(options.includePrerelease ? re[t.COERCEFULL] : re[t.COERCE]); } else { + const coerceRtlRegex = options.includePrerelease ? re[t.COERCERTLFULL] : re[t.COERCERTL]; let next; - while ((next = re[t.COERCERTL].exec(version2)) && (!match || match.index + match[0].length !== version2.length)) { + while ((next = coerceRtlRegex.exec(version2)) && (!match || match.index + match[0].length !== version2.length)) { if (!match || next.index + next[0].length !== match.index + match[0].length) { match = next; } - re[t.COERCERTL].lastIndex = next.index + next[1].length + next[2].length; + coerceRtlRegex.lastIndex = next.index + next[1].length + next[2].length; } - re[t.COERCERTL].lastIndex = -1; + coerceRtlRegex.lastIndex = -1; } if (match === null) { return null; } - return parse(`${match[2]}.${match[3] || "0"}.${match[4] || "0"}`, options); + const major = match[2]; + const minor = match[3] || "0"; + const patch = match[4] || "0"; + const prerelease = options.includePrerelease && match[5] ? `-${match[5]}` : ""; + const build = options.includePrerelease && match[6] ? `+${match[6]}` : ""; + return parse(`${major}.${minor}.${patch}${prerelease}${build}`, options); }; module2.exports = coerce; } @@ -2444,9 +2453,9 @@ var require_lru_cache = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/classes/range.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/classes/range.js var require_range = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/classes/range.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/classes/range.js"(exports, module2) { var Range = class _Range { constructor(range, options) { options = parseOptions(options); @@ -2801,9 +2810,9 @@ var require_range = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/classes/comparator.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/classes/comparator.js var require_comparator = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/classes/comparator.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/classes/comparator.js"(exports, module2) { var ANY = Symbol("SemVer ANY"); var Comparator = class _Comparator { static get ANY() { @@ -2913,9 +2922,9 @@ var require_comparator = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/satisfies.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/satisfies.js var require_satisfies = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/functions/satisfies.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/functions/satisfies.js"(exports, module2) { var Range = require_range(); var satisfies = (version2, range, options) => { try { @@ -2929,18 +2938,18 @@ var require_satisfies = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/to-comparators.js +// 
.yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/to-comparators.js var require_to_comparators = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/to-comparators.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/to-comparators.js"(exports, module2) { var Range = require_range(); var toComparators = (range, options) => new Range(range, options).set.map((comp) => comp.map((c) => c.value).join(" ").trim().split(" ")); module2.exports = toComparators; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/max-satisfying.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/max-satisfying.js var require_max_satisfying = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/max-satisfying.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/max-satisfying.js"(exports, module2) { var SemVer = require_semver(); var Range = require_range(); var maxSatisfying = (versions, range, options) => { @@ -2966,9 +2975,9 @@ var require_max_satisfying = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/min-satisfying.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/min-satisfying.js var require_min_satisfying = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/min-satisfying.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/min-satisfying.js"(exports, module2) { var SemVer = require_semver(); var Range = require_range(); var minSatisfying = (versions, range, options) => { @@ -2994,9 +3003,9 @@ var require_min_satisfying = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/min-version.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/min-version.js var require_min_version = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/min-version.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/min-version.js"(exports, module2) { var SemVer = require_semver(); var Range = require_range(); var gt = require_gt(); @@ -3050,9 +3059,9 @@ var require_min_version = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/valid.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/valid.js var require_valid2 = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/valid.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/valid.js"(exports, module2) { var Range = require_range(); var validRange = (range, options) => { try { @@ -3065,9 +3074,9 @@ var require_valid2 = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/outside.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/outside.js var require_outside = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/outside.js"(exports, module2) { + 
".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/outside.js"(exports, module2) { var SemVer = require_semver(); var Comparator = require_comparator(); var { ANY } = Comparator; @@ -3133,27 +3142,27 @@ var require_outside = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/gtr.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/gtr.js var require_gtr = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/gtr.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/gtr.js"(exports, module2) { var outside = require_outside(); var gtr = (version2, range, options) => outside(version2, range, ">", options); module2.exports = gtr; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/ltr.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/ltr.js var require_ltr = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/ltr.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/ltr.js"(exports, module2) { var outside = require_outside(); var ltr = (version2, range, options) => outside(version2, range, "<", options); module2.exports = ltr; } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/intersects.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/intersects.js var require_intersects = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/intersects.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/intersects.js"(exports, module2) { var Range = require_range(); var intersects = (r1, r2, options) => { r1 = new Range(r1, options); @@ -3164,9 +3173,9 @@ var require_intersects = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/simplify.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/simplify.js var require_simplify = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/simplify.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/simplify.js"(exports, module2) { var satisfies = require_satisfies(); var compare = require_compare(); module2.exports = (versions, range, options) => { @@ -3213,9 +3222,9 @@ var require_simplify = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/subset.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/subset.js var require_subset = __commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/ranges/subset.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/ranges/subset.js"(exports, module2) { var Range = require_range(); var Comparator = require_comparator(); var { ANY } = Comparator; @@ -3375,9 +3384,9 @@ var require_subset = __commonJS({ } }); -// .yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/index.js +// .yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/index.js var require_semver2 = 
__commonJS({ - ".yarn/cache/semver-npm-7.5.4-c4ad957fcd-5160b06975.zip/node_modules/semver/index.js"(exports, module2) { + ".yarn/cache/semver-npm-7.6.0-f4630729f6-fbfe717094.zip/node_modules/semver/index.js"(exports, module2) { var internalRe = require_re(); var constants = require_constants(); var SemVer = require_semver(); @@ -4307,78 +4316,9 @@ var require_proxy_from_env = __commonJS({ } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/symbols.js -var require_symbols = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/symbols.js"(exports, module2) { - module2.exports = { - kClose: Symbol("close"), - kDestroy: Symbol("destroy"), - kDispatch: Symbol("dispatch"), - kUrl: Symbol("url"), - kWriting: Symbol("writing"), - kResuming: Symbol("resuming"), - kQueue: Symbol("queue"), - kConnect: Symbol("connect"), - kConnecting: Symbol("connecting"), - kHeadersList: Symbol("headers list"), - kKeepAliveDefaultTimeout: Symbol("default keep alive timeout"), - kKeepAliveMaxTimeout: Symbol("max keep alive timeout"), - kKeepAliveTimeoutThreshold: Symbol("keep alive timeout threshold"), - kKeepAliveTimeoutValue: Symbol("keep alive timeout"), - kKeepAlive: Symbol("keep alive"), - kHeadersTimeout: Symbol("headers timeout"), - kBodyTimeout: Symbol("body timeout"), - kServerName: Symbol("server name"), - kLocalAddress: Symbol("local address"), - kHost: Symbol("host"), - kNoRef: Symbol("no ref"), - kBodyUsed: Symbol("used"), - kRunning: Symbol("running"), - kBlocking: Symbol("blocking"), - kPending: Symbol("pending"), - kSize: Symbol("size"), - kBusy: Symbol("busy"), - kQueued: Symbol("queued"), - kFree: Symbol("free"), - kConnected: Symbol("connected"), - kClosed: Symbol("closed"), - kNeedDrain: Symbol("need drain"), - kReset: Symbol("reset"), - kDestroyed: Symbol.for("nodejs.stream.destroyed"), - kMaxHeadersSize: Symbol("max headers size"), - kRunningIdx: Symbol("running index"), - kPendingIdx: Symbol("pending index"), - kError: Symbol("error"), - kClients: Symbol("clients"), - kClient: Symbol("client"), - kParser: Symbol("parser"), - kOnDestroyed: Symbol("destroy callbacks"), - kPipelining: Symbol("pipelining"), - kSocket: Symbol("socket"), - kHostHeader: Symbol("host header"), - kConnector: Symbol("connector"), - kStrictContentLength: Symbol("strict content length"), - kMaxRedirections: Symbol("maxRedirections"), - kMaxRequests: Symbol("maxRequestsPerClient"), - kProxy: Symbol("proxy agent options"), - kCounter: Symbol("socket request counter"), - kInterceptors: Symbol("dispatch interceptors"), - kMaxResponseSize: Symbol("max response size"), - kHTTP2Session: Symbol("http2Session"), - kHTTP2SessionState: Symbol("http2Session state"), - kHTTP2BuildRequest: Symbol("http2 build request"), - kHTTP1BuildRequest: Symbol("http1 build request"), - kHTTP2CopyHeaders: Symbol("http2 copy headers"), - kHTTPConnVersion: Symbol("http connection version"), - kRetryHandlerDefaultRetry: Symbol("retry agent default retry"), - kConstruct: Symbol("constructable") - }; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/errors.js +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/errors.js var require_errors = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/errors.js"(exports, module2) { + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/errors.js"(exports, 
module2) { "use strict"; var UndiciError = class extends Error { constructor(message) { @@ -4554,6 +4494,15 @@ var require_errors = __commonJS({ this.headers = headers; } }; + var SecureProxyConnectionError = class extends UndiciError { + constructor(cause, message, options) { + super(message, { cause, ...options ?? {} }); + this.name = "SecureProxyConnectionError"; + this.message = message || "Secure Proxy Connection failed"; + this.code = "UND_ERR_PRX_TLS"; + this.cause = cause; + } + }; module2.exports = { AbortError, HTTPParserError, @@ -4575,4716 +4524,4094 @@ var require_errors = __commonJS({ ResponseContentLengthMismatchError, BalancedPoolMissingUpstreamError, ResponseExceededMaxSizeError, - RequestRetryError + RequestRetryError, + SecureProxyConnectionError }; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/dispatcher.js -var require_dispatcher = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/dispatcher.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/symbols.js +var require_symbols = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/symbols.js"(exports, module2) { + module2.exports = { + kClose: Symbol("close"), + kDestroy: Symbol("destroy"), + kDispatch: Symbol("dispatch"), + kUrl: Symbol("url"), + kWriting: Symbol("writing"), + kResuming: Symbol("resuming"), + kQueue: Symbol("queue"), + kConnect: Symbol("connect"), + kConnecting: Symbol("connecting"), + kHeadersList: Symbol("headers list"), + kKeepAliveDefaultTimeout: Symbol("default keep alive timeout"), + kKeepAliveMaxTimeout: Symbol("max keep alive timeout"), + kKeepAliveTimeoutThreshold: Symbol("keep alive timeout threshold"), + kKeepAliveTimeoutValue: Symbol("keep alive timeout"), + kKeepAlive: Symbol("keep alive"), + kHeadersTimeout: Symbol("headers timeout"), + kBodyTimeout: Symbol("body timeout"), + kServerName: Symbol("server name"), + kLocalAddress: Symbol("local address"), + kHost: Symbol("host"), + kNoRef: Symbol("no ref"), + kBodyUsed: Symbol("used"), + kRunning: Symbol("running"), + kBlocking: Symbol("blocking"), + kPending: Symbol("pending"), + kSize: Symbol("size"), + kBusy: Symbol("busy"), + kQueued: Symbol("queued"), + kFree: Symbol("free"), + kConnected: Symbol("connected"), + kClosed: Symbol("closed"), + kNeedDrain: Symbol("need drain"), + kReset: Symbol("reset"), + kDestroyed: Symbol.for("nodejs.stream.destroyed"), + kResume: Symbol("resume"), + kOnError: Symbol("on error"), + kMaxHeadersSize: Symbol("max headers size"), + kRunningIdx: Symbol("running index"), + kPendingIdx: Symbol("pending index"), + kError: Symbol("error"), + kClients: Symbol("clients"), + kClient: Symbol("client"), + kParser: Symbol("parser"), + kOnDestroyed: Symbol("destroy callbacks"), + kPipelining: Symbol("pipelining"), + kSocket: Symbol("socket"), + kHostHeader: Symbol("host header"), + kConnector: Symbol("connector"), + kStrictContentLength: Symbol("strict content length"), + kMaxRedirections: Symbol("maxRedirections"), + kMaxRequests: Symbol("maxRequestsPerClient"), + kProxy: Symbol("proxy agent options"), + kCounter: Symbol("socket request counter"), + kInterceptors: Symbol("dispatch interceptors"), + kMaxResponseSize: Symbol("max response size"), + kHTTP2Session: Symbol("http2Session"), + kHTTP2SessionState: Symbol("http2Session state"), + kRetryHandlerDefaultRetry: Symbol("retry agent default retry"), + kConstruct: 
Symbol("constructable"), + kListeners: Symbol("listeners"), + kHTTPContext: Symbol("http context"), + kMaxConcurrentStreams: Symbol("max concurrent streams") + }; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/constants.js +var require_constants2 = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/constants.js"(exports, module2) { "use strict"; - var EventEmitter = require("node:events"); - var Dispatcher = class extends EventEmitter { - dispatch() { - throw new Error("not implemented"); - } - close() { - throw new Error("not implemented"); - } - destroy() { - throw new Error("not implemented"); - } + var headerNameLowerCasedRecord = {}; + var wellknownHeaderNames = [ + "Accept", + "Accept-Encoding", + "Accept-Language", + "Accept-Ranges", + "Access-Control-Allow-Credentials", + "Access-Control-Allow-Headers", + "Access-Control-Allow-Methods", + "Access-Control-Allow-Origin", + "Access-Control-Expose-Headers", + "Access-Control-Max-Age", + "Access-Control-Request-Headers", + "Access-Control-Request-Method", + "Age", + "Allow", + "Alt-Svc", + "Alt-Used", + "Authorization", + "Cache-Control", + "Clear-Site-Data", + "Connection", + "Content-Disposition", + "Content-Encoding", + "Content-Language", + "Content-Length", + "Content-Location", + "Content-Range", + "Content-Security-Policy", + "Content-Security-Policy-Report-Only", + "Content-Type", + "Cookie", + "Cross-Origin-Embedder-Policy", + "Cross-Origin-Opener-Policy", + "Cross-Origin-Resource-Policy", + "Date", + "Device-Memory", + "Downlink", + "ECT", + "ETag", + "Expect", + "Expect-CT", + "Expires", + "Forwarded", + "From", + "Host", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Range", + "If-Unmodified-Since", + "Keep-Alive", + "Last-Modified", + "Link", + "Location", + "Max-Forwards", + "Origin", + "Permissions-Policy", + "Pragma", + "Proxy-Authenticate", + "Proxy-Authorization", + "RTT", + "Range", + "Referer", + "Referrer-Policy", + "Refresh", + "Retry-After", + "Sec-WebSocket-Accept", + "Sec-WebSocket-Extensions", + "Sec-WebSocket-Key", + "Sec-WebSocket-Protocol", + "Sec-WebSocket-Version", + "Server", + "Server-Timing", + "Service-Worker-Allowed", + "Service-Worker-Navigation-Preload", + "Set-Cookie", + "SourceMap", + "Strict-Transport-Security", + "Supports-Loading-Mode", + "TE", + "Timing-Allow-Origin", + "Trailer", + "Transfer-Encoding", + "Upgrade", + "Upgrade-Insecure-Requests", + "User-Agent", + "Vary", + "Via", + "WWW-Authenticate", + "X-Content-Type-Options", + "X-DNS-Prefetch-Control", + "X-Frame-Options", + "X-Permitted-Cross-Domain-Policies", + "X-Powered-By", + "X-Requested-With", + "X-XSS-Protection" + ]; + for (let i = 0; i < wellknownHeaderNames.length; ++i) { + const key = wellknownHeaderNames[i]; + const lowerCasedKey = key.toLowerCase(); + headerNameLowerCasedRecord[key] = headerNameLowerCasedRecord[lowerCasedKey] = lowerCasedKey; + } + Object.setPrototypeOf(headerNameLowerCasedRecord, null); + module2.exports = { + wellknownHeaderNames, + headerNameLowerCasedRecord }; - module2.exports = Dispatcher; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/dispatcher-base.js -var require_dispatcher_base = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/dispatcher-base.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/tree.js +var require_tree = __commonJS({ 
+ ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/tree.js"(exports, module2) { "use strict"; - var Dispatcher = require_dispatcher(); var { - ClientDestroyedError, - ClientClosedError, - InvalidArgumentError - } = require_errors(); - var { kDestroy, kClose, kDispatch, kInterceptors } = require_symbols(); - var kDestroyed = Symbol("destroyed"); - var kClosed = Symbol("closed"); - var kOnDestroyed = Symbol("onDestroyed"); - var kOnClosed = Symbol("onClosed"); - var kInterceptedDispatch = Symbol("Intercepted Dispatch"); - var DispatcherBase = class extends Dispatcher { - constructor() { - super(); - this[kDestroyed] = false; - this[kOnDestroyed] = null; - this[kClosed] = false; - this[kOnClosed] = []; - } - get destroyed() { - return this[kDestroyed]; - } - get closed() { - return this[kClosed]; - } - get interceptors() { - return this[kInterceptors]; - } - set interceptors(newInterceptors) { - if (newInterceptors) { - for (let i = newInterceptors.length - 1; i >= 0; i--) { - const interceptor = this[kInterceptors][i]; - if (typeof interceptor !== "function") { - throw new InvalidArgumentError("interceptor must be an function"); - } - } + wellknownHeaderNames, + headerNameLowerCasedRecord + } = require_constants2(); + var TstNode = class _TstNode { + /** @type {any} */ + value = null; + /** @type {null | TstNode} */ + left = null; + /** @type {null | TstNode} */ + middle = null; + /** @type {null | TstNode} */ + right = null; + /** @type {number} */ + code; + /** + * @param {string} key + * @param {any} value + * @param {number} index + */ + constructor(key, value, index) { + if (index === void 0 || index >= key.length) { + throw new TypeError("Unreachable"); } - this[kInterceptors] = newInterceptors; - } - close(callback) { - if (callback === void 0) { - return new Promise((resolve, reject) => { - this.close((err, data) => { - return err ? 
reject(err) : resolve(data); - }); - }); + const code = this.code = key.charCodeAt(index); + if (code > 127) { + throw new TypeError("key must be ascii string"); } - if (typeof callback !== "function") { - throw new InvalidArgumentError("invalid callback"); + if (key.length !== ++index) { + this.middle = new _TstNode(key, value, index); + } else { + this.value = value; } - if (this[kDestroyed]) { - queueMicrotask(() => callback(new ClientDestroyedError(), null)); - return; + } + /** + * @param {string} key + * @param {any} value + */ + add(key, value) { + const length = key.length; + if (length === 0) { + throw new TypeError("Unreachable"); } - if (this[kClosed]) { - if (this[kOnClosed]) { - this[kOnClosed].push(callback); + let index = 0; + let node = this; + while (true) { + const code = key.charCodeAt(index); + if (code > 127) { + throw new TypeError("key must be ascii string"); + } + if (node.code === code) { + if (length === ++index) { + node.value = value; + break; + } else if (node.middle !== null) { + node = node.middle; + } else { + node.middle = new _TstNode(key, value, index); + break; + } + } else if (node.code < code) { + if (node.left !== null) { + node = node.left; + } else { + node.left = new _TstNode(key, value, index); + break; + } + } else if (node.right !== null) { + node = node.right; } else { - queueMicrotask(() => callback(null, null)); + node.right = new _TstNode(key, value, index); + break; } - return; } - this[kClosed] = true; - this[kOnClosed].push(callback); - const onClosed = () => { - const callbacks = this[kOnClosed]; - this[kOnClosed] = null; - for (let i = 0; i < callbacks.length; i++) { - callbacks[i](null, null); - } - }; - this[kClose]().then(() => this.destroy()).then(() => { - queueMicrotask(onClosed); - }); } - destroy(err, callback) { - if (typeof err === "function") { - callback = err; - err = null; - } - if (callback === void 0) { - return new Promise((resolve, reject) => { - this.destroy(err, (err2, data) => { - return err2 ? ( - /* istanbul ignore next: should never error */ - reject(err2) - ) : resolve(data); - }); - }); - } - if (typeof callback !== "function") { - throw new InvalidArgumentError("invalid callback"); - } - if (this[kDestroyed]) { - if (this[kOnDestroyed]) { - this[kOnDestroyed].push(callback); - } else { - queueMicrotask(() => callback(null, null)); + /** + * @param {Uint8Array} key + * @return {TstNode | null} + */ + search(key) { + const keylength = key.length; + let index = 0; + let node = this; + while (node !== null && index < keylength) { + let code = key[index]; + if (code <= 90 && code >= 65) { + code |= 32; } - return; - } - if (!err) { - err = new ClientDestroyedError(); - } - this[kDestroyed] = true; - this[kOnDestroyed] = this[kOnDestroyed] || []; - this[kOnDestroyed].push(callback); - const onDestroyed = () => { - const callbacks = this[kOnDestroyed]; - this[kOnDestroyed] = null; - for (let i = 0; i < callbacks.length; i++) { - callbacks[i](null, null); + while (node !== null) { + if (code === node.code) { + if (keylength === ++index) { + return node; + } + node = node.middle; + break; + } + node = node.code < code ? 
node.left : node.right; } - }; - this[kDestroy](err).then(() => { - queueMicrotask(onDestroyed); - }); - } - [kInterceptedDispatch](opts, handler) { - if (!this[kInterceptors] || this[kInterceptors].length === 0) { - this[kInterceptedDispatch] = this[kDispatch]; - return this[kDispatch](opts, handler); } - let dispatch = this[kDispatch].bind(this); - for (let i = this[kInterceptors].length - 1; i >= 0; i--) { - dispatch = this[kInterceptors][i](dispatch); - } - this[kInterceptedDispatch] = dispatch; - return dispatch(opts, handler); + return null; } - dispatch(opts, handler) { - if (!handler || typeof handler !== "object") { - throw new InvalidArgumentError("handler must be an object"); - } - try { - if (!opts || typeof opts !== "object") { - throw new InvalidArgumentError("opts must be an object."); - } - if (this[kDestroyed] || this[kOnDestroyed]) { - throw new ClientDestroyedError(); - } - if (this[kClosed]) { - throw new ClientClosedError(); - } - return this[kInterceptedDispatch](opts, handler); - } catch (err) { - if (typeof handler.onError !== "function") { - throw new InvalidArgumentError("invalid onError method"); - } - handler.onError(err); - return false; + }; + var TernarySearchTree = class { + /** @type {TstNode | null} */ + node = null; + /** + * @param {string} key + * @param {any} value + * */ + insert(key, value) { + if (this.node === null) { + this.node = new TstNode(key, value, 0); + } else { + this.node.add(key, value); } } + /** + * @param {Uint8Array} key + * @return {any} + */ + lookup(key) { + return this.node?.search(key)?.value ?? null; + } + }; + var tree = new TernarySearchTree(); + for (let i = 0; i < wellknownHeaderNames.length; ++i) { + const key = headerNameLowerCasedRecord[wellknownHeaderNames[i]]; + tree.insert(key, key); + } + module2.exports = { + TernarySearchTree, + tree }; - module2.exports = DispatcherBase; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/node/fixed-queue.js -var require_fixed_queue = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/node/fixed-queue.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/util.js +var require_util = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/util.js"(exports, module2) { "use strict"; - var kSize = 2048; - var kMask = kSize - 1; - var FixedCircularBuffer = class { - constructor() { - this.bottom = 0; - this.top = 0; - this.list = new Array(kSize); - this.next = null; - } - isEmpty() { - return this.top === this.bottom; + var assert3 = require("node:assert"); + var { kDestroyed, kBodyUsed, kListeners } = require_symbols(); + var { IncomingMessage } = require("node:http"); + var stream = require("node:stream"); + var net = require("node:net"); + var { InvalidArgumentError } = require_errors(); + var { Blob: Blob2 } = require("node:buffer"); + var nodeUtil = require("node:util"); + var { stringify } = require("node:querystring"); + var { headerNameLowerCasedRecord } = require_constants2(); + var { tree } = require_tree(); + var [nodeMajor, nodeMinor] = process.versions.node.split(".").map((v) => Number(v)); + function nop() { + } + function isStream(obj) { + return obj && typeof obj === "object" && typeof obj.pipe === "function" && typeof obj.on === "function"; + } + function isBlobLike(object) { + if (object === null) { + return false; + } else if (object instanceof Blob2) { + return true; + } else if 
(typeof object !== "object") { + return false; + } else { + const sTag = object[Symbol.toStringTag]; + return (sTag === "Blob" || sTag === "File") && ("stream" in object && typeof object.stream === "function" || "arrayBuffer" in object && typeof object.arrayBuffer === "function"); } - isFull() { - return (this.top + 1 & kMask) === this.bottom; + } + function buildURL(url, queryParams) { + if (url.includes("?") || url.includes("#")) { + throw new Error('Query params cannot be passed when url already contains "?" or "#".'); } - push(data) { - this.list[this.top] = data; - this.top = this.top + 1 & kMask; + const stringified = stringify(queryParams); + if (stringified) { + url += "?" + stringified; } - shift() { - const nextItem = this.list[this.bottom]; - if (nextItem === void 0) - return null; - this.list[this.bottom] = void 0; - this.bottom = this.bottom + 1 & kMask; - return nextItem; + return url; + } + function parseURL(url) { + if (typeof url === "string") { + url = new URL(url); + if (!/^https?:/.test(url.origin || url.protocol)) { + throw new InvalidArgumentError("Invalid URL protocol: the URL must start with `http:` or `https:`."); + } + return url; } - }; - module2.exports = class FixedQueue { - constructor() { - this.head = this.tail = new FixedCircularBuffer(); + if (!url || typeof url !== "object") { + throw new InvalidArgumentError("Invalid URL: The URL argument must be a non-null object."); } - isEmpty() { - return this.head.isEmpty(); + if (!/^https?:/.test(url.origin || url.protocol)) { + throw new InvalidArgumentError("Invalid URL protocol: the URL must start with `http:` or `https:`."); } - push(data) { - if (this.head.isFull()) { - this.head = this.head.next = new FixedCircularBuffer(); + if (!(url instanceof URL)) { + if (url.port != null && url.port !== "" && !Number.isFinite(parseInt(url.port))) { + throw new InvalidArgumentError("Invalid URL: port must be a valid integer or a string representation of an integer."); } - this.head.push(data); - } - shift() { - const tail = this.tail; - const next = tail.shift(); - if (tail.isEmpty() && tail.next !== null) { - this.tail = tail.next; + if (url.path != null && typeof url.path !== "string") { + throw new InvalidArgumentError("Invalid URL path: the path must be a string or null/undefined."); } - return next; + if (url.pathname != null && typeof url.pathname !== "string") { + throw new InvalidArgumentError("Invalid URL pathname: the pathname must be a string or null/undefined."); + } + if (url.hostname != null && typeof url.hostname !== "string") { + throw new InvalidArgumentError("Invalid URL hostname: the hostname must be a string or null/undefined."); + } + if (url.origin != null && typeof url.origin !== "string") { + throw new InvalidArgumentError("Invalid URL origin: the origin must be a string or null/undefined."); + } + const port = url.port != null ? url.port : url.protocol === "https:" ? 443 : 80; + let origin = url.origin != null ? url.origin : `${url.protocol}//${url.hostname}:${port}`; + let path10 = url.path != null ? 
url.path : `${url.pathname || ""}${url.search || ""}`; + if (origin.endsWith("/")) { + origin = origin.substring(0, origin.length - 1); + } + if (path10 && !path10.startsWith("/")) { + path10 = `/${path10}`; + } + url = new URL(origin + path10); } - }; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/pool-stats.js -var require_pool_stats = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/pool-stats.js"(exports, module2) { - var { kFree, kConnected, kPending, kQueued, kRunning, kSize } = require_symbols(); - var kPool = Symbol("pool"); - var PoolStats = class { - constructor(pool) { - this[kPool] = pool; + return url; + } + function parseOrigin(url) { + url = parseURL(url); + if (url.pathname !== "/" || url.search || url.hash) { + throw new InvalidArgumentError("invalid url"); } - get connected() { - return this[kPool][kConnected]; + return url; + } + function getHostname(host) { + if (host[0] === "[") { + const idx2 = host.indexOf("]"); + assert3(idx2 !== -1); + return host.substring(1, idx2); } - get free() { - return this[kPool][kFree]; + const idx = host.indexOf(":"); + if (idx === -1) + return host; + return host.substring(0, idx); + } + function getServerName(host) { + if (!host) { + return null; } - get pending() { - return this[kPool][kPending]; + assert3.strictEqual(typeof host, "string"); + const servername = getHostname(host); + if (net.isIP(servername)) { + return ""; } - get queued() { - return this[kPool][kQueued]; + return servername; + } + function deepClone(obj) { + return JSON.parse(JSON.stringify(obj)); + } + function isAsyncIterable(obj) { + return !!(obj != null && typeof obj[Symbol.asyncIterator] === "function"); + } + function isIterable(obj) { + return !!(obj != null && (typeof obj[Symbol.iterator] === "function" || typeof obj[Symbol.asyncIterator] === "function")); + } + function bodyLength(body) { + if (body == null) { + return 0; + } else if (isStream(body)) { + const state = body._readableState; + return state && state.objectMode === false && state.ended === true && Number.isFinite(state.length) ? state.length : null; + } else if (isBlobLike(body)) { + return body.size != null ? 
body.size : null; + } else if (isBuffer(body)) { + return body.byteLength; } - get running() { - return this[kPool][kRunning]; + return null; + } + function isDestroyed(body) { + return body && !!(body.destroyed || body[kDestroyed] || stream.isDestroyed?.(body)); + } + function isReadableAborted(stream2) { + const state = stream2?._readableState; + return isDestroyed(stream2) && state && !state.endEmitted; + } + function destroy(stream2, err) { + if (stream2 == null || !isStream(stream2) || isDestroyed(stream2)) { + return; } - get size() { - return this[kPool][kSize]; + if (typeof stream2.destroy === "function") { + if (Object.getPrototypeOf(stream2).constructor === IncomingMessage) { + stream2.socket = null; + } + stream2.destroy(err); + } else if (err) { + queueMicrotask(() => { + stream2.emit("error", err); + }); } - }; - module2.exports = PoolStats; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/pool-base.js -var require_pool_base = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/pool-base.js"(exports, module2) { - "use strict"; - var DispatcherBase = require_dispatcher_base(); - var FixedQueue = require_fixed_queue(); - var { kConnected, kSize, kRunning, kPending, kQueued, kBusy, kFree, kUrl, kClose, kDestroy, kDispatch } = require_symbols(); - var PoolStats = require_pool_stats(); - var kClients = Symbol("clients"); - var kNeedDrain = Symbol("needDrain"); - var kQueue = Symbol("queue"); - var kClosedResolve = Symbol("closed resolve"); - var kOnDrain = Symbol("onDrain"); - var kOnConnect = Symbol("onConnect"); - var kOnDisconnect = Symbol("onDisconnect"); - var kOnConnectionError = Symbol("onConnectionError"); - var kGetDispatcher = Symbol("get dispatcher"); - var kAddClient = Symbol("add client"); - var kRemoveClient = Symbol("remove client"); - var kStats = Symbol("stats"); - var PoolBase = class extends DispatcherBase { - constructor() { - super(); - this[kQueue] = new FixedQueue(); - this[kClients] = []; - this[kQueued] = 0; - const pool = this; - this[kOnDrain] = function onDrain(origin, targets) { - const queue = pool[kQueue]; - let needDrain = false; - while (!needDrain) { - const item = queue.shift(); - if (!item) { - break; - } - pool[kQueued]--; - needDrain = !this.dispatch(item.opts, item.handler); - } - this[kNeedDrain] = needDrain; - if (!this[kNeedDrain] && pool[kNeedDrain]) { - pool[kNeedDrain] = false; - pool.emit("drain", origin, [pool, ...targets]); + if (stream2.destroyed !== true) { + stream2[kDestroyed] = true; + } + } + var KEEPALIVE_TIMEOUT_EXPR = /timeout=(\d+)/; + function parseKeepAliveTimeout(val) { + const m = val.toString().match(KEEPALIVE_TIMEOUT_EXPR); + return m ? parseInt(m[1], 10) * 1e3 : null; + } + function headerNameToString(value) { + return typeof value === "string" ? headerNameLowerCasedRecord[value] ?? value.toLowerCase() : tree.lookup(value) ?? value.toString("latin1").toLowerCase(); + } + function bufferToLowerCasedHeaderName(value) { + return tree.lookup(value) ?? 
value.toString("latin1").toLowerCase(); + } + function parseHeaders(headers, obj) { + if (obj === void 0) + obj = {}; + for (let i = 0; i < headers.length; i += 2) { + const key = headerNameToString(headers[i]); + let val = obj[key]; + if (val) { + if (typeof val === "string") { + val = [val]; + obj[key] = val; } - if (pool[kClosedResolve] && queue.isEmpty()) { - Promise.all(pool[kClients].map((c) => c.close())).then(pool[kClosedResolve]); + val.push(headers[i + 1].toString("utf8")); + } else { + const headersValue = headers[i + 1]; + if (typeof headersValue === "string") { + obj[key] = headersValue; + } else { + obj[key] = Array.isArray(headersValue) ? headersValue.map((x) => x.toString("utf8")) : headersValue.toString("utf8"); } - }; - this[kOnConnect] = (origin, targets) => { - pool.emit("connect", origin, [pool, ...targets]); - }; - this[kOnDisconnect] = (origin, targets, err) => { - pool.emit("disconnect", origin, [pool, ...targets], err); - }; - this[kOnConnectionError] = (origin, targets, err) => { - pool.emit("connectionError", origin, [pool, ...targets], err); - }; - this[kStats] = new PoolStats(this); + } } - get [kBusy]() { - return this[kNeedDrain]; + if ("content-length" in obj && "content-disposition" in obj) { + obj["content-disposition"] = Buffer.from(obj["content-disposition"]).toString("latin1"); } - get [kConnected]() { - return this[kClients].filter((client) => client[kConnected]).length; + return obj; + } + function parseRawHeaders(headers) { + const len = headers.length; + const ret = new Array(len); + let hasContentLength = false; + let contentDispositionIdx = -1; + let key; + let val; + let kLen = 0; + for (let n = 0; n < headers.length; n += 2) { + key = headers[n]; + val = headers[n + 1]; + typeof key !== "string" && (key = key.toString()); + typeof val !== "string" && (val = val.toString("utf8")); + kLen = key.length; + if (kLen === 14 && key[7] === "-" && (key === "content-length" || key.toLowerCase() === "content-length")) { + hasContentLength = true; + } else if (kLen === 19 && key[7] === "-" && (key === "content-disposition" || key.toLowerCase() === "content-disposition")) { + contentDispositionIdx = n + 1; + } + ret[n] = key; + ret[n + 1] = val; } - get [kFree]() { - return this[kClients].filter((client) => client[kConnected] && !client[kNeedDrain]).length; + if (hasContentLength && contentDispositionIdx !== -1) { + ret[contentDispositionIdx] = Buffer.from(ret[contentDispositionIdx]).toString("latin1"); } - get [kPending]() { - let ret = this[kQueued]; - for (const { [kPending]: pending } of this[kClients]) { - ret += pending; - } - return ret; + return ret; + } + function isBuffer(buffer) { + return buffer instanceof Uint8Array || Buffer.isBuffer(buffer); + } + function validateHandler(handler, method, upgrade) { + if (!handler || typeof handler !== "object") { + throw new InvalidArgumentError("handler must be an object"); } - get [kRunning]() { - let ret = 0; - for (const { [kRunning]: running } of this[kClients]) { - ret += running; - } - return ret; + if (typeof handler.onConnect !== "function") { + throw new InvalidArgumentError("invalid onConnect method"); } - get [kSize]() { - let ret = this[kQueued]; - for (const { [kSize]: size } of this[kClients]) { - ret += size; - } - return ret; + if (typeof handler.onError !== "function") { + throw new InvalidArgumentError("invalid onError method"); } - get stats() { - return this[kStats]; + if (typeof handler.onBodySent !== "function" && handler.onBodySent !== void 0) { + throw new 
InvalidArgumentError("invalid onBodySent method"); } - async [kClose]() { - if (this[kQueue].isEmpty()) { - return Promise.all(this[kClients].map((c) => c.close())); - } else { - return new Promise((resolve) => { - this[kClosedResolve] = resolve; - }); + if (upgrade || method === "CONNECT") { + if (typeof handler.onUpgrade !== "function") { + throw new InvalidArgumentError("invalid onUpgrade method"); } - } - async [kDestroy](err) { - while (true) { - const item = this[kQueue].shift(); - if (!item) { - break; - } - item.handler.onError(err); + } else { + if (typeof handler.onHeaders !== "function") { + throw new InvalidArgumentError("invalid onHeaders method"); } - return Promise.all(this[kClients].map((c) => c.destroy(err))); - } - [kDispatch](opts, handler) { - const dispatcher = this[kGetDispatcher](); - if (!dispatcher) { - this[kNeedDrain] = true; - this[kQueue].push({ opts, handler }); - this[kQueued]++; - } else if (!dispatcher.dispatch(opts, handler)) { - dispatcher[kNeedDrain] = true; - this[kNeedDrain] = !this[kGetDispatcher](); + if (typeof handler.onData !== "function") { + throw new InvalidArgumentError("invalid onData method"); + } + if (typeof handler.onComplete !== "function") { + throw new InvalidArgumentError("invalid onComplete method"); } - return !this[kNeedDrain]; } - [kAddClient](client) { - client.on("drain", this[kOnDrain]).on("connect", this[kOnConnect]).on("disconnect", this[kOnDisconnect]).on("connectionError", this[kOnConnectionError]); - this[kClients].push(client); - if (this[kNeedDrain]) { - process.nextTick(() => { - if (this[kNeedDrain]) { - this[kOnDrain](client[kUrl], [this, client]); + } + function isDisturbed(body) { + return !!(body && (stream.isDisturbed(body) || body[kBodyUsed])); + } + function isErrored(body) { + return !!(body && stream.isErrored(body)); + } + function isReadable(body) { + return !!(body && stream.isReadable(body)); + } + function getSocketInfo(socket) { + return { + localAddress: socket.localAddress, + localPort: socket.localPort, + remoteAddress: socket.remoteAddress, + remotePort: socket.remotePort, + remoteFamily: socket.remoteFamily, + timeout: socket.timeout, + bytesWritten: socket.bytesWritten, + bytesRead: socket.bytesRead + }; + } + function ReadableStreamFrom(iterable) { + let iterator; + return new ReadableStream( + { + async start() { + iterator = iterable[Symbol.asyncIterator](); + }, + async pull(controller) { + const { done, value } = await iterator.next(); + if (done) { + queueMicrotask(() => { + controller.close(); + controller.byobRequest?.respond(0); + }); + } else { + const buf = Buffer.isBuffer(value) ? 
value : Buffer.from(value); + if (buf.byteLength) { + controller.enqueue(new Uint8Array(buf)); + } } - }); + return controller.desiredSize > 0; + }, + async cancel(reason) { + await iterator.return(); + }, + type: "bytes" } - return this; + ); + } + function isFormDataLike(object) { + return object && typeof object === "object" && typeof object.append === "function" && typeof object.delete === "function" && typeof object.get === "function" && typeof object.getAll === "function" && typeof object.has === "function" && typeof object.set === "function" && object[Symbol.toStringTag] === "FormData"; + } + function addAbortListener(signal, listener) { + if ("addEventListener" in signal) { + signal.addEventListener("abort", listener, { once: true }); + return () => signal.removeEventListener("abort", listener); } - [kRemoveClient](client) { - client.close(() => { - const idx = this[kClients].indexOf(client); - if (idx !== -1) { - this[kClients].splice(idx, 1); - } - }); - this[kNeedDrain] = this[kClients].some((dispatcher) => !dispatcher[kNeedDrain] && dispatcher.closed !== true && dispatcher.destroyed !== true); + signal.addListener("abort", listener); + return () => signal.removeListener("abort", listener); + } + var hasToWellFormed = typeof String.prototype.toWellFormed === "function"; + var hasIsWellFormed = typeof String.prototype.isWellFormed === "function"; + function toUSVString(val) { + return hasToWellFormed ? `${val}`.toWellFormed() : nodeUtil.toUSVString(val); + } + function isUSVString(val) { + return hasIsWellFormed ? `${val}`.isWellFormed() : toUSVString(val) === `${val}`; + } + function isTokenCharCode(c) { + switch (c) { + case 34: + case 40: + case 41: + case 44: + case 47: + case 58: + case 59: + case 60: + case 61: + case 62: + case 63: + case 64: + case 91: + case 92: + case 93: + case 123: + case 125: + return false; + default: + return c >= 33 && c <= 126; } - }; + } + function isValidHTTPToken(characters) { + if (characters.length === 0) { + return false; + } + for (let i = 0; i < characters.length; ++i) { + if (!isTokenCharCode(characters.charCodeAt(i))) { + return false; + } + } + return true; + } + var headerCharRegex = /[^\t\x20-\x7e\x80-\xff]/; + function isValidHeaderChar(characters) { + return !headerCharRegex.test(characters); + } + function parseRangeHeader(range) { + if (range == null || range === "") + return { start: 0, end: null, size: null }; + const m = range ? range.match(/^bytes (\d+)-(\d+)\/(\d+)?$/) : null; + return m ? { + start: parseInt(m[1]), + end: m[2] ? parseInt(m[2]) : null, + size: m[3] ? parseInt(m[3]) : null + } : null; + } + function addListener(obj, name, listener) { + const listeners = obj[kListeners] ??= []; + listeners.push([name, listener]); + obj.on(name, listener); + return obj; + } + function removeAllListeners(obj) { + for (const [name, listener] of obj[kListeners] ?? 
[]) { + obj.removeListener(name, listener); + } + obj[kListeners] = null; + } + function errorRequest(client, request, err) { + try { + request.onError(err); + assert3(request.aborted); + } catch (err2) { + client.emit("error", err2); + } + } + var kEnumerableProperty = /* @__PURE__ */ Object.create(null); + kEnumerableProperty.enumerable = true; module2.exports = { - PoolBase, - kClients, - kNeedDrain, - kAddClient, - kRemoveClient, - kGetDispatcher - }; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/constants.js -var require_constants2 = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/constants.js"(exports, module2) { - "use strict"; - var headerNameLowerCasedRecord = {}; - var wellknownHeaderNames = [ - "Accept", - "Accept-Encoding", - "Accept-Language", - "Accept-Ranges", - "Access-Control-Allow-Credentials", - "Access-Control-Allow-Headers", - "Access-Control-Allow-Methods", - "Access-Control-Allow-Origin", - "Access-Control-Expose-Headers", - "Access-Control-Max-Age", - "Access-Control-Request-Headers", - "Access-Control-Request-Method", - "Age", - "Allow", - "Alt-Svc", - "Alt-Used", - "Authorization", - "Cache-Control", - "Clear-Site-Data", - "Connection", - "Content-Disposition", - "Content-Encoding", - "Content-Language", - "Content-Length", - "Content-Location", - "Content-Range", - "Content-Security-Policy", - "Content-Security-Policy-Report-Only", - "Content-Type", - "Cookie", - "Cross-Origin-Embedder-Policy", - "Cross-Origin-Opener-Policy", - "Cross-Origin-Resource-Policy", - "Date", - "Device-Memory", - "Downlink", - "ECT", - "ETag", - "Expect", - "Expect-CT", - "Expires", - "Forwarded", - "From", - "Host", - "If-Match", - "If-Modified-Since", - "If-None-Match", - "If-Range", - "If-Unmodified-Since", - "Keep-Alive", - "Last-Modified", - "Link", - "Location", - "Max-Forwards", - "Origin", - "Permissions-Policy", - "Pragma", - "Proxy-Authenticate", - "Proxy-Authorization", - "RTT", - "Range", - "Referer", - "Referrer-Policy", - "Refresh", - "Retry-After", - "Sec-WebSocket-Accept", - "Sec-WebSocket-Extensions", - "Sec-WebSocket-Key", - "Sec-WebSocket-Protocol", - "Sec-WebSocket-Version", - "Server", - "Server-Timing", - "Service-Worker-Allowed", - "Service-Worker-Navigation-Preload", - "Set-Cookie", - "SourceMap", - "Strict-Transport-Security", - "Supports-Loading-Mode", - "TE", - "Timing-Allow-Origin", - "Trailer", - "Transfer-Encoding", - "Upgrade", - "Upgrade-Insecure-Requests", - "User-Agent", - "Vary", - "Via", - "WWW-Authenticate", - "X-Content-Type-Options", - "X-DNS-Prefetch-Control", - "X-Frame-Options", - "X-Permitted-Cross-Domain-Policies", - "X-Powered-By", - "X-Requested-With", - "X-XSS-Protection" - ]; - for (let i = 0; i < wellknownHeaderNames.length; ++i) { - const key = wellknownHeaderNames[i]; - const lowerCasedKey = key.toLowerCase(); - headerNameLowerCasedRecord[key] = headerNameLowerCasedRecord[lowerCasedKey] = lowerCasedKey; - } - Object.setPrototypeOf(headerNameLowerCasedRecord, null); - module2.exports = { - wellknownHeaderNames, - headerNameLowerCasedRecord + kEnumerableProperty, + nop, + isDisturbed, + isErrored, + isReadable, + toUSVString, + isUSVString, + isReadableAborted, + isBlobLike, + parseOrigin, + parseURL, + getServerName, + isStream, + isIterable, + isAsyncIterable, + isDestroyed, + headerNameToString, + bufferToLowerCasedHeaderName, + addListener, + removeAllListeners, + errorRequest, + parseRawHeaders, + parseHeaders, + 
parseKeepAliveTimeout, + destroy, + bodyLength, + deepClone, + ReadableStreamFrom, + isBuffer, + validateHandler, + getSocketInfo, + isFormDataLike, + buildURL, + addAbortListener, + isValidHTTPToken, + isValidHeaderChar, + isTokenCharCode, + parseRangeHeader, + nodeMajor, + nodeMinor, + nodeHasAutoSelectFamily: nodeMajor > 18 || nodeMajor === 18 && nodeMinor >= 13, + safeHTTPMethods: ["GET", "HEAD", "OPTIONS", "TRACE"] }; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/tree.js -var require_tree = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/tree.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/readable.js +var require_readable = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/readable.js"(exports, module2) { "use strict"; - var { - wellknownHeaderNames, - headerNameLowerCasedRecord - } = require_constants2(); - var TstNode = class _TstNode { - /** @type {any} */ - value = null; - /** @type {null | TstNode} */ - left = null; - /** @type {null | TstNode} */ - middle = null; - /** @type {null | TstNode} */ - right = null; - /** @type {number} */ - code; - /** - * @param {Uint8Array} key - * @param {any} value - * @param {number} index - */ - constructor(key, value, index) { - if (index === void 0 || index >= key.length) { - throw new TypeError("Unreachable"); + var assert3 = require("node:assert"); + var { Readable: Readable2 } = require("node:stream"); + var { RequestAbortedError, NotSupportedError, InvalidArgumentError, AbortError } = require_errors(); + var util = require_util(); + var { ReadableStreamFrom } = require_util(); + var kConsume = Symbol("kConsume"); + var kReading = Symbol("kReading"); + var kBody = Symbol("kBody"); + var kAbort = Symbol("kAbort"); + var kContentType = Symbol("kContentType"); + var kContentLength = Symbol("kContentLength"); + var noop = () => { + }; + var BodyReadable = class extends Readable2 { + constructor({ + resume, + abort, + contentType = "", + contentLength, + highWaterMark = 64 * 1024 + // Same as nodejs fs streams. 
+ }) { + super({ + autoDestroy: true, + read: resume, + highWaterMark + }); + this._readableState.dataEmitted = false; + this[kAbort] = abort; + this[kConsume] = null; + this[kBody] = null; + this[kContentType] = contentType; + this[kContentLength] = contentLength; + this[kReading] = false; + } + destroy(err) { + if (!err && !this._readableState.endEmitted) { + err = new RequestAbortedError(); } - this.code = key[index]; - if (key.length !== ++index) { - this.middle = new _TstNode(key, value, index); - } else { - this.value = value; + if (err) { + this[kAbort](); } + return super.destroy(err); } - /** - * @param {Uint8Array} key - * @param {any} value - * @param {number} index - */ - add(key, value, index) { - if (index === void 0 || index >= key.length) { - throw new TypeError("Unreachable"); + _destroy(err, callback) { + setImmediate(() => { + callback(err); + }); + } + on(ev, ...args) { + if (ev === "data" || ev === "readable") { + this[kReading] = true; } - const code = key[index]; - if (this.code === code) { - if (key.length === ++index) { - this.value = value; - } else if (this.middle !== null) { - this.middle.add(key, value, index); - } else { - this.middle = new _TstNode(key, value, index); - } - } else if (this.code < code) { - if (this.left !== null) { - this.left.add(key, value, index); - } else { - this.left = new _TstNode(key, value, index); + return super.on(ev, ...args); + } + addListener(ev, ...args) { + return this.on(ev, ...args); + } + off(ev, ...args) { + const ret = super.off(ev, ...args); + if (ev === "data" || ev === "readable") { + this[kReading] = this.listenerCount("data") > 0 || this.listenerCount("readable") > 0; + } + return ret; + } + removeListener(ev, ...args) { + return this.off(ev, ...args); + } + push(chunk) { + if (this[kConsume] && chunk !== null) { + consumePush(this[kConsume], chunk); + return this[kReading] ? super.push(chunk) : true; + } + return super.push(chunk); + } + // https://fetch.spec.whatwg.org/#dom-body-text + async text() { + return consume(this, "text"); + } + // https://fetch.spec.whatwg.org/#dom-body-json + async json() { + return consume(this, "json"); + } + // https://fetch.spec.whatwg.org/#dom-body-blob + async blob() { + return consume(this, "blob"); + } + // https://fetch.spec.whatwg.org/#dom-body-arraybuffer + async arrayBuffer() { + return consume(this, "arrayBuffer"); + } + // https://fetch.spec.whatwg.org/#dom-body-formdata + async formData() { + throw new NotSupportedError(); + } + // https://fetch.spec.whatwg.org/#dom-body-bodyused + get bodyUsed() { + return util.isDisturbed(this); + } + // https://fetch.spec.whatwg.org/#dom-body-body + get body() { + if (!this[kBody]) { + this[kBody] = ReadableStreamFrom(this); + if (this[kConsume]) { + this[kBody].getReader(); + assert3(this[kBody].locked); } - } else if (this.right !== null) { - this.right.add(key, value, index); - } else { - this.right = new _TstNode(key, value, index); } + return this[kBody]; } - /** - * @param {Uint8Array} key - * @return {TstNode | null} - */ - search(key) { - const keylength = key.length; - let index = 0; - let node = this; - while (node !== null && index < keylength) { - let code = key[index]; - if (code >= 65 && code <= 90) { - code |= 32; + async dump(opts) { + let limit = Number.isFinite(opts?.limit) ? 
opts.limit : 128 * 1024; + const signal = opts?.signal; + if (signal != null && (typeof signal !== "object" || !("aborted" in signal))) { + throw new InvalidArgumentError("signal must be an AbortSignal"); + } + signal?.throwIfAborted(); + if (this._readableState.closeEmitted) { + return null; + } + return await new Promise((resolve, reject) => { + if (this[kContentLength] > limit) { + this.destroy(new AbortError()); } - while (node !== null) { - if (code === node.code) { - if (keylength === ++index) { - return node; - } - node = node.middle; - break; + const onAbort = () => { + this.destroy(signal.reason ?? new AbortError()); + }; + signal?.addEventListener("abort", onAbort); + this.on("close", function() { + signal?.removeEventListener("abort", onAbort); + if (signal?.aborted) { + reject(signal.reason ?? new AbortError()); + } else { + resolve(null); } - node = node.code < code ? node.left : node.right; - } - } - return null; + }).on("error", noop).on("data", function(chunk) { + limit -= chunk.length; + if (limit <= 0) { + this.destroy(); + } + }).resume(); + }); } }; - var TernarySearchTree = class { - /** @type {TstNode | null} */ - node = null; - /** - * @param {Uint8Array} key - * @param {any} value - * */ - insert(key, value) { - if (this.node === null) { - this.node = new TstNode(key, value, 0); + function isLocked(self2) { + return self2[kBody] && self2[kBody].locked === true || self2[kConsume]; + } + function isUnusable(self2) { + return util.isDisturbed(self2) || isLocked(self2); + } + async function consume(stream, type) { + assert3(!stream[kConsume]); + return new Promise((resolve, reject) => { + if (isUnusable(stream)) { + const rState = stream._readableState; + if (rState.destroyed && rState.closeEmitted === false) { + stream.on("error", (err) => { + reject(err); + }).on("close", () => { + reject(new TypeError("unusable")); + }); + } else { + reject(rState.errored ?? new TypeError("unusable")); + } } else { - this.node.add(key, value, 0); + queueMicrotask(() => { + stream[kConsume] = { + type, + stream, + resolve, + reject, + length: 0, + body: [] + }; + stream.on("error", function(err) { + consumeFinish(this[kConsume], err); + }).on("close", function() { + if (this[kConsume].body !== null) { + consumeFinish(this[kConsume], new RequestAbortedError()); + } + }); + consumeStart(stream[kConsume]); + }); } + }); + } + function consumeStart(consume2) { + if (consume2.body === null) { + return; } - /** - * @param {Uint8Array} key - */ - lookup(key) { - return this.node?.search(key)?.value ?? 
null; + const { _readableState: state } = consume2.stream; + if (state.bufferIndex) { + const start = state.bufferIndex; + const end = state.buffer.length; + for (let n = start; n < end; n++) { + consumePush(consume2, state.buffer[n]); + } + } else { + for (const chunk of state.buffer) { + consumePush(consume2, chunk); + } + } + if (state.endEmitted) { + consumeEnd(this[kConsume]); + } else { + consume2.stream.on("end", function() { + consumeEnd(this[kConsume]); + }); + } + consume2.stream.resume(); + while (consume2.stream.read() != null) { } - }; - var tree = new TernarySearchTree(); - for (let i = 0; i < wellknownHeaderNames.length; ++i) { - const key = headerNameLowerCasedRecord[wellknownHeaderNames[i]]; - tree.insert(Buffer.from(key), key); } - module2.exports = { - TernarySearchTree, - tree - }; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/util.js -var require_util = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/util.js"(exports, module2) { - "use strict"; - var assert3 = require("node:assert"); - var { kDestroyed, kBodyUsed } = require_symbols(); - var { IncomingMessage } = require("node:http"); - var stream = require("node:stream"); - var net = require("node:net"); - var { InvalidArgumentError } = require_errors(); - var { Blob: Blob2 } = require("node:buffer"); - var nodeUtil = require("node:util"); - var { stringify } = require("node:querystring"); - var { headerNameLowerCasedRecord } = require_constants2(); - var { tree } = require_tree(); - var [nodeMajor, nodeMinor] = process.versions.node.split(".").map((v) => Number(v)); - function nop() { + function chunksDecode(chunks, length) { + if (chunks.length === 0 || length === 0) { + return ""; + } + const buffer = chunks.length === 1 ? chunks[0] : Buffer.concat(chunks, length); + const bufferLength = buffer.length; + const start = bufferLength > 2 && buffer[0] === 239 && buffer[1] === 187 && buffer[2] === 191 ? 3 : 0; + return buffer.utf8Slice(start, bufferLength); } - function isStream(obj) { - return obj && typeof obj === "object" && typeof obj.pipe === "function" && typeof obj.on === "function"; + function consumeEnd(consume2) { + const { type, body, resolve, stream, length } = consume2; + try { + if (type === "text") { + resolve(chunksDecode(body, length)); + } else if (type === "json") { + resolve(JSON.parse(chunksDecode(body, length))); + } else if (type === "arrayBuffer") { + const dst = new Uint8Array(length); + let pos = 0; + for (const buf of body) { + dst.set(buf, pos); + pos += buf.byteLength; + } + resolve(dst.buffer); + } else if (type === "blob") { + resolve(new Blob(body, { type: stream[kContentType] })); + } + consumeFinish(consume2); + } catch (err) { + stream.destroy(err); + } } - function isBlobLike(object) { - return Blob2 && object instanceof Blob2 || object && typeof object === "object" && (typeof object.stream === "function" || typeof object.arrayBuffer === "function") && /^(Blob|File)$/.test(object[Symbol.toStringTag]); + function consumePush(consume2, chunk) { + consume2.length += chunk.length; + consume2.body.push(chunk); } - function buildURL(url, queryParams) { - if (url.includes("?") || url.includes("#")) { - throw new Error('Query params cannot be passed when url already contains "?" or "#".'); + function consumeFinish(consume2, err) { + if (consume2.body === null) { + return; } - const stringified = stringify(queryParams); - if (stringified) { - url += "?" 
+ stringified; + if (err) { + consume2.reject(err); + } else { + consume2.resolve(); } - return url; + consume2.type = null; + consume2.stream = null; + consume2.resolve = null; + consume2.reject = null; + consume2.length = 0; + consume2.body = null; } - function parseURL(url) { - if (typeof url === "string") { - url = new URL(url); - if (!/^https?:/.test(url.origin || url.protocol)) { - throw new InvalidArgumentError("Invalid URL protocol: the URL must start with `http:` or `https:`."); + module2.exports = { Readable: BodyReadable, chunksDecode }; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/util.js +var require_util2 = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/util.js"(exports, module2) { + var assert3 = require("node:assert"); + var { + ResponseStatusCodeError + } = require_errors(); + var { chunksDecode } = require_readable(); + var CHUNK_LIMIT = 128 * 1024; + async function getResolveErrorBodyCallback({ callback, body, contentType, statusCode, statusMessage, headers }) { + assert3(body); + let chunks = []; + let length = 0; + for await (const chunk of body) { + chunks.push(chunk); + length += chunk.length; + if (length > CHUNK_LIMIT) { + chunks = null; + break; } - return url; } - if (!url || typeof url !== "object") { - throw new InvalidArgumentError("Invalid URL: The URL argument must be a non-null object."); - } - if (!/^https?:/.test(url.origin || url.protocol)) { - throw new InvalidArgumentError("Invalid URL protocol: the URL must start with `http:` or `https:`."); + const message = `Response status code ${statusCode}${statusMessage ? `: ${statusMessage}` : ""}`; + if (statusCode === 204 || !contentType || !chunks) { + queueMicrotask(() => callback(new ResponseStatusCodeError(message, statusCode, headers))); + return; } - if (!(url instanceof URL)) { - if (url.port != null && url.port !== "" && !Number.isFinite(parseInt(url.port))) { - throw new InvalidArgumentError("Invalid URL: port must be a valid integer or a string representation of an integer."); - } - if (url.path != null && typeof url.path !== "string") { - throw new InvalidArgumentError("Invalid URL path: the path must be a string or null/undefined."); - } - if (url.pathname != null && typeof url.pathname !== "string") { - throw new InvalidArgumentError("Invalid URL pathname: the pathname must be a string or null/undefined."); - } - if (url.hostname != null && typeof url.hostname !== "string") { - throw new InvalidArgumentError("Invalid URL hostname: the hostname must be a string or null/undefined."); - } - if (url.origin != null && typeof url.origin !== "string") { - throw new InvalidArgumentError("Invalid URL origin: the origin must be a string or null/undefined."); - } - const port = url.port != null ? url.port : url.protocol === "https:" ? 443 : 80; - let origin = url.origin != null ? url.origin : `${url.protocol}//${url.hostname}:${port}`; - let path10 = url.path != null ? 
url.path : `${url.pathname || ""}${url.search || ""}`; - if (origin.endsWith("/")) { - origin = origin.substring(0, origin.length - 1); - } - if (path10 && !path10.startsWith("/")) { - path10 = `/${path10}`; + const stackTraceLimit = Error.stackTraceLimit; + Error.stackTraceLimit = 0; + let payload; + try { + if (isContentTypeApplicationJson(contentType)) { + payload = JSON.parse(chunksDecode(chunks, length)); + } else if (isContentTypeText(contentType)) { + payload = chunksDecode(chunks, length); } - url = new URL(origin + path10); - } - return url; - } - function parseOrigin(url) { - url = parseURL(url); - if (url.pathname !== "/" || url.search || url.hash) { - throw new InvalidArgumentError("invalid url"); + } catch { + } finally { + Error.stackTraceLimit = stackTraceLimit; } - return url; + queueMicrotask(() => callback(new ResponseStatusCodeError(message, statusCode, headers, payload))); } - function getHostname(host) { - if (host[0] === "[") { - const idx2 = host.indexOf("]"); - assert3(idx2 !== -1); - return host.substring(1, idx2); + var isContentTypeApplicationJson = (contentType) => { + return contentType.length > 15 && contentType[11] === "/" && contentType[0] === "a" && contentType[1] === "p" && contentType[2] === "p" && contentType[3] === "l" && contentType[4] === "i" && contentType[5] === "c" && contentType[6] === "a" && contentType[7] === "t" && contentType[8] === "i" && contentType[9] === "o" && contentType[10] === "n" && contentType[12] === "j" && contentType[13] === "s" && contentType[14] === "o" && contentType[15] === "n"; + }; + var isContentTypeText = (contentType) => { + return contentType.length > 4 && contentType[4] === "/" && contentType[0] === "t" && contentType[1] === "e" && contentType[2] === "x" && contentType[3] === "t"; + }; + module2.exports = { + getResolveErrorBodyCallback, + isContentTypeApplicationJson, + isContentTypeText + }; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/abort-signal.js +var require_abort_signal = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/abort-signal.js"(exports, module2) { + var { addAbortListener } = require_util(); + var { RequestAbortedError } = require_errors(); + var kListener = Symbol("kListener"); + var kSignal = Symbol("kSignal"); + function abort(self2) { + if (self2.abort) { + self2.abort(self2[kSignal]?.reason); + } else { + self2.reason = self2[kSignal]?.reason ?? 
new RequestAbortedError(); } - const idx = host.indexOf(":"); - if (idx === -1) - return host; - return host.substring(0, idx); + removeSignal(self2); } - function getServerName(host) { - if (!host) { - return null; - } - assert3.strictEqual(typeof host, "string"); - const servername = getHostname(host); - if (net.isIP(servername)) { - return ""; + function addSignal(self2, signal) { + self2.reason = null; + self2[kSignal] = null; + self2[kListener] = null; + if (!signal) { + return; } - return servername; - } - function deepClone(obj) { - return JSON.parse(JSON.stringify(obj)); - } - function isAsyncIterable(obj) { - return !!(obj != null && typeof obj[Symbol.asyncIterator] === "function"); - } - function isIterable(obj) { - return !!(obj != null && (typeof obj[Symbol.iterator] === "function" || typeof obj[Symbol.asyncIterator] === "function")); - } - function bodyLength(body) { - if (body == null) { - return 0; - } else if (isStream(body)) { - const state = body._readableState; - return state && state.objectMode === false && state.ended === true && Number.isFinite(state.length) ? state.length : null; - } else if (isBlobLike(body)) { - return body.size != null ? body.size : null; - } else if (isBuffer(body)) { - return body.byteLength; + if (signal.aborted) { + abort(self2); + return; } - return null; - } - function isDestroyed(stream2) { - return !stream2 || !!(stream2.destroyed || stream2[kDestroyed]); - } - function isReadableAborted(stream2) { - const state = stream2?._readableState; - return isDestroyed(stream2) && state && !state.endEmitted; + self2[kSignal] = signal; + self2[kListener] = () => { + abort(self2); + }; + addAbortListener(self2[kSignal], self2[kListener]); } - function destroy(stream2, err) { - if (stream2 == null || !isStream(stream2) || isDestroyed(stream2)) { + function removeSignal(self2) { + if (!self2[kSignal]) { return; } - if (typeof stream2.destroy === "function") { - if (Object.getPrototypeOf(stream2).constructor === IncomingMessage) { - stream2.socket = null; + if ("removeEventListener" in self2[kSignal]) { + self2[kSignal].removeEventListener("abort", self2[kListener]); + } else { + self2[kSignal].removeListener("abort", self2[kListener]); + } + self2[kSignal] = null; + self2[kListener] = null; + } + module2.exports = { + addSignal, + removeSignal + }; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/api-request.js +var require_api_request = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/api-request.js"(exports, module2) { + "use strict"; + var assert3 = require("node:assert"); + var { Readable: Readable2 } = require_readable(); + var { InvalidArgumentError } = require_errors(); + var util = require_util(); + var { getResolveErrorBodyCallback } = require_util2(); + var { AsyncResource } = require("node:async_hooks"); + var { addSignal, removeSignal } = require_abort_signal(); + var RequestHandler = class extends AsyncResource { + constructor(opts, callback) { + if (!opts || typeof opts !== "object") { + throw new InvalidArgumentError("invalid opts"); + } + const { signal, method, opaque, body, onInfo, responseHeaders, throwOnError, highWaterMark } = opts; + try { + if (typeof callback !== "function") { + throw new InvalidArgumentError("invalid callback"); + } + if (highWaterMark && (typeof highWaterMark !== "number" || highWaterMark < 0)) { + throw new InvalidArgumentError("invalid highWaterMark"); + } + if (signal && typeof signal.on !== "function" && 
typeof signal.addEventListener !== "function") { + throw new InvalidArgumentError("signal must be an EventEmitter or EventTarget"); + } + if (method === "CONNECT") { + throw new InvalidArgumentError("invalid method"); + } + if (onInfo && typeof onInfo !== "function") { + throw new InvalidArgumentError("invalid onInfo callback"); + } + super("UNDICI_REQUEST"); + } catch (err) { + if (util.isStream(body)) { + util.destroy(body.on("error", util.nop), err); + } + throw err; } - stream2.destroy(err); - } else if (err) { - process.nextTick((stream3, err2) => { - stream3.emit("error", err2); - }, stream2, err); + this.responseHeaders = responseHeaders || null; + this.opaque = opaque || null; + this.callback = callback; + this.res = null; + this.abort = null; + this.body = body; + this.trailers = {}; + this.context = null; + this.onInfo = onInfo || null; + this.throwOnError = throwOnError; + this.highWaterMark = highWaterMark; + if (util.isStream(body)) { + body.on("error", (err) => { + this.onError(err); + }); + } + addSignal(this, signal); } - if (stream2.destroyed !== true) { - stream2[kDestroyed] = true; + onConnect(abort, context) { + if (this.reason) { + abort(this.reason); + return; + } + assert3(this.callback); + this.abort = abort; + this.context = context; } - } - var KEEPALIVE_TIMEOUT_EXPR = /timeout=(\d+)/; - function parseKeepAliveTimeout(val) { - const m = val.toString().match(KEEPALIVE_TIMEOUT_EXPR); - return m ? parseInt(m[1], 10) * 1e3 : null; - } - function headerNameToString(value) { - return typeof value === "string" ? headerNameLowerCasedRecord[value] ?? value.toLowerCase() : tree.lookup(value) ?? value.toString("latin1").toLowerCase(); - } - function bufferToLowerCasedHeaderName(value) { - return tree.lookup(value) ?? value.toString("latin1").toLowerCase(); - } - function parseHeaders(headers, obj) { - if (!Array.isArray(headers)) - return headers; - if (obj === void 0) - obj = {}; - for (let i = 0; i < headers.length; i += 2) { - const key = headerNameToString(headers[i]); - let val = obj[key]; - if (val) { - if (typeof val === "string") { - val = [val]; - obj[key] = val; + onHeaders(statusCode, rawHeaders, resume, statusMessage) { + const { callback, opaque, abort, context, responseHeaders, highWaterMark } = this; + const headers = responseHeaders === "raw" ? util.parseRawHeaders(rawHeaders) : util.parseHeaders(rawHeaders); + if (statusCode < 200) { + if (this.onInfo) { + this.onInfo({ statusCode, headers }); } - val.push(headers[i + 1].toString("utf8")); - } else { - const headersValue = headers[i + 1]; - if (typeof headersValue === "string") { - obj[key] = headersValue; + return; + } + const parsedHeaders = responseHeaders === "raw" ? util.parseHeaders(rawHeaders) : headers; + const contentType = parsedHeaders["content-type"]; + const contentLength = parsedHeaders["content-length"]; + const body = new Readable2({ resume, abort, contentType, contentLength, highWaterMark }); + this.callback = null; + this.res = body; + if (callback !== null) { + if (this.throwOnError && statusCode >= 400) { + this.runInAsyncScope( + getResolveErrorBodyCallback, + null, + { callback, body, contentType, statusCode, statusMessage, headers } + ); } else { - obj[key] = Array.isArray(headersValue) ? 
headersValue.map((x) => x.toString("utf8")) : headersValue.toString("utf8"); + this.runInAsyncScope(callback, null, null, { + statusCode, + headers, + trailers: this.trailers, + opaque, + body, + context + }); } } } - if ("content-length" in obj && "content-disposition" in obj) { - obj["content-disposition"] = Buffer.from(obj["content-disposition"]).toString("latin1"); + onData(chunk) { + const { res } = this; + return res.push(chunk); } - return obj; - } - function parseRawHeaders(headers) { - const ret = []; - let hasContentLength = false; - let contentDispositionIdx = -1; - for (let n = 0; n < headers.length; n += 2) { - const key = headers[n + 0].toString(); - const val = headers[n + 1].toString("utf8"); - if (key.length === 14 && (key === "content-length" || key.toLowerCase() === "content-length")) { - ret.push(key, val); - hasContentLength = true; - } else if (key.length === 19 && (key === "content-disposition" || key.toLowerCase() === "content-disposition")) { - contentDispositionIdx = ret.push(key, val) - 1; - } else { - ret.push(key, val); + onComplete(trailers) { + const { res } = this; + removeSignal(this); + util.parseHeaders(trailers, this.trailers); + res.push(null); + } + onError(err) { + const { res, callback, body, opaque } = this; + removeSignal(this); + if (callback) { + this.callback = null; + queueMicrotask(() => { + this.runInAsyncScope(callback, null, err, { opaque }); + }); + } + if (res) { + this.res = null; + queueMicrotask(() => { + util.destroy(res, err); + }); + } + if (body) { + this.body = null; + util.destroy(body, err); } } - if (hasContentLength && contentDispositionIdx !== -1) { - ret[contentDispositionIdx] = Buffer.from(ret[contentDispositionIdx]).toString("latin1"); + }; + function request(opts, callback) { + if (callback === void 0) { + return new Promise((resolve, reject) => { + request.call(this, opts, (err, data) => { + return err ? 
reject(err) : resolve(data); + }); + }); + } + try { + this.dispatch(opts, new RequestHandler(opts, callback)); + } catch (err) { + if (typeof callback !== "function") { + throw err; + } + const opaque = opts?.opaque; + queueMicrotask(() => callback(err, { opaque })); } - return ret; } - function isBuffer(buffer) { - return buffer instanceof Uint8Array || Buffer.isBuffer(buffer); - } - function validateHandler(handler, method, upgrade) { - if (!handler || typeof handler !== "object") { - throw new InvalidArgumentError("handler must be an object"); + module2.exports = request; + module2.exports.RequestHandler = RequestHandler; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/api-stream.js +var require_api_stream = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/api-stream.js"(exports, module2) { + "use strict"; + var assert3 = require("node:assert"); + var { finished, PassThrough } = require("node:stream"); + var { InvalidArgumentError, InvalidReturnValueError } = require_errors(); + var util = require_util(); + var { getResolveErrorBodyCallback } = require_util2(); + var { AsyncResource } = require("node:async_hooks"); + var { addSignal, removeSignal } = require_abort_signal(); + var StreamHandler = class extends AsyncResource { + constructor(opts, factory, callback) { + if (!opts || typeof opts !== "object") { + throw new InvalidArgumentError("invalid opts"); + } + const { signal, method, opaque, body, onInfo, responseHeaders, throwOnError } = opts; + try { + if (typeof callback !== "function") { + throw new InvalidArgumentError("invalid callback"); + } + if (typeof factory !== "function") { + throw new InvalidArgumentError("invalid factory"); + } + if (signal && typeof signal.on !== "function" && typeof signal.addEventListener !== "function") { + throw new InvalidArgumentError("signal must be an EventEmitter or EventTarget"); + } + if (method === "CONNECT") { + throw new InvalidArgumentError("invalid method"); + } + if (onInfo && typeof onInfo !== "function") { + throw new InvalidArgumentError("invalid onInfo callback"); + } + super("UNDICI_STREAM"); + } catch (err) { + if (util.isStream(body)) { + util.destroy(body.on("error", util.nop), err); + } + throw err; + } + this.responseHeaders = responseHeaders || null; + this.opaque = opaque || null; + this.factory = factory; + this.callback = callback; + this.res = null; + this.abort = null; + this.context = null; + this.trailers = null; + this.body = body; + this.onInfo = onInfo || null; + this.throwOnError = throwOnError || false; + if (util.isStream(body)) { + body.on("error", (err) => { + this.onError(err); + }); + } + addSignal(this, signal); } - if (typeof handler.onConnect !== "function") { - throw new InvalidArgumentError("invalid onConnect method"); + onConnect(abort, context) { + if (this.reason) { + abort(this.reason); + return; + } + assert3(this.callback); + this.abort = abort; + this.context = context; } - if (typeof handler.onError !== "function") { - throw new InvalidArgumentError("invalid onError method"); + onHeaders(statusCode, rawHeaders, resume, statusMessage) { + const { factory, opaque, context, callback, responseHeaders } = this; + const headers = responseHeaders === "raw" ? 
util.parseRawHeaders(rawHeaders) : util.parseHeaders(rawHeaders); + if (statusCode < 200) { + if (this.onInfo) { + this.onInfo({ statusCode, headers }); + } + return; + } + this.factory = null; + let res; + if (this.throwOnError && statusCode >= 400) { + const parsedHeaders = responseHeaders === "raw" ? util.parseHeaders(rawHeaders) : headers; + const contentType = parsedHeaders["content-type"]; + res = new PassThrough(); + this.callback = null; + this.runInAsyncScope( + getResolveErrorBodyCallback, + null, + { callback, body: res, contentType, statusCode, statusMessage, headers } + ); + } else { + if (factory === null) { + return; + } + res = this.runInAsyncScope(factory, null, { + statusCode, + headers, + opaque, + context + }); + if (!res || typeof res.write !== "function" || typeof res.end !== "function" || typeof res.on !== "function") { + throw new InvalidReturnValueError("expected Writable"); + } + finished(res, { readable: false }, (err) => { + const { callback: callback2, res: res2, opaque: opaque2, trailers, abort } = this; + this.res = null; + if (err || !res2.readable) { + util.destroy(res2, err); + } + this.callback = null; + this.runInAsyncScope(callback2, null, err || null, { opaque: opaque2, trailers }); + if (err) { + abort(); + } + }); + } + res.on("drain", resume); + this.res = res; + const needDrain = res.writableNeedDrain !== void 0 ? res.writableNeedDrain : res._writableState?.needDrain; + return needDrain !== true; } - if (typeof handler.onBodySent !== "function" && handler.onBodySent !== void 0) { - throw new InvalidArgumentError("invalid onBodySent method"); + onData(chunk) { + const { res } = this; + return res ? res.write(chunk) : true; } - if (upgrade || method === "CONNECT") { - if (typeof handler.onUpgrade !== "function") { - throw new InvalidArgumentError("invalid onUpgrade method"); + onComplete(trailers) { + const { res } = this; + removeSignal(this); + if (!res) { + return; } - } else { - if (typeof handler.onHeaders !== "function") { - throw new InvalidArgumentError("invalid onHeaders method"); + this.trailers = util.parseHeaders(trailers); + res.end(); + } + onError(err) { + const { res, callback, opaque, body } = this; + removeSignal(this); + this.factory = null; + if (res) { + this.res = null; + util.destroy(res, err); + } else if (callback) { + this.callback = null; + queueMicrotask(() => { + this.runInAsyncScope(callback, null, err, { opaque }); + }); } - if (typeof handler.onData !== "function") { - throw new InvalidArgumentError("invalid onData method"); + if (body) { + this.body = null; + util.destroy(body, err); } - if (typeof handler.onComplete !== "function") { - throw new InvalidArgumentError("invalid onComplete method"); + } + }; + function stream(opts, factory, callback) { + if (callback === void 0) { + return new Promise((resolve, reject) => { + stream.call(this, opts, factory, (err, data) => { + return err ? 
reject(err) : resolve(data); + }); + }); + } + try { + this.dispatch(opts, new StreamHandler(opts, factory, callback)); + } catch (err) { + if (typeof callback !== "function") { + throw err; } + const opaque = opts?.opaque; + queueMicrotask(() => callback(err, { opaque })); } } - function isDisturbed(body) { - return !!(body && (stream.isDisturbed(body) || body[kBodyUsed])); - } - function isErrored(body) { - return !!(body && stream.isErrored(body)); - } - function isReadable(body) { - return !!(body && stream.isReadable(body)); - } - function getSocketInfo(socket) { - return { - localAddress: socket.localAddress, - localPort: socket.localPort, - remoteAddress: socket.remoteAddress, - remotePort: socket.remotePort, - remoteFamily: socket.remoteFamily, - timeout: socket.timeout, - bytesWritten: socket.bytesWritten, - bytesRead: socket.bytesRead - }; - } - function ReadableStreamFrom(iterable) { - let iterator; - return new ReadableStream( - { - async start() { - iterator = iterable[Symbol.asyncIterator](); + module2.exports = stream; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/api-pipeline.js +var require_api_pipeline = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/api-pipeline.js"(exports, module2) { + "use strict"; + var { + Readable: Readable2, + Duplex, + PassThrough + } = require("node:stream"); + var { + InvalidArgumentError, + InvalidReturnValueError, + RequestAbortedError + } = require_errors(); + var util = require_util(); + var { AsyncResource } = require("node:async_hooks"); + var { addSignal, removeSignal } = require_abort_signal(); + var assert3 = require("node:assert"); + var kResume = Symbol("resume"); + var PipelineRequest = class extends Readable2 { + constructor() { + super({ autoDestroy: true }); + this[kResume] = null; + } + _read() { + const { [kResume]: resume } = this; + if (resume) { + this[kResume] = null; + resume(); + } + } + _destroy(err, callback) { + this._read(); + callback(err); + } + }; + var PipelineResponse = class extends Readable2 { + constructor(resume) { + super({ autoDestroy: true }); + this[kResume] = resume; + } + _read() { + this[kResume](); + } + _destroy(err, callback) { + if (!err && !this._readableState.endEmitted) { + err = new RequestAbortedError(); + } + callback(err); + } + }; + var PipelineHandler = class extends AsyncResource { + constructor(opts, handler) { + if (!opts || typeof opts !== "object") { + throw new InvalidArgumentError("invalid opts"); + } + if (typeof handler !== "function") { + throw new InvalidArgumentError("invalid handler"); + } + const { signal, method, opaque, onInfo, responseHeaders } = opts; + if (signal && typeof signal.on !== "function" && typeof signal.addEventListener !== "function") { + throw new InvalidArgumentError("signal must be an EventEmitter or EventTarget"); + } + if (method === "CONNECT") { + throw new InvalidArgumentError("invalid method"); + } + if (onInfo && typeof onInfo !== "function") { + throw new InvalidArgumentError("invalid onInfo callback"); + } + super("UNDICI_PIPELINE"); + this.opaque = opaque || null; + this.responseHeaders = responseHeaders || null; + this.handler = handler; + this.abort = null; + this.context = null; + this.onInfo = onInfo || null; + this.req = new PipelineRequest().on("error", util.nop); + this.ret = new Duplex({ + readableObjectMode: opts.objectMode, + autoDestroy: true, + read: () => { + const { body } = this; + if (body?.resume) { + body.resume(); + } }, - 
async pull(controller) { - const { done, value } = await iterator.next(); - if (done) { - queueMicrotask(() => { - controller.close(); - controller.byobRequest?.respond(0); - }); + write: (chunk, encoding, callback) => { + const { req } = this; + if (req.push(chunk, encoding) || req._readableState.destroyed) { + callback(); } else { - const buf = Buffer.isBuffer(value) ? value : Buffer.from(value); - if (buf.byteLength) { - controller.enqueue(new Uint8Array(buf)); - } + req[kResume] = callback; } - return controller.desiredSize > 0; - }, - async cancel(reason) { - await iterator.return(); }, - type: "bytes" + destroy: (err, callback) => { + const { body, req, res, ret, abort } = this; + if (!err && !ret._readableState.endEmitted) { + err = new RequestAbortedError(); + } + if (abort && err) { + abort(); + } + util.destroy(body, err); + util.destroy(req, err); + util.destroy(res, err); + removeSignal(this); + callback(err); + } + }).on("prefinish", () => { + const { req } = this; + req.push(null); + }); + this.res = null; + addSignal(this, signal); + } + onConnect(abort, context) { + const { ret, res } = this; + if (this.reason) { + abort(this.reason); + return; } - ); - } - function isFormDataLike(object) { - return object && typeof object === "object" && typeof object.append === "function" && typeof object.delete === "function" && typeof object.get === "function" && typeof object.getAll === "function" && typeof object.has === "function" && typeof object.set === "function" && object[Symbol.toStringTag] === "FormData"; - } - function addAbortListener(signal, listener) { - if ("addEventListener" in signal) { - signal.addEventListener("abort", listener, { once: true }); - return () => signal.removeEventListener("abort", listener); + assert3(!res, "pipeline cannot be retried"); + assert3(!ret.destroyed); + this.abort = abort; + this.context = context; } - signal.addListener("abort", listener); - return () => signal.removeListener("abort", listener); - } - var hasToWellFormed = !!String.prototype.toWellFormed; - function toUSVString(val) { - if (hasToWellFormed) { - return `${val}`.toWellFormed(); - } else if (nodeUtil.toUSVString) { - return nodeUtil.toUSVString(val); + onHeaders(statusCode, rawHeaders, resume) { + const { opaque, handler, context } = this; + if (statusCode < 200) { + if (this.onInfo) { + const headers = this.responseHeaders === "raw" ? util.parseRawHeaders(rawHeaders) : util.parseHeaders(rawHeaders); + this.onInfo({ statusCode, headers }); + } + return; + } + this.res = new PipelineResponse(resume); + let body; + try { + this.handler = null; + const headers = this.responseHeaders === "raw" ? 
util.parseRawHeaders(rawHeaders) : util.parseHeaders(rawHeaders); + body = this.runInAsyncScope(handler, null, { + statusCode, + headers, + opaque, + body: this.res, + context + }); + } catch (err) { + this.res.on("error", util.nop); + throw err; + } + if (!body || typeof body.on !== "function") { + throw new InvalidReturnValueError("expected Readable"); + } + body.on("data", (chunk) => { + const { ret, body: body2 } = this; + if (!ret.push(chunk) && body2.pause) { + body2.pause(); + } + }).on("error", (err) => { + const { ret } = this; + util.destroy(ret, err); + }).on("end", () => { + const { ret } = this; + ret.push(null); + }).on("close", () => { + const { ret } = this; + if (!ret._readableState.ended) { + util.destroy(ret, new RequestAbortedError()); + } + }); + this.body = body; } - return `${val}`; - } - function isTokenCharCode(c) { - switch (c) { - case 34: - case 40: - case 41: - case 44: - case 47: - case 58: - case 59: - case 60: - case 61: - case 62: - case 63: - case 64: - case 91: - case 92: - case 93: - case 123: - case 125: - return false; - default: - return c >= 33 && c <= 126; + onData(chunk) { + const { res } = this; + return res.push(chunk); + } + onComplete(trailers) { + const { res } = this; + res.push(null); + } + onError(err) { + const { ret } = this; + this.handler = null; + util.destroy(ret, err); + } + }; + function pipeline(opts, handler) { + try { + const pipelineHandler = new PipelineHandler(opts, handler); + this.dispatch({ ...opts, body: pipelineHandler.req }, pipelineHandler); + return pipelineHandler.ret; + } catch (err) { + return new PassThrough().destroy(err); } } - function isValidHTTPToken(characters) { - if (characters.length === 0) { - return false; + module2.exports = pipeline; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/api-upgrade.js +var require_api_upgrade = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/api-upgrade.js"(exports, module2) { + "use strict"; + var { InvalidArgumentError, SocketError } = require_errors(); + var { AsyncResource } = require("node:async_hooks"); + var util = require_util(); + var { addSignal, removeSignal } = require_abort_signal(); + var assert3 = require("node:assert"); + var UpgradeHandler = class extends AsyncResource { + constructor(opts, callback) { + if (!opts || typeof opts !== "object") { + throw new InvalidArgumentError("invalid opts"); + } + if (typeof callback !== "function") { + throw new InvalidArgumentError("invalid callback"); + } + const { signal, opaque, responseHeaders } = opts; + if (signal && typeof signal.on !== "function" && typeof signal.addEventListener !== "function") { + throw new InvalidArgumentError("signal must be an EventEmitter or EventTarget"); + } + super("UNDICI_UPGRADE"); + this.responseHeaders = responseHeaders || null; + this.opaque = opaque || null; + this.callback = callback; + this.abort = null; + this.context = null; + addSignal(this, signal); } - for (let i = 0; i < characters.length; ++i) { - if (!isTokenCharCode(characters.charCodeAt(i))) { - return false; + onConnect(abort, context) { + if (this.reason) { + abort(this.reason); + return; + } + assert3(this.callback); + this.abort = abort; + this.context = null; + } + onHeaders() { + throw new SocketError("bad upgrade", null); + } + onUpgrade(statusCode, rawHeaders, socket) { + const { callback, opaque, context } = this; + assert3.strictEqual(statusCode, 101); + removeSignal(this); + this.callback = null; + const 
headers = this.responseHeaders === "raw" ? util.parseRawHeaders(rawHeaders) : util.parseHeaders(rawHeaders); + this.runInAsyncScope(callback, null, null, { + headers, + socket, + opaque, + context + }); + } + onError(err) { + const { callback, opaque } = this; + removeSignal(this); + if (callback) { + this.callback = null; + queueMicrotask(() => { + this.runInAsyncScope(callback, null, err, { opaque }); + }); } } - return true; + }; + function upgrade(opts, callback) { + if (callback === void 0) { + return new Promise((resolve, reject) => { + upgrade.call(this, opts, (err, data) => { + return err ? reject(err) : resolve(data); + }); + }); + } + try { + const upgradeHandler = new UpgradeHandler(opts, callback); + this.dispatch({ + ...opts, + method: opts.method || "GET", + upgrade: opts.protocol || "Websocket" + }, upgradeHandler); + } catch (err) { + if (typeof callback !== "function") { + throw err; + } + const opaque = opts?.opaque; + queueMicrotask(() => callback(err, { opaque })); + } } - function parseRangeHeader(range) { - if (range == null || range === "") - return { start: 0, end: null, size: null }; - const m = range ? range.match(/^bytes (\d+)-(\d+)\/(\d+)?$/) : null; - return m ? { - start: parseInt(m[1]), - end: m[2] ? parseInt(m[2]) : null, - size: m[3] ? parseInt(m[3]) : null - } : null; - } - var kEnumerableProperty = /* @__PURE__ */ Object.create(null); - kEnumerableProperty.enumerable = true; - module2.exports = { - kEnumerableProperty, - nop, - isDisturbed, - isErrored, - isReadable, - toUSVString, - isReadableAborted, - isBlobLike, - parseOrigin, - parseURL, - getServerName, - isStream, - isIterable, - isAsyncIterable, - isDestroyed, - headerNameToString, - bufferToLowerCasedHeaderName, - parseRawHeaders, - parseHeaders, - parseKeepAliveTimeout, - destroy, - bodyLength, - deepClone, - ReadableStreamFrom, - isBuffer, - validateHandler, - getSocketInfo, - isFormDataLike, - buildURL, - addAbortListener, - isValidHTTPToken, - isTokenCharCode, - parseRangeHeader, - nodeMajor, - nodeMinor, - nodeHasAutoSelectFamily: nodeMajor > 18 || nodeMajor === 18 && nodeMinor >= 13, - safeHTTPMethods: ["GET", "HEAD", "OPTIONS", "TRACE"] - }; + module2.exports = upgrade; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/diagnostics.js -var require_diagnostics = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/diagnostics.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/api-connect.js +var require_api_connect = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/api-connect.js"(exports, module2) { "use strict"; - var diagnosticsChannel = require("node:diagnostics_channel"); - var util = require("node:util"); - var undiciDebugLog = util.debuglog("undici"); - var fetchDebuglog = util.debuglog("fetch"); - var websocketDebuglog = util.debuglog("websocket"); - var isClientSet = false; - var channels = { - // Client - beforeConnect: diagnosticsChannel.channel("undici:client:beforeConnect"), - connected: diagnosticsChannel.channel("undici:client:connected"), - connectError: diagnosticsChannel.channel("undici:client:connectError"), - sendHeaders: diagnosticsChannel.channel("undici:client:sendHeaders"), - // Request - create: diagnosticsChannel.channel("undici:request:create"), - bodySent: diagnosticsChannel.channel("undici:request:bodySent"), - headers: 
diagnosticsChannel.channel("undici:request:headers"), - trailers: diagnosticsChannel.channel("undici:request:trailers"), - error: diagnosticsChannel.channel("undici:request:error"), - // WebSocket - open: diagnosticsChannel.channel("undici:websocket:open"), - close: diagnosticsChannel.channel("undici:websocket:close"), - socketError: diagnosticsChannel.channel("undici:websocket:socket_error"), - ping: diagnosticsChannel.channel("undici:websocket:ping"), - pong: diagnosticsChannel.channel("undici:websocket:pong") - }; - if (undiciDebugLog.enabled || fetchDebuglog.enabled) { - const debuglog = fetchDebuglog.enabled ? fetchDebuglog : undiciDebugLog; - diagnosticsChannel.channel("undici:client:beforeConnect").subscribe((evt) => { - const { - connectParams: { version: version2, protocol, port, host } - } = evt; - debuglog( - "connecting to %s using %s%s", - `${host}${port ? `:${port}` : ""}`, - protocol, - version2 - ); - }); - diagnosticsChannel.channel("undici:client:connected").subscribe((evt) => { - const { - connectParams: { version: version2, protocol, port, host } - } = evt; - debuglog( - "connected to %s using %s%s", - `${host}${port ? `:${port}` : ""}`, - protocol, - version2 - ); - }); - diagnosticsChannel.channel("undici:client:connectError").subscribe((evt) => { - const { - connectParams: { version: version2, protocol, port, host }, - error - } = evt; - debuglog( - "connection to %s using %s%s errored - %s", - `${host}${port ? `:${port}` : ""}`, - protocol, - version2, - error.message - ); - }); - diagnosticsChannel.channel("undici:client:sendHeaders").subscribe((evt) => { - const { - request: { method, path: path10, origin } - } = evt; - debuglog("sending request to %s %s/%s", method, origin, path10); - }); - diagnosticsChannel.channel("undici:request:headers").subscribe((evt) => { - const { - request: { method, path: path10, origin }, - response: { statusCode } - } = evt; - debuglog( - "received response to %s %s/%s - HTTP %d", - method, - origin, - path10, - statusCode - ); - }); - diagnosticsChannel.channel("undici:request:trailers").subscribe((evt) => { - const { - request: { method, path: path10, origin } - } = evt; - debuglog("trailers received from %s %s/%s", method, origin, path10); - }); - diagnosticsChannel.channel("undici:request:error").subscribe((evt) => { - const { - request: { method, path: path10, origin }, - error - } = evt; - debuglog( - "request to %s %s/%s errored - %s", - method, - origin, - path10, - error.message - ); - }); - isClientSet = true; - } - if (websocketDebuglog.enabled) { - if (!isClientSet) { - const debuglog = undiciDebugLog.enabled ? undiciDebugLog : websocketDebuglog; - diagnosticsChannel.channel("undici:client:beforeConnect").subscribe((evt) => { - const { - connectParams: { version: version2, protocol, port, host } - } = evt; - debuglog( - "connecting to %s%s using %s%s", - host, - port ? `:${port}` : "", - protocol, - version2 - ); - }); - diagnosticsChannel.channel("undici:client:connected").subscribe((evt) => { - const { - connectParams: { version: version2, protocol, port, host } - } = evt; - debuglog( - "connected to %s%s using %s%s", - host, - port ? `:${port}` : "", - protocol, - version2 - ); - }); - diagnosticsChannel.channel("undici:client:connectError").subscribe((evt) => { - const { - connectParams: { version: version2, protocol, port, host }, - error - } = evt; - debuglog( - "connection to %s%s using %s%s errored - %s", - host, - port ? 
`:${port}` : "", - protocol, - version2, - error.message - ); + var assert3 = require("node:assert"); + var { AsyncResource } = require("node:async_hooks"); + var { InvalidArgumentError, SocketError } = require_errors(); + var util = require_util(); + var { addSignal, removeSignal } = require_abort_signal(); + var ConnectHandler = class extends AsyncResource { + constructor(opts, callback) { + if (!opts || typeof opts !== "object") { + throw new InvalidArgumentError("invalid opts"); + } + if (typeof callback !== "function") { + throw new InvalidArgumentError("invalid callback"); + } + const { signal, opaque, responseHeaders } = opts; + if (signal && typeof signal.on !== "function" && typeof signal.addEventListener !== "function") { + throw new InvalidArgumentError("signal must be an EventEmitter or EventTarget"); + } + super("UNDICI_CONNECT"); + this.opaque = opaque || null; + this.responseHeaders = responseHeaders || null; + this.callback = callback; + this.abort = null; + addSignal(this, signal); + } + onConnect(abort, context) { + if (this.reason) { + abort(this.reason); + return; + } + assert3(this.callback); + this.abort = abort; + this.context = context; + } + onHeaders() { + throw new SocketError("bad connect", null); + } + onUpgrade(statusCode, rawHeaders, socket) { + const { callback, opaque, context } = this; + removeSignal(this); + this.callback = null; + let headers = rawHeaders; + if (headers != null) { + headers = this.responseHeaders === "raw" ? util.parseRawHeaders(rawHeaders) : util.parseHeaders(rawHeaders); + } + this.runInAsyncScope(callback, null, null, { + statusCode, + headers, + socket, + opaque, + context }); - diagnosticsChannel.channel("undici:client:sendHeaders").subscribe((evt) => { - const { - request: { method, path: path10, origin } - } = evt; - debuglog("sending request to %s %s/%s", method, origin, path10); + } + onError(err) { + const { callback, opaque } = this; + removeSignal(this); + if (callback) { + this.callback = null; + queueMicrotask(() => { + this.runInAsyncScope(callback, null, err, { opaque }); + }); + } + } + }; + function connect(opts, callback) { + if (callback === void 0) { + return new Promise((resolve, reject) => { + connect.call(this, opts, (err, data) => { + return err ? reject(err) : resolve(data); + }); }); } - diagnosticsChannel.channel("undici:websocket:open").subscribe((evt) => { - const { - address: { address, port } - } = evt; - websocketDebuglog("connection opened %s%s", address, port ? 
`:${port}` : ""); - }); - diagnosticsChannel.channel("undici:websocket:close").subscribe((evt) => { - const { websocket, code, reason } = evt; - websocketDebuglog( - "closed connection to %s - %s %s", - websocket.url, - code, - reason - ); - }); - diagnosticsChannel.channel("undici:websocket:socket_error").subscribe((err) => { - websocketDebuglog("connection errored - %s", err.message); - }); - diagnosticsChannel.channel("undici:websocket:ping").subscribe((evt) => { - websocketDebuglog("ping received"); - }); - diagnosticsChannel.channel("undici:websocket:pong").subscribe((evt) => { - websocketDebuglog("pong received"); - }); + try { + const connectHandler = new ConnectHandler(opts, callback); + this.dispatch({ ...opts, method: "CONNECT" }, connectHandler); + } catch (err) { + if (typeof callback !== "function") { + throw err; + } + const opaque = opts?.opaque; + queueMicrotask(() => callback(err, { opaque })); + } } - module2.exports = { - channels - }; + module2.exports = connect; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/timers.js -var require_timers = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/timers.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/index.js +var require_api = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/api/index.js"(exports, module2) { "use strict"; - var fastNow = Date.now(); - var fastNowTimeout; - var fastTimers = []; - function onTimeout() { - fastNow = Date.now(); - let len = fastTimers.length; - let idx = 0; - while (idx < len) { - const timer = fastTimers[idx]; - if (timer.state === 0) { - timer.state = fastNow + timer.delay; - } else if (timer.state > 0 && fastNow >= timer.state) { - timer.state = -1; - timer.callback(timer.opaque); - } - if (timer.state === -1) { - timer.state = -2; - if (idx !== len - 1) { - fastTimers[idx] = fastTimers.pop(); - } else { - fastTimers.pop(); - } - len -= 1; - } else { - idx += 1; - } - } - if (fastTimers.length > 0) { - refreshTimeout(); - } - } - function refreshTimeout() { - if (fastNowTimeout?.refresh) { - fastNowTimeout.refresh(); - } else { - clearTimeout(fastNowTimeout); - fastNowTimeout = setTimeout(onTimeout, 1e3); - if (fastNowTimeout.unref) { - fastNowTimeout.unref(); - } - } - } - var Timeout = class { - constructor(callback, delay, opaque) { - this.callback = callback; - this.delay = delay; - this.opaque = opaque; - this.state = -2; - this.refresh(); - } - refresh() { - if (this.state === -2) { - fastTimers.push(this); - if (!fastNowTimeout || fastTimers.length === 1) { - refreshTimeout(); - } - } - this.state = 0; - } - clear() { - this.state = -1; - } - }; - module2.exports = { - setTimeout(callback, delay, opaque) { - return delay < 1e3 ? 
setTimeout(callback, delay, opaque) : new Timeout(callback, delay, opaque); - }, - clearTimeout(timeout) { - if (timeout instanceof Timeout) { - timeout.clear(); - } else { - clearTimeout(timeout); - } - } - }; + module2.exports.request = require_api_request(); + module2.exports.stream = require_api_stream(); + module2.exports.pipeline = require_api_pipeline(); + module2.exports.upgrade = require_api_upgrade(); + module2.exports.connect = require_api_connect(); } }); -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/deps/streamsearch/sbmh.js -var require_sbmh = __commonJS({ - ".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/deps/streamsearch/sbmh.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/dispatcher.js +var require_dispatcher = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/dispatcher.js"(exports, module2) { "use strict"; - var EventEmitter = require("node:events").EventEmitter; - var inherits = require("node:util").inherits; - function SBMH(needle) { - if (typeof needle === "string") { - needle = Buffer.from(needle); - } - if (!Buffer.isBuffer(needle)) { - throw new TypeError("The needle has to be a String or a Buffer."); - } - const needleLength = needle.length; - if (needleLength === 0) { - throw new Error("The needle cannot be an empty String/Buffer."); - } - if (needleLength > 256) { - throw new Error("The needle cannot have a length bigger than 256."); - } - this.maxMatches = Infinity; - this.matches = 0; - this._occ = new Array(256).fill(needleLength); - this._lookbehind_size = 0; - this._needle = needle; - this._bufpos = 0; - this._lookbehind = Buffer.alloc(needleLength); - for (var i = 0; i < needleLength - 1; ++i) { - this._occ[needle[i]] = needleLength - 1 - i; - } - } - inherits(SBMH, EventEmitter); - SBMH.prototype.reset = function() { - this._lookbehind_size = 0; - this.matches = 0; - this._bufpos = 0; - }; - SBMH.prototype.push = function(chunk, pos) { - if (!Buffer.isBuffer(chunk)) { - chunk = Buffer.from(chunk, "binary"); - } - const chlen = chunk.length; - this._bufpos = pos || 0; - let r; - while (r !== chlen && this.matches < this.maxMatches) { - r = this._sbmh_feed(chunk); + var EventEmitter = require("node:events"); + var Dispatcher = class extends EventEmitter { + dispatch() { + throw new Error("not implemented"); } - return r; - }; - SBMH.prototype._sbmh_feed = function(data) { - const len = data.length; - const needle = this._needle; - const needleLength = needle.length; - const lastNeedleChar = needle[needleLength - 1]; - let pos = -this._lookbehind_size; - let ch; - if (pos < 0) { - while (pos < 0 && pos <= len - needleLength) { - ch = this._sbmh_lookup_char(data, pos + needleLength - 1); - if (ch === lastNeedleChar && this._sbmh_memcmp(data, pos, needleLength - 1)) { - this._lookbehind_size = 0; - ++this.matches; - this.emit("info", true); - return this._bufpos = pos + needleLength; - } - pos += this._occ[ch]; - } - if (pos < 0) { - while (pos < 0 && !this._sbmh_memcmp(data, pos, len - pos)) { - ++pos; - } - } - if (pos >= 0) { - this.emit("info", false, this._lookbehind, 0, this._lookbehind_size); - this._lookbehind_size = 0; - } else { - const bytesToCutOff = this._lookbehind_size + pos; - if (bytesToCutOff > 0) { - this.emit("info", false, this._lookbehind, 0, bytesToCutOff); - } - this._lookbehind.copy( - this._lookbehind, - 0, - 
bytesToCutOff, - this._lookbehind_size - bytesToCutOff - ); - this._lookbehind_size -= bytesToCutOff; - data.copy(this._lookbehind, this._lookbehind_size); - this._lookbehind_size += len; - this._bufpos = len; - return len; - } - } - pos += (pos >= 0) * this._bufpos; - if (data.indexOf(needle, pos) !== -1) { - pos = data.indexOf(needle, pos); - ++this.matches; - if (pos > 0) { - this.emit("info", true, data, this._bufpos, pos); - } else { - this.emit("info", true); + close() { + throw new Error("not implemented"); + } + destroy() { + throw new Error("not implemented"); + } + compose(...args) { + const interceptors = Array.isArray(args[0]) ? args[0] : args; + let dispatch = this.dispatch.bind(this); + for (const interceptor of interceptors) { + if (interceptor == null) { + continue; + } + if (typeof interceptor !== "function") { + throw new TypeError(`invalid interceptor, expected function received ${typeof interceptor}`); + } + dispatch = interceptor(dispatch); + if (dispatch == null || typeof dispatch !== "function" || dispatch.length !== 2) { + throw new TypeError("invalid interceptor"); + } } - return this._bufpos = pos + needleLength; - } else { - pos = len - needleLength; + return new ComposedDispatcher(this, dispatch); } - while (pos < len && (data[pos] !== needle[0] || Buffer.compare( - data.subarray(pos, pos + len - pos), - needle.subarray(0, len - pos) - ) !== 0)) { - ++pos; + }; + var ComposedDispatcher = class extends Dispatcher { + #dispatcher = null; + #dispatch = null; + constructor(dispatcher, dispatch) { + super(); + this.#dispatcher = dispatcher; + this.#dispatch = dispatch; } - if (pos < len) { - data.copy(this._lookbehind, 0, pos, pos + (len - pos)); - this._lookbehind_size = len - pos; + dispatch(...args) { + this.#dispatch(...args); } - if (pos > 0) { - this.emit("info", false, data, this._bufpos, pos < len ? pos : len); + close(...args) { + return this.#dispatcher.close(...args); } - this._bufpos = len; - return len; - }; - SBMH.prototype._sbmh_lookup_char = function(data, pos) { - return pos < 0 ? 
this._lookbehind[this._lookbehind_size + pos] : data[pos]; - }; - SBMH.prototype._sbmh_memcmp = function(data, pos, len) { - for (var i = 0; i < len; ++i) { - if (this._sbmh_lookup_char(data, pos + i) !== this._needle[i]) { - return false; - } + destroy(...args) { + return this.#dispatcher.destroy(...args); } - return true; }; - module2.exports = SBMH; - } -}); - -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/deps/dicer/lib/PartStream.js -var require_PartStream = __commonJS({ - ".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/deps/dicer/lib/PartStream.js"(exports, module2) { - "use strict"; - var inherits = require("node:util").inherits; - var ReadableStream2 = require("node:stream").Readable; - function PartStream(opts) { - ReadableStream2.call(this, opts); - } - inherits(PartStream, ReadableStream2); - PartStream.prototype._read = function(n) { - }; - module2.exports = PartStream; + module2.exports = Dispatcher; } }); -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/utils/getLimit.js -var require_getLimit = __commonJS({ - ".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/utils/getLimit.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/dispatcher-base.js +var require_dispatcher_base = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/dispatcher-base.js"(exports, module2) { "use strict"; - module2.exports = function getLimit(limits, name, defaultLimit) { - if (!limits || limits[name] === void 0 || limits[name] === null) { - return defaultLimit; + var Dispatcher = require_dispatcher(); + var { + ClientDestroyedError, + ClientClosedError, + InvalidArgumentError + } = require_errors(); + var { kDestroy, kClose, kDispatch, kInterceptors } = require_symbols(); + var kDestroyed = Symbol("destroyed"); + var kClosed = Symbol("closed"); + var kOnDestroyed = Symbol("onDestroyed"); + var kOnClosed = Symbol("onClosed"); + var kInterceptedDispatch = Symbol("Intercepted Dispatch"); + var DispatcherBase = class extends Dispatcher { + constructor() { + super(); + this[kDestroyed] = false; + this[kOnDestroyed] = null; + this[kClosed] = false; + this[kOnClosed] = []; } - if (typeof limits[name] !== "number" || isNaN(limits[name])) { - throw new TypeError("Limit " + name + " is not a valid number"); + get destroyed() { + return this[kDestroyed]; } - return limits[name]; - }; - } -}); - -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/deps/dicer/lib/HeaderParser.js -var require_HeaderParser = __commonJS({ - ".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/deps/dicer/lib/HeaderParser.js"(exports, module2) { - "use strict"; - var EventEmitter = require("node:events").EventEmitter; - var inherits = require("node:util").inherits; - var getLimit = require_getLimit(); - var StreamSearch = require_sbmh(); - var B_DCRLF = Buffer.from("\r\n\r\n"); - var RE_CRLF = /\r\n/g; - var RE_HDR = /^([^:]+):[ \t]?([\x00-\xFF]+)?$/; - function HeaderParser(cfg) { - EventEmitter.call(this); - cfg = cfg || {}; - const self2 = this; - this.nread = 0; - this.maxed = false; - this.npairs = 0; - this.maxHeaderPairs = getLimit(cfg, "maxHeaderPairs", 2e3); - this.maxHeaderSize = getLimit(cfg, "maxHeaderSize", 80 * 1024); - 
this.buffer = ""; - this.header = {}; - this.finished = false; - this.ss = new StreamSearch(B_DCRLF); - this.ss.on("info", function(isMatch, data, start, end) { - if (data && !self2.maxed) { - if (self2.nread + end - start >= self2.maxHeaderSize) { - end = self2.maxHeaderSize - self2.nread + start; - self2.nread = self2.maxHeaderSize; - self2.maxed = true; - } else { - self2.nread += end - start; - } - self2.buffer += data.toString("binary", start, end); - } - if (isMatch) { - self2._finish(); - } - }); - } - inherits(HeaderParser, EventEmitter); - HeaderParser.prototype.push = function(data) { - const r = this.ss.push(data); - if (this.finished) { - return r; - } - }; - HeaderParser.prototype.reset = function() { - this.finished = false; - this.buffer = ""; - this.header = {}; - this.ss.reset(); - }; - HeaderParser.prototype._finish = function() { - if (this.buffer) { - this._parseHeader(); - } - this.ss.matches = this.ss.maxMatches; - const header = this.header; - this.header = {}; - this.buffer = ""; - this.finished = true; - this.nread = this.npairs = 0; - this.maxed = false; - this.emit("header", header); - }; - HeaderParser.prototype._parseHeader = function() { - if (this.npairs === this.maxHeaderPairs) { - return; + get closed() { + return this[kClosed]; } - const lines = this.buffer.split(RE_CRLF); - const len = lines.length; - let m, h; - for (var i = 0; i < len; ++i) { - if (lines[i].length === 0) { - continue; - } - if (lines[i][0] === " " || lines[i][0] === " ") { - if (h) { - this.header[h][this.header[h].length - 1] += lines[i]; - continue; + get interceptors() { + return this[kInterceptors]; + } + set interceptors(newInterceptors) { + if (newInterceptors) { + for (let i = newInterceptors.length - 1; i >= 0; i--) { + const interceptor = this[kInterceptors][i]; + if (typeof interceptor !== "function") { + throw new InvalidArgumentError("interceptor must be an function"); + } } } - const posColon = lines[i].indexOf(":"); - if (posColon === -1 || posColon === 0) { - return; - } - m = RE_HDR.exec(lines[i]); - h = m[1].toLowerCase(); - this.header[h] = this.header[h] || []; - this.header[h].push(m[2] || ""); - if (++this.npairs === this.maxHeaderPairs) { - break; - } + this[kInterceptors] = newInterceptors; } - }; - module2.exports = HeaderParser; - } -}); - -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/deps/dicer/lib/Dicer.js -var require_Dicer = __commonJS({ - ".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/deps/dicer/lib/Dicer.js"(exports, module2) { - "use strict"; - var WritableStream = require("node:stream").Writable; - var inherits = require("node:util").inherits; - var StreamSearch = require_sbmh(); - var PartStream = require_PartStream(); - var HeaderParser = require_HeaderParser(); - var DASH = 45; - var B_ONEDASH = Buffer.from("-"); - var B_CRLF = Buffer.from("\r\n"); - var EMPTY_FN = function() { - }; - function Dicer(cfg) { - if (!(this instanceof Dicer)) { - return new Dicer(cfg); - } - WritableStream.call(this, cfg); - if (!cfg || !cfg.headerFirst && typeof cfg.boundary !== "string") { - throw new TypeError("Boundary required"); - } - if (typeof cfg.boundary === "string") { - this.setBoundary(cfg.boundary); - } else { - this._bparser = void 0; - } - this._headerFirst = cfg.headerFirst; - this._dashes = 0; - this._parts = 0; - this._finished = false; - this._realFinish = false; - this._isPreamble = true; - this._justMatched = false; - this._firstWrite = true; - 
this._inHeader = true; - this._part = void 0; - this._cb = void 0; - this._ignoreData = false; - this._partOpts = { highWaterMark: cfg.partHwm }; - this._pause = false; - const self2 = this; - this._hparser = new HeaderParser(cfg); - this._hparser.on("header", function(header) { - self2._inHeader = false; - self2._part.emit("header", header); - }); - } - inherits(Dicer, WritableStream); - Dicer.prototype.emit = function(ev) { - if (ev === "finish" && !this._realFinish) { - if (!this._finished) { - const self2 = this; - process.nextTick(function() { - self2.emit("error", new Error("Unexpected end of multipart data")); - if (self2._part && !self2._ignoreData) { - const type = self2._isPreamble ? "Preamble" : "Part"; - self2._part.emit("error", new Error(type + " terminated early due to unexpected end of multipart data")); - self2._part.push(null); - process.nextTick(function() { - self2._realFinish = true; - self2.emit("finish"); - self2._realFinish = false; - }); - return; - } - self2._realFinish = true; - self2.emit("finish"); - self2._realFinish = false; + close(callback) { + if (callback === void 0) { + return new Promise((resolve, reject) => { + this.close((err, data) => { + return err ? reject(err) : resolve(data); + }); }); } - } else { - WritableStream.prototype.emit.apply(this, arguments); - } - }; - Dicer.prototype._write = function(data, encoding, cb) { - if (!this._hparser && !this._bparser) { - return cb(); - } - if (this._headerFirst && this._isPreamble) { - if (!this._part) { - this._part = new PartStream(this._partOpts); - if (this._events.preamble) { - this.emit("preamble", this._part); - } else { - this._ignore(); - } + if (typeof callback !== "function") { + throw new InvalidArgumentError("invalid callback"); } - const r = this._hparser.push(data); - if (!this._inHeader && r !== void 0 && r < data.length) { - data = data.slice(r); - } else { - return cb(); + if (this[kDestroyed]) { + queueMicrotask(() => callback(new ClientDestroyedError(), null)); + return; } - } - if (this._firstWrite) { - this._bparser.push(B_CRLF); - this._firstWrite = false; - } - this._bparser.push(data); - if (this._pause) { - this._cb = cb; - } else { - cb(); - } - }; - Dicer.prototype.reset = function() { - this._part = void 0; - this._bparser = void 0; - this._hparser = void 0; - }; - Dicer.prototype.setBoundary = function(boundary) { - const self2 = this; - this._bparser = new StreamSearch("\r\n--" + boundary); - this._bparser.on("info", function(isMatch, data, start, end) { - self2._oninfo(isMatch, data, start, end); - }); - }; - Dicer.prototype._ignore = function() { - if (this._part && !this._ignoreData) { - this._ignoreData = true; - this._part.on("error", EMPTY_FN); - this._part.resume(); - } - }; - Dicer.prototype._oninfo = function(isMatch, data, start, end) { - let buf; - const self2 = this; - let i = 0; - let r; - let shouldWriteMore = true; - if (!this._part && this._justMatched && data) { - while (this._dashes < 2 && start + i < end) { - if (data[start + i] === DASH) { - ++i; - ++this._dashes; + if (this[kClosed]) { + if (this[kOnClosed]) { + this[kOnClosed].push(callback); } else { - if (this._dashes) { - buf = B_ONEDASH; - } - this._dashes = 0; - break; - } - } - if (this._dashes === 2) { - if (start + i < end && this._events.trailer) { - this.emit("trailer", data.slice(start + i, end)); - } - this.reset(); - this._finished = true; - if (self2._parts === 0) { - self2._realFinish = true; - self2.emit("finish"); - self2._realFinish = false; + queueMicrotask(() => callback(null, 
null)); } - } - if (this._dashes) { return; } - } - if (this._justMatched) { - this._justMatched = false; - } - if (!this._part) { - this._part = new PartStream(this._partOpts); - this._part._read = function(n) { - self2._unpause(); + this[kClosed] = true; + this[kOnClosed].push(callback); + const onClosed = () => { + const callbacks = this[kOnClosed]; + this[kOnClosed] = null; + for (let i = 0; i < callbacks.length; i++) { + callbacks[i](null, null); + } }; - if (this._isPreamble && this._events.preamble) { - this.emit("preamble", this._part); - } else if (this._isPreamble !== true && this._events.part) { - this.emit("part", this._part); - } else { - this._ignore(); - } - if (!this._isPreamble) { - this._inHeader = true; - } + this[kClose]().then(() => this.destroy()).then(() => { + queueMicrotask(onClosed); + }); } - if (data && start < end && !this._ignoreData) { - if (this._isPreamble || !this._inHeader) { - if (buf) { - shouldWriteMore = this._part.push(buf); - } - shouldWriteMore = this._part.push(data.slice(start, end)); - if (!shouldWriteMore) { - this._pause = true; - } - } else if (!this._isPreamble && this._inHeader) { - if (buf) { - this._hparser.push(buf); - } - r = this._hparser.push(data.slice(start, end)); - if (!this._inHeader && r !== void 0 && r < end) { - this._oninfo(false, data, start + r, end); - } + destroy(err, callback) { + if (typeof err === "function") { + callback = err; + err = null; } - } - if (isMatch) { - this._hparser.reset(); - if (this._isPreamble) { - this._isPreamble = false; - } else { - if (start !== end) { - ++this._parts; - this._part.on("end", function() { - if (--self2._parts === 0) { - if (self2._finished) { - self2._realFinish = true; - self2.emit("finish"); - self2._realFinish = false; - } else { - self2._unpause(); - } - } + if (callback === void 0) { + return new Promise((resolve, reject) => { + this.destroy(err, (err2, data) => { + return err2 ? 
( + /* istanbul ignore next: should never error */ + reject(err2) + ) : resolve(data); }); - } + }); } - this._part.push(null); - this._part = void 0; - this._ignoreData = false; - this._justMatched = true; - this._dashes = 0; - } - }; - Dicer.prototype._unpause = function() { - if (!this._pause) { - return; - } - this._pause = false; - if (this._cb) { - const cb = this._cb; - this._cb = void 0; - cb(); - } - }; - module2.exports = Dicer; - } -}); - -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/utils/decodeText.js -var require_decodeText = __commonJS({ - ".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/utils/decodeText.js"(exports, module2) { - "use strict"; - var utf8Decoder = new TextDecoder("utf-8"); - var textDecoders = /* @__PURE__ */ new Map([ - ["utf-8", utf8Decoder], - ["utf8", utf8Decoder] - ]); - function getDecoder(charset) { - let lc; - while (true) { - switch (charset) { - case "utf-8": - case "utf8": - return decoders.utf8; - case "latin1": - case "ascii": - case "us-ascii": - case "iso-8859-1": - case "iso8859-1": - case "iso88591": - case "iso_8859-1": - case "windows-1252": - case "iso_8859-1:1987": - case "cp1252": - case "x-cp1252": - return decoders.latin1; - case "utf16le": - case "utf-16le": - case "ucs2": - case "ucs-2": - return decoders.utf16le; - case "base64": - return decoders.base64; - default: - if (lc === void 0) { - lc = true; - charset = charset.toLowerCase(); - continue; - } - return decoders.other.bind(charset); - } - } - } - var decoders = { - utf8: (data, sourceEncoding) => { - if (data.length === 0) { - return ""; - } - if (typeof data === "string") { - data = Buffer.from(data, sourceEncoding); - } - return data.utf8Slice(0, data.length); - }, - latin1: (data, sourceEncoding) => { - if (data.length === 0) { - return ""; - } - if (typeof data === "string") { - return data; - } - return data.latin1Slice(0, data.length); - }, - utf16le: (data, sourceEncoding) => { - if (data.length === 0) { - return ""; + if (typeof callback !== "function") { + throw new InvalidArgumentError("invalid callback"); } - if (typeof data === "string") { - data = Buffer.from(data, sourceEncoding); + if (this[kDestroyed]) { + if (this[kOnDestroyed]) { + this[kOnDestroyed].push(callback); + } else { + queueMicrotask(() => callback(null, null)); + } + return; } - return data.ucs2Slice(0, data.length); - }, - base64: (data, sourceEncoding) => { - if (data.length === 0) { - return ""; + if (!err) { + err = new ClientDestroyedError(); } - if (typeof data === "string") { - data = Buffer.from(data, sourceEncoding); + this[kDestroyed] = true; + this[kOnDestroyed] = this[kOnDestroyed] || []; + this[kOnDestroyed].push(callback); + const onDestroyed = () => { + const callbacks = this[kOnDestroyed]; + this[kOnDestroyed] = null; + for (let i = 0; i < callbacks.length; i++) { + callbacks[i](null, null); + } + }; + this[kDestroy](err).then(() => { + queueMicrotask(onDestroyed); + }); + } + [kInterceptedDispatch](opts, handler) { + if (!this[kInterceptors] || this[kInterceptors].length === 0) { + this[kInterceptedDispatch] = this[kDispatch]; + return this[kDispatch](opts, handler); } - return data.base64Slice(0, data.length); - }, - other: (data, sourceEncoding) => { - if (data.length === 0) { - return ""; + let dispatch = this[kDispatch].bind(this); + for (let i = this[kInterceptors].length - 1; i >= 0; i--) { + dispatch = this[kInterceptors][i](dispatch); } - if (typeof data === "string") { 
- data = Buffer.from(data, sourceEncoding); + this[kInterceptedDispatch] = dispatch; + return dispatch(opts, handler); + } + dispatch(opts, handler) { + if (!handler || typeof handler !== "object") { + throw new InvalidArgumentError("handler must be an object"); } - if (textDecoders.has(exports.toString())) { - try { - return textDecoders.get(exports).decode(data); - } catch (e) { + try { + if (!opts || typeof opts !== "object") { + throw new InvalidArgumentError("opts must be an object."); + } + if (this[kDestroyed] || this[kOnDestroyed]) { + throw new ClientDestroyedError(); + } + if (this[kClosed]) { + throw new ClientClosedError(); + } + return this[kInterceptedDispatch](opts, handler); + } catch (err) { + if (typeof handler.onError !== "function") { + throw new InvalidArgumentError("invalid onError method"); } + handler.onError(err); + return false; } - return typeof data === "string" ? data : data.toString(); } }; - function decodeText(text, sourceEncoding, destEncoding) { - if (text) { - return getDecoder(destEncoding)(text, sourceEncoding); - } - return text; - } - module2.exports = decodeText; + module2.exports = DispatcherBase; } }); -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/utils/parseParams.js -var require_parseParams = __commonJS({ - ".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/utils/parseParams.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/fixed-queue.js +var require_fixed_queue = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/fixed-queue.js"(exports, module2) { "use strict"; - var decodeText = require_decodeText(); - var RE_ENCODED = /%[a-fA-F0-9][a-fA-F0-9]/g; - var EncodedLookup = { - "%00": "\0", - "%01": "", - "%02": "", - "%03": "", - "%04": "", - "%05": "", - "%06": "", - "%07": "\x07", - "%08": "\b", - "%09": " ", - "%0a": "\n", - "%0A": "\n", - "%0b": "\v", - "%0B": "\v", - "%0c": "\f", - "%0C": "\f", - "%0d": "\r", - "%0D": "\r", - "%0e": "", - "%0E": "", - "%0f": "", - "%0F": "", - "%10": "", - "%11": "", - "%12": "", - "%13": "", - "%14": "", - "%15": "", - "%16": "", - "%17": "", - "%18": "", - "%19": "", - "%1a": "", - "%1A": "", - "%1b": "\x1B", - "%1B": "\x1B", - "%1c": "", - "%1C": "", - "%1d": "", - "%1D": "", - "%1e": "", - "%1E": "", - "%1f": "", - "%1F": "", - "%20": " ", - "%21": "!", - "%22": '"', - "%23": "#", - "%24": "$", - "%25": "%", - "%26": "&", - "%27": "'", - "%28": "(", - "%29": ")", - "%2a": "*", - "%2A": "*", - "%2b": "+", - "%2B": "+", - "%2c": ",", - "%2C": ",", - "%2d": "-", - "%2D": "-", - "%2e": ".", - "%2E": ".", - "%2f": "/", - "%2F": "/", - "%30": "0", - "%31": "1", - "%32": "2", - "%33": "3", - "%34": "4", - "%35": "5", - "%36": "6", - "%37": "7", - "%38": "8", - "%39": "9", - "%3a": ":", - "%3A": ":", - "%3b": ";", - "%3B": ";", - "%3c": "<", - "%3C": "<", - "%3d": "=", - "%3D": "=", - "%3e": ">", - "%3E": ">", - "%3f": "?", - "%3F": "?", - "%40": "@", - "%41": "A", - "%42": "B", - "%43": "C", - "%44": "D", - "%45": "E", - "%46": "F", - "%47": "G", - "%48": "H", - "%49": "I", - "%4a": "J", - "%4A": "J", - "%4b": "K", - "%4B": "K", - "%4c": "L", - "%4C": "L", - "%4d": "M", - "%4D": "M", - "%4e": "N", - "%4E": "N", - "%4f": "O", - "%4F": "O", - "%50": "P", - "%51": "Q", - "%52": "R", - "%53": "S", - "%54": "T", - "%55": "U", - "%56": "V", - "%57": "W", - "%58": "X", - "%59": "Y", - "%5a": "Z", 
- "%5A": "Z", - "%5b": "[", - "%5B": "[", - "%5c": "\\", - "%5C": "\\", - "%5d": "]", - "%5D": "]", - "%5e": "^", - "%5E": "^", - "%5f": "_", - "%5F": "_", - "%60": "`", - "%61": "a", - "%62": "b", - "%63": "c", - "%64": "d", - "%65": "e", - "%66": "f", - "%67": "g", - "%68": "h", - "%69": "i", - "%6a": "j", - "%6A": "j", - "%6b": "k", - "%6B": "k", - "%6c": "l", - "%6C": "l", - "%6d": "m", - "%6D": "m", - "%6e": "n", - "%6E": "n", - "%6f": "o", - "%6F": "o", - "%70": "p", - "%71": "q", - "%72": "r", - "%73": "s", - "%74": "t", - "%75": "u", - "%76": "v", - "%77": "w", - "%78": "x", - "%79": "y", - "%7a": "z", - "%7A": "z", - "%7b": "{", - "%7B": "{", - "%7c": "|", - "%7C": "|", - "%7d": "}", - "%7D": "}", - "%7e": "~", - "%7E": "~", - "%7f": "\x7F", - "%7F": "\x7F", - "%80": "\x80", - "%81": "\x81", - "%82": "\x82", - "%83": "\x83", - "%84": "\x84", - "%85": "\x85", - "%86": "\x86", - "%87": "\x87", - "%88": "\x88", - "%89": "\x89", - "%8a": "\x8A", - "%8A": "\x8A", - "%8b": "\x8B", - "%8B": "\x8B", - "%8c": "\x8C", - "%8C": "\x8C", - "%8d": "\x8D", - "%8D": "\x8D", - "%8e": "\x8E", - "%8E": "\x8E", - "%8f": "\x8F", - "%8F": "\x8F", - "%90": "\x90", - "%91": "\x91", - "%92": "\x92", - "%93": "\x93", - "%94": "\x94", - "%95": "\x95", - "%96": "\x96", - "%97": "\x97", - "%98": "\x98", - "%99": "\x99", - "%9a": "\x9A", - "%9A": "\x9A", - "%9b": "\x9B", - "%9B": "\x9B", - "%9c": "\x9C", - "%9C": "\x9C", - "%9d": "\x9D", - "%9D": "\x9D", - "%9e": "\x9E", - "%9E": "\x9E", - "%9f": "\x9F", - "%9F": "\x9F", - "%a0": "\xA0", - "%A0": "\xA0", - "%a1": "\xA1", - "%A1": "\xA1", - "%a2": "\xA2", - "%A2": "\xA2", - "%a3": "\xA3", - "%A3": "\xA3", - "%a4": "\xA4", - "%A4": "\xA4", - "%a5": "\xA5", - "%A5": "\xA5", - "%a6": "\xA6", - "%A6": "\xA6", - "%a7": "\xA7", - "%A7": "\xA7", - "%a8": "\xA8", - "%A8": "\xA8", - "%a9": "\xA9", - "%A9": "\xA9", - "%aa": "\xAA", - "%Aa": "\xAA", - "%aA": "\xAA", - "%AA": "\xAA", - "%ab": "\xAB", - "%Ab": "\xAB", - "%aB": "\xAB", - "%AB": "\xAB", - "%ac": "\xAC", - "%Ac": "\xAC", - "%aC": "\xAC", - "%AC": "\xAC", - "%ad": "\xAD", - "%Ad": "\xAD", - "%aD": "\xAD", - "%AD": "\xAD", - "%ae": "\xAE", - "%Ae": "\xAE", - "%aE": "\xAE", - "%AE": "\xAE", - "%af": "\xAF", - "%Af": "\xAF", - "%aF": "\xAF", - "%AF": "\xAF", - "%b0": "\xB0", - "%B0": "\xB0", - "%b1": "\xB1", - "%B1": "\xB1", - "%b2": "\xB2", - "%B2": "\xB2", - "%b3": "\xB3", - "%B3": "\xB3", - "%b4": "\xB4", - "%B4": "\xB4", - "%b5": "\xB5", - "%B5": "\xB5", - "%b6": "\xB6", - "%B6": "\xB6", - "%b7": "\xB7", - "%B7": "\xB7", - "%b8": "\xB8", - "%B8": "\xB8", - "%b9": "\xB9", - "%B9": "\xB9", - "%ba": "\xBA", - "%Ba": "\xBA", - "%bA": "\xBA", - "%BA": "\xBA", - "%bb": "\xBB", - "%Bb": "\xBB", - "%bB": "\xBB", - "%BB": "\xBB", - "%bc": "\xBC", - "%Bc": "\xBC", - "%bC": "\xBC", - "%BC": "\xBC", - "%bd": "\xBD", - "%Bd": "\xBD", - "%bD": "\xBD", - "%BD": "\xBD", - "%be": "\xBE", - "%Be": "\xBE", - "%bE": "\xBE", - "%BE": "\xBE", - "%bf": "\xBF", - "%Bf": "\xBF", - "%bF": "\xBF", - "%BF": "\xBF", - "%c0": "\xC0", - "%C0": "\xC0", - "%c1": "\xC1", - "%C1": "\xC1", - "%c2": "\xC2", - "%C2": "\xC2", - "%c3": "\xC3", - "%C3": "\xC3", - "%c4": "\xC4", - "%C4": "\xC4", - "%c5": "\xC5", - "%C5": "\xC5", - "%c6": "\xC6", - "%C6": "\xC6", - "%c7": "\xC7", - "%C7": "\xC7", - "%c8": "\xC8", - "%C8": "\xC8", - "%c9": "\xC9", - "%C9": "\xC9", - "%ca": "\xCA", - "%Ca": "\xCA", - "%cA": "\xCA", - "%CA": "\xCA", - "%cb": "\xCB", - "%Cb": "\xCB", - "%cB": "\xCB", - "%CB": "\xCB", - "%cc": "\xCC", - "%Cc": "\xCC", - "%cC": "\xCC", - 
"%CC": "\xCC", - "%cd": "\xCD", - "%Cd": "\xCD", - "%cD": "\xCD", - "%CD": "\xCD", - "%ce": "\xCE", - "%Ce": "\xCE", - "%cE": "\xCE", - "%CE": "\xCE", - "%cf": "\xCF", - "%Cf": "\xCF", - "%cF": "\xCF", - "%CF": "\xCF", - "%d0": "\xD0", - "%D0": "\xD0", - "%d1": "\xD1", - "%D1": "\xD1", - "%d2": "\xD2", - "%D2": "\xD2", - "%d3": "\xD3", - "%D3": "\xD3", - "%d4": "\xD4", - "%D4": "\xD4", - "%d5": "\xD5", - "%D5": "\xD5", - "%d6": "\xD6", - "%D6": "\xD6", - "%d7": "\xD7", - "%D7": "\xD7", - "%d8": "\xD8", - "%D8": "\xD8", - "%d9": "\xD9", - "%D9": "\xD9", - "%da": "\xDA", - "%Da": "\xDA", - "%dA": "\xDA", - "%DA": "\xDA", - "%db": "\xDB", - "%Db": "\xDB", - "%dB": "\xDB", - "%DB": "\xDB", - "%dc": "\xDC", - "%Dc": "\xDC", - "%dC": "\xDC", - "%DC": "\xDC", - "%dd": "\xDD", - "%Dd": "\xDD", - "%dD": "\xDD", - "%DD": "\xDD", - "%de": "\xDE", - "%De": "\xDE", - "%dE": "\xDE", - "%DE": "\xDE", - "%df": "\xDF", - "%Df": "\xDF", - "%dF": "\xDF", - "%DF": "\xDF", - "%e0": "\xE0", - "%E0": "\xE0", - "%e1": "\xE1", - "%E1": "\xE1", - "%e2": "\xE2", - "%E2": "\xE2", - "%e3": "\xE3", - "%E3": "\xE3", - "%e4": "\xE4", - "%E4": "\xE4", - "%e5": "\xE5", - "%E5": "\xE5", - "%e6": "\xE6", - "%E6": "\xE6", - "%e7": "\xE7", - "%E7": "\xE7", - "%e8": "\xE8", - "%E8": "\xE8", - "%e9": "\xE9", - "%E9": "\xE9", - "%ea": "\xEA", - "%Ea": "\xEA", - "%eA": "\xEA", - "%EA": "\xEA", - "%eb": "\xEB", - "%Eb": "\xEB", - "%eB": "\xEB", - "%EB": "\xEB", - "%ec": "\xEC", - "%Ec": "\xEC", - "%eC": "\xEC", - "%EC": "\xEC", - "%ed": "\xED", - "%Ed": "\xED", - "%eD": "\xED", - "%ED": "\xED", - "%ee": "\xEE", - "%Ee": "\xEE", - "%eE": "\xEE", - "%EE": "\xEE", - "%ef": "\xEF", - "%Ef": "\xEF", - "%eF": "\xEF", - "%EF": "\xEF", - "%f0": "\xF0", - "%F0": "\xF0", - "%f1": "\xF1", - "%F1": "\xF1", - "%f2": "\xF2", - "%F2": "\xF2", - "%f3": "\xF3", - "%F3": "\xF3", - "%f4": "\xF4", - "%F4": "\xF4", - "%f5": "\xF5", - "%F5": "\xF5", - "%f6": "\xF6", - "%F6": "\xF6", - "%f7": "\xF7", - "%F7": "\xF7", - "%f8": "\xF8", - "%F8": "\xF8", - "%f9": "\xF9", - "%F9": "\xF9", - "%fa": "\xFA", - "%Fa": "\xFA", - "%fA": "\xFA", - "%FA": "\xFA", - "%fb": "\xFB", - "%Fb": "\xFB", - "%fB": "\xFB", - "%FB": "\xFB", - "%fc": "\xFC", - "%Fc": "\xFC", - "%fC": "\xFC", - "%FC": "\xFC", - "%fd": "\xFD", - "%Fd": "\xFD", - "%fD": "\xFD", - "%FD": "\xFD", - "%fe": "\xFE", - "%Fe": "\xFE", - "%fE": "\xFE", - "%FE": "\xFE", - "%ff": "\xFF", - "%Ff": "\xFF", - "%fF": "\xFF", - "%FF": "\xFF" - }; - function encodedReplacer(match) { - return EncodedLookup[match]; - } - var STATE_KEY = 0; - var STATE_VALUE = 1; - var STATE_CHARSET = 2; - var STATE_LANG = 3; - function parseParams(str) { - const res = []; - let state = STATE_KEY; - let charset = ""; - let inquote = false; - let escaping = false; - let p = 0; - let tmp = ""; - const len = str.length; - for (var i = 0; i < len; ++i) { - const char = str[i]; - if (char === "\\" && inquote) { - if (escaping) { - escaping = false; - } else { - escaping = true; - continue; - } - } else if (char === '"') { - if (!escaping) { - if (inquote) { - inquote = false; - state = STATE_KEY; - } else { - inquote = true; - } - continue; - } else { - escaping = false; - } - } else { - if (escaping && inquote) { - tmp += "\\"; - } - escaping = false; - if ((state === STATE_CHARSET || state === STATE_LANG) && char === "'") { - if (state === STATE_CHARSET) { - state = STATE_LANG; - charset = tmp.substring(1); - } else { - state = STATE_VALUE; - } - tmp = ""; - continue; - } else if (state === STATE_KEY && (char === "*" || char === "=") && 
res.length) { - state = char === "*" ? STATE_CHARSET : STATE_VALUE; - res[p] = [tmp, void 0]; - tmp = ""; - continue; - } else if (!inquote && char === ";") { - state = STATE_KEY; - if (charset) { - if (tmp.length) { - tmp = decodeText( - tmp.replace(RE_ENCODED, encodedReplacer), - "binary", - charset - ); - } - charset = ""; - } else if (tmp.length) { - tmp = decodeText(tmp, "binary", "utf8"); - } - if (res[p] === void 0) { - res[p] = tmp; - } else { - res[p][1] = tmp; - } - tmp = ""; - ++p; - continue; - } else if (!inquote && (char === " " || char === " ")) { - continue; - } - } - tmp += char; + var kSize = 2048; + var kMask = kSize - 1; + var FixedCircularBuffer = class { + constructor() { + this.bottom = 0; + this.top = 0; + this.list = new Array(kSize); + this.next = null; } - if (charset && tmp.length) { - tmp = decodeText( - tmp.replace(RE_ENCODED, encodedReplacer), - "binary", - charset - ); - } else if (tmp) { - tmp = decodeText(tmp, "binary", "utf8"); + isEmpty() { + return this.top === this.bottom; } - if (res[p] === void 0) { - if (tmp) { - res[p] = tmp; - } - } else { - res[p][1] = tmp; + isFull() { + return (this.top + 1 & kMask) === this.bottom; } - return res; - } - module2.exports = parseParams; - } -}); - -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/utils/basename.js -var require_basename = __commonJS({ - ".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/utils/basename.js"(exports, module2) { - "use strict"; - module2.exports = function basename(path10) { - if (typeof path10 !== "string") { - return ""; + push(data) { + this.list[this.top] = data; + this.top = this.top + 1 & kMask; } - for (var i = path10.length - 1; i >= 0; --i) { - switch (path10.charCodeAt(i)) { - case 47: - case 92: - path10 = path10.slice(i + 1); - return path10 === ".." || path10 === "." ? "" : path10; - } + shift() { + const nextItem = this.list[this.bottom]; + if (nextItem === void 0) + return null; + this.list[this.bottom] = void 0; + this.bottom = this.bottom + 1 & kMask; + return nextItem; } - return path10 === ".." || path10 === "." ? 
"" : path10; }; - } -}); - -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/types/multipart.js -var require_multipart = __commonJS({ - ".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/types/multipart.js"(exports, module2) { - "use strict"; - var { Readable: Readable2 } = require("node:stream"); - var { inherits } = require("node:util"); - var Dicer = require_Dicer(); - var parseParams = require_parseParams(); - var decodeText = require_decodeText(); - var basename = require_basename(); - var getLimit = require_getLimit(); - var RE_BOUNDARY = /^boundary$/i; - var RE_FIELD = /^form-data$/i; - var RE_CHARSET = /^charset$/i; - var RE_FILENAME = /^filename$/i; - var RE_NAME = /^name$/i; - Multipart.detect = /^multipart\/form-data/i; - function Multipart(boy, cfg) { - let i; - let len; - const self2 = this; - let boundary; - const limits = cfg.limits; - const isPartAFile = cfg.isPartAFile || ((fieldName, contentType, fileName) => contentType === "application/octet-stream" || fileName !== void 0); - const parsedConType = cfg.parsedConType || []; - const defCharset = cfg.defCharset || "utf8"; - const preservePath = cfg.preservePath; - const fileOpts = { highWaterMark: cfg.fileHwm }; - for (i = 0, len = parsedConType.length; i < len; ++i) { - if (Array.isArray(parsedConType[i]) && RE_BOUNDARY.test(parsedConType[i][0])) { - boundary = parsedConType[i][1]; - break; - } + module2.exports = class FixedQueue { + constructor() { + this.head = this.tail = new FixedCircularBuffer(); } - function checkFinished() { - if (nends === 0 && finished && !boy._done) { - finished = false; - self2.end(); - } + isEmpty() { + return this.head.isEmpty(); } - if (typeof boundary !== "string") { - throw new Error("Multipart: Boundary not found"); + push(data) { + if (this.head.isFull()) { + this.head = this.head.next = new FixedCircularBuffer(); + } + this.head.push(data); } - const fieldSizeLimit = getLimit(limits, "fieldSize", 1 * 1024 * 1024); - const fileSizeLimit = getLimit(limits, "fileSize", Infinity); - const filesLimit = getLimit(limits, "files", Infinity); - const fieldsLimit = getLimit(limits, "fields", Infinity); - const partsLimit = getLimit(limits, "parts", Infinity); - const headerPairsLimit = getLimit(limits, "headerPairs", 2e3); - const headerSizeLimit = getLimit(limits, "headerSize", 80 * 1024); - let nfiles = 0; - let nfields = 0; - let nends = 0; - let curFile; - let curField; - let finished = false; - this._needDrain = false; - this._pause = false; - this._cb = void 0; - this._nparts = 0; - this._boy = boy; - const parserCfg = { - boundary, - maxHeaderPairs: headerPairsLimit, - maxHeaderSize: headerSizeLimit, - partHwm: fileOpts.highWaterMark, - highWaterMark: cfg.highWaterMark - }; - this.parser = new Dicer(parserCfg); - this.parser.on("drain", function() { - self2._needDrain = false; - if (self2._cb && !self2._pause) { - const cb = self2._cb; - self2._cb = void 0; - cb(); + shift() { + const tail = this.tail; + const next = tail.shift(); + if (tail.isEmpty() && tail.next !== null) { + this.tail = tail.next; } - }).on("part", function onPart(part) { - if (++self2._nparts > partsLimit) { - self2.parser.removeListener("part", onPart); - self2.parser.on("part", skipPart); - boy.hitPartsLimit = true; - boy.emit("partsLimit"); - return skipPart(part); - } - if (curField) { - const field = curField; - field.emit("end"); - field.removeAllListeners("end"); - } - part.on("header", function(header) { - let 
contype; - let fieldname; - let parsed; - let charset; - let encoding; - let filename; - let nsize = 0; - if (header["content-type"]) { - parsed = parseParams(header["content-type"][0]); - if (parsed[0]) { - contype = parsed[0].toLowerCase(); - for (i = 0, len = parsed.length; i < len; ++i) { - if (RE_CHARSET.test(parsed[i][0])) { - charset = parsed[i][1].toLowerCase(); - break; - } - } - } - } - if (contype === void 0) { - contype = "text/plain"; - } - if (charset === void 0) { - charset = defCharset; - } - if (header["content-disposition"]) { - parsed = parseParams(header["content-disposition"][0]); - if (!RE_FIELD.test(parsed[0])) { - return skipPart(part); - } - for (i = 0, len = parsed.length; i < len; ++i) { - if (RE_NAME.test(parsed[i][0])) { - fieldname = parsed[i][1]; - } else if (RE_FILENAME.test(parsed[i][0])) { - filename = parsed[i][1]; - if (!preservePath) { - filename = basename(filename); - } - } - } - } else { - return skipPart(part); - } - if (header["content-transfer-encoding"]) { - encoding = header["content-transfer-encoding"][0].toLowerCase(); - } else { - encoding = "7bit"; - } - let onData, onEnd; - if (isPartAFile(fieldname, contype, filename)) { - if (nfiles === filesLimit) { - if (!boy.hitFilesLimit) { - boy.hitFilesLimit = true; - boy.emit("filesLimit"); - } - return skipPart(part); - } - ++nfiles; - if (!boy._events.file) { - self2.parser._ignore(); - return; - } - ++nends; - const file = new FileStream(fileOpts); - curFile = file; - file.on("end", function() { - --nends; - self2._pause = false; - checkFinished(); - if (self2._cb && !self2._needDrain) { - const cb = self2._cb; - self2._cb = void 0; - cb(); - } - }); - file._read = function(n) { - if (!self2._pause) { - return; - } - self2._pause = false; - if (self2._cb && !self2._needDrain) { - const cb = self2._cb; - self2._cb = void 0; - cb(); - } - }; - boy.emit("file", fieldname, file, filename, encoding, contype); - onData = function(data) { - if ((nsize += data.length) > fileSizeLimit) { - const extralen = fileSizeLimit - nsize + data.length; - if (extralen > 0) { - file.push(data.slice(0, extralen)); - } - file.truncated = true; - file.bytesRead = fileSizeLimit; - part.removeAllListeners("data"); - file.emit("limit"); - return; - } else if (!file.push(data)) { - self2._pause = true; - } - file.bytesRead = nsize; - }; - onEnd = function() { - curFile = void 0; - file.push(null); - }; - } else { - if (nfields === fieldsLimit) { - if (!boy.hitFieldsLimit) { - boy.hitFieldsLimit = true; - boy.emit("fieldsLimit"); - } - return skipPart(part); - } - ++nfields; - ++nends; - let buffer = ""; - let truncated = false; - curField = part; - onData = function(data) { - if ((nsize += data.length) > fieldSizeLimit) { - const extralen = fieldSizeLimit - (nsize - data.length); - buffer += data.toString("binary", 0, extralen); - truncated = true; - part.removeAllListeners("data"); - } else { - buffer += data.toString("binary"); - } - }; - onEnd = function() { - curField = void 0; - if (buffer.length) { - buffer = decodeText(buffer, "binary", charset); - } - boy.emit("field", fieldname, buffer, false, truncated, encoding, contype); - --nends; - checkFinished(); - }; - } - part._readableState.sync = false; - part.on("data", onData); - part.on("end", onEnd); - }).on("error", function(err) { - if (curFile) { - curFile.emit("error", err); - } - }); - }).on("error", function(err) { - boy.emit("error", err); - }).on("finish", function() { - finished = true; - checkFinished(); - }); - } - Multipart.prototype.write = 
function(chunk, cb) { - const r = this.parser.write(chunk); - if (r && !this._pause) { - cb(); - } else { - this._needDrain = !r; - this._cb = cb; - } - }; - Multipart.prototype.end = function() { - const self2 = this; - if (self2.parser.writable) { - self2.parser.end(); - } else if (!self2._boy._done) { - process.nextTick(function() { - self2._boy._done = true; - self2._boy.emit("finish"); - }); + return next; } }; - function skipPart(part) { - part.resume(); - } - function FileStream(opts) { - Readable2.call(this, opts); - this.bytesRead = 0; - this.truncated = false; - } - inherits(FileStream, Readable2); - FileStream.prototype._read = function(n) { - }; - module2.exports = Multipart; } }); -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/utils/Decoder.js -var require_Decoder = __commonJS({ - ".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/utils/Decoder.js"(exports, module2) { - "use strict"; - var RE_PLUS = /\+/g; - var HEX = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ]; - function Decoder() { - this.buffer = void 0; - } - Decoder.prototype.write = function(str) { - str = str.replace(RE_PLUS, " "); - let res = ""; - let i = 0; - let p = 0; - const len = str.length; - for (; i < len; ++i) { - if (this.buffer !== void 0) { - if (!HEX[str.charCodeAt(i)]) { - res += "%" + this.buffer; - this.buffer = void 0; - --i; - } else { - this.buffer += str[i]; - ++p; - if (this.buffer.length === 2) { - res += String.fromCharCode(parseInt(this.buffer, 16)); - this.buffer = void 0; - } - } - } else if (str[i] === "%") { - if (i > p) { - res += str.substring(p, i); - p = i; - } - this.buffer = ""; - ++p; - } +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/pool-stats.js +var require_pool_stats = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/pool-stats.js"(exports, module2) { + var { kFree, kConnected, kPending, kQueued, kRunning, kSize } = require_symbols(); + var kPool = Symbol("pool"); + var PoolStats = class { + constructor(pool) { + this[kPool] = pool; + } + get connected() { + return this[kPool][kConnected]; } - if (p < len && this.buffer === void 0) { - res += str.substring(p); + get free() { + return this[kPool][kFree]; + } + get pending() { + return this[kPool][kPending]; + } + get queued() { + return this[kPool][kQueued]; + } + get running() { + return this[kPool][kRunning]; + } + get size() { + return this[kPool][kSize]; } - return res; - }; - Decoder.prototype.reset = function() { - this.buffer = void 0; }; - module2.exports = Decoder; + module2.exports = PoolStats; } }); -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/types/urlencoded.js -var require_urlencoded = __commonJS({ - 
".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/types/urlencoded.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/pool-base.js +var require_pool_base = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/pool-base.js"(exports, module2) { "use strict"; - var Decoder = require_Decoder(); - var decodeText = require_decodeText(); - var getLimit = require_getLimit(); - var RE_CHARSET = /^charset$/i; - UrlEncoded.detect = /^application\/x-www-form-urlencoded/i; - function UrlEncoded(boy, cfg) { - const limits = cfg.limits; - const parsedConType = cfg.parsedConType; - this.boy = boy; - this.fieldSizeLimit = getLimit(limits, "fieldSize", 1 * 1024 * 1024); - this.fieldNameSizeLimit = getLimit(limits, "fieldNameSize", 100); - this.fieldsLimit = getLimit(limits, "fields", Infinity); - let charset; - for (var i = 0, len = parsedConType.length; i < len; ++i) { - if (Array.isArray(parsedConType[i]) && RE_CHARSET.test(parsedConType[i][0])) { - charset = parsedConType[i][1].toLowerCase(); - break; - } - } - if (charset === void 0) { - charset = cfg.defCharset || "utf8"; - } - this.decoder = new Decoder(); - this.charset = charset; - this._fields = 0; - this._state = "key"; - this._checkingBytes = true; - this._bytesKey = 0; - this._bytesVal = 0; - this._key = ""; - this._val = ""; - this._keyTrunc = false; - this._valTrunc = false; - this._hitLimit = false; - } - UrlEncoded.prototype.write = function(data, cb) { - if (this._fields === this.fieldsLimit) { - if (!this.boy.hitFieldsLimit) { - this.boy.hitFieldsLimit = true; - this.boy.emit("fieldsLimit"); - } - return cb(); - } - let idxeq; - let idxamp; - let i; - let p = 0; - const len = data.length; - while (p < len) { - if (this._state === "key") { - idxeq = idxamp = void 0; - for (i = p; i < len; ++i) { - if (!this._checkingBytes) { - ++p; - } - if (data[i] === 61) { - idxeq = i; - break; - } else if (data[i] === 38) { - idxamp = i; - break; - } - if (this._checkingBytes && this._bytesKey === this.fieldNameSizeLimit) { - this._hitLimit = true; + var DispatcherBase = require_dispatcher_base(); + var FixedQueue = require_fixed_queue(); + var { kConnected, kSize, kRunning, kPending, kQueued, kBusy, kFree, kUrl, kClose, kDestroy, kDispatch } = require_symbols(); + var PoolStats = require_pool_stats(); + var kClients = Symbol("clients"); + var kNeedDrain = Symbol("needDrain"); + var kQueue = Symbol("queue"); + var kClosedResolve = Symbol("closed resolve"); + var kOnDrain = Symbol("onDrain"); + var kOnConnect = Symbol("onConnect"); + var kOnDisconnect = Symbol("onDisconnect"); + var kOnConnectionError = Symbol("onConnectionError"); + var kGetDispatcher = Symbol("get dispatcher"); + var kAddClient = Symbol("add client"); + var kRemoveClient = Symbol("remove client"); + var kStats = Symbol("stats"); + var PoolBase = class extends DispatcherBase { + constructor() { + super(); + this[kQueue] = new FixedQueue(); + this[kClients] = []; + this[kQueued] = 0; + const pool = this; + this[kOnDrain] = function onDrain(origin, targets) { + const queue = pool[kQueue]; + let needDrain = false; + while (!needDrain) { + const item = queue.shift(); + if (!item) { break; - } else if (this._checkingBytes) { - ++this._bytesKey; - } - } - if (idxeq !== void 0) { - if (idxeq > p) { - this._key += this.decoder.write(data.toString("binary", p, idxeq)); - } - this._state = "val"; - this._hitLimit = false; - 
this._checkingBytes = true; - this._val = ""; - this._bytesVal = 0; - this._valTrunc = false; - this.decoder.reset(); - p = idxeq + 1; - } else if (idxamp !== void 0) { - ++this._fields; - let key; - const keyTrunc = this._keyTrunc; - if (idxamp > p) { - key = this._key += this.decoder.write(data.toString("binary", p, idxamp)); - } else { - key = this._key; - } - this._hitLimit = false; - this._checkingBytes = true; - this._key = ""; - this._bytesKey = 0; - this._keyTrunc = false; - this.decoder.reset(); - if (key.length) { - this.boy.emit( - "field", - decodeText(key, "binary", this.charset), - "", - keyTrunc, - false - ); - } - p = idxamp + 1; - if (this._fields === this.fieldsLimit) { - return cb(); - } - } else if (this._hitLimit) { - if (i > p) { - this._key += this.decoder.write(data.toString("binary", p, i)); - } - p = i; - if ((this._bytesKey = this._key.length) === this.fieldNameSizeLimit) { - this._checkingBytes = false; - this._keyTrunc = true; } - } else { - if (p < len) { - this._key += this.decoder.write(data.toString("binary", p)); - } - p = len; + pool[kQueued]--; + needDrain = !this.dispatch(item.opts, item.handler); } - } else { - idxamp = void 0; - for (i = p; i < len; ++i) { - if (!this._checkingBytes) { - ++p; - } - if (data[i] === 38) { - idxamp = i; - break; - } - if (this._checkingBytes && this._bytesVal === this.fieldSizeLimit) { - this._hitLimit = true; - break; - } else if (this._checkingBytes) { - ++this._bytesVal; - } + this[kNeedDrain] = needDrain; + if (!this[kNeedDrain] && pool[kNeedDrain]) { + pool[kNeedDrain] = false; + pool.emit("drain", origin, [pool, ...targets]); } - if (idxamp !== void 0) { - ++this._fields; - if (idxamp > p) { - this._val += this.decoder.write(data.toString("binary", p, idxamp)); - } - this.boy.emit( - "field", - decodeText(this._key, "binary", this.charset), - decodeText(this._val, "binary", this.charset), - this._keyTrunc, - this._valTrunc - ); - this._state = "key"; - this._hitLimit = false; - this._checkingBytes = true; - this._key = ""; - this._bytesKey = 0; - this._keyTrunc = false; - this.decoder.reset(); - p = idxamp + 1; - if (this._fields === this.fieldsLimit) { - return cb(); - } - } else if (this._hitLimit) { - if (i > p) { - this._val += this.decoder.write(data.toString("binary", p, i)); - } - p = i; - if (this._val === "" && this.fieldSizeLimit === 0 || (this._bytesVal = this._val.length) === this.fieldSizeLimit) { - this._checkingBytes = false; - this._valTrunc = true; - } - } else { - if (p < len) { - this._val += this.decoder.write(data.toString("binary", p)); - } - p = len; + if (pool[kClosedResolve] && queue.isEmpty()) { + Promise.all(pool[kClients].map((c) => c.close())).then(pool[kClosedResolve]); } + }; + this[kOnConnect] = (origin, targets) => { + pool.emit("connect", origin, [pool, ...targets]); + }; + this[kOnDisconnect] = (origin, targets, err) => { + pool.emit("disconnect", origin, [pool, ...targets], err); + }; + this[kOnConnectionError] = (origin, targets, err) => { + pool.emit("connectionError", origin, [pool, ...targets], err); + }; + this[kStats] = new PoolStats(this); + } + get [kBusy]() { + return this[kNeedDrain]; + } + get [kConnected]() { + return this[kClients].filter((client) => client[kConnected]).length; + } + get [kFree]() { + return this[kClients].filter((client) => client[kConnected] && !client[kNeedDrain]).length; + } + get [kPending]() { + let ret = this[kQueued]; + for (const { [kPending]: pending } of this[kClients]) { + ret += pending; } + return ret; } - cb(); - }; - 
UrlEncoded.prototype.end = function() { - if (this.boy._done) { - return; + get [kRunning]() { + let ret = 0; + for (const { [kRunning]: running } of this[kClients]) { + ret += running; + } + return ret; } - if (this._state === "key" && this._key.length > 0) { - this.boy.emit( - "field", - decodeText(this._key, "binary", this.charset), - "", - this._keyTrunc, - false - ); - } else if (this._state === "val") { - this.boy.emit( - "field", - decodeText(this._key, "binary", this.charset), - decodeText(this._val, "binary", this.charset), - this._keyTrunc, - this._valTrunc - ); + get [kSize]() { + let ret = this[kQueued]; + for (const { [kSize]: size } of this[kClients]) { + ret += size; + } + return ret; } - this.boy._done = true; - this.boy.emit("finish"); - }; - module2.exports = UrlEncoded; - } -}); - -// .yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/main.js -var require_main = __commonJS({ - ".yarn/cache/@fastify-busboy-npm-2.1.0-960844a007-7bb641080a.zip/node_modules/@fastify/busboy/lib/main.js"(exports, module2) { - "use strict"; - var WritableStream = require("node:stream").Writable; - var { inherits } = require("node:util"); - var Dicer = require_Dicer(); - var MultipartParser = require_multipart(); - var UrlencodedParser = require_urlencoded(); - var parseParams = require_parseParams(); - function Busboy(opts) { - if (!(this instanceof Busboy)) { - return new Busboy(opts); - } - if (typeof opts !== "object") { - throw new TypeError("Busboy expected an options-Object."); - } - if (typeof opts.headers !== "object") { - throw new TypeError("Busboy expected an options-Object with headers-attribute."); - } - if (typeof opts.headers["content-type"] !== "string") { - throw new TypeError("Missing Content-Type-header."); - } - const { - headers, - ...streamOptions - } = opts; - this.opts = { - autoDestroy: false, - ...streamOptions - }; - WritableStream.call(this, this.opts); - this._done = false; - this._parser = this.getParserByHeaders(headers); - this._finished = false; - } - inherits(Busboy, WritableStream); - Busboy.prototype.emit = function(ev) { - if (ev === "finish") { - if (!this._done) { - this._parser?.end(); - return; - } else if (this._finished) { - return; + get stats() { + return this[kStats]; + } + async [kClose]() { + if (this[kQueue].isEmpty()) { + return Promise.all(this[kClients].map((c) => c.close())); + } else { + return new Promise((resolve) => { + this[kClosedResolve] = resolve; + }); } - this._finished = true; } - WritableStream.prototype.emit.apply(this, arguments); - }; - Busboy.prototype.getParserByHeaders = function(headers) { - const parsed = parseParams(headers["content-type"]); - const cfg = { - defCharset: this.opts.defCharset, - fileHwm: this.opts.fileHwm, - headers, - highWaterMark: this.opts.highWaterMark, - isPartAFile: this.opts.isPartAFile, - limits: this.opts.limits, - parsedConType: parsed, - preservePath: this.opts.preservePath - }; - if (MultipartParser.detect.test(parsed[0])) { - return new MultipartParser(this, cfg); + async [kDestroy](err) { + while (true) { + const item = this[kQueue].shift(); + if (!item) { + break; + } + item.handler.onError(err); + } + return Promise.all(this[kClients].map((c) => c.destroy(err))); + } + [kDispatch](opts, handler) { + const dispatcher = this[kGetDispatcher](); + if (!dispatcher) { + this[kNeedDrain] = true; + this[kQueue].push({ opts, handler }); + this[kQueued]++; + } else if (!dispatcher.dispatch(opts, handler)) { + dispatcher[kNeedDrain] = true; + 
this[kNeedDrain] = !this[kGetDispatcher](); + } + return !this[kNeedDrain]; + } + [kAddClient](client) { + client.on("drain", this[kOnDrain]).on("connect", this[kOnConnect]).on("disconnect", this[kOnDisconnect]).on("connectionError", this[kOnConnectionError]); + this[kClients].push(client); + if (this[kNeedDrain]) { + queueMicrotask(() => { + if (this[kNeedDrain]) { + this[kOnDrain](client[kUrl], [this, client]); + } + }); + } + return this; } - if (UrlencodedParser.detect.test(parsed[0])) { - return new UrlencodedParser(this, cfg); + [kRemoveClient](client) { + client.close(() => { + const idx = this[kClients].indexOf(client); + if (idx !== -1) { + this[kClients].splice(idx, 1); + } + }); + this[kNeedDrain] = this[kClients].some((dispatcher) => !dispatcher[kNeedDrain] && dispatcher.closed !== true && dispatcher.destroyed !== true); } - throw new Error("Unsupported Content-Type."); }; - Busboy.prototype._write = function(chunk, encoding, cb) { - this._parser.write(chunk, cb); + module2.exports = { + PoolBase, + kClients, + kNeedDrain, + kAddClient, + kRemoveClient, + kGetDispatcher }; - module2.exports = Busboy; - module2.exports.default = Busboy; - module2.exports.Busboy = Busboy; - module2.exports.Dicer = Dicer; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/constants.js -var require_constants3 = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/constants.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/diagnostics.js +var require_diagnostics = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/diagnostics.js"(exports, module2) { "use strict"; - var corsSafeListedMethods = ["GET", "HEAD", "POST"]; - var corsSafeListedMethodsSet = new Set(corsSafeListedMethods); - var nullBodyStatus = [101, 204, 205, 304]; - var redirectStatus = [301, 302, 303, 307, 308]; - var redirectStatusSet = new Set(redirectStatus); - var badPorts = [ - "1", - "7", - "9", - "11", - "13", - "15", - "17", - "19", - "20", - "21", - "22", - "23", - "25", - "37", - "42", - "43", - "53", - "69", - "77", - "79", - "87", - "95", - "101", - "102", - "103", - "104", - "109", - "110", - "111", - "113", - "115", - "117", - "119", - "123", - "135", - "137", - "139", - "143", - "161", - "179", - "389", - "427", - "465", - "512", - "513", - "514", - "515", - "526", - "530", - "531", - "532", - "540", - "548", - "554", - "556", - "563", - "587", - "601", - "636", - "989", - "990", - "993", - "995", - "1719", - "1720", - "1723", - "2049", - "3659", - "4045", - "5060", - "5061", - "6000", - "6566", - "6665", - "6666", - "6667", - "6668", - "6669", - "6697", - "10080" - ]; - var badPortsSet = new Set(badPorts); - var referrerPolicy = [ - "", - "no-referrer", - "no-referrer-when-downgrade", - "same-origin", - "origin", - "strict-origin", - "origin-when-cross-origin", - "strict-origin-when-cross-origin", - "unsafe-url" - ]; - var referrerPolicySet = new Set(referrerPolicy); - var requestRedirect = ["follow", "manual", "error"]; - var safeMethods = ["GET", "HEAD", "OPTIONS", "TRACE"]; - var safeMethodsSet = new Set(safeMethods); - var requestMode = ["navigate", "same-origin", "no-cors", "cors"]; - var requestCredentials = ["omit", "same-origin", "include"]; - var requestCache = [ - "default", - "no-store", - "reload", - "no-cache", - "force-cache", - "only-if-cached" - ]; - var requestBodyHeader = [ - "content-encoding", - 
"content-language", - "content-location", - "content-type", - // See https://github.com/nodejs/undici/issues/2021 - // 'Content-Length' is a forbidden header name, which is typically - // removed in the Headers implementation. However, undici doesn't - // filter out headers, so we add it here. - "content-length" - ]; - var requestDuplex = [ - "half" - ]; - var forbiddenMethods = ["CONNECT", "TRACE", "TRACK"]; - var forbiddenMethodsSet = new Set(forbiddenMethods); - var subresource = [ - "audio", - "audioworklet", - "font", - "image", - "manifest", - "paintworklet", - "script", - "style", - "track", - "video", - "xslt", - "" - ]; - var subresourceSet = new Set(subresource); - module2.exports = { - subresource, - forbiddenMethods, - requestBodyHeader, - referrerPolicy, - requestRedirect, - requestMode, - requestCredentials, - requestCache, - redirectStatus, - corsSafeListedMethods, - nullBodyStatus, - safeMethods, - badPorts, - requestDuplex, - subresourceSet, - badPortsSet, - redirectStatusSet, - corsSafeListedMethodsSet, - safeMethodsSet, - forbiddenMethodsSet, - referrerPolicySet + var diagnosticsChannel = require("node:diagnostics_channel"); + var util = require("node:util"); + var undiciDebugLog = util.debuglog("undici"); + var fetchDebuglog = util.debuglog("fetch"); + var websocketDebuglog = util.debuglog("websocket"); + var isClientSet = false; + var channels = { + // Client + beforeConnect: diagnosticsChannel.channel("undici:client:beforeConnect"), + connected: diagnosticsChannel.channel("undici:client:connected"), + connectError: diagnosticsChannel.channel("undici:client:connectError"), + sendHeaders: diagnosticsChannel.channel("undici:client:sendHeaders"), + // Request + create: diagnosticsChannel.channel("undici:request:create"), + bodySent: diagnosticsChannel.channel("undici:request:bodySent"), + headers: diagnosticsChannel.channel("undici:request:headers"), + trailers: diagnosticsChannel.channel("undici:request:trailers"), + error: diagnosticsChannel.channel("undici:request:error"), + // WebSocket + open: diagnosticsChannel.channel("undici:websocket:open"), + close: diagnosticsChannel.channel("undici:websocket:close"), + socketError: diagnosticsChannel.channel("undici:websocket:socket_error"), + ping: diagnosticsChannel.channel("undici:websocket:ping"), + pong: diagnosticsChannel.channel("undici:websocket:pong") }; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/global.js -var require_global = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/global.js"(exports, module2) { - "use strict"; - var globalOrigin = Symbol.for("undici.globalOrigin.1"); - function getGlobalOrigin() { - return globalThis[globalOrigin]; + if (undiciDebugLog.enabled || fetchDebuglog.enabled) { + const debuglog = fetchDebuglog.enabled ? fetchDebuglog : undiciDebugLog; + diagnosticsChannel.channel("undici:client:beforeConnect").subscribe((evt) => { + const { + connectParams: { version: version2, protocol, port, host } + } = evt; + debuglog( + "connecting to %s using %s%s", + `${host}${port ? `:${port}` : ""}`, + protocol, + version2 + ); + }); + diagnosticsChannel.channel("undici:client:connected").subscribe((evt) => { + const { + connectParams: { version: version2, protocol, port, host } + } = evt; + debuglog( + "connected to %s using %s%s", + `${host}${port ? 
`:${port}` : ""}`, + protocol, + version2 + ); + }); + diagnosticsChannel.channel("undici:client:connectError").subscribe((evt) => { + const { + connectParams: { version: version2, protocol, port, host }, + error + } = evt; + debuglog( + "connection to %s using %s%s errored - %s", + `${host}${port ? `:${port}` : ""}`, + protocol, + version2, + error.message + ); + }); + diagnosticsChannel.channel("undici:client:sendHeaders").subscribe((evt) => { + const { + request: { method, path: path10, origin } + } = evt; + debuglog("sending request to %s %s/%s", method, origin, path10); + }); + diagnosticsChannel.channel("undici:request:headers").subscribe((evt) => { + const { + request: { method, path: path10, origin }, + response: { statusCode } + } = evt; + debuglog( + "received response to %s %s/%s - HTTP %d", + method, + origin, + path10, + statusCode + ); + }); + diagnosticsChannel.channel("undici:request:trailers").subscribe((evt) => { + const { + request: { method, path: path10, origin } + } = evt; + debuglog("trailers received from %s %s/%s", method, origin, path10); + }); + diagnosticsChannel.channel("undici:request:error").subscribe((evt) => { + const { + request: { method, path: path10, origin }, + error + } = evt; + debuglog( + "request to %s %s/%s errored - %s", + method, + origin, + path10, + error.message + ); + }); + isClientSet = true; } - function setGlobalOrigin(newOrigin) { - if (newOrigin === void 0) { - Object.defineProperty(globalThis, globalOrigin, { - value: void 0, - writable: true, - enumerable: false, - configurable: false + if (websocketDebuglog.enabled) { + if (!isClientSet) { + const debuglog = undiciDebugLog.enabled ? undiciDebugLog : websocketDebuglog; + diagnosticsChannel.channel("undici:client:beforeConnect").subscribe((evt) => { + const { + connectParams: { version: version2, protocol, port, host } + } = evt; + debuglog( + "connecting to %s%s using %s%s", + host, + port ? `:${port}` : "", + protocol, + version2 + ); + }); + diagnosticsChannel.channel("undici:client:connected").subscribe((evt) => { + const { + connectParams: { version: version2, protocol, port, host } + } = evt; + debuglog( + "connected to %s%s using %s%s", + host, + port ? `:${port}` : "", + protocol, + version2 + ); + }); + diagnosticsChannel.channel("undici:client:connectError").subscribe((evt) => { + const { + connectParams: { version: version2, protocol, port, host }, + error + } = evt; + debuglog( + "connection to %s%s using %s%s errored - %s", + host, + port ? `:${port}` : "", + protocol, + version2, + error.message + ); + }); + diagnosticsChannel.channel("undici:client:sendHeaders").subscribe((evt) => { + const { + request: { method, path: path10, origin } + } = evt; + debuglog("sending request to %s %s/%s", method, origin, path10); }); - return; - } - const parsedURL = new URL(newOrigin); - if (parsedURL.protocol !== "http:" && parsedURL.protocol !== "https:") { - throw new TypeError(`Only http & https urls are allowed, received ${parsedURL.protocol}`); } - Object.defineProperty(globalThis, globalOrigin, { - value: parsedURL, - writable: true, - enumerable: false, - configurable: false + diagnosticsChannel.channel("undici:websocket:open").subscribe((evt) => { + const { + address: { address, port } + } = evt; + websocketDebuglog("connection opened %s%s", address, port ? 
`:${port}` : ""); + }); + diagnosticsChannel.channel("undici:websocket:close").subscribe((evt) => { + const { websocket, code, reason } = evt; + websocketDebuglog( + "closed connection to %s - %s %s", + websocket.url, + code, + reason + ); + }); + diagnosticsChannel.channel("undici:websocket:socket_error").subscribe((err) => { + websocketDebuglog("connection errored - %s", err.message); + }); + diagnosticsChannel.channel("undici:websocket:ping").subscribe((evt) => { + websocketDebuglog("ping received"); + }); + diagnosticsChannel.channel("undici:websocket:pong").subscribe((evt) => { + websocketDebuglog("pong received"); }); } module2.exports = { - getGlobalOrigin, - setGlobalOrigin + channels }; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/dataURL.js -var require_dataURL = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/dataURL.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/request.js +var require_request = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/request.js"(exports, module2) { + "use strict"; + var { + InvalidArgumentError, + NotSupportedError + } = require_errors(); var assert3 = require("node:assert"); - var encoder = new TextEncoder(); - var HTTP_TOKEN_CODEPOINTS = /^[!#$%&'*+-.^_|~A-Za-z0-9]+$/; - var HTTP_WHITESPACE_REGEX = /[\u000A|\u000D|\u0009|\u0020]/; - var ASCII_WHITESPACE_REPLACE_REGEX = /[\u0009\u000A\u000C\u000D\u0020]/g; - var HTTP_QUOTED_STRING_TOKENS = /[\u0009|\u0020-\u007E|\u0080-\u00FF]/; - function dataURLProcessor(dataURL) { - assert3(dataURL.protocol === "data:"); - let input = URLSerializer(dataURL, true); - input = input.slice(5); - const position = { position: 0 }; - let mimeType = collectASequenceOfCodePointsFast( - ",", - input, - position - ); - const mimeTypeLength = mimeType.length; - mimeType = removeASCIIWhitespace(mimeType, true, true); - if (position.position >= input.length) { - return "failure"; + var { + isValidHTTPToken, + isValidHeaderChar, + isStream, + destroy, + isBuffer, + isFormDataLike, + isIterable, + isBlobLike, + buildURL, + validateHandler, + getServerName + } = require_util(); + var { channels } = require_diagnostics(); + var { headerNameLowerCasedRecord } = require_constants2(); + var invalidPathRegex = /[^\u0021-\u00ff]/; + var kHandler = Symbol("handler"); + var Request = class { + constructor(origin, { + path: path10, + method, + body, + headers, + query, + idempotent, + blocking, + upgrade, + headersTimeout, + bodyTimeout, + reset, + throwOnError, + expectContinue, + servername + }, handler) { + if (typeof path10 !== "string") { + throw new InvalidArgumentError("path must be a string"); + } else if (path10[0] !== "/" && !(path10.startsWith("http://") || path10.startsWith("https://")) && method !== "CONNECT") { + throw new InvalidArgumentError("path must be an absolute URL or start with a slash"); + } else if (invalidPathRegex.exec(path10) !== null) { + throw new InvalidArgumentError("invalid request path"); + } + if (typeof method !== "string") { + throw new InvalidArgumentError("method must be a string"); + } else if (!isValidHTTPToken(method)) { + throw new InvalidArgumentError("invalid request method"); + } + if (upgrade && typeof upgrade !== "string") { + throw new InvalidArgumentError("upgrade must be a string"); + } + if (headersTimeout != null && (!Number.isFinite(headersTimeout) || headersTimeout < 
0)) { + throw new InvalidArgumentError("invalid headersTimeout"); + } + if (bodyTimeout != null && (!Number.isFinite(bodyTimeout) || bodyTimeout < 0)) { + throw new InvalidArgumentError("invalid bodyTimeout"); + } + if (reset != null && typeof reset !== "boolean") { + throw new InvalidArgumentError("invalid reset"); + } + if (expectContinue != null && typeof expectContinue !== "boolean") { + throw new InvalidArgumentError("invalid expectContinue"); + } + this.headersTimeout = headersTimeout; + this.bodyTimeout = bodyTimeout; + this.throwOnError = throwOnError === true; + this.method = method; + this.abort = null; + if (body == null) { + this.body = null; + } else if (isStream(body)) { + this.body = body; + const rState = this.body._readableState; + if (!rState || !rState.autoDestroy) { + this.endHandler = function autoDestroy() { + destroy(this); + }; + this.body.on("end", this.endHandler); + } + this.errorHandler = (err) => { + if (this.abort) { + this.abort(err); + } else { + this.error = err; + } + }; + this.body.on("error", this.errorHandler); + } else if (isBuffer(body)) { + this.body = body.byteLength ? body : null; + } else if (ArrayBuffer.isView(body)) { + this.body = body.buffer.byteLength ? Buffer.from(body.buffer, body.byteOffset, body.byteLength) : null; + } else if (body instanceof ArrayBuffer) { + this.body = body.byteLength ? Buffer.from(body) : null; + } else if (typeof body === "string") { + this.body = body.length ? Buffer.from(body) : null; + } else if (isFormDataLike(body) || isIterable(body) || isBlobLike(body)) { + this.body = body; + } else { + throw new InvalidArgumentError("body must be a string, a Buffer, a Readable stream, an iterable, or an async iterable"); + } + this.completed = false; + this.aborted = false; + this.upgrade = upgrade || null; + this.path = query ? buildURL(path10, query) : path10; + this.origin = origin; + this.idempotent = idempotent == null ? method === "HEAD" || method === "GET" : idempotent; + this.blocking = blocking == null ? false : blocking; + this.reset = reset == null ? null : reset; + this.host = null; + this.contentLength = null; + this.contentType = null; + this.headers = []; + this.expectContinue = expectContinue != null ? 
expectContinue : false; + if (Array.isArray(headers)) { + if (headers.length % 2 !== 0) { + throw new InvalidArgumentError("headers array must be even"); + } + for (let i = 0; i < headers.length; i += 2) { + processHeader(this, headers[i], headers[i + 1]); + } + } else if (headers && typeof headers === "object") { + if (headers[Symbol.iterator]) { + for (const header of headers) { + if (!Array.isArray(header) || header.length !== 2) { + throw new InvalidArgumentError("headers must be in key-value pair format"); + } + processHeader(this, header[0], header[1]); + } + } else { + const keys = Object.keys(headers); + for (let i = 0; i < keys.length; ++i) { + processHeader(this, keys[i], headers[keys[i]]); + } + } + } else if (headers != null) { + throw new InvalidArgumentError("headers must be an object or an array"); + } + validateHandler(handler, method, upgrade); + this.servername = servername || getServerName(this.host); + this[kHandler] = handler; + if (channels.create.hasSubscribers) { + channels.create.publish({ request: this }); + } } - position.position++; - const encodedBody = input.slice(mimeTypeLength + 1); - let body = stringPercentDecode(encodedBody); - if (/;(\u0020){0,}base64$/i.test(mimeType)) { - const stringBody = isomorphicDecode(body); - body = forgivingBase64(stringBody); - if (body === "failure") { - return "failure"; + onBodySent(chunk) { + if (this[kHandler].onBodySent) { + try { + return this[kHandler].onBodySent(chunk); + } catch (err) { + this.abort(err); + } } - mimeType = mimeType.slice(0, -6); - mimeType = mimeType.replace(/(\u0020)+$/, ""); - mimeType = mimeType.slice(0, -1); } - if (mimeType.startsWith(";")) { - mimeType = "text/plain" + mimeType; + onRequestSent() { + if (channels.bodySent.hasSubscribers) { + channels.bodySent.publish({ request: this }); + } + if (this[kHandler].onRequestSent) { + try { + return this[kHandler].onRequestSent(); + } catch (err) { + this.abort(err); + } + } } - let mimeTypeRecord = parseMIMEType(mimeType); - if (mimeTypeRecord === "failure") { - mimeTypeRecord = parseMIMEType("text/plain;charset=US-ASCII"); + onConnect(abort) { + assert3(!this.aborted); + assert3(!this.completed); + if (this.error) { + abort(this.error); + } else { + this.abort = abort; + return this[kHandler].onConnect(abort); + } } - return { mimeType: mimeTypeRecord, body }; - } - function URLSerializer(url, excludeFragment = false) { - if (!excludeFragment) { - return url.href; + onResponseStarted() { + return this[kHandler].onResponseStarted?.(); } - const href = url.href; - const hashLength = url.hash.length; - const serialized = hashLength === 0 ? 
href : href.substring(0, href.length - hashLength); - if (!hashLength && href.endsWith("#")) { - return serialized.slice(0, -1); + onHeaders(statusCode, headers, resume, statusText) { + assert3(!this.aborted); + assert3(!this.completed); + if (channels.headers.hasSubscribers) { + channels.headers.publish({ request: this, response: { statusCode, headers, statusText } }); + } + try { + return this[kHandler].onHeaders(statusCode, headers, resume, statusText); + } catch (err) { + this.abort(err); + } } - return serialized; - } - function collectASequenceOfCodePoints(condition, input, position) { - let result = ""; - while (position.position < input.length && condition(input[position.position])) { - result += input[position.position]; - position.position++; + onData(chunk) { + assert3(!this.aborted); + assert3(!this.completed); + try { + return this[kHandler].onData(chunk); + } catch (err) { + this.abort(err); + return false; + } } - return result; - } - function collectASequenceOfCodePointsFast(char, input, position) { - const idx = input.indexOf(char, position.position); - const start = position.position; - if (idx === -1) { - position.position = input.length; - return input.slice(start); - } - position.position = idx; - return input.slice(start, position.position); - } - function stringPercentDecode(input) { - const bytes = encoder.encode(input); - return percentDecode(bytes); - } - function isHexCharByte(byte) { - return byte >= 48 && byte <= 57 || byte >= 65 && byte <= 70 || byte >= 97 && byte <= 102; - } - function hexByteToNumber(byte) { - return ( - // 0-9 - byte >= 48 && byte <= 57 ? byte - 48 : (byte & 223) - 55 - ); - } - function percentDecode(input) { - const length = input.length; - const output = new Uint8Array(length); - let j = 0; - for (let i = 0; i < length; ++i) { - const byte = input[i]; - if (byte !== 37) { - output[j++] = byte; - } else if (byte === 37 && !(isHexCharByte(input[i + 1]) && isHexCharByte(input[i + 2]))) { - output[j++] = 37; - } else { - output[j++] = hexByteToNumber(input[i + 1]) << 4 | hexByteToNumber(input[i + 2]); - i += 2; - } - } - return length === j ? 
output : output.subarray(0, j); - } - function parseMIMEType(input) { - input = removeHTTPWhitespace(input, true, true); - const position = { position: 0 }; - const type = collectASequenceOfCodePointsFast( - "/", - input, - position - ); - if (type.length === 0 || !HTTP_TOKEN_CODEPOINTS.test(type)) { - return "failure"; - } - if (position.position > input.length) { - return "failure"; - } - position.position++; - let subtype = collectASequenceOfCodePointsFast( - ";", - input, - position - ); - subtype = removeHTTPWhitespace(subtype, false, true); - if (subtype.length === 0 || !HTTP_TOKEN_CODEPOINTS.test(subtype)) { - return "failure"; + onUpgrade(statusCode, headers, socket) { + assert3(!this.aborted); + assert3(!this.completed); + return this[kHandler].onUpgrade(statusCode, headers, socket); } - const typeLowercase = type.toLowerCase(); - const subtypeLowercase = subtype.toLowerCase(); - const mimeType = { - type: typeLowercase, - subtype: subtypeLowercase, - /** @type {Map} */ - parameters: /* @__PURE__ */ new Map(), - // https://mimesniff.spec.whatwg.org/#mime-type-essence - essence: `${typeLowercase}/${subtypeLowercase}` - }; - while (position.position < input.length) { - position.position++; - collectASequenceOfCodePoints( - // https://fetch.spec.whatwg.org/#http-whitespace - (char) => HTTP_WHITESPACE_REGEX.test(char), - input, - position - ); - let parameterName = collectASequenceOfCodePoints( - (char) => char !== ";" && char !== "=", - input, - position - ); - parameterName = parameterName.toLowerCase(); - if (position.position < input.length) { - if (input[position.position] === ";") { - continue; - } - position.position++; + onComplete(trailers) { + this.onFinally(); + assert3(!this.aborted); + this.completed = true; + if (channels.trailers.hasSubscribers) { + channels.trailers.publish({ request: this, trailers }); } - if (position.position > input.length) { - break; + try { + return this[kHandler].onComplete(trailers); + } catch (err) { + this.onError(err); } - let parameterValue = null; - if (input[position.position] === '"') { - parameterValue = collectAnHTTPQuotedString(input, position, true); - collectASequenceOfCodePointsFast( - ";", - input, - position - ); - } else { - parameterValue = collectASequenceOfCodePointsFast( - ";", - input, - position - ); - parameterValue = removeHTTPWhitespace(parameterValue, false, true); - if (parameterValue.length === 0) { - continue; - } + } + onError(error) { + this.onFinally(); + if (channels.error.hasSubscribers) { + channels.error.publish({ request: this, error }); } - if (parameterName.length !== 0 && HTTP_TOKEN_CODEPOINTS.test(parameterName) && (parameterValue.length === 0 || HTTP_QUOTED_STRING_TOKENS.test(parameterValue)) && !mimeType.parameters.has(parameterName)) { - mimeType.parameters.set(parameterName, parameterValue); + if (this.aborted) { + return; } + this.aborted = true; + return this[kHandler].onError(error); } - return mimeType; - } - function forgivingBase64(data) { - data = data.replace(ASCII_WHITESPACE_REPLACE_REGEX, ""); - let dataLength = data.length; - if (dataLength % 4 === 0) { - if (data.charCodeAt(dataLength - 1) === 61) { - --dataLength; - if (data.charCodeAt(dataLength - 1) === 61) { - --dataLength; - } + onFinally() { + if (this.errorHandler) { + this.body.off("error", this.errorHandler); + this.errorHandler = null; + } + if (this.endHandler) { + this.body.off("end", this.endHandler); + this.endHandler = null; } } - if (dataLength % 4 === 1) { - return "failure"; + addHeader(key, value) { + 
processHeader(this, key, value); + return this; } - if (/[^+/0-9A-Za-z]/.test(data.length === dataLength ? data : data.substring(0, dataLength))) { - return "failure"; + }; + function processHeader(request, key, val) { + if (val && (typeof val === "object" && !Array.isArray(val))) { + throw new InvalidArgumentError(`invalid ${key} header`); + } else if (val === void 0) { + return; } - const buffer = Buffer.from(data, "base64"); - return new Uint8Array(buffer.buffer, buffer.byteOffset, buffer.byteLength); - } - function collectAnHTTPQuotedString(input, position, extractValue) { - const positionStart = position.position; - let value = ""; - assert3(input[position.position] === '"'); - position.position++; - while (true) { - value += collectASequenceOfCodePoints( - (char) => char !== '"' && char !== "\\", - input, - position - ); - if (position.position >= input.length) { - break; + let headerName = headerNameLowerCasedRecord[key]; + if (headerName === void 0) { + headerName = key.toLowerCase(); + if (headerNameLowerCasedRecord[headerName] === void 0 && !isValidHTTPToken(headerName)) { + throw new InvalidArgumentError("invalid header key"); } - const quoteOrBackslash = input[position.position]; - position.position++; - if (quoteOrBackslash === "\\") { - if (position.position >= input.length) { - value += "\\"; - break; + } + if (Array.isArray(val)) { + const arr = []; + for (let i = 0; i < val.length; i++) { + if (typeof val[i] === "string") { + if (!isValidHeaderChar(val[i])) { + throw new InvalidArgumentError(`invalid ${key} header`); + } + arr.push(val[i]); + } else if (val[i] === null) { + arr.push(""); + } else if (typeof val[i] === "object") { + throw new InvalidArgumentError(`invalid ${key} header`); + } else { + arr.push(`${val[i]}`); } - value += input[position.position]; - position.position++; - } else { - assert3(quoteOrBackslash === '"'); - break; } + val = arr; + } else if (typeof val === "string") { + if (!isValidHeaderChar(val)) { + throw new InvalidArgumentError(`invalid ${key} header`); + } + } else if (val === null) { + val = ""; + } else if (typeof val === "object") { + throw new InvalidArgumentError(`invalid ${key} header`); + } else { + val = `${val}`; } - if (extractValue) { - return value; - } - return input.slice(positionStart, position.position); - } - function serializeAMimeType(mimeType) { - assert3(mimeType !== "failure"); - const { parameters, essence } = mimeType; - let serialization = essence; - for (let [name, value] of parameters.entries()) { - serialization += ";"; - serialization += name; - serialization += "="; - if (!HTTP_TOKEN_CODEPOINTS.test(value)) { - value = value.replace(/(\\|")/g, "\\$1"); - value = '"' + value; - value += '"'; + if (request.host === null && headerName === "host") { + if (typeof val !== "string") { + throw new InvalidArgumentError("invalid host header"); } - serialization += value; + request.host = val; + } else if (request.contentLength === null && headerName === "content-length") { + request.contentLength = parseInt(val, 10); + if (!Number.isFinite(request.contentLength)) { + throw new InvalidArgumentError("invalid content-length header"); + } + } else if (request.contentType === null && headerName === "content-type") { + request.contentType = val; + request.headers.push(key, val); + } else if (headerName === "transfer-encoding" || headerName === "keep-alive" || headerName === "upgrade") { + throw new InvalidArgumentError(`invalid ${headerName} header`); + } else if (headerName === "connection") { + const value = typeof val === 
"string" ? val.toLowerCase() : null; + if (value !== "close" && value !== "keep-alive") { + throw new InvalidArgumentError("invalid connection header"); + } + if (value === "close") { + request.reset = true; + } + } else if (headerName === "expect") { + throw new NotSupportedError("expect header not supported"); + } else { + request.headers.push(key, val); } - return serialization; - } - function isHTTPWhiteSpace(char) { - return char === 13 || char === 10 || char === 9 || char === 32; - } - function removeHTTPWhitespace(str, leading = true, trailing = true) { - return removeChars(str, leading, trailing, isHTTPWhiteSpace); - } - function isASCIIWhitespace(char) { - return char === 13 || char === 10 || char === 9 || char === 12 || char === 32; - } - function removeASCIIWhitespace(str, leading = true, trailing = true) { - return removeChars(str, leading, trailing, isASCIIWhitespace); } - function removeChars(str, leading, trailing, predicate) { - let lead = 0; - let trail = str.length - 1; - if (leading) { - while (lead < str.length && predicate(str.charCodeAt(lead))) - lead++; - } - if (trailing) { - while (trail > 0 && predicate(str.charCodeAt(trail))) - trail--; - } - return lead === 0 && trail === str.length - 1 ? str : str.slice(lead, trail + 1); - } - function isomorphicDecode(input) { - const length = input.length; - if ((2 << 15) - 1 > length) { - return String.fromCharCode.apply(null, input); - } - let result = ""; - let i = 0; - let addition = (2 << 15) - 1; - while (i < length) { - if (i + addition > length) { - addition = length - i; - } - result += String.fromCharCode.apply(null, input.subarray(i, i += addition)); - } - return result; - } - function minimizeSupportedMimeType(mimeType) { - switch (mimeType.essence) { - case "application/ecmascript": - case "application/javascript": - case "application/x-ecmascript": - case "application/x-javascript": - case "text/ecmascript": - case "text/javascript": - case "text/javascript1.0": - case "text/javascript1.1": - case "text/javascript1.2": - case "text/javascript1.3": - case "text/javascript1.4": - case "text/javascript1.5": - case "text/jscript": - case "text/livescript": - case "text/x-ecmascript": - case "text/x-javascript": - return "text/javascript"; - case "application/json": - case "text/json": - return "application/json"; - case "image/svg+xml": - return "image/svg+xml"; - case "text/xml": - case "application/xml": - return "application/xml"; - } - if (mimeType.subtype.endsWith("+json")) { - return "application/json"; - } - if (mimeType.subtype.endsWith("+xml")) { - return "application/xml"; - } - return ""; - } - module2.exports = { - dataURLProcessor, - URLSerializer, - collectASequenceOfCodePoints, - collectASequenceOfCodePointsFast, - stringPercentDecode, - parseMIMEType, - collectAnHTTPQuotedString, - serializeAMimeType, - removeChars, - minimizeSupportedMimeType - }; + module2.exports = Request; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/util.js -var require_util2 = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/util.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/connect.js +var require_connect = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/core/connect.js"(exports, module2) { "use strict"; - var { Transform } = require("node:stream"); - var zlib = require("node:zlib"); - var { redirectStatusSet, 
referrerPolicySet: referrerPolicyTokens, badPortsSet } = require_constants3(); - var { getGlobalOrigin } = require_global(); - var { collectASequenceOfCodePoints, collectAnHTTPQuotedString, removeChars, parseMIMEType } = require_dataURL(); - var { performance } = require("node:perf_hooks"); - var { isBlobLike, toUSVString, ReadableStreamFrom, isValidHTTPToken } = require_util(); + var net = require("node:net"); var assert3 = require("node:assert"); - var { isUint8Array } = require("util/types"); - var crypto; - try { - crypto = require("node:crypto"); - } catch { - } - function responseURL(response) { - const urlList = response.urlList; - const length = urlList.length; - return length === 0 ? null : urlList[length - 1].toString(); - } - function responseLocationURL(response, requestFragment) { - if (!redirectStatusSet.has(response.status)) { - return null; - } - let location = response.headersList.get("location", true); - if (location !== null && isValidHeaderValue(location)) { - location = new URL(location, responseURL(response)); - } - if (location && !location.hash) { - location.hash = requestFragment; - } - return location; - } - function requestCurrentURL(request) { - return request.urlList[request.urlList.length - 1]; - } - function requestBadPort(request) { - const url = requestCurrentURL(request); - if (urlIsHttpHttpsScheme(url) && badPortsSet.has(url.port)) { - return "blocked"; - } - return "allowed"; - } - function isErrorLike(object) { - return object instanceof Error || (object?.constructor?.name === "Error" || object?.constructor?.name === "DOMException"); - } - function isValidReasonPhrase(statusText) { - for (let i = 0; i < statusText.length; ++i) { - const c = statusText.charCodeAt(i); - if (!(c === 9 || // HTAB - c >= 32 && c <= 126 || // SP / VCHAR - c >= 128 && c <= 255)) { - return false; + var util = require_util(); + var { InvalidArgumentError, ConnectTimeoutError } = require_errors(); + var tls; + var SessionCache; + if (global.FinalizationRegistry && !(process.env.NODE_V8_COVERAGE || process.env.UNDICI_NO_FG)) { + SessionCache = class WeakSessionCache { + constructor(maxCachedSessions) { + this._maxCachedSessions = maxCachedSessions; + this._sessionCache = /* @__PURE__ */ new Map(); + this._sessionRegistry = new global.FinalizationRegistry((key) => { + if (this._sessionCache.size < this._maxCachedSessions) { + return; + } + const ref = this._sessionCache.get(key); + if (ref !== void 0 && ref.deref() === void 0) { + this._sessionCache.delete(key); + } + }); } - } - return true; - } - function isValidHeaderName(potentialValue) { - return isValidHTTPToken(potentialValue); - } - function isValidHeaderValue(potentialValue) { - if (potentialValue.startsWith(" ") || potentialValue.startsWith(" ") || potentialValue.endsWith(" ") || potentialValue.endsWith(" ")) { - return false; - } - if (potentialValue.includes("\0") || potentialValue.includes("\r") || potentialValue.includes("\n")) { - return false; - } - return true; - } - function setRequestReferrerPolicyOnRedirect(request, actualResponse) { - const { headersList } = actualResponse; - const policyHeader = (headersList.get("referrer-policy", true) ?? "").split(","); - let policy = ""; - if (policyHeader.length > 0) { - for (let i = policyHeader.length; i !== 0; i--) { - const token = policyHeader[i - 1].trim(); - if (referrerPolicyTokens.has(token)) { - policy = token; - break; + get(sessionKey) { + const ref = this._sessionCache.get(sessionKey); + return ref ? 
ref.deref() : null; + } + set(sessionKey, session) { + if (this._maxCachedSessions === 0) { + return; } + this._sessionCache.set(sessionKey, new WeakRef(session)); + this._sessionRegistry.register(session, sessionKey); } - } - if (policy !== "") { - request.referrerPolicy = policy; - } - } - function crossOriginResourcePolicyCheck() { - return "allowed"; - } - function corsCheck() { - return "success"; - } - function TAOCheck() { - return "success"; - } - function appendFetchMetadata(httpRequest) { - let header = null; - header = httpRequest.mode; - httpRequest.headersList.set("sec-fetch-mode", header, true); - } - function appendRequestOriginHeader(request) { - let serializedOrigin = request.origin; - if (request.responseTainting === "cors" || request.mode === "websocket") { - if (serializedOrigin) { - request.headersList.append("origin", serializedOrigin, true); + }; + } else { + SessionCache = class SimpleSessionCache { + constructor(maxCachedSessions) { + this._maxCachedSessions = maxCachedSessions; + this._sessionCache = /* @__PURE__ */ new Map(); } - } else if (request.method !== "GET" && request.method !== "HEAD") { - switch (request.referrerPolicy) { - case "no-referrer": - serializedOrigin = null; - break; - case "no-referrer-when-downgrade": - case "strict-origin": - case "strict-origin-when-cross-origin": - if (request.origin && urlHasHttpsScheme(request.origin) && !urlHasHttpsScheme(requestCurrentURL(request))) { - serializedOrigin = null; - } - break; - case "same-origin": - if (!sameOrigin(request, requestCurrentURL(request))) { - serializedOrigin = null; - } - break; - default: + get(sessionKey) { + return this._sessionCache.get(sessionKey); } - if (serializedOrigin) { - request.headersList.append("origin", serializedOrigin, true); + set(sessionKey, session) { + if (this._maxCachedSessions === 0) { + return; + } + if (this._sessionCache.size >= this._maxCachedSessions) { + const { value: oldestKey } = this._sessionCache.keys().next(); + this._sessionCache.delete(oldestKey); + } + this._sessionCache.set(sessionKey, session); } - } - } - function coarsenTime(timestamp, crossOriginIsolatedCapability) { - return timestamp; + }; } - function clampAndCoarsenConnectionTimingInfo(connectionTimingInfo, defaultStartTime, crossOriginIsolatedCapability) { - if (!connectionTimingInfo?.startTime || connectionTimingInfo.startTime < defaultStartTime) { - return { - domainLookupStartTime: defaultStartTime, - domainLookupEndTime: defaultStartTime, - connectionStartTime: defaultStartTime, - connectionEndTime: defaultStartTime, - secureConnectionStartTime: defaultStartTime, - ALPNNegotiatedProtocol: connectionTimingInfo?.ALPNNegotiatedProtocol - }; + function buildConnector({ allowH2, maxCachedSessions, socketPath, timeout, ...opts }) { + if (maxCachedSessions != null && (!Number.isInteger(maxCachedSessions) || maxCachedSessions < 0)) { + throw new InvalidArgumentError("maxCachedSessions must be a positive integer or zero"); } - return { - domainLookupStartTime: coarsenTime(connectionTimingInfo.domainLookupStartTime, crossOriginIsolatedCapability), - domainLookupEndTime: coarsenTime(connectionTimingInfo.domainLookupEndTime, crossOriginIsolatedCapability), - connectionStartTime: coarsenTime(connectionTimingInfo.connectionStartTime, crossOriginIsolatedCapability), - connectionEndTime: coarsenTime(connectionTimingInfo.connectionEndTime, crossOriginIsolatedCapability), - secureConnectionStartTime: coarsenTime(connectionTimingInfo.secureConnectionStartTime, crossOriginIsolatedCapability), - 
ALPNNegotiatedProtocol: connectionTimingInfo.ALPNNegotiatedProtocol - }; - } - function coarsenedSharedCurrentTime(crossOriginIsolatedCapability) { - return coarsenTime(performance.now(), crossOriginIsolatedCapability); - } - function createOpaqueTimingInfo(timingInfo) { - return { - startTime: timingInfo.startTime ?? 0, - redirectStartTime: 0, - redirectEndTime: 0, - postRedirectStartTime: timingInfo.startTime ?? 0, - finalServiceWorkerStartTime: 0, - finalNetworkResponseStartTime: 0, - finalNetworkRequestStartTime: 0, - endTime: 0, - encodedBodySize: 0, - decodedBodySize: 0, - finalConnectionTimingInfo: null + const options = { path: socketPath, ...opts }; + const sessionCache = new SessionCache(maxCachedSessions == null ? 100 : maxCachedSessions); + timeout = timeout == null ? 1e4 : timeout; + allowH2 = allowH2 != null ? allowH2 : false; + return function connect({ hostname, host, protocol, port, servername, localAddress, httpSocket }, callback) { + let socket; + if (protocol === "https:") { + if (!tls) { + tls = require("node:tls"); + } + servername = servername || options.servername || util.getServerName(host) || null; + const sessionKey = servername || hostname; + const session = sessionCache.get(sessionKey) || null; + assert3(sessionKey); + socket = tls.connect({ + highWaterMark: 16384, + // TLS in node can't have bigger HWM anyway... + ...options, + servername, + session, + localAddress, + // TODO(HTTP/2): Add support for h2c + ALPNProtocols: allowH2 ? ["http/1.1", "h2"] : ["http/1.1"], + socket: httpSocket, + // upgrade socket connection + port: port || 443, + host: hostname + }); + socket.on("session", function(session2) { + sessionCache.set(sessionKey, session2); + }); + } else { + assert3(!httpSocket, "httpSocket can only be sent on TLS update"); + socket = net.connect({ + highWaterMark: 64 * 1024, + // Same as nodejs fs streams. + ...options, + localAddress, + port: port || 80, + host: hostname + }); + } + if (options.keepAlive == null || options.keepAlive) { + const keepAliveInitialDelay = options.keepAliveInitialDelay === void 0 ? 6e4 : options.keepAliveInitialDelay; + socket.setKeepAlive(true, keepAliveInitialDelay); + } + const cancelTimeout = setupTimeout(() => onConnectTimeout(socket), timeout); + socket.setNoDelay(true).once(protocol === "https:" ? 
"secureConnect" : "connect", function() { + cancelTimeout(); + if (callback) { + const cb = callback; + callback = null; + cb(null, this); + } + }).on("error", function(err) { + cancelTimeout(); + if (callback) { + const cb = callback; + callback = null; + cb(err); + } + }); + return socket; }; } - function makePolicyContainer() { - return { - referrerPolicy: "strict-origin-when-cross-origin" + function setupTimeout(onConnectTimeout2, timeout) { + if (!timeout) { + return () => { + }; + } + let s1 = null; + let s2 = null; + const timeoutId = setTimeout(() => { + s1 = setImmediate(() => { + if (process.platform === "win32") { + s2 = setImmediate(() => onConnectTimeout2()); + } else { + onConnectTimeout2(); + } + }); + }, timeout); + return () => { + clearTimeout(timeoutId); + clearImmediate(s1); + clearImmediate(s2); }; } - function clonePolicyContainer(policyContainer) { - return { - referrerPolicy: policyContainer.referrerPolicy - }; + function onConnectTimeout(socket) { + let message = "Connect Timeout Error"; + if (Array.isArray(socket.autoSelectFamilyAttemptedAddresses)) { + message += ` (attempted addresses: ${socket.autoSelectFamilyAttemptedAddresses.join(", ")})`; + } + util.destroy(socket, new ConnectTimeoutError(message)); } - function determineRequestsReferrer(request) { - const policy = request.referrerPolicy; - assert3(policy); - let referrerSource = null; - if (request.referrer === "client") { - const globalOrigin = getGlobalOrigin(); - if (!globalOrigin || globalOrigin.origin === "null") { - return "no-referrer"; + module2.exports = buildConnector; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/util/timers.js +var require_timers = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/util/timers.js"(exports, module2) { + "use strict"; + var fastNow = Date.now(); + var fastNowTimeout; + var fastTimers = []; + function onTimeout() { + fastNow = Date.now(); + let len = fastTimers.length; + let idx = 0; + while (idx < len) { + const timer = fastTimers[idx]; + if (timer.state === 0) { + timer.state = fastNow + timer.delay; + } else if (timer.state > 0 && fastNow >= timer.state) { + timer.state = -1; + timer.callback(timer.opaque); } - referrerSource = new URL(globalOrigin); - } else if (request.referrer instanceof URL) { - referrerSource = request.referrer; - } - let referrerURL = stripURLForReferrer(referrerSource); - const referrerOrigin = stripURLForReferrer(referrerSource, true); - if (referrerURL.toString().length > 4096) { - referrerURL = referrerOrigin; - } - const areSameOrigin = sameOrigin(request, referrerURL); - const isNonPotentiallyTrustWorthy = isURLPotentiallyTrustworthy(referrerURL) && !isURLPotentiallyTrustworthy(request.url); - switch (policy) { - case "origin": - return referrerOrigin != null ? referrerOrigin : stripURLForReferrer(referrerSource, true); - case "unsafe-url": - return referrerURL; - case "same-origin": - return areSameOrigin ? referrerOrigin : "no-referrer"; - case "origin-when-cross-origin": - return areSameOrigin ? 
referrerURL : referrerOrigin; - case "strict-origin-when-cross-origin": { - const currentURL = requestCurrentURL(request); - if (sameOrigin(referrerURL, currentURL)) { - return referrerURL; - } - if (isURLPotentiallyTrustworthy(referrerURL) && !isURLPotentiallyTrustworthy(currentURL)) { - return "no-referrer"; + if (timer.state === -1) { + timer.state = -2; + if (idx !== len - 1) { + fastTimers[idx] = fastTimers.pop(); + } else { + fastTimers.pop(); } - return referrerOrigin; + len -= 1; + } else { + idx += 1; } - case "strict-origin": - case "no-referrer-when-downgrade": - default: - return isNonPotentiallyTrustWorthy ? "no-referrer" : referrerOrigin; - } - } - function stripURLForReferrer(url, originOnly) { - assert3(url instanceof URL); - if (url.protocol === "file:" || url.protocol === "about:" || url.protocol === "blank:") { - return "no-referrer"; } - url.username = ""; - url.password = ""; - url.hash = ""; - if (originOnly) { - url.pathname = ""; - url.search = ""; + if (fastTimers.length > 0) { + refreshTimeout(); } - return url; } - function isURLPotentiallyTrustworthy(url) { - if (!(url instanceof URL)) { - return false; - } - if (url.href === "about:blank" || url.href === "about:srcdoc") { - return true; - } - if (url.protocol === "data:") - return true; - if (url.protocol === "file:") - return true; - return isOriginPotentiallyTrustworthy(url.origin); - function isOriginPotentiallyTrustworthy(origin) { - if (origin == null || origin === "null") - return false; - const originAsURL = new URL(origin); - if (originAsURL.protocol === "https:" || originAsURL.protocol === "wss:") { - return true; - } - if (/^127(?:\.[0-9]+){0,2}\.[0-9]+$|^\[(?:0*:)*?:?0*1\]$/.test(originAsURL.hostname) || (originAsURL.hostname === "localhost" || originAsURL.hostname.includes("localhost.")) || originAsURL.hostname.endsWith(".localhost")) { - return true; + function refreshTimeout() { + if (fastNowTimeout?.refresh) { + fastNowTimeout.refresh(); + } else { + clearTimeout(fastNowTimeout); + fastNowTimeout = setTimeout(onTimeout, 1e3); + if (fastNowTimeout.unref) { + fastNowTimeout.unref(); } - return false; } } - function bytesMatch(bytes, metadataList) { - if (crypto === void 0) { - return true; - } - const parsedMetadata = parseMetadata(metadataList); - if (parsedMetadata === "no metadata") { - return true; + var Timeout = class { + constructor(callback, delay, opaque) { + this.callback = callback; + this.delay = delay; + this.opaque = opaque; + this.state = -2; + this.refresh(); } - if (parsedMetadata.length === 0) { - return true; + refresh() { + if (this.state === -2) { + fastTimers.push(this); + if (!fastNowTimeout || fastTimers.length === 1) { + refreshTimeout(); + } + } + this.state = 0; } - const list = parsedMetadata.sort((c, d) => d.algo.localeCompare(c.algo)); - const strongest = list[0].algo; - const metadata = list.filter((item) => item.algo === strongest); - for (const item of metadata) { - const algorithm = item.algo; - let expectedValue = item.hash; - if (expectedValue.endsWith("==")) { - expectedValue = expectedValue.slice(0, -2); - } - let actualValue = crypto.createHash(algorithm).update(bytes).digest("base64"); - if (actualValue.endsWith("==")) { - actualValue = actualValue.slice(0, -2); - } - if (actualValue === expectedValue) { - return true; - } - let actualBase64URL = crypto.createHash(algorithm).update(bytes).digest("base64url"); - if (actualBase64URL.endsWith("==")) { - actualBase64URL = actualBase64URL.slice(0, -2); - } - if (actualBase64URL === expectedValue) { - return true; 
- } - } - return false; - } - var parseHashWithOptions = /(?sha256|sha384|sha512)-(?[A-Za-z0-9+/]+={0,2}(?=\s|$))( +[!-~]*)?/i; - function parseMetadata(metadata) { - const result = []; - let empty = true; - const supportedHashes = crypto.getHashes(); - for (const token of metadata.split(" ")) { - empty = false; - const parsedToken = parseHashWithOptions.exec(token); - if (parsedToken === null || parsedToken.groups === void 0) { - continue; - } - const algorithm = parsedToken.groups.algo; - if (supportedHashes.includes(algorithm.toLowerCase())) { - result.push(parsedToken.groups); - } - } - if (empty === true) { - return "no metadata"; - } - return result; - } - function tryUpgradeRequestToAPotentiallyTrustworthyURL(request) { - } - function sameOrigin(A, B) { - if (A.origin === B.origin && A.origin === "null") { - return true; - } - if (A.protocol === B.protocol && A.hostname === B.hostname && A.port === B.port) { - return true; + clear() { + this.state = -1; } - return false; - } - function createDeferredPromise() { - let res; - let rej; - const promise = new Promise((resolve, reject) => { - res = resolve; - rej = reject; - }); - return { promise, resolve: res, reject: rej }; - } - function isAborted(fetchParams) { - return fetchParams.controller.state === "aborted"; - } - function isCancelled(fetchParams) { - return fetchParams.controller.state === "aborted" || fetchParams.controller.state === "terminated"; - } - var normalizeMethodRecordBase = { - delete: "DELETE", - DELETE: "DELETE", - get: "GET", - GET: "GET", - head: "HEAD", - HEAD: "HEAD", - options: "OPTIONS", - OPTIONS: "OPTIONS", - post: "POST", - POST: "POST", - put: "PUT", - PUT: "PUT" - }; - var normalizeMethodRecord = { - ...normalizeMethodRecordBase, - patch: "patch", - PATCH: "PATCH" }; - Object.setPrototypeOf(normalizeMethodRecordBase, null); - Object.setPrototypeOf(normalizeMethodRecord, null); - function normalizeMethod(method) { - return normalizeMethodRecordBase[method.toLowerCase()] ?? 
method; - } - function serializeJavascriptValueToJSONString(value) { - const result = JSON.stringify(value); - if (result === void 0) { - throw new TypeError("Value is not JSON serializable"); - } - assert3(typeof result === "string"); - return result; - } - var esIteratorPrototype = Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]())); - function makeIterator(iterator, name, kind, keyIndex = 0, valueIndex = 1) { - const object = { - index: 0, - kind, - target: iterator - }; - const iteratorObject = Object.create(esIteratorPrototype); - Object.defineProperty(iteratorObject, "next", { - value: function next() { - if (Object.getPrototypeOf(this) !== iteratorObject) { - throw new TypeError( - `'next' called on an object that does not implement interface ${name} Iterator.` - ); - } - const { index, kind: kind2, target } = object; - const values = target(); - const len = values.length; - if (index >= len) { - return { value: void 0, done: true }; - } - const { [keyIndex]: key, [valueIndex]: value } = values[index]; - object.index = index + 1; - let result; - switch (kind2) { - case "key": - result = key; - break; - case "value": - result = value; - break; - case "key+value": - result = [key, value]; - break; - } - return { - value: result, - done: false - }; - }, - writable: true, - enumerable: true, - configurable: true - }); - Object.defineProperty(iteratorObject, Symbol.toStringTag, { - value: `${name} Iterator`, - writable: false, - enumerable: false, - configurable: true - }); - return Object.create(iteratorObject); - } - async function fullyReadBody(body, processBody, processBodyError) { - const successSteps = processBody; - const errorSteps = processBodyError; - let reader; - try { - reader = body.stream.getReader(); - } catch (e) { - errorSteps(e); - return; - } - try { - const result = await readAllBytes(reader); - successSteps(result); - } catch (e) { - errorSteps(e); - } - } - function isReadableStreamLike(stream) { - return stream instanceof ReadableStream || stream[Symbol.toStringTag] === "ReadableStream" && typeof stream.tee === "function"; - } - function readableStreamClose(controller) { - try { - controller.close(); - controller.byobRequest?.respond(0); - } catch (err) { - if (!err.message.includes("Controller is already closed") && !err.message.includes("ReadableStream is already closed")) { - throw err; + module2.exports = { + setTimeout(callback, delay, opaque) { + return delay < 1e3 ? 
setTimeout(callback, delay, opaque) : new Timeout(callback, delay, opaque); + }, + clearTimeout(timeout) { + if (timeout instanceof Timeout) { + timeout.clear(); + } else { + clearTimeout(timeout); } } - } - function isomorphicEncode(input) { - for (let i = 0; i < input.length; i++) { - assert3(input.charCodeAt(i) <= 255); - } - return input; - } - async function readAllBytes(reader) { - const bytes = []; - let byteLength = 0; - while (true) { - const { done, value: chunk } = await reader.read(); - if (done) { - return Buffer.concat(bytes, byteLength); - } - if (!isUint8Array(chunk)) { - throw new TypeError("Received non-Uint8Array chunk"); + }; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/llhttp/utils.js +var require_utils = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/llhttp/utils.js"(exports) { + "use strict"; + Object.defineProperty(exports, "__esModule", { value: true }); + exports.enumToMap = void 0; + function enumToMap(obj) { + const res = {}; + Object.keys(obj).forEach((key) => { + const value = obj[key]; + if (typeof value === "number") { + res[key] = value; } - bytes.push(chunk); - byteLength += chunk.length; - } - } - function urlIsLocal(url) { - assert3("protocol" in url); - const protocol = url.protocol; - return protocol === "about:" || protocol === "blob:" || protocol === "data:"; - } - function urlHasHttpsScheme(url) { - if (typeof url === "string") { - return url.startsWith("https:"); - } - return url.protocol === "https:"; - } - function urlIsHttpHttpsScheme(url) { - assert3("protocol" in url); - const protocol = url.protocol; - return protocol === "http:" || protocol === "https:"; + }); + return res; } - function simpleRangeHeaderValue(value, allowWhitespace) { - const data = value; - if (!data.startsWith("bytes")) { - return "failure"; - } - const position = { position: 5 }; - if (allowWhitespace) { - collectASequenceOfCodePoints( - (char) => char === " " || char === " ", - data, - position - ); - } - if (data.charCodeAt(position.position) !== 61) { - return "failure"; - } - position.position++; - if (allowWhitespace) { - collectASequenceOfCodePoints( - (char) => char === " " || char === " ", - data, - position - ); - } - const rangeStart = collectASequenceOfCodePoints( - (char) => { - const code = char.charCodeAt(0); - return code >= 48 && code <= 57; - }, - data, - position - ); - const rangeStartValue = rangeStart.length ? Number(rangeStart) : null; - if (allowWhitespace) { - collectASequenceOfCodePoints( - (char) => char === " " || char === " ", - data, - position - ); - } - if (data.charCodeAt(position.position) !== 45) { - return "failure"; - } - position.position++; - if (allowWhitespace) { - collectASequenceOfCodePoints( - (char) => char === " " || char === " ", - data, - position - ); - } - const rangeEnd = collectASequenceOfCodePoints( - (char) => { - const code = char.charCodeAt(0); - return code >= 48 && code <= 57; - }, - data, - position - ); - const rangeEndValue = rangeEnd.length ? 
Number(rangeEnd) : null; - if (position.position < data.length) { - return "failure"; - } - if (rangeEndValue === null && rangeStartValue === null) { - return "failure"; - } - if (rangeStartValue > rangeEndValue) { - return "failure"; - } - return { rangeStartValue, rangeEndValue }; - } - function buildContentRange(rangeStart, rangeEnd, fullLength) { - let contentRange = "bytes "; - contentRange += isomorphicEncode(`${rangeStart}`); - contentRange += "-"; - contentRange += isomorphicEncode(`${rangeEnd}`); - contentRange += "/"; - contentRange += isomorphicEncode(`${fullLength}`); - return contentRange; - } - var InflateStream = class extends Transform { - _transform(chunk, encoding, callback) { - if (!this._inflateStream) { - if (chunk.length === 0) { - callback(); - return; - } - this._inflateStream = (chunk[0] & 15) === 8 ? zlib.createInflate() : zlib.createInflateRaw(); - this._inflateStream.on("data", this.push.bind(this)); - this._inflateStream.on("end", () => this.push(null)); - this._inflateStream.on("error", (err) => this.destroy(err)); - } - this._inflateStream.write(chunk, encoding, callback); - } - _final(callback) { - if (this._inflateStream) { - this._inflateStream.end(); - this._inflateStream = null; - } - callback(); - } - }; - function createInflate() { - return new InflateStream(); - } - function extractMimeType(headers) { - let charset = null; - let essence = null; - let mimeType = null; - const values = getDecodeSplit("content-type", headers); - if (values === null) { - return "failure"; - } - for (const value of values) { - const temporaryMimeType = parseMIMEType(value); - if (temporaryMimeType === "failure" || temporaryMimeType.essence === "*/*") { - continue; - } - mimeType = temporaryMimeType; - if (mimeType.essence !== essence) { - charset = null; - if (mimeType.parameters.has("charset")) { - charset = mimeType.parameters.get("charset"); - } - essence = mimeType.essence; - } else if (!mimeType.parameters.has("charset") && charset !== null) { - mimeType.parameters.set("charset", charset); - } - } - if (mimeType == null) { - return "failure"; - } - return mimeType; - } - function gettingDecodingSplitting(value) { - const input = value; - const position = { position: 0 }; - const values = []; - let temporaryValue = ""; - while (position.position < input.length) { - temporaryValue += collectASequenceOfCodePoints( - (char) => char !== '"' && char !== ",", - input, - position - ); - if (position.position < input.length) { - if (input.charCodeAt(position.position) === 34) { - temporaryValue += collectAnHTTPQuotedString( - input, - position - ); - if (position.position < input.length) { - continue; - } - } else { - assert3(input.charCodeAt(position.position) === 44); - position.position++; - } - } - temporaryValue = removeChars(temporaryValue, true, true, (char) => char === 9 || char === 32); - values.push(temporaryValue); - temporaryValue = ""; - } - return values; - } - function getDecodeSplit(name, list) { - const value = list.get(name, true); - if (value === null) { - return null; - } - return gettingDecodingSplitting(value); - } - module2.exports = { - isAborted, - isCancelled, - createDeferredPromise, - ReadableStreamFrom, - toUSVString, - tryUpgradeRequestToAPotentiallyTrustworthyURL, - clampAndCoarsenConnectionTimingInfo, - coarsenedSharedCurrentTime, - determineRequestsReferrer, - makePolicyContainer, - clonePolicyContainer, - appendFetchMetadata, - appendRequestOriginHeader, - TAOCheck, - corsCheck, - crossOriginResourcePolicyCheck, - createOpaqueTimingInfo, - 
setRequestReferrerPolicyOnRedirect, - isValidHTTPToken, - requestBadPort, - requestCurrentURL, - responseURL, - responseLocationURL, - isBlobLike, - isURLPotentiallyTrustworthy, - isValidReasonPhrase, - sameOrigin, - normalizeMethod, - serializeJavascriptValueToJSONString, - makeIterator, - isValidHeaderName, - isValidHeaderValue, - isErrorLike, - fullyReadBody, - bytesMatch, - isReadableStreamLike, - readableStreamClose, - isomorphicEncode, - urlIsLocal, - urlHasHttpsScheme, - urlIsHttpHttpsScheme, - readAllBytes, - normalizeMethodRecord, - simpleRangeHeaderValue, - buildContentRange, - parseMetadata, - createInflate, - extractMimeType - }; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/symbols.js -var require_symbols2 = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/symbols.js"(exports, module2) { - "use strict"; - module2.exports = { - kUrl: Symbol("url"), - kHeaders: Symbol("headers"), - kSignal: Symbol("signal"), - kState: Symbol("state"), - kGuard: Symbol("guard"), - kRealm: Symbol("realm") - }; + exports.enumToMap = enumToMap; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/webidl.js -var require_webidl = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/webidl.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/llhttp/constants.js +var require_constants3 = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/llhttp/constants.js"(exports) { "use strict"; - var { types } = require("node:util"); - var { toUSVString } = require_util2(); - var webidl = {}; - webidl.converters = {}; - webidl.util = {}; - webidl.errors = {}; - webidl.errors.exception = function(message) { - return new TypeError(`${message.header}: ${message.message}`); - }; - webidl.errors.conversionFailed = function(context) { - const plural2 = context.types.length === 1 ? "" : " one of"; - const message = `${context.argument} could not be converted to${plural2}: ${context.types.join(", ")}.`; - return webidl.errors.exception({ - header: context.prefix, - message - }); - }; - webidl.errors.invalidArgument = function(context) { - return webidl.errors.exception({ - header: context.prefix, - message: `"${context.value}" is an invalid ${context.type}.` - }); - }; - webidl.brandCheck = function(V, I, opts = void 0) { - if (opts?.strict !== false) { - if (!(V instanceof I)) { - throw new TypeError("Illegal invocation"); - } - } else { - if (V?.[Symbol.toStringTag] !== I.prototype[Symbol.toStringTag]) { - throw new TypeError("Illegal invocation"); - } - } - }; - webidl.argumentLengthCheck = function({ length }, min, ctx) { - if (length < min) { - throw webidl.errors.exception({ - message: `${min} argument${min !== 1 ? "s" : ""} required, but${length ? 
" only" : ""} ${length} found.`, - ...ctx - }); - } - }; - webidl.illegalConstructor = function() { - throw webidl.errors.exception({ - header: "TypeError", - message: "Illegal constructor" - }); - }; - webidl.util.Type = function(V) { - switch (typeof V) { - case "undefined": - return "Undefined"; - case "boolean": - return "Boolean"; - case "string": - return "String"; - case "symbol": - return "Symbol"; - case "number": - return "Number"; - case "bigint": - return "BigInt"; - case "function": - case "object": { - if (V === null) { - return "Null"; - } - return "Object"; - } - } - }; + Object.defineProperty(exports, "__esModule", { value: true }); + exports.SPECIAL_HEADERS = exports.HEADER_STATE = exports.MINOR = exports.MAJOR = exports.CONNECTION_TOKEN_CHARS = exports.HEADER_CHARS = exports.TOKEN = exports.STRICT_TOKEN = exports.HEX = exports.URL_CHAR = exports.STRICT_URL_CHAR = exports.USERINFO_CHARS = exports.MARK = exports.ALPHANUM = exports.NUM = exports.HEX_MAP = exports.NUM_MAP = exports.ALPHA = exports.FINISH = exports.H_METHOD_MAP = exports.METHOD_MAP = exports.METHODS_RTSP = exports.METHODS_ICE = exports.METHODS_HTTP = exports.METHODS = exports.LENIENT_FLAGS = exports.FLAGS = exports.TYPE = exports.ERROR = void 0; + var utils_1 = require_utils(); + var ERROR; + (function(ERROR2) { + ERROR2[ERROR2["OK"] = 0] = "OK"; + ERROR2[ERROR2["INTERNAL"] = 1] = "INTERNAL"; + ERROR2[ERROR2["STRICT"] = 2] = "STRICT"; + ERROR2[ERROR2["LF_EXPECTED"] = 3] = "LF_EXPECTED"; + ERROR2[ERROR2["UNEXPECTED_CONTENT_LENGTH"] = 4] = "UNEXPECTED_CONTENT_LENGTH"; + ERROR2[ERROR2["CLOSED_CONNECTION"] = 5] = "CLOSED_CONNECTION"; + ERROR2[ERROR2["INVALID_METHOD"] = 6] = "INVALID_METHOD"; + ERROR2[ERROR2["INVALID_URL"] = 7] = "INVALID_URL"; + ERROR2[ERROR2["INVALID_CONSTANT"] = 8] = "INVALID_CONSTANT"; + ERROR2[ERROR2["INVALID_VERSION"] = 9] = "INVALID_VERSION"; + ERROR2[ERROR2["INVALID_HEADER_TOKEN"] = 10] = "INVALID_HEADER_TOKEN"; + ERROR2[ERROR2["INVALID_CONTENT_LENGTH"] = 11] = "INVALID_CONTENT_LENGTH"; + ERROR2[ERROR2["INVALID_CHUNK_SIZE"] = 12] = "INVALID_CHUNK_SIZE"; + ERROR2[ERROR2["INVALID_STATUS"] = 13] = "INVALID_STATUS"; + ERROR2[ERROR2["INVALID_EOF_STATE"] = 14] = "INVALID_EOF_STATE"; + ERROR2[ERROR2["INVALID_TRANSFER_ENCODING"] = 15] = "INVALID_TRANSFER_ENCODING"; + ERROR2[ERROR2["CB_MESSAGE_BEGIN"] = 16] = "CB_MESSAGE_BEGIN"; + ERROR2[ERROR2["CB_HEADERS_COMPLETE"] = 17] = "CB_HEADERS_COMPLETE"; + ERROR2[ERROR2["CB_MESSAGE_COMPLETE"] = 18] = "CB_MESSAGE_COMPLETE"; + ERROR2[ERROR2["CB_CHUNK_HEADER"] = 19] = "CB_CHUNK_HEADER"; + ERROR2[ERROR2["CB_CHUNK_COMPLETE"] = 20] = "CB_CHUNK_COMPLETE"; + ERROR2[ERROR2["PAUSED"] = 21] = "PAUSED"; + ERROR2[ERROR2["PAUSED_UPGRADE"] = 22] = "PAUSED_UPGRADE"; + ERROR2[ERROR2["PAUSED_H2_UPGRADE"] = 23] = "PAUSED_H2_UPGRADE"; + ERROR2[ERROR2["USER"] = 24] = "USER"; + })(ERROR = exports.ERROR || (exports.ERROR = {})); + var TYPE; + (function(TYPE2) { + TYPE2[TYPE2["BOTH"] = 0] = "BOTH"; + TYPE2[TYPE2["REQUEST"] = 1] = "REQUEST"; + TYPE2[TYPE2["RESPONSE"] = 2] = "RESPONSE"; + })(TYPE = exports.TYPE || (exports.TYPE = {})); + var FLAGS; + (function(FLAGS2) { + FLAGS2[FLAGS2["CONNECTION_KEEP_ALIVE"] = 1] = "CONNECTION_KEEP_ALIVE"; + FLAGS2[FLAGS2["CONNECTION_CLOSE"] = 2] = "CONNECTION_CLOSE"; + FLAGS2[FLAGS2["CONNECTION_UPGRADE"] = 4] = "CONNECTION_UPGRADE"; + FLAGS2[FLAGS2["CHUNKED"] = 8] = "CHUNKED"; + FLAGS2[FLAGS2["UPGRADE"] = 16] = "UPGRADE"; + FLAGS2[FLAGS2["CONTENT_LENGTH"] = 32] = "CONTENT_LENGTH"; + FLAGS2[FLAGS2["SKIPBODY"] = 64] = "SKIPBODY"; + 
FLAGS2[FLAGS2["TRAILING"] = 128] = "TRAILING"; + FLAGS2[FLAGS2["TRANSFER_ENCODING"] = 512] = "TRANSFER_ENCODING"; + })(FLAGS = exports.FLAGS || (exports.FLAGS = {})); + var LENIENT_FLAGS; + (function(LENIENT_FLAGS2) { + LENIENT_FLAGS2[LENIENT_FLAGS2["HEADERS"] = 1] = "HEADERS"; + LENIENT_FLAGS2[LENIENT_FLAGS2["CHUNKED_LENGTH"] = 2] = "CHUNKED_LENGTH"; + LENIENT_FLAGS2[LENIENT_FLAGS2["KEEP_ALIVE"] = 4] = "KEEP_ALIVE"; + })(LENIENT_FLAGS = exports.LENIENT_FLAGS || (exports.LENIENT_FLAGS = {})); + var METHODS; + (function(METHODS2) { + METHODS2[METHODS2["DELETE"] = 0] = "DELETE"; + METHODS2[METHODS2["GET"] = 1] = "GET"; + METHODS2[METHODS2["HEAD"] = 2] = "HEAD"; + METHODS2[METHODS2["POST"] = 3] = "POST"; + METHODS2[METHODS2["PUT"] = 4] = "PUT"; + METHODS2[METHODS2["CONNECT"] = 5] = "CONNECT"; + METHODS2[METHODS2["OPTIONS"] = 6] = "OPTIONS"; + METHODS2[METHODS2["TRACE"] = 7] = "TRACE"; + METHODS2[METHODS2["COPY"] = 8] = "COPY"; + METHODS2[METHODS2["LOCK"] = 9] = "LOCK"; + METHODS2[METHODS2["MKCOL"] = 10] = "MKCOL"; + METHODS2[METHODS2["MOVE"] = 11] = "MOVE"; + METHODS2[METHODS2["PROPFIND"] = 12] = "PROPFIND"; + METHODS2[METHODS2["PROPPATCH"] = 13] = "PROPPATCH"; + METHODS2[METHODS2["SEARCH"] = 14] = "SEARCH"; + METHODS2[METHODS2["UNLOCK"] = 15] = "UNLOCK"; + METHODS2[METHODS2["BIND"] = 16] = "BIND"; + METHODS2[METHODS2["REBIND"] = 17] = "REBIND"; + METHODS2[METHODS2["UNBIND"] = 18] = "UNBIND"; + METHODS2[METHODS2["ACL"] = 19] = "ACL"; + METHODS2[METHODS2["REPORT"] = 20] = "REPORT"; + METHODS2[METHODS2["MKACTIVITY"] = 21] = "MKACTIVITY"; + METHODS2[METHODS2["CHECKOUT"] = 22] = "CHECKOUT"; + METHODS2[METHODS2["MERGE"] = 23] = "MERGE"; + METHODS2[METHODS2["M-SEARCH"] = 24] = "M-SEARCH"; + METHODS2[METHODS2["NOTIFY"] = 25] = "NOTIFY"; + METHODS2[METHODS2["SUBSCRIBE"] = 26] = "SUBSCRIBE"; + METHODS2[METHODS2["UNSUBSCRIBE"] = 27] = "UNSUBSCRIBE"; + METHODS2[METHODS2["PATCH"] = 28] = "PATCH"; + METHODS2[METHODS2["PURGE"] = 29] = "PURGE"; + METHODS2[METHODS2["MKCALENDAR"] = 30] = "MKCALENDAR"; + METHODS2[METHODS2["LINK"] = 31] = "LINK"; + METHODS2[METHODS2["UNLINK"] = 32] = "UNLINK"; + METHODS2[METHODS2["SOURCE"] = 33] = "SOURCE"; + METHODS2[METHODS2["PRI"] = 34] = "PRI"; + METHODS2[METHODS2["DESCRIBE"] = 35] = "DESCRIBE"; + METHODS2[METHODS2["ANNOUNCE"] = 36] = "ANNOUNCE"; + METHODS2[METHODS2["SETUP"] = 37] = "SETUP"; + METHODS2[METHODS2["PLAY"] = 38] = "PLAY"; + METHODS2[METHODS2["PAUSE"] = 39] = "PAUSE"; + METHODS2[METHODS2["TEARDOWN"] = 40] = "TEARDOWN"; + METHODS2[METHODS2["GET_PARAMETER"] = 41] = "GET_PARAMETER"; + METHODS2[METHODS2["SET_PARAMETER"] = 42] = "SET_PARAMETER"; + METHODS2[METHODS2["REDIRECT"] = 43] = "REDIRECT"; + METHODS2[METHODS2["RECORD"] = 44] = "RECORD"; + METHODS2[METHODS2["FLUSH"] = 45] = "FLUSH"; + })(METHODS = exports.METHODS || (exports.METHODS = {})); + exports.METHODS_HTTP = [ + METHODS.DELETE, + METHODS.GET, + METHODS.HEAD, + METHODS.POST, + METHODS.PUT, + METHODS.CONNECT, + METHODS.OPTIONS, + METHODS.TRACE, + METHODS.COPY, + METHODS.LOCK, + METHODS.MKCOL, + METHODS.MOVE, + METHODS.PROPFIND, + METHODS.PROPPATCH, + METHODS.SEARCH, + METHODS.UNLOCK, + METHODS.BIND, + METHODS.REBIND, + METHODS.UNBIND, + METHODS.ACL, + METHODS.REPORT, + METHODS.MKACTIVITY, + METHODS.CHECKOUT, + METHODS.MERGE, + METHODS["M-SEARCH"], + METHODS.NOTIFY, + METHODS.SUBSCRIBE, + METHODS.UNSUBSCRIBE, + METHODS.PATCH, + METHODS.PURGE, + METHODS.MKCALENDAR, + METHODS.LINK, + METHODS.UNLINK, + METHODS.PRI, + // TODO(indutny): should we allow it with HTTP? 
+ METHODS.SOURCE + ]; + exports.METHODS_ICE = [ + METHODS.SOURCE + ]; + exports.METHODS_RTSP = [ + METHODS.OPTIONS, + METHODS.DESCRIBE, + METHODS.ANNOUNCE, + METHODS.SETUP, + METHODS.PLAY, + METHODS.PAUSE, + METHODS.TEARDOWN, + METHODS.GET_PARAMETER, + METHODS.SET_PARAMETER, + METHODS.REDIRECT, + METHODS.RECORD, + METHODS.FLUSH, + // For AirPlay + METHODS.GET, + METHODS.POST + ]; + exports.METHOD_MAP = utils_1.enumToMap(METHODS); + exports.H_METHOD_MAP = {}; + Object.keys(exports.METHOD_MAP).forEach((key) => { + if (/^H/.test(key)) { + exports.H_METHOD_MAP[key] = exports.METHOD_MAP[key]; + } + }); + var FINISH; + (function(FINISH2) { + FINISH2[FINISH2["SAFE"] = 0] = "SAFE"; + FINISH2[FINISH2["SAFE_WITH_CB"] = 1] = "SAFE_WITH_CB"; + FINISH2[FINISH2["UNSAFE"] = 2] = "UNSAFE"; + })(FINISH = exports.FINISH || (exports.FINISH = {})); + exports.ALPHA = []; + for (let i = "A".charCodeAt(0); i <= "Z".charCodeAt(0); i++) { + exports.ALPHA.push(String.fromCharCode(i)); + exports.ALPHA.push(String.fromCharCode(i + 32)); + } + exports.NUM_MAP = { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + 4: 4, + 5: 5, + 6: 6, + 7: 7, + 8: 8, + 9: 9 + }; + exports.HEX_MAP = { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + 4: 4, + 5: 5, + 6: 6, + 7: 7, + 8: 8, + 9: 9, + A: 10, + B: 11, + C: 12, + D: 13, + E: 14, + F: 15, + a: 10, + b: 11, + c: 12, + d: 13, + e: 14, + f: 15 + }; + exports.NUM = [ + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9" + ]; + exports.ALPHANUM = exports.ALPHA.concat(exports.NUM); + exports.MARK = ["-", "_", ".", "!", "~", "*", "'", "(", ")"]; + exports.USERINFO_CHARS = exports.ALPHANUM.concat(exports.MARK).concat(["%", ";", ":", "&", "=", "+", "$", ","]); + exports.STRICT_URL_CHAR = [ + "!", + '"', + "$", + "%", + "&", + "'", + "(", + ")", + "*", + "+", + ",", + "-", + ".", + "/", + ":", + ";", + "<", + "=", + ">", + "@", + "[", + "\\", + "]", + "^", + "_", + "`", + "{", + "|", + "}", + "~" + ].concat(exports.ALPHANUM); + exports.URL_CHAR = exports.STRICT_URL_CHAR.concat([" ", "\f"]); + for (let i = 128; i <= 255; i++) { + exports.URL_CHAR.push(i); + } + exports.HEX = exports.NUM.concat(["a", "b", "c", "d", "e", "f", "A", "B", "C", "D", "E", "F"]); + exports.STRICT_TOKEN = [ + "!", + "#", + "$", + "%", + "&", + "'", + "*", + "+", + "-", + ".", + "^", + "_", + "`", + "|", + "~" + ].concat(exports.ALPHANUM); + exports.TOKEN = exports.STRICT_TOKEN.concat([" "]); + exports.HEADER_CHARS = [" "]; + for (let i = 32; i <= 255; i++) { + if (i !== 127) { + exports.HEADER_CHARS.push(i); + } + } + exports.CONNECTION_TOKEN_CHARS = exports.HEADER_CHARS.filter((c) => c !== 44); + exports.MAJOR = exports.NUM_MAP; + exports.MINOR = exports.MAJOR; + var HEADER_STATE; + (function(HEADER_STATE2) { + HEADER_STATE2[HEADER_STATE2["GENERAL"] = 0] = "GENERAL"; + HEADER_STATE2[HEADER_STATE2["CONNECTION"] = 1] = "CONNECTION"; + HEADER_STATE2[HEADER_STATE2["CONTENT_LENGTH"] = 2] = "CONTENT_LENGTH"; + HEADER_STATE2[HEADER_STATE2["TRANSFER_ENCODING"] = 3] = "TRANSFER_ENCODING"; + HEADER_STATE2[HEADER_STATE2["UPGRADE"] = 4] = "UPGRADE"; + HEADER_STATE2[HEADER_STATE2["CONNECTION_KEEP_ALIVE"] = 5] = "CONNECTION_KEEP_ALIVE"; + HEADER_STATE2[HEADER_STATE2["CONNECTION_CLOSE"] = 6] = "CONNECTION_CLOSE"; + HEADER_STATE2[HEADER_STATE2["CONNECTION_UPGRADE"] = 7] = "CONNECTION_UPGRADE"; + HEADER_STATE2[HEADER_STATE2["TRANSFER_ENCODING_CHUNKED"] = 8] = "TRANSFER_ENCODING_CHUNKED"; + })(HEADER_STATE = exports.HEADER_STATE || (exports.HEADER_STATE = {})); + exports.SPECIAL_HEADERS = { + "connection": HEADER_STATE.CONNECTION, + 
"content-length": HEADER_STATE.CONTENT_LENGTH, + "proxy-connection": HEADER_STATE.CONNECTION, + "transfer-encoding": HEADER_STATE.TRANSFER_ENCODING, + "upgrade": HEADER_STATE.UPGRADE + }; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/llhttp/llhttp-wasm.js +var require_llhttp_wasm = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/llhttp/llhttp-wasm.js"(exports, module2) { + var { Buffer: Buffer2 } = require("node:buffer"); + module2.exports = Buffer2.from("AGFzbQEAAAABMAhgAX8Bf2ADf39/AX9gBH9/f38Bf2AAAGADf39/AGABfwBgAn9/AGAGf39/f39/AALLAQgDZW52GHdhc21fb25faGVhZGVyc19jb21wbGV0ZQACA2VudhV3YXNtX29uX21lc3NhZ2VfYmVnaW4AAANlbnYLd2FzbV9vbl91cmwAAQNlbnYOd2FzbV9vbl9zdGF0dXMAAQNlbnYUd2FzbV9vbl9oZWFkZXJfZmllbGQAAQNlbnYUd2FzbV9vbl9oZWFkZXJfdmFsdWUAAQNlbnYMd2FzbV9vbl9ib2R5AAEDZW52GHdhc21fb25fbWVzc2FnZV9jb21wbGV0ZQAAA0ZFAwMEAAAFAAAAAAAABQEFAAUFBQAABgAAAAAGBgYGAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAAABAQcAAAUFAwABBAUBcAESEgUDAQACBggBfwFBgNQECwfRBSIGbWVtb3J5AgALX2luaXRpYWxpemUACRlfX2luZGlyZWN0X2Z1bmN0aW9uX3RhYmxlAQALbGxodHRwX2luaXQAChhsbGh0dHBfc2hvdWxkX2tlZXBfYWxpdmUAQQxsbGh0dHBfYWxsb2MADAZtYWxsb2MARgtsbGh0dHBfZnJlZQANBGZyZWUASA9sbGh0dHBfZ2V0X3R5cGUADhVsbGh0dHBfZ2V0X2h0dHBfbWFqb3IADxVsbGh0dHBfZ2V0X2h0dHBfbWlub3IAEBFsbGh0dHBfZ2V0X21ldGhvZAARFmxsaHR0cF9nZXRfc3RhdHVzX2NvZGUAEhJsbGh0dHBfZ2V0X3VwZ3JhZGUAEwxsbGh0dHBfcmVzZXQAFA5sbGh0dHBfZXhlY3V0ZQAVFGxsaHR0cF9zZXR0aW5nc19pbml0ABYNbGxodHRwX2ZpbmlzaAAXDGxsaHR0cF9wYXVzZQAYDWxsaHR0cF9yZXN1bWUAGRtsbGh0dHBfcmVzdW1lX2FmdGVyX3VwZ3JhZGUAGhBsbGh0dHBfZ2V0X2Vycm5vABsXbGxodHRwX2dldF9lcnJvcl9yZWFzb24AHBdsbGh0dHBfc2V0X2Vycm9yX3JlYXNvbgAdFGxsaHR0cF9nZXRfZXJyb3JfcG9zAB4RbGxodHRwX2Vycm5vX25hbWUAHxJsbGh0dHBfbWV0aG9kX25hbWUAIBJsbGh0dHBfc3RhdHVzX25hbWUAIRpsbGh0dHBfc2V0X2xlbmllbnRfaGVhZGVycwAiIWxsaHR0cF9zZXRfbGVuaWVudF9jaHVua2VkX2xlbmd0aAAjHWxsaHR0cF9zZXRfbGVuaWVudF9rZWVwX2FsaXZlACQkbGxodHRwX3NldF9sZW5pZW50X3RyYW5zZmVyX2VuY29kaW5nACUYbGxodHRwX21lc3NhZ2VfbmVlZHNfZW9mAD8JFwEAQQELEQECAwQFCwYHNTk3MS8tJyspCsLgAkUCAAsIABCIgICAAAsZACAAEMKAgIAAGiAAIAI2AjggACABOgAoCxwAIAAgAC8BMiAALQAuIAAQwYCAgAAQgICAgAALKgEBf0HAABDGgICAACIBEMKAgIAAGiABQYCIgIAANgI4IAEgADoAKCABCwoAIAAQyICAgAALBwAgAC0AKAsHACAALQAqCwcAIAAtACsLBwAgAC0AKQsHACAALwEyCwcAIAAtAC4LRQEEfyAAKAIYIQEgAC0ALSECIAAtACghAyAAKAI4IQQgABDCgICAABogACAENgI4IAAgAzoAKCAAIAI6AC0gACABNgIYCxEAIAAgASABIAJqEMOAgIAACxAAIABBAEHcABDMgICAABoLZwEBf0EAIQECQCAAKAIMDQACQAJAAkACQCAALQAvDgMBAAMCCyAAKAI4IgFFDQAgASgCLCIBRQ0AIAAgARGAgICAAAAiAQ0DC0EADwsQyoCAgAAACyAAQcOWgIAANgIQQQ4hAQsgAQseAAJAIAAoAgwNACAAQdGbgIAANgIQIABBFTYCDAsLFgACQCAAKAIMQRVHDQAgAEEANgIMCwsWAAJAIAAoAgxBFkcNACAAQQA2AgwLCwcAIAAoAgwLBwAgACgCEAsJACAAIAE2AhALBwAgACgCFAsiAAJAIABBJEkNABDKgICAAAALIABBAnRBoLOAgABqKAIACyIAAkAgAEEuSQ0AEMqAgIAAAAsgAEECdEGwtICAAGooAgAL7gsBAX9B66iAgAAhAQJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAIABBnH9qDvQDY2IAAWFhYWFhYQIDBAVhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhBgcICQoLDA0OD2FhYWFhEGFhYWFhYWFhYWFhEWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYRITFBUWFxgZGhthYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2YTc4OTphYWFhYWFhYTthYWE8YWFhYT0+P2FhYW
FhYWFhQGFhQWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYUJDREVGR0hJSktMTU5PUFFSU2FhYWFhYWFhVFVWV1hZWlthXF1hYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFeYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhX2BhC0Hhp4CAAA8LQaShgIAADwtBy6yAgAAPC0H+sYCAAA8LQcCkgIAADwtBq6SAgAAPC0GNqICAAA8LQeKmgIAADwtBgLCAgAAPC0G5r4CAAA8LQdekgIAADwtB75+AgAAPC0Hhn4CAAA8LQfqfgIAADwtB8qCAgAAPC0Gor4CAAA8LQa6ygIAADwtBiLCAgAAPC0Hsp4CAAA8LQYKigIAADwtBjp2AgAAPC0HQroCAAA8LQcqjgIAADwtBxbKAgAAPC0HfnICAAA8LQdKcgIAADwtBxKCAgAAPC0HXoICAAA8LQaKfgIAADwtB7a6AgAAPC0GrsICAAA8LQdSlgIAADwtBzK6AgAAPC0H6roCAAA8LQfyrgIAADwtB0rCAgAAPC0HxnYCAAA8LQbuggIAADwtB96uAgAAPC0GQsYCAAA8LQdexgIAADwtBoq2AgAAPC0HUp4CAAA8LQeCrgIAADwtBn6yAgAAPC0HrsYCAAA8LQdWfgIAADwtByrGAgAAPC0HepYCAAA8LQdSegIAADwtB9JyAgAAPC0GnsoCAAA8LQbGdgIAADwtBoJ2AgAAPC0G5sYCAAA8LQbywgIAADwtBkqGAgAAPC0GzpoCAAA8LQemsgIAADwtBrJ6AgAAPC0HUq4CAAA8LQfemgIAADwtBgKaAgAAPC0GwoYCAAA8LQf6egIAADwtBjaOAgAAPC0GJrYCAAA8LQfeigIAADwtBoLGAgAAPC0Gun4CAAA8LQcalgIAADwtB6J6AgAAPC0GTooCAAA8LQcKvgIAADwtBw52AgAAPC0GLrICAAA8LQeGdgIAADwtBja+AgAAPC0HqoYCAAA8LQbStgIAADwtB0q+AgAAPC0HfsoCAAA8LQdKygIAADwtB8LCAgAAPC0GpooCAAA8LQfmjgIAADwtBmZ6AgAAPC0G1rICAAA8LQZuwgIAADwtBkrKAgAAPC0G2q4CAAA8LQcKigIAADwtB+LKAgAAPC0GepYCAAA8LQdCigIAADwtBup6AgAAPC0GBnoCAAA8LEMqAgIAAAAtB1qGAgAAhAQsgAQsWACAAIAAtAC1B/gFxIAFBAEdyOgAtCxkAIAAgAC0ALUH9AXEgAUEAR0EBdHI6AC0LGQAgACAALQAtQfsBcSABQQBHQQJ0cjoALQsZACAAIAAtAC1B9wFxIAFBAEdBA3RyOgAtCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAgAiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCBCIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQcaRgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIwIgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAggiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEH2ioCAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCNCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIMIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABB7ZqAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAjgiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCECIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQZWQgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAI8IgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAhQiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEGqm4CAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCQCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIYIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABB7ZOAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAkQiBEUNACAAIAQRgICAgAAAIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCJCIERQ0AIAAgBBGAgICAAAAhAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIsIgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAigiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEH2iICAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCUCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIcIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABBwpmAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAkgiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCICIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQZSUgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAJMIgRFDQAgACAEEYCAgIAAACEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAlQiBEUNACAAIAQRgICAgAAAIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCWCIERQ0AIAAgBBGAgICAAAAhAwsgAwtFAQF/AkACQCAALwEwQRRxQRRHDQBBASEDIAAtAChBAUYNASAALwEyQeUARiEDDAELIAAtAClBBUYhAwsgACADOgAuQQAL/gEBA39BASEDAkAgAC8BMCIEQQhxDQAgACkDIEIAUiEDCwJAAkAgAC0ALkUNAEEBIQUgAC0AKUEFRg0BQQEhBSAEQcAAcUUgA3FBAUcNAQtBACEFIARBwABxDQBBAiEFIARB//8DcSIDQQhxDQACQCADQYAEcUUNAAJAIAAtAChBAUcNACAALQAtQQpxDQBBBQ8LQQQPCwJAIANBIHENAAJAIAAtAChBAUYNACAALwEyQf//A3EiAEGcf2pB5ABJDQAgAEHMAUYNACAAQbACRg0AQQQhBSAEQShxRQ0CIANBiARxQ
YAERg0CC0EADwtBAEEDIAApAyBQGyEFCyAFC2IBAn9BACEBAkAgAC0AKEEBRg0AIAAvATJB//8DcSICQZx/akHkAEkNACACQcwBRg0AIAJBsAJGDQAgAC8BMCIAQcAAcQ0AQQEhASAAQYgEcUGABEYNACAAQShxRSEBCyABC6cBAQN/AkACQAJAIAAtACpFDQAgAC0AK0UNAEEAIQMgAC8BMCIEQQJxRQ0BDAILQQAhAyAALwEwIgRBAXFFDQELQQEhAyAALQAoQQFGDQAgAC8BMkH//wNxIgVBnH9qQeQASQ0AIAVBzAFGDQAgBUGwAkYNACAEQcAAcQ0AQQAhAyAEQYgEcUGABEYNACAEQShxQQBHIQMLIABBADsBMCAAQQA6AC8gAwuZAQECfwJAAkACQCAALQAqRQ0AIAAtACtFDQBBACEBIAAvATAiAkECcUUNAQwCC0EAIQEgAC8BMCICQQFxRQ0BC0EBIQEgAC0AKEEBRg0AIAAvATJB//8DcSIAQZx/akHkAEkNACAAQcwBRg0AIABBsAJGDQAgAkHAAHENAEEAIQEgAkGIBHFBgARGDQAgAkEocUEARyEBCyABC1kAIABBGGpCADcDACAAQgA3AwAgAEE4akIANwMAIABBMGpCADcDACAAQShqQgA3AwAgAEEgakIANwMAIABBEGpCADcDACAAQQhqQgA3AwAgAEHdATYCHEEAC3sBAX8CQCAAKAIMIgMNAAJAIAAoAgRFDQAgACABNgIECwJAIAAgASACEMSAgIAAIgMNACAAKAIMDwsgACADNgIcQQAhAyAAKAIEIgFFDQAgACABIAIgACgCCBGBgICAAAAiAUUNACAAIAI2AhQgACABNgIMIAEhAwsgAwvk8wEDDn8DfgR/I4CAgIAAQRBrIgMkgICAgAAgASEEIAEhBSABIQYgASEHIAEhCCABIQkgASEKIAEhCyABIQwgASENIAEhDiABIQ8CQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAgACgCHCIQQX9qDt0B2gEB2QECAwQFBgcICQoLDA0O2AEPENcBERLWARMUFRYXGBkaG+AB3wEcHR7VAR8gISIjJCXUASYnKCkqKyzTAdIBLS7RAdABLzAxMjM0NTY3ODk6Ozw9Pj9AQUJDREVG2wFHSElKzwHOAUvNAUzMAU1OT1BRUlNUVVZXWFlaW1xdXl9gYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXp7fH1+f4ABgQGCAYMBhAGFAYYBhwGIAYkBigGLAYwBjQGOAY8BkAGRAZIBkwGUAZUBlgGXAZgBmQGaAZsBnAGdAZ4BnwGgAaEBogGjAaQBpQGmAacBqAGpAaoBqwGsAa0BrgGvAbABsQGyAbMBtAG1AbYBtwHLAcoBuAHJAbkByAG6AbsBvAG9Ab4BvwHAAcEBwgHDAcQBxQHGAQDcAQtBACEQDMYBC0EOIRAMxQELQQ0hEAzEAQtBDyEQDMMBC0EQIRAMwgELQRMhEAzBAQtBFCEQDMABC0EVIRAMvwELQRYhEAy+AQtBFyEQDL0BC0EYIRAMvAELQRkhEAy7AQtBGiEQDLoBC0EbIRAMuQELQRwhEAy4AQtBCCEQDLcBC0EdIRAMtgELQSAhEAy1AQtBHyEQDLQBC0EHIRAMswELQSEhEAyyAQtBIiEQDLEBC0EeIRAMsAELQSMhEAyvAQtBEiEQDK4BC0ERIRAMrQELQSQhEAysAQtBJSEQDKsBC0EmIRAMqgELQSchEAypAQtBwwEhEAyoAQtBKSEQDKcBC0ErIRAMpgELQSwhEAylAQtBLSEQDKQBC0EuIRAMowELQS8hEAyiAQtBxAEhEAyhAQtBMCEQDKABC0E0IRAMnwELQQwhEAyeAQtBMSEQDJ0BC0EyIRAMnAELQTMhEAybAQtBOSEQDJoBC0E1IRAMmQELQcUBIRAMmAELQQshEAyXAQtBOiEQDJYBC0E2IRAMlQELQQohEAyUAQtBNyEQDJMBC0E4IRAMkgELQTwhEAyRAQtBOyEQDJABC0E9IRAMjwELQQkhEAyOAQtBKCEQDI0BC0E+IRAMjAELQT8hEAyLAQtBwAAhEAyKAQtBwQAhEAyJAQtBwgAhEAyIAQtBwwAhEAyHAQtBxAAhEAyGAQtBxQAhEAyFAQtBxgAhEAyEAQtBKiEQDIMBC0HHACEQDIIBC0HIACEQDIEBC0HJACEQDIABC0HKACEQDH8LQcsAIRAMfgtBzQAhEAx9C0HMACEQDHwLQc4AIRAMewtBzwAhEAx6C0HQACEQDHkLQdEAIRAMeAtB0gAhEAx3C0HTACEQDHYLQdQAIRAMdQtB1gAhEAx0C0HVACEQDHMLQQYhEAxyC0HXACEQDHELQQUhEAxwC0HYACEQDG8LQQQhEAxuC0HZACEQDG0LQdoAIRAMbAtB2wAhEAxrC0HcACEQDGoLQQMhEAxpC0HdACEQDGgLQd4AIRAMZwtB3wAhEAxmC0HhACEQDGULQeAAIRAMZAtB4gAhEAxjC0HjACEQDGILQQIhEAxhC0HkACEQDGALQeUAIRAMXwtB5gAhEAxeC0HnACEQDF0LQegAIRAMXAtB6QAhEAxbC0HqACEQDFoLQesAIRAMWQtB7AAhEAxYC0HtACEQDFcLQe4AIRAMVgtB7wAhEAxVC0HwACEQDFQLQfEAIRAMUwtB8gAhEAxSC0HzACEQDFELQfQAIRAMUAtB9QAhEAxPC0H2ACEQDE4LQfcAIRAMTQtB+AAhEAxMC0H5ACEQDEsLQfoAIRAMSgtB+wAhEAxJC0H8ACEQDEgLQf0AIRAMRwtB/gAhEAxGC0H/ACEQDEULQYABIRAMRAtBgQEhEAxDC0GCASEQDEILQYMBIRAMQQtBhAEhEAxAC0GFASEQDD8LQYYBIRAMPgtBhwEhEAw9C0GIASEQDDwLQYkBIRAMOwtBigEhEAw6C0GLASEQDDkLQYwB
IRAMOAtBjQEhEAw3C0GOASEQDDYLQY8BIRAMNQtBkAEhEAw0C0GRASEQDDMLQZIBIRAMMgtBkwEhEAwxC0GUASEQDDALQZUBIRAMLwtBlgEhEAwuC0GXASEQDC0LQZgBIRAMLAtBmQEhEAwrC0GaASEQDCoLQZsBIRAMKQtBnAEhEAwoC0GdASEQDCcLQZ4BIRAMJgtBnwEhEAwlC0GgASEQDCQLQaEBIRAMIwtBogEhEAwiC0GjASEQDCELQaQBIRAMIAtBpQEhEAwfC0GmASEQDB4LQacBIRAMHQtBqAEhEAwcC0GpASEQDBsLQaoBIRAMGgtBqwEhEAwZC0GsASEQDBgLQa0BIRAMFwtBrgEhEAwWC0EBIRAMFQtBrwEhEAwUC0GwASEQDBMLQbEBIRAMEgtBswEhEAwRC0GyASEQDBALQbQBIRAMDwtBtQEhEAwOC0G2ASEQDA0LQbcBIRAMDAtBuAEhEAwLC0G5ASEQDAoLQboBIRAMCQtBuwEhEAwIC0HGASEQDAcLQbwBIRAMBgtBvQEhEAwFC0G+ASEQDAQLQb8BIRAMAwtBwAEhEAwCC0HCASEQDAELQcEBIRALA0ACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAQDscBAAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxweHyAhIyUoP0BBREVGR0hJSktMTU9QUVJT3gNXWVtcXWBiZWZnaGlqa2xtb3BxcnN0dXZ3eHl6e3x9foABggGFAYYBhwGJAYsBjAGNAY4BjwGQAZEBlAGVAZYBlwGYAZkBmgGbAZwBnQGeAZ8BoAGhAaIBowGkAaUBpgGnAagBqQGqAasBrAGtAa4BrwGwAbEBsgGzAbQBtQG2AbcBuAG5AboBuwG8Ab0BvgG/AcABwQHCAcMBxAHFAcYBxwHIAckBygHLAcwBzQHOAc8B0AHRAdIB0wHUAdUB1gHXAdgB2QHaAdsB3AHdAd4B4AHhAeIB4wHkAeUB5gHnAegB6QHqAesB7AHtAe4B7wHwAfEB8gHzAZkCpAKwAv4C/gILIAEiBCACRw3zAUHdASEQDP8DCyABIhAgAkcN3QFBwwEhEAz+AwsgASIBIAJHDZABQfcAIRAM/QMLIAEiASACRw2GAUHvACEQDPwDCyABIgEgAkcNf0HqACEQDPsDCyABIgEgAkcNe0HoACEQDPoDCyABIgEgAkcNeEHmACEQDPkDCyABIgEgAkcNGkEYIRAM+AMLIAEiASACRw0UQRIhEAz3AwsgASIBIAJHDVlBxQAhEAz2AwsgASIBIAJHDUpBPyEQDPUDCyABIgEgAkcNSEE8IRAM9AMLIAEiASACRw1BQTEhEAzzAwsgAC0ALkEBRg3rAwyHAgsgACABIgEgAhDAgICAAEEBRw3mASAAQgA3AyAM5wELIAAgASIBIAIQtICAgAAiEA3nASABIQEM9QILAkAgASIBIAJHDQBBBiEQDPADCyAAIAFBAWoiASACELuAgIAAIhAN6AEgASEBDDELIABCADcDIEESIRAM1QMLIAEiECACRw0rQR0hEAztAwsCQCABIgEgAkYNACABQQFqIQFBECEQDNQDC0EHIRAM7AMLIABCACAAKQMgIhEgAiABIhBrrSISfSITIBMgEVYbNwMgIBEgElYiFEUN5QFBCCEQDOsDCwJAIAEiASACRg0AIABBiYCAgAA2AgggACABNgIEIAEhAUEUIRAM0gMLQQkhEAzqAwsgASEBIAApAyBQDeQBIAEhAQzyAgsCQCABIgEgAkcNAEELIRAM6QMLIAAgAUEBaiIBIAIQtoCAgAAiEA3lASABIQEM8gILIAAgASIBIAIQuICAgAAiEA3lASABIQEM8gILIAAgASIBIAIQuICAgAAiEA3mASABIQEMDQsgACABIgEgAhC6gICAACIQDecBIAEhAQzwAgsCQCABIgEgAkcNAEEPIRAM5QMLIAEtAAAiEEE7Rg0IIBBBDUcN6AEgAUEBaiEBDO8CCyAAIAEiASACELqAgIAAIhAN6AEgASEBDPICCwNAAkAgAS0AAEHwtYCAAGotAAAiEEEBRg0AIBBBAkcN6wEgACgCBCEQIABBADYCBCAAIBAgAUEBaiIBELmAgIAAIhAN6gEgASEBDPQCCyABQQFqIgEgAkcNAAtBEiEQDOIDCyAAIAEiASACELqAgIAAIhAN6QEgASEBDAoLIAEiASACRw0GQRshEAzgAwsCQCABIgEgAkcNAEEWIRAM4AMLIABBioCAgAA2Agg
gACABNgIEIAAgASACELiAgIAAIhAN6gEgASEBQSAhEAzGAwsCQCABIgEgAkYNAANAAkAgAS0AAEHwt4CAAGotAAAiEEECRg0AAkAgEEF/ag4E5QHsAQDrAewBCyABQQFqIQFBCCEQDMgDCyABQQFqIgEgAkcNAAtBFSEQDN8DC0EVIRAM3gMLA0ACQCABLQAAQfC5gIAAai0AACIQQQJGDQAgEEF/ag4E3gHsAeAB6wHsAQsgAUEBaiIBIAJHDQALQRghEAzdAwsCQCABIgEgAkYNACAAQYuAgIAANgIIIAAgATYCBCABIQFBByEQDMQDC0EZIRAM3AMLIAFBAWohAQwCCwJAIAEiFCACRw0AQRohEAzbAwsgFCEBAkAgFC0AAEFzag4U3QLuAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gIA7gILQQAhECAAQQA2AhwgAEGvi4CAADYCECAAQQI2AgwgACAUQQFqNgIUDNoDCwJAIAEtAAAiEEE7Rg0AIBBBDUcN6AEgAUEBaiEBDOUCCyABQQFqIQELQSIhEAy/AwsCQCABIhAgAkcNAEEcIRAM2AMLQgAhESAQIQEgEC0AAEFQag435wHmAQECAwQFBgcIAAAAAAAAAAkKCwwNDgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADxAREhMUAAtBHiEQDL0DC0ICIREM5QELQgMhEQzkAQtCBCERDOMBC0IFIREM4gELQgYhEQzhAQtCByERDOABC0IIIREM3wELQgkhEQzeAQtCCiERDN0BC0ILIREM3AELQgwhEQzbAQtCDSERDNoBC0IOIREM2QELQg8hEQzYAQtCCiERDNcBC0ILIREM1gELQgwhEQzVAQtCDSERDNQBC0IOIREM0wELQg8hEQzSAQtCACERAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAQLQAAQVBqDjflAeQBAAECAwQFBgfmAeYB5gHmAeYB5gHmAQgJCgsMDeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gEODxAREhPmAQtCAiERDOQBC0IDIREM4wELQgQhEQziAQtCBSERDOEBC0IGIREM4AELQgchEQzfAQtCCCERDN4BC0IJIREM3QELQgohEQzcAQtCCyERDNsBC0IMIREM2gELQg0hEQzZAQtCDiERDNgBC0IPIREM1wELQgohEQzWAQtCCyERDNUBC0IMIREM1AELQg0hEQzTAQtCDiERDNIBC0IPIREM0QELIABCACAAKQMgIhEgAiABIhBrrSISfSITIBMgEVYbNwMgIBEgElYiFEUN0gFBHyEQDMADCwJAIAEiASACRg0AIABBiYCAgAA2AgggACABNgIEIAEhAUEkIRAMpwMLQSAhEAy/AwsgACABIhAgAhC+gICAAEF/ag4FtgEAxQIB0QHSAQtBESEQDKQDCyAAQQE6AC8gECEBDLsDCyABIgEgAkcN0gFBJCEQDLsDCyABIg0gAkcNHkHGACEQDLoDCyAAIAEiASACELKAgIAAIhAN1AEgASEBDLUBCyABIhAgAkcNJkHQACEQDLgDCwJAIAEiASACRw0AQSghEAy4AwsgAEEANgIEIABBjICAgAA2AgggACABIAEQsYCAgAAiEA3TASABIQEM2AELAkAgASIQIAJHDQBBKSEQDLcDCyAQLQAAIgFBIEYNFCABQQlHDdMBIBBBAWohAQwVCwJAIAEiASACRg0AIAFBAWohAQwXC0EqIRAMtQMLAkAgASIQIAJHDQBBKyEQDLUDCwJAIBAtAAAiAUEJRg0AIAFBIEcN1QELIAAtACxBCEYN0wEgECEBDJEDCwJAIAEiASACRw0AQSwhEAy0AwsgAS0AAEEKRw3VASABQQFqIQEMyQILIAEiDiACRw3VAUEvIRAMsgMLA0ACQCABLQAAIhBBIEYNAAJAIBBBdmoOBADcAdwBANoBCyABIQEM4AELIAFBAWoiASACRw0AC0ExIRAMsQMLQTIhECABIhQgAkYNsAMgAiAUayAAKAIAIgFqIRUgFCABa0EDaiEWAkADQCAULQAAIhdBIHIgFyAXQb9/akH/AXFBGkkbQf8BcSABQfC7gIAAai0AAEcNAQJAIAFBA0cNAEEGIQEMlgMLIAFBAWohASAUQQFqIhQgAkcNAAsgACAVNgIADLEDCyAAQQA2AgAgFCEBDNkBC0EzIRAgASIUIAJGDa8DIAIgFGsgACgCACIBaiEVIBQgAWtBCGohFgJAA0AgFC0AACIXQSByIBcgF0G/f2pB/wFxQRpJG0H/AXEgAUH0u4CAAGotAABHDQECQCABQQhHDQBBBSEBDJUDCyABQQFqIQEgFEEBaiIUIAJHDQALIAAgFTYCAAywAwsgAEEANgIAIBQhAQzYAQtBNCEQIAEiFCACRg2uAyACIBRrIAAoAgAiAWohFSAUIAFrQQVqIRYCQANAIBQtAAAiF0EgciAXIBdBv39qQf8BcUEaSRtB/wFxIAFB0MKAgABqLQAARw0BAkAgAUEFRw0AQQchAQyUAwsgAUEBaiEBIBRBAWoiFCACRw0ACyAAIBU2AgAMrwMLIABBADYCACAUIQEM1wELAkAgASIBIAJGDQADQAJAIAEtAABBgL6AgABqLQAAIhBBAUYNACAQQQJGDQogASEBDN0BCyABQQFqIgEgAkcNAAtBMCEQDK4DC0EwIRAMrQMLAkAgASIBIAJGDQADQAJAIAEtAAAiEEEgRg0AIBBBdmoOBNkB2gHaAdkB2gELIAFBAWoiASACRw0AC0E4IRAMrQMLQTghEAysAwsDQAJAIAEtAAAiEEEgRg0AIBBBCUcNAwsgAUEBaiIBIAJHDQALQTwhEAyrAwsDQAJAIAEtAAAiEEEgRg0AAkACQCAQQXZqDgTaAQEB2gEACyAQQSxGDdsBCyABIQEMBAsgAUEBaiIBIAJHDQALQT8hEAyqAwsgASEBDNsBC0HAACEQIAEiFCACRg2oAyACIBRrIAAoAgAiAWohFiAUIAFrQQZqIRcCQANAIBQtAABBIHIgAUGAwICAAGotAABHDQEgAUEGRg2OAyABQQFqIQEgFEEBaiIUIAJHDQALIAAgFjYCAAypAwsgAEEANgIAIBQhAQtBNiEQDI4DCwJAIAEiDyACRw0AQcEAIRAMpwMLIABBjICAgAA2AgggACAPNgIEIA8hASAALQAsQX9qDgTNAdUB1wHZAYcDCyABQQFqIQEMzAELAkAgASIBIAJGDQADQAJAIAEtAAAiEEEgciAQIBBBv39qQf8BcUEaSRtB/wFxIhBBCUYNACAQQSBGDQACQAJAAkACQCAQQZ1/ag4TAAMDAwMDAwMBAwMDAwMDAwMDAgMLIAFBAWohAUExIRAMkQMLIAFBAWohAUEyIRAMkAMLIAFBAWohAUEzIRAMjwMLIAEhAQzQAQsgAUEBaiIBIAJHDQALQTUhEAylAwtBNSEQDK
QDCwJAIAEiASACRg0AA0ACQCABLQAAQYC8gIAAai0AAEEBRg0AIAEhAQzTAQsgAUEBaiIBIAJHDQALQT0hEAykAwtBPSEQDKMDCyAAIAEiASACELCAgIAAIhAN1gEgASEBDAELIBBBAWohAQtBPCEQDIcDCwJAIAEiASACRw0AQcIAIRAMoAMLAkADQAJAIAEtAABBd2oOGAAC/gL+AoQD/gL+Av4C/gL+Av4C/gL+Av4C/gL+Av4C/gL+Av4C/gL+Av4CAP4CCyABQQFqIgEgAkcNAAtBwgAhEAygAwsgAUEBaiEBIAAtAC1BAXFFDb0BIAEhAQtBLCEQDIUDCyABIgEgAkcN0wFBxAAhEAydAwsDQAJAIAEtAABBkMCAgABqLQAAQQFGDQAgASEBDLcCCyABQQFqIgEgAkcNAAtBxQAhEAycAwsgDS0AACIQQSBGDbMBIBBBOkcNgQMgACgCBCEBIABBADYCBCAAIAEgDRCvgICAACIBDdABIA1BAWohAQyzAgtBxwAhECABIg0gAkYNmgMgAiANayAAKAIAIgFqIRYgDSABa0EFaiEXA0AgDS0AACIUQSByIBQgFEG/f2pB/wFxQRpJG0H/AXEgAUGQwoCAAGotAABHDYADIAFBBUYN9AIgAUEBaiEBIA1BAWoiDSACRw0ACyAAIBY2AgAMmgMLQcgAIRAgASINIAJGDZkDIAIgDWsgACgCACIBaiEWIA0gAWtBCWohFwNAIA0tAAAiFEEgciAUIBRBv39qQf8BcUEaSRtB/wFxIAFBlsKAgABqLQAARw3/AgJAIAFBCUcNAEECIQEM9QILIAFBAWohASANQQFqIg0gAkcNAAsgACAWNgIADJkDCwJAIAEiDSACRw0AQckAIRAMmQMLAkACQCANLQAAIgFBIHIgASABQb9/akH/AXFBGkkbQf8BcUGSf2oOBwCAA4ADgAOAA4ADAYADCyANQQFqIQFBPiEQDIADCyANQQFqIQFBPyEQDP8CC0HKACEQIAEiDSACRg2XAyACIA1rIAAoAgAiAWohFiANIAFrQQFqIRcDQCANLQAAIhRBIHIgFCAUQb9/akH/AXFBGkkbQf8BcSABQaDCgIAAai0AAEcN/QIgAUEBRg3wAiABQQFqIQEgDUEBaiINIAJHDQALIAAgFjYCAAyXAwtBywAhECABIg0gAkYNlgMgAiANayAAKAIAIgFqIRYgDSABa0EOaiEXA0AgDS0AACIUQSByIBQgFEG/f2pB/wFxQRpJG0H/AXEgAUGiwoCAAGotAABHDfwCIAFBDkYN8AIgAUEBaiEBIA1BAWoiDSACRw0ACyAAIBY2AgAMlgMLQcwAIRAgASINIAJGDZUDIAIgDWsgACgCACIBaiEWIA0gAWtBD2ohFwNAIA0tAAAiFEEgciAUIBRBv39qQf8BcUEaSRtB/wFxIAFBwMKAgABqLQAARw37AgJAIAFBD0cNAEEDIQEM8QILIAFBAWohASANQQFqIg0gAkcNAAsgACAWNgIADJUDC0HNACEQIAEiDSACRg2UAyACIA1rIAAoAgAiAWohFiANIAFrQQVqIRcDQCANLQAAIhRBIHIgFCAUQb9/akH/AXFBGkkbQf8BcSABQdDCgIAAai0AAEcN+gICQCABQQVHDQBBBCEBDPACCyABQQFqIQEgDUEBaiINIAJHDQALIAAgFjYCAAyUAwsCQCABIg0gAkcNAEHOACEQDJQDCwJAAkACQAJAIA0tAAAiAUEgciABIAFBv39qQf8BcUEaSRtB/wFxQZ1/ag4TAP0C/QL9Av0C/QL9Av0C/QL9Av0C/QL9AgH9Av0C/QICA/0CCyANQQFqIQFBwQAhEAz9AgsgDUEBaiEBQcIAIRAM/AILIA1BAWohAUHDACEQDPsCCyANQQFqIQFBxAAhEAz6AgsCQCABIgEgAkYNACAAQY2AgIAANgIIIAAgATYCBCABIQFBxQAhEAz6AgtBzwAhEAySAwsgECEBAkACQCAQLQAAQXZqDgQBqAKoAgCoAgsgEEEBaiEBC0EnIRAM+AILAkAgASIBIAJHDQBB0QAhEAyRAwsCQCABLQAAQSBGDQAgASEBDI0BCyABQQFqIQEgAC0ALUEBcUUNxwEgASEBDIwBCyABIhcgAkcNyAFB0gAhEAyPAwtB0wAhECABIhQgAkYNjgMgAiAUayAAKAIAIgFqIRYgFCABa0EBaiEXA0AgFC0AACABQdbCgIAAai0AAEcNzAEgAUEBRg3HASABQQFqIQEgFEEBaiIUIAJHDQALIAAgFjYCAAyOAwsCQCABIgEgAkcNAEHVACEQDI4DCyABLQAAQQpHDcwBIAFBAWohAQzHAQsCQCABIgEgAkcNAEHWACEQDI0DCwJAAkAgAS0AAEF2ag4EAM0BzQEBzQELIAFBAWohAQzHAQsgAUEBaiEBQcoAIRAM8wILIAAgASIBIAIQroCAgAAiEA3LASABIQFBzQAhEAzyAgsgAC0AKUEiRg2FAwymAgsCQCABIgEgAkcNAEHbACEQDIoDC0EAIRRBASEXQQEhFkEAIRACQAJAAkACQAJAAkACQAJAAkAgAS0AAEFQag4K1AHTAQABAgMEBQYI1QELQQIhEAwGC0EDIRAMBQtBBCEQDAQLQQUhEAwDC0EGIRAMAgtBByEQDAELQQghEAtBACEXQQAhFkEAIRQMzAELQQkhEEEBIRRBACEXQQAhFgzLAQsCQCABIgEgAkcNAEHdACEQDIkDCyABLQAAQS5HDcwBIAFBAWohAQymAgsgASIBIAJHDcwBQd8AIRAMhwMLAkAgASIBIAJGDQAgAEGOgICAADYCCCAAIAE2AgQgASEBQdAAIRAM7gILQeAAIRAMhgMLQeEAIRAgASIBIAJGDYUDIAIgAWsgACgCACIUaiEWIAEgFGtBA2ohFwNAIAEtAAAgFEHiwoCAAGotAABHDc0BIBRBA0YNzAEgFEEBaiEUIAFBAWoiASACRw0ACyAAIBY2AgAMhQMLQeIAIRAgASIBIAJGDYQDIAIgAWsgACgCACIUaiEWIAEgFGtBAmohFwNAIAEtAAAgFEHmwoCAAGotAABHDcwBIBRBAkYNzgEgFEEBaiEUIAFBAWoiASACRw0ACyAAIBY2AgAMhAMLQeMAIRAgASIBIAJGDYMDIAIgAWsgACgCACIUaiEWIAEgFGtBA2ohFwNAIAEtAAAgFEHpwoCAAGotAABHDcsBIBRBA0YNzgEgFEEBaiEUIAFBAWoiASACRw0ACyAAIBY2AgAMgwMLAkAgASIBIAJHDQBB5QAhEAyDAwsgACABQQFqIgEgAhCogICAACIQDc0BIAEhAUHWACEQDOkCCwJAIAEiASACRg0AA0ACQCABLQAAIhBBIEYNAAJAAkACQCAQQbh/ag4LAAHPAc8BzwHPAc8BzwHPAc8BAs8BCyABQQFqIQFB0gAhEAztAgsgAUEBaiEBQdMAIRAM7AILIAFBAWohAUHUACEQDOsCCyABQQFqIgEgAkcNAAtB5AAhEAyCAwtB5AAhEAyBA
wsDQAJAIAEtAABB8MKAgABqLQAAIhBBAUYNACAQQX5qDgPPAdAB0QHSAQsgAUEBaiIBIAJHDQALQeYAIRAMgAMLAkAgASIBIAJGDQAgAUEBaiEBDAMLQecAIRAM/wILA0ACQCABLQAAQfDEgIAAai0AACIQQQFGDQACQCAQQX5qDgTSAdMB1AEA1QELIAEhAUHXACEQDOcCCyABQQFqIgEgAkcNAAtB6AAhEAz+AgsCQCABIgEgAkcNAEHpACEQDP4CCwJAIAEtAAAiEEF2ag4augHVAdUBvAHVAdUB1QHVAdUB1QHVAdUB1QHVAdUB1QHVAdUB1QHVAdUB1QHKAdUB1QEA0wELIAFBAWohAQtBBiEQDOMCCwNAAkAgAS0AAEHwxoCAAGotAABBAUYNACABIQEMngILIAFBAWoiASACRw0AC0HqACEQDPsCCwJAIAEiASACRg0AIAFBAWohAQwDC0HrACEQDPoCCwJAIAEiASACRw0AQewAIRAM+gILIAFBAWohAQwBCwJAIAEiASACRw0AQe0AIRAM+QILIAFBAWohAQtBBCEQDN4CCwJAIAEiFCACRw0AQe4AIRAM9wILIBQhAQJAAkACQCAULQAAQfDIgIAAai0AAEF/ag4H1AHVAdYBAJwCAQLXAQsgFEEBaiEBDAoLIBRBAWohAQzNAQtBACEQIABBADYCHCAAQZuSgIAANgIQIABBBzYCDCAAIBRBAWo2AhQM9gILAkADQAJAIAEtAABB8MiAgABqLQAAIhBBBEYNAAJAAkAgEEF/ag4H0gHTAdQB2QEABAHZAQsgASEBQdoAIRAM4AILIAFBAWohAUHcACEQDN8CCyABQQFqIgEgAkcNAAtB7wAhEAz2AgsgAUEBaiEBDMsBCwJAIAEiFCACRw0AQfAAIRAM9QILIBQtAABBL0cN1AEgFEEBaiEBDAYLAkAgASIUIAJHDQBB8QAhEAz0AgsCQCAULQAAIgFBL0cNACAUQQFqIQFB3QAhEAzbAgsgAUF2aiIEQRZLDdMBQQEgBHRBiYCAAnFFDdMBDMoCCwJAIAEiASACRg0AIAFBAWohAUHeACEQDNoCC0HyACEQDPICCwJAIAEiFCACRw0AQfQAIRAM8gILIBQhAQJAIBQtAABB8MyAgABqLQAAQX9qDgPJApQCANQBC0HhACEQDNgCCwJAIAEiFCACRg0AA0ACQCAULQAAQfDKgIAAai0AACIBQQNGDQACQCABQX9qDgLLAgDVAQsgFCEBQd8AIRAM2gILIBRBAWoiFCACRw0AC0HzACEQDPECC0HzACEQDPACCwJAIAEiASACRg0AIABBj4CAgAA2AgggACABNgIEIAEhAUHgACEQDNcCC0H1ACEQDO8CCwJAIAEiASACRw0AQfYAIRAM7wILIABBj4CAgAA2AgggACABNgIEIAEhAQtBAyEQDNQCCwNAIAEtAABBIEcNwwIgAUEBaiIBIAJHDQALQfcAIRAM7AILAkAgASIBIAJHDQBB+AAhEAzsAgsgAS0AAEEgRw3OASABQQFqIQEM7wELIAAgASIBIAIQrICAgAAiEA3OASABIQEMjgILAkAgASIEIAJHDQBB+gAhEAzqAgsgBC0AAEHMAEcN0QEgBEEBaiEBQRMhEAzPAQsCQCABIgQgAkcNAEH7ACEQDOkCCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRADQCAELQAAIAFB8M6AgABqLQAARw3QASABQQVGDc4BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQfsAIRAM6AILAkAgASIEIAJHDQBB/AAhEAzoAgsCQAJAIAQtAABBvX9qDgwA0QHRAdEB0QHRAdEB0QHRAdEB0QEB0QELIARBAWohAUHmACEQDM8CCyAEQQFqIQFB5wAhEAzOAgsCQCABIgQgAkcNAEH9ACEQDOcCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHtz4CAAGotAABHDc8BIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEH9ACEQDOcCCyAAQQA2AgAgEEEBaiEBQRAhEAzMAQsCQCABIgQgAkcNAEH+ACEQDOYCCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRACQANAIAQtAAAgAUH2zoCAAGotAABHDc4BIAFBBUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEH+ACEQDOYCCyAAQQA2AgAgEEEBaiEBQRYhEAzLAQsCQCABIgQgAkcNAEH/ACEQDOUCCyACIARrIAAoAgAiAWohFCAEIAFrQQNqIRACQANAIAQtAAAgAUH8zoCAAGotAABHDc0BIAFBA0YNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEH/ACEQDOUCCyAAQQA2AgAgEEEBaiEBQQUhEAzKAQsCQCABIgQgAkcNAEGAASEQDOQCCyAELQAAQdkARw3LASAEQQFqIQFBCCEQDMkBCwJAIAEiBCACRw0AQYEBIRAM4wILAkACQCAELQAAQbJ/ag4DAMwBAcwBCyAEQQFqIQFB6wAhEAzKAgsgBEEBaiEBQewAIRAMyQILAkAgASIEIAJHDQBBggEhEAziAgsCQAJAIAQtAABBuH9qDggAywHLAcsBywHLAcsBAcsBCyAEQQFqIQFB6gAhEAzJAgsgBEEBaiEBQe0AIRAMyAILAkAgASIEIAJHDQBBgwEhEAzhAgsgAiAEayAAKAIAIgFqIRAgBCABa0ECaiEUAkADQCAELQAAIAFBgM+AgABqLQAARw3JASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBA2AgBBgwEhEAzhAgtBACEQIABBADYCACAUQQFqIQEMxgELAkAgASIEIAJHDQBBhAEhEAzgAgsgAiAEayAAKAIAIgFqIRQgBCABa0EEaiEQAkADQCAELQAAIAFBg8+AgABqLQAARw3IASABQQRGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBhAEhEAzgAgsgAEEANgIAIBBBAWohAUEjIRAMxQELAkAgASIEIAJHDQBBhQEhEAzfAgsCQAJAIAQtAABBtH9qDggAyAHIAcgByAHIAcgBAcgBCyAEQQFqIQFB7wAhEAzGAgsgBEEBaiEBQfAAIRAMxQILAkAgASIEIAJHDQBBhgEhEAzeAgsgBC0AAEHFAEcNxQEgBEEBaiEBDIMCCwJAIAEiBCACRw0AQYcBIRAM3QILIAIgBGsgACgCACIBaiEUIAQgAWtBA2ohEAJAA0AgBC0AACABQYjPgIAAai0AAEcNxQEgAUEDRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQYcBIRAM3QILIABBADYCACAQQQFqIQFBLSEQDMIBCwJAIAEiBCACRw0AQYgBIRAM3AILIAIgBGsgACgCACIBaiEUIAQgAWtBCGohEAJAA0AgBC0AACABQdDPgIAAai0AAEcNxAEgAUEIRg0B
IAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQYgBIRAM3AILIABBADYCACAQQQFqIQFBKSEQDMEBCwJAIAEiASACRw0AQYkBIRAM2wILQQEhECABLQAAQd8ARw3AASABQQFqIQEMgQILAkAgASIEIAJHDQBBigEhEAzaAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQA0AgBC0AACABQYzPgIAAai0AAEcNwQEgAUEBRg2vAiABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGKASEQDNkCCwJAIAEiBCACRw0AQYsBIRAM2QILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQY7PgIAAai0AAEcNwQEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQYsBIRAM2QILIABBADYCACAQQQFqIQFBAiEQDL4BCwJAIAEiBCACRw0AQYwBIRAM2AILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQfDPgIAAai0AAEcNwAEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQYwBIRAM2AILIABBADYCACAQQQFqIQFBHyEQDL0BCwJAIAEiBCACRw0AQY0BIRAM1wILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQfLPgIAAai0AAEcNvwEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQY0BIRAM1wILIABBADYCACAQQQFqIQFBCSEQDLwBCwJAIAEiBCACRw0AQY4BIRAM1gILAkACQCAELQAAQbd/ag4HAL8BvwG/Ab8BvwEBvwELIARBAWohAUH4ACEQDL0CCyAEQQFqIQFB+QAhEAy8AgsCQCABIgQgAkcNAEGPASEQDNUCCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRACQANAIAQtAAAgAUGRz4CAAGotAABHDb0BIAFBBUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGPASEQDNUCCyAAQQA2AgAgEEEBaiEBQRghEAy6AQsCQCABIgQgAkcNAEGQASEQDNQCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUGXz4CAAGotAABHDbwBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGQASEQDNQCCyAAQQA2AgAgEEEBaiEBQRchEAy5AQsCQCABIgQgAkcNAEGRASEQDNMCCyACIARrIAAoAgAiAWohFCAEIAFrQQZqIRACQANAIAQtAAAgAUGaz4CAAGotAABHDbsBIAFBBkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGRASEQDNMCCyAAQQA2AgAgEEEBaiEBQRUhEAy4AQsCQCABIgQgAkcNAEGSASEQDNICCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRACQANAIAQtAAAgAUGhz4CAAGotAABHDboBIAFBBUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGSASEQDNICCyAAQQA2AgAgEEEBaiEBQR4hEAy3AQsCQCABIgQgAkcNAEGTASEQDNECCyAELQAAQcwARw24ASAEQQFqIQFBCiEQDLYBCwJAIAQgAkcNAEGUASEQDNACCwJAAkAgBC0AAEG/f2oODwC5AbkBuQG5AbkBuQG5AbkBuQG5AbkBuQG5AQG5AQsgBEEBaiEBQf4AIRAMtwILIARBAWohAUH/ACEQDLYCCwJAIAQgAkcNAEGVASEQDM8CCwJAAkAgBC0AAEG/f2oOAwC4AQG4AQsgBEEBaiEBQf0AIRAMtgILIARBAWohBEGAASEQDLUCCwJAIAQgAkcNAEGWASEQDM4CCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRACQANAIAQtAAAgAUGnz4CAAGotAABHDbYBIAFBAUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGWASEQDM4CCyAAQQA2AgAgEEEBaiEBQQshEAyzAQsCQCAEIAJHDQBBlwEhEAzNAgsCQAJAAkACQCAELQAAQVNqDiMAuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AQG4AbgBuAG4AbgBArgBuAG4AQO4AQsgBEEBaiEBQfsAIRAMtgILIARBAWohAUH8ACEQDLUCCyAEQQFqIQRBgQEhEAy0AgsgBEEBaiEEQYIBIRAMswILAkAgBCACRw0AQZgBIRAMzAILIAIgBGsgACgCACIBaiEUIAQgAWtBBGohEAJAA0AgBC0AACABQanPgIAAai0AAEcNtAEgAUEERg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZgBIRAMzAILIABBADYCACAQQQFqIQFBGSEQDLEBCwJAIAQgAkcNAEGZASEQDMsCCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRACQANAIAQtAAAgAUGuz4CAAGotAABHDbMBIAFBBUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGZASEQDMsCCyAAQQA2AgAgEEEBaiEBQQYhEAywAQsCQCAEIAJHDQBBmgEhEAzKAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFBtM+AgABqLQAARw2yASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBmgEhEAzKAgsgAEEANgIAIBBBAWohAUEcIRAMrwELAkAgBCACRw0AQZsBIRAMyQILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQbbPgIAAai0AAEcNsQEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZsBIRAMyQILIABBADYCACAQQQFqIQFBJyEQDK4BCwJAIAQgAkcNAEGcASEQDMgCCwJAAkAgBC0AAEGsf2oOAgABsQELIARBAWohBEGGASEQDK8CCyAEQQFqIQRBhwEhEAyuAgsCQCAEIAJHDQBBnQEhEAzHAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFBuM+AgABqLQAARw2vASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBnQEhEAzHAgsgAEEANgIAIBBBAWohAUEmIRAMrAELAkAgBCACRw0AQZ4BIRAMxgILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQbrPgIAAai0AAEcNrgEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZ4BIRAMxgILIABBADYCACAQQQFqIQFBAyEQDKsBCwJAIAQgAkcNAEGfASEQDMUCCyACIARrIAAoAgAiAWohFCAEIAF
rQQJqIRACQANAIAQtAAAgAUHtz4CAAGotAABHDa0BIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGfASEQDMUCCyAAQQA2AgAgEEEBaiEBQQwhEAyqAQsCQCAEIAJHDQBBoAEhEAzEAgsgAiAEayAAKAIAIgFqIRQgBCABa0EDaiEQAkADQCAELQAAIAFBvM+AgABqLQAARw2sASABQQNGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBoAEhEAzEAgsgAEEANgIAIBBBAWohAUENIRAMqQELAkAgBCACRw0AQaEBIRAMwwILAkACQCAELQAAQbp/ag4LAKwBrAGsAawBrAGsAawBrAGsAQGsAQsgBEEBaiEEQYsBIRAMqgILIARBAWohBEGMASEQDKkCCwJAIAQgAkcNAEGiASEQDMICCyAELQAAQdAARw2pASAEQQFqIQQM6QELAkAgBCACRw0AQaMBIRAMwQILAkACQCAELQAAQbd/ag4HAaoBqgGqAaoBqgEAqgELIARBAWohBEGOASEQDKgCCyAEQQFqIQFBIiEQDKYBCwJAIAQgAkcNAEGkASEQDMACCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRACQANAIAQtAAAgAUHAz4CAAGotAABHDagBIAFBAUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGkASEQDMACCyAAQQA2AgAgEEEBaiEBQR0hEAylAQsCQCAEIAJHDQBBpQEhEAy/AgsCQAJAIAQtAABBrn9qDgMAqAEBqAELIARBAWohBEGQASEQDKYCCyAEQQFqIQFBBCEQDKQBCwJAIAQgAkcNAEGmASEQDL4CCwJAAkACQAJAAkAgBC0AAEG/f2oOFQCqAaoBqgGqAaoBqgGqAaoBqgGqAQGqAaoBAqoBqgEDqgGqAQSqAQsgBEEBaiEEQYgBIRAMqAILIARBAWohBEGJASEQDKcCCyAEQQFqIQRBigEhEAymAgsgBEEBaiEEQY8BIRAMpQILIARBAWohBEGRASEQDKQCCwJAIAQgAkcNAEGnASEQDL0CCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHtz4CAAGotAABHDaUBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGnASEQDL0CCyAAQQA2AgAgEEEBaiEBQREhEAyiAQsCQCAEIAJHDQBBqAEhEAy8AgsgAiAEayAAKAIAIgFqIRQgBCABa0ECaiEQAkADQCAELQAAIAFBws+AgABqLQAARw2kASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBqAEhEAy8AgsgAEEANgIAIBBBAWohAUEsIRAMoQELAkAgBCACRw0AQakBIRAMuwILIAIgBGsgACgCACIBaiEUIAQgAWtBBGohEAJAA0AgBC0AACABQcXPgIAAai0AAEcNowEgAUEERg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQakBIRAMuwILIABBADYCACAQQQFqIQFBKyEQDKABCwJAIAQgAkcNAEGqASEQDLoCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHKz4CAAGotAABHDaIBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGqASEQDLoCCyAAQQA2AgAgEEEBaiEBQRQhEAyfAQsCQCAEIAJHDQBBqwEhEAy5AgsCQAJAAkACQCAELQAAQb5/ag4PAAECpAGkAaQBpAGkAaQBpAGkAaQBpAGkAQOkAQsgBEEBaiEEQZMBIRAMogILIARBAWohBEGUASEQDKECCyAEQQFqIQRBlQEhEAygAgsgBEEBaiEEQZYBIRAMnwILAkAgBCACRw0AQawBIRAMuAILIAQtAABBxQBHDZ8BIARBAWohBAzgAQsCQCAEIAJHDQBBrQEhEAy3AgsgAiAEayAAKAIAIgFqIRQgBCABa0ECaiEQAkADQCAELQAAIAFBzc+AgABqLQAARw2fASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBrQEhEAy3AgsgAEEANgIAIBBBAWohAUEOIRAMnAELAkAgBCACRw0AQa4BIRAMtgILIAQtAABB0ABHDZ0BIARBAWohAUElIRAMmwELAkAgBCACRw0AQa8BIRAMtQILIAIgBGsgACgCACIBaiEUIAQgAWtBCGohEAJAA0AgBC0AACABQdDPgIAAai0AAEcNnQEgAUEIRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQa8BIRAMtQILIABBADYCACAQQQFqIQFBKiEQDJoBCwJAIAQgAkcNAEGwASEQDLQCCwJAAkAgBC0AAEGrf2oOCwCdAZ0BnQGdAZ0BnQGdAZ0BnQEBnQELIARBAWohBEGaASEQDJsCCyAEQQFqIQRBmwEhEAyaAgsCQCAEIAJHDQBBsQEhEAyzAgsCQAJAIAQtAABBv39qDhQAnAGcAZwBnAGcAZwBnAGcAZwBnAGcAZwBnAGcAZwBnAGcAZwBAZwBCyAEQQFqIQRBmQEhEAyaAgsgBEEBaiEEQZwBIRAMmQILAkAgBCACRw0AQbIBIRAMsgILIAIgBGsgACgCACIBaiEUIAQgAWtBA2ohEAJAA0AgBC0AACABQdnPgIAAai0AAEcNmgEgAUEDRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQbIBIRAMsgILIABBADYCACAQQQFqIQFBISEQDJcBCwJAIAQgAkcNAEGzASEQDLECCyACIARrIAAoAgAiAWohFCAEIAFrQQZqIRACQANAIAQtAAAgAUHdz4CAAGotAABHDZkBIAFBBkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGzASEQDLECCyAAQQA2AgAgEEEBaiEBQRohEAyWAQsCQCAEIAJHDQBBtAEhEAywAgsCQAJAAkAgBC0AAEG7f2oOEQCaAZoBmgGaAZoBmgGaAZoBmgEBmgGaAZoBmgGaAQKaAQsgBEEBaiEEQZ0BIRAMmAILIARBAWohBEGeASEQDJcCCyAEQQFqIQRBnwEhEAyWAgsCQCAEIAJHDQBBtQEhEAyvAgsgAiAEayAAKAIAIgFqIRQgBCABa0EFaiEQAkADQCAELQAAIAFB5M+AgABqLQAARw2XASABQQVGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBtQEhEAyvAgsgAEEANgIAIBBBAWohAUEoIRAMlAELAkAgBCACRw0AQbYBIRAMrgILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQerPgIAAai0AAEcNlgEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQbYBIRAMrgILIABBADYCACAQQQFqIQFBByEQDJMBCwJAIAQgAkcNAE
G3ASEQDK0CCwJAAkAgBC0AAEG7f2oODgCWAZYBlgGWAZYBlgGWAZYBlgGWAZYBlgEBlgELIARBAWohBEGhASEQDJQCCyAEQQFqIQRBogEhEAyTAgsCQCAEIAJHDQBBuAEhEAysAgsgAiAEayAAKAIAIgFqIRQgBCABa0ECaiEQAkADQCAELQAAIAFB7c+AgABqLQAARw2UASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBuAEhEAysAgsgAEEANgIAIBBBAWohAUESIRAMkQELAkAgBCACRw0AQbkBIRAMqwILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQfDPgIAAai0AAEcNkwEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQbkBIRAMqwILIABBADYCACAQQQFqIQFBICEQDJABCwJAIAQgAkcNAEG6ASEQDKoCCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRACQANAIAQtAAAgAUHyz4CAAGotAABHDZIBIAFBAUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEG6ASEQDKoCCyAAQQA2AgAgEEEBaiEBQQ8hEAyPAQsCQCAEIAJHDQBBuwEhEAypAgsCQAJAIAQtAABBt39qDgcAkgGSAZIBkgGSAQGSAQsgBEEBaiEEQaUBIRAMkAILIARBAWohBEGmASEQDI8CCwJAIAQgAkcNAEG8ASEQDKgCCyACIARrIAAoAgAiAWohFCAEIAFrQQdqIRACQANAIAQtAAAgAUH0z4CAAGotAABHDZABIAFBB0YNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEG8ASEQDKgCCyAAQQA2AgAgEEEBaiEBQRshEAyNAQsCQCAEIAJHDQBBvQEhEAynAgsCQAJAAkAgBC0AAEG+f2oOEgCRAZEBkQGRAZEBkQGRAZEBkQEBkQGRAZEBkQGRAZEBApEBCyAEQQFqIQRBpAEhEAyPAgsgBEEBaiEEQacBIRAMjgILIARBAWohBEGoASEQDI0CCwJAIAQgAkcNAEG+ASEQDKYCCyAELQAAQc4ARw2NASAEQQFqIQQMzwELAkAgBCACRw0AQb8BIRAMpQILAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAgBC0AAEG/f2oOFQABAgOcAQQFBpwBnAGcAQcICQoLnAEMDQ4PnAELIARBAWohAUHoACEQDJoCCyAEQQFqIQFB6QAhEAyZAgsgBEEBaiEBQe4AIRAMmAILIARBAWohAUHyACEQDJcCCyAEQQFqIQFB8wAhEAyWAgsgBEEBaiEBQfYAIRAMlQILIARBAWohAUH3ACEQDJQCCyAEQQFqIQFB+gAhEAyTAgsgBEEBaiEEQYMBIRAMkgILIARBAWohBEGEASEQDJECCyAEQQFqIQRBhQEhEAyQAgsgBEEBaiEEQZIBIRAMjwILIARBAWohBEGYASEQDI4CCyAEQQFqIQRBoAEhEAyNAgsgBEEBaiEEQaMBIRAMjAILIARBAWohBEGqASEQDIsCCwJAIAQgAkYNACAAQZCAgIAANgIIIAAgBDYCBEGrASEQDIsCC0HAASEQDKMCCyAAIAUgAhCqgICAACIBDYsBIAUhAQxcCwJAIAYgAkYNACAGQQFqIQUMjQELQcIBIRAMoQILA0ACQCAQLQAAQXZqDgSMAQAAjwEACyAQQQFqIhAgAkcNAAtBwwEhEAygAgsCQCAHIAJGDQAgAEGRgICAADYCCCAAIAc2AgQgByEBQQEhEAyHAgtBxAEhEAyfAgsCQCAHIAJHDQBBxQEhEAyfAgsCQAJAIActAABBdmoOBAHOAc4BAM4BCyAHQQFqIQYMjQELIAdBAWohBQyJAQsCQCAHIAJHDQBBxgEhEAyeAgsCQAJAIActAABBdmoOFwGPAY8BAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAQCPAQsgB0EBaiEHC0GwASEQDIQCCwJAIAggAkcNAEHIASEQDJ0CCyAILQAAQSBHDY0BIABBADsBMiAIQQFqIQFBswEhEAyDAgsgASEXAkADQCAXIgcgAkYNASAHLQAAQVBqQf8BcSIQQQpPDcwBAkAgAC8BMiIUQZkzSw0AIAAgFEEKbCIUOwEyIBBB//8DcyAUQf7/A3FJDQAgB0EBaiEXIAAgFCAQaiIQOwEyIBBB//8DcUHoB0kNAQsLQQAhECAAQQA2AhwgAEHBiYCAADYCECAAQQ02AgwgACAHQQFqNgIUDJwCC0HHASEQDJsCCyAAIAggAhCugICAACIQRQ3KASAQQRVHDYwBIABByAE2AhwgACAINgIUIABByZeAgAA2AhAgAEEVNgIMQQAhEAyaAgsCQCAJIAJHDQBBzAEhEAyaAgtBACEUQQEhF0EBIRZBACEQAkACQAJAAkACQAJAAkACQAJAIAktAABBUGoOCpYBlQEAAQIDBAUGCJcBC0ECIRAMBgtBAyEQDAULQQQhEAwEC0EFIRAMAwtBBiEQDAILQQchEAwBC0EIIRALQQAhF0EAIRZBACEUDI4BC0EJIRBBASEUQQAhF0EAIRYMjQELAkAgCiACRw0AQc4BIRAMmQILIAotAABBLkcNjgEgCkEBaiEJDMoBCyALIAJHDY4BQdABIRAMlwILAkAgCyACRg0AIABBjoCAgAA2AgggACALNgIEQbcBIRAM/gELQdEBIRAMlgILAkAgBCACRw0AQdIBIRAMlgILIAIgBGsgACgCACIQaiEUIAQgEGtBBGohCwNAIAQtAAAgEEH8z4CAAGotAABHDY4BIBBBBEYN6QEgEEEBaiEQIARBAWoiBCACRw0ACyAAIBQ2AgBB0gEhEAyVAgsgACAMIAIQrICAgAAiAQ2NASAMIQEMuAELAkAgBCACRw0AQdQBIRAMlAILIAIgBGsgACgCACIQaiEUIAQgEGtBAWohDANAIAQtAAAgEEGB0ICAAGotAABHDY8BIBBBAUYNjgEgEEEBaiEQIARBAWoiBCACRw0ACyAAIBQ2AgBB1AEhEAyTAgsCQCAEIAJHDQBB1gEhEAyTAgsgAiAEayAAKAIAIhBqIRQgBCAQa0ECaiELA0AgBC0AACAQQYPQgIAAai0AAEcNjgEgEEECRg2QASAQQQFqIRAgBEEBaiIEIAJHDQALIAAgFDYCAEHWASEQDJICCwJAIAQgAkcNAEHXASEQDJICCwJAAkAgBC0AAEG7f2oOEACPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BAY8BCyAEQQFqIQRBuwEhEAz5AQsgBEEBaiEEQbwBIRAM+AELAkAgBCACRw0AQdgBIRAMkQILIAQtAABByABHDYwBIARBAWohBAzEAQsCQCAEIAJGDQAgAEGQgICAADYCCCAAIAQ2AgRBvgEhEAz3AQtB2QEhEAyPAgsCQCAEIAJHDQBB2gEhEAyPAgsgBC0AAEHIAEYNw
wEgAEEBOgAoDLkBCyAAQQI6AC8gACAEIAIQpoCAgAAiEA2NAUHCASEQDPQBCyAALQAoQX9qDgK3AbkBuAELA0ACQCAELQAAQXZqDgQAjgGOAQCOAQsgBEEBaiIEIAJHDQALQd0BIRAMiwILIABBADoALyAALQAtQQRxRQ2EAgsgAEEAOgAvIABBAToANCABIQEMjAELIBBBFUYN2gEgAEEANgIcIAAgATYCFCAAQaeOgIAANgIQIABBEjYCDEEAIRAMiAILAkAgACAQIAIQtICAgAAiBA0AIBAhAQyBAgsCQCAEQRVHDQAgAEEDNgIcIAAgEDYCFCAAQbCYgIAANgIQIABBFTYCDEEAIRAMiAILIABBADYCHCAAIBA2AhQgAEGnjoCAADYCECAAQRI2AgxBACEQDIcCCyAQQRVGDdYBIABBADYCHCAAIAE2AhQgAEHajYCAADYCECAAQRQ2AgxBACEQDIYCCyAAKAIEIRcgAEEANgIEIBAgEadqIhYhASAAIBcgECAWIBQbIhAQtYCAgAAiFEUNjQEgAEEHNgIcIAAgEDYCFCAAIBQ2AgxBACEQDIUCCyAAIAAvATBBgAFyOwEwIAEhAQtBKiEQDOoBCyAQQRVGDdEBIABBADYCHCAAIAE2AhQgAEGDjICAADYCECAAQRM2AgxBACEQDIICCyAQQRVGDc8BIABBADYCHCAAIAE2AhQgAEGaj4CAADYCECAAQSI2AgxBACEQDIECCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQt4CAgAAiEA0AIAFBAWohAQyNAQsgAEEMNgIcIAAgEDYCDCAAIAFBAWo2AhRBACEQDIACCyAQQRVGDcwBIABBADYCHCAAIAE2AhQgAEGaj4CAADYCECAAQSI2AgxBACEQDP8BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQt4CAgAAiEA0AIAFBAWohAQyMAQsgAEENNgIcIAAgEDYCDCAAIAFBAWo2AhRBACEQDP4BCyAQQRVGDckBIABBADYCHCAAIAE2AhQgAEHGjICAADYCECAAQSM2AgxBACEQDP0BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQuYCAgAAiEA0AIAFBAWohAQyLAQsgAEEONgIcIAAgEDYCDCAAIAFBAWo2AhRBACEQDPwBCyAAQQA2AhwgACABNgIUIABBwJWAgAA2AhAgAEECNgIMQQAhEAz7AQsgEEEVRg3FASAAQQA2AhwgACABNgIUIABBxoyAgAA2AhAgAEEjNgIMQQAhEAz6AQsgAEEQNgIcIAAgATYCFCAAIBA2AgxBACEQDPkBCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQuYCAgAAiBA0AIAFBAWohAQzxAQsgAEERNgIcIAAgBDYCDCAAIAFBAWo2AhRBACEQDPgBCyAQQRVGDcEBIABBADYCHCAAIAE2AhQgAEHGjICAADYCECAAQSM2AgxBACEQDPcBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQuYCAgAAiEA0AIAFBAWohAQyIAQsgAEETNgIcIAAgEDYCDCAAIAFBAWo2AhRBACEQDPYBCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQuYCAgAAiBA0AIAFBAWohAQztAQsgAEEUNgIcIAAgBDYCDCAAIAFBAWo2AhRBACEQDPUBCyAQQRVGDb0BIABBADYCHCAAIAE2AhQgAEGaj4CAADYCECAAQSI2AgxBACEQDPQBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQt4CAgAAiEA0AIAFBAWohAQyGAQsgAEEWNgIcIAAgEDYCDCAAIAFBAWo2AhRBACEQDPMBCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQt4CAgAAiBA0AIAFBAWohAQzpAQsgAEEXNgIcIAAgBDYCDCAAIAFBAWo2AhRBACEQDPIBCyAAQQA2AhwgACABNgIUIABBzZOAgAA2AhAgAEEMNgIMQQAhEAzxAQtCASERCyAQQQFqIQECQCAAKQMgIhJC//////////8PVg0AIAAgEkIEhiARhDcDICABIQEMhAELIABBADYCHCAAIAE2AhQgAEGtiYCAADYCECAAQQw2AgxBACEQDO8BCyAAQQA2AhwgACAQNgIUIABBzZOAgAA2AhAgAEEMNgIMQQAhEAzuAQsgACgCBCEXIABBADYCBCAQIBGnaiIWIQEgACAXIBAgFiAUGyIQELWAgIAAIhRFDXMgAEEFNgIcIAAgEDYCFCAAIBQ2AgxBACEQDO0BCyAAQQA2AhwgACAQNgIUIABBqpyAgAA2AhAgAEEPNgIMQQAhEAzsAQsgACAQIAIQtICAgAAiAQ0BIBAhAQtBDiEQDNEBCwJAIAFBFUcNACAAQQI2AhwgACAQNgIUIABBsJiAgAA2AhAgAEEVNgIMQQAhEAzqAQsgAEEANgIcIAAgEDYCFCAAQaeOgIAANgIQIABBEjYCDEEAIRAM6QELIAFBAWohEAJAIAAvATAiAUGAAXFFDQACQCAAIBAgAhC7gICAACIBDQAgECEBDHALIAFBFUcNugEgAEEFNgIcIAAgEDYCFCAAQfmXgIAANgIQIABBFTYCDEEAIRAM6QELAkAgAUGgBHFBoARHDQAgAC0ALUECcQ0AIABBADYCHCAAIBA2AhQgAEGWk4CAADYCECAAQQQ2AgxBACEQDOkBCyAAIBAgAhC9gICAABogECEBAkACQAJAAkACQCAAIBAgAhCzgICAAA4WAgEABAQEBAQEBAQEBAQEBAQEBAQEAwQLIABBAToALgsgACAALwEwQcAAcjsBMCAQIQELQSYhEAzRAQsgAEEjNgIcIAAgEDYCFCAAQaWWgIAANgIQIABBFTYCDEEAIRAM6QELIABBADYCHCAAIBA2AhQgAEHVi4CAADYCECAAQRE2AgxBACEQDOgBCyAALQAtQQFxRQ0BQcMBIRAMzgELAkAgDSACRg0AA0ACQCANLQAAQSBGDQAgDSEBDMQBCyANQQFqIg0gAkcNAAtBJSEQDOcBC0ElIRAM5gELIAAoAgQhBCAAQQA2AgQgACAEIA0Qr4CAgAAiBEUNrQEgAEEmNgIcIAAgBDYCDCAAIA1BAWo2AhRBACEQDOUBCyAQQRVGDasBIABBADYCHCAAIAE2AhQgAEH9jYCAADYCECAAQR02AgxBACEQDOQBCyAAQSc2AhwgACABNgIUIAAgEDYCDEEAIRAM4wELIBAhAUEBIRQCQAJAAkACQAJAAkACQCAALQAsQX5qDgcGBQUDAQIABQsgACAALwEwQQhyOwEwDAMLQQIhFAwBC0EEIRQLIABBAToALCAAIAAvATAgFHI7ATALIBAhAQtBKyEQDMoBCyAAQQA2AhwgACAQNgIUIABBq5KAgAA2AhAgAEELNgIMQQAhEAziAQsgAEEANgIcIAAgATYCFCAAQeGPgIAANgIQIABBCjYCDEEAIRAM4QELIABBADoALCAQIQEMvQELIBAhAUEBIRQCQAJAAkACQAJAIAAtACxBe2oO
BAMBAgAFCyAAIAAvATBBCHI7ATAMAwtBAiEUDAELQQQhFAsgAEEBOgAsIAAgAC8BMCAUcjsBMAsgECEBC0EpIRAMxQELIABBADYCHCAAIAE2AhQgAEHwlICAADYCECAAQQM2AgxBACEQDN0BCwJAIA4tAABBDUcNACAAKAIEIQEgAEEANgIEAkAgACABIA4QsYCAgAAiAQ0AIA5BAWohAQx1CyAAQSw2AhwgACABNgIMIAAgDkEBajYCFEEAIRAM3QELIAAtAC1BAXFFDQFBxAEhEAzDAQsCQCAOIAJHDQBBLSEQDNwBCwJAAkADQAJAIA4tAABBdmoOBAIAAAMACyAOQQFqIg4gAkcNAAtBLSEQDN0BCyAAKAIEIQEgAEEANgIEAkAgACABIA4QsYCAgAAiAQ0AIA4hAQx0CyAAQSw2AhwgACAONgIUIAAgATYCDEEAIRAM3AELIAAoAgQhASAAQQA2AgQCQCAAIAEgDhCxgICAACIBDQAgDkEBaiEBDHMLIABBLDYCHCAAIAE2AgwgACAOQQFqNgIUQQAhEAzbAQsgACgCBCEEIABBADYCBCAAIAQgDhCxgICAACIEDaABIA4hAQzOAQsgEEEsRw0BIAFBAWohEEEBIQECQAJAAkACQAJAIAAtACxBe2oOBAMBAgQACyAQIQEMBAtBAiEBDAELQQQhAQsgAEEBOgAsIAAgAC8BMCABcjsBMCAQIQEMAQsgACAALwEwQQhyOwEwIBAhAQtBOSEQDL8BCyAAQQA6ACwgASEBC0E0IRAMvQELIAAgAC8BMEEgcjsBMCABIQEMAgsgACgCBCEEIABBADYCBAJAIAAgBCABELGAgIAAIgQNACABIQEMxwELIABBNzYCHCAAIAE2AhQgACAENgIMQQAhEAzUAQsgAEEIOgAsIAEhAQtBMCEQDLkBCwJAIAAtAChBAUYNACABIQEMBAsgAC0ALUEIcUUNkwEgASEBDAMLIAAtADBBIHENlAFBxQEhEAy3AQsCQCAPIAJGDQACQANAAkAgDy0AAEFQaiIBQf8BcUEKSQ0AIA8hAUE1IRAMugELIAApAyAiEUKZs+bMmbPmzBlWDQEgACARQgp+IhE3AyAgESABrUL/AYMiEkJ/hVYNASAAIBEgEnw3AyAgD0EBaiIPIAJHDQALQTkhEAzRAQsgACgCBCECIABBADYCBCAAIAIgD0EBaiIEELGAgIAAIgINlQEgBCEBDMMBC0E5IRAMzwELAkAgAC8BMCIBQQhxRQ0AIAAtAChBAUcNACAALQAtQQhxRQ2QAQsgACABQff7A3FBgARyOwEwIA8hAQtBNyEQDLQBCyAAIAAvATBBEHI7ATAMqwELIBBBFUYNiwEgAEEANgIcIAAgATYCFCAAQfCOgIAANgIQIABBHDYCDEEAIRAMywELIABBwwA2AhwgACABNgIMIAAgDUEBajYCFEEAIRAMygELAkAgAS0AAEE6Rw0AIAAoAgQhECAAQQA2AgQCQCAAIBAgARCvgICAACIQDQAgAUEBaiEBDGMLIABBwwA2AhwgACAQNgIMIAAgAUEBajYCFEEAIRAMygELIABBADYCHCAAIAE2AhQgAEGxkYCAADYCECAAQQo2AgxBACEQDMkBCyAAQQA2AhwgACABNgIUIABBoJmAgAA2AhAgAEEeNgIMQQAhEAzIAQsgAEEANgIACyAAQYASOwEqIAAgF0EBaiIBIAIQqICAgAAiEA0BIAEhAQtBxwAhEAysAQsgEEEVRw2DASAAQdEANgIcIAAgATYCFCAAQeOXgIAANgIQIABBFTYCDEEAIRAMxAELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDF4LIABB0gA2AhwgACABNgIUIAAgEDYCDEEAIRAMwwELIABBADYCHCAAIBQ2AhQgAEHBqICAADYCECAAQQc2AgwgAEEANgIAQQAhEAzCAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMXQsgAEHTADYCHCAAIAE2AhQgACAQNgIMQQAhEAzBAQtBACEQIABBADYCHCAAIAE2AhQgAEGAkYCAADYCECAAQQk2AgwMwAELIBBBFUYNfSAAQQA2AhwgACABNgIUIABBlI2AgAA2AhAgAEEhNgIMQQAhEAy/AQtBASEWQQAhF0EAIRRBASEQCyAAIBA6ACsgAUEBaiEBAkACQCAALQAtQRBxDQACQAJAAkAgAC0AKg4DAQACBAsgFkUNAwwCCyAUDQEMAgsgF0UNAQsgACgCBCEQIABBADYCBAJAIAAgECABEK2AgIAAIhANACABIQEMXAsgAEHYADYCHCAAIAE2AhQgACAQNgIMQQAhEAy+AQsgACgCBCEEIABBADYCBAJAIAAgBCABEK2AgIAAIgQNACABIQEMrQELIABB2QA2AhwgACABNgIUIAAgBDYCDEEAIRAMvQELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARCtgICAACIEDQAgASEBDKsBCyAAQdoANgIcIAAgATYCFCAAIAQ2AgxBACEQDLwBCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQrYCAgAAiBA0AIAEhAQypAQsgAEHcADYCHCAAIAE2AhQgACAENgIMQQAhEAy7AQsCQCABLQAAQVBqIhBB/wFxQQpPDQAgACAQOgAqIAFBAWohAUHPACEQDKIBCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQrYCAgAAiBA0AIAEhAQynAQsgAEHeADYCHCAAIAE2AhQgACAENgIMQQAhEAy6AQsgAEEANgIAIBdBAWohAQJAIAAtAClBI08NACABIQEMWQsgAEEANgIcIAAgATYCFCAAQdOJgIAANgIQIABBCDYCDEEAIRAMuQELIABBADYCAAtBACEQIABBADYCHCAAIAE2AhQgAEGQs4CAADYCECAAQQg2AgwMtwELIABBADYCACAXQQFqIQECQCAALQApQSFHDQAgASEBDFYLIABBADYCHCAAIAE2AhQgAEGbioCAADYCECAAQQg2AgxBACEQDLYBCyAAQQA2AgAgF0EBaiEBAkAgAC0AKSIQQV1qQQtPDQAgASEBDFULAkAgEEEGSw0AQQEgEHRBygBxRQ0AIAEhAQxVC0EAIRAgAEEANgIcIAAgATYCFCAAQfeJgIAANgIQIABBCDYCDAy1AQsgEEEVRg1xIABBADYCHCAAIAE2AhQgAEG5jYCAADYCECAAQRo2AgxBACEQDLQBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxUCyAAQeUANgIcIAAgATYCFCAAIBA2AgxBACEQDLMBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxNCyAAQdIANgIcIAAgATYCFCAAIBA2AgxBACEQDLIBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxNCyAAQdMANgIcIAAgATYCFCAAIBA2Agx
BACEQDLEBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxRCyAAQeUANgIcIAAgATYCFCAAIBA2AgxBACEQDLABCyAAQQA2AhwgACABNgIUIABBxoqAgAA2AhAgAEEHNgIMQQAhEAyvAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMSQsgAEHSADYCHCAAIAE2AhQgACAQNgIMQQAhEAyuAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMSQsgAEHTADYCHCAAIAE2AhQgACAQNgIMQQAhEAytAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMTQsgAEHlADYCHCAAIAE2AhQgACAQNgIMQQAhEAysAQsgAEEANgIcIAAgATYCFCAAQdyIgIAANgIQIABBBzYCDEEAIRAMqwELIBBBP0cNASABQQFqIQELQQUhEAyQAQtBACEQIABBADYCHCAAIAE2AhQgAEH9koCAADYCECAAQQc2AgwMqAELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDEILIABB0gA2AhwgACABNgIUIAAgEDYCDEEAIRAMpwELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDEILIABB0wA2AhwgACABNgIUIAAgEDYCDEEAIRAMpgELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDEYLIABB5QA2AhwgACABNgIUIAAgEDYCDEEAIRAMpQELIAAoAgQhASAAQQA2AgQCQCAAIAEgFBCngICAACIBDQAgFCEBDD8LIABB0gA2AhwgACAUNgIUIAAgATYCDEEAIRAMpAELIAAoAgQhASAAQQA2AgQCQCAAIAEgFBCngICAACIBDQAgFCEBDD8LIABB0wA2AhwgACAUNgIUIAAgATYCDEEAIRAMowELIAAoAgQhASAAQQA2AgQCQCAAIAEgFBCngICAACIBDQAgFCEBDEMLIABB5QA2AhwgACAUNgIUIAAgATYCDEEAIRAMogELIABBADYCHCAAIBQ2AhQgAEHDj4CAADYCECAAQQc2AgxBACEQDKEBCyAAQQA2AhwgACABNgIUIABBw4+AgAA2AhAgAEEHNgIMQQAhEAygAQtBACEQIABBADYCHCAAIBQ2AhQgAEGMnICAADYCECAAQQc2AgwMnwELIABBADYCHCAAIBQ2AhQgAEGMnICAADYCECAAQQc2AgxBACEQDJ4BCyAAQQA2AhwgACAUNgIUIABB/pGAgAA2AhAgAEEHNgIMQQAhEAydAQsgAEEANgIcIAAgATYCFCAAQY6bgIAANgIQIABBBjYCDEEAIRAMnAELIBBBFUYNVyAAQQA2AhwgACABNgIUIABBzI6AgAA2AhAgAEEgNgIMQQAhEAybAQsgAEEANgIAIBBBAWohAUEkIRALIAAgEDoAKSAAKAIEIRAgAEEANgIEIAAgECABEKuAgIAAIhANVCABIQEMPgsgAEEANgIAC0EAIRAgAEEANgIcIAAgBDYCFCAAQfGbgIAANgIQIABBBjYCDAyXAQsgAUEVRg1QIABBADYCHCAAIAU2AhQgAEHwjICAADYCECAAQRs2AgxBACEQDJYBCyAAKAIEIQUgAEEANgIEIAAgBSAQEKmAgIAAIgUNASAQQQFqIQULQa0BIRAMewsgAEHBATYCHCAAIAU2AgwgACAQQQFqNgIUQQAhEAyTAQsgACgCBCEGIABBADYCBCAAIAYgEBCpgICAACIGDQEgEEEBaiEGC0GuASEQDHgLIABBwgE2AhwgACAGNgIMIAAgEEEBajYCFEEAIRAMkAELIABBADYCHCAAIAc2AhQgAEGXi4CAADYCECAAQQ02AgxBACEQDI8BCyAAQQA2AhwgACAINgIUIABB45CAgAA2AhAgAEEJNgIMQQAhEAyOAQsgAEEANgIcIAAgCDYCFCAAQZSNgIAANgIQIABBITYCDEEAIRAMjQELQQEhFkEAIRdBACEUQQEhEAsgACAQOgArIAlBAWohCAJAAkAgAC0ALUEQcQ0AAkACQAJAIAAtACoOAwEAAgQLIBZFDQMMAgsgFA0BDAILIBdFDQELIAAoAgQhECAAQQA2AgQgACAQIAgQrYCAgAAiEEUNPSAAQckBNgIcIAAgCDYCFCAAIBA2AgxBACEQDIwBCyAAKAIEIQQgAEEANgIEIAAgBCAIEK2AgIAAIgRFDXYgAEHKATYCHCAAIAg2AhQgACAENgIMQQAhEAyLAQsgACgCBCEEIABBADYCBCAAIAQgCRCtgICAACIERQ10IABBywE2AhwgACAJNgIUIAAgBDYCDEEAIRAMigELIAAoAgQhBCAAQQA2AgQgACAEIAoQrYCAgAAiBEUNciAAQc0BNgIcIAAgCjYCFCAAIAQ2AgxBACEQDIkBCwJAIAstAABBUGoiEEH/AXFBCk8NACAAIBA6ACogC0EBaiEKQbYBIRAMcAsgACgCBCEEIABBADYCBCAAIAQgCxCtgICAACIERQ1wIABBzwE2AhwgACALNgIUIAAgBDYCDEEAIRAMiAELIABBADYCHCAAIAQ2AhQgAEGQs4CAADYCECAAQQg2AgwgAEEANgIAQQAhEAyHAQsgAUEVRg0/IABBADYCHCAAIAw2AhQgAEHMjoCAADYCECAAQSA2AgxBACEQDIYBCyAAQYEEOwEoIAAoAgQhECAAQgA3AwAgACAQIAxBAWoiDBCrgICAACIQRQ04IABB0wE2AhwgACAMNgIUIAAgEDYCDEEAIRAMhQELIABBADYCAAtBACEQIABBADYCHCAAIAQ2AhQgAEHYm4CAADYCECAAQQg2AgwMgwELIAAoAgQhECAAQgA3AwAgACAQIAtBAWoiCxCrgICAACIQDQFBxgEhEAxpCyAAQQI6ACgMVQsgAEHVATYCHCAAIAs2AhQgACAQNgIMQQAhEAyAAQsgEEEVRg03IABBADYCHCAAIAQ2AhQgAEGkjICAADYCECAAQRA2AgxBACEQDH8LIAAtADRBAUcNNCAAIAQgAhC8gICAACIQRQ00IBBBFUcNNSAAQdwBNgIcIAAgBDYCFCAAQdWWgIAANgIQIABBFTYCDEEAIRAMfgtBACEQIABBADYCHCAAQa+LgIAANgIQIABBAjYCDCAAIBRBAWo2AhQMfQtBACEQDGMLQQIhEAxiC0ENIRAMYQtBDyEQDGALQSUhEAxfC0ETIRAMXgtBFSEQDF0LQRYhEAxcC0EXIRAMWwtBGCEQDFoLQRkhEAxZC0EaIRAMWAtBGyEQDFcLQRwhEAxWC0EdIRAMVQtBHyEQDFQLQSEhEAxTC0EjIRAMUgtBxgAhEAxRC0EuIRAMUAtBLyEQDE8LQTshEAxOC0E9IRAMTQtByAAhEAxMC0HJACEQDEsLQcsAIRAMSgtBzAAhEAxJC0HOACEQDEgLQdEAIRAMRw
tB1QAhEAxGC0HYACEQDEULQdkAIRAMRAtB2wAhEAxDC0HkACEQDEILQeUAIRAMQQtB8QAhEAxAC0H0ACEQDD8LQY0BIRAMPgtBlwEhEAw9C0GpASEQDDwLQawBIRAMOwtBwAEhEAw6C0G5ASEQDDkLQa8BIRAMOAtBsQEhEAw3C0GyASEQDDYLQbQBIRAMNQtBtQEhEAw0C0G6ASEQDDMLQb0BIRAMMgtBvwEhEAwxC0HBASEQDDALIABBADYCHCAAIAQ2AhQgAEHpi4CAADYCECAAQR82AgxBACEQDEgLIABB2wE2AhwgACAENgIUIABB+paAgAA2AhAgAEEVNgIMQQAhEAxHCyAAQfgANgIcIAAgDDYCFCAAQcqYgIAANgIQIABBFTYCDEEAIRAMRgsgAEHRADYCHCAAIAU2AhQgAEGwl4CAADYCECAAQRU2AgxBACEQDEULIABB+QA2AhwgACABNgIUIAAgEDYCDEEAIRAMRAsgAEH4ADYCHCAAIAE2AhQgAEHKmICAADYCECAAQRU2AgxBACEQDEMLIABB5AA2AhwgACABNgIUIABB45eAgAA2AhAgAEEVNgIMQQAhEAxCCyAAQdcANgIcIAAgATYCFCAAQcmXgIAANgIQIABBFTYCDEEAIRAMQQsgAEEANgIcIAAgATYCFCAAQbmNgIAANgIQIABBGjYCDEEAIRAMQAsgAEHCADYCHCAAIAE2AhQgAEHjmICAADYCECAAQRU2AgxBACEQDD8LIABBADYCBCAAIA8gDxCxgICAACIERQ0BIABBOjYCHCAAIAQ2AgwgACAPQQFqNgIUQQAhEAw+CyAAKAIEIQQgAEEANgIEAkAgACAEIAEQsYCAgAAiBEUNACAAQTs2AhwgACAENgIMIAAgAUEBajYCFEEAIRAMPgsgAUEBaiEBDC0LIA9BAWohAQwtCyAAQQA2AhwgACAPNgIUIABB5JKAgAA2AhAgAEEENgIMQQAhEAw7CyAAQTY2AhwgACAENgIUIAAgAjYCDEEAIRAMOgsgAEEuNgIcIAAgDjYCFCAAIAQ2AgxBACEQDDkLIABB0AA2AhwgACABNgIUIABBkZiAgAA2AhAgAEEVNgIMQQAhEAw4CyANQQFqIQEMLAsgAEEVNgIcIAAgATYCFCAAQYKZgIAANgIQIABBFTYCDEEAIRAMNgsgAEEbNgIcIAAgATYCFCAAQZGXgIAANgIQIABBFTYCDEEAIRAMNQsgAEEPNgIcIAAgATYCFCAAQZGXgIAANgIQIABBFTYCDEEAIRAMNAsgAEELNgIcIAAgATYCFCAAQZGXgIAANgIQIABBFTYCDEEAIRAMMwsgAEEaNgIcIAAgATYCFCAAQYKZgIAANgIQIABBFTYCDEEAIRAMMgsgAEELNgIcIAAgATYCFCAAQYKZgIAANgIQIABBFTYCDEEAIRAMMQsgAEEKNgIcIAAgATYCFCAAQeSWgIAANgIQIABBFTYCDEEAIRAMMAsgAEEeNgIcIAAgATYCFCAAQfmXgIAANgIQIABBFTYCDEEAIRAMLwsgAEEANgIcIAAgEDYCFCAAQdqNgIAANgIQIABBFDYCDEEAIRAMLgsgAEEENgIcIAAgATYCFCAAQbCYgIAANgIQIABBFTYCDEEAIRAMLQsgAEEANgIAIAtBAWohCwtBuAEhEAwSCyAAQQA2AgAgEEEBaiEBQfUAIRAMEQsgASEBAkAgAC0AKUEFRw0AQeMAIRAMEQtB4gAhEAwQC0EAIRAgAEEANgIcIABB5JGAgAA2AhAgAEEHNgIMIAAgFEEBajYCFAwoCyAAQQA2AgAgF0EBaiEBQcAAIRAMDgtBASEBCyAAIAE6ACwgAEEANgIAIBdBAWohAQtBKCEQDAsLIAEhAQtBOCEQDAkLAkAgASIPIAJGDQADQAJAIA8tAABBgL6AgABqLQAAIgFBAUYNACABQQJHDQMgD0EBaiEBDAQLIA9BAWoiDyACRw0AC0E+IRAMIgtBPiEQDCELIABBADoALCAPIQEMAQtBCyEQDAYLQTohEAwFCyABQQFqIQFBLSEQDAQLIAAgAToALCAAQQA2AgAgFkEBaiEBQQwhEAwDCyAAQQA2AgAgF0EBaiEBQQohEAwCCyAAQQA2AgALIABBADoALCANIQFBCSEQDAALC0EAIRAgAEEANgIcIAAgCzYCFCAAQc2QgIAANgIQIABBCTYCDAwXC0EAIRAgAEEANgIcIAAgCjYCFCAAQemKgIAANgIQIABBCTYCDAwWC0EAIRAgAEEANgIcIAAgCTYCFCAAQbeQgIAANgIQIABBCTYCDAwVC0EAIRAgAEEANgIcIAAgCDYCFCAAQZyRgIAANgIQIABBCTYCDAwUC0EAIRAgAEEANgIcIAAgATYCFCAAQc2QgIAANgIQIABBCTYCDAwTC0EAIRAgAEEANgIcIAAgATYCFCAAQemKgIAANgIQIABBCTYCDAwSC0EAIRAgAEEANgIcIAAgATYCFCAAQbeQgIAANgIQIABBCTYCDAwRC0EAIRAgAEEANgIcIAAgATYCFCAAQZyRgIAANgIQIABBCTYCDAwQC0EAIRAgAEEANgIcIAAgATYCFCAAQZeVgIAANgIQIABBDzYCDAwPC0EAIRAgAEEANgIcIAAgATYCFCAAQZeVgIAANgIQIABBDzYCDAwOC0EAIRAgAEEANgIcIAAgATYCFCAAQcCSgIAANgIQIABBCzYCDAwNC0EAIRAgAEEANgIcIAAgATYCFCAAQZWJgIAANgIQIABBCzYCDAwMC0EAIRAgAEEANgIcIAAgATYCFCAAQeGPgIAANgIQIABBCjYCDAwLC0EAIRAgAEEANgIcIAAgATYCFCAAQfuPgIAANgIQIABBCjYCDAwKC0EAIRAgAEEANgIcIAAgATYCFCAAQfGZgIAANgIQIABBAjYCDAwJC0EAIRAgAEEANgIcIAAgATYCFCAAQcSUgIAANgIQIABBAjYCDAwIC0EAIRAgAEEANgIcIAAgATYCFCAAQfKVgIAANgIQIABBAjYCDAwHCyAAQQI2AhwgACABNgIUIABBnJqAgAA2AhAgAEEWNgIMQQAhEAwGC0EBIRAMBQtB1AAhECABIgQgAkYNBCADQQhqIAAgBCACQdjCgIAAQQoQxYCAgAAgAygCDCEEIAMoAggOAwEEAgALEMqAgIAAAAsgAEEANgIcIABBtZqAgAA2AhAgAEEXNgIMIAAgBEEBajYCFEEAIRAMAgsgAEEANgIcIAAgBDYCFCAAQcqagIAANgIQIABBCTYCDEEAIRAMAQsCQCABIgQgAkcNAEEiIRAMAQsgAEGJgICAADYCCCAAIAQ2AgRBISEQCyADQRBqJICAgIAAIBALrwEBAn8gASgCACEGAkACQCACIANGDQAgBCAGaiEEIAYgA2ogAmshByACIAZBf3MgBWoiBmohBQNAAkAgAi0AACAELQAARg0AQQIhBAwDCwJAIAYNAEEAIQQgBSECDAMLIAZBf2ohBiAEQQFqIQQgAkEBaiICIANHDQALI
AchBiADIQILIABBATYCACABIAY2AgAgACACNgIEDwsgAUEANgIAIAAgBDYCACAAIAI2AgQLCgAgABDHgICAAAvyNgELfyOAgICAAEEQayIBJICAgIAAAkBBACgCoNCAgAANAEEAEMuAgIAAQYDUhIAAayICQdkASQ0AQQAhAwJAQQAoAuDTgIAAIgQNAEEAQn83AuzTgIAAQQBCgICEgICAwAA3AuTTgIAAQQAgAUEIakFwcUHYqtWqBXMiBDYC4NOAgABBAEEANgL004CAAEEAQQA2AsTTgIAAC0EAIAI2AszTgIAAQQBBgNSEgAA2AsjTgIAAQQBBgNSEgAA2ApjQgIAAQQAgBDYCrNCAgABBAEF/NgKo0ICAAANAIANBxNCAgABqIANBuNCAgABqIgQ2AgAgBCADQbDQgIAAaiIFNgIAIANBvNCAgABqIAU2AgAgA0HM0ICAAGogA0HA0ICAAGoiBTYCACAFIAQ2AgAgA0HU0ICAAGogA0HI0ICAAGoiBDYCACAEIAU2AgAgA0HQ0ICAAGogBDYCACADQSBqIgNBgAJHDQALQYDUhIAAQXhBgNSEgABrQQ9xQQBBgNSEgABBCGpBD3EbIgNqIgRBBGogAkFIaiIFIANrIgNBAXI2AgBBAEEAKALw04CAADYCpNCAgABBACADNgKU0ICAAEEAIAQ2AqDQgIAAQYDUhIAAIAVqQTg2AgQLAkACQAJAAkACQAJAAkACQAJAAkACQAJAIABB7AFLDQACQEEAKAKI0ICAACIGQRAgAEETakFwcSAAQQtJGyICQQN2IgR2IgNBA3FFDQACQAJAIANBAXEgBHJBAXMiBUEDdCIEQbDQgIAAaiIDIARBuNCAgABqKAIAIgQoAggiAkcNAEEAIAZBfiAFd3E2AojQgIAADAELIAMgAjYCCCACIAM2AgwLIARBCGohAyAEIAVBA3QiBUEDcjYCBCAEIAVqIgQgBCgCBEEBcjYCBAwMCyACQQAoApDQgIAAIgdNDQECQCADRQ0AAkACQCADIAR0QQIgBHQiA0EAIANrcnEiA0EAIANrcUF/aiIDIANBDHZBEHEiA3YiBEEFdkEIcSIFIANyIAQgBXYiA0ECdkEEcSIEciADIAR2IgNBAXZBAnEiBHIgAyAEdiIDQQF2QQFxIgRyIAMgBHZqIgRBA3QiA0Gw0ICAAGoiBSADQbjQgIAAaigCACIDKAIIIgBHDQBBACAGQX4gBHdxIgY2AojQgIAADAELIAUgADYCCCAAIAU2AgwLIAMgAkEDcjYCBCADIARBA3QiBGogBCACayIFNgIAIAMgAmoiACAFQQFyNgIEAkAgB0UNACAHQXhxQbDQgIAAaiECQQAoApzQgIAAIQQCQAJAIAZBASAHQQN2dCIIcQ0AQQAgBiAIcjYCiNCAgAAgAiEIDAELIAIoAgghCAsgCCAENgIMIAIgBDYCCCAEIAI2AgwgBCAINgIICyADQQhqIQNBACAANgKc0ICAAEEAIAU2ApDQgIAADAwLQQAoAozQgIAAIglFDQEgCUEAIAlrcUF/aiIDIANBDHZBEHEiA3YiBEEFdkEIcSIFIANyIAQgBXYiA0ECdkEEcSIEciADIAR2IgNBAXZBAnEiBHIgAyAEdiIDQQF2QQFxIgRyIAMgBHZqQQJ0QbjSgIAAaigCACIAKAIEQXhxIAJrIQQgACEFAkADQAJAIAUoAhAiAw0AIAVBFGooAgAiA0UNAgsgAygCBEF4cSACayIFIAQgBSAESSIFGyEEIAMgACAFGyEAIAMhBQwACwsgACgCGCEKAkAgACgCDCIIIABGDQAgACgCCCIDQQAoApjQgIAASRogCCADNgIIIAMgCDYCDAwLCwJAIABBFGoiBSgCACIDDQAgACgCECIDRQ0DIABBEGohBQsDQCAFIQsgAyIIQRRqIgUoAgAiAw0AIAhBEGohBSAIKAIQIgMNAAsgC0EANgIADAoLQX8hAiAAQb9/Sw0AIABBE2oiA0FwcSECQQAoAozQgIAAIgdFDQBBACELAkAgAkGAAkkNAEEfIQsgAkH///8HSw0AIANBCHYiAyADQYD+P2pBEHZBCHEiA3QiBCAEQYDgH2pBEHZBBHEiBHQiBSAFQYCAD2pBEHZBAnEiBXRBD3YgAyAEciAFcmsiA0EBdCACIANBFWp2QQFxckEcaiELC0EAIAJrIQQCQAJAAkACQCALQQJ0QbjSgIAAaigCACIFDQBBACEDQQAhCAwBC0EAIQMgAkEAQRkgC0EBdmsgC0EfRht0IQBBACEIA0ACQCAFKAIEQXhxIAJrIgYgBE8NACAGIQQgBSEIIAYNAEEAIQQgBSEIIAUhAwwDCyADIAVBFGooAgAiBiAGIAUgAEEddkEEcWpBEGooAgAiBUYbIAMgBhshAyAAQQF0IQAgBQ0ACwsCQCADIAhyDQBBACEIQQIgC3QiA0EAIANrciAHcSIDRQ0DIANBACADa3FBf2oiAyADQQx2QRBxIgN2IgVBBXZBCHEiACADciAFIAB2IgNBAnZBBHEiBXIgAyAFdiIDQQF2QQJxIgVyIAMgBXYiA0EBdkEBcSIFciADIAV2akECdEG40oCAAGooAgAhAwsgA0UNAQsDQCADKAIEQXhxIAJrIgYgBEkhAAJAIAMoAhAiBQ0AIANBFGooAgAhBQsgBiAEIAAbIQQgAyAIIAAbIQggBSEDIAUNAAsLIAhFDQAgBEEAKAKQ0ICAACACa08NACAIKAIYIQsCQCAIKAIMIgAgCEYNACAIKAIIIgNBACgCmNCAgABJGiAAIAM2AgggAyAANgIMDAkLAkAgCEEUaiIFKAIAIgMNACAIKAIQIgNFDQMgCEEQaiEFCwNAIAUhBiADIgBBFGoiBSgCACIDDQAgAEEQaiEFIAAoAhAiAw0ACyAGQQA2AgAMCAsCQEEAKAKQ0ICAACIDIAJJDQBBACgCnNCAgAAhBAJAAkAgAyACayIFQRBJDQAgBCACaiIAIAVBAXI2AgRBACAFNgKQ0ICAAEEAIAA2ApzQgIAAIAQgA2ogBTYCACAEIAJBA3I2AgQMAQsgBCADQQNyNgIEIAQgA2oiAyADKAIEQQFyNgIEQQBBADYCnNCAgABBAEEANgKQ0ICAAAsgBEEIaiEDDAoLAkBBACgClNCAgAAiACACTQ0AQQAoAqDQgIAAIgMgAmoiBCAAIAJrIgVBAXI2AgRBACAFNgKU0ICAAEEAIAQ2AqDQgIAAIAMgAkEDcjYCBCADQQhqIQMMCgsCQAJAQQAoAuDTgIAARQ0AQQAoAujTgIAAIQQMAQtBAEJ/NwLs04CAAEEAQoCAhICAgMAANwLk04CAAEEAIAFBDGpBcHFB2KrVqgVzNgLg04CAAEEAQQA2AvTTgIAAQQBBADYCxNOAgABBgIAEIQQLQQAhAwJAIAQgAkHHAGoiB2oiBkEAIARrIgtxIgggAksNAEEAQTA2AvjTgIAADAoLAkBBACgCwNOAgAAiA0UNAAJAQQAoArjTgIAAIgQgCGoiBSAETQ0AIAUgA00N
AQtBACEDQQBBMDYC+NOAgAAMCgtBAC0AxNOAgABBBHENBAJAAkACQEEAKAKg0ICAACIERQ0AQcjTgIAAIQMDQAJAIAMoAgAiBSAESw0AIAUgAygCBGogBEsNAwsgAygCCCIDDQALC0EAEMuAgIAAIgBBf0YNBSAIIQYCQEEAKALk04CAACIDQX9qIgQgAHFFDQAgCCAAayAEIABqQQAgA2txaiEGCyAGIAJNDQUgBkH+////B0sNBQJAQQAoAsDTgIAAIgNFDQBBACgCuNOAgAAiBCAGaiIFIARNDQYgBSADSw0GCyAGEMuAgIAAIgMgAEcNAQwHCyAGIABrIAtxIgZB/v///wdLDQQgBhDLgICAACIAIAMoAgAgAygCBGpGDQMgACEDCwJAIANBf0YNACACQcgAaiAGTQ0AAkAgByAGa0EAKALo04CAACIEakEAIARrcSIEQf7///8HTQ0AIAMhAAwHCwJAIAQQy4CAgABBf0YNACAEIAZqIQYgAyEADAcLQQAgBmsQy4CAgAAaDAQLIAMhACADQX9HDQUMAwtBACEIDAcLQQAhAAwFCyAAQX9HDQILQQBBACgCxNOAgABBBHI2AsTTgIAACyAIQf7///8HSw0BIAgQy4CAgAAhAEEAEMuAgIAAIQMgAEF/Rg0BIANBf0YNASAAIANPDQEgAyAAayIGIAJBOGpNDQELQQBBACgCuNOAgAAgBmoiAzYCuNOAgAACQCADQQAoArzTgIAATQ0AQQAgAzYCvNOAgAALAkACQAJAAkBBACgCoNCAgAAiBEUNAEHI04CAACEDA0AgACADKAIAIgUgAygCBCIIakYNAiADKAIIIgMNAAwDCwsCQAJAQQAoApjQgIAAIgNFDQAgACADTw0BC0EAIAA2ApjQgIAAC0EAIQNBACAGNgLM04CAAEEAIAA2AsjTgIAAQQBBfzYCqNCAgABBAEEAKALg04CAADYCrNCAgABBAEEANgLU04CAAANAIANBxNCAgABqIANBuNCAgABqIgQ2AgAgBCADQbDQgIAAaiIFNgIAIANBvNCAgABqIAU2AgAgA0HM0ICAAGogA0HA0ICAAGoiBTYCACAFIAQ2AgAgA0HU0ICAAGogA0HI0ICAAGoiBDYCACAEIAU2AgAgA0HQ0ICAAGogBDYCACADQSBqIgNBgAJHDQALIABBeCAAa0EPcUEAIABBCGpBD3EbIgNqIgQgBkFIaiIFIANrIgNBAXI2AgRBAEEAKALw04CAADYCpNCAgABBACADNgKU0ICAAEEAIAQ2AqDQgIAAIAAgBWpBODYCBAwCCyADLQAMQQhxDQAgBCAFSQ0AIAQgAE8NACAEQXggBGtBD3FBACAEQQhqQQ9xGyIFaiIAQQAoApTQgIAAIAZqIgsgBWsiBUEBcjYCBCADIAggBmo2AgRBAEEAKALw04CAADYCpNCAgABBACAFNgKU0ICAAEEAIAA2AqDQgIAAIAQgC2pBODYCBAwBCwJAIABBACgCmNCAgAAiCE8NAEEAIAA2ApjQgIAAIAAhCAsgACAGaiEFQcjTgIAAIQMCQAJAAkACQAJAAkACQANAIAMoAgAgBUYNASADKAIIIgMNAAwCCwsgAy0ADEEIcUUNAQtByNOAgAAhAwNAAkAgAygCACIFIARLDQAgBSADKAIEaiIFIARLDQMLIAMoAgghAwwACwsgAyAANgIAIAMgAygCBCAGajYCBCAAQXggAGtBD3FBACAAQQhqQQ9xG2oiCyACQQNyNgIEIAVBeCAFa0EPcUEAIAVBCGpBD3EbaiIGIAsgAmoiAmshAwJAIAYgBEcNAEEAIAI2AqDQgIAAQQBBACgClNCAgAAgA2oiAzYClNCAgAAgAiADQQFyNgIEDAMLAkAgBkEAKAKc0ICAAEcNAEEAIAI2ApzQgIAAQQBBACgCkNCAgAAgA2oiAzYCkNCAgAAgAiADQQFyNgIEIAIgA2ogAzYCAAwDCwJAIAYoAgQiBEEDcUEBRw0AIARBeHEhBwJAAkAgBEH/AUsNACAGKAIIIgUgBEEDdiIIQQN0QbDQgIAAaiIARhoCQCAGKAIMIgQgBUcNAEEAQQAoAojQgIAAQX4gCHdxNgKI0ICAAAwCCyAEIABGGiAEIAU2AgggBSAENgIMDAELIAYoAhghCQJAAkAgBigCDCIAIAZGDQAgBigCCCIEIAhJGiAAIAQ2AgggBCAANgIMDAELAkAgBkEUaiIEKAIAIgUNACAGQRBqIgQoAgAiBQ0AQQAhAAwBCwNAIAQhCCAFIgBBFGoiBCgCACIFDQAgAEEQaiEEIAAoAhAiBQ0ACyAIQQA2AgALIAlFDQACQAJAIAYgBigCHCIFQQJ0QbjSgIAAaiIEKAIARw0AIAQgADYCACAADQFBAEEAKAKM0ICAAEF+IAV3cTYCjNCAgAAMAgsgCUEQQRQgCSgCECAGRhtqIAA2AgAgAEUNAQsgACAJNgIYAkAgBigCECIERQ0AIAAgBDYCECAEIAA2AhgLIAYoAhQiBEUNACAAQRRqIAQ2AgAgBCAANgIYCyAHIANqIQMgBiAHaiIGKAIEIQQLIAYgBEF+cTYCBCACIANqIAM2AgAgAiADQQFyNgIEAkAgA0H/AUsNACADQXhxQbDQgIAAaiEEAkACQEEAKAKI0ICAACIFQQEgA0EDdnQiA3ENAEEAIAUgA3I2AojQgIAAIAQhAwwBCyAEKAIIIQMLIAMgAjYCDCAEIAI2AgggAiAENgIMIAIgAzYCCAwDC0EfIQQCQCADQf///wdLDQAgA0EIdiIEIARBgP4/akEQdkEIcSIEdCIFIAVBgOAfakEQdkEEcSIFdCIAIABBgIAPakEQdkECcSIAdEEPdiAEIAVyIAByayIEQQF0IAMgBEEVanZBAXFyQRxqIQQLIAIgBDYCHCACQgA3AhAgBEECdEG40oCAAGohBQJAQQAoAozQgIAAIgBBASAEdCIIcQ0AIAUgAjYCAEEAIAAgCHI2AozQgIAAIAIgBTYCGCACIAI2AgggAiACNgIMDAMLIANBAEEZIARBAXZrIARBH0YbdCEEIAUoAgAhAANAIAAiBSgCBEF4cSADRg0CIARBHXYhACAEQQF0IQQgBSAAQQRxakEQaiIIKAIAIgANAAsgCCACNgIAIAIgBTYCGCACIAI2AgwgAiACNgIIDAILIABBeCAAa0EPcUEAIABBCGpBD3EbIgNqIgsgBkFIaiIIIANrIgNBAXI2AgQgACAIakE4NgIEIAQgBUE3IAVrQQ9xQQAgBUFJakEPcRtqQUFqIgggCCAEQRBqSRsiCEEjNgIEQQBBACgC8NOAgAA2AqTQgIAAQQAgAzYClNCAgABBACALNgKg0ICAACAIQRBqQQApAtDTgIAANwIAIAhBACkCyNOAgAA3AghBACAIQQhqNgLQ04CAAEEAIAY2AszTgIAAQQAgADYCyNOAgABBAEEANgLU04CAACAIQSRqIQMDQCADQQc2AgAgA0EEaiIDIAVJDQALIAggBEYNAyAIIAgoAgRBfnE2AgQgCCA
IIARrIgA2AgAgBCAAQQFyNgIEAkAgAEH/AUsNACAAQXhxQbDQgIAAaiEDAkACQEEAKAKI0ICAACIFQQEgAEEDdnQiAHENAEEAIAUgAHI2AojQgIAAIAMhBQwBCyADKAIIIQULIAUgBDYCDCADIAQ2AgggBCADNgIMIAQgBTYCCAwEC0EfIQMCQCAAQf///wdLDQAgAEEIdiIDIANBgP4/akEQdkEIcSIDdCIFIAVBgOAfakEQdkEEcSIFdCIIIAhBgIAPakEQdkECcSIIdEEPdiADIAVyIAhyayIDQQF0IAAgA0EVanZBAXFyQRxqIQMLIAQgAzYCHCAEQgA3AhAgA0ECdEG40oCAAGohBQJAQQAoAozQgIAAIghBASADdCIGcQ0AIAUgBDYCAEEAIAggBnI2AozQgIAAIAQgBTYCGCAEIAQ2AgggBCAENgIMDAQLIABBAEEZIANBAXZrIANBH0YbdCEDIAUoAgAhCANAIAgiBSgCBEF4cSAARg0DIANBHXYhCCADQQF0IQMgBSAIQQRxakEQaiIGKAIAIggNAAsgBiAENgIAIAQgBTYCGCAEIAQ2AgwgBCAENgIIDAMLIAUoAggiAyACNgIMIAUgAjYCCCACQQA2AhggAiAFNgIMIAIgAzYCCAsgC0EIaiEDDAULIAUoAggiAyAENgIMIAUgBDYCCCAEQQA2AhggBCAFNgIMIAQgAzYCCAtBACgClNCAgAAiAyACTQ0AQQAoAqDQgIAAIgQgAmoiBSADIAJrIgNBAXI2AgRBACADNgKU0ICAAEEAIAU2AqDQgIAAIAQgAkEDcjYCBCAEQQhqIQMMAwtBACEDQQBBMDYC+NOAgAAMAgsCQCALRQ0AAkACQCAIIAgoAhwiBUECdEG40oCAAGoiAygCAEcNACADIAA2AgAgAA0BQQAgB0F+IAV3cSIHNgKM0ICAAAwCCyALQRBBFCALKAIQIAhGG2ogADYCACAARQ0BCyAAIAs2AhgCQCAIKAIQIgNFDQAgACADNgIQIAMgADYCGAsgCEEUaigCACIDRQ0AIABBFGogAzYCACADIAA2AhgLAkACQCAEQQ9LDQAgCCAEIAJqIgNBA3I2AgQgCCADaiIDIAMoAgRBAXI2AgQMAQsgCCACaiIAIARBAXI2AgQgCCACQQNyNgIEIAAgBGogBDYCAAJAIARB/wFLDQAgBEF4cUGw0ICAAGohAwJAAkBBACgCiNCAgAAiBUEBIARBA3Z0IgRxDQBBACAFIARyNgKI0ICAACADIQQMAQsgAygCCCEECyAEIAA2AgwgAyAANgIIIAAgAzYCDCAAIAQ2AggMAQtBHyEDAkAgBEH///8HSw0AIARBCHYiAyADQYD+P2pBEHZBCHEiA3QiBSAFQYDgH2pBEHZBBHEiBXQiAiACQYCAD2pBEHZBAnEiAnRBD3YgAyAFciACcmsiA0EBdCAEIANBFWp2QQFxckEcaiEDCyAAIAM2AhwgAEIANwIQIANBAnRBuNKAgABqIQUCQCAHQQEgA3QiAnENACAFIAA2AgBBACAHIAJyNgKM0ICAACAAIAU2AhggACAANgIIIAAgADYCDAwBCyAEQQBBGSADQQF2ayADQR9GG3QhAyAFKAIAIQICQANAIAIiBSgCBEF4cSAERg0BIANBHXYhAiADQQF0IQMgBSACQQRxakEQaiIGKAIAIgINAAsgBiAANgIAIAAgBTYCGCAAIAA2AgwgACAANgIIDAELIAUoAggiAyAANgIMIAUgADYCCCAAQQA2AhggACAFNgIMIAAgAzYCCAsgCEEIaiEDDAELAkAgCkUNAAJAAkAgACAAKAIcIgVBAnRBuNKAgABqIgMoAgBHDQAgAyAINgIAIAgNAUEAIAlBfiAFd3E2AozQgIAADAILIApBEEEUIAooAhAgAEYbaiAINgIAIAhFDQELIAggCjYCGAJAIAAoAhAiA0UNACAIIAM2AhAgAyAINgIYCyAAQRRqKAIAIgNFDQAgCEEUaiADNgIAIAMgCDYCGAsCQAJAIARBD0sNACAAIAQgAmoiA0EDcjYCBCAAIANqIgMgAygCBEEBcjYCBAwBCyAAIAJqIgUgBEEBcjYCBCAAIAJBA3I2AgQgBSAEaiAENgIAAkAgB0UNACAHQXhxQbDQgIAAaiECQQAoApzQgIAAIQMCQAJAQQEgB0EDdnQiCCAGcQ0AQQAgCCAGcjYCiNCAgAAgAiEIDAELIAIoAgghCAsgCCADNgIMIAIgAzYCCCADIAI2AgwgAyAINgIIC0EAIAU2ApzQgIAAQQAgBDYCkNCAgAALIABBCGohAwsgAUEQaiSAgICAACADCwoAIAAQyYCAgAAL4g0BB38CQCAARQ0AIABBeGoiASAAQXxqKAIAIgJBeHEiAGohAwJAIAJBAXENACACQQNxRQ0BIAEgASgCACICayIBQQAoApjQgIAAIgRJDQEgAiAAaiEAAkAgAUEAKAKc0ICAAEYNAAJAIAJB/wFLDQAgASgCCCIEIAJBA3YiBUEDdEGw0ICAAGoiBkYaAkAgASgCDCICIARHDQBBAEEAKAKI0ICAAEF+IAV3cTYCiNCAgAAMAwsgAiAGRhogAiAENgIIIAQgAjYCDAwCCyABKAIYIQcCQAJAIAEoAgwiBiABRg0AIAEoAggiAiAESRogBiACNgIIIAIgBjYCDAwBCwJAIAFBFGoiAigCACIEDQAgAUEQaiICKAIAIgQNAEEAIQYMAQsDQCACIQUgBCIGQRRqIgIoAgAiBA0AIAZBEGohAiAGKAIQIgQNAAsgBUEANgIACyAHRQ0BAkACQCABIAEoAhwiBEECdEG40oCAAGoiAigCAEcNACACIAY2AgAgBg0BQQBBACgCjNCAgABBfiAEd3E2AozQgIAADAMLIAdBEEEUIAcoAhAgAUYbaiAGNgIAIAZFDQILIAYgBzYCGAJAIAEoAhAiAkUNACAGIAI2AhAgAiAGNgIYCyABKAIUIgJFDQEgBkEUaiACNgIAIAIgBjYCGAwBCyADKAIEIgJBA3FBA0cNACADIAJBfnE2AgRBACAANgKQ0ICAACABIABqIAA2AgAgASAAQQFyNgIEDwsgASADTw0AIAMoAgQiAkEBcUUNAAJAAkAgAkECcQ0AAkAgA0EAKAKg0ICAAEcNAEEAIAE2AqDQgIAAQQBBACgClNCAgAAgAGoiADYClNCAgAAgASAAQQFyNgIEIAFBACgCnNCAgABHDQNBAEEANgKQ0ICAAEEAQQA2ApzQgIAADwsCQCADQQAoApzQgIAARw0AQQAgATYCnNCAgABBAEEAKAKQ0ICAACAAaiIANgKQ0ICAACABIABBAXI2AgQgASAAaiAANgIADwsgAkF4cSAAaiEAAkACQCACQf8BSw0AIAMoAggiBCACQQN2IgVBA3RBsNCAgABqIgZGGgJAIAMoAgwiAiAERw0AQQBBACgCiNCAgABBfiAFd3E2AojQgIAADAILIAIgBkYaIAIgBDYCCCAEIAI2AgwMAQsgAygCGCEHAkACQCADKAIMIg
YgA0YNACADKAIIIgJBACgCmNCAgABJGiAGIAI2AgggAiAGNgIMDAELAkAgA0EUaiICKAIAIgQNACADQRBqIgIoAgAiBA0AQQAhBgwBCwNAIAIhBSAEIgZBFGoiAigCACIEDQAgBkEQaiECIAYoAhAiBA0ACyAFQQA2AgALIAdFDQACQAJAIAMgAygCHCIEQQJ0QbjSgIAAaiICKAIARw0AIAIgBjYCACAGDQFBAEEAKAKM0ICAAEF+IAR3cTYCjNCAgAAMAgsgB0EQQRQgBygCECADRhtqIAY2AgAgBkUNAQsgBiAHNgIYAkAgAygCECICRQ0AIAYgAjYCECACIAY2AhgLIAMoAhQiAkUNACAGQRRqIAI2AgAgAiAGNgIYCyABIABqIAA2AgAgASAAQQFyNgIEIAFBACgCnNCAgABHDQFBACAANgKQ0ICAAA8LIAMgAkF+cTYCBCABIABqIAA2AgAgASAAQQFyNgIECwJAIABB/wFLDQAgAEF4cUGw0ICAAGohAgJAAkBBACgCiNCAgAAiBEEBIABBA3Z0IgBxDQBBACAEIAByNgKI0ICAACACIQAMAQsgAigCCCEACyAAIAE2AgwgAiABNgIIIAEgAjYCDCABIAA2AggPC0EfIQICQCAAQf///wdLDQAgAEEIdiICIAJBgP4/akEQdkEIcSICdCIEIARBgOAfakEQdkEEcSIEdCIGIAZBgIAPakEQdkECcSIGdEEPdiACIARyIAZyayICQQF0IAAgAkEVanZBAXFyQRxqIQILIAEgAjYCHCABQgA3AhAgAkECdEG40oCAAGohBAJAAkBBACgCjNCAgAAiBkEBIAJ0IgNxDQAgBCABNgIAQQAgBiADcjYCjNCAgAAgASAENgIYIAEgATYCCCABIAE2AgwMAQsgAEEAQRkgAkEBdmsgAkEfRht0IQIgBCgCACEGAkADQCAGIgQoAgRBeHEgAEYNASACQR12IQYgAkEBdCECIAQgBkEEcWpBEGoiAygCACIGDQALIAMgATYCACABIAQ2AhggASABNgIMIAEgATYCCAwBCyAEKAIIIgAgATYCDCAEIAE2AgggAUEANgIYIAEgBDYCDCABIAA2AggLQQBBACgCqNCAgABBf2oiAUF/IAEbNgKo0ICAAAsLBAAAAAtOAAJAIAANAD8AQRB0DwsCQCAAQf//A3ENACAAQX9MDQACQCAAQRB2QAAiAEF/Rw0AQQBBMDYC+NOAgABBfw8LIABBEHQPCxDKgICAAAAL8gICA38BfgJAIAJFDQAgACABOgAAIAIgAGoiA0F/aiABOgAAIAJBA0kNACAAIAE6AAIgACABOgABIANBfWogAToAACADQX5qIAE6AAAgAkEHSQ0AIAAgAToAAyADQXxqIAE6AAAgAkEJSQ0AIABBACAAa0EDcSIEaiIDIAFB/wFxQYGChAhsIgE2AgAgAyACIARrQXxxIgRqIgJBfGogATYCACAEQQlJDQAgAyABNgIIIAMgATYCBCACQXhqIAE2AgAgAkF0aiABNgIAIARBGUkNACADIAE2AhggAyABNgIUIAMgATYCECADIAE2AgwgAkFwaiABNgIAIAJBbGogATYCACACQWhqIAE2AgAgAkFkaiABNgIAIAQgA0EEcUEYciIFayICQSBJDQAgAa1CgYCAgBB+IQYgAyAFaiEBA0AgASAGNwMYIAEgBjcDECABIAY3AwggASAGNwMAIAFBIGohASACQWBqIgJBH0sNAAsLIAALC45IAQBBgAgLhkgBAAAAAgAAAAMAAAAAAAAAAAAAAAQAAAAFAAAAAAAAAAAAAAAGAAAABwAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEludmFsaWQgY2hhciBpbiB1cmwgcXVlcnkAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9ib2R5AENvbnRlbnQtTGVuZ3RoIG92ZXJmbG93AENodW5rIHNpemUgb3ZlcmZsb3cAUmVzcG9uc2Ugb3ZlcmZsb3cASW52YWxpZCBtZXRob2QgZm9yIEhUVFAveC54IHJlcXVlc3QASW52YWxpZCBtZXRob2QgZm9yIFJUU1AveC54IHJlcXVlc3QARXhwZWN0ZWQgU09VUkNFIG1ldGhvZCBmb3IgSUNFL3gueCByZXF1ZXN0AEludmFsaWQgY2hhciBpbiB1cmwgZnJhZ21lbnQgc3RhcnQARXhwZWN0ZWQgZG90AFNwYW4gY2FsbGJhY2sgZXJyb3IgaW4gb25fc3RhdHVzAEludmFsaWQgcmVzcG9uc2Ugc3RhdHVzAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5rIGV4dGVuc2lvbnMAVXNlciBjYWxsYmFjayBlcnJvcgBgb25fcmVzZXRgIGNhbGxiYWNrIGVycm9yAGBvbl9jaHVua19oZWFkZXJgIGNhbGxiYWNrIGVycm9yAGBvbl9tZXNzYWdlX2JlZ2luYCBjYWxsYmFjayBlcnJvcgBgb25fY2h1bmtfZXh0ZW5zaW9uX3ZhbHVlYCBjYWxsYmFjayBlcnJvcgBgb25fc3RhdHVzX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fdmVyc2lvbl9jb21wbGV0ZWAgY2FsbGJhY2sgZXJyb3IAYG9uX3VybF9jb21wbGV0ZWAgY2FsbGJhY2sgZXJyb3IAYG9uX2NodW5rX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25faGVhZGVyX3ZhbHVlX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fbWVzc2FnZV9jb21wbGV0ZWAgY2FsbGJhY2sgZXJyb3IAYG9uX21ldGhvZF9jb21wbGV0ZWAgY2FsbGJhY2sgZXJyb3IAYG9uX2hlYWRlcl9maWVsZF9jb21wbGV0ZWAgY2FsbGJhY2sgZXJyb3IAYG9uX2NodW5rX2V4dGVuc2lvbl9uYW1lYCBjYWxsYmFjayBlcnJvcgBVbmV4cGVjdGVkIGNoYXIgaW4gdXJsIHNlcnZlcgBJbnZhbGlkIGhlYWRlciB2YWx1ZSBjaGFyAEludmFsaWQgaGVhZGVyIGZpZWxkIGNoYXIAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl92ZXJzaW9uAEludmFsaWQgbWlub3IgdmVyc2lvbgBJbnZhbGlkIG1ham9yIHZlcnNpb24ARXhwZWN0ZWQgc3BhY2UgYWZ0ZXIgdmVyc2lvbgBFeHBlY3RlZCBDUkxGIGFmdGVyIHZlcnNpb24ASW52YWxpZCBIVFRQIHZlcnNpb24ASW52YWxpZCBoZWFkZXIgdG9rZW4AU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl91cmwASW52YWxpZCBjaGFyYWN0ZXJzIGluIHVybABVbmV4cGVjdGVkIHN0YXJ0IGNoYXIgaW4gdXJsAERvdWJsZSBAIGluIHVybABFbXB0eSBDb
250ZW50LUxlbmd0aABJbnZhbGlkIGNoYXJhY3RlciBpbiBDb250ZW50LUxlbmd0aABEdXBsaWNhdGUgQ29udGVudC1MZW5ndGgASW52YWxpZCBjaGFyIGluIHVybCBwYXRoAENvbnRlbnQtTGVuZ3RoIGNhbid0IGJlIHByZXNlbnQgd2l0aCBUcmFuc2Zlci1FbmNvZGluZwBJbnZhbGlkIGNoYXJhY3RlciBpbiBjaHVuayBzaXplAFNwYW4gY2FsbGJhY2sgZXJyb3IgaW4gb25faGVhZGVyX3ZhbHVlAFNwYW4gY2FsbGJhY2sgZXJyb3IgaW4gb25fY2h1bmtfZXh0ZW5zaW9uX3ZhbHVlAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5rIGV4dGVuc2lvbnMgdmFsdWUATWlzc2luZyBleHBlY3RlZCBMRiBhZnRlciBoZWFkZXIgdmFsdWUASW52YWxpZCBgVHJhbnNmZXItRW5jb2RpbmdgIGhlYWRlciB2YWx1ZQBJbnZhbGlkIGNoYXJhY3RlciBpbiBjaHVuayBleHRlbnNpb25zIHF1b3RlIHZhbHVlAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5rIGV4dGVuc2lvbnMgcXVvdGVkIHZhbHVlAFBhdXNlZCBieSBvbl9oZWFkZXJzX2NvbXBsZXRlAEludmFsaWQgRU9GIHN0YXRlAG9uX3Jlc2V0IHBhdXNlAG9uX2NodW5rX2hlYWRlciBwYXVzZQBvbl9tZXNzYWdlX2JlZ2luIHBhdXNlAG9uX2NodW5rX2V4dGVuc2lvbl92YWx1ZSBwYXVzZQBvbl9zdGF0dXNfY29tcGxldGUgcGF1c2UAb25fdmVyc2lvbl9jb21wbGV0ZSBwYXVzZQBvbl91cmxfY29tcGxldGUgcGF1c2UAb25fY2h1bmtfY29tcGxldGUgcGF1c2UAb25faGVhZGVyX3ZhbHVlX2NvbXBsZXRlIHBhdXNlAG9uX21lc3NhZ2VfY29tcGxldGUgcGF1c2UAb25fbWV0aG9kX2NvbXBsZXRlIHBhdXNlAG9uX2hlYWRlcl9maWVsZF9jb21wbGV0ZSBwYXVzZQBvbl9jaHVua19leHRlbnNpb25fbmFtZSBwYXVzZQBVbmV4cGVjdGVkIHNwYWNlIGFmdGVyIHN0YXJ0IGxpbmUAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9jaHVua19leHRlbnNpb25fbmFtZQBJbnZhbGlkIGNoYXJhY3RlciBpbiBjaHVuayBleHRlbnNpb25zIG5hbWUAUGF1c2Ugb24gQ09OTkVDVC9VcGdyYWRlAFBhdXNlIG9uIFBSSS9VcGdyYWRlAEV4cGVjdGVkIEhUVFAvMiBDb25uZWN0aW9uIFByZWZhY2UAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9tZXRob2QARXhwZWN0ZWQgc3BhY2UgYWZ0ZXIgbWV0aG9kAFNwYW4gY2FsbGJhY2sgZXJyb3IgaW4gb25faGVhZGVyX2ZpZWxkAFBhdXNlZABJbnZhbGlkIHdvcmQgZW5jb3VudGVyZWQASW52YWxpZCBtZXRob2QgZW5jb3VudGVyZWQAVW5leHBlY3RlZCBjaGFyIGluIHVybCBzY2hlbWEAUmVxdWVzdCBoYXMgaW52YWxpZCBgVHJhbnNmZXItRW5jb2RpbmdgAFNXSVRDSF9QUk9YWQBVU0VfUFJPWFkATUtBQ1RJVklUWQBVTlBST0NFU1NBQkxFX0VOVElUWQBDT1BZAE1PVkVEX1BFUk1BTkVOVExZAFRPT19FQVJMWQBOT1RJRlkARkFJTEVEX0RFUEVOREVOQ1kAQkFEX0dBVEVXQVkAUExBWQBQVVQAQ0hFQ0tPVVQAR0FURVdBWV9USU1FT1VUAFJFUVVFU1RfVElNRU9VVABORVRXT1JLX0NPTk5FQ1RfVElNRU9VVABDT05ORUNUSU9OX1RJTUVPVVQATE9HSU5fVElNRU9VVABORVRXT1JLX1JFQURfVElNRU9VVABQT1NUAE1JU0RJUkVDVEVEX1JFUVVFU1QAQ0xJRU5UX0NMT1NFRF9SRVFVRVNUAENMSUVOVF9DTE9TRURfTE9BRF9CQUxBTkNFRF9SRVFVRVNUAEJBRF9SRVFVRVNUAEhUVFBfUkVRVUVTVF9TRU5UX1RPX0hUVFBTX1BPUlQAUkVQT1JUAElNX0FfVEVBUE9UAFJFU0VUX0NPTlRFTlQATk9fQ09OVEVOVABQQVJUSUFMX0NPTlRFTlQASFBFX0lOVkFMSURfQ09OU1RBTlQASFBFX0NCX1JFU0VUAEdFVABIUEVfU1RSSUNUAENPTkZMSUNUAFRFTVBPUkFSWV9SRURJUkVDVABQRVJNQU5FTlRfUkVESVJFQ1QAQ09OTkVDVABNVUxUSV9TVEFUVVMASFBFX0lOVkFMSURfU1RBVFVTAFRPT19NQU5ZX1JFUVVFU1RTAEVBUkxZX0hJTlRTAFVOQVZBSUxBQkxFX0ZPUl9MRUdBTF9SRUFTT05TAE9QVElPTlMAU1dJVENISU5HX1BST1RPQ09MUwBWQVJJQU5UX0FMU09fTkVHT1RJQVRFUwBNVUxUSVBMRV9DSE9JQ0VTAElOVEVSTkFMX1NFUlZFUl9FUlJPUgBXRUJfU0VSVkVSX1VOS05PV05fRVJST1IAUkFJTEdVTl9FUlJPUgBJREVOVElUWV9QUk9WSURFUl9BVVRIRU5USUNBVElPTl9FUlJPUgBTU0xfQ0VSVElGSUNBVEVfRVJST1IASU5WQUxJRF9YX0ZPUldBUkRFRF9GT1IAU0VUX1BBUkFNRVRFUgBHRVRfUEFSQU1FVEVSAEhQRV9VU0VSAFNFRV9PVEhFUgBIUEVfQ0JfQ0hVTktfSEVBREVSAE1LQ0FMRU5EQVIAU0VUVVAAV0VCX1NFUlZFUl9JU19ET1dOAFRFQVJET1dOAEhQRV9DTE9TRURfQ09OTkVDVElPTgBIRVVSSVNUSUNfRVhQSVJBVElPTgBESVNDT05ORUNURURfT1BFUkFUSU9OAE5PTl9BVVRIT1JJVEFUSVZFX0lORk9STUFUSU9OAEhQRV9JTlZBTElEX1ZFUlNJT04ASFBFX0NCX01FU1NBR0VfQkVHSU4AU0lURV9JU19GUk9aRU4ASFBFX0lOVkFMSURfSEVBREVSX1RPS0VOAElOVkFMSURfVE9LRU4ARk9SQklEREVOAEVOSEFOQ0VfWU9VUl9DQUxNAEhQRV9JTlZBTElEX1VSTABCTE9DS0VEX0JZX1BBUkVOVEFMX0NPTlRST0wATUtDT0wAQUNMAEhQRV9JTlRFUk5BTABSRVFVRVNUX0hFQURFUl9GSUVMRFNfVE9PX0xBUkdFX1VOT0ZGSUNJQUwASFBFX09LAFVOTElOSwBVTkxPQ0sAUFJJAFJFVFJZX1dJVEgASFBFX0lOVkFMSURfQ09OVEVOVF9MRU5HVEgA
SFBFX1VORVhQRUNURURfQ09OVEVOVF9MRU5HVEgARkxVU0gAUFJPUFBBVENIAE0tU0VBUkNIAFVSSV9UT09fTE9ORwBQUk9DRVNTSU5HAE1JU0NFTExBTkVPVVNfUEVSU0lTVEVOVF9XQVJOSU5HAE1JU0NFTExBTkVPVVNfV0FSTklORwBIUEVfSU5WQUxJRF9UUkFOU0ZFUl9FTkNPRElORwBFeHBlY3RlZCBDUkxGAEhQRV9JTlZBTElEX0NIVU5LX1NJWkUATU9WRQBDT05USU5VRQBIUEVfQ0JfU1RBVFVTX0NPTVBMRVRFAEhQRV9DQl9IRUFERVJTX0NPTVBMRVRFAEhQRV9DQl9WRVJTSU9OX0NPTVBMRVRFAEhQRV9DQl9VUkxfQ09NUExFVEUASFBFX0NCX0NIVU5LX0NPTVBMRVRFAEhQRV9DQl9IRUFERVJfVkFMVUVfQ09NUExFVEUASFBFX0NCX0NIVU5LX0VYVEVOU0lPTl9WQUxVRV9DT01QTEVURQBIUEVfQ0JfQ0hVTktfRVhURU5TSU9OX05BTUVfQ09NUExFVEUASFBFX0NCX01FU1NBR0VfQ09NUExFVEUASFBFX0NCX01FVEhPRF9DT01QTEVURQBIUEVfQ0JfSEVBREVSX0ZJRUxEX0NPTVBMRVRFAERFTEVURQBIUEVfSU5WQUxJRF9FT0ZfU1RBVEUASU5WQUxJRF9TU0xfQ0VSVElGSUNBVEUAUEFVU0UATk9fUkVTUE9OU0UAVU5TVVBQT1JURURfTUVESUFfVFlQRQBHT05FAE5PVF9BQ0NFUFRBQkxFAFNFUlZJQ0VfVU5BVkFJTEFCTEUAUkFOR0VfTk9UX1NBVElTRklBQkxFAE9SSUdJTl9JU19VTlJFQUNIQUJMRQBSRVNQT05TRV9JU19TVEFMRQBQVVJHRQBNRVJHRQBSRVFVRVNUX0hFQURFUl9GSUVMRFNfVE9PX0xBUkdFAFJFUVVFU1RfSEVBREVSX1RPT19MQVJHRQBQQVlMT0FEX1RPT19MQVJHRQBJTlNVRkZJQ0lFTlRfU1RPUkFHRQBIUEVfUEFVU0VEX1VQR1JBREUASFBFX1BBVVNFRF9IMl9VUEdSQURFAFNPVVJDRQBBTk5PVU5DRQBUUkFDRQBIUEVfVU5FWFBFQ1RFRF9TUEFDRQBERVNDUklCRQBVTlNVQlNDUklCRQBSRUNPUkQASFBFX0lOVkFMSURfTUVUSE9EAE5PVF9GT1VORABQUk9QRklORABVTkJJTkQAUkVCSU5EAFVOQVVUSE9SSVpFRABNRVRIT0RfTk9UX0FMTE9XRUQASFRUUF9WRVJTSU9OX05PVF9TVVBQT1JURUQAQUxSRUFEWV9SRVBPUlRFRABBQ0NFUFRFRABOT1RfSU1QTEVNRU5URUQATE9PUF9ERVRFQ1RFRABIUEVfQ1JfRVhQRUNURUQASFBFX0xGX0VYUEVDVEVEAENSRUFURUQASU1fVVNFRABIUEVfUEFVU0VEAFRJTUVPVVRfT0NDVVJFRABQQVlNRU5UX1JFUVVJUkVEAFBSRUNPTkRJVElPTl9SRVFVSVJFRABQUk9YWV9BVVRIRU5USUNBVElPTl9SRVFVSVJFRABORVRXT1JLX0FVVEhFTlRJQ0FUSU9OX1JFUVVJUkVEAExFTkdUSF9SRVFVSVJFRABTU0xfQ0VSVElGSUNBVEVfUkVRVUlSRUQAVVBHUkFERV9SRVFVSVJFRABQQUdFX0VYUElSRUQAUFJFQ09ORElUSU9OX0ZBSUxFRABFWFBFQ1RBVElPTl9GQUlMRUQAUkVWQUxJREFUSU9OX0ZBSUxFRABTU0xfSEFORFNIQUtFX0ZBSUxFRABMT0NLRUQAVFJBTlNGT1JNQVRJT05fQVBQTElFRABOT1RfTU9ESUZJRUQATk9UX0VYVEVOREVEAEJBTkRXSURUSF9MSU1JVF9FWENFRURFRABTSVRFX0lTX09WRVJMT0FERUQASEVBRABFeHBlY3RlZCBIVFRQLwAAXhMAACYTAAAwEAAA8BcAAJ0TAAAVEgAAORcAAPASAAAKEAAAdRIAAK0SAACCEwAATxQAAH8QAACgFQAAIxQAAIkSAACLFAAATRUAANQRAADPFAAAEBgAAMkWAADcFgAAwREAAOAXAAC7FAAAdBQAAHwVAADlFAAACBcAAB8QAABlFQAAoxQAACgVAAACFQAAmRUAACwQAACLGQAATw8AANQOAABqEAAAzhAAAAIXAACJDgAAbhMAABwTAABmFAAAVhcAAMETAADNEwAAbBMAAGgXAABmFwAAXxcAACITAADODwAAaQ4AANgOAABjFgAAyxMAAKoOAAAoFwAAJhcAAMUTAABdFgAA6BEAAGcTAABlEwAA8hYAAHMTAAAdFwAA+RYAAPMRAADPDgAAzhUAAAwSAACzEQAApREAAGEQAAAyFwAAuxMAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAQIBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAIDAgICAgIAAAICAAICAAICAgICAgICAgIABAAAAAAAAgICAgICAgICAgICAgICAgICAgICAgICAgIAAAACAgICAgICAgICAgICAgICAgICAgICAgICAgICAgACAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAACAAICAgICAAACAgACAgACAgICAgICAgICAAMABAAAAAICAgICAgICAgICAgICAgICAgICAgICAgICAAAAAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAAgACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAbG9zZWVlcC1hbGl2ZQAAAAAAAAAAAAA
AAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEBAQEBAQEBAQEBAQIBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBY2h1bmtlZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQEAAQEBAQEAAAEBAAEBAAEBAQEBAQEBAQEAAAAAAAAAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAAABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABlY3Rpb25lbnQtbGVuZ3Rob25yb3h5LWNvbm5lY3Rpb24AAAAAAAAAAAAAAAAAAAByYW5zZmVyLWVuY29kaW5ncGdyYWRlDQoNCg0KU00NCg0KVFRQL0NFL1RTUC8AAAAAAAAAAAAAAAABAgABAwAAAAAAAAAAAAAAAAAAAAAAAAQBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAAAAAAAAAAAAQIAAQMAAAAAAAAAAAAAAAAAAAAAAAAEAQEFAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAAAAAAAAAEAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAEBAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAAAAAAAAAAAAAQAAAgAAAAAAAAAAAAAAAAAAAAAAAAMEAAAEBAQEBAQEBAQEBAUEBAQEBAQEBAQEBAQABAAGBwQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEAAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAADAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwAAAAAAAAMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAABAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAIAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMAAAAAAAADAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABOT1VOQ0VFQ0tPVVRORUNURVRFQ1JJQkVMVVNIRVRFQURTRUFSQ0hSR0VDVElWSVRZTEVOREFSVkVPVElGWVBUSU9OU0NIU0VBWVNUQVRDSEdFT1JESVJFQ1RPUlRSQ0hQQVJBTUVURVJVUkNFQlNDUklCRUFSRE9XTkFDRUlORE5LQ0tVQlNDUklCRUhUVFAvQURUUC8=", "base64"); + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/llhttp/llhttp_simd-wasm.js +var 
require_llhttp_simd_wasm = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/llhttp/llhttp_simd-wasm.js"(exports, module2) { + var { Buffer: Buffer2 } = require("node:buffer"); + module2.exports = Buffer2.from("AGFzbQEAAAABMAhgAX8Bf2ADf39/AX9gBH9/f38Bf2AAAGADf39/AGABfwBgAn9/AGAGf39/f39/AALLAQgDZW52GHdhc21fb25faGVhZGVyc19jb21wbGV0ZQACA2VudhV3YXNtX29uX21lc3NhZ2VfYmVnaW4AAANlbnYLd2FzbV9vbl91cmwAAQNlbnYOd2FzbV9vbl9zdGF0dXMAAQNlbnYUd2FzbV9vbl9oZWFkZXJfZmllbGQAAQNlbnYUd2FzbV9vbl9oZWFkZXJfdmFsdWUAAQNlbnYMd2FzbV9vbl9ib2R5AAEDZW52GHdhc21fb25fbWVzc2FnZV9jb21wbGV0ZQAAA0ZFAwMEAAAFAAAAAAAABQEFAAUFBQAABgAAAAAGBgYGAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAAABAQcAAAUFAwABBAUBcAESEgUDAQACBggBfwFBgNQECwfRBSIGbWVtb3J5AgALX2luaXRpYWxpemUACRlfX2luZGlyZWN0X2Z1bmN0aW9uX3RhYmxlAQALbGxodHRwX2luaXQAChhsbGh0dHBfc2hvdWxkX2tlZXBfYWxpdmUAQQxsbGh0dHBfYWxsb2MADAZtYWxsb2MARgtsbGh0dHBfZnJlZQANBGZyZWUASA9sbGh0dHBfZ2V0X3R5cGUADhVsbGh0dHBfZ2V0X2h0dHBfbWFqb3IADxVsbGh0dHBfZ2V0X2h0dHBfbWlub3IAEBFsbGh0dHBfZ2V0X21ldGhvZAARFmxsaHR0cF9nZXRfc3RhdHVzX2NvZGUAEhJsbGh0dHBfZ2V0X3VwZ3JhZGUAEwxsbGh0dHBfcmVzZXQAFA5sbGh0dHBfZXhlY3V0ZQAVFGxsaHR0cF9zZXR0aW5nc19pbml0ABYNbGxodHRwX2ZpbmlzaAAXDGxsaHR0cF9wYXVzZQAYDWxsaHR0cF9yZXN1bWUAGRtsbGh0dHBfcmVzdW1lX2FmdGVyX3VwZ3JhZGUAGhBsbGh0dHBfZ2V0X2Vycm5vABsXbGxodHRwX2dldF9lcnJvcl9yZWFzb24AHBdsbGh0dHBfc2V0X2Vycm9yX3JlYXNvbgAdFGxsaHR0cF9nZXRfZXJyb3JfcG9zAB4RbGxodHRwX2Vycm5vX25hbWUAHxJsbGh0dHBfbWV0aG9kX25hbWUAIBJsbGh0dHBfc3RhdHVzX25hbWUAIRpsbGh0dHBfc2V0X2xlbmllbnRfaGVhZGVycwAiIWxsaHR0cF9zZXRfbGVuaWVudF9jaHVua2VkX2xlbmd0aAAjHWxsaHR0cF9zZXRfbGVuaWVudF9rZWVwX2FsaXZlACQkbGxodHRwX3NldF9sZW5pZW50X3RyYW5zZmVyX2VuY29kaW5nACUYbGxodHRwX21lc3NhZ2VfbmVlZHNfZW9mAD8JFwEAQQELEQECAwQFCwYHNTk3MS8tJyspCrLgAkUCAAsIABCIgICAAAsZACAAEMKAgIAAGiAAIAI2AjggACABOgAoCxwAIAAgAC8BMiAALQAuIAAQwYCAgAAQgICAgAALKgEBf0HAABDGgICAACIBEMKAgIAAGiABQYCIgIAANgI4IAEgADoAKCABCwoAIAAQyICAgAALBwAgAC0AKAsHACAALQAqCwcAIAAtACsLBwAgAC0AKQsHACAALwEyCwcAIAAtAC4LRQEEfyAAKAIYIQEgAC0ALSECIAAtACghAyAAKAI4IQQgABDCgICAABogACAENgI4IAAgAzoAKCAAIAI6AC0gACABNgIYCxEAIAAgASABIAJqEMOAgIAACxAAIABBAEHcABDMgICAABoLZwEBf0EAIQECQCAAKAIMDQACQAJAAkACQCAALQAvDgMBAAMCCyAAKAI4IgFFDQAgASgCLCIBRQ0AIAAgARGAgICAAAAiAQ0DC0EADwsQyoCAgAAACyAAQcOWgIAANgIQQQ4hAQsgAQseAAJAIAAoAgwNACAAQdGbgIAANgIQIABBFTYCDAsLFgACQCAAKAIMQRVHDQAgAEEANgIMCwsWAAJAIAAoAgxBFkcNACAAQQA2AgwLCwcAIAAoAgwLBwAgACgCEAsJACAAIAE2AhALBwAgACgCFAsiAAJAIABBJEkNABDKgICAAAALIABBAnRBoLOAgABqKAIACyIAAkAgAEEuSQ0AEMqAgIAAAAsgAEECdEGwtICAAGooAgAL7gsBAX9B66iAgAAhAQJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAIABBnH9qDvQDY2IAAWFhYWFhYQIDBAVhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhBgcICQoLDA0OD2FhYWFhEGFhYWFhYWFhYWFhEWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYRITFBUWFxgZGhthYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2YTc4OTphYWFhYWFhYTthYWE8YWFhYT0+P2FhYWFhYWFhQGFhQWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYUJDREVGR0hJSktMTU5PUFFSU2FhYWFhYWFhVFVWV1hZWlthXF1hYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFeYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhX2BhC0Hhp4CAAA8LQaShgIAADwtBy6yAgAAPC0H+sYCAAA8LQcCkgIAADwtBq6SAgAAPC0GNqICAAA8LQeKmgIAADwtBgLCAgAAPC0G5r4CAAA
8LQdekgIAADwtB75+AgAAPC0Hhn4CAAA8LQfqfgIAADwtB8qCAgAAPC0Gor4CAAA8LQa6ygIAADwtBiLCAgAAPC0Hsp4CAAA8LQYKigIAADwtBjp2AgAAPC0HQroCAAA8LQcqjgIAADwtBxbKAgAAPC0HfnICAAA8LQdKcgIAADwtBxKCAgAAPC0HXoICAAA8LQaKfgIAADwtB7a6AgAAPC0GrsICAAA8LQdSlgIAADwtBzK6AgAAPC0H6roCAAA8LQfyrgIAADwtB0rCAgAAPC0HxnYCAAA8LQbuggIAADwtB96uAgAAPC0GQsYCAAA8LQdexgIAADwtBoq2AgAAPC0HUp4CAAA8LQeCrgIAADwtBn6yAgAAPC0HrsYCAAA8LQdWfgIAADwtByrGAgAAPC0HepYCAAA8LQdSegIAADwtB9JyAgAAPC0GnsoCAAA8LQbGdgIAADwtBoJ2AgAAPC0G5sYCAAA8LQbywgIAADwtBkqGAgAAPC0GzpoCAAA8LQemsgIAADwtBrJ6AgAAPC0HUq4CAAA8LQfemgIAADwtBgKaAgAAPC0GwoYCAAA8LQf6egIAADwtBjaOAgAAPC0GJrYCAAA8LQfeigIAADwtBoLGAgAAPC0Gun4CAAA8LQcalgIAADwtB6J6AgAAPC0GTooCAAA8LQcKvgIAADwtBw52AgAAPC0GLrICAAA8LQeGdgIAADwtBja+AgAAPC0HqoYCAAA8LQbStgIAADwtB0q+AgAAPC0HfsoCAAA8LQdKygIAADwtB8LCAgAAPC0GpooCAAA8LQfmjgIAADwtBmZ6AgAAPC0G1rICAAA8LQZuwgIAADwtBkrKAgAAPC0G2q4CAAA8LQcKigIAADwtB+LKAgAAPC0GepYCAAA8LQdCigIAADwtBup6AgAAPC0GBnoCAAA8LEMqAgIAAAAtB1qGAgAAhAQsgAQsWACAAIAAtAC1B/gFxIAFBAEdyOgAtCxkAIAAgAC0ALUH9AXEgAUEAR0EBdHI6AC0LGQAgACAALQAtQfsBcSABQQBHQQJ0cjoALQsZACAAIAAtAC1B9wFxIAFBAEdBA3RyOgAtCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAgAiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCBCIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQcaRgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIwIgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAggiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEH2ioCAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCNCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIMIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABB7ZqAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAjgiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCECIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQZWQgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAI8IgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAhQiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEGqm4CAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCQCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIYIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABB7ZOAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAkQiBEUNACAAIAQRgICAgAAAIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCJCIERQ0AIAAgBBGAgICAAAAhAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIsIgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAigiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEH2iICAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCUCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIcIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABBwpmAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAkgiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCICIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQZSUgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAJMIgRFDQAgACAEEYCAgIAAACEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAlQiBEUNACAAIAQRgICAgAAAIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCWCIERQ0AIAAgBBGAgICAAAAhAwsgAwtFAQF/AkACQCAALwEwQRRxQRRHDQBBASEDIAAtAChBAUYNASAALwEyQeUARiEDDAELIAAtAClBBUYhAwsgACADOgAuQQAL/gEBA39BASEDAkAgAC8BMCIEQQhxDQAgACkDIEIAUiEDCwJAAkAgAC0ALkUNAEEBIQUgAC0AKUEFRg0BQQEhBSAEQcAAcUUgA3FBAUcNAQtBACEFIARBwABxDQBBAiEFIARB//8DcSIDQQhxDQACQCADQYAEcUUNAAJAIAAtAChBAUcNACAALQAtQQpxDQBBBQ8LQQQPCwJAIANBIHENAAJAIAAtAChBAUYNACAALwEyQf//A3EiAEGcf2pB5ABJDQAgAEHMAUYNACAAQbACRg0AQQQhBSAEQShxRQ0CIANBiARxQYAERg0CC0EADwtBAEEDIAApAyBQGyEFCyAFC2IBAn9BACEBAkAgAC0AKEEBRg0AIAAvATJB//8DcSICQZx/akHkAEkNACACQcwBRg0AIAJBsAJGDQAgAC8BMCIAQcAAcQ0AQQEhASAAQYgEcUGABEYNACAAQShxRSEBCyABC6cBAQN/AkACQAJAIAAtACpFDQAgAC0AK0UNAEEAIQMgAC8BMCIEQQJxRQ0BDAILQQAhAyAALwEwIgRBAXFFDQELQQEhAyAALQAoQQFGDQAgAC8BMkH//wNxIgVBnH9qQeQAS
Q0AIAVBzAFGDQAgBUGwAkYNACAEQcAAcQ0AQQAhAyAEQYgEcUGABEYNACAEQShxQQBHIQMLIABBADsBMCAAQQA6AC8gAwuZAQECfwJAAkACQCAALQAqRQ0AIAAtACtFDQBBACEBIAAvATAiAkECcUUNAQwCC0EAIQEgAC8BMCICQQFxRQ0BC0EBIQEgAC0AKEEBRg0AIAAvATJB//8DcSIAQZx/akHkAEkNACAAQcwBRg0AIABBsAJGDQAgAkHAAHENAEEAIQEgAkGIBHFBgARGDQAgAkEocUEARyEBCyABC0kBAXsgAEEQav0MAAAAAAAAAAAAAAAAAAAAACIB/QsDACAAIAH9CwMAIABBMGogAf0LAwAgAEEgaiAB/QsDACAAQd0BNgIcQQALewEBfwJAIAAoAgwiAw0AAkAgACgCBEUNACAAIAE2AgQLAkAgACABIAIQxICAgAAiAw0AIAAoAgwPCyAAIAM2AhxBACEDIAAoAgQiAUUNACAAIAEgAiAAKAIIEYGAgIAAACIBRQ0AIAAgAjYCFCAAIAE2AgwgASEDCyADC+TzAQMOfwN+BH8jgICAgABBEGsiAySAgICAACABIQQgASEFIAEhBiABIQcgASEIIAEhCSABIQogASELIAEhDCABIQ0gASEOIAEhDwJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAAKAIcIhBBf2oO3QHaAQHZAQIDBAUGBwgJCgsMDQ7YAQ8Q1wEREtYBExQVFhcYGRob4AHfARwdHtUBHyAhIiMkJdQBJicoKSorLNMB0gEtLtEB0AEvMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUbbAUdISUrPAc4BS80BTMwBTU5PUFFSU1RVVldYWVpbXF1eX2BhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ent8fX5/gAGBAYIBgwGEAYUBhgGHAYgBiQGKAYsBjAGNAY4BjwGQAZEBkgGTAZQBlQGWAZcBmAGZAZoBmwGcAZ0BngGfAaABoQGiAaMBpAGlAaYBpwGoAakBqgGrAawBrQGuAa8BsAGxAbIBswG0AbUBtgG3AcsBygG4AckBuQHIAboBuwG8Ab0BvgG/AcABwQHCAcMBxAHFAcYBANwBC0EAIRAMxgELQQ4hEAzFAQtBDSEQDMQBC0EPIRAMwwELQRAhEAzCAQtBEyEQDMEBC0EUIRAMwAELQRUhEAy/AQtBFiEQDL4BC0EXIRAMvQELQRghEAy8AQtBGSEQDLsBC0EaIRAMugELQRshEAy5AQtBHCEQDLgBC0EIIRAMtwELQR0hEAy2AQtBICEQDLUBC0EfIRAMtAELQQchEAyzAQtBISEQDLIBC0EiIRAMsQELQR4hEAywAQtBIyEQDK8BC0ESIRAMrgELQREhEAytAQtBJCEQDKwBC0ElIRAMqwELQSYhEAyqAQtBJyEQDKkBC0HDASEQDKgBC0EpIRAMpwELQSshEAymAQtBLCEQDKUBC0EtIRAMpAELQS4hEAyjAQtBLyEQDKIBC0HEASEQDKEBC0EwIRAMoAELQTQhEAyfAQtBDCEQDJ4BC0ExIRAMnQELQTIhEAycAQtBMyEQDJsBC0E5IRAMmgELQTUhEAyZAQtBxQEhEAyYAQtBCyEQDJcBC0E6IRAMlgELQTYhEAyVAQtBCiEQDJQBC0E3IRAMkwELQTghEAySAQtBPCEQDJEBC0E7IRAMkAELQT0hEAyPAQtBCSEQDI4BC0EoIRAMjQELQT4hEAyMAQtBPyEQDIsBC0HAACEQDIoBC0HBACEQDIkBC0HCACEQDIgBC0HDACEQDIcBC0HEACEQDIYBC0HFACEQDIUBC0HGACEQDIQBC0EqIRAMgwELQccAIRAMggELQcgAIRAMgQELQckAIRAMgAELQcoAIRAMfwtBywAhEAx+C0HNACEQDH0LQcwAIRAMfAtBzgAhEAx7C0HPACEQDHoLQdAAIRAMeQtB0QAhEAx4C0HSACEQDHcLQdMAIRAMdgtB1AAhEAx1C0HWACEQDHQLQdUAIRAMcwtBBiEQDHILQdcAIRAMcQtBBSEQDHALQdgAIRAMbwtBBCEQDG4LQdkAIRAMbQtB2gAhEAxsC0HbACEQDGsLQdwAIRAMagtBAyEQDGkLQd0AIRAMaAtB3gAhEAxnC0HfACEQDGYLQeEAIRAMZQtB4AAhEAxkC0HiACEQDGMLQeMAIRAMYgtBAiEQDGELQeQAIRAMYAtB5QAhEAxfC0HmACEQDF4LQecAIRAMXQtB6AAhEAxcC0HpACEQDFsLQeoAIRAMWgtB6wAhEAxZC0HsACEQDFgLQe0AIRAMVwtB7gAhEAxWC0HvACEQDFULQfAAIRAMVAtB8QAhEAxTC0HyACEQDFILQfMAIRAMUQtB9AAhEAxQC0H1ACEQDE8LQfYAIRAMTgtB9wAhEAxNC0H4ACEQDEwLQfkAIRAMSwtB+gAhEAxKC0H7ACEQDEkLQfwAIRAMSAtB/QAhEAxHC0H+ACEQDEYLQf8AIRAMRQtBgAEhEAxEC0GBASEQDEMLQYIBIRAMQgtBgwEhEAxBC0GEASEQDEALQYUBIRAMPwtBhgEhEAw+C0GHASEQDD0LQYgBIRAMPAtBiQEhEAw7C0GKASEQDDoLQYsBIRAMOQtBjAEhEAw4C0GNASEQDDcLQY4BIRAMNgtBjwEhEAw1C0GQASEQDDQLQZEBIRAMMwtBkgEhEAwyC0GTASEQDDELQZQBIRAMMAtBlQEhEAwvC0GWASEQDC4LQZcBIRAMLQtBmAEhEAwsC0GZASEQDCsLQZoBIRAMKgtBmwEhEAwpC0GcASEQDCgLQZ0BIRAMJwtBngEhEAwmC0GfASEQDCULQaABIRAMJAtBoQEhEAwjC0GiASEQDCILQaMBIRAMIQtBpAEhEAwgC0GlASEQDB8LQaYBIRAMHgtBpwEhEAwdC0GoASEQDBwLQakBIRAMGwtBqgEh
EAwaC0GrASEQDBkLQawBIRAMGAtBrQEhEAwXC0GuASEQDBYLQQEhEAwVC0GvASEQDBQLQbABIRAMEwtBsQEhEAwSC0GzASEQDBELQbIBIRAMEAtBtAEhEAwPC0G1ASEQDA4LQbYBIRAMDQtBtwEhEAwMC0G4ASEQDAsLQbkBIRAMCgtBugEhEAwJC0G7ASEQDAgLQcYBIRAMBwtBvAEhEAwGC0G9ASEQDAULQb4BIRAMBAtBvwEhEAwDC0HAASEQDAILQcIBIRAMAQtBwQEhEAsDQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAIBAOxwEAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB4fICEjJSg/QEFERUZHSElKS0xNT1BRUlPeA1dZW1xdYGJlZmdoaWprbG1vcHFyc3R1dnd4eXp7fH1+gAGCAYUBhgGHAYkBiwGMAY0BjgGPAZABkQGUAZUBlgGXAZgBmQGaAZsBnAGdAZ4BnwGgAaEBogGjAaQBpQGmAacBqAGpAaoBqwGsAa0BrgGvAbABsQGyAbMBtAG1AbYBtwG4AbkBugG7AbwBvQG+Ab8BwAHBAcIBwwHEAcUBxgHHAcgByQHKAcsBzAHNAc4BzwHQAdEB0gHTAdQB1QHWAdcB2AHZAdoB2wHcAd0B3gHgAeEB4gHjAeQB5QHmAecB6AHpAeoB6wHsAe0B7gHvAfAB8QHyAfMBmQKkArAC/gL+AgsgASIEIAJHDfMBQd0BIRAM/wMLIAEiECACRw3dAUHDASEQDP4DCyABIgEgAkcNkAFB9wAhEAz9AwsgASIBIAJHDYYBQe8AIRAM/AMLIAEiASACRw1/QeoAIRAM+wMLIAEiASACRw17QegAIRAM+gMLIAEiASACRw14QeYAIRAM+QMLIAEiASACRw0aQRghEAz4AwsgASIBIAJHDRRBEiEQDPcDCyABIgEgAkcNWUHFACEQDPYDCyABIgEgAkcNSkE/IRAM9QMLIAEiASACRw1IQTwhEAz0AwsgASIBIAJHDUFBMSEQDPMDCyAALQAuQQFGDesDDIcCCyAAIAEiASACEMCAgIAAQQFHDeYBIABCADcDIAznAQsgACABIgEgAhC0gICAACIQDecBIAEhAQz1AgsCQCABIgEgAkcNAEEGIRAM8AMLIAAgAUEBaiIBIAIQu4CAgAAiEA3oASABIQEMMQsgAEIANwMgQRIhEAzVAwsgASIQIAJHDStBHSEQDO0DCwJAIAEiASACRg0AIAFBAWohAUEQIRAM1AMLQQchEAzsAwsgAEIAIAApAyAiESACIAEiEGutIhJ9IhMgEyARVhs3AyAgESASViIURQ3lAUEIIRAM6wMLAkAgASIBIAJGDQAgAEGJgICAADYCCCAAIAE2AgQgASEBQRQhEAzSAwtBCSEQDOoDCyABIQEgACkDIFAN5AEgASEBDPICCwJAIAEiASACRw0AQQshEAzpAwsgACABQQFqIgEgAhC2gICAACIQDeUBIAEhAQzyAgsgACABIgEgAhC4gICAACIQDeUBIAEhAQzyAgsgACABIgEgAhC4gICAACIQDeYBIAEhAQwNCyAAIAEiASACELqAgIAAIhAN5wEgASEBDPACCwJAIAEiASACRw0AQQ8hEAzlAwsgAS0AACIQQTtGDQggEEENRw3oASABQQFqIQEM7wILIAAgASIBIAIQuoCAgAAiEA3oASABIQEM8gILA0ACQCABLQAAQfC1gIAAai0AACIQQQFGDQAgEEECRw3rASAAKAIEIRAgAEEANgIEIAAgECABQQFqIgEQuYCAgAAiEA3qASABIQEM9AILIAFBAWoiASACRw0AC0ESIRAM4gMLIAAgASIBIAIQuoCAgAAiEA3pASABIQEMCgsgASIBIAJHDQZBGyEQDOADCwJAIAEiASACRw0AQRYhEAzgAwsgAEGKgICAADYCCCAAIAE2AgQgACABIAIQuICAgAAiEA3qASABIQFBICEQDMYDCwJAIAEiASACRg0AA0ACQCABLQAAQfC3gIAAai0AACIQQQJGDQACQCAQQX9qDgTlAewBAOsB7AELIAFBAWohAUEIIRAMyAMLIAFBAWoiASACRw0AC0EVIRAM3wMLQRUhEAzeAwsDQAJAIAEtAABB8LmAgABqLQAAIhBBAkYNACAQQX9qDgTeAewB4AHrAewBCyABQQFqIgEgAkcNAAtBGCEQDN0DCwJAIAEiASACRg0AIABBi4CAgAA2AgggACABNgIEIAEhAUEHIRAMxAM
LQRkhEAzcAwsgAUEBaiEBDAILAkAgASIUIAJHDQBBGiEQDNsDCyAUIQECQCAULQAAQXNqDhTdAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gLuAgDuAgtBACEQIABBADYCHCAAQa+LgIAANgIQIABBAjYCDCAAIBRBAWo2AhQM2gMLAkAgAS0AACIQQTtGDQAgEEENRw3oASABQQFqIQEM5QILIAFBAWohAQtBIiEQDL8DCwJAIAEiECACRw0AQRwhEAzYAwtCACERIBAhASAQLQAAQVBqDjfnAeYBAQIDBAUGBwgAAAAAAAAACQoLDA0OAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPEBESExQAC0EeIRAMvQMLQgIhEQzlAQtCAyERDOQBC0IEIREM4wELQgUhEQziAQtCBiERDOEBC0IHIREM4AELQgghEQzfAQtCCSERDN4BC0IKIREM3QELQgshEQzcAQtCDCERDNsBC0INIREM2gELQg4hEQzZAQtCDyERDNgBC0IKIREM1wELQgshEQzWAQtCDCERDNUBC0INIREM1AELQg4hEQzTAQtCDyERDNIBC0IAIRECQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAIBAtAABBUGoON+UB5AEAAQIDBAUGB+YB5gHmAeYB5gHmAeYBCAkKCwwN5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAQ4PEBESE+YBC0ICIREM5AELQgMhEQzjAQtCBCERDOIBC0IFIREM4QELQgYhEQzgAQtCByERDN8BC0IIIREM3gELQgkhEQzdAQtCCiERDNwBC0ILIREM2wELQgwhEQzaAQtCDSERDNkBC0IOIREM2AELQg8hEQzXAQtCCiERDNYBC0ILIREM1QELQgwhEQzUAQtCDSERDNMBC0IOIREM0gELQg8hEQzRAQsgAEIAIAApAyAiESACIAEiEGutIhJ9IhMgEyARVhs3AyAgESASViIURQ3SAUEfIRAMwAMLAkAgASIBIAJGDQAgAEGJgICAADYCCCAAIAE2AgQgASEBQSQhEAynAwtBICEQDL8DCyAAIAEiECACEL6AgIAAQX9qDgW2AQDFAgHRAdIBC0ERIRAMpAMLIABBAToALyAQIQEMuwMLIAEiASACRw3SAUEkIRAMuwMLIAEiDSACRw0eQcYAIRAMugMLIAAgASIBIAIQsoCAgAAiEA3UASABIQEMtQELIAEiECACRw0mQdAAIRAMuAMLAkAgASIBIAJHDQBBKCEQDLgDCyAAQQA2AgQgAEGMgICAADYCCCAAIAEgARCxgICAACIQDdMBIAEhAQzYAQsCQCABIhAgAkcNAEEpIRAMtwMLIBAtAAAiAUEgRg0UIAFBCUcN0wEgEEEBaiEBDBULAkAgASIBIAJGDQAgAUEBaiEBDBcLQSohEAy1AwsCQCABIhAgAkcNAEErIRAMtQMLAkAgEC0AACIBQQlGDQAgAUEgRw3VAQsgAC0ALEEIRg3TASAQIQEMkQMLAkAgASIBIAJHDQBBLCEQDLQDCyABLQAAQQpHDdUBIAFBAWohAQzJAgsgASIOIAJHDdUBQS8hEAyyAwsDQAJAIAEtAAAiEEEgRg0AAkAgEEF2ag4EANwB3AEA2gELIAEhAQzgAQsgAUEBaiIBIAJHDQALQTEhEAyxAwtBMiEQIAEiFCACRg2wAyACIBRrIAAoAgAiAWohFSAUIAFrQQNqIRYCQANAIBQtAAAiF0EgciAXIBdBv39qQf8BcUEaSRtB/wFxIAFB8LuAgABqLQAARw0BAkAgAUEDRw0AQQYhAQyWAwsgAUEBaiEBIBRBAWoiFCACRw0ACyAAIBU2AgAMsQMLIABBADYCACAUIQEM2QELQTMhECABIhQgAkYNrwMgAiAUayAAKAIAIgFqIRUgFCABa0EIaiEWAkADQCAULQAAIhdBIHIgFyAXQb9/akH/AXFBGkkbQf8BcSABQfS7gIAAai0AAEcNAQJAIAFBCEcNAEEFIQEMlQMLIAFBAWohASAUQQFqIhQgAkcNAAsgACAVNgIADLADCyAAQQA2AgAgFCEBDNgBC0E0IRAgASIUIAJGDa4DIAIgFGsgACgCACIBaiEVIBQgAWtBBWohFgJAA0AgFC0AACIXQSByIBcgF0G/f2pB/wFxQRpJG0H/AXEgAUHQwoCAAGotAABHDQECQCABQQVHDQBBByEBDJQDCyABQQFqIQEgFEEBaiIUIAJHDQALIAAgFTYCAAyvAwsgAEEANgIAIBQhAQzXAQsCQCABIgEgAkYNAANAAkAgAS0AAEGAvoCAAGotAAAiEEEBRg0AIBBBAkYNCiABIQEM3QELIAFBAWoiASACRw0AC0EwIRAMrgMLQTAhEAytAwsCQCABIgEgAkYNAANAAkAgAS0AACIQQSBGDQAgEEF2ag4E2QHaAdoB2QHaAQsgAUEBaiIBIAJHDQALQTghEAytAwtBOCEQDKwDCwNAAkAgAS0AACIQQSBGDQAgEEEJRw0DCyABQQFqIgEgAkcNAAtBPCEQDKsDCwNAAkAgAS0AACIQQSBGDQACQAJAIBBBdmoOBNoBAQHaAQALIBBBLEYN2wELIAEhAQwECyABQQFqIgEgAkcNAAtBPyEQDKoDCyABIQEM2wELQcAAIRAgASIUIAJGDagDIAIgFGsgACgCACIBaiEWIBQgAWtBBmohFwJAA0AgFC0AAEEgciABQYDAgIAAai0AAEcNASABQQZGDY4DIAFBAWohASAUQQFqIhQgAkcNAAsgACAWNgIADKkDCyAAQQA2AgAgFCEBC0E2IRAMjgMLAkAgASIPIAJHDQBBwQAhEAynAwsgAEGMgICAADYCCCAAIA82AgQgDyEBIAAtACxBf2oOBM0B1QHXAdkBhwMLIAFBAWohAQzMAQsCQCABIgEgAkYNAANAAkAgAS0AACIQQSByIBAgEEG/f2pB/wFxQRpJG0H/AXEiEEEJRg0AIBBBIEYNAAJAAkACQAJAIBBBnX9qDhMAAwMDAwMDAwEDAwMDAwMDAwMCAwsgAUEBaiEBQTEhEAyRAwsgAUEBaiEBQTIhEAyQAwsgAUEBaiEBQTMhEAyPAwsgASEBDNABCyABQQFqIgEgAkcNAAtBNSEQDKUDC0E1IRAMpAMLAkAgASIBIAJGDQADQAJAIAEtAABBgLyAgABqLQAAQQFGDQAgASEBDNMBCyABQQFqIgEgAkcNAAtBPSEQDKQDC0E9IRAMowMLIAAgASIBIAIQsICAgAAiEA3WASABIQEMAQsgEEEBaiEBC0E8IRAMhwMLAkAgASIBIAJHDQBBwgAhEAygAwsCQANAAkAgAS0AAEF3ag4YAAL+Av4ChAP+Av4C/gL+Av4C/gL+Av4C/gL+Av4C/gL+Av4C/gL+Av4C/gIA/gILIAFBAWoiASACRw0AC0HCACEQDKADCyABQQFqIQEgAC0ALUEBcUUNvQ
EgASEBC0EsIRAMhQMLIAEiASACRw3TAUHEACEQDJ0DCwNAAkAgAS0AAEGQwICAAGotAABBAUYNACABIQEMtwILIAFBAWoiASACRw0AC0HFACEQDJwDCyANLQAAIhBBIEYNswEgEEE6Rw2BAyAAKAIEIQEgAEEANgIEIAAgASANEK+AgIAAIgEN0AEgDUEBaiEBDLMCC0HHACEQIAEiDSACRg2aAyACIA1rIAAoAgAiAWohFiANIAFrQQVqIRcDQCANLQAAIhRBIHIgFCAUQb9/akH/AXFBGkkbQf8BcSABQZDCgIAAai0AAEcNgAMgAUEFRg30AiABQQFqIQEgDUEBaiINIAJHDQALIAAgFjYCAAyaAwtByAAhECABIg0gAkYNmQMgAiANayAAKAIAIgFqIRYgDSABa0EJaiEXA0AgDS0AACIUQSByIBQgFEG/f2pB/wFxQRpJG0H/AXEgAUGWwoCAAGotAABHDf8CAkAgAUEJRw0AQQIhAQz1AgsgAUEBaiEBIA1BAWoiDSACRw0ACyAAIBY2AgAMmQMLAkAgASINIAJHDQBByQAhEAyZAwsCQAJAIA0tAAAiAUEgciABIAFBv39qQf8BcUEaSRtB/wFxQZJ/ag4HAIADgAOAA4ADgAMBgAMLIA1BAWohAUE+IRAMgAMLIA1BAWohAUE/IRAM/wILQcoAIRAgASINIAJGDZcDIAIgDWsgACgCACIBaiEWIA0gAWtBAWohFwNAIA0tAAAiFEEgciAUIBRBv39qQf8BcUEaSRtB/wFxIAFBoMKAgABqLQAARw39AiABQQFGDfACIAFBAWohASANQQFqIg0gAkcNAAsgACAWNgIADJcDC0HLACEQIAEiDSACRg2WAyACIA1rIAAoAgAiAWohFiANIAFrQQ5qIRcDQCANLQAAIhRBIHIgFCAUQb9/akH/AXFBGkkbQf8BcSABQaLCgIAAai0AAEcN/AIgAUEORg3wAiABQQFqIQEgDUEBaiINIAJHDQALIAAgFjYCAAyWAwtBzAAhECABIg0gAkYNlQMgAiANayAAKAIAIgFqIRYgDSABa0EPaiEXA0AgDS0AACIUQSByIBQgFEG/f2pB/wFxQRpJG0H/AXEgAUHAwoCAAGotAABHDfsCAkAgAUEPRw0AQQMhAQzxAgsgAUEBaiEBIA1BAWoiDSACRw0ACyAAIBY2AgAMlQMLQc0AIRAgASINIAJGDZQDIAIgDWsgACgCACIBaiEWIA0gAWtBBWohFwNAIA0tAAAiFEEgciAUIBRBv39qQf8BcUEaSRtB/wFxIAFB0MKAgABqLQAARw36AgJAIAFBBUcNAEEEIQEM8AILIAFBAWohASANQQFqIg0gAkcNAAsgACAWNgIADJQDCwJAIAEiDSACRw0AQc4AIRAMlAMLAkACQAJAAkAgDS0AACIBQSByIAEgAUG/f2pB/wFxQRpJG0H/AXFBnX9qDhMA/QL9Av0C/QL9Av0C/QL9Av0C/QL9Av0CAf0C/QL9AgID/QILIA1BAWohAUHBACEQDP0CCyANQQFqIQFBwgAhEAz8AgsgDUEBaiEBQcMAIRAM+wILIA1BAWohAUHEACEQDPoCCwJAIAEiASACRg0AIABBjYCAgAA2AgggACABNgIEIAEhAUHFACEQDPoCC0HPACEQDJIDCyAQIQECQAJAIBAtAABBdmoOBAGoAqgCAKgCCyAQQQFqIQELQSchEAz4AgsCQCABIgEgAkcNAEHRACEQDJEDCwJAIAEtAABBIEYNACABIQEMjQELIAFBAWohASAALQAtQQFxRQ3HASABIQEMjAELIAEiFyACRw3IAUHSACEQDI8DC0HTACEQIAEiFCACRg2OAyACIBRrIAAoAgAiAWohFiAUIAFrQQFqIRcDQCAULQAAIAFB1sKAgABqLQAARw3MASABQQFGDccBIAFBAWohASAUQQFqIhQgAkcNAAsgACAWNgIADI4DCwJAIAEiASACRw0AQdUAIRAMjgMLIAEtAABBCkcNzAEgAUEBaiEBDMcBCwJAIAEiASACRw0AQdYAIRAMjQMLAkACQCABLQAAQXZqDgQAzQHNAQHNAQsgAUEBaiEBDMcBCyABQQFqIQFBygAhEAzzAgsgACABIgEgAhCugICAACIQDcsBIAEhAUHNACEQDPICCyAALQApQSJGDYUDDKYCCwJAIAEiASACRw0AQdsAIRAMigMLQQAhFEEBIRdBASEWQQAhEAJAAkACQAJAAkACQAJAAkACQCABLQAAQVBqDgrUAdMBAAECAwQFBgjVAQtBAiEQDAYLQQMhEAwFC0EEIRAMBAtBBSEQDAMLQQYhEAwCC0EHIRAMAQtBCCEQC0EAIRdBACEWQQAhFAzMAQtBCSEQQQEhFEEAIRdBACEWDMsBCwJAIAEiASACRw0AQd0AIRAMiQMLIAEtAABBLkcNzAEgAUEBaiEBDKYCCyABIgEgAkcNzAFB3wAhEAyHAwsCQCABIgEgAkYNACAAQY6AgIAANgIIIAAgATYCBCABIQFB0AAhEAzuAgtB4AAhEAyGAwtB4QAhECABIgEgAkYNhQMgAiABayAAKAIAIhRqIRYgASAUa0EDaiEXA0AgAS0AACAUQeLCgIAAai0AAEcNzQEgFEEDRg3MASAUQQFqIRQgAUEBaiIBIAJHDQALIAAgFjYCAAyFAwtB4gAhECABIgEgAkYNhAMgAiABayAAKAIAIhRqIRYgASAUa0ECaiEXA0AgAS0AACAUQebCgIAAai0AAEcNzAEgFEECRg3OASAUQQFqIRQgAUEBaiIBIAJHDQALIAAgFjYCAAyEAwtB4wAhECABIgEgAkYNgwMgAiABayAAKAIAIhRqIRYgASAUa0EDaiEXA0AgAS0AACAUQenCgIAAai0AAEcNywEgFEEDRg3OASAUQQFqIRQgAUEBaiIBIAJHDQALIAAgFjYCAAyDAwsCQCABIgEgAkcNAEHlACEQDIMDCyAAIAFBAWoiASACEKiAgIAAIhANzQEgASEBQdYAIRAM6QILAkAgASIBIAJGDQADQAJAIAEtAAAiEEEgRg0AAkACQAJAIBBBuH9qDgsAAc8BzwHPAc8BzwHPAc8BzwECzwELIAFBAWohAUHSACEQDO0CCyABQQFqIQFB0wAhEAzsAgsgAUEBaiEBQdQAIRAM6wILIAFBAWoiASACRw0AC0HkACEQDIIDC0HkACEQDIEDCwNAAkAgAS0AAEHwwoCAAGotAAAiEEEBRg0AIBBBfmoOA88B0AHRAdIBCyABQQFqIgEgAkcNAAtB5gAhEAyAAwsCQCABIgEgAkYNACABQQFqIQEMAwtB5wAhEAz/AgsDQAJAIAEtAABB8MSAgABqLQAAIhBBAUYNAAJAIBBBfmoOBNIB0wHUAQDVAQsgASEBQdcAIRAM5wILIAFBAWoiASACRw0AC0HoACEQDP4CCwJAIAEiASACRw0AQekAIRAM/gILAkAgAS0AACIQQXZqDhq6AdUB1QG8AdUB1QHVAdUB1QHVAdUB1QHVAdUB1QHVA
dUB1QHVAdUB1QHVAcoB1QHVAQDTAQsgAUEBaiEBC0EGIRAM4wILA0ACQCABLQAAQfDGgIAAai0AAEEBRg0AIAEhAQyeAgsgAUEBaiIBIAJHDQALQeoAIRAM+wILAkAgASIBIAJGDQAgAUEBaiEBDAMLQesAIRAM+gILAkAgASIBIAJHDQBB7AAhEAz6AgsgAUEBaiEBDAELAkAgASIBIAJHDQBB7QAhEAz5AgsgAUEBaiEBC0EEIRAM3gILAkAgASIUIAJHDQBB7gAhEAz3AgsgFCEBAkACQAJAIBQtAABB8MiAgABqLQAAQX9qDgfUAdUB1gEAnAIBAtcBCyAUQQFqIQEMCgsgFEEBaiEBDM0BC0EAIRAgAEEANgIcIABBm5KAgAA2AhAgAEEHNgIMIAAgFEEBajYCFAz2AgsCQANAAkAgAS0AAEHwyICAAGotAAAiEEEERg0AAkACQCAQQX9qDgfSAdMB1AHZAQAEAdkBCyABIQFB2gAhEAzgAgsgAUEBaiEBQdwAIRAM3wILIAFBAWoiASACRw0AC0HvACEQDPYCCyABQQFqIQEMywELAkAgASIUIAJHDQBB8AAhEAz1AgsgFC0AAEEvRw3UASAUQQFqIQEMBgsCQCABIhQgAkcNAEHxACEQDPQCCwJAIBQtAAAiAUEvRw0AIBRBAWohAUHdACEQDNsCCyABQXZqIgRBFksN0wFBASAEdEGJgIACcUUN0wEMygILAkAgASIBIAJGDQAgAUEBaiEBQd4AIRAM2gILQfIAIRAM8gILAkAgASIUIAJHDQBB9AAhEAzyAgsgFCEBAkAgFC0AAEHwzICAAGotAABBf2oOA8kClAIA1AELQeEAIRAM2AILAkAgASIUIAJGDQADQAJAIBQtAABB8MqAgABqLQAAIgFBA0YNAAJAIAFBf2oOAssCANUBCyAUIQFB3wAhEAzaAgsgFEEBaiIUIAJHDQALQfMAIRAM8QILQfMAIRAM8AILAkAgASIBIAJGDQAgAEGPgICAADYCCCAAIAE2AgQgASEBQeAAIRAM1wILQfUAIRAM7wILAkAgASIBIAJHDQBB9gAhEAzvAgsgAEGPgICAADYCCCAAIAE2AgQgASEBC0EDIRAM1AILA0AgAS0AAEEgRw3DAiABQQFqIgEgAkcNAAtB9wAhEAzsAgsCQCABIgEgAkcNAEH4ACEQDOwCCyABLQAAQSBHDc4BIAFBAWohAQzvAQsgACABIgEgAhCsgICAACIQDc4BIAEhAQyOAgsCQCABIgQgAkcNAEH6ACEQDOoCCyAELQAAQcwARw3RASAEQQFqIQFBEyEQDM8BCwJAIAEiBCACRw0AQfsAIRAM6QILIAIgBGsgACgCACIBaiEUIAQgAWtBBWohEANAIAQtAAAgAUHwzoCAAGotAABHDdABIAFBBUYNzgEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBB+wAhEAzoAgsCQCABIgQgAkcNAEH8ACEQDOgCCwJAAkAgBC0AAEG9f2oODADRAdEB0QHRAdEB0QHRAdEB0QHRAQHRAQsgBEEBaiEBQeYAIRAMzwILIARBAWohAUHnACEQDM4CCwJAIAEiBCACRw0AQf0AIRAM5wILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQe3PgIAAai0AAEcNzwEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQf0AIRAM5wILIABBADYCACAQQQFqIQFBECEQDMwBCwJAIAEiBCACRw0AQf4AIRAM5gILIAIgBGsgACgCACIBaiEUIAQgAWtBBWohEAJAA0AgBC0AACABQfbOgIAAai0AAEcNzgEgAUEFRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQf4AIRAM5gILIABBADYCACAQQQFqIQFBFiEQDMsBCwJAIAEiBCACRw0AQf8AIRAM5QILIAIgBGsgACgCACIBaiEUIAQgAWtBA2ohEAJAA0AgBC0AACABQfzOgIAAai0AAEcNzQEgAUEDRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQf8AIRAM5QILIABBADYCACAQQQFqIQFBBSEQDMoBCwJAIAEiBCACRw0AQYABIRAM5AILIAQtAABB2QBHDcsBIARBAWohAUEIIRAMyQELAkAgASIEIAJHDQBBgQEhEAzjAgsCQAJAIAQtAABBsn9qDgMAzAEBzAELIARBAWohAUHrACEQDMoCCyAEQQFqIQFB7AAhEAzJAgsCQCABIgQgAkcNAEGCASEQDOICCwJAAkAgBC0AAEG4f2oOCADLAcsBywHLAcsBywEBywELIARBAWohAUHqACEQDMkCCyAEQQFqIQFB7QAhEAzIAgsCQCABIgQgAkcNAEGDASEQDOECCyACIARrIAAoAgAiAWohECAEIAFrQQJqIRQCQANAIAQtAAAgAUGAz4CAAGotAABHDckBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgEDYCAEGDASEQDOECC0EAIRAgAEEANgIAIBRBAWohAQzGAQsCQCABIgQgAkcNAEGEASEQDOACCyACIARrIAAoAgAiAWohFCAEIAFrQQRqIRACQANAIAQtAAAgAUGDz4CAAGotAABHDcgBIAFBBEYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGEASEQDOACCyAAQQA2AgAgEEEBaiEBQSMhEAzFAQsCQCABIgQgAkcNAEGFASEQDN8CCwJAAkAgBC0AAEG0f2oOCADIAcgByAHIAcgByAEByAELIARBAWohAUHvACEQDMYCCyAEQQFqIQFB8AAhEAzFAgsCQCABIgQgAkcNAEGGASEQDN4CCyAELQAAQcUARw3FASAEQQFqIQEMgwILAkAgASIEIAJHDQBBhwEhEAzdAgsgAiAEayAAKAIAIgFqIRQgBCABa0EDaiEQAkADQCAELQAAIAFBiM+AgABqLQAARw3FASABQQNGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBhwEhEAzdAgsgAEEANgIAIBBBAWohAUEtIRAMwgELAkAgASIEIAJHDQBBiAEhEAzcAgsgAiAEayAAKAIAIgFqIRQgBCABa0EIaiEQAkADQCAELQAAIAFB0M+AgABqLQAARw3EASABQQhGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBiAEhEAzcAgsgAEEANgIAIBBBAWohAUEpIRAMwQELAkAgASIBIAJHDQBBiQEhEAzbAgtBASEQIAEtAABB3wBHDcABIAFBAWohAQyBAgsCQCABIgQgAkcNAEGKASEQDNoCCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRADQCAELQAAIAFBjM+AgABqLQAARw3BASABQQFGDa8CIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQYoBIRAM2QILAkAgASIEIAJHDQBBiwEhEAzZAgsgAiAE
ayAAKAIAIgFqIRQgBCABa0ECaiEQAkADQCAELQAAIAFBjs+AgABqLQAARw3BASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBiwEhEAzZAgsgAEEANgIAIBBBAWohAUECIRAMvgELAkAgASIEIAJHDQBBjAEhEAzYAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFB8M+AgABqLQAARw3AASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBjAEhEAzYAgsgAEEANgIAIBBBAWohAUEfIRAMvQELAkAgASIEIAJHDQBBjQEhEAzXAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFB8s+AgABqLQAARw2/ASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBjQEhEAzXAgsgAEEANgIAIBBBAWohAUEJIRAMvAELAkAgASIEIAJHDQBBjgEhEAzWAgsCQAJAIAQtAABBt39qDgcAvwG/Ab8BvwG/AQG/AQsgBEEBaiEBQfgAIRAMvQILIARBAWohAUH5ACEQDLwCCwJAIAEiBCACRw0AQY8BIRAM1QILIAIgBGsgACgCACIBaiEUIAQgAWtBBWohEAJAA0AgBC0AACABQZHPgIAAai0AAEcNvQEgAUEFRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQY8BIRAM1QILIABBADYCACAQQQFqIQFBGCEQDLoBCwJAIAEiBCACRw0AQZABIRAM1AILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQZfPgIAAai0AAEcNvAEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZABIRAM1AILIABBADYCACAQQQFqIQFBFyEQDLkBCwJAIAEiBCACRw0AQZEBIRAM0wILIAIgBGsgACgCACIBaiEUIAQgAWtBBmohEAJAA0AgBC0AACABQZrPgIAAai0AAEcNuwEgAUEGRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZEBIRAM0wILIABBADYCACAQQQFqIQFBFSEQDLgBCwJAIAEiBCACRw0AQZIBIRAM0gILIAIgBGsgACgCACIBaiEUIAQgAWtBBWohEAJAA0AgBC0AACABQaHPgIAAai0AAEcNugEgAUEFRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZIBIRAM0gILIABBADYCACAQQQFqIQFBHiEQDLcBCwJAIAEiBCACRw0AQZMBIRAM0QILIAQtAABBzABHDbgBIARBAWohAUEKIRAMtgELAkAgBCACRw0AQZQBIRAM0AILAkACQCAELQAAQb9/ag4PALkBuQG5AbkBuQG5AbkBuQG5AbkBuQG5AbkBAbkBCyAEQQFqIQFB/gAhEAy3AgsgBEEBaiEBQf8AIRAMtgILAkAgBCACRw0AQZUBIRAMzwILAkACQCAELQAAQb9/ag4DALgBAbgBCyAEQQFqIQFB/QAhEAy2AgsgBEEBaiEEQYABIRAMtQILAkAgBCACRw0AQZYBIRAMzgILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQafPgIAAai0AAEcNtgEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZYBIRAMzgILIABBADYCACAQQQFqIQFBCyEQDLMBCwJAIAQgAkcNAEGXASEQDM0CCwJAAkACQAJAIAQtAABBU2oOIwC4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBAbgBuAG4AbgBuAECuAG4AbgBA7gBCyAEQQFqIQFB+wAhEAy2AgsgBEEBaiEBQfwAIRAMtQILIARBAWohBEGBASEQDLQCCyAEQQFqIQRBggEhEAyzAgsCQCAEIAJHDQBBmAEhEAzMAgsgAiAEayAAKAIAIgFqIRQgBCABa0EEaiEQAkADQCAELQAAIAFBqc+AgABqLQAARw20ASABQQRGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBmAEhEAzMAgsgAEEANgIAIBBBAWohAUEZIRAMsQELAkAgBCACRw0AQZkBIRAMywILIAIgBGsgACgCACIBaiEUIAQgAWtBBWohEAJAA0AgBC0AACABQa7PgIAAai0AAEcNswEgAUEFRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZkBIRAMywILIABBADYCACAQQQFqIQFBBiEQDLABCwJAIAQgAkcNAEGaASEQDMoCCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRACQANAIAQtAAAgAUG0z4CAAGotAABHDbIBIAFBAUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGaASEQDMoCCyAAQQA2AgAgEEEBaiEBQRwhEAyvAQsCQCAEIAJHDQBBmwEhEAzJAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFBts+AgABqLQAARw2xASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBmwEhEAzJAgsgAEEANgIAIBBBAWohAUEnIRAMrgELAkAgBCACRw0AQZwBIRAMyAILAkACQCAELQAAQax/ag4CAAGxAQsgBEEBaiEEQYYBIRAMrwILIARBAWohBEGHASEQDK4CCwJAIAQgAkcNAEGdASEQDMcCCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRACQANAIAQtAAAgAUG4z4CAAGotAABHDa8BIAFBAUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGdASEQDMcCCyAAQQA2AgAgEEEBaiEBQSYhEAysAQsCQCAEIAJHDQBBngEhEAzGAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFBus+AgABqLQAARw2uASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBngEhEAzGAgsgAEEANgIAIBBBAWohAUEDIRAMqwELAkAgBCACRw0AQZ8BIRAMxQILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQe3PgIAAai0AAEcNrQEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZ8BIRAMxQILIABBADYCACAQQQFqIQFBDCEQDKoBCwJAIAQgAkcNAEGgASEQDMQCCyACIARrIAAoAgAiAWohFCAEIAFrQQNqIRACQANAIAQtAAAgAUG8z4CAAGotAABHDawBIAFBA0YNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGgASEQDMQCCyAAQQA2AgAgEEEBaiEBQQ0hEAypAQsCQCAEIAJHDQBBoQE
hEAzDAgsCQAJAIAQtAABBun9qDgsArAGsAawBrAGsAawBrAGsAawBAawBCyAEQQFqIQRBiwEhEAyqAgsgBEEBaiEEQYwBIRAMqQILAkAgBCACRw0AQaIBIRAMwgILIAQtAABB0ABHDakBIARBAWohBAzpAQsCQCAEIAJHDQBBowEhEAzBAgsCQAJAIAQtAABBt39qDgcBqgGqAaoBqgGqAQCqAQsgBEEBaiEEQY4BIRAMqAILIARBAWohAUEiIRAMpgELAkAgBCACRw0AQaQBIRAMwAILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQcDPgIAAai0AAEcNqAEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQaQBIRAMwAILIABBADYCACAQQQFqIQFBHSEQDKUBCwJAIAQgAkcNAEGlASEQDL8CCwJAAkAgBC0AAEGuf2oOAwCoAQGoAQsgBEEBaiEEQZABIRAMpgILIARBAWohAUEEIRAMpAELAkAgBCACRw0AQaYBIRAMvgILAkACQAJAAkACQCAELQAAQb9/ag4VAKoBqgGqAaoBqgGqAaoBqgGqAaoBAaoBqgECqgGqAQOqAaoBBKoBCyAEQQFqIQRBiAEhEAyoAgsgBEEBaiEEQYkBIRAMpwILIARBAWohBEGKASEQDKYCCyAEQQFqIQRBjwEhEAylAgsgBEEBaiEEQZEBIRAMpAILAkAgBCACRw0AQacBIRAMvQILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQe3PgIAAai0AAEcNpQEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQacBIRAMvQILIABBADYCACAQQQFqIQFBESEQDKIBCwJAIAQgAkcNAEGoASEQDLwCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHCz4CAAGotAABHDaQBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGoASEQDLwCCyAAQQA2AgAgEEEBaiEBQSwhEAyhAQsCQCAEIAJHDQBBqQEhEAy7AgsgAiAEayAAKAIAIgFqIRQgBCABa0EEaiEQAkADQCAELQAAIAFBxc+AgABqLQAARw2jASABQQRGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBqQEhEAy7AgsgAEEANgIAIBBBAWohAUErIRAMoAELAkAgBCACRw0AQaoBIRAMugILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQcrPgIAAai0AAEcNogEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQaoBIRAMugILIABBADYCACAQQQFqIQFBFCEQDJ8BCwJAIAQgAkcNAEGrASEQDLkCCwJAAkACQAJAIAQtAABBvn9qDg8AAQKkAaQBpAGkAaQBpAGkAaQBpAGkAaQBA6QBCyAEQQFqIQRBkwEhEAyiAgsgBEEBaiEEQZQBIRAMoQILIARBAWohBEGVASEQDKACCyAEQQFqIQRBlgEhEAyfAgsCQCAEIAJHDQBBrAEhEAy4AgsgBC0AAEHFAEcNnwEgBEEBaiEEDOABCwJAIAQgAkcNAEGtASEQDLcCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHNz4CAAGotAABHDZ8BIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGtASEQDLcCCyAAQQA2AgAgEEEBaiEBQQ4hEAycAQsCQCAEIAJHDQBBrgEhEAy2AgsgBC0AAEHQAEcNnQEgBEEBaiEBQSUhEAybAQsCQCAEIAJHDQBBrwEhEAy1AgsgAiAEayAAKAIAIgFqIRQgBCABa0EIaiEQAkADQCAELQAAIAFB0M+AgABqLQAARw2dASABQQhGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBrwEhEAy1AgsgAEEANgIAIBBBAWohAUEqIRAMmgELAkAgBCACRw0AQbABIRAMtAILAkACQCAELQAAQat/ag4LAJ0BnQGdAZ0BnQGdAZ0BnQGdAQGdAQsgBEEBaiEEQZoBIRAMmwILIARBAWohBEGbASEQDJoCCwJAIAQgAkcNAEGxASEQDLMCCwJAAkAgBC0AAEG/f2oOFACcAZwBnAGcAZwBnAGcAZwBnAGcAZwBnAGcAZwBnAGcAZwBnAEBnAELIARBAWohBEGZASEQDJoCCyAEQQFqIQRBnAEhEAyZAgsCQCAEIAJHDQBBsgEhEAyyAgsgAiAEayAAKAIAIgFqIRQgBCABa0EDaiEQAkADQCAELQAAIAFB2c+AgABqLQAARw2aASABQQNGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBsgEhEAyyAgsgAEEANgIAIBBBAWohAUEhIRAMlwELAkAgBCACRw0AQbMBIRAMsQILIAIgBGsgACgCACIBaiEUIAQgAWtBBmohEAJAA0AgBC0AACABQd3PgIAAai0AAEcNmQEgAUEGRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQbMBIRAMsQILIABBADYCACAQQQFqIQFBGiEQDJYBCwJAIAQgAkcNAEG0ASEQDLACCwJAAkACQCAELQAAQbt/ag4RAJoBmgGaAZoBmgGaAZoBmgGaAQGaAZoBmgGaAZoBApoBCyAEQQFqIQRBnQEhEAyYAgsgBEEBaiEEQZ4BIRAMlwILIARBAWohBEGfASEQDJYCCwJAIAQgAkcNAEG1ASEQDK8CCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRACQANAIAQtAAAgAUHkz4CAAGotAABHDZcBIAFBBUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEG1ASEQDK8CCyAAQQA2AgAgEEEBaiEBQSghEAyUAQsCQCAEIAJHDQBBtgEhEAyuAgsgAiAEayAAKAIAIgFqIRQgBCABa0ECaiEQAkADQCAELQAAIAFB6s+AgABqLQAARw2WASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBtgEhEAyuAgsgAEEANgIAIBBBAWohAUEHIRAMkwELAkAgBCACRw0AQbcBIRAMrQILAkACQCAELQAAQbt/ag4OAJYBlgGWAZYBlgGWAZYBlgGWAZYBlgGWAQGWAQsgBEEBaiEEQaEBIRAMlAILIARBAWohBEGiASEQDJMCCwJAIAQgAkcNAEG4ASEQDKwCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHtz4CAAGotAABHDZQBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEG4ASEQDKwCCyAAQQA2AgAgEEEBaiEBQRIhEAyRAQsCQCAEIAJHDQBBuQEhEAyrAgsgAiAEay
AAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFB8M+AgABqLQAARw2TASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBuQEhEAyrAgsgAEEANgIAIBBBAWohAUEgIRAMkAELAkAgBCACRw0AQboBIRAMqgILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQfLPgIAAai0AAEcNkgEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQboBIRAMqgILIABBADYCACAQQQFqIQFBDyEQDI8BCwJAIAQgAkcNAEG7ASEQDKkCCwJAAkAgBC0AAEG3f2oOBwCSAZIBkgGSAZIBAZIBCyAEQQFqIQRBpQEhEAyQAgsgBEEBaiEEQaYBIRAMjwILAkAgBCACRw0AQbwBIRAMqAILIAIgBGsgACgCACIBaiEUIAQgAWtBB2ohEAJAA0AgBC0AACABQfTPgIAAai0AAEcNkAEgAUEHRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQbwBIRAMqAILIABBADYCACAQQQFqIQFBGyEQDI0BCwJAIAQgAkcNAEG9ASEQDKcCCwJAAkACQCAELQAAQb5/ag4SAJEBkQGRAZEBkQGRAZEBkQGRAQGRAZEBkQGRAZEBkQECkQELIARBAWohBEGkASEQDI8CCyAEQQFqIQRBpwEhEAyOAgsgBEEBaiEEQagBIRAMjQILAkAgBCACRw0AQb4BIRAMpgILIAQtAABBzgBHDY0BIARBAWohBAzPAQsCQCAEIAJHDQBBvwEhEAylAgsCQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAELQAAQb9/ag4VAAECA5wBBAUGnAGcAZwBBwgJCgucAQwNDg+cAQsgBEEBaiEBQegAIRAMmgILIARBAWohAUHpACEQDJkCCyAEQQFqIQFB7gAhEAyYAgsgBEEBaiEBQfIAIRAMlwILIARBAWohAUHzACEQDJYCCyAEQQFqIQFB9gAhEAyVAgsgBEEBaiEBQfcAIRAMlAILIARBAWohAUH6ACEQDJMCCyAEQQFqIQRBgwEhEAySAgsgBEEBaiEEQYQBIRAMkQILIARBAWohBEGFASEQDJACCyAEQQFqIQRBkgEhEAyPAgsgBEEBaiEEQZgBIRAMjgILIARBAWohBEGgASEQDI0CCyAEQQFqIQRBowEhEAyMAgsgBEEBaiEEQaoBIRAMiwILAkAgBCACRg0AIABBkICAgAA2AgggACAENgIEQasBIRAMiwILQcABIRAMowILIAAgBSACEKqAgIAAIgENiwEgBSEBDFwLAkAgBiACRg0AIAZBAWohBQyNAQtBwgEhEAyhAgsDQAJAIBAtAABBdmoOBIwBAACPAQALIBBBAWoiECACRw0AC0HDASEQDKACCwJAIAcgAkYNACAAQZGAgIAANgIIIAAgBzYCBCAHIQFBASEQDIcCC0HEASEQDJ8CCwJAIAcgAkcNAEHFASEQDJ8CCwJAAkAgBy0AAEF2ag4EAc4BzgEAzgELIAdBAWohBgyNAQsgB0EBaiEFDIkBCwJAIAcgAkcNAEHGASEQDJ4CCwJAAkAgBy0AAEF2ag4XAY8BjwEBjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BAI8BCyAHQQFqIQcLQbABIRAMhAILAkAgCCACRw0AQcgBIRAMnQILIAgtAABBIEcNjQEgAEEAOwEyIAhBAWohAUGzASEQDIMCCyABIRcCQANAIBciByACRg0BIActAABBUGpB/wFxIhBBCk8NzAECQCAALwEyIhRBmTNLDQAgACAUQQpsIhQ7ATIgEEH//wNzIBRB/v8DcUkNACAHQQFqIRcgACAUIBBqIhA7ATIgEEH//wNxQegHSQ0BCwtBACEQIABBADYCHCAAQcGJgIAANgIQIABBDTYCDCAAIAdBAWo2AhQMnAILQccBIRAMmwILIAAgCCACEK6AgIAAIhBFDcoBIBBBFUcNjAEgAEHIATYCHCAAIAg2AhQgAEHJl4CAADYCECAAQRU2AgxBACEQDJoCCwJAIAkgAkcNAEHMASEQDJoCC0EAIRRBASEXQQEhFkEAIRACQAJAAkACQAJAAkACQAJAAkAgCS0AAEFQag4KlgGVAQABAgMEBQYIlwELQQIhEAwGC0EDIRAMBQtBBCEQDAQLQQUhEAwDC0EGIRAMAgtBByEQDAELQQghEAtBACEXQQAhFkEAIRQMjgELQQkhEEEBIRRBACEXQQAhFgyNAQsCQCAKIAJHDQBBzgEhEAyZAgsgCi0AAEEuRw2OASAKQQFqIQkMygELIAsgAkcNjgFB0AEhEAyXAgsCQCALIAJGDQAgAEGOgICAADYCCCAAIAs2AgRBtwEhEAz+AQtB0QEhEAyWAgsCQCAEIAJHDQBB0gEhEAyWAgsgAiAEayAAKAIAIhBqIRQgBCAQa0EEaiELA0AgBC0AACAQQfzPgIAAai0AAEcNjgEgEEEERg3pASAQQQFqIRAgBEEBaiIEIAJHDQALIAAgFDYCAEHSASEQDJUCCyAAIAwgAhCsgICAACIBDY0BIAwhAQy4AQsCQCAEIAJHDQBB1AEhEAyUAgsgAiAEayAAKAIAIhBqIRQgBCAQa0EBaiEMA0AgBC0AACAQQYHQgIAAai0AAEcNjwEgEEEBRg2OASAQQQFqIRAgBEEBaiIEIAJHDQALIAAgFDYCAEHUASEQDJMCCwJAIAQgAkcNAEHWASEQDJMCCyACIARrIAAoAgAiEGohFCAEIBBrQQJqIQsDQCAELQAAIBBBg9CAgABqLQAARw2OASAQQQJGDZABIBBBAWohECAEQQFqIgQgAkcNAAsgACAUNgIAQdYBIRAMkgILAkAgBCACRw0AQdcBIRAMkgILAkACQCAELQAAQbt/ag4QAI8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwEBjwELIARBAWohBEG7ASEQDPkBCyAEQQFqIQRBvAEhEAz4AQsCQCAEIAJHDQBB2AEhEAyRAgsgBC0AAEHIAEcNjAEgBEEBaiEEDMQBCwJAIAQgAkYNACAAQZCAgIAANgIIIAAgBDYCBEG+ASEQDPcBC0HZASEQDI8CCwJAIAQgAkcNAEHaASEQDI8CCyAELQAAQcgARg3DASAAQQE6ACgMuQELIABBAjoALyAAIAQgAhCmgICAACIQDY0BQcIBIRAM9AELIAAtAChBf2oOArcBuQG4AQsDQAJAIAQtAABBdmoOBACOAY4BAI4BCyAEQQFqIgQgAkcNAAtB3QEhEAyLAgsgAEEAOgAvIAAtAC1BBHFFDYQCCyAAQQA6AC8gAEEBOgA0IAEhAQyMAQsgEEEVRg3aASAAQQA2AhwgACABNgIUIABBp46AgAA2AhAgAEESNgIMQQAhEAyIAgsCQCAAIBAgAhC0gICAACIEDQAgECEBDIECCwJAIARBFUcNACAAQQM2AhwgA
CAQNgIUIABBsJiAgAA2AhAgAEEVNgIMQQAhEAyIAgsgAEEANgIcIAAgEDYCFCAAQaeOgIAANgIQIABBEjYCDEEAIRAMhwILIBBBFUYN1gEgAEEANgIcIAAgATYCFCAAQdqNgIAANgIQIABBFDYCDEEAIRAMhgILIAAoAgQhFyAAQQA2AgQgECARp2oiFiEBIAAgFyAQIBYgFBsiEBC1gICAACIURQ2NASAAQQc2AhwgACAQNgIUIAAgFDYCDEEAIRAMhQILIAAgAC8BMEGAAXI7ATAgASEBC0EqIRAM6gELIBBBFUYN0QEgAEEANgIcIAAgATYCFCAAQYOMgIAANgIQIABBEzYCDEEAIRAMggILIBBBFUYNzwEgAEEANgIcIAAgATYCFCAAQZqPgIAANgIQIABBIjYCDEEAIRAMgQILIAAoAgQhECAAQQA2AgQCQCAAIBAgARC3gICAACIQDQAgAUEBaiEBDI0BCyAAQQw2AhwgACAQNgIMIAAgAUEBajYCFEEAIRAMgAILIBBBFUYNzAEgAEEANgIcIAAgATYCFCAAQZqPgIAANgIQIABBIjYCDEEAIRAM/wELIAAoAgQhECAAQQA2AgQCQCAAIBAgARC3gICAACIQDQAgAUEBaiEBDIwBCyAAQQ02AhwgACAQNgIMIAAgAUEBajYCFEEAIRAM/gELIBBBFUYNyQEgAEEANgIcIAAgATYCFCAAQcaMgIAANgIQIABBIzYCDEEAIRAM/QELIAAoAgQhECAAQQA2AgQCQCAAIBAgARC5gICAACIQDQAgAUEBaiEBDIsBCyAAQQ42AhwgACAQNgIMIAAgAUEBajYCFEEAIRAM/AELIABBADYCHCAAIAE2AhQgAEHAlYCAADYCECAAQQI2AgxBACEQDPsBCyAQQRVGDcUBIABBADYCHCAAIAE2AhQgAEHGjICAADYCECAAQSM2AgxBACEQDPoBCyAAQRA2AhwgACABNgIUIAAgEDYCDEEAIRAM+QELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARC5gICAACIEDQAgAUEBaiEBDPEBCyAAQRE2AhwgACAENgIMIAAgAUEBajYCFEEAIRAM+AELIBBBFUYNwQEgAEEANgIcIAAgATYCFCAAQcaMgIAANgIQIABBIzYCDEEAIRAM9wELIAAoAgQhECAAQQA2AgQCQCAAIBAgARC5gICAACIQDQAgAUEBaiEBDIgBCyAAQRM2AhwgACAQNgIMIAAgAUEBajYCFEEAIRAM9gELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARC5gICAACIEDQAgAUEBaiEBDO0BCyAAQRQ2AhwgACAENgIMIAAgAUEBajYCFEEAIRAM9QELIBBBFUYNvQEgAEEANgIcIAAgATYCFCAAQZqPgIAANgIQIABBIjYCDEEAIRAM9AELIAAoAgQhECAAQQA2AgQCQCAAIBAgARC3gICAACIQDQAgAUEBaiEBDIYBCyAAQRY2AhwgACAQNgIMIAAgAUEBajYCFEEAIRAM8wELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARC3gICAACIEDQAgAUEBaiEBDOkBCyAAQRc2AhwgACAENgIMIAAgAUEBajYCFEEAIRAM8gELIABBADYCHCAAIAE2AhQgAEHNk4CAADYCECAAQQw2AgxBACEQDPEBC0IBIRELIBBBAWohAQJAIAApAyAiEkL//////////w9WDQAgACASQgSGIBGENwMgIAEhAQyEAQsgAEEANgIcIAAgATYCFCAAQa2JgIAANgIQIABBDDYCDEEAIRAM7wELIABBADYCHCAAIBA2AhQgAEHNk4CAADYCECAAQQw2AgxBACEQDO4BCyAAKAIEIRcgAEEANgIEIBAgEadqIhYhASAAIBcgECAWIBQbIhAQtYCAgAAiFEUNcyAAQQU2AhwgACAQNgIUIAAgFDYCDEEAIRAM7QELIABBADYCHCAAIBA2AhQgAEGqnICAADYCECAAQQ82AgxBACEQDOwBCyAAIBAgAhC0gICAACIBDQEgECEBC0EOIRAM0QELAkAgAUEVRw0AIABBAjYCHCAAIBA2AhQgAEGwmICAADYCECAAQRU2AgxBACEQDOoBCyAAQQA2AhwgACAQNgIUIABBp46AgAA2AhAgAEESNgIMQQAhEAzpAQsgAUEBaiEQAkAgAC8BMCIBQYABcUUNAAJAIAAgECACELuAgIAAIgENACAQIQEMcAsgAUEVRw26ASAAQQU2AhwgACAQNgIUIABB+ZeAgAA2AhAgAEEVNgIMQQAhEAzpAQsCQCABQaAEcUGgBEcNACAALQAtQQJxDQAgAEEANgIcIAAgEDYCFCAAQZaTgIAANgIQIABBBDYCDEEAIRAM6QELIAAgECACEL2AgIAAGiAQIQECQAJAAkACQAJAIAAgECACELOAgIAADhYCAQAEBAQEBAQEBAQEBAQEBAQEBAQDBAsgAEEBOgAuCyAAIAAvATBBwAByOwEwIBAhAQtBJiEQDNEBCyAAQSM2AhwgACAQNgIUIABBpZaAgAA2AhAgAEEVNgIMQQAhEAzpAQsgAEEANgIcIAAgEDYCFCAAQdWLgIAANgIQIABBETYCDEEAIRAM6AELIAAtAC1BAXFFDQFBwwEhEAzOAQsCQCANIAJGDQADQAJAIA0tAABBIEYNACANIQEMxAELIA1BAWoiDSACRw0AC0ElIRAM5wELQSUhEAzmAQsgACgCBCEEIABBADYCBCAAIAQgDRCvgICAACIERQ2tASAAQSY2AhwgACAENgIMIAAgDUEBajYCFEEAIRAM5QELIBBBFUYNqwEgAEEANgIcIAAgATYCFCAAQf2NgIAANgIQIABBHTYCDEEAIRAM5AELIABBJzYCHCAAIAE2AhQgACAQNgIMQQAhEAzjAQsgECEBQQEhFAJAAkACQAJAAkACQAJAIAAtACxBfmoOBwYFBQMBAgAFCyAAIAAvATBBCHI7ATAMAwtBAiEUDAELQQQhFAsgAEEBOgAsIAAgAC8BMCAUcjsBMAsgECEBC0ErIRAMygELIABBADYCHCAAIBA2AhQgAEGrkoCAADYCECAAQQs2AgxBACEQDOIBCyAAQQA2AhwgACABNgIUIABB4Y+AgAA2AhAgAEEKNgIMQQAhEAzhAQsgAEEAOgAsIBAhAQy9AQsgECEBQQEhFAJAAkACQAJAAkAgAC0ALEF7ag4EAwECAAULIAAgAC8BMEEIcjsBMAwDC0ECIRQMAQtBBCEUCyAAQQE6ACwgACAALwEwIBRyOwEwCyAQIQELQSkhEAzFAQsgAEEANgIcIAAgATYCFCAAQfCUgIAANgIQIABBAzYCDEEAIRAM3QELAkAgDi0AAEENRw0AIAAoAgQhASAAQQA2AgQCQCAAIAEgDhCxgICAACIBDQAgDkEBaiEBDHULIABBLDYCHCAAIAE2AgwgACAOQQFqNgIUQQAhEAzdAQsgAC0ALUEBcUUNAUHEASEQDMMBCwJAIA4gAkcNAEEtIRAM3AELAkACQANAAkAg
Di0AAEF2ag4EAgAAAwALIA5BAWoiDiACRw0AC0EtIRAM3QELIAAoAgQhASAAQQA2AgQCQCAAIAEgDhCxgICAACIBDQAgDiEBDHQLIABBLDYCHCAAIA42AhQgACABNgIMQQAhEAzcAQsgACgCBCEBIABBADYCBAJAIAAgASAOELGAgIAAIgENACAOQQFqIQEMcwsgAEEsNgIcIAAgATYCDCAAIA5BAWo2AhRBACEQDNsBCyAAKAIEIQQgAEEANgIEIAAgBCAOELGAgIAAIgQNoAEgDiEBDM4BCyAQQSxHDQEgAUEBaiEQQQEhAQJAAkACQAJAAkAgAC0ALEF7ag4EAwECBAALIBAhAQwEC0ECIQEMAQtBBCEBCyAAQQE6ACwgACAALwEwIAFyOwEwIBAhAQwBCyAAIAAvATBBCHI7ATAgECEBC0E5IRAMvwELIABBADoALCABIQELQTQhEAy9AQsgACAALwEwQSByOwEwIAEhAQwCCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQsYCAgAAiBA0AIAEhAQzHAQsgAEE3NgIcIAAgATYCFCAAIAQ2AgxBACEQDNQBCyAAQQg6ACwgASEBC0EwIRAMuQELAkAgAC0AKEEBRg0AIAEhAQwECyAALQAtQQhxRQ2TASABIQEMAwsgAC0AMEEgcQ2UAUHFASEQDLcBCwJAIA8gAkYNAAJAA0ACQCAPLQAAQVBqIgFB/wFxQQpJDQAgDyEBQTUhEAy6AQsgACkDICIRQpmz5syZs+bMGVYNASAAIBFCCn4iETcDICARIAGtQv8BgyISQn+FVg0BIAAgESASfDcDICAPQQFqIg8gAkcNAAtBOSEQDNEBCyAAKAIEIQIgAEEANgIEIAAgAiAPQQFqIgQQsYCAgAAiAg2VASAEIQEMwwELQTkhEAzPAQsCQCAALwEwIgFBCHFFDQAgAC0AKEEBRw0AIAAtAC1BCHFFDZABCyAAIAFB9/sDcUGABHI7ATAgDyEBC0E3IRAMtAELIAAgAC8BMEEQcjsBMAyrAQsgEEEVRg2LASAAQQA2AhwgACABNgIUIABB8I6AgAA2AhAgAEEcNgIMQQAhEAzLAQsgAEHDADYCHCAAIAE2AgwgACANQQFqNgIUQQAhEAzKAQsCQCABLQAAQTpHDQAgACgCBCEQIABBADYCBAJAIAAgECABEK+AgIAAIhANACABQQFqIQEMYwsgAEHDADYCHCAAIBA2AgwgACABQQFqNgIUQQAhEAzKAQsgAEEANgIcIAAgATYCFCAAQbGRgIAANgIQIABBCjYCDEEAIRAMyQELIABBADYCHCAAIAE2AhQgAEGgmYCAADYCECAAQR42AgxBACEQDMgBCyAAQQA2AgALIABBgBI7ASogACAXQQFqIgEgAhCogICAACIQDQEgASEBC0HHACEQDKwBCyAQQRVHDYMBIABB0QA2AhwgACABNgIUIABB45eAgAA2AhAgAEEVNgIMQQAhEAzEAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMXgsgAEHSADYCHCAAIAE2AhQgACAQNgIMQQAhEAzDAQsgAEEANgIcIAAgFDYCFCAAQcGogIAANgIQIABBBzYCDCAAQQA2AgBBACEQDMIBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxdCyAAQdMANgIcIAAgATYCFCAAIBA2AgxBACEQDMEBC0EAIRAgAEEANgIcIAAgATYCFCAAQYCRgIAANgIQIABBCTYCDAzAAQsgEEEVRg19IABBADYCHCAAIAE2AhQgAEGUjYCAADYCECAAQSE2AgxBACEQDL8BC0EBIRZBACEXQQAhFEEBIRALIAAgEDoAKyABQQFqIQECQAJAIAAtAC1BEHENAAJAAkACQCAALQAqDgMBAAIECyAWRQ0DDAILIBQNAQwCCyAXRQ0BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQrYCAgAAiEA0AIAEhAQxcCyAAQdgANgIcIAAgATYCFCAAIBA2AgxBACEQDL4BCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQrYCAgAAiBA0AIAEhAQytAQsgAEHZADYCHCAAIAE2AhQgACAENgIMQQAhEAy9AQsgACgCBCEEIABBADYCBAJAIAAgBCABEK2AgIAAIgQNACABIQEMqwELIABB2gA2AhwgACABNgIUIAAgBDYCDEEAIRAMvAELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARCtgICAACIEDQAgASEBDKkBCyAAQdwANgIcIAAgATYCFCAAIAQ2AgxBACEQDLsBCwJAIAEtAABBUGoiEEH/AXFBCk8NACAAIBA6ACogAUEBaiEBQc8AIRAMogELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARCtgICAACIEDQAgASEBDKcBCyAAQd4ANgIcIAAgATYCFCAAIAQ2AgxBACEQDLoBCyAAQQA2AgAgF0EBaiEBAkAgAC0AKUEjTw0AIAEhAQxZCyAAQQA2AhwgACABNgIUIABB04mAgAA2AhAgAEEINgIMQQAhEAy5AQsgAEEANgIAC0EAIRAgAEEANgIcIAAgATYCFCAAQZCzgIAANgIQIABBCDYCDAy3AQsgAEEANgIAIBdBAWohAQJAIAAtAClBIUcNACABIQEMVgsgAEEANgIcIAAgATYCFCAAQZuKgIAANgIQIABBCDYCDEEAIRAMtgELIABBADYCACAXQQFqIQECQCAALQApIhBBXWpBC08NACABIQEMVQsCQCAQQQZLDQBBASAQdEHKAHFFDQAgASEBDFULQQAhECAAQQA2AhwgACABNgIUIABB94mAgAA2AhAgAEEINgIMDLUBCyAQQRVGDXEgAEEANgIcIAAgATYCFCAAQbmNgIAANgIQIABBGjYCDEEAIRAMtAELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDFQLIABB5QA2AhwgACABNgIUIAAgEDYCDEEAIRAMswELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDE0LIABB0gA2AhwgACABNgIUIAAgEDYCDEEAIRAMsgELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDE0LIABB0wA2AhwgACABNgIUIAAgEDYCDEEAIRAMsQELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDFELIABB5QA2AhwgACABNgIUIAAgEDYCDEEAIRAMsAELIABBADYCHCAAIAE2AhQgAEHGioCAADYCECAAQQc2AgxBACEQDK8BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxJCyAAQdIANgIcIAAgATYCFCAAIBA2AgxBACEQDK4BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxJCyAAQdMANgIcIAAgATYCFCA
AIBA2AgxBACEQDK0BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxNCyAAQeUANgIcIAAgATYCFCAAIBA2AgxBACEQDKwBCyAAQQA2AhwgACABNgIUIABB3IiAgAA2AhAgAEEHNgIMQQAhEAyrAQsgEEE/Rw0BIAFBAWohAQtBBSEQDJABC0EAIRAgAEEANgIcIAAgATYCFCAAQf2SgIAANgIQIABBBzYCDAyoAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMQgsgAEHSADYCHCAAIAE2AhQgACAQNgIMQQAhEAynAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMQgsgAEHTADYCHCAAIAE2AhQgACAQNgIMQQAhEAymAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMRgsgAEHlADYCHCAAIAE2AhQgACAQNgIMQQAhEAylAQsgACgCBCEBIABBADYCBAJAIAAgASAUEKeAgIAAIgENACAUIQEMPwsgAEHSADYCHCAAIBQ2AhQgACABNgIMQQAhEAykAQsgACgCBCEBIABBADYCBAJAIAAgASAUEKeAgIAAIgENACAUIQEMPwsgAEHTADYCHCAAIBQ2AhQgACABNgIMQQAhEAyjAQsgACgCBCEBIABBADYCBAJAIAAgASAUEKeAgIAAIgENACAUIQEMQwsgAEHlADYCHCAAIBQ2AhQgACABNgIMQQAhEAyiAQsgAEEANgIcIAAgFDYCFCAAQcOPgIAANgIQIABBBzYCDEEAIRAMoQELIABBADYCHCAAIAE2AhQgAEHDj4CAADYCECAAQQc2AgxBACEQDKABC0EAIRAgAEEANgIcIAAgFDYCFCAAQYycgIAANgIQIABBBzYCDAyfAQsgAEEANgIcIAAgFDYCFCAAQYycgIAANgIQIABBBzYCDEEAIRAMngELIABBADYCHCAAIBQ2AhQgAEH+kYCAADYCECAAQQc2AgxBACEQDJ0BCyAAQQA2AhwgACABNgIUIABBjpuAgAA2AhAgAEEGNgIMQQAhEAycAQsgEEEVRg1XIABBADYCHCAAIAE2AhQgAEHMjoCAADYCECAAQSA2AgxBACEQDJsBCyAAQQA2AgAgEEEBaiEBQSQhEAsgACAQOgApIAAoAgQhECAAQQA2AgQgACAQIAEQq4CAgAAiEA1UIAEhAQw+CyAAQQA2AgALQQAhECAAQQA2AhwgACAENgIUIABB8ZuAgAA2AhAgAEEGNgIMDJcBCyABQRVGDVAgAEEANgIcIAAgBTYCFCAAQfCMgIAANgIQIABBGzYCDEEAIRAMlgELIAAoAgQhBSAAQQA2AgQgACAFIBAQqYCAgAAiBQ0BIBBBAWohBQtBrQEhEAx7CyAAQcEBNgIcIAAgBTYCDCAAIBBBAWo2AhRBACEQDJMBCyAAKAIEIQYgAEEANgIEIAAgBiAQEKmAgIAAIgYNASAQQQFqIQYLQa4BIRAMeAsgAEHCATYCHCAAIAY2AgwgACAQQQFqNgIUQQAhEAyQAQsgAEEANgIcIAAgBzYCFCAAQZeLgIAANgIQIABBDTYCDEEAIRAMjwELIABBADYCHCAAIAg2AhQgAEHjkICAADYCECAAQQk2AgxBACEQDI4BCyAAQQA2AhwgACAINgIUIABBlI2AgAA2AhAgAEEhNgIMQQAhEAyNAQtBASEWQQAhF0EAIRRBASEQCyAAIBA6ACsgCUEBaiEIAkACQCAALQAtQRBxDQACQAJAAkAgAC0AKg4DAQACBAsgFkUNAwwCCyAUDQEMAgsgF0UNAQsgACgCBCEQIABBADYCBCAAIBAgCBCtgICAACIQRQ09IABByQE2AhwgACAINgIUIAAgEDYCDEEAIRAMjAELIAAoAgQhBCAAQQA2AgQgACAEIAgQrYCAgAAiBEUNdiAAQcoBNgIcIAAgCDYCFCAAIAQ2AgxBACEQDIsBCyAAKAIEIQQgAEEANgIEIAAgBCAJEK2AgIAAIgRFDXQgAEHLATYCHCAAIAk2AhQgACAENgIMQQAhEAyKAQsgACgCBCEEIABBADYCBCAAIAQgChCtgICAACIERQ1yIABBzQE2AhwgACAKNgIUIAAgBDYCDEEAIRAMiQELAkAgCy0AAEFQaiIQQf8BcUEKTw0AIAAgEDoAKiALQQFqIQpBtgEhEAxwCyAAKAIEIQQgAEEANgIEIAAgBCALEK2AgIAAIgRFDXAgAEHPATYCHCAAIAs2AhQgACAENgIMQQAhEAyIAQsgAEEANgIcIAAgBDYCFCAAQZCzgIAANgIQIABBCDYCDCAAQQA2AgBBACEQDIcBCyABQRVGDT8gAEEANgIcIAAgDDYCFCAAQcyOgIAANgIQIABBIDYCDEEAIRAMhgELIABBgQQ7ASggACgCBCEQIABCADcDACAAIBAgDEEBaiIMEKuAgIAAIhBFDTggAEHTATYCHCAAIAw2AhQgACAQNgIMQQAhEAyFAQsgAEEANgIAC0EAIRAgAEEANgIcIAAgBDYCFCAAQdibgIAANgIQIABBCDYCDAyDAQsgACgCBCEQIABCADcDACAAIBAgC0EBaiILEKuAgIAAIhANAUHGASEQDGkLIABBAjoAKAxVCyAAQdUBNgIcIAAgCzYCFCAAIBA2AgxBACEQDIABCyAQQRVGDTcgAEEANgIcIAAgBDYCFCAAQaSMgIAANgIQIABBEDYCDEEAIRAMfwsgAC0ANEEBRw00IAAgBCACELyAgIAAIhBFDTQgEEEVRw01IABB3AE2AhwgACAENgIUIABB1ZaAgAA2AhAgAEEVNgIMQQAhEAx+C0EAIRAgAEEANgIcIABBr4uAgAA2AhAgAEECNgIMIAAgFEEBajYCFAx9C0EAIRAMYwtBAiEQDGILQQ0hEAxhC0EPIRAMYAtBJSEQDF8LQRMhEAxeC0EVIRAMXQtBFiEQDFwLQRchEAxbC0EYIRAMWgtBGSEQDFkLQRohEAxYC0EbIRAMVwtBHCEQDFYLQR0hEAxVC0EfIRAMVAtBISEQDFMLQSMhEAxSC0HGACEQDFELQS4hEAxQC0EvIRAMTwtBOyEQDE4LQT0hEAxNC0HIACEQDEwLQckAIRAMSwtBywAhEAxKC0HMACEQDEkLQc4AIRAMSAtB0QAhEAxHC0HVACEQDEYLQdgAIRAMRQtB2QAhEAxEC0HbACEQDEMLQeQAIRAMQgtB5QAhEAxBC0HxACEQDEALQfQAIRAMPwtBjQEhEAw+C0GXASEQDD0LQakBIRAMPAtBrAEhEAw7C0HAASEQDDoLQbkBIRAMOQtBrwEhEAw4C0GxASEQDDcLQbIBIRAMNgtBtAEhEAw1C0G1ASEQDDQLQboBIRAMMwtBvQEhEAwyC0G/ASEQDDELQcEBIRAMMAsgAEEANgIcIAAgBDYCFCAAQemLgIAANgIQIABBHzYCDEEAIRAMSAsgAEHbATYCHCAAIAQ2AhQgAE
H6loCAADYCECAAQRU2AgxBACEQDEcLIABB+AA2AhwgACAMNgIUIABBypiAgAA2AhAgAEEVNgIMQQAhEAxGCyAAQdEANgIcIAAgBTYCFCAAQbCXgIAANgIQIABBFTYCDEEAIRAMRQsgAEH5ADYCHCAAIAE2AhQgACAQNgIMQQAhEAxECyAAQfgANgIcIAAgATYCFCAAQcqYgIAANgIQIABBFTYCDEEAIRAMQwsgAEHkADYCHCAAIAE2AhQgAEHjl4CAADYCECAAQRU2AgxBACEQDEILIABB1wA2AhwgACABNgIUIABByZeAgAA2AhAgAEEVNgIMQQAhEAxBCyAAQQA2AhwgACABNgIUIABBuY2AgAA2AhAgAEEaNgIMQQAhEAxACyAAQcIANgIcIAAgATYCFCAAQeOYgIAANgIQIABBFTYCDEEAIRAMPwsgAEEANgIEIAAgDyAPELGAgIAAIgRFDQEgAEE6NgIcIAAgBDYCDCAAIA9BAWo2AhRBACEQDD4LIAAoAgQhBCAAQQA2AgQCQCAAIAQgARCxgICAACIERQ0AIABBOzYCHCAAIAQ2AgwgACABQQFqNgIUQQAhEAw+CyABQQFqIQEMLQsgD0EBaiEBDC0LIABBADYCHCAAIA82AhQgAEHkkoCAADYCECAAQQQ2AgxBACEQDDsLIABBNjYCHCAAIAQ2AhQgACACNgIMQQAhEAw6CyAAQS42AhwgACAONgIUIAAgBDYCDEEAIRAMOQsgAEHQADYCHCAAIAE2AhQgAEGRmICAADYCECAAQRU2AgxBACEQDDgLIA1BAWohAQwsCyAAQRU2AhwgACABNgIUIABBgpmAgAA2AhAgAEEVNgIMQQAhEAw2CyAAQRs2AhwgACABNgIUIABBkZeAgAA2AhAgAEEVNgIMQQAhEAw1CyAAQQ82AhwgACABNgIUIABBkZeAgAA2AhAgAEEVNgIMQQAhEAw0CyAAQQs2AhwgACABNgIUIABBkZeAgAA2AhAgAEEVNgIMQQAhEAwzCyAAQRo2AhwgACABNgIUIABBgpmAgAA2AhAgAEEVNgIMQQAhEAwyCyAAQQs2AhwgACABNgIUIABBgpmAgAA2AhAgAEEVNgIMQQAhEAwxCyAAQQo2AhwgACABNgIUIABB5JaAgAA2AhAgAEEVNgIMQQAhEAwwCyAAQR42AhwgACABNgIUIABB+ZeAgAA2AhAgAEEVNgIMQQAhEAwvCyAAQQA2AhwgACAQNgIUIABB2o2AgAA2AhAgAEEUNgIMQQAhEAwuCyAAQQQ2AhwgACABNgIUIABBsJiAgAA2AhAgAEEVNgIMQQAhEAwtCyAAQQA2AgAgC0EBaiELC0G4ASEQDBILIABBADYCACAQQQFqIQFB9QAhEAwRCyABIQECQCAALQApQQVHDQBB4wAhEAwRC0HiACEQDBALQQAhECAAQQA2AhwgAEHkkYCAADYCECAAQQc2AgwgACAUQQFqNgIUDCgLIABBADYCACAXQQFqIQFBwAAhEAwOC0EBIQELIAAgAToALCAAQQA2AgAgF0EBaiEBC0EoIRAMCwsgASEBC0E4IRAMCQsCQCABIg8gAkYNAANAAkAgDy0AAEGAvoCAAGotAAAiAUEBRg0AIAFBAkcNAyAPQQFqIQEMBAsgD0EBaiIPIAJHDQALQT4hEAwiC0E+IRAMIQsgAEEAOgAsIA8hAQwBC0ELIRAMBgtBOiEQDAULIAFBAWohAUEtIRAMBAsgACABOgAsIABBADYCACAWQQFqIQFBDCEQDAMLIABBADYCACAXQQFqIQFBCiEQDAILIABBADYCAAsgAEEAOgAsIA0hAUEJIRAMAAsLQQAhECAAQQA2AhwgACALNgIUIABBzZCAgAA2AhAgAEEJNgIMDBcLQQAhECAAQQA2AhwgACAKNgIUIABB6YqAgAA2AhAgAEEJNgIMDBYLQQAhECAAQQA2AhwgACAJNgIUIABBt5CAgAA2AhAgAEEJNgIMDBULQQAhECAAQQA2AhwgACAINgIUIABBnJGAgAA2AhAgAEEJNgIMDBQLQQAhECAAQQA2AhwgACABNgIUIABBzZCAgAA2AhAgAEEJNgIMDBMLQQAhECAAQQA2AhwgACABNgIUIABB6YqAgAA2AhAgAEEJNgIMDBILQQAhECAAQQA2AhwgACABNgIUIABBt5CAgAA2AhAgAEEJNgIMDBELQQAhECAAQQA2AhwgACABNgIUIABBnJGAgAA2AhAgAEEJNgIMDBALQQAhECAAQQA2AhwgACABNgIUIABBl5WAgAA2AhAgAEEPNgIMDA8LQQAhECAAQQA2AhwgACABNgIUIABBl5WAgAA2AhAgAEEPNgIMDA4LQQAhECAAQQA2AhwgACABNgIUIABBwJKAgAA2AhAgAEELNgIMDA0LQQAhECAAQQA2AhwgACABNgIUIABBlYmAgAA2AhAgAEELNgIMDAwLQQAhECAAQQA2AhwgACABNgIUIABB4Y+AgAA2AhAgAEEKNgIMDAsLQQAhECAAQQA2AhwgACABNgIUIABB+4+AgAA2AhAgAEEKNgIMDAoLQQAhECAAQQA2AhwgACABNgIUIABB8ZmAgAA2AhAgAEECNgIMDAkLQQAhECAAQQA2AhwgACABNgIUIABBxJSAgAA2AhAgAEECNgIMDAgLQQAhECAAQQA2AhwgACABNgIUIABB8pWAgAA2AhAgAEECNgIMDAcLIABBAjYCHCAAIAE2AhQgAEGcmoCAADYCECAAQRY2AgxBACEQDAYLQQEhEAwFC0HUACEQIAEiBCACRg0EIANBCGogACAEIAJB2MKAgABBChDFgICAACADKAIMIQQgAygCCA4DAQQCAAsQyoCAgAAACyAAQQA2AhwgAEG1moCAADYCECAAQRc2AgwgACAEQQFqNgIUQQAhEAwCCyAAQQA2AhwgACAENgIUIABBypqAgAA2AhAgAEEJNgIMQQAhEAwBCwJAIAEiBCACRw0AQSIhEAwBCyAAQYmAgIAANgIIIAAgBDYCBEEhIRALIANBEGokgICAgAAgEAuvAQECfyABKAIAIQYCQAJAIAIgA0YNACAEIAZqIQQgBiADaiACayEHIAIgBkF/cyAFaiIGaiEFA0ACQCACLQAAIAQtAABGDQBBAiEEDAMLAkAgBg0AQQAhBCAFIQIMAwsgBkF/aiEGIARBAWohBCACQQFqIgIgA0cNAAsgByEGIAMhAgsgAEEBNgIAIAEgBjYCACAAIAI2AgQPCyABQQA2AgAgACAENgIAIAAgAjYCBAsKACAAEMeAgIAAC/I2AQt/I4CAgIAAQRBrIgEkgICAgAACQEEAKAKg0ICAAA0AQQAQy4CAgABBgNSEgABrIgJB2QBJDQBBACEDAkBBACgC4NOAgAAiBA0AQQBCfzcC7NOAgABBAEKAgISAgIDAADcC5NOAgABBACABQQhqQXBxQdiq1aoFcyIENgLg04CAAEEAQQA2AvTTgIAAQQBBADYCxNOAgAALQQAgAjYCzNOAgABBAEGA1ISAADYCy
NOAgABBAEGA1ISAADYCmNCAgABBACAENgKs0ICAAEEAQX82AqjQgIAAA0AgA0HE0ICAAGogA0G40ICAAGoiBDYCACAEIANBsNCAgABqIgU2AgAgA0G80ICAAGogBTYCACADQczQgIAAaiADQcDQgIAAaiIFNgIAIAUgBDYCACADQdTQgIAAaiADQcjQgIAAaiIENgIAIAQgBTYCACADQdDQgIAAaiAENgIAIANBIGoiA0GAAkcNAAtBgNSEgABBeEGA1ISAAGtBD3FBAEGA1ISAAEEIakEPcRsiA2oiBEEEaiACQUhqIgUgA2siA0EBcjYCAEEAQQAoAvDTgIAANgKk0ICAAEEAIAM2ApTQgIAAQQAgBDYCoNCAgABBgNSEgAAgBWpBODYCBAsCQAJAAkACQAJAAkACQAJAAkACQAJAAkAgAEHsAUsNAAJAQQAoAojQgIAAIgZBECAAQRNqQXBxIABBC0kbIgJBA3YiBHYiA0EDcUUNAAJAAkAgA0EBcSAEckEBcyIFQQN0IgRBsNCAgABqIgMgBEG40ICAAGooAgAiBCgCCCICRw0AQQAgBkF+IAV3cTYCiNCAgAAMAQsgAyACNgIIIAIgAzYCDAsgBEEIaiEDIAQgBUEDdCIFQQNyNgIEIAQgBWoiBCAEKAIEQQFyNgIEDAwLIAJBACgCkNCAgAAiB00NAQJAIANFDQACQAJAIAMgBHRBAiAEdCIDQQAgA2tycSIDQQAgA2txQX9qIgMgA0EMdkEQcSIDdiIEQQV2QQhxIgUgA3IgBCAFdiIDQQJ2QQRxIgRyIAMgBHYiA0EBdkECcSIEciADIAR2IgNBAXZBAXEiBHIgAyAEdmoiBEEDdCIDQbDQgIAAaiIFIANBuNCAgABqKAIAIgMoAggiAEcNAEEAIAZBfiAEd3EiBjYCiNCAgAAMAQsgBSAANgIIIAAgBTYCDAsgAyACQQNyNgIEIAMgBEEDdCIEaiAEIAJrIgU2AgAgAyACaiIAIAVBAXI2AgQCQCAHRQ0AIAdBeHFBsNCAgABqIQJBACgCnNCAgAAhBAJAAkAgBkEBIAdBA3Z0IghxDQBBACAGIAhyNgKI0ICAACACIQgMAQsgAigCCCEICyAIIAQ2AgwgAiAENgIIIAQgAjYCDCAEIAg2AggLIANBCGohA0EAIAA2ApzQgIAAQQAgBTYCkNCAgAAMDAtBACgCjNCAgAAiCUUNASAJQQAgCWtxQX9qIgMgA0EMdkEQcSIDdiIEQQV2QQhxIgUgA3IgBCAFdiIDQQJ2QQRxIgRyIAMgBHYiA0EBdkECcSIEciADIAR2IgNBAXZBAXEiBHIgAyAEdmpBAnRBuNKAgABqKAIAIgAoAgRBeHEgAmshBCAAIQUCQANAAkAgBSgCECIDDQAgBUEUaigCACIDRQ0CCyADKAIEQXhxIAJrIgUgBCAFIARJIgUbIQQgAyAAIAUbIQAgAyEFDAALCyAAKAIYIQoCQCAAKAIMIgggAEYNACAAKAIIIgNBACgCmNCAgABJGiAIIAM2AgggAyAINgIMDAsLAkAgAEEUaiIFKAIAIgMNACAAKAIQIgNFDQMgAEEQaiEFCwNAIAUhCyADIghBFGoiBSgCACIDDQAgCEEQaiEFIAgoAhAiAw0ACyALQQA2AgAMCgtBfyECIABBv39LDQAgAEETaiIDQXBxIQJBACgCjNCAgAAiB0UNAEEAIQsCQCACQYACSQ0AQR8hCyACQf///wdLDQAgA0EIdiIDIANBgP4/akEQdkEIcSIDdCIEIARBgOAfakEQdkEEcSIEdCIFIAVBgIAPakEQdkECcSIFdEEPdiADIARyIAVyayIDQQF0IAIgA0EVanZBAXFyQRxqIQsLQQAgAmshBAJAAkACQAJAIAtBAnRBuNKAgABqKAIAIgUNAEEAIQNBACEIDAELQQAhAyACQQBBGSALQQF2ayALQR9GG3QhAEEAIQgDQAJAIAUoAgRBeHEgAmsiBiAETw0AIAYhBCAFIQggBg0AQQAhBCAFIQggBSEDDAMLIAMgBUEUaigCACIGIAYgBSAAQR12QQRxakEQaigCACIFRhsgAyAGGyEDIABBAXQhACAFDQALCwJAIAMgCHINAEEAIQhBAiALdCIDQQAgA2tyIAdxIgNFDQMgA0EAIANrcUF/aiIDIANBDHZBEHEiA3YiBUEFdkEIcSIAIANyIAUgAHYiA0ECdkEEcSIFciADIAV2IgNBAXZBAnEiBXIgAyAFdiIDQQF2QQFxIgVyIAMgBXZqQQJ0QbjSgIAAaigCACEDCyADRQ0BCwNAIAMoAgRBeHEgAmsiBiAESSEAAkAgAygCECIFDQAgA0EUaigCACEFCyAGIAQgABshBCADIAggABshCCAFIQMgBQ0ACwsgCEUNACAEQQAoApDQgIAAIAJrTw0AIAgoAhghCwJAIAgoAgwiACAIRg0AIAgoAggiA0EAKAKY0ICAAEkaIAAgAzYCCCADIAA2AgwMCQsCQCAIQRRqIgUoAgAiAw0AIAgoAhAiA0UNAyAIQRBqIQULA0AgBSEGIAMiAEEUaiIFKAIAIgMNACAAQRBqIQUgACgCECIDDQALIAZBADYCAAwICwJAQQAoApDQgIAAIgMgAkkNAEEAKAKc0ICAACEEAkACQCADIAJrIgVBEEkNACAEIAJqIgAgBUEBcjYCBEEAIAU2ApDQgIAAQQAgADYCnNCAgAAgBCADaiAFNgIAIAQgAkEDcjYCBAwBCyAEIANBA3I2AgQgBCADaiIDIAMoAgRBAXI2AgRBAEEANgKc0ICAAEEAQQA2ApDQgIAACyAEQQhqIQMMCgsCQEEAKAKU0ICAACIAIAJNDQBBACgCoNCAgAAiAyACaiIEIAAgAmsiBUEBcjYCBEEAIAU2ApTQgIAAQQAgBDYCoNCAgAAgAyACQQNyNgIEIANBCGohAwwKCwJAAkBBACgC4NOAgABFDQBBACgC6NOAgAAhBAwBC0EAQn83AuzTgIAAQQBCgICEgICAwAA3AuTTgIAAQQAgAUEMakFwcUHYqtWqBXM2AuDTgIAAQQBBADYC9NOAgABBAEEANgLE04CAAEGAgAQhBAtBACEDAkAgBCACQccAaiIHaiIGQQAgBGsiC3EiCCACSw0AQQBBMDYC+NOAgAAMCgsCQEEAKALA04CAACIDRQ0AAkBBACgCuNOAgAAiBCAIaiIFIARNDQAgBSADTQ0BC0EAIQNBAEEwNgL404CAAAwKC0EALQDE04CAAEEEcQ0EAkACQAJAQQAoAqDQgIAAIgRFDQBByNOAgAAhAwNAAkAgAygCACIFIARLDQAgBSADKAIEaiAESw0DCyADKAIIIgMNAAsLQQAQy4CAgAAiAEF/Rg0FIAghBgJAQQAoAuTTgIAAIgNBf2oiBCAAcUUNACAIIABrIAQgAGpBACADa3FqIQYLIAYgAk0NBSAGQf7///8HSw0FAkBBACgCwNOAgAAiA0UNAEEAKAK404CAACIEIAZqIgUgBE0NBiAFIANLDQYLIAYQy4CAgAAiAyAA
Rw0BDAcLIAYgAGsgC3EiBkH+////B0sNBCAGEMuAgIAAIgAgAygCACADKAIEakYNAyAAIQMLAkAgA0F/Rg0AIAJByABqIAZNDQACQCAHIAZrQQAoAujTgIAAIgRqQQAgBGtxIgRB/v///wdNDQAgAyEADAcLAkAgBBDLgICAAEF/Rg0AIAQgBmohBiADIQAMBwtBACAGaxDLgICAABoMBAsgAyEAIANBf0cNBQwDC0EAIQgMBwtBACEADAULIABBf0cNAgtBAEEAKALE04CAAEEEcjYCxNOAgAALIAhB/v///wdLDQEgCBDLgICAACEAQQAQy4CAgAAhAyAAQX9GDQEgA0F/Rg0BIAAgA08NASADIABrIgYgAkE4ak0NAQtBAEEAKAK404CAACAGaiIDNgK404CAAAJAIANBACgCvNOAgABNDQBBACADNgK804CAAAsCQAJAAkACQEEAKAKg0ICAACIERQ0AQcjTgIAAIQMDQCAAIAMoAgAiBSADKAIEIghqRg0CIAMoAggiAw0ADAMLCwJAAkBBACgCmNCAgAAiA0UNACAAIANPDQELQQAgADYCmNCAgAALQQAhA0EAIAY2AszTgIAAQQAgADYCyNOAgABBAEF/NgKo0ICAAEEAQQAoAuDTgIAANgKs0ICAAEEAQQA2AtTTgIAAA0AgA0HE0ICAAGogA0G40ICAAGoiBDYCACAEIANBsNCAgABqIgU2AgAgA0G80ICAAGogBTYCACADQczQgIAAaiADQcDQgIAAaiIFNgIAIAUgBDYCACADQdTQgIAAaiADQcjQgIAAaiIENgIAIAQgBTYCACADQdDQgIAAaiAENgIAIANBIGoiA0GAAkcNAAsgAEF4IABrQQ9xQQAgAEEIakEPcRsiA2oiBCAGQUhqIgUgA2siA0EBcjYCBEEAQQAoAvDTgIAANgKk0ICAAEEAIAM2ApTQgIAAQQAgBDYCoNCAgAAgACAFakE4NgIEDAILIAMtAAxBCHENACAEIAVJDQAgBCAATw0AIARBeCAEa0EPcUEAIARBCGpBD3EbIgVqIgBBACgClNCAgAAgBmoiCyAFayIFQQFyNgIEIAMgCCAGajYCBEEAQQAoAvDTgIAANgKk0ICAAEEAIAU2ApTQgIAAQQAgADYCoNCAgAAgBCALakE4NgIEDAELAkAgAEEAKAKY0ICAACIITw0AQQAgADYCmNCAgAAgACEICyAAIAZqIQVByNOAgAAhAwJAAkACQAJAAkACQAJAA0AgAygCACAFRg0BIAMoAggiAw0ADAILCyADLQAMQQhxRQ0BC0HI04CAACEDA0ACQCADKAIAIgUgBEsNACAFIAMoAgRqIgUgBEsNAwsgAygCCCEDDAALCyADIAA2AgAgAyADKAIEIAZqNgIEIABBeCAAa0EPcUEAIABBCGpBD3EbaiILIAJBA3I2AgQgBUF4IAVrQQ9xQQAgBUEIakEPcRtqIgYgCyACaiICayEDAkAgBiAERw0AQQAgAjYCoNCAgABBAEEAKAKU0ICAACADaiIDNgKU0ICAACACIANBAXI2AgQMAwsCQCAGQQAoApzQgIAARw0AQQAgAjYCnNCAgABBAEEAKAKQ0ICAACADaiIDNgKQ0ICAACACIANBAXI2AgQgAiADaiADNgIADAMLAkAgBigCBCIEQQNxQQFHDQAgBEF4cSEHAkACQCAEQf8BSw0AIAYoAggiBSAEQQN2IghBA3RBsNCAgABqIgBGGgJAIAYoAgwiBCAFRw0AQQBBACgCiNCAgABBfiAId3E2AojQgIAADAILIAQgAEYaIAQgBTYCCCAFIAQ2AgwMAQsgBigCGCEJAkACQCAGKAIMIgAgBkYNACAGKAIIIgQgCEkaIAAgBDYCCCAEIAA2AgwMAQsCQCAGQRRqIgQoAgAiBQ0AIAZBEGoiBCgCACIFDQBBACEADAELA0AgBCEIIAUiAEEUaiIEKAIAIgUNACAAQRBqIQQgACgCECIFDQALIAhBADYCAAsgCUUNAAJAAkAgBiAGKAIcIgVBAnRBuNKAgABqIgQoAgBHDQAgBCAANgIAIAANAUEAQQAoAozQgIAAQX4gBXdxNgKM0ICAAAwCCyAJQRBBFCAJKAIQIAZGG2ogADYCACAARQ0BCyAAIAk2AhgCQCAGKAIQIgRFDQAgACAENgIQIAQgADYCGAsgBigCFCIERQ0AIABBFGogBDYCACAEIAA2AhgLIAcgA2ohAyAGIAdqIgYoAgQhBAsgBiAEQX5xNgIEIAIgA2ogAzYCACACIANBAXI2AgQCQCADQf8BSw0AIANBeHFBsNCAgABqIQQCQAJAQQAoAojQgIAAIgVBASADQQN2dCIDcQ0AQQAgBSADcjYCiNCAgAAgBCEDDAELIAQoAgghAwsgAyACNgIMIAQgAjYCCCACIAQ2AgwgAiADNgIIDAMLQR8hBAJAIANB////B0sNACADQQh2IgQgBEGA/j9qQRB2QQhxIgR0IgUgBUGA4B9qQRB2QQRxIgV0IgAgAEGAgA9qQRB2QQJxIgB0QQ92IAQgBXIgAHJrIgRBAXQgAyAEQRVqdkEBcXJBHGohBAsgAiAENgIcIAJCADcCECAEQQJ0QbjSgIAAaiEFAkBBACgCjNCAgAAiAEEBIAR0IghxDQAgBSACNgIAQQAgACAIcjYCjNCAgAAgAiAFNgIYIAIgAjYCCCACIAI2AgwMAwsgA0EAQRkgBEEBdmsgBEEfRht0IQQgBSgCACEAA0AgACIFKAIEQXhxIANGDQIgBEEddiEAIARBAXQhBCAFIABBBHFqQRBqIggoAgAiAA0ACyAIIAI2AgAgAiAFNgIYIAIgAjYCDCACIAI2AggMAgsgAEF4IABrQQ9xQQAgAEEIakEPcRsiA2oiCyAGQUhqIgggA2siA0EBcjYCBCAAIAhqQTg2AgQgBCAFQTcgBWtBD3FBACAFQUlqQQ9xG2pBQWoiCCAIIARBEGpJGyIIQSM2AgRBAEEAKALw04CAADYCpNCAgABBACADNgKU0ICAAEEAIAs2AqDQgIAAIAhBEGpBACkC0NOAgAA3AgAgCEEAKQLI04CAADcCCEEAIAhBCGo2AtDTgIAAQQAgBjYCzNOAgABBACAANgLI04CAAEEAQQA2AtTTgIAAIAhBJGohAwNAIANBBzYCACADQQRqIgMgBUkNAAsgCCAERg0DIAggCCgCBEF+cTYCBCAIIAggBGsiADYCACAEIABBAXI2AgQCQCAAQf8BSw0AIABBeHFBsNCAgABqIQMCQAJAQQAoAojQgIAAIgVBASAAQQN2dCIAcQ0AQQAgBSAAcjYCiNCAgAAgAyEFDAELIAMoAgghBQsgBSAENgIMIAMgBDYCCCAEIAM2AgwgBCAFNgIIDAQLQR8hAwJAIABB////B0sNACAAQQh2IgMgA0GA/j9qQRB2QQhxIgN0IgUgBUGA4B9qQRB2QQRxIgV0IgggCEGAgA9qQRB2QQJxIgh0QQ92IAMgBXIgCHJrIgNBAXQgACADQRVqdkEBcXJBHGohAws
gBCADNgIcIARCADcCECADQQJ0QbjSgIAAaiEFAkBBACgCjNCAgAAiCEEBIAN0IgZxDQAgBSAENgIAQQAgCCAGcjYCjNCAgAAgBCAFNgIYIAQgBDYCCCAEIAQ2AgwMBAsgAEEAQRkgA0EBdmsgA0EfRht0IQMgBSgCACEIA0AgCCIFKAIEQXhxIABGDQMgA0EddiEIIANBAXQhAyAFIAhBBHFqQRBqIgYoAgAiCA0ACyAGIAQ2AgAgBCAFNgIYIAQgBDYCDCAEIAQ2AggMAwsgBSgCCCIDIAI2AgwgBSACNgIIIAJBADYCGCACIAU2AgwgAiADNgIICyALQQhqIQMMBQsgBSgCCCIDIAQ2AgwgBSAENgIIIARBADYCGCAEIAU2AgwgBCADNgIIC0EAKAKU0ICAACIDIAJNDQBBACgCoNCAgAAiBCACaiIFIAMgAmsiA0EBcjYCBEEAIAM2ApTQgIAAQQAgBTYCoNCAgAAgBCACQQNyNgIEIARBCGohAwwDC0EAIQNBAEEwNgL404CAAAwCCwJAIAtFDQACQAJAIAggCCgCHCIFQQJ0QbjSgIAAaiIDKAIARw0AIAMgADYCACAADQFBACAHQX4gBXdxIgc2AozQgIAADAILIAtBEEEUIAsoAhAgCEYbaiAANgIAIABFDQELIAAgCzYCGAJAIAgoAhAiA0UNACAAIAM2AhAgAyAANgIYCyAIQRRqKAIAIgNFDQAgAEEUaiADNgIAIAMgADYCGAsCQAJAIARBD0sNACAIIAQgAmoiA0EDcjYCBCAIIANqIgMgAygCBEEBcjYCBAwBCyAIIAJqIgAgBEEBcjYCBCAIIAJBA3I2AgQgACAEaiAENgIAAkAgBEH/AUsNACAEQXhxQbDQgIAAaiEDAkACQEEAKAKI0ICAACIFQQEgBEEDdnQiBHENAEEAIAUgBHI2AojQgIAAIAMhBAwBCyADKAIIIQQLIAQgADYCDCADIAA2AgggACADNgIMIAAgBDYCCAwBC0EfIQMCQCAEQf///wdLDQAgBEEIdiIDIANBgP4/akEQdkEIcSIDdCIFIAVBgOAfakEQdkEEcSIFdCICIAJBgIAPakEQdkECcSICdEEPdiADIAVyIAJyayIDQQF0IAQgA0EVanZBAXFyQRxqIQMLIAAgAzYCHCAAQgA3AhAgA0ECdEG40oCAAGohBQJAIAdBASADdCICcQ0AIAUgADYCAEEAIAcgAnI2AozQgIAAIAAgBTYCGCAAIAA2AgggACAANgIMDAELIARBAEEZIANBAXZrIANBH0YbdCEDIAUoAgAhAgJAA0AgAiIFKAIEQXhxIARGDQEgA0EddiECIANBAXQhAyAFIAJBBHFqQRBqIgYoAgAiAg0ACyAGIAA2AgAgACAFNgIYIAAgADYCDCAAIAA2AggMAQsgBSgCCCIDIAA2AgwgBSAANgIIIABBADYCGCAAIAU2AgwgACADNgIICyAIQQhqIQMMAQsCQCAKRQ0AAkACQCAAIAAoAhwiBUECdEG40oCAAGoiAygCAEcNACADIAg2AgAgCA0BQQAgCUF+IAV3cTYCjNCAgAAMAgsgCkEQQRQgCigCECAARhtqIAg2AgAgCEUNAQsgCCAKNgIYAkAgACgCECIDRQ0AIAggAzYCECADIAg2AhgLIABBFGooAgAiA0UNACAIQRRqIAM2AgAgAyAINgIYCwJAAkAgBEEPSw0AIAAgBCACaiIDQQNyNgIEIAAgA2oiAyADKAIEQQFyNgIEDAELIAAgAmoiBSAEQQFyNgIEIAAgAkEDcjYCBCAFIARqIAQ2AgACQCAHRQ0AIAdBeHFBsNCAgABqIQJBACgCnNCAgAAhAwJAAkBBASAHQQN2dCIIIAZxDQBBACAIIAZyNgKI0ICAACACIQgMAQsgAigCCCEICyAIIAM2AgwgAiADNgIIIAMgAjYCDCADIAg2AggLQQAgBTYCnNCAgABBACAENgKQ0ICAAAsgAEEIaiEDCyABQRBqJICAgIAAIAMLCgAgABDJgICAAAviDQEHfwJAIABFDQAgAEF4aiIBIABBfGooAgAiAkF4cSIAaiEDAkAgAkEBcQ0AIAJBA3FFDQEgASABKAIAIgJrIgFBACgCmNCAgAAiBEkNASACIABqIQACQCABQQAoApzQgIAARg0AAkAgAkH/AUsNACABKAIIIgQgAkEDdiIFQQN0QbDQgIAAaiIGRhoCQCABKAIMIgIgBEcNAEEAQQAoAojQgIAAQX4gBXdxNgKI0ICAAAwDCyACIAZGGiACIAQ2AgggBCACNgIMDAILIAEoAhghBwJAAkAgASgCDCIGIAFGDQAgASgCCCICIARJGiAGIAI2AgggAiAGNgIMDAELAkAgAUEUaiICKAIAIgQNACABQRBqIgIoAgAiBA0AQQAhBgwBCwNAIAIhBSAEIgZBFGoiAigCACIEDQAgBkEQaiECIAYoAhAiBA0ACyAFQQA2AgALIAdFDQECQAJAIAEgASgCHCIEQQJ0QbjSgIAAaiICKAIARw0AIAIgBjYCACAGDQFBAEEAKAKM0ICAAEF+IAR3cTYCjNCAgAAMAwsgB0EQQRQgBygCECABRhtqIAY2AgAgBkUNAgsgBiAHNgIYAkAgASgCECICRQ0AIAYgAjYCECACIAY2AhgLIAEoAhQiAkUNASAGQRRqIAI2AgAgAiAGNgIYDAELIAMoAgQiAkEDcUEDRw0AIAMgAkF+cTYCBEEAIAA2ApDQgIAAIAEgAGogADYCACABIABBAXI2AgQPCyABIANPDQAgAygCBCICQQFxRQ0AAkACQCACQQJxDQACQCADQQAoAqDQgIAARw0AQQAgATYCoNCAgABBAEEAKAKU0ICAACAAaiIANgKU0ICAACABIABBAXI2AgQgAUEAKAKc0ICAAEcNA0EAQQA2ApDQgIAAQQBBADYCnNCAgAAPCwJAIANBACgCnNCAgABHDQBBACABNgKc0ICAAEEAQQAoApDQgIAAIABqIgA2ApDQgIAAIAEgAEEBcjYCBCABIABqIAA2AgAPCyACQXhxIABqIQACQAJAIAJB/wFLDQAgAygCCCIEIAJBA3YiBUEDdEGw0ICAAGoiBkYaAkAgAygCDCICIARHDQBBAEEAKAKI0ICAAEF+IAV3cTYCiNCAgAAMAgsgAiAGRhogAiAENgIIIAQgAjYCDAwBCyADKAIYIQcCQAJAIAMoAgwiBiADRg0AIAMoAggiAkEAKAKY0ICAAEkaIAYgAjYCCCACIAY2AgwMAQsCQCADQRRqIgIoAgAiBA0AIANBEGoiAigCACIEDQBBACEGDAELA0AgAiEFIAQiBkEUaiICKAIAIgQNACAGQRBqIQIgBigCECIEDQALIAVBADYCAAsgB0UNAAJAAkAgAyADKAIcIgRBAnRBuNKAgABqIgIoAgBHDQAgAiAGNgIAIAYNAUEAQQAoAozQgIAAQX4gBHdxNgKM0ICAAAwCCyAHQRBBFCAHKAIQIANGG2ogBjYCACAGRQ0BCyAGIAc2AhgCQCADKAIQIg
JFDQAgBiACNgIQIAIgBjYCGAsgAygCFCICRQ0AIAZBFGogAjYCACACIAY2AhgLIAEgAGogADYCACABIABBAXI2AgQgAUEAKAKc0ICAAEcNAUEAIAA2ApDQgIAADwsgAyACQX5xNgIEIAEgAGogADYCACABIABBAXI2AgQLAkAgAEH/AUsNACAAQXhxQbDQgIAAaiECAkACQEEAKAKI0ICAACIEQQEgAEEDdnQiAHENAEEAIAQgAHI2AojQgIAAIAIhAAwBCyACKAIIIQALIAAgATYCDCACIAE2AgggASACNgIMIAEgADYCCA8LQR8hAgJAIABB////B0sNACAAQQh2IgIgAkGA/j9qQRB2QQhxIgJ0IgQgBEGA4B9qQRB2QQRxIgR0IgYgBkGAgA9qQRB2QQJxIgZ0QQ92IAIgBHIgBnJrIgJBAXQgACACQRVqdkEBcXJBHGohAgsgASACNgIcIAFCADcCECACQQJ0QbjSgIAAaiEEAkACQEEAKAKM0ICAACIGQQEgAnQiA3ENACAEIAE2AgBBACAGIANyNgKM0ICAACABIAQ2AhggASABNgIIIAEgATYCDAwBCyAAQQBBGSACQQF2ayACQR9GG3QhAiAEKAIAIQYCQANAIAYiBCgCBEF4cSAARg0BIAJBHXYhBiACQQF0IQIgBCAGQQRxakEQaiIDKAIAIgYNAAsgAyABNgIAIAEgBDYCGCABIAE2AgwgASABNgIIDAELIAQoAggiACABNgIMIAQgATYCCCABQQA2AhggASAENgIMIAEgADYCCAtBAEEAKAKo0ICAAEF/aiIBQX8gARs2AqjQgIAACwsEAAAAC04AAkAgAA0APwBBEHQPCwJAIABB//8DcQ0AIABBf0wNAAJAIABBEHZAACIAQX9HDQBBAEEwNgL404CAAEF/DwsgAEEQdA8LEMqAgIAAAAvyAgIDfwF+AkAgAkUNACAAIAE6AAAgAiAAaiIDQX9qIAE6AAAgAkEDSQ0AIAAgAToAAiAAIAE6AAEgA0F9aiABOgAAIANBfmogAToAACACQQdJDQAgACABOgADIANBfGogAToAACACQQlJDQAgAEEAIABrQQNxIgRqIgMgAUH/AXFBgYKECGwiATYCACADIAIgBGtBfHEiBGoiAkF8aiABNgIAIARBCUkNACADIAE2AgggAyABNgIEIAJBeGogATYCACACQXRqIAE2AgAgBEEZSQ0AIAMgATYCGCADIAE2AhQgAyABNgIQIAMgATYCDCACQXBqIAE2AgAgAkFsaiABNgIAIAJBaGogATYCACACQWRqIAE2AgAgBCADQQRxQRhyIgVrIgJBIEkNACABrUKBgICAEH4hBiADIAVqIQEDQCABIAY3AxggASAGNwMQIAEgBjcDCCABIAY3AwAgAUEgaiEBIAJBYGoiAkEfSw0ACwsgAAsLjkgBAEGACAuGSAEAAAACAAAAAwAAAAAAAAAAAAAABAAAAAUAAAAAAAAAAAAAAAYAAAAHAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAASW52YWxpZCBjaGFyIGluIHVybCBxdWVyeQBTcGFuIGNhbGxiYWNrIGVycm9yIGluIG9uX2JvZHkAQ29udGVudC1MZW5ndGggb3ZlcmZsb3cAQ2h1bmsgc2l6ZSBvdmVyZmxvdwBSZXNwb25zZSBvdmVyZmxvdwBJbnZhbGlkIG1ldGhvZCBmb3IgSFRUUC94LnggcmVxdWVzdABJbnZhbGlkIG1ldGhvZCBmb3IgUlRTUC94LnggcmVxdWVzdABFeHBlY3RlZCBTT1VSQ0UgbWV0aG9kIGZvciBJQ0UveC54IHJlcXVlc3QASW52YWxpZCBjaGFyIGluIHVybCBmcmFnbWVudCBzdGFydABFeHBlY3RlZCBkb3QAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9zdGF0dXMASW52YWxpZCByZXNwb25zZSBzdGF0dXMASW52YWxpZCBjaGFyYWN0ZXIgaW4gY2h1bmsgZXh0ZW5zaW9ucwBVc2VyIGNhbGxiYWNrIGVycm9yAGBvbl9yZXNldGAgY2FsbGJhY2sgZXJyb3IAYG9uX2NodW5rX2hlYWRlcmAgY2FsbGJhY2sgZXJyb3IAYG9uX21lc3NhZ2VfYmVnaW5gIGNhbGxiYWNrIGVycm9yAGBvbl9jaHVua19leHRlbnNpb25fdmFsdWVgIGNhbGxiYWNrIGVycm9yAGBvbl9zdGF0dXNfY29tcGxldGVgIGNhbGxiYWNrIGVycm9yAGBvbl92ZXJzaW9uX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fdXJsX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fY2h1bmtfY29tcGxldGVgIGNhbGxiYWNrIGVycm9yAGBvbl9oZWFkZXJfdmFsdWVfY29tcGxldGVgIGNhbGxiYWNrIGVycm9yAGBvbl9tZXNzYWdlX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fbWV0aG9kX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25faGVhZGVyX2ZpZWxkX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fY2h1bmtfZXh0ZW5zaW9uX25hbWVgIGNhbGxiYWNrIGVycm9yAFVuZXhwZWN0ZWQgY2hhciBpbiB1cmwgc2VydmVyAEludmFsaWQgaGVhZGVyIHZhbHVlIGNoYXIASW52YWxpZCBoZWFkZXIgZmllbGQgY2hhcgBTcGFuIGNhbGxiYWNrIGVycm9yIGluIG9uX3ZlcnNpb24ASW52YWxpZCBtaW5vciB2ZXJzaW9uAEludmFsaWQgbWFqb3IgdmVyc2lvbgBFeHBlY3RlZCBzcGFjZSBhZnRlciB2ZXJzaW9uAEV4cGVjdGVkIENSTEYgYWZ0ZXIgdmVyc2lvbgBJbnZhbGlkIEhUVFAgdmVyc2lvbgBJbnZhbGlkIGhlYWRlciB0b2tlbgBTcGFuIGNhbGxiYWNrIGVycm9yIGluIG9uX3VybABJbnZhbGlkIGNoYXJhY3RlcnMgaW4gdXJsAFVuZXhwZWN0ZWQgc3RhcnQgY2hhciBpbiB1cmwARG91YmxlIEAgaW4gdXJsAEVtcHR5IENvbnRlbnQtTGVuZ3RoAEludmFsaWQgY2hhcmFjdGVyIGluIENvbnRlbnQtTGVuZ3RoAER1cGxpY2F0ZSBDb250ZW50LUxlbmd0aABJbnZhbGlkIGNoYXIgaW4gdXJsIHBhdGgAQ29udGVudC1MZW5ndGggY2FuJ3QgYmUgcHJlc2VudCB3aXRoIFRyYW5zZmVyLUVuY29kaW5nAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5rIHNpemUAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9oZWFkZXJfdmFsdWUAU3BhbiBjYWxsYmFjayBlc
nJvciBpbiBvbl9jaHVua19leHRlbnNpb25fdmFsdWUASW52YWxpZCBjaGFyYWN0ZXIgaW4gY2h1bmsgZXh0ZW5zaW9ucyB2YWx1ZQBNaXNzaW5nIGV4cGVjdGVkIExGIGFmdGVyIGhlYWRlciB2YWx1ZQBJbnZhbGlkIGBUcmFuc2Zlci1FbmNvZGluZ2AgaGVhZGVyIHZhbHVlAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5rIGV4dGVuc2lvbnMgcXVvdGUgdmFsdWUASW52YWxpZCBjaGFyYWN0ZXIgaW4gY2h1bmsgZXh0ZW5zaW9ucyBxdW90ZWQgdmFsdWUAUGF1c2VkIGJ5IG9uX2hlYWRlcnNfY29tcGxldGUASW52YWxpZCBFT0Ygc3RhdGUAb25fcmVzZXQgcGF1c2UAb25fY2h1bmtfaGVhZGVyIHBhdXNlAG9uX21lc3NhZ2VfYmVnaW4gcGF1c2UAb25fY2h1bmtfZXh0ZW5zaW9uX3ZhbHVlIHBhdXNlAG9uX3N0YXR1c19jb21wbGV0ZSBwYXVzZQBvbl92ZXJzaW9uX2NvbXBsZXRlIHBhdXNlAG9uX3VybF9jb21wbGV0ZSBwYXVzZQBvbl9jaHVua19jb21wbGV0ZSBwYXVzZQBvbl9oZWFkZXJfdmFsdWVfY29tcGxldGUgcGF1c2UAb25fbWVzc2FnZV9jb21wbGV0ZSBwYXVzZQBvbl9tZXRob2RfY29tcGxldGUgcGF1c2UAb25faGVhZGVyX2ZpZWxkX2NvbXBsZXRlIHBhdXNlAG9uX2NodW5rX2V4dGVuc2lvbl9uYW1lIHBhdXNlAFVuZXhwZWN0ZWQgc3BhY2UgYWZ0ZXIgc3RhcnQgbGluZQBTcGFuIGNhbGxiYWNrIGVycm9yIGluIG9uX2NodW5rX2V4dGVuc2lvbl9uYW1lAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5rIGV4dGVuc2lvbnMgbmFtZQBQYXVzZSBvbiBDT05ORUNUL1VwZ3JhZGUAUGF1c2Ugb24gUFJJL1VwZ3JhZGUARXhwZWN0ZWQgSFRUUC8yIENvbm5lY3Rpb24gUHJlZmFjZQBTcGFuIGNhbGxiYWNrIGVycm9yIGluIG9uX21ldGhvZABFeHBlY3RlZCBzcGFjZSBhZnRlciBtZXRob2QAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9oZWFkZXJfZmllbGQAUGF1c2VkAEludmFsaWQgd29yZCBlbmNvdW50ZXJlZABJbnZhbGlkIG1ldGhvZCBlbmNvdW50ZXJlZABVbmV4cGVjdGVkIGNoYXIgaW4gdXJsIHNjaGVtYQBSZXF1ZXN0IGhhcyBpbnZhbGlkIGBUcmFuc2Zlci1FbmNvZGluZ2AAU1dJVENIX1BST1hZAFVTRV9QUk9YWQBNS0FDVElWSVRZAFVOUFJPQ0VTU0FCTEVfRU5USVRZAENPUFkATU9WRURfUEVSTUFORU5UTFkAVE9PX0VBUkxZAE5PVElGWQBGQUlMRURfREVQRU5ERU5DWQBCQURfR0FURVdBWQBQTEFZAFBVVABDSEVDS09VVABHQVRFV0FZX1RJTUVPVVQAUkVRVUVTVF9USU1FT1VUAE5FVFdPUktfQ09OTkVDVF9USU1FT1VUAENPTk5FQ1RJT05fVElNRU9VVABMT0dJTl9USU1FT1VUAE5FVFdPUktfUkVBRF9USU1FT1VUAFBPU1QATUlTRElSRUNURURfUkVRVUVTVABDTElFTlRfQ0xPU0VEX1JFUVVFU1QAQ0xJRU5UX0NMT1NFRF9MT0FEX0JBTEFOQ0VEX1JFUVVFU1QAQkFEX1JFUVVFU1QASFRUUF9SRVFVRVNUX1NFTlRfVE9fSFRUUFNfUE9SVABSRVBPUlQASU1fQV9URUFQT1QAUkVTRVRfQ09OVEVOVABOT19DT05URU5UAFBBUlRJQUxfQ09OVEVOVABIUEVfSU5WQUxJRF9DT05TVEFOVABIUEVfQ0JfUkVTRVQAR0VUAEhQRV9TVFJJQ1QAQ09ORkxJQ1QAVEVNUE9SQVJZX1JFRElSRUNUAFBFUk1BTkVOVF9SRURJUkVDVABDT05ORUNUAE1VTFRJX1NUQVRVUwBIUEVfSU5WQUxJRF9TVEFUVVMAVE9PX01BTllfUkVRVUVTVFMARUFSTFlfSElOVFMAVU5BVkFJTEFCTEVfRk9SX0xFR0FMX1JFQVNPTlMAT1BUSU9OUwBTV0lUQ0hJTkdfUFJPVE9DT0xTAFZBUklBTlRfQUxTT19ORUdPVElBVEVTAE1VTFRJUExFX0NIT0lDRVMASU5URVJOQUxfU0VSVkVSX0VSUk9SAFdFQl9TRVJWRVJfVU5LTk9XTl9FUlJPUgBSQUlMR1VOX0VSUk9SAElERU5USVRZX1BST1ZJREVSX0FVVEhFTlRJQ0FUSU9OX0VSUk9SAFNTTF9DRVJUSUZJQ0FURV9FUlJPUgBJTlZBTElEX1hfRk9SV0FSREVEX0ZPUgBTRVRfUEFSQU1FVEVSAEdFVF9QQVJBTUVURVIASFBFX1VTRVIAU0VFX09USEVSAEhQRV9DQl9DSFVOS19IRUFERVIATUtDQUxFTkRBUgBTRVRVUABXRUJfU0VSVkVSX0lTX0RPV04AVEVBUkRPV04ASFBFX0NMT1NFRF9DT05ORUNUSU9OAEhFVVJJU1RJQ19FWFBJUkFUSU9OAERJU0NPTk5FQ1RFRF9PUEVSQVRJT04ATk9OX0FVVEhPUklUQVRJVkVfSU5GT1JNQVRJT04ASFBFX0lOVkFMSURfVkVSU0lPTgBIUEVfQ0JfTUVTU0FHRV9CRUdJTgBTSVRFX0lTX0ZST1pFTgBIUEVfSU5WQUxJRF9IRUFERVJfVE9LRU4ASU5WQUxJRF9UT0tFTgBGT1JCSURERU4ARU5IQU5DRV9ZT1VSX0NBTE0ASFBFX0lOVkFMSURfVVJMAEJMT0NLRURfQllfUEFSRU5UQUxfQ09OVFJPTABNS0NPTABBQ0wASFBFX0lOVEVSTkFMAFJFUVVFU1RfSEVBREVSX0ZJRUxEU19UT09fTEFSR0VfVU5PRkZJQ0lBTABIUEVfT0sAVU5MSU5LAFVOTE9DSwBQUkkAUkVUUllfV0lUSABIUEVfSU5WQUxJRF9DT05URU5UX0xFTkdUSABIUEVfVU5FWFBFQ1RFRF9DT05URU5UX0xFTkdUSABGTFVTSABQUk9QUEFUQ0gATS1TRUFSQ0gAVVJJX1RPT19MT05HAFBST0NFU1NJTkcATUlTQ0VMTEFORU9VU19QRVJTSVNURU5UX1dBUk5JTkcATUlTQ0VMTEFORU9VU19XQVJOSU5HAEhQRV9JTlZBTElEX1RSQU5TRkVSX0VOQ09ESU5HAEV4cGVjdGVkIENSTEYASFBFX0lOVkFMSURfQ0hVTktfU0laRQBNT1ZFAENPTlRJTlVFAEhQRV9DQl9TVEFUVVNfQ09NUExFVEUASFBF
X0NCX0hFQURFUlNfQ09NUExFVEUASFBFX0NCX1ZFUlNJT05fQ09NUExFVEUASFBFX0NCX1VSTF9DT01QTEVURQBIUEVfQ0JfQ0hVTktfQ09NUExFVEUASFBFX0NCX0hFQURFUl9WQUxVRV9DT01QTEVURQBIUEVfQ0JfQ0hVTktfRVhURU5TSU9OX1ZBTFVFX0NPTVBMRVRFAEhQRV9DQl9DSFVOS19FWFRFTlNJT05fTkFNRV9DT01QTEVURQBIUEVfQ0JfTUVTU0FHRV9DT01QTEVURQBIUEVfQ0JfTUVUSE9EX0NPTVBMRVRFAEhQRV9DQl9IRUFERVJfRklFTERfQ09NUExFVEUAREVMRVRFAEhQRV9JTlZBTElEX0VPRl9TVEFURQBJTlZBTElEX1NTTF9DRVJUSUZJQ0FURQBQQVVTRQBOT19SRVNQT05TRQBVTlNVUFBPUlRFRF9NRURJQV9UWVBFAEdPTkUATk9UX0FDQ0VQVEFCTEUAU0VSVklDRV9VTkFWQUlMQUJMRQBSQU5HRV9OT1RfU0FUSVNGSUFCTEUAT1JJR0lOX0lTX1VOUkVBQ0hBQkxFAFJFU1BPTlNFX0lTX1NUQUxFAFBVUkdFAE1FUkdFAFJFUVVFU1RfSEVBREVSX0ZJRUxEU19UT09fTEFSR0UAUkVRVUVTVF9IRUFERVJfVE9PX0xBUkdFAFBBWUxPQURfVE9PX0xBUkdFAElOU1VGRklDSUVOVF9TVE9SQUdFAEhQRV9QQVVTRURfVVBHUkFERQBIUEVfUEFVU0VEX0gyX1VQR1JBREUAU09VUkNFAEFOTk9VTkNFAFRSQUNFAEhQRV9VTkVYUEVDVEVEX1NQQUNFAERFU0NSSUJFAFVOU1VCU0NSSUJFAFJFQ09SRABIUEVfSU5WQUxJRF9NRVRIT0QATk9UX0ZPVU5EAFBST1BGSU5EAFVOQklORABSRUJJTkQAVU5BVVRIT1JJWkVEAE1FVEhPRF9OT1RfQUxMT1dFRABIVFRQX1ZFUlNJT05fTk9UX1NVUFBPUlRFRABBTFJFQURZX1JFUE9SVEVEAEFDQ0VQVEVEAE5PVF9JTVBMRU1FTlRFRABMT09QX0RFVEVDVEVEAEhQRV9DUl9FWFBFQ1RFRABIUEVfTEZfRVhQRUNURUQAQ1JFQVRFRABJTV9VU0VEAEhQRV9QQVVTRUQAVElNRU9VVF9PQ0NVUkVEAFBBWU1FTlRfUkVRVUlSRUQAUFJFQ09ORElUSU9OX1JFUVVJUkVEAFBST1hZX0FVVEhFTlRJQ0FUSU9OX1JFUVVJUkVEAE5FVFdPUktfQVVUSEVOVElDQVRJT05fUkVRVUlSRUQATEVOR1RIX1JFUVVJUkVEAFNTTF9DRVJUSUZJQ0FURV9SRVFVSVJFRABVUEdSQURFX1JFUVVJUkVEAFBBR0VfRVhQSVJFRABQUkVDT05ESVRJT05fRkFJTEVEAEVYUEVDVEFUSU9OX0ZBSUxFRABSRVZBTElEQVRJT05fRkFJTEVEAFNTTF9IQU5EU0hBS0VfRkFJTEVEAExPQ0tFRABUUkFOU0ZPUk1BVElPTl9BUFBMSUVEAE5PVF9NT0RJRklFRABOT1RfRVhURU5ERUQAQkFORFdJRFRIX0xJTUlUX0VYQ0VFREVEAFNJVEVfSVNfT1ZFUkxPQURFRABIRUFEAEV4cGVjdGVkIEhUVFAvAABeEwAAJhMAADAQAADwFwAAnRMAABUSAAA5FwAA8BIAAAoQAAB1EgAArRIAAIITAABPFAAAfxAAAKAVAAAjFAAAiRIAAIsUAABNFQAA1BEAAM8UAAAQGAAAyRYAANwWAADBEQAA4BcAALsUAAB0FAAAfBUAAOUUAAAIFwAAHxAAAGUVAACjFAAAKBUAAAIVAACZFQAALBAAAIsZAABPDwAA1A4AAGoQAADOEAAAAhcAAIkOAABuEwAAHBMAAGYUAABWFwAAwRMAAM0TAABsEwAAaBcAAGYXAABfFwAAIhMAAM4PAABpDgAA2A4AAGMWAADLEwAAqg4AACgXAAAmFwAAxRMAAF0WAADoEQAAZxMAAGUTAADyFgAAcxMAAB0XAAD5FgAA8xEAAM8OAADOFQAADBIAALMRAAClEQAAYRAAADIXAAC7EwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEBAgEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAgMCAgICAgAAAgIAAgIAAgICAgICAgICAgAEAAAAAAACAgICAgICAgICAgICAgICAgICAgICAgICAgAAAAICAgICAgICAgICAgICAgICAgICAgICAgICAgICAAIAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAIAAgICAgIAAAICAAICAAICAgICAgICAgIAAwAEAAAAAgICAgICAgICAgICAgICAgICAgICAgICAgIAAAACAgICAgICAgICAgICAgICAgICAgICAgICAgICAgACAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABsb3NlZWVwLWFsaXZlAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE
BAQEBAQEBAQAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQEBAQEBAQEBAQEBAgEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQFjaHVua2VkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAQABAQEBAQAAAQEAAQEAAQEBAQEBAQEBAQAAAAAAAAABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGVjdGlvbmVudC1sZW5ndGhvbnJveHktY29ubmVjdGlvbgAAAAAAAAAAAAAAAAAAAHJhbnNmZXItZW5jb2RpbmdwZ3JhZGUNCg0KDQpTTQ0KDQpUVFAvQ0UvVFNQLwAAAAAAAAAAAAAAAAECAAEDAAAAAAAAAAAAAAAAAAAAAAAABAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAAAAAAAAAAABAgABAwAAAAAAAAAAAAAAAAAAAAAAAAQBAQUBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAAAAAAAAAAAAQAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAAAAAAAAAABAAACAAAAAAAAAAAAAAAAAAAAAAAAAwQAAAQEBAQEBAQEBAQEBQQEBAQEBAQEBAQEBAAEAAYHBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQABAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAAAAAAAAAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAgAAAAACAAAAAAAAAAAAAAAAAAAAAAADAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwAAAAAAAAMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAE5PVU5DRUVDS09VVE5FQ1RFVEVDUklCRUxVU0hFVEVBRFNFQVJDSFJHRUNUSVZJVFlMRU5EQVJWRU9USUZZUFRJT05TQ0hTRUFZU1RBVENIR0VPUkRJUkVDVE9SVFJDSFBBUkFNRVRFUlVSQ0VCU0NSSUJFQVJET1dOQUNFSU5ETktDS1VCU0NSSUJFSFRUUC9BRFRQLw==", "base64"); + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/constants.js +var require_constants4 = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/constants.js"(exports, module2) { + "use strict"; + var corsSafeListedMethods = ["GET", "HEAD", "POST"]; + var corsSafeListedMethodsSet = new Set(corsSafeListedMethods); + var nullBodyStatus = 
[101, 204, 205, 304];
+    var redirectStatus = [301, 302, 303, 307, 308];
+    var redirectStatusSet = new Set(redirectStatus);
+    var badPorts = [
+      "1",
+      "7",
+      "9",
+      "11",
+      "13",
+      "15",
+      "17",
+      "19",
+      "20",
+      "21",
+      "22",
+      "23",
+      "25",
+      "37",
+      "42",
+      "43",
+      "53",
+      "69",
+      "77",
+      "79",
+      "87",
+      "95",
+      "101",
+      "102",
+      "103",
+      "104",
+      "109",
+      "110",
+      "111",
+      "113",
+      "115",
+      "117",
+      "119",
+      "123",
+      "135",
+      "137",
+      "139",
+      "143",
+      "161",
+      "179",
+      "389",
+      "427",
+      "465",
+      "512",
+      "513",
+      "514",
+      "515",
+      "526",
+      "530",
+      "531",
+      "532",
+      "540",
+      "548",
+      "554",
+      "556",
+      "563",
+      "587",
+      "601",
+      "636",
+      "989",
+      "990",
+      "993",
+      "995",
+      "1719",
+      "1720",
+      "1723",
+      "2049",
+      "3659",
+      "4045",
+      "4190",
+      "5060",
+      "5061",
+      "6000",
+      "6566",
+      "6665",
+      "6666",
+      "6667",
+      "6668",
+      "6669",
+      "6679",
+      "6697",
+      "10080"
+    ];
+    var badPortsSet = new Set(badPorts);
+    var referrerPolicy = [
+      "",
+      "no-referrer",
+      "no-referrer-when-downgrade",
+      "same-origin",
+      "origin",
+      "strict-origin",
+      "origin-when-cross-origin",
+      "strict-origin-when-cross-origin",
+      "unsafe-url"
+    ];
+    var referrerPolicySet = new Set(referrerPolicy);
+    var requestRedirect = ["follow", "manual", "error"];
+    var safeMethods = ["GET", "HEAD", "OPTIONS", "TRACE"];
+    var safeMethodsSet = new Set(safeMethods);
+    var requestMode = ["navigate", "same-origin", "no-cors", "cors"];
+    var requestCredentials = ["omit", "same-origin", "include"];
+    var requestCache = [
+      "default",
+      "no-store",
+      "reload",
+      "no-cache",
+      "force-cache",
+      "only-if-cached"
+    ];
+    var requestBodyHeader = [
+      "content-encoding",
+      "content-language",
+      "content-location",
+      "content-type",
+      // See https://github.com/nodejs/undici/issues/2021
+      // 'Content-Length' is a forbidden header name, which is typically
+      // removed in the Headers implementation. However, undici doesn't
+      // filter out headers, so we add it here.
+      "content-length"
+    ];
+    var requestDuplex = [
+      "half"
+    ];
+    var forbiddenMethods = ["CONNECT", "TRACE", "TRACK"];
+    var forbiddenMethodsSet = new Set(forbiddenMethods);
+    var subresource = [
+      "audio",
+      "audioworklet",
+      "font",
+      "image",
+      "manifest",
+      "paintworklet",
+      "script",
+      "style",
+      "track",
+      "video",
+      "xslt",
+      ""
+    ];
+    var subresourceSet = new Set(subresource);
+    module2.exports = {
+      subresource,
+      forbiddenMethods,
+      requestBodyHeader,
+      referrerPolicy,
+      requestRedirect,
+      requestMode,
+      requestCredentials,
+      requestCache,
+      redirectStatus,
+      corsSafeListedMethods,
+      nullBodyStatus,
+      safeMethods,
+      badPorts,
+      requestDuplex,
+      subresourceSet,
+      badPortsSet,
+      redirectStatusSet,
+      corsSafeListedMethodsSet,
+      safeMethodsSet,
+      forbiddenMethodsSet,
+      referrerPolicySet
+    };
+  }
+});
+
+// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/global.js
+var require_global = __commonJS({
+  ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/global.js"(exports, module2) {
+    "use strict";
+    var globalOrigin = Symbol.for("undici.globalOrigin.1");
+    function getGlobalOrigin() {
+      return globalThis[globalOrigin];
+    }
+    function setGlobalOrigin(newOrigin) {
+      if (newOrigin === void 0) {
+        Object.defineProperty(globalThis, globalOrigin, {
+          value: void 0,
+          writable: true,
+          enumerable: false,
+          configurable: false
+        });
+        return;
+      }
+      const parsedURL = new URL(newOrigin);
+      if (parsedURL.protocol !== "http:" && parsedURL.protocol !== "https:") {
+        throw new TypeError(`Only http & https urls are allowed, received ${parsedURL.protocol}`);
+      }
+      Object.defineProperty(globalThis, globalOrigin, {
+        value: parsedURL,
+        writable: true,
+        enumerable: false,
+        configurable: false
+      });
+    }
+    module2.exports = {
+      getGlobalOrigin,
+      setGlobalOrigin
+    };
+  }
+});
+
+// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/data-url.js
+var require_data_url = __commonJS({
+  ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/data-url.js"(exports, module2) {
+    "use strict";
+    var assert3 = require("node:assert");
+    var encoder = new TextEncoder();
+    var HTTP_TOKEN_CODEPOINTS = /^[!#$%&'*+-.^_|~A-Za-z0-9]+$/;
+    var HTTP_WHITESPACE_REGEX = /[\u000A\u000D\u0009\u0020]/;
+    var ASCII_WHITESPACE_REPLACE_REGEX = /[\u0009\u000A\u000C\u000D\u0020]/g;
+    var HTTP_QUOTED_STRING_TOKENS = /[\u0009\u0020-\u007E\u0080-\u00FF]/;
+    function dataURLProcessor(dataURL) {
+      assert3(dataURL.protocol === "data:");
+      let input = URLSerializer(dataURL, true);
+      input = input.slice(5);
+      const position = { position: 0 };
+      let mimeType = collectASequenceOfCodePointsFast(
+        ",",
+        input,
+        position
+      );
+      const mimeTypeLength = mimeType.length;
+      mimeType = removeASCIIWhitespace(mimeType, true, true);
+      if (position.position >= input.length) {
+        return "failure";
+      }
+      position.position++;
+      const encodedBody = input.slice(mimeTypeLength + 1);
+      let body = stringPercentDecode(encodedBody);
+      if (/;(\u0020){0,}base64$/i.test(mimeType)) {
+        const stringBody = isomorphicDecode(body);
+        body = forgivingBase64(stringBody);
+        if (body === "failure") {
+          return "failure";
+        }
+        mimeType = mimeType.slice(0, -6);
+        mimeType = mimeType.replace(/(\u0020)+$/, "");
+        mimeType = mimeType.slice(0, -1);
+      }
+      if (mimeType.startsWith(";")) {
+        mimeType = "text/plain" + mimeType;
+      }
+      let mimeTypeRecord = parseMIMEType(mimeType);
+      if (mimeTypeRecord === "failure") {
+        mimeTypeRecord = parseMIMEType("text/plain;charset=US-ASCII");
+      }
+      return { mimeType: mimeTypeRecord, body };
+    }
+    function URLSerializer(url, excludeFragment = false) {
+      if (!excludeFragment) {
+        return url.href;
+      }
+      const href = url.href;
+      const hashLength = url.hash.length;
+      const serialized = hashLength === 0 ? href : href.substring(0, href.length - hashLength);
+      if (!hashLength && href.endsWith("#")) {
+        return serialized.slice(0, -1);
+      }
+      return serialized;
+    }
+    function collectASequenceOfCodePoints(condition, input, position) {
+      let result = "";
+      while (position.position < input.length && condition(input[position.position])) {
+        result += input[position.position];
+        position.position++;
+      }
+      return result;
+    }
+    function collectASequenceOfCodePointsFast(char, input, position) {
+      const idx = input.indexOf(char, position.position);
+      const start = position.position;
+      if (idx === -1) {
+        position.position = input.length;
+        return input.slice(start);
+      }
+      position.position = idx;
+      return input.slice(start, position.position);
+    }
+    function stringPercentDecode(input) {
+      const bytes = encoder.encode(input);
+      return percentDecode(bytes);
+    }
+    function isHexCharByte(byte) {
+      return byte >= 48 && byte <= 57 || byte >= 65 && byte <= 70 || byte >= 97 && byte <= 102;
+    }
+    function hexByteToNumber(byte) {
+      return (
+        // 0-9
+        byte >= 48 && byte <= 57 ? byte - 48 : (byte & 223) - 55
+      );
+    }
+    function percentDecode(input) {
+      const length = input.length;
+      const output = new Uint8Array(length);
+      let j = 0;
+      for (let i = 0; i < length; ++i) {
+        const byte = input[i];
+        if (byte !== 37) {
+          output[j++] = byte;
+        } else if (byte === 37 && !(isHexCharByte(input[i + 1]) && isHexCharByte(input[i + 2]))) {
+          output[j++] = 37;
+        } else {
+          output[j++] = hexByteToNumber(input[i + 1]) << 4 | hexByteToNumber(input[i + 2]);
+          i += 2;
+        }
+      }
+      return length === j ? output : output.subarray(0, j);
+    }
+    function parseMIMEType(input) {
+      input = removeHTTPWhitespace(input, true, true);
+      const position = { position: 0 };
+      const type = collectASequenceOfCodePointsFast(
+        "/",
+        input,
+        position
+      );
+      if (type.length === 0 || !HTTP_TOKEN_CODEPOINTS.test(type)) {
+        return "failure";
+      }
+      if (position.position > input.length) {
+        return "failure";
+      }
+      position.position++;
+      let subtype = collectASequenceOfCodePointsFast(
+        ";",
+        input,
+        position
+      );
+      subtype = removeHTTPWhitespace(subtype, false, true);
+      if (subtype.length === 0 || !HTTP_TOKEN_CODEPOINTS.test(subtype)) {
+        return "failure";
+      }
+      const typeLowercase = type.toLowerCase();
+      const subtypeLowercase = subtype.toLowerCase();
+      const mimeType = {
+        type: typeLowercase,
+        subtype: subtypeLowercase,
+        /** @type {Map} */
+        parameters: /* @__PURE__ */ new Map(),
+        // https://mimesniff.spec.whatwg.org/#mime-type-essence
+        essence: `${typeLowercase}/${subtypeLowercase}`
+      };
+      while (position.position < input.length) {
+        position.position++;
+        collectASequenceOfCodePoints(
+          // https://fetch.spec.whatwg.org/#http-whitespace
+          (char) => HTTP_WHITESPACE_REGEX.test(char),
+          input,
+          position
+        );
+        let parameterName = collectASequenceOfCodePoints(
+          (char) => char !== ";" && char !== "=",
+          input,
+          position
+        );
+        parameterName = parameterName.toLowerCase();
+        if (position.position < input.length) {
+          if (input[position.position] === ";") {
+            continue;
+          }
+          position.position++;
+        }
+        if (position.position > input.length) {
+          break;
+        }
+        let parameterValue = null;
+        if (input[position.position] === '"') {
+          parameterValue = collectAnHTTPQuotedString(input, position, true);
+          collectASequenceOfCodePointsFast(
+            ";",
+            input,
+            position
+          );
+        } else {
+          parameterValue = collectASequenceOfCodePointsFast(
+            ";",
+            input,
+            position
+          );
+          parameterValue = removeHTTPWhitespace(parameterValue, false, true);
+          if (parameterValue.length === 0) {
+            continue;
+          }
+        }
+        if (parameterName.length !== 0 && HTTP_TOKEN_CODEPOINTS.test(parameterName) && (parameterValue.length === 0 || HTTP_QUOTED_STRING_TOKENS.test(parameterValue)) && !mimeType.parameters.has(parameterName)) {
+          mimeType.parameters.set(parameterName, parameterValue);
+        }
+      }
+      return mimeType;
+    }
+    function forgivingBase64(data) {
+      data = data.replace(ASCII_WHITESPACE_REPLACE_REGEX, "");
+      let dataLength = data.length;
+      if (dataLength % 4 === 0) {
+        if (data.charCodeAt(dataLength - 1) === 61) {
+          --dataLength;
+          if (data.charCodeAt(dataLength - 1) === 61) {
+            --dataLength;
+          }
+        }
+      }
+      if (dataLength % 4 === 1) {
+        return "failure";
+      }
+      if (/[^+/0-9A-Za-z]/.test(data.length === dataLength ? data : data.substring(0, dataLength))) {
+        return "failure";
+      }
+      const buffer = Buffer.from(data, "base64");
+      return new Uint8Array(buffer.buffer, buffer.byteOffset, buffer.byteLength);
+    }
+    function collectAnHTTPQuotedString(input, position, extractValue) {
+      const positionStart = position.position;
+      let value = "";
+      assert3(input[position.position] === '"');
+      position.position++;
+      while (true) {
+        value += collectASequenceOfCodePoints(
+          (char) => char !== '"' && char !== "\\",
+          input,
+          position
+        );
+        if (position.position >= input.length) {
+          break;
+        }
+        const quoteOrBackslash = input[position.position];
+        position.position++;
+        if (quoteOrBackslash === "\\") {
+          if (position.position >= input.length) {
+            value += "\\";
+            break;
+          }
+          value += input[position.position];
+          position.position++;
+        } else {
+          assert3(quoteOrBackslash === '"');
+          break;
+        }
+      }
+      if (extractValue) {
+        return value;
+      }
+      return input.slice(positionStart, position.position);
+    }
+    function serializeAMimeType(mimeType) {
+      assert3(mimeType !== "failure");
+      const { parameters, essence } = mimeType;
+      let serialization = essence;
+      for (let [name, value] of parameters.entries()) {
+        serialization += ";";
+        serialization += name;
+        serialization += "=";
+        if (!HTTP_TOKEN_CODEPOINTS.test(value)) {
+          value = value.replace(/(\\|")/g, "\\$1");
+          value = '"' + value;
+          value += '"';
+        }
+        serialization += value;
+      }
+      return serialization;
+    }
+    function isHTTPWhiteSpace(char) {
+      return char === 13 || char === 10 || char === 9 || char === 32;
+    }
+    function removeHTTPWhitespace(str, leading = true, trailing = true) {
+      return removeChars(str, leading, trailing, isHTTPWhiteSpace);
+    }
+    function isASCIIWhitespace(char) {
+      return char === 13 || char === 10 || char === 9 || char === 12 || char === 32;
+    }
+    function removeASCIIWhitespace(str, leading = true, trailing = true) {
+      return removeChars(str, leading, trailing, isASCIIWhitespace);
+    }
+    function removeChars(str, leading, trailing, predicate) {
+      let lead = 0;
+      let trail = str.length - 1;
+      if (leading) {
+        while (lead < str.length && predicate(str.charCodeAt(lead)))
+          lead++;
+      }
+      if (trailing) {
+        while (trail > 0 && predicate(str.charCodeAt(trail)))
+          trail--;
+      }
+      return lead === 0 && trail === str.length - 1 ? 
str : str.slice(lead, trail + 1); + } + function isomorphicDecode(input) { + const length = input.length; + if ((2 << 15) - 1 > length) { + return String.fromCharCode.apply(null, input); + } + let result = ""; + let i = 0; + let addition = (2 << 15) - 1; + while (i < length) { + if (i + addition > length) { + addition = length - i; + } + result += String.fromCharCode.apply(null, input.subarray(i, i += addition)); + } + return result; + } + function minimizeSupportedMimeType(mimeType) { + switch (mimeType.essence) { + case "application/ecmascript": + case "application/javascript": + case "application/x-ecmascript": + case "application/x-javascript": + case "text/ecmascript": + case "text/javascript": + case "text/javascript1.0": + case "text/javascript1.1": + case "text/javascript1.2": + case "text/javascript1.3": + case "text/javascript1.4": + case "text/javascript1.5": + case "text/jscript": + case "text/livescript": + case "text/x-ecmascript": + case "text/x-javascript": + return "text/javascript"; + case "application/json": + case "text/json": + return "application/json"; + case "image/svg+xml": + return "image/svg+xml"; + case "text/xml": + case "application/xml": + return "application/xml"; + } + if (mimeType.subtype.endsWith("+json")) { + return "application/json"; + } + if (mimeType.subtype.endsWith("+xml")) { + return "application/xml"; + } + return ""; + } + module2.exports = { + dataURLProcessor, + URLSerializer, + collectASequenceOfCodePoints, + collectASequenceOfCodePointsFast, + stringPercentDecode, + parseMIMEType, + collectAnHTTPQuotedString, + serializeAMimeType, + removeChars, + minimizeSupportedMimeType, + HTTP_TOKEN_CODEPOINTS, + isomorphicDecode + }; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/webidl.js +var require_webidl = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/webidl.js"(exports, module2) { + "use strict"; + var { types, inspect } = require("node:util"); + var { toUSVString } = require_util(); + var webidl = {}; + webidl.converters = {}; + webidl.util = {}; + webidl.errors = {}; + webidl.errors.exception = function(message) { + return new TypeError(`${message.header}: ${message.message}`); + }; + webidl.errors.conversionFailed = function(context) { + const plural2 = context.types.length === 1 ? "" : " one of"; + const message = `${context.argument} could not be converted to${plural2}: ${context.types.join(", ")}.`; + return webidl.errors.exception({ + header: context.prefix, + message + }); + }; + webidl.errors.invalidArgument = function(context) { + return webidl.errors.exception({ + header: context.prefix, + message: `"${context.value}" is an invalid ${context.type}.` + }); + }; + webidl.brandCheck = function(V, I, opts = void 0) { + if (opts?.strict !== false) { + if (!(V instanceof I)) { + throw new TypeError("Illegal invocation"); + } + } else { + if (V?.[Symbol.toStringTag] !== I.prototype[Symbol.toStringTag]) { + throw new TypeError("Illegal invocation"); + } + } + }; + webidl.argumentLengthCheck = function({ length }, min, ctx) { + if (length < min) { + throw webidl.errors.exception({ + message: `${min} argument${min !== 1 ? "s" : ""} required, but${length ? 
" only" : ""} ${length} found.`, + ...ctx + }); + } + }; + webidl.illegalConstructor = function() { + throw webidl.errors.exception({ + header: "TypeError", + message: "Illegal constructor" + }); + }; + webidl.util.Type = function(V) { + switch (typeof V) { + case "undefined": + return "Undefined"; + case "boolean": + return "Boolean"; + case "string": + return "String"; + case "symbol": + return "Symbol"; + case "number": + return "Number"; + case "bigint": + return "BigInt"; + case "function": + case "object": { + if (V === null) { + return "Null"; + } + return "Object"; + } + } + }; webidl.util.ConvertToInt = function(V, bitLength, signedness, opts = {}) { let upperBound; let lowerBound; @@ -9310,7 +8637,7 @@ var require_webidl = __commonJS({ if (Number.isNaN(x) || x === Number.POSITIVE_INFINITY || x === Number.NEGATIVE_INFINITY) { throw webidl.errors.exception({ header: "Integer conversion", - message: `Could not convert ${V} to an integer.` + message: `Could not convert ${webidl.util.Stringify(V)} to an integer.` }); } x = webidl.util.IntegerPart(x); @@ -9348,15 +8675,28 @@ var require_webidl = __commonJS({ } return r; }; + webidl.util.Stringify = function(V) { + const type = webidl.util.Type(V); + switch (type) { + case "Symbol": + return `Symbol(${V.description})`; + case "Object": + return inspect(V); + case "String": + return `"${V}"`; + default: + return `${V}`; + } + }; webidl.sequenceConverter = function(converter) { - return (V) => { + return (V, Iterable) => { if (webidl.util.Type(V) !== "Object") { throw webidl.errors.exception({ header: "Sequence", message: `Value of type ${webidl.util.Type(V)} is not an Object.` }); } - const method = V?.[Symbol.iterator]?.(); + const method = typeof Iterable === "function" ? Iterable() : V?.[Symbol.iterator]?.(); const seq = []; if (method === void 0 || typeof method.next !== "function") { throw webidl.errors.exception({ @@ -9384,7 +8724,7 @@ var require_webidl = __commonJS({ } const result = {}; if (!types.isProxy(O)) { - const keys2 = Object.keys(O); + const keys2 = [...Object.getOwnPropertyNames(O), ...Object.getOwnPropertySymbols(O)]; for (const key of keys2) { const typedKey = keyConverter(key); const typedValue = valueConverter(O[key]); @@ -9409,7 +8749,7 @@ var require_webidl = __commonJS({ if (opts.strict !== false && !(V instanceof i)) { throw webidl.errors.exception({ header: i.name, - message: `Expected ${V} to be an instance of ${i.name}.` + message: `Expected ${webidl.util.Stringify(V)} to be an instance of ${i.name}.` }); } return V; @@ -9511,8 +8851,8 @@ var require_webidl = __commonJS({ webidl.converters.ArrayBuffer = function(V, opts = {}) { if (webidl.util.Type(V) !== "Object" || !types.isAnyArrayBuffer(V)) { throw webidl.errors.conversionFailed({ - prefix: `${V}`, - argument: `${V}`, + prefix: webidl.util.Stringify(V), + argument: webidl.util.Stringify(V), types: ["ArrayBuffer"] }); } @@ -9522,13 +8862,19 @@ var require_webidl = __commonJS({ message: "SharedArrayBuffer is not allowed." }); } + if (V.resizable || V.growable) { + throw webidl.errors.exception({ + header: "ArrayBuffer", + message: "Received a resizable ArrayBuffer." 
+ }); + } return V; }; webidl.converters.TypedArray = function(V, T, opts = {}) { if (webidl.util.Type(V) !== "Object" || !types.isTypedArray(V) || V.constructor.name !== T.name) { throw webidl.errors.conversionFailed({ prefix: `${T.name}`, - argument: `${V}`, + argument: webidl.util.Stringify(V), types: [T.name] }); } @@ -9538,6 +8884,12 @@ var require_webidl = __commonJS({ message: "SharedArrayBuffer is not allowed." }); } + if (V.buffer.resizable || V.buffer.growable) { + throw webidl.errors.exception({ + header: "ArrayBuffer", + message: "Received a resizable ArrayBuffer." + }); + } return V; }; webidl.converters.DataView = function(V, opts = {}) { @@ -9553,6 +8905,12 @@ var require_webidl = __commonJS({ message: "SharedArrayBuffer is not allowed." }); } + if (V.buffer.resizable || V.buffer.growable) { + throw webidl.errors.exception({ + header: "ArrayBuffer", + message: "Received a resizable ArrayBuffer." + }); + } return V; }; webidl.converters.BufferSource = function(V, opts = {}) { @@ -9565,7 +8923,7 @@ var require_webidl = __commonJS({ if (types.isDataView(V)) { return webidl.converters.DataView(V, opts, { ...opts, allowShared: false }); } - throw new TypeError(`Could not convert ${V} to a BufferSource.`); + throw new TypeError(`Could not convert ${webidl.util.Stringify(V)} to a BufferSource.`); }; webidl.converters["sequence"] = webidl.sequenceConverter( webidl.converters.ByteString @@ -9583,708 +8941,804 @@ var require_webidl = __commonJS({ } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/file.js -var require_file = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/file.js"(exports, module2) { - "use strict"; - var { Blob: Blob2, File: NativeFile } = require("node:buffer"); - var { types } = require("node:util"); - var { kState } = require_symbols2(); - var { isBlobLike } = require_util2(); - var { webidl } = require_webidl(); - var { parseMIMEType, serializeAMimeType } = require_dataURL(); - var { kEnumerableProperty } = require_util(); - var encoder = new TextEncoder(); - var File = class _File extends Blob2 { - constructor(fileBits, fileName, options = {}) { - webidl.argumentLengthCheck(arguments, 2, { header: "File constructor" }); - fileBits = webidl.converters["sequence"](fileBits); - fileName = webidl.converters.USVString(fileName); - options = webidl.converters.FilePropertyBag(options); - const n = fileName; - let t = options.type; - let d; - substep: { - if (t) { - t = parseMIMEType(t); - if (t === "failure") { - t = ""; - break substep; - } - t = serializeAMimeType(t).toLowerCase(); - } - d = options.lastModified; +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/util.js +var require_util3 = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/util.js"(exports, module2) { + "use strict"; + var { Transform } = require("node:stream"); + var zlib = require("node:zlib"); + var { redirectStatusSet, referrerPolicySet: referrerPolicyTokens, badPortsSet } = require_constants4(); + var { getGlobalOrigin } = require_global(); + var { collectASequenceOfCodePoints, collectAnHTTPQuotedString, removeChars, parseMIMEType } = require_data_url(); + var { performance } = require("node:perf_hooks"); + var { isBlobLike, ReadableStreamFrom, isValidHTTPToken } = require_util(); + var assert3 = require("node:assert"); + var { isUint8Array } = require("node:util/types"); + var { webidl } = 
require_webidl(); + var supportedHashes = []; + var crypto; + try { + crypto = require("node:crypto"); + const possibleRelevantHashes = ["sha256", "sha384", "sha512"]; + supportedHashes = crypto.getHashes().filter((hash) => possibleRelevantHashes.includes(hash)); + } catch { + } + function responseURL(response) { + const urlList = response.urlList; + const length = urlList.length; + return length === 0 ? null : urlList[length - 1].toString(); + } + function responseLocationURL(response, requestFragment) { + if (!redirectStatusSet.has(response.status)) { + return null; + } + let location = response.headersList.get("location", true); + if (location !== null && isValidHeaderValue(location)) { + if (!isValidEncodedURL(location)) { + location = normalizeBinaryStringToUtf8(location); } - super(processBlobParts(fileBits, options), { type: t }); - this[kState] = { - name: n, - lastModified: d, - type: t - }; + location = new URL(location, responseURL(response)); } - get name() { - webidl.brandCheck(this, _File); - return this[kState].name; + if (location && !location.hash) { + location.hash = requestFragment; } - get lastModified() { - webidl.brandCheck(this, _File); - return this[kState].lastModified; + return location; + } + function isValidEncodedURL(url) { + for (let i = 0; i < url.length; ++i) { + const code = url.charCodeAt(i); + if (code > 126 || // Non-US-ASCII + DEL + code < 32) { + return false; + } } - get type() { - webidl.brandCheck(this, _File); - return this[kState].type; + return true; + } + function normalizeBinaryStringToUtf8(value) { + return Buffer.from(value, "binary").toString("utf8"); + } + function requestCurrentURL(request) { + return request.urlList[request.urlList.length - 1]; + } + function requestBadPort(request) { + const url = requestCurrentURL(request); + if (urlIsHttpHttpsScheme(url) && badPortsSet.has(url.port)) { + return "blocked"; } - }; - var FileLike = class _FileLike { - constructor(blobLike, fileName, options = {}) { - const n = fileName; - const t = options.type; - const d = options.lastModified ?? Date.now(); - this[kState] = { - blobLike, - name: n, - type: t, - lastModified: d - }; + return "allowed"; + } + function isErrorLike(object) { + return object instanceof Error || (object?.constructor?.name === "Error" || object?.constructor?.name === "DOMException"); + } + function isValidReasonPhrase(statusText) { + for (let i = 0; i < statusText.length; ++i) { + const c = statusText.charCodeAt(i); + if (!(c === 9 || // HTAB + c >= 32 && c <= 126 || // SP / VCHAR + c >= 128 && c <= 255)) { + return false; + } } - stream(...args) { - webidl.brandCheck(this, _FileLike); - return this[kState].blobLike.stream(...args); + return true; + } + var isValidHeaderName = isValidHTTPToken; + function isValidHeaderValue(potentialValue) { + return (potentialValue[0] === " " || potentialValue[0] === " " || potentialValue[potentialValue.length - 1] === " " || potentialValue[potentialValue.length - 1] === " " || potentialValue.includes("\n") || potentialValue.includes("\r") || potentialValue.includes("\0")) === false; + } + function setRequestReferrerPolicyOnRedirect(request, actualResponse) { + const { headersList } = actualResponse; + const policyHeader = (headersList.get("referrer-policy", true) ?? 
"").split(","); + let policy = ""; + if (policyHeader.length > 0) { + for (let i = policyHeader.length; i !== 0; i--) { + const token = policyHeader[i - 1].trim(); + if (referrerPolicyTokens.has(token)) { + policy = token; + break; + } + } } - arrayBuffer(...args) { - webidl.brandCheck(this, _FileLike); - return this[kState].blobLike.arrayBuffer(...args); + if (policy !== "") { + request.referrerPolicy = policy; } - slice(...args) { - webidl.brandCheck(this, _FileLike); - return this[kState].blobLike.slice(...args); + } + function crossOriginResourcePolicyCheck() { + return "allowed"; + } + function corsCheck() { + return "success"; + } + function TAOCheck() { + return "success"; + } + function appendFetchMetadata(httpRequest) { + let header = null; + header = httpRequest.mode; + httpRequest.headersList.set("sec-fetch-mode", header, true); + } + function appendRequestOriginHeader(request) { + let serializedOrigin = request.origin; + if (request.responseTainting === "cors" || request.mode === "websocket") { + if (serializedOrigin) { + request.headersList.append("origin", serializedOrigin, true); + } + } else if (request.method !== "GET" && request.method !== "HEAD") { + switch (request.referrerPolicy) { + case "no-referrer": + serializedOrigin = null; + break; + case "no-referrer-when-downgrade": + case "strict-origin": + case "strict-origin-when-cross-origin": + if (request.origin && urlHasHttpsScheme(request.origin) && !urlHasHttpsScheme(requestCurrentURL(request))) { + serializedOrigin = null; + } + break; + case "same-origin": + if (!sameOrigin(request, requestCurrentURL(request))) { + serializedOrigin = null; + } + break; + default: + } + if (serializedOrigin) { + request.headersList.append("origin", serializedOrigin, true); + } } - text(...args) { - webidl.brandCheck(this, _FileLike); - return this[kState].blobLike.text(...args); + } + function coarsenTime(timestamp, crossOriginIsolatedCapability) { + return timestamp; + } + function clampAndCoarsenConnectionTimingInfo(connectionTimingInfo, defaultStartTime, crossOriginIsolatedCapability) { + if (!connectionTimingInfo?.startTime || connectionTimingInfo.startTime < defaultStartTime) { + return { + domainLookupStartTime: defaultStartTime, + domainLookupEndTime: defaultStartTime, + connectionStartTime: defaultStartTime, + connectionEndTime: defaultStartTime, + secureConnectionStartTime: defaultStartTime, + ALPNNegotiatedProtocol: connectionTimingInfo?.ALPNNegotiatedProtocol + }; } - get size() { - webidl.brandCheck(this, _FileLike); - return this[kState].blobLike.size; + return { + domainLookupStartTime: coarsenTime(connectionTimingInfo.domainLookupStartTime, crossOriginIsolatedCapability), + domainLookupEndTime: coarsenTime(connectionTimingInfo.domainLookupEndTime, crossOriginIsolatedCapability), + connectionStartTime: coarsenTime(connectionTimingInfo.connectionStartTime, crossOriginIsolatedCapability), + connectionEndTime: coarsenTime(connectionTimingInfo.connectionEndTime, crossOriginIsolatedCapability), + secureConnectionStartTime: coarsenTime(connectionTimingInfo.secureConnectionStartTime, crossOriginIsolatedCapability), + ALPNNegotiatedProtocol: connectionTimingInfo.ALPNNegotiatedProtocol + }; + } + function coarsenedSharedCurrentTime(crossOriginIsolatedCapability) { + return coarsenTime(performance.now(), crossOriginIsolatedCapability); + } + function createOpaqueTimingInfo(timingInfo) { + return { + startTime: timingInfo.startTime ?? 0, + redirectStartTime: 0, + redirectEndTime: 0, + postRedirectStartTime: timingInfo.startTime ?? 
0, + finalServiceWorkerStartTime: 0, + finalNetworkResponseStartTime: 0, + finalNetworkRequestStartTime: 0, + endTime: 0, + encodedBodySize: 0, + decodedBodySize: 0, + finalConnectionTimingInfo: null + }; + } + function makePolicyContainer() { + return { + referrerPolicy: "strict-origin-when-cross-origin" + }; + } + function clonePolicyContainer(policyContainer) { + return { + referrerPolicy: policyContainer.referrerPolicy + }; + } + function determineRequestsReferrer(request) { + const policy = request.referrerPolicy; + assert3(policy); + let referrerSource = null; + if (request.referrer === "client") { + const globalOrigin = getGlobalOrigin(); + if (!globalOrigin || globalOrigin.origin === "null") { + return "no-referrer"; + } + referrerSource = new URL(globalOrigin); + } else if (request.referrer instanceof URL) { + referrerSource = request.referrer; } - get type() { - webidl.brandCheck(this, _FileLike); - return this[kState].blobLike.type; + let referrerURL = stripURLForReferrer(referrerSource); + const referrerOrigin = stripURLForReferrer(referrerSource, true); + if (referrerURL.toString().length > 4096) { + referrerURL = referrerOrigin; + } + const areSameOrigin = sameOrigin(request, referrerURL); + const isNonPotentiallyTrustWorthy = isURLPotentiallyTrustworthy(referrerURL) && !isURLPotentiallyTrustworthy(request.url); + switch (policy) { + case "origin": + return referrerOrigin != null ? referrerOrigin : stripURLForReferrer(referrerSource, true); + case "unsafe-url": + return referrerURL; + case "same-origin": + return areSameOrigin ? referrerOrigin : "no-referrer"; + case "origin-when-cross-origin": + return areSameOrigin ? referrerURL : referrerOrigin; + case "strict-origin-when-cross-origin": { + const currentURL = requestCurrentURL(request); + if (sameOrigin(referrerURL, currentURL)) { + return referrerURL; + } + if (isURLPotentiallyTrustworthy(referrerURL) && !isURLPotentiallyTrustworthy(currentURL)) { + return "no-referrer"; + } + return referrerOrigin; + } + case "strict-origin": + case "no-referrer-when-downgrade": + default: + return isNonPotentiallyTrustWorthy ? 
"no-referrer" : referrerOrigin; + } + } + function stripURLForReferrer(url, originOnly) { + assert3(url instanceof URL); + url = new URL(url); + if (url.protocol === "file:" || url.protocol === "about:" || url.protocol === "blank:") { + return "no-referrer"; } - get name() { - webidl.brandCheck(this, _FileLike); - return this[kState].name; + url.username = ""; + url.password = ""; + url.hash = ""; + if (originOnly) { + url.pathname = ""; + url.search = ""; } - get lastModified() { - webidl.brandCheck(this, _FileLike); - return this[kState].lastModified; + return url; + } + function isURLPotentiallyTrustworthy(url) { + if (!(url instanceof URL)) { + return false; } - get [Symbol.toStringTag]() { - return "File"; + if (url.href === "about:blank" || url.href === "about:srcdoc") { + return true; } - }; - Object.defineProperties(File.prototype, { - [Symbol.toStringTag]: { - value: "File", - configurable: true - }, - name: kEnumerableProperty, - lastModified: kEnumerableProperty - }); - webidl.converters.Blob = webidl.interfaceConverter(Blob2); - webidl.converters.BlobPart = function(V, opts) { - if (webidl.util.Type(V) === "Object") { - if (isBlobLike(V)) { - return webidl.converters.Blob(V, { strict: false }); + if (url.protocol === "data:") + return true; + if (url.protocol === "file:") + return true; + return isOriginPotentiallyTrustworthy(url.origin); + function isOriginPotentiallyTrustworthy(origin) { + if (origin == null || origin === "null") + return false; + const originAsURL = new URL(origin); + if (originAsURL.protocol === "https:" || originAsURL.protocol === "wss:") { + return true; } - if (ArrayBuffer.isView(V) || types.isAnyArrayBuffer(V)) { - return webidl.converters.BufferSource(V, opts); + if (/^127(?:\.[0-9]+){0,2}\.[0-9]+$|^\[(?:0*:)*?:?0*1\]$/.test(originAsURL.hostname) || (originAsURL.hostname === "localhost" || originAsURL.hostname.includes("localhost.")) || originAsURL.hostname.endsWith(".localhost")) { + return true; } + return false; } - return webidl.converters.USVString(V, opts); - }; - webidl.converters["sequence"] = webidl.sequenceConverter( - webidl.converters.BlobPart - ); - webidl.converters.FilePropertyBag = webidl.dictionaryConverter([ - { - key: "lastModified", - converter: webidl.converters["long long"], - get defaultValue() { - return Date.now(); - } - }, - { - key: "type", - converter: webidl.converters.DOMString, - defaultValue: "" - }, - { - key: "endings", - converter: (value) => { - value = webidl.converters.DOMString(value); - value = value.toLowerCase(); - if (value !== "native") { - value = "transparent"; - } - return value; - }, - defaultValue: "transparent" + } + function bytesMatch(bytes, metadataList) { + if (crypto === void 0) { + return true; } - ]); - function processBlobParts(parts, options) { - const bytes = []; - for (const element of parts) { - if (typeof element === "string") { - let s = element; - if (options.endings === "native") { - s = convertLineEndingsNative(s); - } - bytes.push(encoder.encode(s)); - } else if (ArrayBuffer.isView(element) || types.isArrayBuffer(element)) { - if (element.buffer) { - bytes.push( - new Uint8Array(element.buffer, element.byteOffset, element.byteLength) - ); + const parsedMetadata = parseMetadata(metadataList); + if (parsedMetadata === "no metadata") { + return true; + } + if (parsedMetadata.length === 0) { + return true; + } + const strongest = getStrongestMetadata(parsedMetadata); + const metadata = filterMetadataListByAlgorithm(parsedMetadata, strongest); + for (const item of metadata) { + const 
algorithm = item.algo; + const expectedValue = item.hash; + let actualValue = crypto.createHash(algorithm).update(bytes).digest("base64"); + if (actualValue[actualValue.length - 1] === "=") { + if (actualValue[actualValue.length - 2] === "=") { + actualValue = actualValue.slice(0, -2); } else { - bytes.push(new Uint8Array(element)); + actualValue = actualValue.slice(0, -1); } - } else if (isBlobLike(element)) { - bytes.push(element); + } + if (compareBase64Mixed(actualValue, expectedValue)) { + return true; } } - return bytes; - } - function convertLineEndingsNative(s) { - let nativeLineEnding = "\n"; - if (process.platform === "win32") { - nativeLineEnding = "\r\n"; - } - return s.replace(/\r?\n/g, nativeLineEnding); - } - function isFileLike(object) { - return NativeFile && object instanceof NativeFile || object instanceof File || object && (typeof object.stream === "function" || typeof object.arrayBuffer === "function") && object[Symbol.toStringTag] === "File"; + return false; } - module2.exports = { File, FileLike, isFileLike }; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/formdata.js -var require_formdata = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/formdata.js"(exports, module2) { - "use strict"; - var { isBlobLike, toUSVString, makeIterator } = require_util2(); - var { kState } = require_symbols2(); - var { kEnumerableProperty } = require_util(); - var { File: UndiciFile, FileLike, isFileLike } = require_file(); - var { webidl } = require_webidl(); - var { File: NativeFile } = require("node:buffer"); - var File = NativeFile ?? UndiciFile; - var FormData = class _FormData { - constructor(form) { - if (form !== void 0) { - throw webidl.errors.conversionFailed({ - prefix: "FormData constructor", - argument: "Argument 1", - types: ["undefined"] - }); + var parseHashWithOptions = /(?sha256|sha384|sha512)-((?[A-Za-z0-9+/]+|[A-Za-z0-9_-]+)={0,2}(?:\s|$)( +[!-~]*)?)?/i; + function parseMetadata(metadata) { + const result = []; + let empty = true; + for (const token of metadata.split(" ")) { + empty = false; + const parsedToken = parseHashWithOptions.exec(token); + if (parsedToken === null || parsedToken.groups === void 0 || parsedToken.groups.algo === void 0) { + continue; } - this[kState] = []; - } - append(name, value, filename = void 0) { - webidl.brandCheck(this, _FormData); - webidl.argumentLengthCheck(arguments, 2, { header: "FormData.append" }); - if (arguments.length === 3 && !isBlobLike(value)) { - throw new TypeError( - "Failed to execute 'append' on 'FormData': parameter 2 is not of type 'Blob'" - ); + const algorithm = parsedToken.groups.algo.toLowerCase(); + if (supportedHashes.includes(algorithm)) { + result.push(parsedToken.groups); } - name = webidl.converters.USVString(name); - value = isBlobLike(value) ? webidl.converters.Blob(value, { strict: false }) : webidl.converters.USVString(value); - filename = arguments.length === 3 ? 
webidl.converters.USVString(filename) : void 0; - const entry = makeEntry(name, value, filename); - this[kState].push(entry); } - delete(name) { - webidl.brandCheck(this, _FormData); - webidl.argumentLengthCheck(arguments, 1, { header: "FormData.delete" }); - name = webidl.converters.USVString(name); - this[kState] = this[kState].filter((entry) => entry.name !== name); + if (empty === true) { + return "no metadata"; } - get(name) { - webidl.brandCheck(this, _FormData); - webidl.argumentLengthCheck(arguments, 1, { header: "FormData.get" }); - name = webidl.converters.USVString(name); - const idx = this[kState].findIndex((entry) => entry.name === name); - if (idx === -1) { - return null; - } - return this[kState][idx].value; + return result; + } + function getStrongestMetadata(metadataList) { + let algorithm = metadataList[0].algo; + if (algorithm[3] === "5") { + return algorithm; } - getAll(name) { - webidl.brandCheck(this, _FormData); - webidl.argumentLengthCheck(arguments, 1, { header: "FormData.getAll" }); - name = webidl.converters.USVString(name); - return this[kState].filter((entry) => entry.name === name).map((entry) => entry.value); + for (let i = 1; i < metadataList.length; ++i) { + const metadata = metadataList[i]; + if (metadata.algo[3] === "5") { + algorithm = "sha512"; + break; + } else if (algorithm[3] === "3") { + continue; + } else if (metadata.algo[3] === "3") { + algorithm = "sha384"; + } } - has(name) { - webidl.brandCheck(this, _FormData); - webidl.argumentLengthCheck(arguments, 1, { header: "FormData.has" }); - name = webidl.converters.USVString(name); - return this[kState].findIndex((entry) => entry.name === name) !== -1; + return algorithm; + } + function filterMetadataListByAlgorithm(metadataList, algorithm) { + if (metadataList.length === 1) { + return metadataList; } - set(name, value, filename = void 0) { - webidl.brandCheck(this, _FormData); - webidl.argumentLengthCheck(arguments, 2, { header: "FormData.set" }); - if (arguments.length === 3 && !isBlobLike(value)) { - throw new TypeError( - "Failed to execute 'set' on 'FormData': parameter 2 is not of type 'Blob'" - ); - } - name = webidl.converters.USVString(name); - value = isBlobLike(value) ? webidl.converters.Blob(value, { strict: false }) : webidl.converters.USVString(value); - filename = arguments.length === 3 ? 
toUSVString(filename) : void 0; - const entry = makeEntry(name, value, filename); - const idx = this[kState].findIndex((entry2) => entry2.name === name); - if (idx !== -1) { - this[kState] = [ - ...this[kState].slice(0, idx), - entry, - ...this[kState].slice(idx + 1).filter((entry2) => entry2.name !== name) - ]; - } else { - this[kState].push(entry); + let pos = 0; + for (let i = 0; i < metadataList.length; ++i) { + if (metadataList[i].algo === algorithm) { + metadataList[pos++] = metadataList[i]; } } - entries() { - webidl.brandCheck(this, _FormData); - return makeIterator( - () => this[kState], - "FormData", - "key+value", - "name", - "value" - ); + metadataList.length = pos; + return metadataList; + } + function compareBase64Mixed(actualValue, expectedValue) { + if (actualValue.length !== expectedValue.length) { + return false; } - keys() { - webidl.brandCheck(this, _FormData); - return makeIterator( - () => this[kState], - "FormData", - "key", - "name", - "value" - ); + for (let i = 0; i < actualValue.length; ++i) { + if (actualValue[i] !== expectedValue[i]) { + if (actualValue[i] === "+" && expectedValue[i] === "-" || actualValue[i] === "/" && expectedValue[i] === "_") { + continue; + } + return false; + } } - values() { - webidl.brandCheck(this, _FormData); - return makeIterator( - () => this[kState], - "FormData", - "value", - "name", - "value" - ); + return true; + } + function tryUpgradeRequestToAPotentiallyTrustworthyURL(request) { + } + function sameOrigin(A, B) { + if (A.origin === B.origin && A.origin === "null") { + return true; } - /** - * @param {(value: string, key: string, self: FormData) => void} callbackFn - * @param {unknown} thisArg - */ - forEach(callbackFn, thisArg = globalThis) { - webidl.brandCheck(this, _FormData); - webidl.argumentLengthCheck(arguments, 1, { header: "FormData.forEach" }); - if (typeof callbackFn !== "function") { - throw new TypeError( - "Failed to execute 'forEach' on 'FormData': parameter 1 is not of type 'Function'." 
- ); - } - for (const [key, value] of this) { - callbackFn.call(thisArg, value, key, this); - } + if (A.protocol === B.protocol && A.hostname === B.hostname && A.port === B.port) { + return true; } + return false; + } + function createDeferredPromise() { + let res; + let rej; + const promise = new Promise((resolve, reject) => { + res = resolve; + rej = reject; + }); + return { promise, resolve: res, reject: rej }; + } + function isAborted(fetchParams) { + return fetchParams.controller.state === "aborted"; + } + function isCancelled(fetchParams) { + return fetchParams.controller.state === "aborted" || fetchParams.controller.state === "terminated"; + } + var normalizeMethodRecordBase = { + delete: "DELETE", + DELETE: "DELETE", + get: "GET", + GET: "GET", + head: "HEAD", + HEAD: "HEAD", + options: "OPTIONS", + OPTIONS: "OPTIONS", + post: "POST", + POST: "POST", + put: "PUT", + PUT: "PUT" }; - FormData.prototype[Symbol.iterator] = FormData.prototype.entries; - Object.defineProperties(FormData.prototype, { - append: kEnumerableProperty, - delete: kEnumerableProperty, - get: kEnumerableProperty, - getAll: kEnumerableProperty, - has: kEnumerableProperty, - set: kEnumerableProperty, - entries: kEnumerableProperty, - keys: kEnumerableProperty, - values: kEnumerableProperty, - forEach: kEnumerableProperty, - [Symbol.iterator]: { enumerable: false }, - [Symbol.toStringTag]: { - value: "FormData", - configurable: true + var normalizeMethodRecord = { + ...normalizeMethodRecordBase, + patch: "patch", + PATCH: "PATCH" + }; + Object.setPrototypeOf(normalizeMethodRecordBase, null); + Object.setPrototypeOf(normalizeMethodRecord, null); + function normalizeMethod(method) { + return normalizeMethodRecordBase[method.toLowerCase()] ?? method; + } + function serializeJavascriptValueToJSONString(value) { + const result = JSON.stringify(value); + if (result === void 0) { + throw new TypeError("Value is not JSON serializable"); } - }); - function makeEntry(name, value, filename) { - name = Buffer.from(name).toString("utf8"); - if (typeof value === "string") { - value = Buffer.from(value).toString("utf8"); - } else { - if (!isFileLike(value)) { - value = value instanceof Blob ? 
new File([value], "blob", { type: value.type }) : new FileLike(value, "blob", { type: value.type }); - } - if (filename !== void 0) { - const options = { - type: value.type, - lastModified: value.lastModified + assert3(typeof result === "string"); + return result; + } + var esIteratorPrototype = Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]())); + function createIterator(name, kInternalIterator, keyIndex = 0, valueIndex = 1) { + class FastIterableIterator { + /** @type {any} */ + #target; + /** @type {'key' | 'value' | 'key+value'} */ + #kind; + /** @type {number} */ + #index; + /** + * @see https://webidl.spec.whatwg.org/#dfn-default-iterator-object + * @param {unknown} target + * @param {'key' | 'value' | 'key+value'} kind + */ + constructor(target, kind) { + this.#target = target; + this.#kind = kind; + this.#index = 0; + } + next() { + if (typeof this !== "object" || this === null || !(#target in this)) { + throw new TypeError( + `'next' called on an object that does not implement interface ${name} Iterator.` + ); + } + const index = this.#index; + const values = this.#target[kInternalIterator]; + const len = values.length; + if (index >= len) { + return { + value: void 0, + done: true + }; + } + const { [keyIndex]: key, [valueIndex]: value } = values[index]; + this.#index = index + 1; + let result; + switch (this.#kind) { + case "key": + result = key; + break; + case "value": + result = value; + break; + case "key+value": + result = [key, value]; + break; + } + return { + value: result, + done: false }; - value = NativeFile && value instanceof NativeFile || value instanceof UndiciFile ? new File([value], filename, options) : new FileLike(value, filename, options); } } - return { name, value }; + delete FastIterableIterator.prototype.constructor; + Object.setPrototypeOf(FastIterableIterator.prototype, esIteratorPrototype); + Object.defineProperties(FastIterableIterator.prototype, { + [Symbol.toStringTag]: { + writable: false, + enumerable: false, + configurable: true, + value: `${name} Iterator` + }, + next: { writable: true, enumerable: true, configurable: true } + }); + return function(target, kind) { + return new FastIterableIterator(target, kind); + }; } - module2.exports = { FormData }; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/body.js -var require_body = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/fetch/body.js"(exports, module2) { - "use strict"; - var Busboy = require_main(); - var util = require_util(); - var { - ReadableStreamFrom, - isBlobLike, - isReadableStreamLike, - readableStreamClose, - createDeferredPromise, - fullyReadBody, - extractMimeType - } = require_util2(); - var { FormData } = require_formdata(); - var { kState } = require_symbols2(); - var { webidl } = require_webidl(); - var { Blob: Blob2, File: NativeFile } = require("node:buffer"); - var { kBodyUsed } = require_symbols(); - var assert3 = require("node:assert"); - var { isErrored } = require_util(); - var { isUint8Array, isArrayBuffer } = require("util/types"); - var { File: UndiciFile } = require_file(); - var { serializeAMimeType } = require_dataURL(); - var File = NativeFile ?? 
UndiciFile; - var textEncoder = new TextEncoder(); - var textDecoder = new TextDecoder(); - function extractBody(object, keepalive = false) { - let stream = null; - if (object instanceof ReadableStream) { - stream = object; - } else if (isBlobLike(object)) { - stream = object.stream(); - } else { - stream = new ReadableStream({ - async pull(controller) { - const buffer = typeof source === "string" ? textEncoder.encode(source) : source; - if (buffer.byteLength) { - controller.enqueue(buffer); + function iteratorMixin(name, object, kInternalIterator, keyIndex = 0, valueIndex = 1) { + const makeIterator = createIterator(name, kInternalIterator, keyIndex, valueIndex); + const properties = { + keys: { + writable: true, + enumerable: true, + configurable: true, + value: function keys() { + webidl.brandCheck(this, object); + return makeIterator(this, "key"); + } + }, + values: { + writable: true, + enumerable: true, + configurable: true, + value: function values() { + webidl.brandCheck(this, object); + return makeIterator(this, "value"); + } + }, + entries: { + writable: true, + enumerable: true, + configurable: true, + value: function entries() { + webidl.brandCheck(this, object); + return makeIterator(this, "key+value"); + } + }, + forEach: { + writable: true, + enumerable: true, + configurable: true, + value: function forEach(callbackfn, thisArg = globalThis) { + webidl.brandCheck(this, object); + webidl.argumentLengthCheck(arguments, 1, { header: `${name}.forEach` }); + if (typeof callbackfn !== "function") { + throw new TypeError( + `Failed to execute 'forEach' on '${name}': parameter 1 is not of type 'Function'.` + ); } - queueMicrotask(() => readableStreamClose(controller)); - }, - start() { - }, - type: "bytes" - }); - } - assert3(isReadableStreamLike(stream)); - let action = null; - let source = null; - let length = null; - let type = null; - if (typeof object === "string") { - source = object; - type = "text/plain;charset=UTF-8"; - } else if (object instanceof URLSearchParams) { - source = object.toString(); - type = "application/x-www-form-urlencoded;charset=UTF-8"; - } else if (isArrayBuffer(object)) { - source = new Uint8Array(object.slice()); - } else if (ArrayBuffer.isView(object)) { - source = new Uint8Array(object.buffer.slice(object.byteOffset, object.byteOffset + object.byteLength)); - } else if (util.isFormDataLike(object)) { - const boundary = `----formdata-undici-0${`${Math.floor(Math.random() * 1e11)}`.padStart(11, "0")}`; - const prefix = `--${boundary}\r -Content-Disposition: form-data`; - const escape = (str) => str.replace(/\n/g, "%0A").replace(/\r/g, "%0D").replace(/"/g, "%22"); - const normalizeLinefeeds = (value) => value.replace(/\r?\n|\r/g, "\r\n"); - const blobParts = []; - const rn = new Uint8Array([13, 10]); - length = 0; - let hasUnknownSizeValue = false; - for (const [name, value] of object) { - if (typeof value === "string") { - const chunk2 = textEncoder.encode(prefix + `; name="${escape(normalizeLinefeeds(name))}"\r -\r -${normalizeLinefeeds(value)}\r -`); - blobParts.push(chunk2); - length += chunk2.byteLength; - } else { - const chunk2 = textEncoder.encode(`${prefix}; name="${escape(normalizeLinefeeds(name))}"` + (value.name ? 
`; filename="${escape(value.name)}"` : "") + `\r -Content-Type: ${value.type || "application/octet-stream"}\r -\r -`); - blobParts.push(chunk2, value, rn); - if (typeof value.size === "number") { - length += chunk2.byteLength + value.size + rn.byteLength; - } else { - hasUnknownSizeValue = true; + for (const { 0: key, 1: value } of makeIterator(this, "key+value")) { + callbackfn.call(thisArg, value, key, this); } } } - const chunk = textEncoder.encode(`--${boundary}--`); - blobParts.push(chunk); - length += chunk.byteLength; - if (hasUnknownSizeValue) { - length = null; + }; + return Object.defineProperties(object.prototype, { + ...properties, + [Symbol.iterator]: { + writable: true, + enumerable: false, + configurable: true, + value: properties.entries.value } - source = object; - action = async function* () { - for (const part of blobParts) { - if (part.stream) { - yield* part.stream(); - } else { - yield part; - } - } - }; - type = `multipart/form-data; boundary=${boundary}`; - } else if (isBlobLike(object)) { - source = object; - length = object.size; - if (object.type) { - type = object.type; + }); + } + async function fullyReadBody(body, processBody, processBodyError) { + const successSteps = processBody; + const errorSteps = processBodyError; + let reader; + try { + reader = body.stream.getReader(); + } catch (e) { + errorSteps(e); + return; + } + try { + const result = await readAllBytes(reader); + successSteps(result); + } catch (e) { + errorSteps(e); + } + } + function isReadableStreamLike(stream) { + return stream instanceof ReadableStream || stream[Symbol.toStringTag] === "ReadableStream" && typeof stream.tee === "function"; + } + function readableStreamClose(controller) { + try { + controller.close(); + controller.byobRequest?.respond(0); + } catch (err) { + if (!err.message.includes("Controller is already closed") && !err.message.includes("ReadableStream is already closed")) { + throw err; } - } else if (typeof object[Symbol.asyncIterator] === "function") { - if (keepalive) { - throw new TypeError("keepalive"); + } + } + function isomorphicEncode(input) { + for (let i = 0; i < input.length; i++) { + assert3(input.charCodeAt(i) <= 255); + } + return input; + } + async function readAllBytes(reader) { + const bytes = []; + let byteLength = 0; + while (true) { + const { done, value: chunk } = await reader.read(); + if (done) { + return Buffer.concat(bytes, byteLength); } - if (util.isDisturbed(object) || object.locked) { - throw new TypeError( - "Response body object should not be disturbed or locked" - ); + if (!isUint8Array(chunk)) { + throw new TypeError("Received non-Uint8Array chunk"); } - stream = object instanceof ReadableStream ? 
object : ReadableStreamFrom(object); + bytes.push(chunk); + byteLength += chunk.length; } - if (typeof source === "string" || util.isBuffer(source)) { - length = Buffer.byteLength(source); + } + function urlIsLocal(url) { + assert3("protocol" in url); + const protocol = url.protocol; + return protocol === "about:" || protocol === "blob:" || protocol === "data:"; + } + function urlHasHttpsScheme(url) { + return typeof url === "string" && url[5] === ":" && url[0] === "h" && url[1] === "t" && url[2] === "t" && url[3] === "p" && url[4] === "s" || url.protocol === "https:"; + } + function urlIsHttpHttpsScheme(url) { + assert3("protocol" in url); + const protocol = url.protocol; + return protocol === "http:" || protocol === "https:"; + } + function simpleRangeHeaderValue(value, allowWhitespace) { + const data = value; + if (!data.startsWith("bytes")) { + return "failure"; } - if (action != null) { - let iterator; - stream = new ReadableStream({ - async start() { - iterator = action(object)[Symbol.asyncIterator](); - }, - async pull(controller) { - const { value, done } = await iterator.next(); - if (done) { - queueMicrotask(() => { - controller.close(); - controller.byobRequest?.respond(0); - }); - } else { - if (!isErrored(stream)) { - const buffer = new Uint8Array(value); - if (buffer.byteLength) { - controller.enqueue(buffer); - } - } - } - return controller.desiredSize > 0; - }, - async cancel(reason) { - await iterator.return(); - }, - type: "bytes" - }); + const position = { position: 5 }; + if (allowWhitespace) { + collectASequenceOfCodePoints( + (char) => char === " " || char === " ", + data, + position + ); } - const body = { stream, source, length }; - return [body, type]; - } - function safelyExtractBody(object, keepalive = false) { - if (object instanceof ReadableStream) { - assert3(!util.isDisturbed(object), "The body has already been consumed."); - assert3(!object.locked, "The stream is locked."); + if (data.charCodeAt(position.position) !== 61) { + return "failure"; } - return extractBody(object, keepalive); + position.position++; + if (allowWhitespace) { + collectASequenceOfCodePoints( + (char) => char === " " || char === " ", + data, + position + ); + } + const rangeStart = collectASequenceOfCodePoints( + (char) => { + const code = char.charCodeAt(0); + return code >= 48 && code <= 57; + }, + data, + position + ); + const rangeStartValue = rangeStart.length ? Number(rangeStart) : null; + if (allowWhitespace) { + collectASequenceOfCodePoints( + (char) => char === " " || char === " ", + data, + position + ); + } + if (data.charCodeAt(position.position) !== 45) { + return "failure"; + } + position.position++; + if (allowWhitespace) { + collectASequenceOfCodePoints( + (char) => char === " " || char === " ", + data, + position + ); + } + const rangeEnd = collectASequenceOfCodePoints( + (char) => { + const code = char.charCodeAt(0); + return code >= 48 && code <= 57; + }, + data, + position + ); + const rangeEndValue = rangeEnd.length ? 
Number(rangeEnd) : null; + if (position.position < data.length) { + return "failure"; + } + if (rangeEndValue === null && rangeStartValue === null) { + return "failure"; + } + if (rangeStartValue > rangeEndValue) { + return "failure"; + } + return { rangeStartValue, rangeEndValue }; } - function cloneBody(body) { - const [out1, out2] = body.stream.tee(); - const out2Clone = structuredClone(out2, { transfer: [out2] }); - const [, finalClone] = out2Clone.tee(); - body.stream = out1; - return { - stream: finalClone, - length: body.length, - source: body.source - }; + function buildContentRange(rangeStart, rangeEnd, fullLength) { + let contentRange = "bytes "; + contentRange += isomorphicEncode(`${rangeStart}`); + contentRange += "-"; + contentRange += isomorphicEncode(`${rangeEnd}`); + contentRange += "/"; + contentRange += isomorphicEncode(`${fullLength}`); + return contentRange; } - async function* consumeBody(body) { - if (body) { - if (isUint8Array(body)) { - yield body; - } else { - const stream = body.stream; - if (util.isDisturbed(stream)) { - throw new TypeError("The body has already been consumed."); - } - if (stream.locked) { - throw new TypeError("The stream is locked."); + var InflateStream = class extends Transform { + _transform(chunk, encoding, callback) { + if (!this._inflateStream) { + if (chunk.length === 0) { + callback(); + return; } - stream[kBodyUsed] = true; - yield* stream; + this._inflateStream = (chunk[0] & 15) === 8 ? zlib.createInflate() : zlib.createInflateRaw(); + this._inflateStream.on("data", this.push.bind(this)); + this._inflateStream.on("end", () => this.push(null)); + this._inflateStream.on("error", (err) => this.destroy(err)); + } + this._inflateStream.write(chunk, encoding, callback); + } + _final(callback) { + if (this._inflateStream) { + this._inflateStream.end(); + this._inflateStream = null; } + callback(); } + }; + function createInflate() { + return new InflateStream(); } - function throwIfAborted(state) { - if (state.aborted) { - throw new DOMException("The operation was aborted.", "AbortError"); + function extractMimeType(headers) { + let charset = null; + let essence = null; + let mimeType = null; + const values = getDecodeSplit("content-type", headers); + if (values === null) { + return "failure"; + } + for (const value of values) { + const temporaryMimeType = parseMIMEType(value); + if (temporaryMimeType === "failure" || temporaryMimeType.essence === "*/*") { + continue; + } + mimeType = temporaryMimeType; + if (mimeType.essence !== essence) { + charset = null; + if (mimeType.parameters.has("charset")) { + charset = mimeType.parameters.get("charset"); + } + essence = mimeType.essence; + } else if (!mimeType.parameters.has("charset") && charset !== null) { + mimeType.parameters.set("charset", charset); + } } + if (mimeType == null) { + return "failure"; + } + return mimeType; } - function bodyMixinMethods(instance) { - const methods = { - blob() { - return specConsumeBody(this, (bytes) => { - let mimeType = bodyMimeType(this); - if (mimeType === null) { - mimeType = ""; - } else if (mimeType) { - mimeType = serializeAMimeType(mimeType); - } - return new Blob2([bytes], { type: mimeType }); - }, instance); - }, - arrayBuffer() { - return specConsumeBody(this, (bytes) => { - return new Uint8Array(bytes).buffer; - }, instance); - }, - text() { - return specConsumeBody(this, utf8DecodeBytes, instance); - }, - json() { - return specConsumeBody(this, parseJSONFromBytes, instance); - }, - async formData() { - webidl.brandCheck(this, instance); - 
throwIfAborted(this[kState]); - const mimeType = bodyMimeType(this); - if (mimeType !== null && mimeType.essence === "multipart/form-data") { - const headers = {}; - for (const [key, value] of this.headers) - headers[key] = value; - const responseFormData = new FormData(); - let busboy; - try { - busboy = new Busboy({ - headers, - preservePath: true - }); - } catch (err) { - throw new DOMException(`${err}`, "AbortError"); - } - busboy.on("field", (name, value) => { - responseFormData.append(name, value); - }); - busboy.on("file", (name, value, filename, encoding, mimeType2) => { - const chunks = []; - if (encoding === "base64" || encoding.toLowerCase() === "base64") { - let base64chunk = ""; - value.on("data", (chunk) => { - base64chunk += chunk.toString().replace(/[\r\n]/gm, ""); - const end = base64chunk.length - base64chunk.length % 4; - chunks.push(Buffer.from(base64chunk.slice(0, end), "base64")); - base64chunk = base64chunk.slice(end); - }); - value.on("end", () => { - chunks.push(Buffer.from(base64chunk, "base64")); - responseFormData.append(name, new File(chunks, filename, { type: mimeType2 })); - }); - } else { - value.on("data", (chunk) => { - chunks.push(chunk); - }); - value.on("end", () => { - responseFormData.append(name, new File(chunks, filename, { type: mimeType2 })); - }); - } - }); - const busboyResolve = new Promise((resolve, reject) => { - busboy.on("finish", resolve); - busboy.on("error", (err) => reject(new TypeError(err))); - }); - if (this.body !== null) - for await (const chunk of consumeBody(this[kState].body)) - busboy.write(chunk); - busboy.end(); - await busboyResolve; - return responseFormData; - } else if (mimeType !== null && mimeType.essence === "application/x-www-form-urlencoded") { - let entries; - try { - let text = ""; - const streamingDecoder = new TextDecoder("utf-8", { ignoreBOM: true }); - for await (const chunk of consumeBody(this[kState].body)) { - if (!isUint8Array(chunk)) { - throw new TypeError("Expected Uint8Array chunk"); - } - text += streamingDecoder.decode(chunk, { stream: true }); - } - text += streamingDecoder.decode(); - entries = new URLSearchParams(text); - } catch (err) { - throw new TypeError(void 0, { cause: err }); - } - const formData = new FormData(); - for (const [name, value] of entries) { - formData.append(name, value); + function gettingDecodingSplitting(value) { + const input = value; + const position = { position: 0 }; + const values = []; + let temporaryValue = ""; + while (position.position < input.length) { + temporaryValue += collectASequenceOfCodePoints( + (char) => char !== '"' && char !== ",", + input, + position + ); + if (position.position < input.length) { + if (input.charCodeAt(position.position) === 34) { + temporaryValue += collectAnHTTPQuotedString( + input, + position + ); + if (position.position < input.length) { + continue; } - return formData; } else { - await Promise.resolve(); - throwIfAborted(this[kState]); - throw webidl.errors.exception({ - header: `${instance.name}.formData`, - message: "Could not parse content as FormData." 
- }); + assert3(input.charCodeAt(position.position) === 44); + position.position++; } } - }; - return methods; - } - function mixinBody(prototype) { - Object.assign(prototype.prototype, bodyMixinMethods(prototype)); - } - async function specConsumeBody(object, convertBytesToJSValue, instance) { - webidl.brandCheck(object, instance); - throwIfAborted(object[kState]); - if (bodyUnusable(object[kState].body)) { - throw new TypeError("Body is unusable"); - } - const promise = createDeferredPromise(); - const errorSteps = (error) => promise.reject(error); - const successSteps = (data) => { - try { - promise.resolve(convertBytesToJSValue(data)); - } catch (e) { - errorSteps(e); - } - }; - if (object[kState].body == null) { - successSteps(new Uint8Array()); - return promise.promise; + temporaryValue = removeChars(temporaryValue, true, true, (char) => char === 9 || char === 32); + values.push(temporaryValue); + temporaryValue = ""; } - await fullyReadBody(object[kState].body, successSteps, errorSteps); - return promise.promise; + return values; } - function bodyUnusable(body) { - return body != null && (body.stream.locked || util.isDisturbed(body.stream)); + function getDecodeSplit(name, list) { + const value = list.get(name, true); + if (value === null) { + return null; + } + return gettingDecodingSplitting(value); } + var textDecoder = new TextDecoder(); function utf8DecodeBytes(buffer) { if (buffer.length === 0) { return ""; @@ -10295,2808 +9749,3131 @@ Content-Type: ${value.type || "application/octet-stream"}\r const output = textDecoder.decode(buffer); return output; } - function parseJSONFromBytes(bytes) { - return JSON.parse(utf8DecodeBytes(bytes)); - } - function bodyMimeType(requestOrResponse) { - const headers = requestOrResponse[kState].headersList; - const mimeType = extractMimeType(headers); - if (mimeType === "failure") { - return null; - } - return mimeType; - } module2.exports = { - extractBody, - safelyExtractBody, - cloneBody, - mixinBody + isAborted, + isCancelled, + isValidEncodedURL, + createDeferredPromise, + ReadableStreamFrom, + tryUpgradeRequestToAPotentiallyTrustworthyURL, + clampAndCoarsenConnectionTimingInfo, + coarsenedSharedCurrentTime, + determineRequestsReferrer, + makePolicyContainer, + clonePolicyContainer, + appendFetchMetadata, + appendRequestOriginHeader, + TAOCheck, + corsCheck, + crossOriginResourcePolicyCheck, + createOpaqueTimingInfo, + setRequestReferrerPolicyOnRedirect, + isValidHTTPToken, + requestBadPort, + requestCurrentURL, + responseURL, + responseLocationURL, + isBlobLike, + isURLPotentiallyTrustworthy, + isValidReasonPhrase, + sameOrigin, + normalizeMethod, + serializeJavascriptValueToJSONString, + iteratorMixin, + createIterator, + isValidHeaderName, + isValidHeaderValue, + isErrorLike, + fullyReadBody, + bytesMatch, + isReadableStreamLike, + readableStreamClose, + isomorphicEncode, + urlIsLocal, + urlHasHttpsScheme, + urlIsHttpHttpsScheme, + readAllBytes, + normalizeMethodRecord, + simpleRangeHeaderValue, + buildContentRange, + parseMetadata, + createInflate, + extractMimeType, + getDecodeSplit, + utf8DecodeBytes }; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/request.js -var require_request = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/request.js"(exports, module2) { +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/symbols.js +var require_symbols2 = __commonJS({ + 
".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/symbols.js"(exports, module2) { "use strict"; - var { - InvalidArgumentError, - NotSupportedError - } = require_errors(); - var assert3 = require("node:assert"); - var { kHTTP2BuildRequest, kHTTP2CopyHeaders, kHTTP1BuildRequest } = require_symbols(); - var util = require_util(); - var { channels } = require_diagnostics(); - var { headerNameLowerCasedRecord } = require_constants2(); - var headerCharRegex = /[^\t\x20-\x7e\x80-\xff]/; - var invalidPathRegex = /[^\u0021-\u00ff]/; - var kHandler = Symbol("handler"); - var extractBody; - var Request = class _Request { - constructor(origin, { - path: path10, - method, - body, - headers, - query, - idempotent, - blocking, - upgrade, - headersTimeout, - bodyTimeout, - reset, - throwOnError, - expectContinue - }, handler) { - if (typeof path10 !== "string") { - throw new InvalidArgumentError("path must be a string"); - } else if (path10[0] !== "/" && !(path10.startsWith("http://") || path10.startsWith("https://")) && method !== "CONNECT") { - throw new InvalidArgumentError("path must be an absolute URL or start with a slash"); - } else if (invalidPathRegex.exec(path10) !== null) { - throw new InvalidArgumentError("invalid request path"); - } - if (typeof method !== "string") { - throw new InvalidArgumentError("method must be a string"); - } else if (!util.isValidHTTPToken(method)) { - throw new InvalidArgumentError("invalid request method"); - } - if (upgrade && typeof upgrade !== "string") { - throw new InvalidArgumentError("upgrade must be a string"); - } - if (headersTimeout != null && (!Number.isFinite(headersTimeout) || headersTimeout < 0)) { - throw new InvalidArgumentError("invalid headersTimeout"); - } - if (bodyTimeout != null && (!Number.isFinite(bodyTimeout) || bodyTimeout < 0)) { - throw new InvalidArgumentError("invalid bodyTimeout"); + module2.exports = { + kUrl: Symbol("url"), + kHeaders: Symbol("headers"), + kSignal: Symbol("signal"), + kState: Symbol("state"), + kGuard: Symbol("guard"), + kRealm: Symbol("realm"), + kDispatcher: Symbol("dispatcher") + }; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/file.js +var require_file = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/file.js"(exports, module2) { + "use strict"; + var { EOL } = require("node:os"); + var { Blob: Blob2, File: NativeFile } = require("node:buffer"); + var { types } = require("node:util"); + var { kState } = require_symbols2(); + var { isBlobLike } = require_util3(); + var { webidl } = require_webidl(); + var { parseMIMEType, serializeAMimeType } = require_data_url(); + var { kEnumerableProperty } = require_util(); + var encoder = new TextEncoder(); + var File = class _File extends Blob2 { + constructor(fileBits, fileName, options = {}) { + webidl.argumentLengthCheck(arguments, 2, { header: "File constructor" }); + fileBits = webidl.converters["sequence"](fileBits); + fileName = webidl.converters.USVString(fileName); + options = webidl.converters.FilePropertyBag(options); + const n = fileName; + let t = options.type; + let d; + substep: { + if (t) { + t = parseMIMEType(t); + if (t === "failure") { + t = ""; + break substep; + } + t = serializeAMimeType(t).toLowerCase(); + } + d = options.lastModified; } - if (reset != null && typeof reset !== "boolean") { - throw new InvalidArgumentError("invalid reset"); + super(processBlobParts(fileBits, options), { type: t 
}); + this[kState] = { + name: n, + lastModified: d, + type: t + }; + } + get name() { + webidl.brandCheck(this, _File); + return this[kState].name; + } + get lastModified() { + webidl.brandCheck(this, _File); + return this[kState].lastModified; + } + get type() { + webidl.brandCheck(this, _File); + return this[kState].type; + } + }; + var FileLike = class _FileLike { + constructor(blobLike, fileName, options = {}) { + const n = fileName; + const t = options.type; + const d = options.lastModified ?? Date.now(); + this[kState] = { + blobLike, + name: n, + type: t, + lastModified: d + }; + } + stream(...args) { + webidl.brandCheck(this, _FileLike); + return this[kState].blobLike.stream(...args); + } + arrayBuffer(...args) { + webidl.brandCheck(this, _FileLike); + return this[kState].blobLike.arrayBuffer(...args); + } + slice(...args) { + webidl.brandCheck(this, _FileLike); + return this[kState].blobLike.slice(...args); + } + text(...args) { + webidl.brandCheck(this, _FileLike); + return this[kState].blobLike.text(...args); + } + get size() { + webidl.brandCheck(this, _FileLike); + return this[kState].blobLike.size; + } + get type() { + webidl.brandCheck(this, _FileLike); + return this[kState].blobLike.type; + } + get name() { + webidl.brandCheck(this, _FileLike); + return this[kState].name; + } + get lastModified() { + webidl.brandCheck(this, _FileLike); + return this[kState].lastModified; + } + get [Symbol.toStringTag]() { + return "File"; + } + }; + Object.defineProperties(File.prototype, { + [Symbol.toStringTag]: { + value: "File", + configurable: true + }, + name: kEnumerableProperty, + lastModified: kEnumerableProperty + }); + webidl.converters.Blob = webidl.interfaceConverter(Blob2); + webidl.converters.BlobPart = function(V, opts) { + if (webidl.util.Type(V) === "Object") { + if (isBlobLike(V)) { + return webidl.converters.Blob(V, { strict: false }); } - if (expectContinue != null && typeof expectContinue !== "boolean") { - throw new InvalidArgumentError("invalid expectContinue"); + if (ArrayBuffer.isView(V) || types.isAnyArrayBuffer(V)) { + return webidl.converters.BufferSource(V, opts); } - this.headersTimeout = headersTimeout; - this.bodyTimeout = bodyTimeout; - this.throwOnError = throwOnError === true; - this.method = method; - this.abort = null; - if (body == null) { - this.body = null; - } else if (util.isStream(body)) { - this.body = body; - const rState = this.body._readableState; - if (!rState || !rState.autoDestroy) { - this.endHandler = function autoDestroy() { - util.destroy(this); - }; - this.body.on("end", this.endHandler); - } - this.errorHandler = (err) => { - if (this.abort) { - this.abort(err); - } else { - this.error = err; - } - }; - this.body.on("error", this.errorHandler); - } else if (util.isBuffer(body)) { - this.body = body.byteLength ? body : null; - } else if (ArrayBuffer.isView(body)) { - this.body = body.buffer.byteLength ? Buffer.from(body.buffer, body.byteOffset, body.byteLength) : null; - } else if (body instanceof ArrayBuffer) { - this.body = body.byteLength ? Buffer.from(body) : null; - } else if (typeof body === "string") { - this.body = body.length ? 
Buffer.from(body) : null; - } else if (util.isFormDataLike(body) || util.isIterable(body) || util.isBlobLike(body)) { - this.body = body; - } else { - throw new InvalidArgumentError("body must be a string, a Buffer, a Readable stream, an iterable, or an async iterable"); + } + return webidl.converters.USVString(V, opts); + }; + webidl.converters["sequence"] = webidl.sequenceConverter( + webidl.converters.BlobPart + ); + webidl.converters.FilePropertyBag = webidl.dictionaryConverter([ + { + key: "lastModified", + converter: webidl.converters["long long"], + get defaultValue() { + return Date.now(); } - this.completed = false; - this.aborted = false; - this.upgrade = upgrade || null; - this.path = query ? util.buildURL(path10, query) : path10; - this.origin = origin; - this.idempotent = idempotent == null ? method === "HEAD" || method === "GET" : idempotent; - this.blocking = blocking == null ? false : blocking; - this.reset = reset == null ? null : reset; - this.host = null; - this.contentLength = null; - this.contentType = null; - this.headers = ""; - this.expectContinue = expectContinue != null ? expectContinue : false; - if (Array.isArray(headers)) { - if (headers.length % 2 !== 0) { - throw new InvalidArgumentError("headers array must be even"); - } - for (let i = 0; i < headers.length; i += 2) { - processHeader(this, headers[i], headers[i + 1]); - } - } else if (headers && typeof headers === "object") { - const keys = Object.keys(headers); - for (let i = 0; i < keys.length; i++) { - const key = keys[i]; - processHeader(this, key, headers[key]); + }, + { + key: "type", + converter: webidl.converters.DOMString, + defaultValue: "" + }, + { + key: "endings", + converter: (value) => { + value = webidl.converters.DOMString(value); + value = value.toLowerCase(); + if (value !== "native") { + value = "transparent"; } - } else if (headers != null) { - throw new InvalidArgumentError("headers must be an object or an array"); - } - if (util.isFormDataLike(this.body)) { - if (!extractBody) { - extractBody = require_body().extractBody; + return value; + }, + defaultValue: "transparent" + } + ]); + function processBlobParts(parts, options) { + const bytes = []; + for (const element of parts) { + if (typeof element === "string") { + let s = element; + if (options.endings === "native") { + s = convertLineEndingsNative(s); } - const [bodyStream, contentType] = extractBody(body); - if (this.contentType == null) { - this.contentType = contentType; - this.headers += `content-type: ${contentType}\r -`; + bytes.push(encoder.encode(s)); + } else if (ArrayBuffer.isView(element) || types.isArrayBuffer(element)) { + if (element.buffer) { + bytes.push( + new Uint8Array(element.buffer, element.byteOffset, element.byteLength) + ); + } else { + bytes.push(new Uint8Array(element)); } - this.body = bodyStream.stream; - this.contentLength = bodyStream.length; - } else if (util.isBlobLike(body) && this.contentType == null && body.type) { - this.contentType = body.type; - this.headers += `content-type: ${body.type}\r -`; - } - util.validateHandler(handler, method, upgrade); - this.servername = util.getServerName(this.host); - this[kHandler] = handler; - if (channels.create.hasSubscribers) { - channels.create.publish({ request: this }); + } else if (isBlobLike(element)) { + bytes.push(element); } } - onBodySent(chunk) { - if (this[kHandler].onBodySent) { - try { - return this[kHandler].onBodySent(chunk); - } catch (err) { - this.abort(err); - } + return bytes; + } + function convertLineEndingsNative(s) { + return 
s.replace(/\r?\n/g, EOL); + } + function isFileLike(object) { + return NativeFile && object instanceof NativeFile || object instanceof File || object && (typeof object.stream === "function" || typeof object.arrayBuffer === "function") && object[Symbol.toStringTag] === "File"; + } + module2.exports = { File, FileLike, isFileLike }; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/formdata.js +var require_formdata = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/formdata.js"(exports, module2) { + "use strict"; + var { isBlobLike, iteratorMixin } = require_util3(); + var { kState } = require_symbols2(); + var { kEnumerableProperty } = require_util(); + var { File: UndiciFile, FileLike, isFileLike } = require_file(); + var { webidl } = require_webidl(); + var { File: NativeFile } = require("node:buffer"); + var nodeUtil = require("node:util"); + var File = NativeFile ?? UndiciFile; + var FormData = class _FormData { + constructor(form) { + if (form !== void 0) { + throw webidl.errors.conversionFailed({ + prefix: "FormData constructor", + argument: "Argument 1", + types: ["undefined"] + }); } + this[kState] = []; } - onRequestSent() { - if (channels.bodySent.hasSubscribers) { - channels.bodySent.publish({ request: this }); + append(name, value, filename = void 0) { + webidl.brandCheck(this, _FormData); + webidl.argumentLengthCheck(arguments, 2, { header: "FormData.append" }); + if (arguments.length === 3 && !isBlobLike(value)) { + throw new TypeError( + "Failed to execute 'append' on 'FormData': parameter 2 is not of type 'Blob'" + ); } - if (this[kHandler].onRequestSent) { - try { - return this[kHandler].onRequestSent(); - } catch (err) { - this.abort(err); - } + name = webidl.converters.USVString(name); + value = isBlobLike(value) ? webidl.converters.Blob(value, { strict: false }) : webidl.converters.USVString(value); + filename = arguments.length === 3 ? 
webidl.converters.USVString(filename) : void 0; + const entry = makeEntry(name, value, filename); + this[kState].push(entry); + } + delete(name) { + webidl.brandCheck(this, _FormData); + webidl.argumentLengthCheck(arguments, 1, { header: "FormData.delete" }); + name = webidl.converters.USVString(name); + this[kState] = this[kState].filter((entry) => entry.name !== name); + } + get(name) { + webidl.brandCheck(this, _FormData); + webidl.argumentLengthCheck(arguments, 1, { header: "FormData.get" }); + name = webidl.converters.USVString(name); + const idx = this[kState].findIndex((entry) => entry.name === name); + if (idx === -1) { + return null; } + return this[kState][idx].value; } - onConnect(abort) { - assert3(!this.aborted); - assert3(!this.completed); - if (this.error) { - abort(this.error); + getAll(name) { + webidl.brandCheck(this, _FormData); + webidl.argumentLengthCheck(arguments, 1, { header: "FormData.getAll" }); + name = webidl.converters.USVString(name); + return this[kState].filter((entry) => entry.name === name).map((entry) => entry.value); + } + has(name) { + webidl.brandCheck(this, _FormData); + webidl.argumentLengthCheck(arguments, 1, { header: "FormData.has" }); + name = webidl.converters.USVString(name); + return this[kState].findIndex((entry) => entry.name === name) !== -1; + } + set(name, value, filename = void 0) { + webidl.brandCheck(this, _FormData); + webidl.argumentLengthCheck(arguments, 2, { header: "FormData.set" }); + if (arguments.length === 3 && !isBlobLike(value)) { + throw new TypeError( + "Failed to execute 'set' on 'FormData': parameter 2 is not of type 'Blob'" + ); + } + name = webidl.converters.USVString(name); + value = isBlobLike(value) ? webidl.converters.Blob(value, { strict: false }) : webidl.converters.USVString(value); + filename = arguments.length === 3 ? 
webidl.converters.USVString(filename) : void 0; + const entry = makeEntry(name, value, filename); + const idx = this[kState].findIndex((entry2) => entry2.name === name); + if (idx !== -1) { + this[kState] = [ + ...this[kState].slice(0, idx), + entry, + ...this[kState].slice(idx + 1).filter((entry2) => entry2.name !== name) + ]; } else { - this.abort = abort; - return this[kHandler].onConnect(abort); + this[kState].push(entry); } } - onResponseStarted() { - return this[kHandler].onResponseStarted?.(); + [nodeUtil.inspect.custom](depth, options) { + const state = this[kState].reduce((a, b) => { + if (a[b.name]) { + if (Array.isArray(a[b.name])) { + a[b.name].push(b.value); + } else { + a[b.name] = [a[b.name], b.value]; + } + } else { + a[b.name] = b.value; + } + return a; + }, { __proto__: null }); + options.depth ??= depth; + options.colors ??= true; + const output = nodeUtil.formatWithOptions(options, state); + return `FormData ${output.slice(output.indexOf("]") + 2)}`; } - onHeaders(statusCode, headers, resume, statusText) { - assert3(!this.aborted); - assert3(!this.completed); - if (channels.headers.hasSubscribers) { - channels.headers.publish({ request: this, response: { statusCode, headers, statusText } }); + }; + iteratorMixin("FormData", FormData, kState, "name", "value"); + Object.defineProperties(FormData.prototype, { + append: kEnumerableProperty, + delete: kEnumerableProperty, + get: kEnumerableProperty, + getAll: kEnumerableProperty, + has: kEnumerableProperty, + set: kEnumerableProperty, + [Symbol.toStringTag]: { + value: "FormData", + configurable: true + } + }); + function makeEntry(name, value, filename) { + if (typeof value === "string") { + } else { + if (!isFileLike(value)) { + value = value instanceof Blob ? new File([value], "blob", { type: value.type }) : new FileLike(value, "blob", { type: value.type }); } - try { - return this[kHandler].onHeaders(statusCode, headers, resume, statusText); - } catch (err) { - this.abort(err); + if (filename !== void 0) { + const options = { + type: value.type, + lastModified: value.lastModified + }; + value = NativeFile && value instanceof NativeFile || value instanceof UndiciFile ? new File([value], filename, options) : new FileLike(value, filename, options); } } - onData(chunk) { - assert3(!this.aborted); - assert3(!this.completed); - try { - return this[kHandler].onData(chunk); - } catch (err) { - this.abort(err); + return { name, value }; + } + module2.exports = { FormData, makeEntry }; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/formdata-parser.js +var require_formdata_parser = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/formdata-parser.js"(exports, module2) { + "use strict"; + var { isUSVString, bufferToLowerCasedHeaderName } = require_util(); + var { utf8DecodeBytes } = require_util3(); + var { HTTP_TOKEN_CODEPOINTS, isomorphicDecode } = require_data_url(); + var { isFileLike, File: UndiciFile } = require_file(); + var { makeEntry } = require_formdata(); + var assert3 = require("node:assert"); + var { File: NodeFile } = require("node:buffer"); + var File = globalThis.File ?? NodeFile ?? 
UndiciFile; + var formDataNameBuffer = Buffer.from('form-data; name="'); + var filenameBuffer = Buffer.from("; filename"); + var dd = Buffer.from("--"); + var ddcrlf = Buffer.from("--\r\n"); + function isAsciiString(chars) { + for (let i = 0; i < chars.length; ++i) { + if ((chars.charCodeAt(i) & ~127) !== 0) { return false; } } - onUpgrade(statusCode, headers, socket) { - assert3(!this.aborted); - assert3(!this.completed); - return this[kHandler].onUpgrade(statusCode, headers, socket); + return true; + } + function validateBoundary(boundary) { + const length = boundary.length; + if (length < 27 || length > 70) { + return false; } - onComplete(trailers) { - this.onFinally(); - assert3(!this.aborted); - this.completed = true; - if (channels.trailers.hasSubscribers) { - channels.trailers.publish({ request: this, trailers }); - } - try { - return this[kHandler].onComplete(trailers); - } catch (err) { - this.onError(err); + for (let i = 0; i < length; ++i) { + const cp = boundary.charCodeAt(i); + if (!(cp >= 48 && cp <= 57 || cp >= 65 && cp <= 90 || cp >= 97 && cp <= 122 || cp === 39 || cp === 45 || cp === 95)) { + return false; } } - onError(error) { - this.onFinally(); - if (channels.error.hasSubscribers) { - channels.error.publish({ request: this, error }); + return true; + } + function multipartFormDataParser(input, mimeType) { + assert3(mimeType !== "failure" && mimeType.essence === "multipart/form-data"); + const boundaryString = mimeType.parameters.get("boundary"); + if (boundaryString === void 0) { + return "failure"; + } + const boundary = Buffer.from(`--${boundaryString}`, "utf8"); + const entryList = []; + const position = { position: 0 }; + if (input[0] === 13 && input[1] === 10) { + position.position += 2; + } + while (true) { + if (input.subarray(position.position, position.position + boundary.length).equals(boundary)) { + position.position += boundary.length; + } else { + return "failure"; } - if (this.aborted) { - return; + if (position.position === input.length - 2 && bufferStartsWith(input, dd, position) || position.position === input.length - 4 && bufferStartsWith(input, ddcrlf, position)) { + return entryList; } - this.aborted = true; - return this[kHandler].onError(error); - } - onFinally() { - if (this.errorHandler) { - this.body.off("error", this.errorHandler); - this.errorHandler = null; + if (input[position.position] !== 13 || input[position.position + 1] !== 10) { + return "failure"; } - if (this.endHandler) { - this.body.off("end", this.endHandler); - this.endHandler = null; + position.position += 2; + const result = parseMultipartFormDataHeaders(input, position); + if (result === "failure") { + return "failure"; } - } - // TODO: adjust to support H2 - addHeader(key, value) { - processHeader(this, key, value); - return this; - } - static [kHTTP1BuildRequest](origin, opts, handler) { - return new _Request(origin, opts, handler); - } - static [kHTTP2BuildRequest](origin, opts, handler) { - const headers = opts.headers; - opts = { ...opts, headers: null }; - const request = new _Request(origin, opts, handler); - request.headers = {}; - if (Array.isArray(headers)) { - if (headers.length % 2 !== 0) { - throw new InvalidArgumentError("headers array must be even"); + let { name, filename, contentType, encoding } = result; + position.position += 2; + let body; + { + const boundaryIndex = input.indexOf(boundary.subarray(2), position.position); + if (boundaryIndex === -1) { + return "failure"; } - for (let i = 0; i < headers.length; i += 2) { - processHeader(request, 
headers[i], headers[i + 1], true); + body = input.subarray(position.position, boundaryIndex - 4); + position.position += body.length; + if (encoding === "base64") { + body = Buffer.from(body.toString(), "base64"); } - } else if (headers && typeof headers === "object") { - const keys = Object.keys(headers); - for (let i = 0; i < keys.length; i++) { - const key = keys[i]; - processHeader(request, key, headers[key], true); + } + if (input[position.position] !== 13 || input[position.position + 1] !== 10) { + return "failure"; + } else { + position.position += 2; + } + let value; + if (filename !== null) { + contentType ??= "text/plain"; + if (!isAsciiString(contentType)) { + contentType = ""; } - } else if (headers != null) { - throw new InvalidArgumentError("headers must be an object or an array"); + value = new File([body], filename, { type: contentType }); + } else { + value = utf8DecodeBytes(Buffer.from(body)); } - return request; + assert3(isUSVString(name)); + assert3(typeof value === "string" && isUSVString(value) || isFileLike(value)); + entryList.push(makeEntry(name, value, filename)); } - static [kHTTP2CopyHeaders](raw) { - const rawHeaders = raw.split("\r\n"); - const headers = {}; - for (const header of rawHeaders) { - const [key, value] = header.split(": "); - if (value == null || value.length === 0) - continue; - if (headers[key]) { - headers[key] += `,${value}`; - } else { - headers[key] = value; + } + function parseMultipartFormDataHeaders(input, position) { + let name = null; + let filename = null; + let contentType = null; + let encoding = null; + while (true) { + if (input[position.position] === 13 && input[position.position + 1] === 10) { + if (name === null) { + return "failure"; + } + return { name, filename, contentType, encoding }; + } + let headerName = collectASequenceOfBytes( + (char) => char !== 10 && char !== 13 && char !== 58, + input, + position + ); + headerName = removeChars(headerName, true, true, (char) => char === 9 || char === 32); + if (!HTTP_TOKEN_CODEPOINTS.test(headerName.toString())) { + return "failure"; + } + if (input[position.position] !== 58) { + return "failure"; + } + position.position++; + collectASequenceOfBytes( + (char) => char === 32 || char === 9, + input, + position + ); + switch (bufferToLowerCasedHeaderName(headerName)) { + case "content-disposition": { + name = filename = null; + if (!bufferStartsWith(input, formDataNameBuffer, position)) { + return "failure"; + } + position.position += 17; + name = parseMultipartFormDataName(input, position); + if (name === null) { + return "failure"; + } + if (bufferStartsWith(input, filenameBuffer, position)) { + let check = position.position + filenameBuffer.length; + if (input[check] === 42) { + position.position += 1; + check += 1; + } + if (input[check] !== 61 || input[check + 1] !== 34) { + return "failure"; + } + position.position += 12; + filename = parseMultipartFormDataName(input, position); + if (filename === null) { + return "failure"; + } + } + break; + } + case "content-type": { + let headerValue = collectASequenceOfBytes( + (char) => char !== 10 && char !== 13, + input, + position + ); + headerValue = removeChars(headerValue, false, true, (char) => char === 9 || char === 32); + contentType = isomorphicDecode(headerValue); + break; + } + case "content-transfer-encoding": { + let headerValue = collectASequenceOfBytes( + (char) => char !== 10 && char !== 13, + input, + position + ); + headerValue = removeChars(headerValue, false, true, (char) => char === 9 || char === 32); + encoding = 
isomorphicDecode(headerValue); + break; + } + default: { + collectASequenceOfBytes( + (char) => char !== 10 && char !== 13, + input, + position + ); } } - return headers; + if (input[position.position] !== 13 && input[position.position + 1] !== 10) { + return "failure"; + } else { + position.position += 2; + } } - }; - function processHeaderValue(key, val, skipAppend) { - if (val && typeof val === "object") { - throw new InvalidArgumentError(`invalid ${key} header`); + } + function parseMultipartFormDataName(input, position) { + assert3(input[position.position - 1] === 34); + let name = collectASequenceOfBytes( + (char) => char !== 10 && char !== 13 && char !== 34, + input, + position + ); + if (input[position.position] !== 34) { + return null; + } else { + position.position++; } - val = val != null ? `${val}` : ""; - if (headerCharRegex.exec(val) !== null) { - throw new InvalidArgumentError(`invalid ${key} header`); + name = new TextDecoder().decode(name).replace(/%0A/ig, "\n").replace(/%0D/ig, "\r").replace(/%22/g, '"'); + return name; + } + function collectASequenceOfBytes(condition, input, position) { + let start = position.position; + while (start < input.length && condition(input[start])) { + ++start; } - return skipAppend ? val : `${key}: ${val}\r -`; + return input.subarray(position.position, position.position = start); } - function processHeader(request, key, val, skipAppend = false) { - if (val && (typeof val === "object" && !Array.isArray(val))) { - throw new InvalidArgumentError(`invalid ${key} header`); - } else if (val === void 0) { - return; + function removeChars(buf, leading, trailing, predicate) { + let lead = 0; + let trail = buf.length - 1; + if (leading) { + while (lead < buf.length && predicate(buf[lead])) + lead++; } - let headerName = headerNameLowerCasedRecord[key]; - if (headerName === void 0) { - headerName = key.toLowerCase(); - if (headerNameLowerCasedRecord[headerName] === void 0 && !util.isValidHTTPToken(headerName)) { - throw new InvalidArgumentError("invalid header key"); - } + if (trailing) { + while (trail > 0 && predicate(buf[trail])) + trail--; } - if (request.host === null && headerName === "host") { - if (headerCharRegex.exec(val) !== null) { - throw new InvalidArgumentError(`invalid ${key} header`); - } - request.host = val; - } else if (request.contentLength === null && headerName === "content-length") { - request.contentLength = parseInt(val, 10); - if (!Number.isFinite(request.contentLength)) { - throw new InvalidArgumentError("invalid content-length header"); - } - } else if (request.contentType === null && headerName === "content-type") { - request.contentType = val; - if (skipAppend) - request.headers[key] = processHeaderValue(key, val, skipAppend); - else - request.headers += processHeaderValue(key, val); - } else if (headerName === "transfer-encoding" || headerName === "keep-alive" || headerName === "upgrade") { - throw new InvalidArgumentError(`invalid ${headerName} header`); - } else if (headerName === "connection") { - const value = typeof val === "string" ? val.toLowerCase() : null; - if (value !== "close" && value !== "keep-alive") { - throw new InvalidArgumentError("invalid connection header"); - } else if (value === "close") { - request.reset = true; + return lead === 0 && trail === buf.length - 1 ? 
buf : buf.subarray(lead, trail + 1); + } + function bufferStartsWith(buffer, start, position) { + if (buffer.length < start.length) { + return false; + } + for (let i = 0; i < start.length; i++) { + if (start[i] !== buffer[position.position + i]) { + return false; } - } else if (headerName === "expect") { - throw new NotSupportedError("expect header not supported"); - } else if (Array.isArray(val)) { - for (let i = 0; i < val.length; i++) { - if (skipAppend) { - if (request.headers[key]) { - request.headers[key] += `,${processHeaderValue(key, val[i], skipAppend)}`; + } + return true; + } + module2.exports = { + multipartFormDataParser, + validateBoundary + }; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/body.js +var require_body = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/web/fetch/body.js"(exports, module2) { + "use strict"; + var util = require_util(); + var { + ReadableStreamFrom, + isBlobLike, + isReadableStreamLike, + readableStreamClose, + createDeferredPromise, + fullyReadBody, + extractMimeType, + utf8DecodeBytes + } = require_util3(); + var { FormData } = require_formdata(); + var { kState } = require_symbols2(); + var { webidl } = require_webidl(); + var { Blob: Blob2 } = require("node:buffer"); + var assert3 = require("node:assert"); + var { isErrored } = require_util(); + var { isArrayBuffer } = require("node:util/types"); + var { serializeAMimeType } = require_data_url(); + var { multipartFormDataParser } = require_formdata_parser(); + var textEncoder = new TextEncoder(); + function extractBody(object, keepalive = false) { + let stream = null; + if (object instanceof ReadableStream) { + stream = object; + } else if (isBlobLike(object)) { + stream = object.stream(); + } else { + stream = new ReadableStream({ + async pull(controller) { + const buffer = typeof source === "string" ? 
textEncoder.encode(source) : source; + if (buffer.byteLength) { + controller.enqueue(buffer); + } + queueMicrotask(() => readableStreamClose(controller)); + }, + start() { + }, + type: "bytes" + }); + } + assert3(isReadableStreamLike(stream)); + let action = null; + let source = null; + let length = null; + let type = null; + if (typeof object === "string") { + source = object; + type = "text/plain;charset=UTF-8"; + } else if (object instanceof URLSearchParams) { + source = object.toString(); + type = "application/x-www-form-urlencoded;charset=UTF-8"; + } else if (isArrayBuffer(object)) { + source = new Uint8Array(object.slice()); + } else if (ArrayBuffer.isView(object)) { + source = new Uint8Array(object.buffer.slice(object.byteOffset, object.byteOffset + object.byteLength)); + } else if (util.isFormDataLike(object)) { + const boundary = `----formdata-undici-0${`${Math.floor(Math.random() * 1e11)}`.padStart(11, "0")}`; + const prefix = `--${boundary}\r +Content-Disposition: form-data`; + const escape = (str) => str.replace(/\n/g, "%0A").replace(/\r/g, "%0D").replace(/"/g, "%22"); + const normalizeLinefeeds = (value) => value.replace(/\r?\n|\r/g, "\r\n"); + const blobParts = []; + const rn = new Uint8Array([13, 10]); + length = 0; + let hasUnknownSizeValue = false; + for (const [name, value] of object) { + if (typeof value === "string") { + const chunk2 = textEncoder.encode(prefix + `; name="${escape(normalizeLinefeeds(name))}"\r +\r +${normalizeLinefeeds(value)}\r +`); + blobParts.push(chunk2); + length += chunk2.byteLength; + } else { + const chunk2 = textEncoder.encode(`${prefix}; name="${escape(normalizeLinefeeds(name))}"` + (value.name ? `; filename="${escape(value.name)}"` : "") + `\r +Content-Type: ${value.type || "application/octet-stream"}\r +\r +`); + blobParts.push(chunk2, value, rn); + if (typeof value.size === "number") { + length += chunk2.byteLength + value.size + rn.byteLength; } else { - request.headers[key] = processHeaderValue(key, val[i], skipAppend); + hasUnknownSizeValue = true; } - } else { - request.headers += processHeaderValue(key, val[i]); } } - } else if (skipAppend) { - request.headers[key] = processHeaderValue(key, val, skipAppend); - } else { - request.headers += processHeaderValue(key, val); - } - } - module2.exports = Request; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/connect.js -var require_connect = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/core/connect.js"(exports, module2) { - "use strict"; - var net = require("node:net"); - var assert3 = require("node:assert"); - var util = require_util(); - var { InvalidArgumentError, ConnectTimeoutError } = require_errors(); - var tls; - var SessionCache; - if (global.FinalizationRegistry && !(process.env.NODE_V8_COVERAGE || process.env.UNDICI_NO_FG)) { - SessionCache = class WeakSessionCache { - constructor(maxCachedSessions) { - this._maxCachedSessions = maxCachedSessions; - this._sessionCache = /* @__PURE__ */ new Map(); - this._sessionRegistry = new global.FinalizationRegistry((key) => { - if (this._sessionCache.size < this._maxCachedSessions) { - return; - } - const ref = this._sessionCache.get(key); - if (ref !== void 0 && ref.deref() === void 0) { - this._sessionCache.delete(key); - } - }); - } - get(sessionKey) { - const ref = this._sessionCache.get(sessionKey); - return ref ? 
ref.deref() : null; + const chunk = textEncoder.encode(`--${boundary}--`); + blobParts.push(chunk); + length += chunk.byteLength; + if (hasUnknownSizeValue) { + length = null; } - set(sessionKey, session) { - if (this._maxCachedSessions === 0) { - return; + source = object; + action = async function* () { + for (const part of blobParts) { + if (part.stream) { + yield* part.stream(); + } else { + yield part; + } } - this._sessionCache.set(sessionKey, new WeakRef(session)); - this._sessionRegistry.register(session, sessionKey); - } - }; - } else { - SessionCache = class SimpleSessionCache { - constructor(maxCachedSessions) { - this._maxCachedSessions = maxCachedSessions; - this._sessionCache = /* @__PURE__ */ new Map(); + }; + type = `multipart/form-data; boundary=${boundary}`; + } else if (isBlobLike(object)) { + source = object; + length = object.size; + if (object.type) { + type = object.type; } - get(sessionKey) { - return this._sessionCache.get(sessionKey); + } else if (typeof object[Symbol.asyncIterator] === "function") { + if (keepalive) { + throw new TypeError("keepalive"); } - set(sessionKey, session) { - if (this._maxCachedSessions === 0) { - return; - } - if (this._sessionCache.size >= this._maxCachedSessions) { - const { value: oldestKey } = this._sessionCache.keys().next(); - this._sessionCache.delete(oldestKey); - } - this._sessionCache.set(sessionKey, session); + if (util.isDisturbed(object) || object.locked) { + throw new TypeError( + "Response body object should not be disturbed or locked" + ); } + stream = object instanceof ReadableStream ? object : ReadableStreamFrom(object); + } + if (typeof source === "string" || util.isBuffer(source)) { + length = Buffer.byteLength(source); + } + if (action != null) { + let iterator; + stream = new ReadableStream({ + async start() { + iterator = action(object)[Symbol.asyncIterator](); + }, + async pull(controller) { + const { value, done } = await iterator.next(); + if (done) { + queueMicrotask(() => { + controller.close(); + controller.byobRequest?.respond(0); + }); + } else { + if (!isErrored(stream)) { + const buffer = new Uint8Array(value); + if (buffer.byteLength) { + controller.enqueue(buffer); + } + } + } + return controller.desiredSize > 0; + }, + async cancel(reason) { + await iterator.return(); + }, + type: "bytes" + }); + } + const body = { stream, source, length }; + return [body, type]; + } + function safelyExtractBody(object, keepalive = false) { + if (object instanceof ReadableStream) { + assert3(!util.isDisturbed(object), "The body has already been consumed."); + assert3(!object.locked, "The stream is locked."); + } + return extractBody(object, keepalive); + } + function cloneBody(body) { + const [out1, out2] = body.stream.tee(); + body.stream = out1; + return { + stream: out2, + length: body.length, + source: body.source }; } - function buildConnector({ allowH2, maxCachedSessions, socketPath, timeout, ...opts }) { - if (maxCachedSessions != null && (!Number.isInteger(maxCachedSessions) || maxCachedSessions < 0)) { - throw new InvalidArgumentError("maxCachedSessions must be a positive integer or zero"); + function throwIfAborted(state) { + if (state.aborted) { + throw new DOMException("The operation was aborted.", "AbortError"); } - const options = { path: socketPath, ...opts }; - const sessionCache = new SessionCache(maxCachedSessions == null ? 100 : maxCachedSessions); - timeout = timeout == null ? 1e4 : timeout; - allowH2 = allowH2 != null ? 
allowH2 : false; - return function connect({ hostname, host, protocol, port, servername, localAddress, httpSocket }, callback) { - let socket; - if (protocol === "https:") { - if (!tls) { - tls = require("node:tls"); - } - servername = servername || options.servername || util.getServerName(host) || null; - const sessionKey = servername || hostname; - const session = sessionCache.get(sessionKey) || null; - assert3(sessionKey); - socket = tls.connect({ - highWaterMark: 16384, - // TLS in node can't have bigger HWM anyway... - ...options, - servername, - session, - localAddress, - // TODO(HTTP/2): Add support for h2c - ALPNProtocols: allowH2 ? ["http/1.1", "h2"] : ["http/1.1"], - socket: httpSocket, - // upgrade socket connection - port: port || 443, - host: hostname - }); - socket.on("session", function(session2) { - sessionCache.set(sessionKey, session2); - }); - } else { - assert3(!httpSocket, "httpSocket can only be sent on TLS update"); - socket = net.connect({ - highWaterMark: 64 * 1024, - // Same as nodejs fs streams. - ...options, - localAddress, - port: port || 80, - host: hostname - }); - } - if (options.keepAlive == null || options.keepAlive) { - const keepAliveInitialDelay = options.keepAliveInitialDelay === void 0 ? 6e4 : options.keepAliveInitialDelay; - socket.setKeepAlive(true, keepAliveInitialDelay); + } + function bodyMixinMethods(instance) { + const methods = { + blob() { + return consumeBody(this, (bytes) => { + let mimeType = bodyMimeType(this); + if (mimeType === null) { + mimeType = ""; + } else if (mimeType) { + mimeType = serializeAMimeType(mimeType); + } + return new Blob2([bytes], { type: mimeType }); + }, instance); + }, + arrayBuffer() { + return consumeBody(this, (bytes) => { + return new Uint8Array(bytes).buffer; + }, instance); + }, + text() { + return consumeBody(this, utf8DecodeBytes, instance); + }, + json() { + return consumeBody(this, parseJSONFromBytes, instance); + }, + formData() { + return consumeBody(this, (value) => { + const mimeType = bodyMimeType(this); + if (mimeType !== null) { + switch (mimeType.essence) { + case "multipart/form-data": { + const parsed = multipartFormDataParser(value, mimeType); + if (parsed === "failure") { + throw new TypeError("Failed to parse body as FormData."); + } + const fd = new FormData(); + fd[kState] = parsed; + return fd; + } + case "application/x-www-form-urlencoded": { + const entries = new URLSearchParams(value.toString()); + const fd = new FormData(); + for (const [name, value2] of entries) { + fd.append(name, value2); + } + return fd; + } + } + } + throw new TypeError( + 'Content-Type was not one of "multipart/form-data" or "application/x-www-form-urlencoded".' + ); + }, instance); } - const cancelTimeout = setupTimeout(() => onConnectTimeout(socket), timeout); - socket.setNoDelay(true).once(protocol === "https:" ? 
"secureConnect" : "connect", function() { - cancelTimeout(); - if (callback) { - const cb = callback; - callback = null; - cb(null, this); - } - }).on("error", function(err) { - cancelTimeout(); - if (callback) { - const cb = callback; - callback = null; - cb(err); - } - }); - return socket; }; + return methods; } - function setupTimeout(onConnectTimeout2, timeout) { - if (!timeout) { - return () => { - }; + function mixinBody(prototype) { + Object.assign(prototype.prototype, bodyMixinMethods(prototype)); + } + async function consumeBody(object, convertBytesToJSValue, instance) { + webidl.brandCheck(object, instance); + if (bodyUnusable(object[kState].body)) { + throw new TypeError("Body is unusable"); } - let s1 = null; - let s2 = null; - const timeoutId = setTimeout(() => { - s1 = setImmediate(() => { - if (process.platform === "win32") { - s2 = setImmediate(() => onConnectTimeout2()); - } else { - onConnectTimeout2(); - } - }); - }, timeout); - return () => { - clearTimeout(timeoutId); - clearImmediate(s1); - clearImmediate(s2); + throwIfAborted(object[kState]); + const promise = createDeferredPromise(); + const errorSteps = (error) => promise.reject(error); + const successSteps = (data) => { + try { + promise.resolve(convertBytesToJSValue(data)); + } catch (e) { + errorSteps(e); + } }; - } - function onConnectTimeout(socket) { - let message = "Connect Timeout Error"; - if (Array.isArray(socket.autoSelectFamilyAttemptedAddresses)) { - message = +` (attempted addresses: ${socket.autoSelectFamilyAttemptedAddresses.join(", ")})`; + if (object[kState].body == null) { + successSteps(new Uint8Array()); + return promise.promise; } - util.destroy(socket, new ConnectTimeoutError(message)); + await fullyReadBody(object[kState].body, successSteps, errorSteps); + return promise.promise; } - module2.exports = buildConnector; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/llhttp/utils.js -var require_utils = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/llhttp/utils.js"(exports) { - "use strict"; - Object.defineProperty(exports, "__esModule", { value: true }); - exports.enumToMap = void 0; - function enumToMap(obj) { - const res = {}; - Object.keys(obj).forEach((key) => { - const value = obj[key]; - if (typeof value === "number") { - res[key] = value; - } - }); - return res; + function bodyUnusable(body) { + return body != null && (body.stream.locked || util.isDisturbed(body.stream)); } - exports.enumToMap = enumToMap; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/llhttp/constants.js -var require_constants4 = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/llhttp/constants.js"(exports) { - "use strict"; - Object.defineProperty(exports, "__esModule", { value: true }); - exports.SPECIAL_HEADERS = exports.HEADER_STATE = exports.MINOR = exports.MAJOR = exports.CONNECTION_TOKEN_CHARS = exports.HEADER_CHARS = exports.TOKEN = exports.STRICT_TOKEN = exports.HEX = exports.URL_CHAR = exports.STRICT_URL_CHAR = exports.USERINFO_CHARS = exports.MARK = exports.ALPHANUM = exports.NUM = exports.HEX_MAP = exports.NUM_MAP = exports.ALPHA = exports.FINISH = exports.H_METHOD_MAP = exports.METHOD_MAP = exports.METHODS_RTSP = exports.METHODS_ICE = exports.METHODS_HTTP = exports.METHODS = exports.LENIENT_FLAGS = exports.FLAGS = exports.TYPE = exports.ERROR = void 0; - var utils_1 = require_utils(); - var ERROR; - (function(ERROR2) { - 
ERROR2[ERROR2["OK"] = 0] = "OK"; - ERROR2[ERROR2["INTERNAL"] = 1] = "INTERNAL"; - ERROR2[ERROR2["STRICT"] = 2] = "STRICT"; - ERROR2[ERROR2["LF_EXPECTED"] = 3] = "LF_EXPECTED"; - ERROR2[ERROR2["UNEXPECTED_CONTENT_LENGTH"] = 4] = "UNEXPECTED_CONTENT_LENGTH"; - ERROR2[ERROR2["CLOSED_CONNECTION"] = 5] = "CLOSED_CONNECTION"; - ERROR2[ERROR2["INVALID_METHOD"] = 6] = "INVALID_METHOD"; - ERROR2[ERROR2["INVALID_URL"] = 7] = "INVALID_URL"; - ERROR2[ERROR2["INVALID_CONSTANT"] = 8] = "INVALID_CONSTANT"; - ERROR2[ERROR2["INVALID_VERSION"] = 9] = "INVALID_VERSION"; - ERROR2[ERROR2["INVALID_HEADER_TOKEN"] = 10] = "INVALID_HEADER_TOKEN"; - ERROR2[ERROR2["INVALID_CONTENT_LENGTH"] = 11] = "INVALID_CONTENT_LENGTH"; - ERROR2[ERROR2["INVALID_CHUNK_SIZE"] = 12] = "INVALID_CHUNK_SIZE"; - ERROR2[ERROR2["INVALID_STATUS"] = 13] = "INVALID_STATUS"; - ERROR2[ERROR2["INVALID_EOF_STATE"] = 14] = "INVALID_EOF_STATE"; - ERROR2[ERROR2["INVALID_TRANSFER_ENCODING"] = 15] = "INVALID_TRANSFER_ENCODING"; - ERROR2[ERROR2["CB_MESSAGE_BEGIN"] = 16] = "CB_MESSAGE_BEGIN"; - ERROR2[ERROR2["CB_HEADERS_COMPLETE"] = 17] = "CB_HEADERS_COMPLETE"; - ERROR2[ERROR2["CB_MESSAGE_COMPLETE"] = 18] = "CB_MESSAGE_COMPLETE"; - ERROR2[ERROR2["CB_CHUNK_HEADER"] = 19] = "CB_CHUNK_HEADER"; - ERROR2[ERROR2["CB_CHUNK_COMPLETE"] = 20] = "CB_CHUNK_COMPLETE"; - ERROR2[ERROR2["PAUSED"] = 21] = "PAUSED"; - ERROR2[ERROR2["PAUSED_UPGRADE"] = 22] = "PAUSED_UPGRADE"; - ERROR2[ERROR2["PAUSED_H2_UPGRADE"] = 23] = "PAUSED_H2_UPGRADE"; - ERROR2[ERROR2["USER"] = 24] = "USER"; - })(ERROR = exports.ERROR || (exports.ERROR = {})); - var TYPE; - (function(TYPE2) { - TYPE2[TYPE2["BOTH"] = 0] = "BOTH"; - TYPE2[TYPE2["REQUEST"] = 1] = "REQUEST"; - TYPE2[TYPE2["RESPONSE"] = 2] = "RESPONSE"; - })(TYPE = exports.TYPE || (exports.TYPE = {})); - var FLAGS; - (function(FLAGS2) { - FLAGS2[FLAGS2["CONNECTION_KEEP_ALIVE"] = 1] = "CONNECTION_KEEP_ALIVE"; - FLAGS2[FLAGS2["CONNECTION_CLOSE"] = 2] = "CONNECTION_CLOSE"; - FLAGS2[FLAGS2["CONNECTION_UPGRADE"] = 4] = "CONNECTION_UPGRADE"; - FLAGS2[FLAGS2["CHUNKED"] = 8] = "CHUNKED"; - FLAGS2[FLAGS2["UPGRADE"] = 16] = "UPGRADE"; - FLAGS2[FLAGS2["CONTENT_LENGTH"] = 32] = "CONTENT_LENGTH"; - FLAGS2[FLAGS2["SKIPBODY"] = 64] = "SKIPBODY"; - FLAGS2[FLAGS2["TRAILING"] = 128] = "TRAILING"; - FLAGS2[FLAGS2["TRANSFER_ENCODING"] = 512] = "TRANSFER_ENCODING"; - })(FLAGS = exports.FLAGS || (exports.FLAGS = {})); - var LENIENT_FLAGS; - (function(LENIENT_FLAGS2) { - LENIENT_FLAGS2[LENIENT_FLAGS2["HEADERS"] = 1] = "HEADERS"; - LENIENT_FLAGS2[LENIENT_FLAGS2["CHUNKED_LENGTH"] = 2] = "CHUNKED_LENGTH"; - LENIENT_FLAGS2[LENIENT_FLAGS2["KEEP_ALIVE"] = 4] = "KEEP_ALIVE"; - })(LENIENT_FLAGS = exports.LENIENT_FLAGS || (exports.LENIENT_FLAGS = {})); - var METHODS; - (function(METHODS2) { - METHODS2[METHODS2["DELETE"] = 0] = "DELETE"; - METHODS2[METHODS2["GET"] = 1] = "GET"; - METHODS2[METHODS2["HEAD"] = 2] = "HEAD"; - METHODS2[METHODS2["POST"] = 3] = "POST"; - METHODS2[METHODS2["PUT"] = 4] = "PUT"; - METHODS2[METHODS2["CONNECT"] = 5] = "CONNECT"; - METHODS2[METHODS2["OPTIONS"] = 6] = "OPTIONS"; - METHODS2[METHODS2["TRACE"] = 7] = "TRACE"; - METHODS2[METHODS2["COPY"] = 8] = "COPY"; - METHODS2[METHODS2["LOCK"] = 9] = "LOCK"; - METHODS2[METHODS2["MKCOL"] = 10] = "MKCOL"; - METHODS2[METHODS2["MOVE"] = 11] = "MOVE"; - METHODS2[METHODS2["PROPFIND"] = 12] = "PROPFIND"; - METHODS2[METHODS2["PROPPATCH"] = 13] = "PROPPATCH"; - METHODS2[METHODS2["SEARCH"] = 14] = "SEARCH"; - METHODS2[METHODS2["UNLOCK"] = 15] = "UNLOCK"; - METHODS2[METHODS2["BIND"] = 16] = "BIND"; - 
METHODS2[METHODS2["REBIND"] = 17] = "REBIND"; - METHODS2[METHODS2["UNBIND"] = 18] = "UNBIND"; - METHODS2[METHODS2["ACL"] = 19] = "ACL"; - METHODS2[METHODS2["REPORT"] = 20] = "REPORT"; - METHODS2[METHODS2["MKACTIVITY"] = 21] = "MKACTIVITY"; - METHODS2[METHODS2["CHECKOUT"] = 22] = "CHECKOUT"; - METHODS2[METHODS2["MERGE"] = 23] = "MERGE"; - METHODS2[METHODS2["M-SEARCH"] = 24] = "M-SEARCH"; - METHODS2[METHODS2["NOTIFY"] = 25] = "NOTIFY"; - METHODS2[METHODS2["SUBSCRIBE"] = 26] = "SUBSCRIBE"; - METHODS2[METHODS2["UNSUBSCRIBE"] = 27] = "UNSUBSCRIBE"; - METHODS2[METHODS2["PATCH"] = 28] = "PATCH"; - METHODS2[METHODS2["PURGE"] = 29] = "PURGE"; - METHODS2[METHODS2["MKCALENDAR"] = 30] = "MKCALENDAR"; - METHODS2[METHODS2["LINK"] = 31] = "LINK"; - METHODS2[METHODS2["UNLINK"] = 32] = "UNLINK"; - METHODS2[METHODS2["SOURCE"] = 33] = "SOURCE"; - METHODS2[METHODS2["PRI"] = 34] = "PRI"; - METHODS2[METHODS2["DESCRIBE"] = 35] = "DESCRIBE"; - METHODS2[METHODS2["ANNOUNCE"] = 36] = "ANNOUNCE"; - METHODS2[METHODS2["SETUP"] = 37] = "SETUP"; - METHODS2[METHODS2["PLAY"] = 38] = "PLAY"; - METHODS2[METHODS2["PAUSE"] = 39] = "PAUSE"; - METHODS2[METHODS2["TEARDOWN"] = 40] = "TEARDOWN"; - METHODS2[METHODS2["GET_PARAMETER"] = 41] = "GET_PARAMETER"; - METHODS2[METHODS2["SET_PARAMETER"] = 42] = "SET_PARAMETER"; - METHODS2[METHODS2["REDIRECT"] = 43] = "REDIRECT"; - METHODS2[METHODS2["RECORD"] = 44] = "RECORD"; - METHODS2[METHODS2["FLUSH"] = 45] = "FLUSH"; - })(METHODS = exports.METHODS || (exports.METHODS = {})); - exports.METHODS_HTTP = [ - METHODS.DELETE, - METHODS.GET, - METHODS.HEAD, - METHODS.POST, - METHODS.PUT, - METHODS.CONNECT, - METHODS.OPTIONS, - METHODS.TRACE, - METHODS.COPY, - METHODS.LOCK, - METHODS.MKCOL, - METHODS.MOVE, - METHODS.PROPFIND, - METHODS.PROPPATCH, - METHODS.SEARCH, - METHODS.UNLOCK, - METHODS.BIND, - METHODS.REBIND, - METHODS.UNBIND, - METHODS.ACL, - METHODS.REPORT, - METHODS.MKACTIVITY, - METHODS.CHECKOUT, - METHODS.MERGE, - METHODS["M-SEARCH"], - METHODS.NOTIFY, - METHODS.SUBSCRIBE, - METHODS.UNSUBSCRIBE, - METHODS.PATCH, - METHODS.PURGE, - METHODS.MKCALENDAR, - METHODS.LINK, - METHODS.UNLINK, - METHODS.PRI, - // TODO(indutny): should we allow it with HTTP? 
- METHODS.SOURCE - ]; - exports.METHODS_ICE = [ - METHODS.SOURCE - ]; - exports.METHODS_RTSP = [ - METHODS.OPTIONS, - METHODS.DESCRIBE, - METHODS.ANNOUNCE, - METHODS.SETUP, - METHODS.PLAY, - METHODS.PAUSE, - METHODS.TEARDOWN, - METHODS.GET_PARAMETER, - METHODS.SET_PARAMETER, - METHODS.REDIRECT, - METHODS.RECORD, - METHODS.FLUSH, - // For AirPlay - METHODS.GET, - METHODS.POST - ]; - exports.METHOD_MAP = utils_1.enumToMap(METHODS); - exports.H_METHOD_MAP = {}; - Object.keys(exports.METHOD_MAP).forEach((key) => { - if (/^H/.test(key)) { - exports.H_METHOD_MAP[key] = exports.METHOD_MAP[key]; + function parseJSONFromBytes(bytes) { + return JSON.parse(utf8DecodeBytes(bytes)); + } + function bodyMimeType(requestOrResponse) { + const headers = requestOrResponse[kState].headersList; + const mimeType = extractMimeType(headers); + if (mimeType === "failure") { + return null; } - }); - var FINISH; - (function(FINISH2) { - FINISH2[FINISH2["SAFE"] = 0] = "SAFE"; - FINISH2[FINISH2["SAFE_WITH_CB"] = 1] = "SAFE_WITH_CB"; - FINISH2[FINISH2["UNSAFE"] = 2] = "UNSAFE"; - })(FINISH = exports.FINISH || (exports.FINISH = {})); - exports.ALPHA = []; - for (let i = "A".charCodeAt(0); i <= "Z".charCodeAt(0); i++) { - exports.ALPHA.push(String.fromCharCode(i)); - exports.ALPHA.push(String.fromCharCode(i + 32)); + return mimeType; } - exports.NUM_MAP = { - 0: 0, - 1: 1, - 2: 2, - 3: 3, - 4: 4, - 5: 5, - 6: 6, - 7: 7, - 8: 8, - 9: 9 - }; - exports.HEX_MAP = { - 0: 0, - 1: 1, - 2: 2, - 3: 3, - 4: 4, - 5: 5, - 6: 6, - 7: 7, - 8: 8, - 9: 9, - A: 10, - B: 11, - C: 12, - D: 13, - E: 14, - F: 15, - a: 10, - b: 11, - c: 12, - d: 13, - e: 14, - f: 15 + module2.exports = { + extractBody, + safelyExtractBody, + cloneBody, + mixinBody }; - exports.NUM = [ - "0", - "1", - "2", - "3", - "4", - "5", - "6", - "7", - "8", - "9" - ]; - exports.ALPHANUM = exports.ALPHA.concat(exports.NUM); - exports.MARK = ["-", "_", ".", "!", "~", "*", "'", "(", ")"]; - exports.USERINFO_CHARS = exports.ALPHANUM.concat(exports.MARK).concat(["%", ";", ":", "&", "=", "+", "$", ","]); - exports.STRICT_URL_CHAR = [ - "!", - '"', - "$", - "%", - "&", - "'", - "(", - ")", - "*", - "+", - ",", - "-", - ".", - "/", - ":", - ";", - "<", - "=", - ">", - "@", - "[", - "\\", - "]", - "^", - "_", - "`", - "{", - "|", - "}", - "~" - ].concat(exports.ALPHANUM); - exports.URL_CHAR = exports.STRICT_URL_CHAR.concat([" ", "\f"]); - for (let i = 128; i <= 255; i++) { - exports.URL_CHAR.push(i); + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/client-h1.js +var require_client_h1 = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/client-h1.js"(exports, module2) { + "use strict"; + var assert3 = require("node:assert"); + var util = require_util(); + var { channels } = require_diagnostics(); + var timers = require_timers(); + var { + RequestContentLengthMismatchError, + ResponseContentLengthMismatchError, + RequestAbortedError, + HeadersTimeoutError, + HeadersOverflowError, + SocketError, + InformationalError, + BodyTimeoutError, + HTTPParserError, + ResponseExceededMaxSizeError + } = require_errors(); + var { + kUrl, + kReset, + kClient, + kParser, + kBlocking, + kRunning, + kPending, + kSize, + kWriting, + kQueue, + kNoRef, + kKeepAliveDefaultTimeout, + kHostHeader, + kPendingIdx, + kRunningIdx, + kError, + kPipelining, + kSocket, + kKeepAliveTimeoutValue, + kMaxHeadersSize, + kKeepAliveMaxTimeout, + kKeepAliveTimeoutThreshold, + kHeadersTimeout, + kBodyTimeout, 
+ kStrictContentLength, + kMaxRequests, + kCounter, + kMaxResponseSize, + kOnError, + kResume, + kHTTPContext + } = require_symbols(); + var constants = require_constants3(); + var EMPTY_BUF = Buffer.alloc(0); + var FastBuffer = Buffer[Symbol.species]; + var addListener = util.addListener; + var removeAllListeners = util.removeAllListeners; + var extractBody; + async function lazyllhttp() { + const llhttpWasmData = process.env.JEST_WORKER_ID ? require_llhttp_wasm() : void 0; + let mod; + try { + mod = await WebAssembly.compile(require_llhttp_simd_wasm()); + } catch (e) { + mod = await WebAssembly.compile(llhttpWasmData || require_llhttp_wasm()); + } + return await WebAssembly.instantiate(mod, { + env: { + /* eslint-disable camelcase */ + wasm_on_url: (p, at, len) => { + return 0; + }, + wasm_on_status: (p, at, len) => { + assert3.strictEqual(currentParser.ptr, p); + const start = at - currentBufferPtr + currentBufferRef.byteOffset; + return currentParser.onStatus(new FastBuffer(currentBufferRef.buffer, start, len)) || 0; + }, + wasm_on_message_begin: (p) => { + assert3.strictEqual(currentParser.ptr, p); + return currentParser.onMessageBegin() || 0; + }, + wasm_on_header_field: (p, at, len) => { + assert3.strictEqual(currentParser.ptr, p); + const start = at - currentBufferPtr + currentBufferRef.byteOffset; + return currentParser.onHeaderField(new FastBuffer(currentBufferRef.buffer, start, len)) || 0; + }, + wasm_on_header_value: (p, at, len) => { + assert3.strictEqual(currentParser.ptr, p); + const start = at - currentBufferPtr + currentBufferRef.byteOffset; + return currentParser.onHeaderValue(new FastBuffer(currentBufferRef.buffer, start, len)) || 0; + }, + wasm_on_headers_complete: (p, statusCode, upgrade, shouldKeepAlive) => { + assert3.strictEqual(currentParser.ptr, p); + return currentParser.onHeadersComplete(statusCode, Boolean(upgrade), Boolean(shouldKeepAlive)) || 0; + }, + wasm_on_body: (p, at, len) => { + assert3.strictEqual(currentParser.ptr, p); + const start = at - currentBufferPtr + currentBufferRef.byteOffset; + return currentParser.onBody(new FastBuffer(currentBufferRef.buffer, start, len)) || 0; + }, + wasm_on_message_complete: (p) => { + assert3.strictEqual(currentParser.ptr, p); + return currentParser.onMessageComplete() || 0; + } + /* eslint-enable camelcase */ + } + }); } - exports.HEX = exports.NUM.concat(["a", "b", "c", "d", "e", "f", "A", "B", "C", "D", "E", "F"]); - exports.STRICT_TOKEN = [ - "!", - "#", - "$", - "%", - "&", - "'", - "*", - "+", - "-", - ".", - "^", - "_", - "`", - "|", - "~" - ].concat(exports.ALPHANUM); - exports.TOKEN = exports.STRICT_TOKEN.concat([" "]); - exports.HEADER_CHARS = [" "]; - for (let i = 32; i <= 255; i++) { - if (i !== 127) { - exports.HEADER_CHARS.push(i); + var llhttpInstance = null; + var llhttpPromise = lazyllhttp(); + llhttpPromise.catch(); + var currentParser = null; + var currentBufferRef = null; + var currentBufferSize = 0; + var currentBufferPtr = null; + var TIMEOUT_HEADERS = 1; + var TIMEOUT_BODY = 2; + var TIMEOUT_IDLE = 3; + var Parser = class { + constructor(client, socket, { exports: exports2 }) { + assert3(Number.isFinite(client[kMaxHeadersSize]) && client[kMaxHeadersSize] > 0); + this.llhttp = exports2; + this.ptr = this.llhttp.llhttp_alloc(constants.TYPE.RESPONSE); + this.client = client; + this.socket = socket; + this.timeout = null; + this.timeoutValue = null; + this.timeoutType = null; + this.statusCode = null; + this.statusText = ""; + this.upgrade = false; + this.headers = []; + this.headersSize = 0; + 
this.headersMaxSize = client[kMaxHeadersSize]; + this.shouldKeepAlive = false; + this.paused = false; + this.resume = this.resume.bind(this); + this.bytesRead = 0; + this.keepAlive = ""; + this.contentLength = ""; + this.connection = ""; + this.maxResponseSize = client[kMaxResponseSize]; + } + setTimeout(value, type) { + this.timeoutType = type; + if (value !== this.timeoutValue) { + timers.clearTimeout(this.timeout); + if (value) { + this.timeout = timers.setTimeout(onParserTimeout, value, this); + if (this.timeout.unref) { + this.timeout.unref(); + } + } else { + this.timeout = null; + } + this.timeoutValue = value; + } else if (this.timeout) { + if (this.timeout.refresh) { + this.timeout.refresh(); + } + } } - } - exports.CONNECTION_TOKEN_CHARS = exports.HEADER_CHARS.filter((c) => c !== 44); - exports.MAJOR = exports.NUM_MAP; - exports.MINOR = exports.MAJOR; - var HEADER_STATE; - (function(HEADER_STATE2) { - HEADER_STATE2[HEADER_STATE2["GENERAL"] = 0] = "GENERAL"; - HEADER_STATE2[HEADER_STATE2["CONNECTION"] = 1] = "CONNECTION"; - HEADER_STATE2[HEADER_STATE2["CONTENT_LENGTH"] = 2] = "CONTENT_LENGTH"; - HEADER_STATE2[HEADER_STATE2["TRANSFER_ENCODING"] = 3] = "TRANSFER_ENCODING"; - HEADER_STATE2[HEADER_STATE2["UPGRADE"] = 4] = "UPGRADE"; - HEADER_STATE2[HEADER_STATE2["CONNECTION_KEEP_ALIVE"] = 5] = "CONNECTION_KEEP_ALIVE"; - HEADER_STATE2[HEADER_STATE2["CONNECTION_CLOSE"] = 6] = "CONNECTION_CLOSE"; - HEADER_STATE2[HEADER_STATE2["CONNECTION_UPGRADE"] = 7] = "CONNECTION_UPGRADE"; - HEADER_STATE2[HEADER_STATE2["TRANSFER_ENCODING_CHUNKED"] = 8] = "TRANSFER_ENCODING_CHUNKED"; - })(HEADER_STATE = exports.HEADER_STATE || (exports.HEADER_STATE = {})); - exports.SPECIAL_HEADERS = { - "connection": HEADER_STATE.CONNECTION, - "content-length": HEADER_STATE.CONTENT_LENGTH, - "proxy-connection": HEADER_STATE.CONNECTION, - "transfer-encoding": HEADER_STATE.TRANSFER_ENCODING, - "upgrade": HEADER_STATE.UPGRADE - }; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/handler/RedirectHandler.js -var require_RedirectHandler = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/handler/RedirectHandler.js"(exports, module2) { - "use strict"; - var util = require_util(); - var { kBodyUsed } = require_symbols(); - var assert3 = require("node:assert"); - var { InvalidArgumentError } = require_errors(); - var EE = require("node:events"); - var redirectableStatusCodes = [300, 301, 302, 303, 307, 308]; - var kBody = Symbol("body"); - var BodyAsyncIterable = class { - constructor(body) { - this[kBody] = body; - this[kBodyUsed] = false; + resume() { + if (this.socket.destroyed || !this.paused) { + return; + } + assert3(this.ptr != null); + assert3(currentParser == null); + this.llhttp.llhttp_resume(this.ptr); + assert3(this.timeoutType === TIMEOUT_BODY); + if (this.timeout) { + if (this.timeout.refresh) { + this.timeout.refresh(); + } + } + this.paused = false; + this.execute(this.socket.read() || EMPTY_BUF); + this.readMore(); } - async *[Symbol.asyncIterator]() { - assert3(!this[kBodyUsed], "disturbed"); - this[kBodyUsed] = true; - yield* this[kBody]; + readMore() { + while (!this.paused && this.ptr) { + const chunk = this.socket.read(); + if (chunk === null) { + break; + } + this.execute(chunk); + } } - }; - var RedirectHandler = class { - constructor(dispatch, maxRedirections, opts, handler) { - if (maxRedirections != null && (!Number.isInteger(maxRedirections) || maxRedirections < 0)) { - throw new 
InvalidArgumentError("maxRedirections must be a positive number"); + execute(data) { + assert3(this.ptr != null); + assert3(currentParser == null); + assert3(!this.paused); + const { socket, llhttp } = this; + if (data.length > currentBufferSize) { + if (currentBufferPtr) { + llhttp.free(currentBufferPtr); + } + currentBufferSize = Math.ceil(data.length / 4096) * 4096; + currentBufferPtr = llhttp.malloc(currentBufferSize); } - util.validateHandler(handler, opts.method, opts.upgrade); - this.dispatch = dispatch; - this.location = null; - this.abort = null; - this.opts = { ...opts, maxRedirections: 0 }; - this.maxRedirections = maxRedirections; - this.handler = handler; - this.history = []; - this.redirectionLimitReached = false; - if (util.isStream(this.opts.body)) { - if (util.bodyLength(this.opts.body) === 0) { - this.opts.body.on("data", function() { - assert3(false); - }); + new Uint8Array(llhttp.memory.buffer, currentBufferPtr, currentBufferSize).set(data); + try { + let ret; + try { + currentBufferRef = data; + currentParser = this; + ret = llhttp.llhttp_execute(this.ptr, currentBufferPtr, data.length); + } catch (err) { + throw err; + } finally { + currentParser = null; + currentBufferRef = null; } - if (typeof this.opts.body.readableDidRead !== "boolean") { - this.opts.body[kBodyUsed] = false; - EE.prototype.on.call(this.opts.body, "data", function() { - this[kBodyUsed] = true; - }); + const offset = llhttp.llhttp_get_error_pos(this.ptr) - currentBufferPtr; + if (ret === constants.ERROR.PAUSED_UPGRADE) { + this.onUpgrade(data.slice(offset)); + } else if (ret === constants.ERROR.PAUSED) { + this.paused = true; + socket.unshift(data.slice(offset)); + } else if (ret !== constants.ERROR.OK) { + const ptr = llhttp.llhttp_get_error_reason(this.ptr); + let message = ""; + if (ptr) { + const len = new Uint8Array(llhttp.memory.buffer, ptr).indexOf(0); + message = "Response does not match the HTTP/1.1 protocol (" + Buffer.from(llhttp.memory.buffer, ptr, len).toString() + ")"; + } + throw new HTTPParserError(message, constants.ERROR[ret], data.slice(offset)); } - } else if (this.opts.body && typeof this.opts.body.pipeTo === "function") { - this.opts.body = new BodyAsyncIterable(this.opts.body); - } else if (this.opts.body && typeof this.opts.body !== "string" && !ArrayBuffer.isView(this.opts.body) && util.isIterable(this.opts.body)) { - this.opts.body = new BodyAsyncIterable(this.opts.body); + } catch (err) { + util.destroy(socket, err); } } - onConnect(abort) { - this.abort = abort; - this.handler.onConnect(abort, { history: this.history }); + destroy() { + assert3(this.ptr != null); + assert3(currentParser == null); + this.llhttp.llhttp_free(this.ptr); + this.ptr = null; + timers.clearTimeout(this.timeout); + this.timeout = null; + this.timeoutValue = null; + this.timeoutType = null; + this.paused = false; } - onUpgrade(statusCode, headers, socket) { - this.handler.onUpgrade(statusCode, headers, socket); + onStatus(buf) { + this.statusText = buf.toString(); } - onError(error) { - this.handler.onError(error); + onMessageBegin() { + const { socket, client } = this; + if (socket.destroyed) { + return -1; + } + const request = client[kQueue][client[kRunningIdx]]; + if (!request) { + return -1; + } + request.onResponseStarted(); } - onHeaders(statusCode, headers, resume, statusText) { - this.location = this.history.length >= this.maxRedirections || util.isDisturbed(this.opts.body) ? 
null : parseLocation(statusCode, headers); - if (this.opts.throwOnMaxRedirect && this.history.length >= this.maxRedirections) { - if (this.request) { - this.request.abort(new Error("max redirects")); - } - this.redirectionLimitReached = true; - this.abort(new Error("max redirects")); - return; + onHeaderField(buf) { + const len = this.headers.length; + if ((len & 1) === 0) { + this.headers.push(buf); + } else { + this.headers[len - 1] = Buffer.concat([this.headers[len - 1], buf]); } - if (this.opts.origin) { - this.history.push(new URL(this.opts.path, this.opts.origin)); + this.trackHeader(buf.length); + } + onHeaderValue(buf) { + let len = this.headers.length; + if ((len & 1) === 1) { + this.headers.push(buf); + len += 1; + } else { + this.headers[len - 1] = Buffer.concat([this.headers[len - 1], buf]); } - if (!this.location) { - return this.handler.onHeaders(statusCode, headers, resume, statusText); + const key = this.headers[len - 2]; + if (key.length === 10) { + const headerName = util.bufferToLowerCasedHeaderName(key); + if (headerName === "keep-alive") { + this.keepAlive += buf.toString(); + } else if (headerName === "connection") { + this.connection += buf.toString(); + } + } else if (key.length === 14 && util.bufferToLowerCasedHeaderName(key) === "content-length") { + this.contentLength += buf.toString(); } - const { origin, pathname, search } = util.parseURL(new URL(this.location, this.opts.origin && new URL(this.opts.path, this.opts.origin))); - const path10 = search ? `${pathname}${search}` : pathname; - this.opts.headers = cleanRequestHeaders(this.opts.headers, statusCode === 303, this.opts.origin !== origin); - this.opts.path = path10; - this.opts.origin = origin; - this.opts.maxRedirections = 0; - this.opts.query = null; - if (statusCode === 303 && this.opts.method !== "HEAD") { - this.opts.method = "GET"; - this.opts.body = null; + this.trackHeader(buf.length); + } + trackHeader(len) { + this.headersSize += len; + if (this.headersSize >= this.headersMaxSize) { + util.destroy(this.socket, new HeadersOverflowError()); + } + } + onUpgrade(head) { + const { upgrade, client, socket, headers, statusCode } = this; + assert3(upgrade); + const request = client[kQueue][client[kRunningIdx]]; + assert3(request); + assert3(!socket.destroyed); + assert3(socket === client[kSocket]); + assert3(!this.paused); + assert3(request.upgrade || request.method === "CONNECT"); + this.statusCode = null; + this.statusText = ""; + this.shouldKeepAlive = null; + assert3(this.headers.length % 2 === 0); + this.headers = []; + this.headersSize = 0; + socket.unshift(head); + socket[kParser].destroy(); + socket[kParser] = null; + socket[kClient] = null; + socket[kError] = null; + removeAllListeners(socket); + client[kSocket] = null; + client[kHTTPContext] = null; + client[kQueue][client[kRunningIdx]++] = null; + client.emit("disconnect", client[kUrl], [client], new InformationalError("upgrade")); + try { + request.onUpgrade(statusCode, headers, socket); + } catch (err) { + util.destroy(socket, err); } + client[kResume](); } - onData(chunk) { - if (this.location) { - } else { - return this.handler.onData(chunk); + onHeadersComplete(statusCode, upgrade, shouldKeepAlive) { + const { client, socket, headers, statusText } = this; + if (socket.destroyed) { + return -1; } - } - onComplete(trailers) { - if (this.location) { - this.location = null; - this.abort = null; - this.dispatch(this.opts, this); - } else { - this.handler.onComplete(trailers); + const request = client[kQueue][client[kRunningIdx]]; + if 
(!request) { + return -1; } - } - onBodySent(chunk) { - if (this.handler.onBodySent) { - this.handler.onBodySent(chunk); + assert3(!this.upgrade); + assert3(this.statusCode < 200); + if (statusCode === 100) { + util.destroy(socket, new SocketError("bad response", util.getSocketInfo(socket))); + return -1; } - } - }; - function parseLocation(statusCode, headers) { - if (redirectableStatusCodes.indexOf(statusCode) === -1) { - return null; - } - for (let i = 0; i < headers.length; i += 2) { - if (headers[i].length === 8 && util.headerNameToString(headers[i]) === "location") { - return headers[i + 1]; + if (upgrade && !request.upgrade) { + util.destroy(socket, new SocketError("bad upgrade", util.getSocketInfo(socket))); + return -1; } - } - } - function shouldRemoveHeader(header, removeContent, unknownOrigin) { - if (header.length === 4) { - return util.headerNameToString(header) === "host"; - } - if (removeContent && util.headerNameToString(header).startsWith("content-")) { - return true; - } - if (unknownOrigin && (header.length === 13 || header.length === 6)) { - const name = util.headerNameToString(header); - return name === "authorization" || name === "cookie"; - } - return false; - } - function cleanRequestHeaders(headers, removeContent, unknownOrigin) { - const ret = []; - if (Array.isArray(headers)) { - for (let i = 0; i < headers.length; i += 2) { - if (!shouldRemoveHeader(headers[i], removeContent, unknownOrigin)) { - ret.push(headers[i], headers[i + 1]); + assert3.strictEqual(this.timeoutType, TIMEOUT_HEADERS); + this.statusCode = statusCode; + this.shouldKeepAlive = shouldKeepAlive || // Override llhttp value which does not allow keepAlive for HEAD. + request.method === "HEAD" && !socket[kReset] && this.connection.toLowerCase() === "keep-alive"; + if (this.statusCode >= 200) { + const bodyTimeout = request.bodyTimeout != null ? 
request.bodyTimeout : client[kBodyTimeout]; + this.setTimeout(bodyTimeout, TIMEOUT_BODY); + } else if (this.timeout) { + if (this.timeout.refresh) { + this.timeout.refresh(); } } - } else if (headers && typeof headers === "object") { - for (const key of Object.keys(headers)) { - if (!shouldRemoveHeader(key, removeContent, unknownOrigin)) { - ret.push(key, headers[key]); - } + if (request.method === "CONNECT") { + assert3(client[kRunning] === 1); + this.upgrade = true; + return 2; } - } else { - assert3(headers == null, "headers must be an object or an array"); - } - return ret; - } - module2.exports = RedirectHandler; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/interceptor/redirectInterceptor.js -var require_redirectInterceptor = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/interceptor/redirectInterceptor.js"(exports, module2) { - "use strict"; - var RedirectHandler = require_RedirectHandler(); - function createRedirectInterceptor({ maxRedirections: defaultMaxRedirections }) { - return (dispatch) => { - return function Intercept(opts, handler) { - const { maxRedirections = defaultMaxRedirections } = opts; - if (!maxRedirections) { - return dispatch(opts, handler); - } - const redirectHandler = new RedirectHandler(dispatch, maxRedirections, opts, handler); - opts = { ...opts, maxRedirections: 0 }; - return dispatch(opts, redirectHandler); - }; - }; - } - module2.exports = createRedirectInterceptor; - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/llhttp/llhttp-wasm.js -var require_llhttp_wasm = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/llhttp/llhttp-wasm.js"(exports, module2) { - var { Buffer: Buffer2 } = require("node:buffer"); - module2.exports = 
Buffer2.from("AGFzbQEAAAABMAhgAX8Bf2ADf39/AX9gBH9/f38Bf2AAAGADf39/AGABfwBgAn9/AGAGf39/f39/AALLAQgDZW52GHdhc21fb25faGVhZGVyc19jb21wbGV0ZQACA2VudhV3YXNtX29uX21lc3NhZ2VfYmVnaW4AAANlbnYLd2FzbV9vbl91cmwAAQNlbnYOd2FzbV9vbl9zdGF0dXMAAQNlbnYUd2FzbV9vbl9oZWFkZXJfZmllbGQAAQNlbnYUd2FzbV9vbl9oZWFkZXJfdmFsdWUAAQNlbnYMd2FzbV9vbl9ib2R5AAEDZW52GHdhc21fb25fbWVzc2FnZV9jb21wbGV0ZQAAA0ZFAwMEAAAFAAAAAAAABQEFAAUFBQAABgAAAAAGBgYGAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAAABAQcAAAUFAwABBAUBcAESEgUDAQACBggBfwFBgNQECwfRBSIGbWVtb3J5AgALX2luaXRpYWxpemUACRlfX2luZGlyZWN0X2Z1bmN0aW9uX3RhYmxlAQALbGxodHRwX2luaXQAChhsbGh0dHBfc2hvdWxkX2tlZXBfYWxpdmUAQQxsbGh0dHBfYWxsb2MADAZtYWxsb2MARgtsbGh0dHBfZnJlZQANBGZyZWUASA9sbGh0dHBfZ2V0X3R5cGUADhVsbGh0dHBfZ2V0X2h0dHBfbWFqb3IADxVsbGh0dHBfZ2V0X2h0dHBfbWlub3IAEBFsbGh0dHBfZ2V0X21ldGhvZAARFmxsaHR0cF9nZXRfc3RhdHVzX2NvZGUAEhJsbGh0dHBfZ2V0X3VwZ3JhZGUAEwxsbGh0dHBfcmVzZXQAFA5sbGh0dHBfZXhlY3V0ZQAVFGxsaHR0cF9zZXR0aW5nc19pbml0ABYNbGxodHRwX2ZpbmlzaAAXDGxsaHR0cF9wYXVzZQAYDWxsaHR0cF9yZXN1bWUAGRtsbGh0dHBfcmVzdW1lX2FmdGVyX3VwZ3JhZGUAGhBsbGh0dHBfZ2V0X2Vycm5vABsXbGxodHRwX2dldF9lcnJvcl9yZWFzb24AHBdsbGh0dHBfc2V0X2Vycm9yX3JlYXNvbgAdFGxsaHR0cF9nZXRfZXJyb3JfcG9zAB4RbGxodHRwX2Vycm5vX25hbWUAHxJsbGh0dHBfbWV0aG9kX25hbWUAIBJsbGh0dHBfc3RhdHVzX25hbWUAIRpsbGh0dHBfc2V0X2xlbmllbnRfaGVhZGVycwAiIWxsaHR0cF9zZXRfbGVuaWVudF9jaHVua2VkX2xlbmd0aAAjHWxsaHR0cF9zZXRfbGVuaWVudF9rZWVwX2FsaXZlACQkbGxodHRwX3NldF9sZW5pZW50X3RyYW5zZmVyX2VuY29kaW5nACUYbGxodHRwX21lc3NhZ2VfbmVlZHNfZW9mAD8JFwEAQQELEQECAwQFCwYHNTk3MS8tJyspCsLgAkUCAAsIABCIgICAAAsZACAAEMKAgIAAGiAAIAI2AjggACABOgAoCxwAIAAgAC8BMiAALQAuIAAQwYCAgAAQgICAgAALKgEBf0HAABDGgICAACIBEMKAgIAAGiABQYCIgIAANgI4IAEgADoAKCABCwoAIAAQyICAgAALBwAgAC0AKAsHACAALQAqCwcAIAAtACsLBwAgAC0AKQsHACAALwEyCwcAIAAtAC4LRQEEfyAAKAIYIQEgAC0ALSECIAAtACghAyAAKAI4IQQgABDCgICAABogACAENgI4IAAgAzoAKCAAIAI6AC0gACABNgIYCxEAIAAgASABIAJqEMOAgIAACxAAIABBAEHcABDMgICAABoLZwEBf0EAIQECQCAAKAIMDQACQAJAAkACQCAALQAvDgMBAAMCCyAAKAI4IgFFDQAgASgCLCIBRQ0AIAAgARGAgICAAAAiAQ0DC0EADwsQyoCAgAAACyAAQcOWgIAANgIQQQ4hAQsgAQseAAJAIAAoAgwNACAAQdGbgIAANgIQIABBFTYCDAsLFgACQCAAKAIMQRVHDQAgAEEANgIMCwsWAAJAIAAoAgxBFkcNACAAQQA2AgwLCwcAIAAoAgwLBwAgACgCEAsJACAAIAE2AhALBwAgACgCFAsiAAJAIABBJEkNABDKgICAAAALIABBAnRBoLOAgABqKAIACyIAAkAgAEEuSQ0AEMqAgIAAAAsgAEECdEGwtICAAGooAgAL7gsBAX9B66iAgAAhAQJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAIABBnH9qDvQDY2IAAWFhYWFhYQIDBAVhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhBgcICQoLDA0OD2FhYWFhEGFhYWFhYWFhYWFhEWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYRITFBUWFxgZGhthYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2YTc4OTphYWFhYWFhYTthYWE8YWFhYT0+P2FhYWFhYWFhQGFhQWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYUJDREVGR0hJSktMTU5PUFFSU2FhYWFhYWFhVFVWV1hZWlthXF1hYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFeYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhX2BhC0Hhp4CAAA8LQaShgIAADwtBy6yAgAAPC0H+sYCAAA8LQcCkgIAADwtBq6SAgAAPC0GNqICAAA8LQeKmgIAADwtBgLCAgAAPC0G5r4CAAA8LQdekgIAADwtB75+AgAAPC0Hhn4CAAA8LQfqfgIAADwtB8qCAgAAPC0Gor4CAAA8LQa6ygIAADwtBiLCAgAAPC0Hsp4CAAA8LQYKigIAADwtBjp2AgAAPC0HQroCAAA8LQcqjgIAADwtBxbKAgAAPC0HfnICAAA8LQdKcgIAADwtBxKCAgAAPC0HXoICAAA8LQaKfgIAADwtB7a6AgAAPC0GrsICAAA8LQdSlgIAADwtBzK6Ag
AAPC0H6roCAAA8LQfyrgIAADwtB0rCAgAAPC0HxnYCAAA8LQbuggIAADwtB96uAgAAPC0GQsYCAAA8LQdexgIAADwtBoq2AgAAPC0HUp4CAAA8LQeCrgIAADwtBn6yAgAAPC0HrsYCAAA8LQdWfgIAADwtByrGAgAAPC0HepYCAAA8LQdSegIAADwtB9JyAgAAPC0GnsoCAAA8LQbGdgIAADwtBoJ2AgAAPC0G5sYCAAA8LQbywgIAADwtBkqGAgAAPC0GzpoCAAA8LQemsgIAADwtBrJ6AgAAPC0HUq4CAAA8LQfemgIAADwtBgKaAgAAPC0GwoYCAAA8LQf6egIAADwtBjaOAgAAPC0GJrYCAAA8LQfeigIAADwtBoLGAgAAPC0Gun4CAAA8LQcalgIAADwtB6J6AgAAPC0GTooCAAA8LQcKvgIAADwtBw52AgAAPC0GLrICAAA8LQeGdgIAADwtBja+AgAAPC0HqoYCAAA8LQbStgIAADwtB0q+AgAAPC0HfsoCAAA8LQdKygIAADwtB8LCAgAAPC0GpooCAAA8LQfmjgIAADwtBmZ6AgAAPC0G1rICAAA8LQZuwgIAADwtBkrKAgAAPC0G2q4CAAA8LQcKigIAADwtB+LKAgAAPC0GepYCAAA8LQdCigIAADwtBup6AgAAPC0GBnoCAAA8LEMqAgIAAAAtB1qGAgAAhAQsgAQsWACAAIAAtAC1B/gFxIAFBAEdyOgAtCxkAIAAgAC0ALUH9AXEgAUEAR0EBdHI6AC0LGQAgACAALQAtQfsBcSABQQBHQQJ0cjoALQsZACAAIAAtAC1B9wFxIAFBAEdBA3RyOgAtCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAgAiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCBCIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQcaRgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIwIgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAggiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEH2ioCAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCNCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIMIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABB7ZqAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAjgiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCECIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQZWQgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAI8IgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAhQiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEGqm4CAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCQCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIYIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABB7ZOAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAkQiBEUNACAAIAQRgICAgAAAIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCJCIERQ0AIAAgBBGAgICAAAAhAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIsIgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAigiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEH2iICAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCUCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIcIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABBwpmAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAkgiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCICIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQZSUgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAJMIgRFDQAgACAEEYCAgIAAACEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAlQiBEUNACAAIAQRgICAgAAAIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCWCIERQ0AIAAgBBGAgICAAAAhAwsgAwtFAQF/AkACQCAALwEwQRRxQRRHDQBBASEDIAAtAChBAUYNASAALwEyQeUARiEDDAELIAAtAClBBUYhAwsgACADOgAuQQAL/gEBA39BASEDAkAgAC8BMCIEQQhxDQAgACkDIEIAUiEDCwJAAkAgAC0ALkUNAEEBIQUgAC0AKUEFRg0BQQEhBSAEQcAAcUUgA3FBAUcNAQtBACEFIARBwABxDQBBAiEFIARB//8DcSIDQQhxDQACQCADQYAEcUUNAAJAIAAtAChBAUcNACAALQAtQQpxDQBBBQ8LQQQPCwJAIANBIHENAAJAIAAtAChBAUYNACAALwEyQf//A3EiAEGcf2pB5ABJDQAgAEHMAUYNACAAQbACRg0AQQQhBSAEQShxRQ0CIANBiARxQYAERg0CC0EADwtBAEEDIAApAyBQGyEFCyAFC2IBAn9BACEBAkAgAC0AKEEBRg0AIAAvATJB//8DcSICQZx/akHkAEkNACACQcwBRg0AIAJBsAJGDQAgAC8BMCIAQcAAcQ0AQQEhASAAQYgEcUGABEYNACAAQShxRSEBCyABC6cBAQN/AkACQAJAIAAtACpFDQAgAC0AK0UNAEEAIQMgAC8BMCIEQQJxRQ0BDAILQQAhAyAALwEwIgRBAXFFDQELQQEhAyAALQAoQQFGDQAgAC8BMkH//wNxIgVBnH9qQeQASQ0AIAVBzAFGDQAgBUGwAkYNACAEQcAAcQ0AQQAhAyAEQYgEcUGABEYNACAEQShxQQBHIQMLIABBADsBMCAAQQA6AC8gAwuZAQECfwJAAkACQCAALQAqRQ0AIAAtACtFDQBBACEBIAAvATAiAkECcUUNAQwCC0EAIQEgAC8BMCICQQFxRQ0BC0EBIQEgAC0AKEEBRg0AIAAvATJB//8DcSIAQZx/akHkAEkNACAAQcwBRg0AIABB
sAJGDQAgAkHAAHENAEEAIQEgAkGIBHFBgARGDQAgAkEocUEARyEBCyABC1kAIABBGGpCADcDACAAQgA3AwAgAEE4akIANwMAIABBMGpCADcDACAAQShqQgA3AwAgAEEgakIANwMAIABBEGpCADcDACAAQQhqQgA3AwAgAEHdATYCHEEAC3sBAX8CQCAAKAIMIgMNAAJAIAAoAgRFDQAgACABNgIECwJAIAAgASACEMSAgIAAIgMNACAAKAIMDwsgACADNgIcQQAhAyAAKAIEIgFFDQAgACABIAIgACgCCBGBgICAAAAiAUUNACAAIAI2AhQgACABNgIMIAEhAwsgAwvk8wEDDn8DfgR/I4CAgIAAQRBrIgMkgICAgAAgASEEIAEhBSABIQYgASEHIAEhCCABIQkgASEKIAEhCyABIQwgASENIAEhDiABIQ8CQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAgACgCHCIQQX9qDt0B2gEB2QECAwQFBgcICQoLDA0O2AEPENcBERLWARMUFRYXGBkaG+AB3wEcHR7VAR8gISIjJCXUASYnKCkqKyzTAdIBLS7RAdABLzAxMjM0NTY3ODk6Ozw9Pj9AQUJDREVG2wFHSElKzwHOAUvNAUzMAU1OT1BRUlNUVVZXWFlaW1xdXl9gYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXp7fH1+f4ABgQGCAYMBhAGFAYYBhwGIAYkBigGLAYwBjQGOAY8BkAGRAZIBkwGUAZUBlgGXAZgBmQGaAZsBnAGdAZ4BnwGgAaEBogGjAaQBpQGmAacBqAGpAaoBqwGsAa0BrgGvAbABsQGyAbMBtAG1AbYBtwHLAcoBuAHJAbkByAG6AbsBvAG9Ab4BvwHAAcEBwgHDAcQBxQHGAQDcAQtBACEQDMYBC0EOIRAMxQELQQ0hEAzEAQtBDyEQDMMBC0EQIRAMwgELQRMhEAzBAQtBFCEQDMABC0EVIRAMvwELQRYhEAy+AQtBFyEQDL0BC0EYIRAMvAELQRkhEAy7AQtBGiEQDLoBC0EbIRAMuQELQRwhEAy4AQtBCCEQDLcBC0EdIRAMtgELQSAhEAy1AQtBHyEQDLQBC0EHIRAMswELQSEhEAyyAQtBIiEQDLEBC0EeIRAMsAELQSMhEAyvAQtBEiEQDK4BC0ERIRAMrQELQSQhEAysAQtBJSEQDKsBC0EmIRAMqgELQSchEAypAQtBwwEhEAyoAQtBKSEQDKcBC0ErIRAMpgELQSwhEAylAQtBLSEQDKQBC0EuIRAMowELQS8hEAyiAQtBxAEhEAyhAQtBMCEQDKABC0E0IRAMnwELQQwhEAyeAQtBMSEQDJ0BC0EyIRAMnAELQTMhEAybAQtBOSEQDJoBC0E1IRAMmQELQcUBIRAMmAELQQshEAyXAQtBOiEQDJYBC0E2IRAMlQELQQohEAyUAQtBNyEQDJMBC0E4IRAMkgELQTwhEAyRAQtBOyEQDJABC0E9IRAMjwELQQkhEAyOAQtBKCEQDI0BC0E+IRAMjAELQT8hEAyLAQtBwAAhEAyKAQtBwQAhEAyJAQtBwgAhEAyIAQtBwwAhEAyHAQtBxAAhEAyGAQtBxQAhEAyFAQtBxgAhEAyEAQtBKiEQDIMBC0HHACEQDIIBC0HIACEQDIEBC0HJACEQDIABC0HKACEQDH8LQcsAIRAMfgtBzQAhEAx9C0HMACEQDHwLQc4AIRAMewtBzwAhEAx6C0HQACEQDHkLQdEAIRAMeAtB0gAhEAx3C0HTACEQDHYLQdQAIRAMdQtB1gAhEAx0C0HVACEQDHMLQQYhEAxyC0HXACEQDHELQQUhEAxwC0HYACEQDG8LQQQhEAxuC0HZACEQDG0LQdoAIRAMbAtB2wAhEAxrC0HcACEQDGoLQQMhEAxpC0HdACEQDGgLQd4AIRAMZwtB3wAhEAxmC0HhACEQDGULQeAAIRAMZAtB4gAhEAxjC0HjACEQDGILQQIhEAxhC0HkACEQDGALQeUAIRAMXwtB5gAhEAxeC0HnACEQDF0LQegAIRAMXAtB6QAhEAxbC0HqACEQDFoLQesAIRAMWQtB7AAhEAxYC0HtACEQDFcLQe4AIRAMVgtB7wAhEAxVC0HwACEQDFQLQfEAIRAMUwtB8gAhEAxSC0HzACEQDFELQfQAIRAMUAtB9QAhEAxPC0H2ACEQDE4LQfcAIRAMTQtB+AAhEAxMC0H5ACEQDEsLQfoAIRAMSgtB+wAhEAxJC0H8ACEQDEgLQf0AIRAMRwtB/gAhEAxGC0H/ACEQDEULQYABIRAMRAtBgQEhEAxDC0GCASEQDEILQYMBIRAMQQtBhAEhEAxAC0GFASEQDD8LQYYBIRAMPgtBhwEhEAw9C0GIASEQDDwLQYkBIRAMOwtBigEhEAw6C0GLASEQDDkLQYwBIRAMOAtBjQEhEAw3C0GOASEQDDYLQY8BIRAMNQtBkAEhEAw0C0GRASEQDDMLQZIBIRAMMgtBkwEhEAwxC0GUASEQDDALQZUBIRAMLwtBlgEhEAwuC0GXASEQDC0LQZgBIRAMLAtBmQEhEAwrC0GaASEQDCoLQZsBIRAMKQtBnAEhEAwoC0GdASEQDCcLQZ4BIRAMJgtBnwEhEAwlC0GgASEQDCQLQaEBIRAMIwtBogEhEAwiC0GjASEQDCELQaQBIRAMIAtBpQEhEAwfC0GmASEQDB4LQacBIRAMHQtBqAEhEAwcC0GpASEQDBsLQaoBIRAMGgtBqwEhEAwZC0GsASEQDBgLQa0BIRAMFwtBrgEhEAwWC0EBIRAMFQtBrwEhEAwUC0GwASEQDBMLQbEBIRAMEgtBswEhEAwRC0GyASEQDBALQbQBIRAMDwtBtQEhEAwOC0G2ASEQDA0LQbcBIRAMDAtBuAEhEAwLC0G5ASEQDAoLQboBIRAMCQtBuwEhEAwIC0HGASEQDAcLQbwBIRAMBgtBvQE
hEAwFC0G+ASEQDAQLQb8BIRAMAwtBwAEhEAwCC0HCASEQDAELQcEBIRALA0ACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAQDscBAAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxweHyAhIyUoP0BBREVGR0hJSktMTU9QUVJT3gNXWVtcXWBiZWZnaGlqa2xtb3BxcnN0dXZ3eHl6e3x9foABggGFAYYBhwGJAYsBjAGNAY4BjwGQAZEBlAGVAZYBlwGYAZkBmgGbAZwBnQGeAZ8BoAGhAaIBowGkAaUBpgGnAagBqQGqAasBrAGtAa4BrwGwAbEBsgGzAbQBtQG2AbcBuAG5AboBuwG8Ab0BvgG/AcABwQHCAcMBxAHFAcYBxwHIAckBygHLAcwBzQHOAc8B0AHRAdIB0wHUAdUB1gHXAdgB2QHaAdsB3AHdAd4B4AHhAeIB4wHkAeUB5gHnAegB6QHqAesB7AHtAe4B7wHwAfEB8gHzAZkCpAKwAv4C/gILIAEiBCACRw3zAUHdASEQDP8DCyABIhAgAkcN3QFBwwEhEAz+AwsgASIBIAJHDZABQfcAIRAM/QMLIAEiASACRw2GAUHvACEQDPwDCyABIgEgAkcNf0HqACEQDPsDCyABIgEgAkcNe0HoACEQDPoDCyABIgEgAkcNeEHmACEQDPkDCyABIgEgAkcNGkEYIRAM+AMLIAEiASACRw0UQRIhEAz3AwsgASIBIAJHDVlBxQAhEAz2AwsgASIBIAJHDUpBPyEQDPUDCyABIgEgAkcNSEE8IRAM9AMLIAEiASACRw1BQTEhEAzzAwsgAC0ALkEBRg3rAwyHAgsgACABIgEgAhDAgICAAEEBRw3mASAAQgA3AyAM5wELIAAgASIBIAIQtICAgAAiEA3nASABIQEM9QILAkAgASIBIAJHDQBBBiEQDPADCyAAIAFBAWoiASACELuAgIAAIhAN6AEgASEBDDELIABCADcDIEESIRAM1QMLIAEiECACRw0rQR0hEAztAwsCQCABIgEgAkYNACABQQFqIQFBECEQDNQDC0EHIRAM7AMLIABCACAAKQMgIhEgAiABIhBrrSISfSITIBMgEVYbNwMgIBEgElYiFEUN5QFBCCEQDOsDCwJAIAEiASACRg0AIABBiYCAgAA2AgggACABNgIEIAEhAUEUIRAM0gMLQQkhEAzqAwsgASEBIAApAyBQDeQBIAEhAQzyAgsCQCABIgEgAkcNAEELIRAM6QMLIAAgAUEBaiIBIAIQtoCAgAAiEA3lASABIQEM8gILIAAgASIBIAIQuICAgAAiEA3lASABIQEM8gILIAAgASIBIAIQuICAgAAiEA3mASABIQEMDQsgACABIgEgAhC6gICAACIQDecBIAEhAQzwAgsCQCABIgEgAkcNAEEPIRAM5QMLIAEtAAAiEEE7Rg0IIBBBDUcN6AEgAUEBaiEBDO8CCyAAIAEiASACELqAgIAAIhAN6AEgASEBDPICCwNAAkAgAS0AAEHwtYCAAGotAAAiEEEBRg0AIBBBAkcN6wEgACgCBCEQIABBADYCBCAAIBAgAUEBaiIBELmAgIAAIhAN6gEgASEBDPQCCyABQQFqIgEgAkcNAAtBEiEQDOIDCyAAIAEiASACELqAgIAAIhAN6QEgASEBDAoLIAEiASACRw0GQRshEAzgAwsCQCABIgEgAkcNAEEWIRAM4AMLIABBioCAgAA2AgggACABNgIEIAAgASACELiAgIAAIhAN6gEgASEBQSAhEAzGAwsCQCABIgEgAkYNAANAAkAgAS0AAEHwt4CAAGotAAAiEEECRg0AAkAgEEF/ag4E5QHsAQDrAewBCyABQQFqIQFBCCEQDMgDCyABQQFqIgEgAkcNAAtBFSEQDN8DC0EVIRAM3gMLA0ACQCABLQAAQfC5gIAAai0AACIQQQJGDQAgEEF/ag4E3gHsAeAB6wHsAQsgAUEBaiIBIAJHDQALQRghEAzdAwsCQCABIgEgAkYNACAAQYuAgIAANgIIIAAgATYCBCABIQFBByEQDMQDC0EZIRAM3AMLIAFBAWohAQwCCwJAIAEiFCACRw0AQRohEAzbAwsgFCEBAkAgFC0AAEFzag4U3QLuAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gIA7gILQQAhECAAQQA2AhwgAEGvi4CAADYCECAAQQI2AgwgACAUQQFqNgIUDNoDCwJAIAEtAAAiEEE7Rg0AIBBBDUcN6AEgAUEBai
EBDOUCCyABQQFqIQELQSIhEAy/AwsCQCABIhAgAkcNAEEcIRAM2AMLQgAhESAQIQEgEC0AAEFQag435wHmAQECAwQFBgcIAAAAAAAAAAkKCwwNDgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADxAREhMUAAtBHiEQDL0DC0ICIREM5QELQgMhEQzkAQtCBCERDOMBC0IFIREM4gELQgYhEQzhAQtCByERDOABC0IIIREM3wELQgkhEQzeAQtCCiERDN0BC0ILIREM3AELQgwhEQzbAQtCDSERDNoBC0IOIREM2QELQg8hEQzYAQtCCiERDNcBC0ILIREM1gELQgwhEQzVAQtCDSERDNQBC0IOIREM0wELQg8hEQzSAQtCACERAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAQLQAAQVBqDjflAeQBAAECAwQFBgfmAeYB5gHmAeYB5gHmAQgJCgsMDeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gEODxAREhPmAQtCAiERDOQBC0IDIREM4wELQgQhEQziAQtCBSERDOEBC0IGIREM4AELQgchEQzfAQtCCCERDN4BC0IJIREM3QELQgohEQzcAQtCCyERDNsBC0IMIREM2gELQg0hEQzZAQtCDiERDNgBC0IPIREM1wELQgohEQzWAQtCCyERDNUBC0IMIREM1AELQg0hEQzTAQtCDiERDNIBC0IPIREM0QELIABCACAAKQMgIhEgAiABIhBrrSISfSITIBMgEVYbNwMgIBEgElYiFEUN0gFBHyEQDMADCwJAIAEiASACRg0AIABBiYCAgAA2AgggACABNgIEIAEhAUEkIRAMpwMLQSAhEAy/AwsgACABIhAgAhC+gICAAEF/ag4FtgEAxQIB0QHSAQtBESEQDKQDCyAAQQE6AC8gECEBDLsDCyABIgEgAkcN0gFBJCEQDLsDCyABIg0gAkcNHkHGACEQDLoDCyAAIAEiASACELKAgIAAIhAN1AEgASEBDLUBCyABIhAgAkcNJkHQACEQDLgDCwJAIAEiASACRw0AQSghEAy4AwsgAEEANgIEIABBjICAgAA2AgggACABIAEQsYCAgAAiEA3TASABIQEM2AELAkAgASIQIAJHDQBBKSEQDLcDCyAQLQAAIgFBIEYNFCABQQlHDdMBIBBBAWohAQwVCwJAIAEiASACRg0AIAFBAWohAQwXC0EqIRAMtQMLAkAgASIQIAJHDQBBKyEQDLUDCwJAIBAtAAAiAUEJRg0AIAFBIEcN1QELIAAtACxBCEYN0wEgECEBDJEDCwJAIAEiASACRw0AQSwhEAy0AwsgAS0AAEEKRw3VASABQQFqIQEMyQILIAEiDiACRw3VAUEvIRAMsgMLA0ACQCABLQAAIhBBIEYNAAJAIBBBdmoOBADcAdwBANoBCyABIQEM4AELIAFBAWoiASACRw0AC0ExIRAMsQMLQTIhECABIhQgAkYNsAMgAiAUayAAKAIAIgFqIRUgFCABa0EDaiEWAkADQCAULQAAIhdBIHIgFyAXQb9/akH/AXFBGkkbQf8BcSABQfC7gIAAai0AAEcNAQJAIAFBA0cNAEEGIQEMlgMLIAFBAWohASAUQQFqIhQgAkcNAAsgACAVNgIADLEDCyAAQQA2AgAgFCEBDNkBC0EzIRAgASIUIAJGDa8DIAIgFGsgACgCACIBaiEVIBQgAWtBCGohFgJAA0AgFC0AACIXQSByIBcgF0G/f2pB/wFxQRpJG0H/AXEgAUH0u4CAAGotAABHDQECQCABQQhHDQBBBSEBDJUDCyABQQFqIQEgFEEBaiIUIAJHDQALIAAgFTYCAAywAwsgAEEANgIAIBQhAQzYAQtBNCEQIAEiFCACRg2uAyACIBRrIAAoAgAiAWohFSAUIAFrQQVqIRYCQANAIBQtAAAiF0EgciAXIBdBv39qQf8BcUEaSRtB/wFxIAFB0MKAgABqLQAARw0BAkAgAUEFRw0AQQchAQyUAwsgAUEBaiEBIBRBAWoiFCACRw0ACyAAIBU2AgAMrwMLIABBADYCACAUIQEM1wELAkAgASIBIAJGDQADQAJAIAEtAABBgL6AgABqLQAAIhBBAUYNACAQQQJGDQogASEBDN0BCyABQQFqIgEgAkcNAAtBMCEQDK4DC0EwIRAMrQMLAkAgASIBIAJGDQADQAJAIAEtAAAiEEEgRg0AIBBBdmoOBNkB2gHaAdkB2gELIAFBAWoiASACRw0AC0E4IRAMrQMLQTghEAysAwsDQAJAIAEtAAAiEEEgRg0AIBBBCUcNAwsgAUEBaiIBIAJHDQALQTwhEAyrAwsDQAJAIAEtAAAiEEEgRg0AAkACQCAQQXZqDgTaAQEB2gEACyAQQSxGDdsBCyABIQEMBAsgAUEBaiIBIAJHDQALQT8hEAyqAwsgASEBDNsBC0HAACEQIAEiFCACRg2oAyACIBRrIAAoAgAiAWohFiAUIAFrQQZqIRcCQANAIBQtAABBIHIgAUGAwICAAGotAABHDQEgAUEGRg2OAyABQQFqIQEgFEEBaiIUIAJHDQALIAAgFjYCAAypAwsgAEEANgIAIBQhAQtBNiEQDI4DCwJAIAEiDyACRw0AQcEAIRAMpwMLIABBjICAgAA2AgggACAPNgIEIA8hASAALQAsQX9qDgTNAdUB1wHZAYcDCyABQQFqIQEMzAELAkAgASIBIAJGDQADQAJAIAEtAAAiEEEgciAQIBBBv39qQf8BcUEaSRtB/wFxIhBBCUYNACAQQSBGDQACQAJAAkACQCAQQZ1/ag4TAAMDAwMDAwMBAwMDAwMDAwMDAgMLIAFBAWohAUExIRAMkQMLIAFBAWohAUEyIRAMkAMLIAFBAWohAUEzIRAMjwMLIAEhAQzQAQsgAUEBaiIBIAJHDQALQTUhEAylAwtBNSEQDKQDCwJAIAEiASACRg0AA0ACQCABLQAAQYC8gIAAai0AAEEBRg0AIAEhAQzTAQsgAUEBaiIBIAJHDQALQT0hEAykAwtBPSEQDKMDCyAAIAEiASACELCAgIAAIhAN1gEgASEBDAELIBBBAWohAQtBPCEQDIcDCwJAIAEiASACRw0AQcIAIRAMoAMLAkADQAJAIAEtAABBd2oOGAAC/gL+AoQD/gL+Av4C/gL+Av4C/gL+Av4C/gL+Av4C/gL+Av4C/gL+Av4CAP4CCyABQQFqIgEgAkcNAAtBwgAhEAygAwsgAUEBaiEBIAAtAC1BAXFFDb0BIAEhAQtBLCEQDIUDCyABIgEgAkcN0wFBxAAhEAydAwsDQAJAIAEtAABBkMCAgABqLQAAQQFGDQAgASEBDLcCCyABQQFqIgEgAkcNAAtBxQAhEAycAwsgDS0AACIQQSBGDbMBIBBBOkcNgQMgACgCBCEBIABBADYCBCAAIAEgDRCvgICAACIBDdABIA1BAWohAQyzAgtBxwAhECABIg0gAkYNmgMgA
iANayAAKAIAIgFqIRYgDSABa0EFaiEXA0AgDS0AACIUQSByIBQgFEG/f2pB/wFxQRpJG0H/AXEgAUGQwoCAAGotAABHDYADIAFBBUYN9AIgAUEBaiEBIA1BAWoiDSACRw0ACyAAIBY2AgAMmgMLQcgAIRAgASINIAJGDZkDIAIgDWsgACgCACIBaiEWIA0gAWtBCWohFwNAIA0tAAAiFEEgciAUIBRBv39qQf8BcUEaSRtB/wFxIAFBlsKAgABqLQAARw3/AgJAIAFBCUcNAEECIQEM9QILIAFBAWohASANQQFqIg0gAkcNAAsgACAWNgIADJkDCwJAIAEiDSACRw0AQckAIRAMmQMLAkACQCANLQAAIgFBIHIgASABQb9/akH/AXFBGkkbQf8BcUGSf2oOBwCAA4ADgAOAA4ADAYADCyANQQFqIQFBPiEQDIADCyANQQFqIQFBPyEQDP8CC0HKACEQIAEiDSACRg2XAyACIA1rIAAoAgAiAWohFiANIAFrQQFqIRcDQCANLQAAIhRBIHIgFCAUQb9/akH/AXFBGkkbQf8BcSABQaDCgIAAai0AAEcN/QIgAUEBRg3wAiABQQFqIQEgDUEBaiINIAJHDQALIAAgFjYCAAyXAwtBywAhECABIg0gAkYNlgMgAiANayAAKAIAIgFqIRYgDSABa0EOaiEXA0AgDS0AACIUQSByIBQgFEG/f2pB/wFxQRpJG0H/AXEgAUGiwoCAAGotAABHDfwCIAFBDkYN8AIgAUEBaiEBIA1BAWoiDSACRw0ACyAAIBY2AgAMlgMLQcwAIRAgASINIAJGDZUDIAIgDWsgACgCACIBaiEWIA0gAWtBD2ohFwNAIA0tAAAiFEEgciAUIBRBv39qQf8BcUEaSRtB/wFxIAFBwMKAgABqLQAARw37AgJAIAFBD0cNAEEDIQEM8QILIAFBAWohASANQQFqIg0gAkcNAAsgACAWNgIADJUDC0HNACEQIAEiDSACRg2UAyACIA1rIAAoAgAiAWohFiANIAFrQQVqIRcDQCANLQAAIhRBIHIgFCAUQb9/akH/AXFBGkkbQf8BcSABQdDCgIAAai0AAEcN+gICQCABQQVHDQBBBCEBDPACCyABQQFqIQEgDUEBaiINIAJHDQALIAAgFjYCAAyUAwsCQCABIg0gAkcNAEHOACEQDJQDCwJAAkACQAJAIA0tAAAiAUEgciABIAFBv39qQf8BcUEaSRtB/wFxQZ1/ag4TAP0C/QL9Av0C/QL9Av0C/QL9Av0C/QL9AgH9Av0C/QICA/0CCyANQQFqIQFBwQAhEAz9AgsgDUEBaiEBQcIAIRAM/AILIA1BAWohAUHDACEQDPsCCyANQQFqIQFBxAAhEAz6AgsCQCABIgEgAkYNACAAQY2AgIAANgIIIAAgATYCBCABIQFBxQAhEAz6AgtBzwAhEAySAwsgECEBAkACQCAQLQAAQXZqDgQBqAKoAgCoAgsgEEEBaiEBC0EnIRAM+AILAkAgASIBIAJHDQBB0QAhEAyRAwsCQCABLQAAQSBGDQAgASEBDI0BCyABQQFqIQEgAC0ALUEBcUUNxwEgASEBDIwBCyABIhcgAkcNyAFB0gAhEAyPAwtB0wAhECABIhQgAkYNjgMgAiAUayAAKAIAIgFqIRYgFCABa0EBaiEXA0AgFC0AACABQdbCgIAAai0AAEcNzAEgAUEBRg3HASABQQFqIQEgFEEBaiIUIAJHDQALIAAgFjYCAAyOAwsCQCABIgEgAkcNAEHVACEQDI4DCyABLQAAQQpHDcwBIAFBAWohAQzHAQsCQCABIgEgAkcNAEHWACEQDI0DCwJAAkAgAS0AAEF2ag4EAM0BzQEBzQELIAFBAWohAQzHAQsgAUEBaiEBQcoAIRAM8wILIAAgASIBIAIQroCAgAAiEA3LASABIQFBzQAhEAzyAgsgAC0AKUEiRg2FAwymAgsCQCABIgEgAkcNAEHbACEQDIoDC0EAIRRBASEXQQEhFkEAIRACQAJAAkACQAJAAkACQAJAAkAgAS0AAEFQag4K1AHTAQABAgMEBQYI1QELQQIhEAwGC0EDIRAMBQtBBCEQDAQLQQUhEAwDC0EGIRAMAgtBByEQDAELQQghEAtBACEXQQAhFkEAIRQMzAELQQkhEEEBIRRBACEXQQAhFgzLAQsCQCABIgEgAkcNAEHdACEQDIkDCyABLQAAQS5HDcwBIAFBAWohAQymAgsgASIBIAJHDcwBQd8AIRAMhwMLAkAgASIBIAJGDQAgAEGOgICAADYCCCAAIAE2AgQgASEBQdAAIRAM7gILQeAAIRAMhgMLQeEAIRAgASIBIAJGDYUDIAIgAWsgACgCACIUaiEWIAEgFGtBA2ohFwNAIAEtAAAgFEHiwoCAAGotAABHDc0BIBRBA0YNzAEgFEEBaiEUIAFBAWoiASACRw0ACyAAIBY2AgAMhQMLQeIAIRAgASIBIAJGDYQDIAIgAWsgACgCACIUaiEWIAEgFGtBAmohFwNAIAEtAAAgFEHmwoCAAGotAABHDcwBIBRBAkYNzgEgFEEBaiEUIAFBAWoiASACRw0ACyAAIBY2AgAMhAMLQeMAIRAgASIBIAJGDYMDIAIgAWsgACgCACIUaiEWIAEgFGtBA2ohFwNAIAEtAAAgFEHpwoCAAGotAABHDcsBIBRBA0YNzgEgFEEBaiEUIAFBAWoiASACRw0ACyAAIBY2AgAMgwMLAkAgASIBIAJHDQBB5QAhEAyDAwsgACABQQFqIgEgAhCogICAACIQDc0BIAEhAUHWACEQDOkCCwJAIAEiASACRg0AA0ACQCABLQAAIhBBIEYNAAJAAkACQCAQQbh/ag4LAAHPAc8BzwHPAc8BzwHPAc8BAs8BCyABQQFqIQFB0gAhEAztAgsgAUEBaiEBQdMAIRAM7AILIAFBAWohAUHUACEQDOsCCyABQQFqIgEgAkcNAAtB5AAhEAyCAwtB5AAhEAyBAwsDQAJAIAEtAABB8MKAgABqLQAAIhBBAUYNACAQQX5qDgPPAdAB0QHSAQsgAUEBaiIBIAJHDQALQeYAIRAMgAMLAkAgASIBIAJGDQAgAUEBaiEBDAMLQecAIRAM/wILA0ACQCABLQAAQfDEgIAAai0AACIQQQFGDQACQCAQQX5qDgTSAdMB1AEA1QELIAEhAUHXACEQDOcCCyABQQFqIgEgAkcNAAtB6AAhEAz+AgsCQCABIgEgAkcNAEHpACEQDP4CCwJAIAEtAAAiEEF2ag4augHVAdUBvAHVAdUB1QHVAdUB1QHVAdUB1QHVAdUB1QHVAdUB1QHVAdUB1QHKAdUB1QEA0wELIAFBAWohAQtBBiEQDOMCCwNAAkAgAS0AAEHwxoCAAGotAABBAUYNACABIQEMngILIAFBAWoiASACRw0AC0HqACEQDPsCCwJAIAEiASACRg0AIAFBAWohAQwDC0HrACEQDPoCCwJAIAEiASACRw0AQewAIRAM+gILIAFBAWohAQwBCwJAIAEiASACRw0AQe0A
IRAM+QILIAFBAWohAQtBBCEQDN4CCwJAIAEiFCACRw0AQe4AIRAM9wILIBQhAQJAAkACQCAULQAAQfDIgIAAai0AAEF/ag4H1AHVAdYBAJwCAQLXAQsgFEEBaiEBDAoLIBRBAWohAQzNAQtBACEQIABBADYCHCAAQZuSgIAANgIQIABBBzYCDCAAIBRBAWo2AhQM9gILAkADQAJAIAEtAABB8MiAgABqLQAAIhBBBEYNAAJAAkAgEEF/ag4H0gHTAdQB2QEABAHZAQsgASEBQdoAIRAM4AILIAFBAWohAUHcACEQDN8CCyABQQFqIgEgAkcNAAtB7wAhEAz2AgsgAUEBaiEBDMsBCwJAIAEiFCACRw0AQfAAIRAM9QILIBQtAABBL0cN1AEgFEEBaiEBDAYLAkAgASIUIAJHDQBB8QAhEAz0AgsCQCAULQAAIgFBL0cNACAUQQFqIQFB3QAhEAzbAgsgAUF2aiIEQRZLDdMBQQEgBHRBiYCAAnFFDdMBDMoCCwJAIAEiASACRg0AIAFBAWohAUHeACEQDNoCC0HyACEQDPICCwJAIAEiFCACRw0AQfQAIRAM8gILIBQhAQJAIBQtAABB8MyAgABqLQAAQX9qDgPJApQCANQBC0HhACEQDNgCCwJAIAEiFCACRg0AA0ACQCAULQAAQfDKgIAAai0AACIBQQNGDQACQCABQX9qDgLLAgDVAQsgFCEBQd8AIRAM2gILIBRBAWoiFCACRw0AC0HzACEQDPECC0HzACEQDPACCwJAIAEiASACRg0AIABBj4CAgAA2AgggACABNgIEIAEhAUHgACEQDNcCC0H1ACEQDO8CCwJAIAEiASACRw0AQfYAIRAM7wILIABBj4CAgAA2AgggACABNgIEIAEhAQtBAyEQDNQCCwNAIAEtAABBIEcNwwIgAUEBaiIBIAJHDQALQfcAIRAM7AILAkAgASIBIAJHDQBB+AAhEAzsAgsgAS0AAEEgRw3OASABQQFqIQEM7wELIAAgASIBIAIQrICAgAAiEA3OASABIQEMjgILAkAgASIEIAJHDQBB+gAhEAzqAgsgBC0AAEHMAEcN0QEgBEEBaiEBQRMhEAzPAQsCQCABIgQgAkcNAEH7ACEQDOkCCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRADQCAELQAAIAFB8M6AgABqLQAARw3QASABQQVGDc4BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQfsAIRAM6AILAkAgASIEIAJHDQBB/AAhEAzoAgsCQAJAIAQtAABBvX9qDgwA0QHRAdEB0QHRAdEB0QHRAdEB0QEB0QELIARBAWohAUHmACEQDM8CCyAEQQFqIQFB5wAhEAzOAgsCQCABIgQgAkcNAEH9ACEQDOcCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHtz4CAAGotAABHDc8BIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEH9ACEQDOcCCyAAQQA2AgAgEEEBaiEBQRAhEAzMAQsCQCABIgQgAkcNAEH+ACEQDOYCCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRACQANAIAQtAAAgAUH2zoCAAGotAABHDc4BIAFBBUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEH+ACEQDOYCCyAAQQA2AgAgEEEBaiEBQRYhEAzLAQsCQCABIgQgAkcNAEH/ACEQDOUCCyACIARrIAAoAgAiAWohFCAEIAFrQQNqIRACQANAIAQtAAAgAUH8zoCAAGotAABHDc0BIAFBA0YNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEH/ACEQDOUCCyAAQQA2AgAgEEEBaiEBQQUhEAzKAQsCQCABIgQgAkcNAEGAASEQDOQCCyAELQAAQdkARw3LASAEQQFqIQFBCCEQDMkBCwJAIAEiBCACRw0AQYEBIRAM4wILAkACQCAELQAAQbJ/ag4DAMwBAcwBCyAEQQFqIQFB6wAhEAzKAgsgBEEBaiEBQewAIRAMyQILAkAgASIEIAJHDQBBggEhEAziAgsCQAJAIAQtAABBuH9qDggAywHLAcsBywHLAcsBAcsBCyAEQQFqIQFB6gAhEAzJAgsgBEEBaiEBQe0AIRAMyAILAkAgASIEIAJHDQBBgwEhEAzhAgsgAiAEayAAKAIAIgFqIRAgBCABa0ECaiEUAkADQCAELQAAIAFBgM+AgABqLQAARw3JASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBA2AgBBgwEhEAzhAgtBACEQIABBADYCACAUQQFqIQEMxgELAkAgASIEIAJHDQBBhAEhEAzgAgsgAiAEayAAKAIAIgFqIRQgBCABa0EEaiEQAkADQCAELQAAIAFBg8+AgABqLQAARw3IASABQQRGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBhAEhEAzgAgsgAEEANgIAIBBBAWohAUEjIRAMxQELAkAgASIEIAJHDQBBhQEhEAzfAgsCQAJAIAQtAABBtH9qDggAyAHIAcgByAHIAcgBAcgBCyAEQQFqIQFB7wAhEAzGAgsgBEEBaiEBQfAAIRAMxQILAkAgASIEIAJHDQBBhgEhEAzeAgsgBC0AAEHFAEcNxQEgBEEBaiEBDIMCCwJAIAEiBCACRw0AQYcBIRAM3QILIAIgBGsgACgCACIBaiEUIAQgAWtBA2ohEAJAA0AgBC0AACABQYjPgIAAai0AAEcNxQEgAUEDRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQYcBIRAM3QILIABBADYCACAQQQFqIQFBLSEQDMIBCwJAIAEiBCACRw0AQYgBIRAM3AILIAIgBGsgACgCACIBaiEUIAQgAWtBCGohEAJAA0AgBC0AACABQdDPgIAAai0AAEcNxAEgAUEIRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQYgBIRAM3AILIABBADYCACAQQQFqIQFBKSEQDMEBCwJAIAEiASACRw0AQYkBIRAM2wILQQEhECABLQAAQd8ARw3AASABQQFqIQEMgQILAkAgASIEIAJHDQBBigEhEAzaAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQA0AgBC0AACABQYzPgIAAai0AAEcNwQEgAUEBRg2vAiABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGKASEQDNkCCwJAIAEiBCACRw0AQYsBIRAM2QILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQY7PgIAAai0AAEcNwQEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQYsBIRAM2QILIABBADYCACAQQQFqIQFBAiEQDL4BCwJAIAEiBCACRw0AQYwBIRAM2AILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACA
BQfDPgIAAai0AAEcNwAEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQYwBIRAM2AILIABBADYCACAQQQFqIQFBHyEQDL0BCwJAIAEiBCACRw0AQY0BIRAM1wILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQfLPgIAAai0AAEcNvwEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQY0BIRAM1wILIABBADYCACAQQQFqIQFBCSEQDLwBCwJAIAEiBCACRw0AQY4BIRAM1gILAkACQCAELQAAQbd/ag4HAL8BvwG/Ab8BvwEBvwELIARBAWohAUH4ACEQDL0CCyAEQQFqIQFB+QAhEAy8AgsCQCABIgQgAkcNAEGPASEQDNUCCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRACQANAIAQtAAAgAUGRz4CAAGotAABHDb0BIAFBBUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGPASEQDNUCCyAAQQA2AgAgEEEBaiEBQRghEAy6AQsCQCABIgQgAkcNAEGQASEQDNQCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUGXz4CAAGotAABHDbwBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGQASEQDNQCCyAAQQA2AgAgEEEBaiEBQRchEAy5AQsCQCABIgQgAkcNAEGRASEQDNMCCyACIARrIAAoAgAiAWohFCAEIAFrQQZqIRACQANAIAQtAAAgAUGaz4CAAGotAABHDbsBIAFBBkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGRASEQDNMCCyAAQQA2AgAgEEEBaiEBQRUhEAy4AQsCQCABIgQgAkcNAEGSASEQDNICCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRACQANAIAQtAAAgAUGhz4CAAGotAABHDboBIAFBBUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGSASEQDNICCyAAQQA2AgAgEEEBaiEBQR4hEAy3AQsCQCABIgQgAkcNAEGTASEQDNECCyAELQAAQcwARw24ASAEQQFqIQFBCiEQDLYBCwJAIAQgAkcNAEGUASEQDNACCwJAAkAgBC0AAEG/f2oODwC5AbkBuQG5AbkBuQG5AbkBuQG5AbkBuQG5AQG5AQsgBEEBaiEBQf4AIRAMtwILIARBAWohAUH/ACEQDLYCCwJAIAQgAkcNAEGVASEQDM8CCwJAAkAgBC0AAEG/f2oOAwC4AQG4AQsgBEEBaiEBQf0AIRAMtgILIARBAWohBEGAASEQDLUCCwJAIAQgAkcNAEGWASEQDM4CCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRACQANAIAQtAAAgAUGnz4CAAGotAABHDbYBIAFBAUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGWASEQDM4CCyAAQQA2AgAgEEEBaiEBQQshEAyzAQsCQCAEIAJHDQBBlwEhEAzNAgsCQAJAAkACQCAELQAAQVNqDiMAuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AQG4AbgBuAG4AbgBArgBuAG4AQO4AQsgBEEBaiEBQfsAIRAMtgILIARBAWohAUH8ACEQDLUCCyAEQQFqIQRBgQEhEAy0AgsgBEEBaiEEQYIBIRAMswILAkAgBCACRw0AQZgBIRAMzAILIAIgBGsgACgCACIBaiEUIAQgAWtBBGohEAJAA0AgBC0AACABQanPgIAAai0AAEcNtAEgAUEERg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZgBIRAMzAILIABBADYCACAQQQFqIQFBGSEQDLEBCwJAIAQgAkcNAEGZASEQDMsCCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRACQANAIAQtAAAgAUGuz4CAAGotAABHDbMBIAFBBUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGZASEQDMsCCyAAQQA2AgAgEEEBaiEBQQYhEAywAQsCQCAEIAJHDQBBmgEhEAzKAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFBtM+AgABqLQAARw2yASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBmgEhEAzKAgsgAEEANgIAIBBBAWohAUEcIRAMrwELAkAgBCACRw0AQZsBIRAMyQILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQbbPgIAAai0AAEcNsQEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZsBIRAMyQILIABBADYCACAQQQFqIQFBJyEQDK4BCwJAIAQgAkcNAEGcASEQDMgCCwJAAkAgBC0AAEGsf2oOAgABsQELIARBAWohBEGGASEQDK8CCyAEQQFqIQRBhwEhEAyuAgsCQCAEIAJHDQBBnQEhEAzHAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFBuM+AgABqLQAARw2vASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBnQEhEAzHAgsgAEEANgIAIBBBAWohAUEmIRAMrAELAkAgBCACRw0AQZ4BIRAMxgILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQbrPgIAAai0AAEcNrgEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZ4BIRAMxgILIABBADYCACAQQQFqIQFBAyEQDKsBCwJAIAQgAkcNAEGfASEQDMUCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHtz4CAAGotAABHDa0BIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGfASEQDMUCCyAAQQA2AgAgEEEBaiEBQQwhEAyqAQsCQCAEIAJHDQBBoAEhEAzEAgsgAiAEayAAKAIAIgFqIRQgBCABa0EDaiEQAkADQCAELQAAIAFBvM+AgABqLQAARw2sASABQQNGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBoAEhEAzEAgsgAEEANgIAIBBBAWohAUENIRAMqQELAkAgBCACRw0AQaEBIRAMwwILAkACQCAELQAAQbp/ag4LAKwBrAGsAawBrAGsAawBrAGsAQGsAQsgBEEBaiEEQYsBIRAMqgILIARBAWohBEGMASEQDKkCCwJAIAQgAkcNAEGiASEQDMICCyAELQAAQdAARw2pASAEQQFqIQQM6QELAkAgBCACRw0AQaMBIRAMwQILAkACQCAELQAAQbd/ag4HAaoBqgGqAaoBqgEAqgELIA
RBAWohBEGOASEQDKgCCyAEQQFqIQFBIiEQDKYBCwJAIAQgAkcNAEGkASEQDMACCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRACQANAIAQtAAAgAUHAz4CAAGotAABHDagBIAFBAUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGkASEQDMACCyAAQQA2AgAgEEEBaiEBQR0hEAylAQsCQCAEIAJHDQBBpQEhEAy/AgsCQAJAIAQtAABBrn9qDgMAqAEBqAELIARBAWohBEGQASEQDKYCCyAEQQFqIQFBBCEQDKQBCwJAIAQgAkcNAEGmASEQDL4CCwJAAkACQAJAAkAgBC0AAEG/f2oOFQCqAaoBqgGqAaoBqgGqAaoBqgGqAQGqAaoBAqoBqgEDqgGqAQSqAQsgBEEBaiEEQYgBIRAMqAILIARBAWohBEGJASEQDKcCCyAEQQFqIQRBigEhEAymAgsgBEEBaiEEQY8BIRAMpQILIARBAWohBEGRASEQDKQCCwJAIAQgAkcNAEGnASEQDL0CCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHtz4CAAGotAABHDaUBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGnASEQDL0CCyAAQQA2AgAgEEEBaiEBQREhEAyiAQsCQCAEIAJHDQBBqAEhEAy8AgsgAiAEayAAKAIAIgFqIRQgBCABa0ECaiEQAkADQCAELQAAIAFBws+AgABqLQAARw2kASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBqAEhEAy8AgsgAEEANgIAIBBBAWohAUEsIRAMoQELAkAgBCACRw0AQakBIRAMuwILIAIgBGsgACgCACIBaiEUIAQgAWtBBGohEAJAA0AgBC0AACABQcXPgIAAai0AAEcNowEgAUEERg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQakBIRAMuwILIABBADYCACAQQQFqIQFBKyEQDKABCwJAIAQgAkcNAEGqASEQDLoCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHKz4CAAGotAABHDaIBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGqASEQDLoCCyAAQQA2AgAgEEEBaiEBQRQhEAyfAQsCQCAEIAJHDQBBqwEhEAy5AgsCQAJAAkACQCAELQAAQb5/ag4PAAECpAGkAaQBpAGkAaQBpAGkAaQBpAGkAQOkAQsgBEEBaiEEQZMBIRAMogILIARBAWohBEGUASEQDKECCyAEQQFqIQRBlQEhEAygAgsgBEEBaiEEQZYBIRAMnwILAkAgBCACRw0AQawBIRAMuAILIAQtAABBxQBHDZ8BIARBAWohBAzgAQsCQCAEIAJHDQBBrQEhEAy3AgsgAiAEayAAKAIAIgFqIRQgBCABa0ECaiEQAkADQCAELQAAIAFBzc+AgABqLQAARw2fASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBrQEhEAy3AgsgAEEANgIAIBBBAWohAUEOIRAMnAELAkAgBCACRw0AQa4BIRAMtgILIAQtAABB0ABHDZ0BIARBAWohAUElIRAMmwELAkAgBCACRw0AQa8BIRAMtQILIAIgBGsgACgCACIBaiEUIAQgAWtBCGohEAJAA0AgBC0AACABQdDPgIAAai0AAEcNnQEgAUEIRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQa8BIRAMtQILIABBADYCACAQQQFqIQFBKiEQDJoBCwJAIAQgAkcNAEGwASEQDLQCCwJAAkAgBC0AAEGrf2oOCwCdAZ0BnQGdAZ0BnQGdAZ0BnQEBnQELIARBAWohBEGaASEQDJsCCyAEQQFqIQRBmwEhEAyaAgsCQCAEIAJHDQBBsQEhEAyzAgsCQAJAIAQtAABBv39qDhQAnAGcAZwBnAGcAZwBnAGcAZwBnAGcAZwBnAGcAZwBnAGcAZwBAZwBCyAEQQFqIQRBmQEhEAyaAgsgBEEBaiEEQZwBIRAMmQILAkAgBCACRw0AQbIBIRAMsgILIAIgBGsgACgCACIBaiEUIAQgAWtBA2ohEAJAA0AgBC0AACABQdnPgIAAai0AAEcNmgEgAUEDRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQbIBIRAMsgILIABBADYCACAQQQFqIQFBISEQDJcBCwJAIAQgAkcNAEGzASEQDLECCyACIARrIAAoAgAiAWohFCAEIAFrQQZqIRACQANAIAQtAAAgAUHdz4CAAGotAABHDZkBIAFBBkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGzASEQDLECCyAAQQA2AgAgEEEBaiEBQRohEAyWAQsCQCAEIAJHDQBBtAEhEAywAgsCQAJAAkAgBC0AAEG7f2oOEQCaAZoBmgGaAZoBmgGaAZoBmgEBmgGaAZoBmgGaAQKaAQsgBEEBaiEEQZ0BIRAMmAILIARBAWohBEGeASEQDJcCCyAEQQFqIQRBnwEhEAyWAgsCQCAEIAJHDQBBtQEhEAyvAgsgAiAEayAAKAIAIgFqIRQgBCABa0EFaiEQAkADQCAELQAAIAFB5M+AgABqLQAARw2XASABQQVGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBtQEhEAyvAgsgAEEANgIAIBBBAWohAUEoIRAMlAELAkAgBCACRw0AQbYBIRAMrgILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQerPgIAAai0AAEcNlgEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQbYBIRAMrgILIABBADYCACAQQQFqIQFBByEQDJMBCwJAIAQgAkcNAEG3ASEQDK0CCwJAAkAgBC0AAEG7f2oODgCWAZYBlgGWAZYBlgGWAZYBlgGWAZYBlgEBlgELIARBAWohBEGhASEQDJQCCyAEQQFqIQRBogEhEAyTAgsCQCAEIAJHDQBBuAEhEAysAgsgAiAEayAAKAIAIgFqIRQgBCABa0ECaiEQAkADQCAELQAAIAFB7c+AgABqLQAARw2UASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBuAEhEAysAgsgAEEANgIAIBBBAWohAUESIRAMkQELAkAgBCACRw0AQbkBIRAMqwILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQfDPgIAAai0AAEcNkwEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQbkBIRAMqwILIABBADYCACAQQQFqIQFBICEQDJABCwJAIAQgAkcNAEG6ASEQDKoCCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRACQANAIAQtAAAgAUHyz
4CAAGotAABHDZIBIAFBAUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEG6ASEQDKoCCyAAQQA2AgAgEEEBaiEBQQ8hEAyPAQsCQCAEIAJHDQBBuwEhEAypAgsCQAJAIAQtAABBt39qDgcAkgGSAZIBkgGSAQGSAQsgBEEBaiEEQaUBIRAMkAILIARBAWohBEGmASEQDI8CCwJAIAQgAkcNAEG8ASEQDKgCCyACIARrIAAoAgAiAWohFCAEIAFrQQdqIRACQANAIAQtAAAgAUH0z4CAAGotAABHDZABIAFBB0YNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEG8ASEQDKgCCyAAQQA2AgAgEEEBaiEBQRshEAyNAQsCQCAEIAJHDQBBvQEhEAynAgsCQAJAAkAgBC0AAEG+f2oOEgCRAZEBkQGRAZEBkQGRAZEBkQEBkQGRAZEBkQGRAZEBApEBCyAEQQFqIQRBpAEhEAyPAgsgBEEBaiEEQacBIRAMjgILIARBAWohBEGoASEQDI0CCwJAIAQgAkcNAEG+ASEQDKYCCyAELQAAQc4ARw2NASAEQQFqIQQMzwELAkAgBCACRw0AQb8BIRAMpQILAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAgBC0AAEG/f2oOFQABAgOcAQQFBpwBnAGcAQcICQoLnAEMDQ4PnAELIARBAWohAUHoACEQDJoCCyAEQQFqIQFB6QAhEAyZAgsgBEEBaiEBQe4AIRAMmAILIARBAWohAUHyACEQDJcCCyAEQQFqIQFB8wAhEAyWAgsgBEEBaiEBQfYAIRAMlQILIARBAWohAUH3ACEQDJQCCyAEQQFqIQFB+gAhEAyTAgsgBEEBaiEEQYMBIRAMkgILIARBAWohBEGEASEQDJECCyAEQQFqIQRBhQEhEAyQAgsgBEEBaiEEQZIBIRAMjwILIARBAWohBEGYASEQDI4CCyAEQQFqIQRBoAEhEAyNAgsgBEEBaiEEQaMBIRAMjAILIARBAWohBEGqASEQDIsCCwJAIAQgAkYNACAAQZCAgIAANgIIIAAgBDYCBEGrASEQDIsCC0HAASEQDKMCCyAAIAUgAhCqgICAACIBDYsBIAUhAQxcCwJAIAYgAkYNACAGQQFqIQUMjQELQcIBIRAMoQILA0ACQCAQLQAAQXZqDgSMAQAAjwEACyAQQQFqIhAgAkcNAAtBwwEhEAygAgsCQCAHIAJGDQAgAEGRgICAADYCCCAAIAc2AgQgByEBQQEhEAyHAgtBxAEhEAyfAgsCQCAHIAJHDQBBxQEhEAyfAgsCQAJAIActAABBdmoOBAHOAc4BAM4BCyAHQQFqIQYMjQELIAdBAWohBQyJAQsCQCAHIAJHDQBBxgEhEAyeAgsCQAJAIActAABBdmoOFwGPAY8BAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAQCPAQsgB0EBaiEHC0GwASEQDIQCCwJAIAggAkcNAEHIASEQDJ0CCyAILQAAQSBHDY0BIABBADsBMiAIQQFqIQFBswEhEAyDAgsgASEXAkADQCAXIgcgAkYNASAHLQAAQVBqQf8BcSIQQQpPDcwBAkAgAC8BMiIUQZkzSw0AIAAgFEEKbCIUOwEyIBBB//8DcyAUQf7/A3FJDQAgB0EBaiEXIAAgFCAQaiIQOwEyIBBB//8DcUHoB0kNAQsLQQAhECAAQQA2AhwgAEHBiYCAADYCECAAQQ02AgwgACAHQQFqNgIUDJwCC0HHASEQDJsCCyAAIAggAhCugICAACIQRQ3KASAQQRVHDYwBIABByAE2AhwgACAINgIUIABByZeAgAA2AhAgAEEVNgIMQQAhEAyaAgsCQCAJIAJHDQBBzAEhEAyaAgtBACEUQQEhF0EBIRZBACEQAkACQAJAAkACQAJAAkACQAJAIAktAABBUGoOCpYBlQEAAQIDBAUGCJcBC0ECIRAMBgtBAyEQDAULQQQhEAwEC0EFIRAMAwtBBiEQDAILQQchEAwBC0EIIRALQQAhF0EAIRZBACEUDI4BC0EJIRBBASEUQQAhF0EAIRYMjQELAkAgCiACRw0AQc4BIRAMmQILIAotAABBLkcNjgEgCkEBaiEJDMoBCyALIAJHDY4BQdABIRAMlwILAkAgCyACRg0AIABBjoCAgAA2AgggACALNgIEQbcBIRAM/gELQdEBIRAMlgILAkAgBCACRw0AQdIBIRAMlgILIAIgBGsgACgCACIQaiEUIAQgEGtBBGohCwNAIAQtAAAgEEH8z4CAAGotAABHDY4BIBBBBEYN6QEgEEEBaiEQIARBAWoiBCACRw0ACyAAIBQ2AgBB0gEhEAyVAgsgACAMIAIQrICAgAAiAQ2NASAMIQEMuAELAkAgBCACRw0AQdQBIRAMlAILIAIgBGsgACgCACIQaiEUIAQgEGtBAWohDANAIAQtAAAgEEGB0ICAAGotAABHDY8BIBBBAUYNjgEgEEEBaiEQIARBAWoiBCACRw0ACyAAIBQ2AgBB1AEhEAyTAgsCQCAEIAJHDQBB1gEhEAyTAgsgAiAEayAAKAIAIhBqIRQgBCAQa0ECaiELA0AgBC0AACAQQYPQgIAAai0AAEcNjgEgEEECRg2QASAQQQFqIRAgBEEBaiIEIAJHDQALIAAgFDYCAEHWASEQDJICCwJAIAQgAkcNAEHXASEQDJICCwJAAkAgBC0AAEG7f2oOEACPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BAY8BCyAEQQFqIQRBuwEhEAz5AQsgBEEBaiEEQbwBIRAM+AELAkAgBCACRw0AQdgBIRAMkQILIAQtAABByABHDYwBIARBAWohBAzEAQsCQCAEIAJGDQAgAEGQgICAADYCCCAAIAQ2AgRBvgEhEAz3AQtB2QEhEAyPAgsCQCAEIAJHDQBB2gEhEAyPAgsgBC0AAEHIAEYNwwEgAEEBOgAoDLkBCyAAQQI6AC8gACAEIAIQpoCAgAAiEA2NAUHCASEQDPQBCyAALQAoQX9qDgK3AbkBuAELA0ACQCAELQAAQXZqDgQAjgGOAQCOAQsgBEEBaiIEIAJHDQALQd0BIRAMiwILIABBADoALyAALQAtQQRxRQ2EAgsgAEEAOgAvIABBAToANCABIQEMjAELIBBBFUYN2gEgAEEANgIcIAAgATYCFCAAQaeOgIAANgIQIABBEjYCDEEAIRAMiAILAkAgACAQIAIQtICAgAAiBA0AIBAhAQyBAgsCQCAEQRVHDQAgAEEDNgIcIAAgEDYCFCAAQbCYgIAANgIQIABBFTYCDEEAIRAMiAILIABBADYCHCAAIBA2AhQgAEGnjoCAADYCECAAQRI2AgxBACEQDIcCCyAQQRVGDdYBIABBADYCHCAAIAE2AhQgAEHajYCAADYCECAAQRQ2AgxBACEQDIYCCyAAKAIEIRcgAEEANgIEIBAgEadqIhYhASAAIBcgECAWIBQbIhAQtYCAgAAiFEUN
jQEgAEEHNgIcIAAgEDYCFCAAIBQ2AgxBACEQDIUCCyAAIAAvATBBgAFyOwEwIAEhAQtBKiEQDOoBCyAQQRVGDdEBIABBADYCHCAAIAE2AhQgAEGDjICAADYCECAAQRM2AgxBACEQDIICCyAQQRVGDc8BIABBADYCHCAAIAE2AhQgAEGaj4CAADYCECAAQSI2AgxBACEQDIECCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQt4CAgAAiEA0AIAFBAWohAQyNAQsgAEEMNgIcIAAgEDYCDCAAIAFBAWo2AhRBACEQDIACCyAQQRVGDcwBIABBADYCHCAAIAE2AhQgAEGaj4CAADYCECAAQSI2AgxBACEQDP8BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQt4CAgAAiEA0AIAFBAWohAQyMAQsgAEENNgIcIAAgEDYCDCAAIAFBAWo2AhRBACEQDP4BCyAQQRVGDckBIABBADYCHCAAIAE2AhQgAEHGjICAADYCECAAQSM2AgxBACEQDP0BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQuYCAgAAiEA0AIAFBAWohAQyLAQsgAEEONgIcIAAgEDYCDCAAIAFBAWo2AhRBACEQDPwBCyAAQQA2AhwgACABNgIUIABBwJWAgAA2AhAgAEECNgIMQQAhEAz7AQsgEEEVRg3FASAAQQA2AhwgACABNgIUIABBxoyAgAA2AhAgAEEjNgIMQQAhEAz6AQsgAEEQNgIcIAAgATYCFCAAIBA2AgxBACEQDPkBCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQuYCAgAAiBA0AIAFBAWohAQzxAQsgAEERNgIcIAAgBDYCDCAAIAFBAWo2AhRBACEQDPgBCyAQQRVGDcEBIABBADYCHCAAIAE2AhQgAEHGjICAADYCECAAQSM2AgxBACEQDPcBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQuYCAgAAiEA0AIAFBAWohAQyIAQsgAEETNgIcIAAgEDYCDCAAIAFBAWo2AhRBACEQDPYBCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQuYCAgAAiBA0AIAFBAWohAQztAQsgAEEUNgIcIAAgBDYCDCAAIAFBAWo2AhRBACEQDPUBCyAQQRVGDb0BIABBADYCHCAAIAE2AhQgAEGaj4CAADYCECAAQSI2AgxBACEQDPQBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQt4CAgAAiEA0AIAFBAWohAQyGAQsgAEEWNgIcIAAgEDYCDCAAIAFBAWo2AhRBACEQDPMBCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQt4CAgAAiBA0AIAFBAWohAQzpAQsgAEEXNgIcIAAgBDYCDCAAIAFBAWo2AhRBACEQDPIBCyAAQQA2AhwgACABNgIUIABBzZOAgAA2AhAgAEEMNgIMQQAhEAzxAQtCASERCyAQQQFqIQECQCAAKQMgIhJC//////////8PVg0AIAAgEkIEhiARhDcDICABIQEMhAELIABBADYCHCAAIAE2AhQgAEGtiYCAADYCECAAQQw2AgxBACEQDO8BCyAAQQA2AhwgACAQNgIUIABBzZOAgAA2AhAgAEEMNgIMQQAhEAzuAQsgACgCBCEXIABBADYCBCAQIBGnaiIWIQEgACAXIBAgFiAUGyIQELWAgIAAIhRFDXMgAEEFNgIcIAAgEDYCFCAAIBQ2AgxBACEQDO0BCyAAQQA2AhwgACAQNgIUIABBqpyAgAA2AhAgAEEPNgIMQQAhEAzsAQsgACAQIAIQtICAgAAiAQ0BIBAhAQtBDiEQDNEBCwJAIAFBFUcNACAAQQI2AhwgACAQNgIUIABBsJiAgAA2AhAgAEEVNgIMQQAhEAzqAQsgAEEANgIcIAAgEDYCFCAAQaeOgIAANgIQIABBEjYCDEEAIRAM6QELIAFBAWohEAJAIAAvATAiAUGAAXFFDQACQCAAIBAgAhC7gICAACIBDQAgECEBDHALIAFBFUcNugEgAEEFNgIcIAAgEDYCFCAAQfmXgIAANgIQIABBFTYCDEEAIRAM6QELAkAgAUGgBHFBoARHDQAgAC0ALUECcQ0AIABBADYCHCAAIBA2AhQgAEGWk4CAADYCECAAQQQ2AgxBACEQDOkBCyAAIBAgAhC9gICAABogECEBAkACQAJAAkACQCAAIBAgAhCzgICAAA4WAgEABAQEBAQEBAQEBAQEBAQEBAQEAwQLIABBAToALgsgACAALwEwQcAAcjsBMCAQIQELQSYhEAzRAQsgAEEjNgIcIAAgEDYCFCAAQaWWgIAANgIQIABBFTYCDEEAIRAM6QELIABBADYCHCAAIBA2AhQgAEHVi4CAADYCECAAQRE2AgxBACEQDOgBCyAALQAtQQFxRQ0BQcMBIRAMzgELAkAgDSACRg0AA0ACQCANLQAAQSBGDQAgDSEBDMQBCyANQQFqIg0gAkcNAAtBJSEQDOcBC0ElIRAM5gELIAAoAgQhBCAAQQA2AgQgACAEIA0Qr4CAgAAiBEUNrQEgAEEmNgIcIAAgBDYCDCAAIA1BAWo2AhRBACEQDOUBCyAQQRVGDasBIABBADYCHCAAIAE2AhQgAEH9jYCAADYCECAAQR02AgxBACEQDOQBCyAAQSc2AhwgACABNgIUIAAgEDYCDEEAIRAM4wELIBAhAUEBIRQCQAJAAkACQAJAAkACQCAALQAsQX5qDgcGBQUDAQIABQsgACAALwEwQQhyOwEwDAMLQQIhFAwBC0EEIRQLIABBAToALCAAIAAvATAgFHI7ATALIBAhAQtBKyEQDMoBCyAAQQA2AhwgACAQNgIUIABBq5KAgAA2AhAgAEELNgIMQQAhEAziAQsgAEEANgIcIAAgATYCFCAAQeGPgIAANgIQIABBCjYCDEEAIRAM4QELIABBADoALCAQIQEMvQELIBAhAUEBIRQCQAJAAkACQAJAIAAtACxBe2oOBAMBAgAFCyAAIAAvATBBCHI7ATAMAwtBAiEUDAELQQQhFAsgAEEBOgAsIAAgAC8BMCAUcjsBMAsgECEBC0EpIRAMxQELIABBADYCHCAAIAE2AhQgAEHwlICAADYCECAAQQM2AgxBACEQDN0BCwJAIA4tAABBDUcNACAAKAIEIQEgAEEANgIEAkAgACABIA4QsYCAgAAiAQ0AIA5BAWohAQx1CyAAQSw2AhwgACABNgIMIAAgDkEBajYCFEEAIRAM3QELIAAtAC1BAXFFDQFBxAEhEAzDAQsCQCAOIAJHDQBBLSEQDNwBCwJAAkADQAJAIA4tAABBdmoOBAIAAAMACyAOQQFqIg4gAkcNAAtBLSEQDN0BCyAAKAIEIQEgAEEANgIEAkAgACABIA4QsYCAgAAiAQ0AIA4hAQx0CyAAQSw2AhwgACAONgIUIAAgATYCDEEAIRAM3AELIAAoAgQhASAAQQA2AgQCQCAAIAEgDhCxgICAACIBDQAgDkEBaiEBDHMLIABBLDYCHCAAIAE2AgwgACAOQQF
qNgIUQQAhEAzbAQsgACgCBCEEIABBADYCBCAAIAQgDhCxgICAACIEDaABIA4hAQzOAQsgEEEsRw0BIAFBAWohEEEBIQECQAJAAkACQAJAIAAtACxBe2oOBAMBAgQACyAQIQEMBAtBAiEBDAELQQQhAQsgAEEBOgAsIAAgAC8BMCABcjsBMCAQIQEMAQsgACAALwEwQQhyOwEwIBAhAQtBOSEQDL8BCyAAQQA6ACwgASEBC0E0IRAMvQELIAAgAC8BMEEgcjsBMCABIQEMAgsgACgCBCEEIABBADYCBAJAIAAgBCABELGAgIAAIgQNACABIQEMxwELIABBNzYCHCAAIAE2AhQgACAENgIMQQAhEAzUAQsgAEEIOgAsIAEhAQtBMCEQDLkBCwJAIAAtAChBAUYNACABIQEMBAsgAC0ALUEIcUUNkwEgASEBDAMLIAAtADBBIHENlAFBxQEhEAy3AQsCQCAPIAJGDQACQANAAkAgDy0AAEFQaiIBQf8BcUEKSQ0AIA8hAUE1IRAMugELIAApAyAiEUKZs+bMmbPmzBlWDQEgACARQgp+IhE3AyAgESABrUL/AYMiEkJ/hVYNASAAIBEgEnw3AyAgD0EBaiIPIAJHDQALQTkhEAzRAQsgACgCBCECIABBADYCBCAAIAIgD0EBaiIEELGAgIAAIgINlQEgBCEBDMMBC0E5IRAMzwELAkAgAC8BMCIBQQhxRQ0AIAAtAChBAUcNACAALQAtQQhxRQ2QAQsgACABQff7A3FBgARyOwEwIA8hAQtBNyEQDLQBCyAAIAAvATBBEHI7ATAMqwELIBBBFUYNiwEgAEEANgIcIAAgATYCFCAAQfCOgIAANgIQIABBHDYCDEEAIRAMywELIABBwwA2AhwgACABNgIMIAAgDUEBajYCFEEAIRAMygELAkAgAS0AAEE6Rw0AIAAoAgQhECAAQQA2AgQCQCAAIBAgARCvgICAACIQDQAgAUEBaiEBDGMLIABBwwA2AhwgACAQNgIMIAAgAUEBajYCFEEAIRAMygELIABBADYCHCAAIAE2AhQgAEGxkYCAADYCECAAQQo2AgxBACEQDMkBCyAAQQA2AhwgACABNgIUIABBoJmAgAA2AhAgAEEeNgIMQQAhEAzIAQsgAEEANgIACyAAQYASOwEqIAAgF0EBaiIBIAIQqICAgAAiEA0BIAEhAQtBxwAhEAysAQsgEEEVRw2DASAAQdEANgIcIAAgATYCFCAAQeOXgIAANgIQIABBFTYCDEEAIRAMxAELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDF4LIABB0gA2AhwgACABNgIUIAAgEDYCDEEAIRAMwwELIABBADYCHCAAIBQ2AhQgAEHBqICAADYCECAAQQc2AgwgAEEANgIAQQAhEAzCAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMXQsgAEHTADYCHCAAIAE2AhQgACAQNgIMQQAhEAzBAQtBACEQIABBADYCHCAAIAE2AhQgAEGAkYCAADYCECAAQQk2AgwMwAELIBBBFUYNfSAAQQA2AhwgACABNgIUIABBlI2AgAA2AhAgAEEhNgIMQQAhEAy/AQtBASEWQQAhF0EAIRRBASEQCyAAIBA6ACsgAUEBaiEBAkACQCAALQAtQRBxDQACQAJAAkAgAC0AKg4DAQACBAsgFkUNAwwCCyAUDQEMAgsgF0UNAQsgACgCBCEQIABBADYCBAJAIAAgECABEK2AgIAAIhANACABIQEMXAsgAEHYADYCHCAAIAE2AhQgACAQNgIMQQAhEAy+AQsgACgCBCEEIABBADYCBAJAIAAgBCABEK2AgIAAIgQNACABIQEMrQELIABB2QA2AhwgACABNgIUIAAgBDYCDEEAIRAMvQELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARCtgICAACIEDQAgASEBDKsBCyAAQdoANgIcIAAgATYCFCAAIAQ2AgxBACEQDLwBCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQrYCAgAAiBA0AIAEhAQypAQsgAEHcADYCHCAAIAE2AhQgACAENgIMQQAhEAy7AQsCQCABLQAAQVBqIhBB/wFxQQpPDQAgACAQOgAqIAFBAWohAUHPACEQDKIBCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQrYCAgAAiBA0AIAEhAQynAQsgAEHeADYCHCAAIAE2AhQgACAENgIMQQAhEAy6AQsgAEEANgIAIBdBAWohAQJAIAAtAClBI08NACABIQEMWQsgAEEANgIcIAAgATYCFCAAQdOJgIAANgIQIABBCDYCDEEAIRAMuQELIABBADYCAAtBACEQIABBADYCHCAAIAE2AhQgAEGQs4CAADYCECAAQQg2AgwMtwELIABBADYCACAXQQFqIQECQCAALQApQSFHDQAgASEBDFYLIABBADYCHCAAIAE2AhQgAEGbioCAADYCECAAQQg2AgxBACEQDLYBCyAAQQA2AgAgF0EBaiEBAkAgAC0AKSIQQV1qQQtPDQAgASEBDFULAkAgEEEGSw0AQQEgEHRBygBxRQ0AIAEhAQxVC0EAIRAgAEEANgIcIAAgATYCFCAAQfeJgIAANgIQIABBCDYCDAy1AQsgEEEVRg1xIABBADYCHCAAIAE2AhQgAEG5jYCAADYCECAAQRo2AgxBACEQDLQBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxUCyAAQeUANgIcIAAgATYCFCAAIBA2AgxBACEQDLMBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxNCyAAQdIANgIcIAAgATYCFCAAIBA2AgxBACEQDLIBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxNCyAAQdMANgIcIAAgATYCFCAAIBA2AgxBACEQDLEBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxRCyAAQeUANgIcIAAgATYCFCAAIBA2AgxBACEQDLABCyAAQQA2AhwgACABNgIUIABBxoqAgAA2AhAgAEEHNgIMQQAhEAyvAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMSQsgAEHSADYCHCAAIAE2AhQgACAQNgIMQQAhEAyuAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMSQsgAEHTADYCHCAAIAE2AhQgACAQNgIMQQAhEAytAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMTQsgAEHlADYCHCAAIAE2AhQgACAQNgIMQQAhEAysAQsgAEEANgIcIAAgATYCFCAAQdyIgIAANgIQIABBBzYCDEEAIRAMqwELIBBBP0cNASABQQFqIQELQQUhEAyQAQtBACEQIABBADYCHCAAIAE2AhQgAE
H9koCAADYCECAAQQc2AgwMqAELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDEILIABB0gA2AhwgACABNgIUIAAgEDYCDEEAIRAMpwELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDEILIABB0wA2AhwgACABNgIUIAAgEDYCDEEAIRAMpgELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDEYLIABB5QA2AhwgACABNgIUIAAgEDYCDEEAIRAMpQELIAAoAgQhASAAQQA2AgQCQCAAIAEgFBCngICAACIBDQAgFCEBDD8LIABB0gA2AhwgACAUNgIUIAAgATYCDEEAIRAMpAELIAAoAgQhASAAQQA2AgQCQCAAIAEgFBCngICAACIBDQAgFCEBDD8LIABB0wA2AhwgACAUNgIUIAAgATYCDEEAIRAMowELIAAoAgQhASAAQQA2AgQCQCAAIAEgFBCngICAACIBDQAgFCEBDEMLIABB5QA2AhwgACAUNgIUIAAgATYCDEEAIRAMogELIABBADYCHCAAIBQ2AhQgAEHDj4CAADYCECAAQQc2AgxBACEQDKEBCyAAQQA2AhwgACABNgIUIABBw4+AgAA2AhAgAEEHNgIMQQAhEAygAQtBACEQIABBADYCHCAAIBQ2AhQgAEGMnICAADYCECAAQQc2AgwMnwELIABBADYCHCAAIBQ2AhQgAEGMnICAADYCECAAQQc2AgxBACEQDJ4BCyAAQQA2AhwgACAUNgIUIABB/pGAgAA2AhAgAEEHNgIMQQAhEAydAQsgAEEANgIcIAAgATYCFCAAQY6bgIAANgIQIABBBjYCDEEAIRAMnAELIBBBFUYNVyAAQQA2AhwgACABNgIUIABBzI6AgAA2AhAgAEEgNgIMQQAhEAybAQsgAEEANgIAIBBBAWohAUEkIRALIAAgEDoAKSAAKAIEIRAgAEEANgIEIAAgECABEKuAgIAAIhANVCABIQEMPgsgAEEANgIAC0EAIRAgAEEANgIcIAAgBDYCFCAAQfGbgIAANgIQIABBBjYCDAyXAQsgAUEVRg1QIABBADYCHCAAIAU2AhQgAEHwjICAADYCECAAQRs2AgxBACEQDJYBCyAAKAIEIQUgAEEANgIEIAAgBSAQEKmAgIAAIgUNASAQQQFqIQULQa0BIRAMewsgAEHBATYCHCAAIAU2AgwgACAQQQFqNgIUQQAhEAyTAQsgACgCBCEGIABBADYCBCAAIAYgEBCpgICAACIGDQEgEEEBaiEGC0GuASEQDHgLIABBwgE2AhwgACAGNgIMIAAgEEEBajYCFEEAIRAMkAELIABBADYCHCAAIAc2AhQgAEGXi4CAADYCECAAQQ02AgxBACEQDI8BCyAAQQA2AhwgACAINgIUIABB45CAgAA2AhAgAEEJNgIMQQAhEAyOAQsgAEEANgIcIAAgCDYCFCAAQZSNgIAANgIQIABBITYCDEEAIRAMjQELQQEhFkEAIRdBACEUQQEhEAsgACAQOgArIAlBAWohCAJAAkAgAC0ALUEQcQ0AAkACQAJAIAAtACoOAwEAAgQLIBZFDQMMAgsgFA0BDAILIBdFDQELIAAoAgQhECAAQQA2AgQgACAQIAgQrYCAgAAiEEUNPSAAQckBNgIcIAAgCDYCFCAAIBA2AgxBACEQDIwBCyAAKAIEIQQgAEEANgIEIAAgBCAIEK2AgIAAIgRFDXYgAEHKATYCHCAAIAg2AhQgACAENgIMQQAhEAyLAQsgACgCBCEEIABBADYCBCAAIAQgCRCtgICAACIERQ10IABBywE2AhwgACAJNgIUIAAgBDYCDEEAIRAMigELIAAoAgQhBCAAQQA2AgQgACAEIAoQrYCAgAAiBEUNciAAQc0BNgIcIAAgCjYCFCAAIAQ2AgxBACEQDIkBCwJAIAstAABBUGoiEEH/AXFBCk8NACAAIBA6ACogC0EBaiEKQbYBIRAMcAsgACgCBCEEIABBADYCBCAAIAQgCxCtgICAACIERQ1wIABBzwE2AhwgACALNgIUIAAgBDYCDEEAIRAMiAELIABBADYCHCAAIAQ2AhQgAEGQs4CAADYCECAAQQg2AgwgAEEANgIAQQAhEAyHAQsgAUEVRg0/IABBADYCHCAAIAw2AhQgAEHMjoCAADYCECAAQSA2AgxBACEQDIYBCyAAQYEEOwEoIAAoAgQhECAAQgA3AwAgACAQIAxBAWoiDBCrgICAACIQRQ04IABB0wE2AhwgACAMNgIUIAAgEDYCDEEAIRAMhQELIABBADYCAAtBACEQIABBADYCHCAAIAQ2AhQgAEHYm4CAADYCECAAQQg2AgwMgwELIAAoAgQhECAAQgA3AwAgACAQIAtBAWoiCxCrgICAACIQDQFBxgEhEAxpCyAAQQI6ACgMVQsgAEHVATYCHCAAIAs2AhQgACAQNgIMQQAhEAyAAQsgEEEVRg03IABBADYCHCAAIAQ2AhQgAEGkjICAADYCECAAQRA2AgxBACEQDH8LIAAtADRBAUcNNCAAIAQgAhC8gICAACIQRQ00IBBBFUcNNSAAQdwBNgIcIAAgBDYCFCAAQdWWgIAANgIQIABBFTYCDEEAIRAMfgtBACEQIABBADYCHCAAQa+LgIAANgIQIABBAjYCDCAAIBRBAWo2AhQMfQtBACEQDGMLQQIhEAxiC0ENIRAMYQtBDyEQDGALQSUhEAxfC0ETIRAMXgtBFSEQDF0LQRYhEAxcC0EXIRAMWwtBGCEQDFoLQRkhEAxZC0EaIRAMWAtBGyEQDFcLQRwhEAxWC0EdIRAMVQtBHyEQDFQLQSEhEAxTC0EjIRAMUgtBxgAhEAxRC0EuIRAMUAtBLyEQDE8LQTshEAxOC0E9IRAMTQtByAAhEAxMC0HJACEQDEsLQcsAIRAMSgtBzAAhEAxJC0HOACEQDEgLQdEAIRAMRwtB1QAhEAxGC0HYACEQDEULQdkAIRAMRAtB2wAhEAxDC0HkACEQDEILQeUAIRAMQQtB8QAhEAxAC0H0ACEQDD8LQY0BIRAMPgtBlwEhEAw9C0GpASEQDDwLQawBIRAMOwtBwAEhEAw6C0G5ASEQDDkLQa8BIRAMOAtBsQEhEAw3C0GyASEQDDYLQbQBIRAMNQtBtQEhEAw0C0G6ASEQDDMLQb0BIRAMMgtBvwEhEAwxC0HBASEQDDALIABBADYCHCAAIAQ2AhQgAEHpi4CAADYCECAAQR82AgxBACEQDEgLIABB2wE2AhwgACAENgIUIABB+paAgAA2AhAgAEEVNgIMQQAhEAxHCyAAQfgANgIcIAAgDDYCFCAAQcqYgIAANgIQIABBFTYCDEEAIRAMRgsgAEHRADYCHCAAIAU2AhQgAEGwl4CAADYCECAAQRU2AgxBACEQDEULIABB+QA2AhwgACABNgIUIAAgEDYCDEEAIRAMRAsgAEH4ADYCHCAAIAE2AhQgAEHKmICAADYCECAAQRU2AgxBA
CEQDEMLIABB5AA2AhwgACABNgIUIABB45eAgAA2AhAgAEEVNgIMQQAhEAxCCyAAQdcANgIcIAAgATYCFCAAQcmXgIAANgIQIABBFTYCDEEAIRAMQQsgAEEANgIcIAAgATYCFCAAQbmNgIAANgIQIABBGjYCDEEAIRAMQAsgAEHCADYCHCAAIAE2AhQgAEHjmICAADYCECAAQRU2AgxBACEQDD8LIABBADYCBCAAIA8gDxCxgICAACIERQ0BIABBOjYCHCAAIAQ2AgwgACAPQQFqNgIUQQAhEAw+CyAAKAIEIQQgAEEANgIEAkAgACAEIAEQsYCAgAAiBEUNACAAQTs2AhwgACAENgIMIAAgAUEBajYCFEEAIRAMPgsgAUEBaiEBDC0LIA9BAWohAQwtCyAAQQA2AhwgACAPNgIUIABB5JKAgAA2AhAgAEEENgIMQQAhEAw7CyAAQTY2AhwgACAENgIUIAAgAjYCDEEAIRAMOgsgAEEuNgIcIAAgDjYCFCAAIAQ2AgxBACEQDDkLIABB0AA2AhwgACABNgIUIABBkZiAgAA2AhAgAEEVNgIMQQAhEAw4CyANQQFqIQEMLAsgAEEVNgIcIAAgATYCFCAAQYKZgIAANgIQIABBFTYCDEEAIRAMNgsgAEEbNgIcIAAgATYCFCAAQZGXgIAANgIQIABBFTYCDEEAIRAMNQsgAEEPNgIcIAAgATYCFCAAQZGXgIAANgIQIABBFTYCDEEAIRAMNAsgAEELNgIcIAAgATYCFCAAQZGXgIAANgIQIABBFTYCDEEAIRAMMwsgAEEaNgIcIAAgATYCFCAAQYKZgIAANgIQIABBFTYCDEEAIRAMMgsgAEELNgIcIAAgATYCFCAAQYKZgIAANgIQIABBFTYCDEEAIRAMMQsgAEEKNgIcIAAgATYCFCAAQeSWgIAANgIQIABBFTYCDEEAIRAMMAsgAEEeNgIcIAAgATYCFCAAQfmXgIAANgIQIABBFTYCDEEAIRAMLwsgAEEANgIcIAAgEDYCFCAAQdqNgIAANgIQIABBFDYCDEEAIRAMLgsgAEEENgIcIAAgATYCFCAAQbCYgIAANgIQIABBFTYCDEEAIRAMLQsgAEEANgIAIAtBAWohCwtBuAEhEAwSCyAAQQA2AgAgEEEBaiEBQfUAIRAMEQsgASEBAkAgAC0AKUEFRw0AQeMAIRAMEQtB4gAhEAwQC0EAIRAgAEEANgIcIABB5JGAgAA2AhAgAEEHNgIMIAAgFEEBajYCFAwoCyAAQQA2AgAgF0EBaiEBQcAAIRAMDgtBASEBCyAAIAE6ACwgAEEANgIAIBdBAWohAQtBKCEQDAsLIAEhAQtBOCEQDAkLAkAgASIPIAJGDQADQAJAIA8tAABBgL6AgABqLQAAIgFBAUYNACABQQJHDQMgD0EBaiEBDAQLIA9BAWoiDyACRw0AC0E+IRAMIgtBPiEQDCELIABBADoALCAPIQEMAQtBCyEQDAYLQTohEAwFCyABQQFqIQFBLSEQDAQLIAAgAToALCAAQQA2AgAgFkEBaiEBQQwhEAwDCyAAQQA2AgAgF0EBaiEBQQohEAwCCyAAQQA2AgALIABBADoALCANIQFBCSEQDAALC0EAIRAgAEEANgIcIAAgCzYCFCAAQc2QgIAANgIQIABBCTYCDAwXC0EAIRAgAEEANgIcIAAgCjYCFCAAQemKgIAANgIQIABBCTYCDAwWC0EAIRAgAEEANgIcIAAgCTYCFCAAQbeQgIAANgIQIABBCTYCDAwVC0EAIRAgAEEANgIcIAAgCDYCFCAAQZyRgIAANgIQIABBCTYCDAwUC0EAIRAgAEEANgIcIAAgATYCFCAAQc2QgIAANgIQIABBCTYCDAwTC0EAIRAgAEEANgIcIAAgATYCFCAAQemKgIAANgIQIABBCTYCDAwSC0EAIRAgAEEANgIcIAAgATYCFCAAQbeQgIAANgIQIABBCTYCDAwRC0EAIRAgAEEANgIcIAAgATYCFCAAQZyRgIAANgIQIABBCTYCDAwQC0EAIRAgAEEANgIcIAAgATYCFCAAQZeVgIAANgIQIABBDzYCDAwPC0EAIRAgAEEANgIcIAAgATYCFCAAQZeVgIAANgIQIABBDzYCDAwOC0EAIRAgAEEANgIcIAAgATYCFCAAQcCSgIAANgIQIABBCzYCDAwNC0EAIRAgAEEANgIcIAAgATYCFCAAQZWJgIAANgIQIABBCzYCDAwMC0EAIRAgAEEANgIcIAAgATYCFCAAQeGPgIAANgIQIABBCjYCDAwLC0EAIRAgAEEANgIcIAAgATYCFCAAQfuPgIAANgIQIABBCjYCDAwKC0EAIRAgAEEANgIcIAAgATYCFCAAQfGZgIAANgIQIABBAjYCDAwJC0EAIRAgAEEANgIcIAAgATYCFCAAQcSUgIAANgIQIABBAjYCDAwIC0EAIRAgAEEANgIcIAAgATYCFCAAQfKVgIAANgIQIABBAjYCDAwHCyAAQQI2AhwgACABNgIUIABBnJqAgAA2AhAgAEEWNgIMQQAhEAwGC0EBIRAMBQtB1AAhECABIgQgAkYNBCADQQhqIAAgBCACQdjCgIAAQQoQxYCAgAAgAygCDCEEIAMoAggOAwEEAgALEMqAgIAAAAsgAEEANgIcIABBtZqAgAA2AhAgAEEXNgIMIAAgBEEBajYCFEEAIRAMAgsgAEEANgIcIAAgBDYCFCAAQcqagIAANgIQIABBCTYCDEEAIRAMAQsCQCABIgQgAkcNAEEiIRAMAQsgAEGJgICAADYCCCAAIAQ2AgRBISEQCyADQRBqJICAgIAAIBALrwEBAn8gASgCACEGAkACQCACIANGDQAgBCAGaiEEIAYgA2ogAmshByACIAZBf3MgBWoiBmohBQNAAkAgAi0AACAELQAARg0AQQIhBAwDCwJAIAYNAEEAIQQgBSECDAMLIAZBf2ohBiAEQQFqIQQgAkEBaiICIANHDQALIAchBiADIQILIABBATYCACABIAY2AgAgACACNgIEDwsgAUEANgIAIAAgBDYCACAAIAI2AgQLCgAgABDHgICAAAvyNgELfyOAgICAAEEQayIBJICAgIAAAkBBACgCoNCAgAANAEEAEMuAgIAAQYDUhIAAayICQdkASQ0AQQAhAwJAQQAoAuDTgIAAIgQNAEEAQn83AuzTgIAAQQBCgICEgICAwAA3AuTTgIAAQQAgAUEIakFwcUHYqtWqBXMiBDYC4NOAgABBAEEANgL004CAAEEAQQA2AsTTgIAAC0EAIAI2AszTgIAAQQBBgNSEgAA2AsjTgIAAQQBBgNSEgAA2ApjQgIAAQQAgBDYCrNCAgABBAEF/NgKo0ICAAANAIANBxNCAgABqIANBuNCAgABqIgQ2AgAgBCADQbDQgIAAaiIFNgIAIANBvNCAgABqIAU2AgAgA0HM0ICAAGogA0HA0ICAAGoiBTYCACAFIAQ2AgAgA0HU0ICAAGogA0HI0ICAAGoiBDYCACAEIAU2AgAgA0HQ0ICAAGog
BDYCACADQSBqIgNBgAJHDQALQYDUhIAAQXhBgNSEgABrQQ9xQQBBgNSEgABBCGpBD3EbIgNqIgRBBGogAkFIaiIFIANrIgNBAXI2AgBBAEEAKALw04CAADYCpNCAgABBACADNgKU0ICAAEEAIAQ2AqDQgIAAQYDUhIAAIAVqQTg2AgQLAkACQAJAAkACQAJAAkACQAJAAkACQAJAIABB7AFLDQACQEEAKAKI0ICAACIGQRAgAEETakFwcSAAQQtJGyICQQN2IgR2IgNBA3FFDQACQAJAIANBAXEgBHJBAXMiBUEDdCIEQbDQgIAAaiIDIARBuNCAgABqKAIAIgQoAggiAkcNAEEAIAZBfiAFd3E2AojQgIAADAELIAMgAjYCCCACIAM2AgwLIARBCGohAyAEIAVBA3QiBUEDcjYCBCAEIAVqIgQgBCgCBEEBcjYCBAwMCyACQQAoApDQgIAAIgdNDQECQCADRQ0AAkACQCADIAR0QQIgBHQiA0EAIANrcnEiA0EAIANrcUF/aiIDIANBDHZBEHEiA3YiBEEFdkEIcSIFIANyIAQgBXYiA0ECdkEEcSIEciADIAR2IgNBAXZBAnEiBHIgAyAEdiIDQQF2QQFxIgRyIAMgBHZqIgRBA3QiA0Gw0ICAAGoiBSADQbjQgIAAaigCACIDKAIIIgBHDQBBACAGQX4gBHdxIgY2AojQgIAADAELIAUgADYCCCAAIAU2AgwLIAMgAkEDcjYCBCADIARBA3QiBGogBCACayIFNgIAIAMgAmoiACAFQQFyNgIEAkAgB0UNACAHQXhxQbDQgIAAaiECQQAoApzQgIAAIQQCQAJAIAZBASAHQQN2dCIIcQ0AQQAgBiAIcjYCiNCAgAAgAiEIDAELIAIoAgghCAsgCCAENgIMIAIgBDYCCCAEIAI2AgwgBCAINgIICyADQQhqIQNBACAANgKc0ICAAEEAIAU2ApDQgIAADAwLQQAoAozQgIAAIglFDQEgCUEAIAlrcUF/aiIDIANBDHZBEHEiA3YiBEEFdkEIcSIFIANyIAQgBXYiA0ECdkEEcSIEciADIAR2IgNBAXZBAnEiBHIgAyAEdiIDQQF2QQFxIgRyIAMgBHZqQQJ0QbjSgIAAaigCACIAKAIEQXhxIAJrIQQgACEFAkADQAJAIAUoAhAiAw0AIAVBFGooAgAiA0UNAgsgAygCBEF4cSACayIFIAQgBSAESSIFGyEEIAMgACAFGyEAIAMhBQwACwsgACgCGCEKAkAgACgCDCIIIABGDQAgACgCCCIDQQAoApjQgIAASRogCCADNgIIIAMgCDYCDAwLCwJAIABBFGoiBSgCACIDDQAgACgCECIDRQ0DIABBEGohBQsDQCAFIQsgAyIIQRRqIgUoAgAiAw0AIAhBEGohBSAIKAIQIgMNAAsgC0EANgIADAoLQX8hAiAAQb9/Sw0AIABBE2oiA0FwcSECQQAoAozQgIAAIgdFDQBBACELAkAgAkGAAkkNAEEfIQsgAkH///8HSw0AIANBCHYiAyADQYD+P2pBEHZBCHEiA3QiBCAEQYDgH2pBEHZBBHEiBHQiBSAFQYCAD2pBEHZBAnEiBXRBD3YgAyAEciAFcmsiA0EBdCACIANBFWp2QQFxckEcaiELC0EAIAJrIQQCQAJAAkACQCALQQJ0QbjSgIAAaigCACIFDQBBACEDQQAhCAwBC0EAIQMgAkEAQRkgC0EBdmsgC0EfRht0IQBBACEIA0ACQCAFKAIEQXhxIAJrIgYgBE8NACAGIQQgBSEIIAYNAEEAIQQgBSEIIAUhAwwDCyADIAVBFGooAgAiBiAGIAUgAEEddkEEcWpBEGooAgAiBUYbIAMgBhshAyAAQQF0IQAgBQ0ACwsCQCADIAhyDQBBACEIQQIgC3QiA0EAIANrciAHcSIDRQ0DIANBACADa3FBf2oiAyADQQx2QRBxIgN2IgVBBXZBCHEiACADciAFIAB2IgNBAnZBBHEiBXIgAyAFdiIDQQF2QQJxIgVyIAMgBXYiA0EBdkEBcSIFciADIAV2akECdEG40oCAAGooAgAhAwsgA0UNAQsDQCADKAIEQXhxIAJrIgYgBEkhAAJAIAMoAhAiBQ0AIANBFGooAgAhBQsgBiAEIAAbIQQgAyAIIAAbIQggBSEDIAUNAAsLIAhFDQAgBEEAKAKQ0ICAACACa08NACAIKAIYIQsCQCAIKAIMIgAgCEYNACAIKAIIIgNBACgCmNCAgABJGiAAIAM2AgggAyAANgIMDAkLAkAgCEEUaiIFKAIAIgMNACAIKAIQIgNFDQMgCEEQaiEFCwNAIAUhBiADIgBBFGoiBSgCACIDDQAgAEEQaiEFIAAoAhAiAw0ACyAGQQA2AgAMCAsCQEEAKAKQ0ICAACIDIAJJDQBBACgCnNCAgAAhBAJAAkAgAyACayIFQRBJDQAgBCACaiIAIAVBAXI2AgRBACAFNgKQ0ICAAEEAIAA2ApzQgIAAIAQgA2ogBTYCACAEIAJBA3I2AgQMAQsgBCADQQNyNgIEIAQgA2oiAyADKAIEQQFyNgIEQQBBADYCnNCAgABBAEEANgKQ0ICAAAsgBEEIaiEDDAoLAkBBACgClNCAgAAiACACTQ0AQQAoAqDQgIAAIgMgAmoiBCAAIAJrIgVBAXI2AgRBACAFNgKU0ICAAEEAIAQ2AqDQgIAAIAMgAkEDcjYCBCADQQhqIQMMCgsCQAJAQQAoAuDTgIAARQ0AQQAoAujTgIAAIQQMAQtBAEJ/NwLs04CAAEEAQoCAhICAgMAANwLk04CAAEEAIAFBDGpBcHFB2KrVqgVzNgLg04CAAEEAQQA2AvTTgIAAQQBBADYCxNOAgABBgIAEIQQLQQAhAwJAIAQgAkHHAGoiB2oiBkEAIARrIgtxIgggAksNAEEAQTA2AvjTgIAADAoLAkBBACgCwNOAgAAiA0UNAAJAQQAoArjTgIAAIgQgCGoiBSAETQ0AIAUgA00NAQtBACEDQQBBMDYC+NOAgAAMCgtBAC0AxNOAgABBBHENBAJAAkACQEEAKAKg0ICAACIERQ0AQcjTgIAAIQMDQAJAIAMoAgAiBSAESw0AIAUgAygCBGogBEsNAwsgAygCCCIDDQALC0EAEMuAgIAAIgBBf0YNBSAIIQYCQEEAKALk04CAACIDQX9qIgQgAHFFDQAgCCAAayAEIABqQQAgA2txaiEGCyAGIAJNDQUgBkH+////B0sNBQJAQQAoAsDTgIAAIgNFDQBBACgCuNOAgAAiBCAGaiIFIARNDQYgBSADSw0GCyAGEMuAgIAAIgMgAEcNAQwHCyAGIABrIAtxIgZB/v///wdLDQQgBhDLgICAACIAIAMoAgAgAygCBGpGDQMgACEDCwJAIANBf0YNACACQcgAaiAGTQ0AAkAgByAGa0EAKALo04CAACIEakEAIARrcSIEQf7///8HTQ0AIAMhAAwHCwJAIAQQy4CAgABBf0YNACAEIAZqIQYgAyEADAcLQQAgBmsQy4CAgAAaDAQLIAMhACA
DQX9HDQUMAwtBACEIDAcLQQAhAAwFCyAAQX9HDQILQQBBACgCxNOAgABBBHI2AsTTgIAACyAIQf7///8HSw0BIAgQy4CAgAAhAEEAEMuAgIAAIQMgAEF/Rg0BIANBf0YNASAAIANPDQEgAyAAayIGIAJBOGpNDQELQQBBACgCuNOAgAAgBmoiAzYCuNOAgAACQCADQQAoArzTgIAATQ0AQQAgAzYCvNOAgAALAkACQAJAAkBBACgCoNCAgAAiBEUNAEHI04CAACEDA0AgACADKAIAIgUgAygCBCIIakYNAiADKAIIIgMNAAwDCwsCQAJAQQAoApjQgIAAIgNFDQAgACADTw0BC0EAIAA2ApjQgIAAC0EAIQNBACAGNgLM04CAAEEAIAA2AsjTgIAAQQBBfzYCqNCAgABBAEEAKALg04CAADYCrNCAgABBAEEANgLU04CAAANAIANBxNCAgABqIANBuNCAgABqIgQ2AgAgBCADQbDQgIAAaiIFNgIAIANBvNCAgABqIAU2AgAgA0HM0ICAAGogA0HA0ICAAGoiBTYCACAFIAQ2AgAgA0HU0ICAAGogA0HI0ICAAGoiBDYCACAEIAU2AgAgA0HQ0ICAAGogBDYCACADQSBqIgNBgAJHDQALIABBeCAAa0EPcUEAIABBCGpBD3EbIgNqIgQgBkFIaiIFIANrIgNBAXI2AgRBAEEAKALw04CAADYCpNCAgABBACADNgKU0ICAAEEAIAQ2AqDQgIAAIAAgBWpBODYCBAwCCyADLQAMQQhxDQAgBCAFSQ0AIAQgAE8NACAEQXggBGtBD3FBACAEQQhqQQ9xGyIFaiIAQQAoApTQgIAAIAZqIgsgBWsiBUEBcjYCBCADIAggBmo2AgRBAEEAKALw04CAADYCpNCAgABBACAFNgKU0ICAAEEAIAA2AqDQgIAAIAQgC2pBODYCBAwBCwJAIABBACgCmNCAgAAiCE8NAEEAIAA2ApjQgIAAIAAhCAsgACAGaiEFQcjTgIAAIQMCQAJAAkACQAJAAkACQANAIAMoAgAgBUYNASADKAIIIgMNAAwCCwsgAy0ADEEIcUUNAQtByNOAgAAhAwNAAkAgAygCACIFIARLDQAgBSADKAIEaiIFIARLDQMLIAMoAgghAwwACwsgAyAANgIAIAMgAygCBCAGajYCBCAAQXggAGtBD3FBACAAQQhqQQ9xG2oiCyACQQNyNgIEIAVBeCAFa0EPcUEAIAVBCGpBD3EbaiIGIAsgAmoiAmshAwJAIAYgBEcNAEEAIAI2AqDQgIAAQQBBACgClNCAgAAgA2oiAzYClNCAgAAgAiADQQFyNgIEDAMLAkAgBkEAKAKc0ICAAEcNAEEAIAI2ApzQgIAAQQBBACgCkNCAgAAgA2oiAzYCkNCAgAAgAiADQQFyNgIEIAIgA2ogAzYCAAwDCwJAIAYoAgQiBEEDcUEBRw0AIARBeHEhBwJAAkAgBEH/AUsNACAGKAIIIgUgBEEDdiIIQQN0QbDQgIAAaiIARhoCQCAGKAIMIgQgBUcNAEEAQQAoAojQgIAAQX4gCHdxNgKI0ICAAAwCCyAEIABGGiAEIAU2AgggBSAENgIMDAELIAYoAhghCQJAAkAgBigCDCIAIAZGDQAgBigCCCIEIAhJGiAAIAQ2AgggBCAANgIMDAELAkAgBkEUaiIEKAIAIgUNACAGQRBqIgQoAgAiBQ0AQQAhAAwBCwNAIAQhCCAFIgBBFGoiBCgCACIFDQAgAEEQaiEEIAAoAhAiBQ0ACyAIQQA2AgALIAlFDQACQAJAIAYgBigCHCIFQQJ0QbjSgIAAaiIEKAIARw0AIAQgADYCACAADQFBAEEAKAKM0ICAAEF+IAV3cTYCjNCAgAAMAgsgCUEQQRQgCSgCECAGRhtqIAA2AgAgAEUNAQsgACAJNgIYAkAgBigCECIERQ0AIAAgBDYCECAEIAA2AhgLIAYoAhQiBEUNACAAQRRqIAQ2AgAgBCAANgIYCyAHIANqIQMgBiAHaiIGKAIEIQQLIAYgBEF+cTYCBCACIANqIAM2AgAgAiADQQFyNgIEAkAgA0H/AUsNACADQXhxQbDQgIAAaiEEAkACQEEAKAKI0ICAACIFQQEgA0EDdnQiA3ENAEEAIAUgA3I2AojQgIAAIAQhAwwBCyAEKAIIIQMLIAMgAjYCDCAEIAI2AgggAiAENgIMIAIgAzYCCAwDC0EfIQQCQCADQf///wdLDQAgA0EIdiIEIARBgP4/akEQdkEIcSIEdCIFIAVBgOAfakEQdkEEcSIFdCIAIABBgIAPakEQdkECcSIAdEEPdiAEIAVyIAByayIEQQF0IAMgBEEVanZBAXFyQRxqIQQLIAIgBDYCHCACQgA3AhAgBEECdEG40oCAAGohBQJAQQAoAozQgIAAIgBBASAEdCIIcQ0AIAUgAjYCAEEAIAAgCHI2AozQgIAAIAIgBTYCGCACIAI2AgggAiACNgIMDAMLIANBAEEZIARBAXZrIARBH0YbdCEEIAUoAgAhAANAIAAiBSgCBEF4cSADRg0CIARBHXYhACAEQQF0IQQgBSAAQQRxakEQaiIIKAIAIgANAAsgCCACNgIAIAIgBTYCGCACIAI2AgwgAiACNgIIDAILIABBeCAAa0EPcUEAIABBCGpBD3EbIgNqIgsgBkFIaiIIIANrIgNBAXI2AgQgACAIakE4NgIEIAQgBUE3IAVrQQ9xQQAgBUFJakEPcRtqQUFqIgggCCAEQRBqSRsiCEEjNgIEQQBBACgC8NOAgAA2AqTQgIAAQQAgAzYClNCAgABBACALNgKg0ICAACAIQRBqQQApAtDTgIAANwIAIAhBACkCyNOAgAA3AghBACAIQQhqNgLQ04CAAEEAIAY2AszTgIAAQQAgADYCyNOAgABBAEEANgLU04CAACAIQSRqIQMDQCADQQc2AgAgA0EEaiIDIAVJDQALIAggBEYNAyAIIAgoAgRBfnE2AgQgCCAIIARrIgA2AgAgBCAAQQFyNgIEAkAgAEH/AUsNACAAQXhxQbDQgIAAaiEDAkACQEEAKAKI0ICAACIFQQEgAEEDdnQiAHENAEEAIAUgAHI2AojQgIAAIAMhBQwBCyADKAIIIQULIAUgBDYCDCADIAQ2AgggBCADNgIMIAQgBTYCCAwEC0EfIQMCQCAAQf///wdLDQAgAEEIdiIDIANBgP4/akEQdkEIcSIDdCIFIAVBgOAfakEQdkEEcSIFdCIIIAhBgIAPakEQdkECcSIIdEEPdiADIAVyIAhyayIDQQF0IAAgA0EVanZBAXFyQRxqIQMLIAQgAzYCHCAEQgA3AhAgA0ECdEG40oCAAGohBQJAQQAoAozQgIAAIghBASADdCIGcQ0AIAUgBDYCAEEAIAggBnI2AozQgIAAIAQgBTYCGCAEIAQ2AgggBCAENgIMDAQLIABBAEEZIANBAXZrIANBH0YbdCEDIAUoAgAhCANAIAgiBSgCBEF4cSAARg0DIANBHXYhCCADQQF0IQMgBSAIQQRxakEQai
IGKAIAIggNAAsgBiAENgIAIAQgBTYCGCAEIAQ2AgwgBCAENgIIDAMLIAUoAggiAyACNgIMIAUgAjYCCCACQQA2AhggAiAFNgIMIAIgAzYCCAsgC0EIaiEDDAULIAUoAggiAyAENgIMIAUgBDYCCCAEQQA2AhggBCAFNgIMIAQgAzYCCAtBACgClNCAgAAiAyACTQ0AQQAoAqDQgIAAIgQgAmoiBSADIAJrIgNBAXI2AgRBACADNgKU0ICAAEEAIAU2AqDQgIAAIAQgAkEDcjYCBCAEQQhqIQMMAwtBACEDQQBBMDYC+NOAgAAMAgsCQCALRQ0AAkACQCAIIAgoAhwiBUECdEG40oCAAGoiAygCAEcNACADIAA2AgAgAA0BQQAgB0F+IAV3cSIHNgKM0ICAAAwCCyALQRBBFCALKAIQIAhGG2ogADYCACAARQ0BCyAAIAs2AhgCQCAIKAIQIgNFDQAgACADNgIQIAMgADYCGAsgCEEUaigCACIDRQ0AIABBFGogAzYCACADIAA2AhgLAkACQCAEQQ9LDQAgCCAEIAJqIgNBA3I2AgQgCCADaiIDIAMoAgRBAXI2AgQMAQsgCCACaiIAIARBAXI2AgQgCCACQQNyNgIEIAAgBGogBDYCAAJAIARB/wFLDQAgBEF4cUGw0ICAAGohAwJAAkBBACgCiNCAgAAiBUEBIARBA3Z0IgRxDQBBACAFIARyNgKI0ICAACADIQQMAQsgAygCCCEECyAEIAA2AgwgAyAANgIIIAAgAzYCDCAAIAQ2AggMAQtBHyEDAkAgBEH///8HSw0AIARBCHYiAyADQYD+P2pBEHZBCHEiA3QiBSAFQYDgH2pBEHZBBHEiBXQiAiACQYCAD2pBEHZBAnEiAnRBD3YgAyAFciACcmsiA0EBdCAEIANBFWp2QQFxckEcaiEDCyAAIAM2AhwgAEIANwIQIANBAnRBuNKAgABqIQUCQCAHQQEgA3QiAnENACAFIAA2AgBBACAHIAJyNgKM0ICAACAAIAU2AhggACAANgIIIAAgADYCDAwBCyAEQQBBGSADQQF2ayADQR9GG3QhAyAFKAIAIQICQANAIAIiBSgCBEF4cSAERg0BIANBHXYhAiADQQF0IQMgBSACQQRxakEQaiIGKAIAIgINAAsgBiAANgIAIAAgBTYCGCAAIAA2AgwgACAANgIIDAELIAUoAggiAyAANgIMIAUgADYCCCAAQQA2AhggACAFNgIMIAAgAzYCCAsgCEEIaiEDDAELAkAgCkUNAAJAAkAgACAAKAIcIgVBAnRBuNKAgABqIgMoAgBHDQAgAyAINgIAIAgNAUEAIAlBfiAFd3E2AozQgIAADAILIApBEEEUIAooAhAgAEYbaiAINgIAIAhFDQELIAggCjYCGAJAIAAoAhAiA0UNACAIIAM2AhAgAyAINgIYCyAAQRRqKAIAIgNFDQAgCEEUaiADNgIAIAMgCDYCGAsCQAJAIARBD0sNACAAIAQgAmoiA0EDcjYCBCAAIANqIgMgAygCBEEBcjYCBAwBCyAAIAJqIgUgBEEBcjYCBCAAIAJBA3I2AgQgBSAEaiAENgIAAkAgB0UNACAHQXhxQbDQgIAAaiECQQAoApzQgIAAIQMCQAJAQQEgB0EDdnQiCCAGcQ0AQQAgCCAGcjYCiNCAgAAgAiEIDAELIAIoAgghCAsgCCADNgIMIAIgAzYCCCADIAI2AgwgAyAINgIIC0EAIAU2ApzQgIAAQQAgBDYCkNCAgAALIABBCGohAwsgAUEQaiSAgICAACADCwoAIAAQyYCAgAAL4g0BB38CQCAARQ0AIABBeGoiASAAQXxqKAIAIgJBeHEiAGohAwJAIAJBAXENACACQQNxRQ0BIAEgASgCACICayIBQQAoApjQgIAAIgRJDQEgAiAAaiEAAkAgAUEAKAKc0ICAAEYNAAJAIAJB/wFLDQAgASgCCCIEIAJBA3YiBUEDdEGw0ICAAGoiBkYaAkAgASgCDCICIARHDQBBAEEAKAKI0ICAAEF+IAV3cTYCiNCAgAAMAwsgAiAGRhogAiAENgIIIAQgAjYCDAwCCyABKAIYIQcCQAJAIAEoAgwiBiABRg0AIAEoAggiAiAESRogBiACNgIIIAIgBjYCDAwBCwJAIAFBFGoiAigCACIEDQAgAUEQaiICKAIAIgQNAEEAIQYMAQsDQCACIQUgBCIGQRRqIgIoAgAiBA0AIAZBEGohAiAGKAIQIgQNAAsgBUEANgIACyAHRQ0BAkACQCABIAEoAhwiBEECdEG40oCAAGoiAigCAEcNACACIAY2AgAgBg0BQQBBACgCjNCAgABBfiAEd3E2AozQgIAADAMLIAdBEEEUIAcoAhAgAUYbaiAGNgIAIAZFDQILIAYgBzYCGAJAIAEoAhAiAkUNACAGIAI2AhAgAiAGNgIYCyABKAIUIgJFDQEgBkEUaiACNgIAIAIgBjYCGAwBCyADKAIEIgJBA3FBA0cNACADIAJBfnE2AgRBACAANgKQ0ICAACABIABqIAA2AgAgASAAQQFyNgIEDwsgASADTw0AIAMoAgQiAkEBcUUNAAJAAkAgAkECcQ0AAkAgA0EAKAKg0ICAAEcNAEEAIAE2AqDQgIAAQQBBACgClNCAgAAgAGoiADYClNCAgAAgASAAQQFyNgIEIAFBACgCnNCAgABHDQNBAEEANgKQ0ICAAEEAQQA2ApzQgIAADwsCQCADQQAoApzQgIAARw0AQQAgATYCnNCAgABBAEEAKAKQ0ICAACAAaiIANgKQ0ICAACABIABBAXI2AgQgASAAaiAANgIADwsgAkF4cSAAaiEAAkACQCACQf8BSw0AIAMoAggiBCACQQN2IgVBA3RBsNCAgABqIgZGGgJAIAMoAgwiAiAERw0AQQBBACgCiNCAgABBfiAFd3E2AojQgIAADAILIAIgBkYaIAIgBDYCCCAEIAI2AgwMAQsgAygCGCEHAkACQCADKAIMIgYgA0YNACADKAIIIgJBACgCmNCAgABJGiAGIAI2AgggAiAGNgIMDAELAkAgA0EUaiICKAIAIgQNACADQRBqIgIoAgAiBA0AQQAhBgwBCwNAIAIhBSAEIgZBFGoiAigCACIEDQAgBkEQaiECIAYoAhAiBA0ACyAFQQA2AgALIAdFDQACQAJAIAMgAygCHCIEQQJ0QbjSgIAAaiICKAIARw0AIAIgBjYCACAGDQFBAEEAKAKM0ICAAEF+IAR3cTYCjNCAgAAMAgsgB0EQQRQgBygCECADRhtqIAY2AgAgBkUNAQsgBiAHNgIYAkAgAygCECICRQ0AIAYgAjYCECACIAY2AhgLIAMoAhQiAkUNACAGQRRqIAI2AgAgAiAGNgIYCyABIABqIAA2AgAgASAAQQFyNgIEIAFBACgCnNCAgABHDQFBACAANgKQ0ICAAA8LIAMgAkF+cTYCBCABIABqIAA2AgAgASAAQQFyNgIECwJAIABB/wFLDQAgAEF4cUGw0ICAAGohAgJAAkBBACgCiNCAgAAiBEEBI
ABBA3Z0IgBxDQBBACAEIAByNgKI0ICAACACIQAMAQsgAigCCCEACyAAIAE2AgwgAiABNgIIIAEgAjYCDCABIAA2AggPC0EfIQICQCAAQf///wdLDQAgAEEIdiICIAJBgP4/akEQdkEIcSICdCIEIARBgOAfakEQdkEEcSIEdCIGIAZBgIAPakEQdkECcSIGdEEPdiACIARyIAZyayICQQF0IAAgAkEVanZBAXFyQRxqIQILIAEgAjYCHCABQgA3AhAgAkECdEG40oCAAGohBAJAAkBBACgCjNCAgAAiBkEBIAJ0IgNxDQAgBCABNgIAQQAgBiADcjYCjNCAgAAgASAENgIYIAEgATYCCCABIAE2AgwMAQsgAEEAQRkgAkEBdmsgAkEfRht0IQIgBCgCACEGAkADQCAGIgQoAgRBeHEgAEYNASACQR12IQYgAkEBdCECIAQgBkEEcWpBEGoiAygCACIGDQALIAMgATYCACABIAQ2AhggASABNgIMIAEgATYCCAwBCyAEKAIIIgAgATYCDCAEIAE2AgggAUEANgIYIAEgBDYCDCABIAA2AggLQQBBACgCqNCAgABBf2oiAUF/IAEbNgKo0ICAAAsLBAAAAAtOAAJAIAANAD8AQRB0DwsCQCAAQf//A3ENACAAQX9MDQACQCAAQRB2QAAiAEF/Rw0AQQBBMDYC+NOAgABBfw8LIABBEHQPCxDKgICAAAAL8gICA38BfgJAIAJFDQAgACABOgAAIAIgAGoiA0F/aiABOgAAIAJBA0kNACAAIAE6AAIgACABOgABIANBfWogAToAACADQX5qIAE6AAAgAkEHSQ0AIAAgAToAAyADQXxqIAE6AAAgAkEJSQ0AIABBACAAa0EDcSIEaiIDIAFB/wFxQYGChAhsIgE2AgAgAyACIARrQXxxIgRqIgJBfGogATYCACAEQQlJDQAgAyABNgIIIAMgATYCBCACQXhqIAE2AgAgAkF0aiABNgIAIARBGUkNACADIAE2AhggAyABNgIUIAMgATYCECADIAE2AgwgAkFwaiABNgIAIAJBbGogATYCACACQWhqIAE2AgAgAkFkaiABNgIAIAQgA0EEcUEYciIFayICQSBJDQAgAa1CgYCAgBB+IQYgAyAFaiEBA0AgASAGNwMYIAEgBjcDECABIAY3AwggASAGNwMAIAFBIGohASACQWBqIgJBH0sNAAsLIAALC45IAQBBgAgLhkgBAAAAAgAAAAMAAAAAAAAAAAAAAAQAAAAFAAAAAAAAAAAAAAAGAAAABwAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEludmFsaWQgY2hhciBpbiB1cmwgcXVlcnkAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9ib2R5AENvbnRlbnQtTGVuZ3RoIG92ZXJmbG93AENodW5rIHNpemUgb3ZlcmZsb3cAUmVzcG9uc2Ugb3ZlcmZsb3cASW52YWxpZCBtZXRob2QgZm9yIEhUVFAveC54IHJlcXVlc3QASW52YWxpZCBtZXRob2QgZm9yIFJUU1AveC54IHJlcXVlc3QARXhwZWN0ZWQgU09VUkNFIG1ldGhvZCBmb3IgSUNFL3gueCByZXF1ZXN0AEludmFsaWQgY2hhciBpbiB1cmwgZnJhZ21lbnQgc3RhcnQARXhwZWN0ZWQgZG90AFNwYW4gY2FsbGJhY2sgZXJyb3IgaW4gb25fc3RhdHVzAEludmFsaWQgcmVzcG9uc2Ugc3RhdHVzAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5rIGV4dGVuc2lvbnMAVXNlciBjYWxsYmFjayBlcnJvcgBgb25fcmVzZXRgIGNhbGxiYWNrIGVycm9yAGBvbl9jaHVua19oZWFkZXJgIGNhbGxiYWNrIGVycm9yAGBvbl9tZXNzYWdlX2JlZ2luYCBjYWxsYmFjayBlcnJvcgBgb25fY2h1bmtfZXh0ZW5zaW9uX3ZhbHVlYCBjYWxsYmFjayBlcnJvcgBgb25fc3RhdHVzX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fdmVyc2lvbl9jb21wbGV0ZWAgY2FsbGJhY2sgZXJyb3IAYG9uX3VybF9jb21wbGV0ZWAgY2FsbGJhY2sgZXJyb3IAYG9uX2NodW5rX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25faGVhZGVyX3ZhbHVlX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fbWVzc2FnZV9jb21wbGV0ZWAgY2FsbGJhY2sgZXJyb3IAYG9uX21ldGhvZF9jb21wbGV0ZWAgY2FsbGJhY2sgZXJyb3IAYG9uX2hlYWRlcl9maWVsZF9jb21wbGV0ZWAgY2FsbGJhY2sgZXJyb3IAYG9uX2NodW5rX2V4dGVuc2lvbl9uYW1lYCBjYWxsYmFjayBlcnJvcgBVbmV4cGVjdGVkIGNoYXIgaW4gdXJsIHNlcnZlcgBJbnZhbGlkIGhlYWRlciB2YWx1ZSBjaGFyAEludmFsaWQgaGVhZGVyIGZpZWxkIGNoYXIAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl92ZXJzaW9uAEludmFsaWQgbWlub3IgdmVyc2lvbgBJbnZhbGlkIG1ham9yIHZlcnNpb24ARXhwZWN0ZWQgc3BhY2UgYWZ0ZXIgdmVyc2lvbgBFeHBlY3RlZCBDUkxGIGFmdGVyIHZlcnNpb24ASW52YWxpZCBIVFRQIHZlcnNpb24ASW52YWxpZCBoZWFkZXIgdG9rZW4AU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl91cmwASW52YWxpZCBjaGFyYWN0ZXJzIGluIHVybABVbmV4cGVjdGVkIHN0YXJ0IGNoYXIgaW4gdXJsAERvdWJsZSBAIGluIHVybABFbXB0eSBDb250ZW50LUxlbmd0aABJbnZhbGlkIGNoYXJhY3RlciBpbiBDb250ZW50LUxlbmd0aABEdXBsaWNhdGUgQ29udGVudC1MZW5ndGgASW52YWxpZCBjaGFyIGluIHVybCBwYXRoAENvbnRlbnQtTGVuZ3RoIGNhbid0IGJlIHByZXNlbnQgd2l0aCBUcmFuc2Zlci1FbmNvZGluZwBJbnZhbGlkIGNoYXJhY3RlciBpbiBjaHVuayBzaXplAFNwYW4gY2FsbGJhY2sgZXJyb3IgaW4gb25faGVhZGVyX3ZhbHVlAFNwYW4gY2FsbGJhY2sgZXJyb3IgaW4gb25fY2h1bmtfZXh0ZW5zaW9uX3ZhbHVlAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5rIGV4dGVuc2lvbnMgdmFsdWUATWlzc2luZyBleHBlY3RlZCBMRiBhZnRlciBoZWFkZXIgdmFsdWUASW52YWxpZCBgVHJhbnNmZXItRW5jb2RpbmdgIGhlYWRlciB2YWx1ZQBJbnZhbGlkIGNo
YXJhY3RlciBpbiBjaHVuayBleHRlbnNpb25zIHF1b3RlIHZhbHVlAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5rIGV4dGVuc2lvbnMgcXVvdGVkIHZhbHVlAFBhdXNlZCBieSBvbl9oZWFkZXJzX2NvbXBsZXRlAEludmFsaWQgRU9GIHN0YXRlAG9uX3Jlc2V0IHBhdXNlAG9uX2NodW5rX2hlYWRlciBwYXVzZQBvbl9tZXNzYWdlX2JlZ2luIHBhdXNlAG9uX2NodW5rX2V4dGVuc2lvbl92YWx1ZSBwYXVzZQBvbl9zdGF0dXNfY29tcGxldGUgcGF1c2UAb25fdmVyc2lvbl9jb21wbGV0ZSBwYXVzZQBvbl91cmxfY29tcGxldGUgcGF1c2UAb25fY2h1bmtfY29tcGxldGUgcGF1c2UAb25faGVhZGVyX3ZhbHVlX2NvbXBsZXRlIHBhdXNlAG9uX21lc3NhZ2VfY29tcGxldGUgcGF1c2UAb25fbWV0aG9kX2NvbXBsZXRlIHBhdXNlAG9uX2hlYWRlcl9maWVsZF9jb21wbGV0ZSBwYXVzZQBvbl9jaHVua19leHRlbnNpb25fbmFtZSBwYXVzZQBVbmV4cGVjdGVkIHNwYWNlIGFmdGVyIHN0YXJ0IGxpbmUAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9jaHVua19leHRlbnNpb25fbmFtZQBJbnZhbGlkIGNoYXJhY3RlciBpbiBjaHVuayBleHRlbnNpb25zIG5hbWUAUGF1c2Ugb24gQ09OTkVDVC9VcGdyYWRlAFBhdXNlIG9uIFBSSS9VcGdyYWRlAEV4cGVjdGVkIEhUVFAvMiBDb25uZWN0aW9uIFByZWZhY2UAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9tZXRob2QARXhwZWN0ZWQgc3BhY2UgYWZ0ZXIgbWV0aG9kAFNwYW4gY2FsbGJhY2sgZXJyb3IgaW4gb25faGVhZGVyX2ZpZWxkAFBhdXNlZABJbnZhbGlkIHdvcmQgZW5jb3VudGVyZWQASW52YWxpZCBtZXRob2QgZW5jb3VudGVyZWQAVW5leHBlY3RlZCBjaGFyIGluIHVybCBzY2hlbWEAUmVxdWVzdCBoYXMgaW52YWxpZCBgVHJhbnNmZXItRW5jb2RpbmdgAFNXSVRDSF9QUk9YWQBVU0VfUFJPWFkATUtBQ1RJVklUWQBVTlBST0NFU1NBQkxFX0VOVElUWQBDT1BZAE1PVkVEX1BFUk1BTkVOVExZAFRPT19FQVJMWQBOT1RJRlkARkFJTEVEX0RFUEVOREVOQ1kAQkFEX0dBVEVXQVkAUExBWQBQVVQAQ0hFQ0tPVVQAR0FURVdBWV9USU1FT1VUAFJFUVVFU1RfVElNRU9VVABORVRXT1JLX0NPTk5FQ1RfVElNRU9VVABDT05ORUNUSU9OX1RJTUVPVVQATE9HSU5fVElNRU9VVABORVRXT1JLX1JFQURfVElNRU9VVABQT1NUAE1JU0RJUkVDVEVEX1JFUVVFU1QAQ0xJRU5UX0NMT1NFRF9SRVFVRVNUAENMSUVOVF9DTE9TRURfTE9BRF9CQUxBTkNFRF9SRVFVRVNUAEJBRF9SRVFVRVNUAEhUVFBfUkVRVUVTVF9TRU5UX1RPX0hUVFBTX1BPUlQAUkVQT1JUAElNX0FfVEVBUE9UAFJFU0VUX0NPTlRFTlQATk9fQ09OVEVOVABQQVJUSUFMX0NPTlRFTlQASFBFX0lOVkFMSURfQ09OU1RBTlQASFBFX0NCX1JFU0VUAEdFVABIUEVfU1RSSUNUAENPTkZMSUNUAFRFTVBPUkFSWV9SRURJUkVDVABQRVJNQU5FTlRfUkVESVJFQ1QAQ09OTkVDVABNVUxUSV9TVEFUVVMASFBFX0lOVkFMSURfU1RBVFVTAFRPT19NQU5ZX1JFUVVFU1RTAEVBUkxZX0hJTlRTAFVOQVZBSUxBQkxFX0ZPUl9MRUdBTF9SRUFTT05TAE9QVElPTlMAU1dJVENISU5HX1BST1RPQ09MUwBWQVJJQU5UX0FMU09fTkVHT1RJQVRFUwBNVUxUSVBMRV9DSE9JQ0VTAElOVEVSTkFMX1NFUlZFUl9FUlJPUgBXRUJfU0VSVkVSX1VOS05PV05fRVJST1IAUkFJTEdVTl9FUlJPUgBJREVOVElUWV9QUk9WSURFUl9BVVRIRU5USUNBVElPTl9FUlJPUgBTU0xfQ0VSVElGSUNBVEVfRVJST1IASU5WQUxJRF9YX0ZPUldBUkRFRF9GT1IAU0VUX1BBUkFNRVRFUgBHRVRfUEFSQU1FVEVSAEhQRV9VU0VSAFNFRV9PVEhFUgBIUEVfQ0JfQ0hVTktfSEVBREVSAE1LQ0FMRU5EQVIAU0VUVVAAV0VCX1NFUlZFUl9JU19ET1dOAFRFQVJET1dOAEhQRV9DTE9TRURfQ09OTkVDVElPTgBIRVVSSVNUSUNfRVhQSVJBVElPTgBESVNDT05ORUNURURfT1BFUkFUSU9OAE5PTl9BVVRIT1JJVEFUSVZFX0lORk9STUFUSU9OAEhQRV9JTlZBTElEX1ZFUlNJT04ASFBFX0NCX01FU1NBR0VfQkVHSU4AU0lURV9JU19GUk9aRU4ASFBFX0lOVkFMSURfSEVBREVSX1RPS0VOAElOVkFMSURfVE9LRU4ARk9SQklEREVOAEVOSEFOQ0VfWU9VUl9DQUxNAEhQRV9JTlZBTElEX1VSTABCTE9DS0VEX0JZX1BBUkVOVEFMX0NPTlRST0wATUtDT0wAQUNMAEhQRV9JTlRFUk5BTABSRVFVRVNUX0hFQURFUl9GSUVMRFNfVE9PX0xBUkdFX1VOT0ZGSUNJQUwASFBFX09LAFVOTElOSwBVTkxPQ0sAUFJJAFJFVFJZX1dJVEgASFBFX0lOVkFMSURfQ09OVEVOVF9MRU5HVEgASFBFX1VORVhQRUNURURfQ09OVEVOVF9MRU5HVEgARkxVU0gAUFJPUFBBVENIAE0tU0VBUkNIAFVSSV9UT09fTE9ORwBQUk9DRVNTSU5HAE1JU0NFTExBTkVPVVNfUEVSU0lTVEVOVF9XQVJOSU5HAE1JU0NFTExBTkVPVVNfV0FSTklORwBIUEVfSU5WQUxJRF9UUkFOU0ZFUl9FTkNPRElORwBFeHBlY3RlZCBDUkxGAEhQRV9JTlZBTElEX0NIVU5LX1NJWkUATU9WRQBDT05USU5VRQBIUEVfQ0JfU1RBVFVTX0NPTVBMRVRFAEhQRV9DQl9IRUFERVJTX0NPTVBMRVRFAEhQRV9DQl9WRVJTSU9OX0NPTVBMRVRFAEhQRV9DQl9VUkxfQ09NUExFVEUASFBFX0NCX0NIVU5LX0NPTVBMRVRFAEhQRV9DQl9IRUFERVJfVkFMVUVfQ09NUExFVEUASFBFX0NCX0NIVU5LX0VYVEVOU0lPTl9WQUxVRV9DT01QTEVURQBIUEVfQ0JfQ0hVTkt
fRVhURU5TSU9OX05BTUVfQ09NUExFVEUASFBFX0NCX01FU1NBR0VfQ09NUExFVEUASFBFX0NCX01FVEhPRF9DT01QTEVURQBIUEVfQ0JfSEVBREVSX0ZJRUxEX0NPTVBMRVRFAERFTEVURQBIUEVfSU5WQUxJRF9FT0ZfU1RBVEUASU5WQUxJRF9TU0xfQ0VSVElGSUNBVEUAUEFVU0UATk9fUkVTUE9OU0UAVU5TVVBQT1JURURfTUVESUFfVFlQRQBHT05FAE5PVF9BQ0NFUFRBQkxFAFNFUlZJQ0VfVU5BVkFJTEFCTEUAUkFOR0VfTk9UX1NBVElTRklBQkxFAE9SSUdJTl9JU19VTlJFQUNIQUJMRQBSRVNQT05TRV9JU19TVEFMRQBQVVJHRQBNRVJHRQBSRVFVRVNUX0hFQURFUl9GSUVMRFNfVE9PX0xBUkdFAFJFUVVFU1RfSEVBREVSX1RPT19MQVJHRQBQQVlMT0FEX1RPT19MQVJHRQBJTlNVRkZJQ0lFTlRfU1RPUkFHRQBIUEVfUEFVU0VEX1VQR1JBREUASFBFX1BBVVNFRF9IMl9VUEdSQURFAFNPVVJDRQBBTk5PVU5DRQBUUkFDRQBIUEVfVU5FWFBFQ1RFRF9TUEFDRQBERVNDUklCRQBVTlNVQlNDUklCRQBSRUNPUkQASFBFX0lOVkFMSURfTUVUSE9EAE5PVF9GT1VORABQUk9QRklORABVTkJJTkQAUkVCSU5EAFVOQVVUSE9SSVpFRABNRVRIT0RfTk9UX0FMTE9XRUQASFRUUF9WRVJTSU9OX05PVF9TVVBQT1JURUQAQUxSRUFEWV9SRVBPUlRFRABBQ0NFUFRFRABOT1RfSU1QTEVNRU5URUQATE9PUF9ERVRFQ1RFRABIUEVfQ1JfRVhQRUNURUQASFBFX0xGX0VYUEVDVEVEAENSRUFURUQASU1fVVNFRABIUEVfUEFVU0VEAFRJTUVPVVRfT0NDVVJFRABQQVlNRU5UX1JFUVVJUkVEAFBSRUNPTkRJVElPTl9SRVFVSVJFRABQUk9YWV9BVVRIRU5USUNBVElPTl9SRVFVSVJFRABORVRXT1JLX0FVVEhFTlRJQ0FUSU9OX1JFUVVJUkVEAExFTkdUSF9SRVFVSVJFRABTU0xfQ0VSVElGSUNBVEVfUkVRVUlSRUQAVVBHUkFERV9SRVFVSVJFRABQQUdFX0VYUElSRUQAUFJFQ09ORElUSU9OX0ZBSUxFRABFWFBFQ1RBVElPTl9GQUlMRUQAUkVWQUxJREFUSU9OX0ZBSUxFRABTU0xfSEFORFNIQUtFX0ZBSUxFRABMT0NLRUQAVFJBTlNGT1JNQVRJT05fQVBQTElFRABOT1RfTU9ESUZJRUQATk9UX0VYVEVOREVEAEJBTkRXSURUSF9MSU1JVF9FWENFRURFRABTSVRFX0lTX09WRVJMT0FERUQASEVBRABFeHBlY3RlZCBIVFRQLwAAXhMAACYTAAAwEAAA8BcAAJ0TAAAVEgAAORcAAPASAAAKEAAAdRIAAK0SAACCEwAATxQAAH8QAACgFQAAIxQAAIkSAACLFAAATRUAANQRAADPFAAAEBgAAMkWAADcFgAAwREAAOAXAAC7FAAAdBQAAHwVAADlFAAACBcAAB8QAABlFQAAoxQAACgVAAACFQAAmRUAACwQAACLGQAATw8AANQOAABqEAAAzhAAAAIXAACJDgAAbhMAABwTAABmFAAAVhcAAMETAADNEwAAbBMAAGgXAABmFwAAXxcAACITAADODwAAaQ4AANgOAABjFgAAyxMAAKoOAAAoFwAAJhcAAMUTAABdFgAA6BEAAGcTAABlEwAA8hYAAHMTAAAdFwAA+RYAAPMRAADPDgAAzhUAAAwSAACzEQAApREAAGEQAAAyFwAAuxMAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAQIBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAIDAgICAgIAAAICAAICAAICAgICAgICAgIABAAAAAAAAgICAgICAgICAgICAgICAgICAgICAgICAgIAAAACAgICAgICAgICAgICAgICAgICAgICAgICAgICAgACAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAACAAICAgICAAACAgACAgACAgICAgICAgICAAMABAAAAAICAgICAgICAgICAgICAgICAgICAgICAgICAAAAAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAAgACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAbG9zZWVlcC1hbGl2ZQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEBAQEBAQEBAQEBAQIBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQ
EBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBY2h1bmtlZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQEAAQEBAQEAAAEBAAEBAAEBAQEBAQEBAQEAAAAAAAAAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAAABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABlY3Rpb25lbnQtbGVuZ3Rob25yb3h5LWNvbm5lY3Rpb24AAAAAAAAAAAAAAAAAAAByYW5zZmVyLWVuY29kaW5ncGdyYWRlDQoNCg0KU00NCg0KVFRQL0NFL1RTUC8AAAAAAAAAAAAAAAABAgABAwAAAAAAAAAAAAAAAAAAAAAAAAQBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAAAAAAAAAAAAQIAAQMAAAAAAAAAAAAAAAAAAAAAAAAEAQEFAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAAAAAAAAAEAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAEBAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAAAAAAAAAAAAAQAAAgAAAAAAAAAAAAAAAAAAAAAAAAMEAAAEBAQEBAQEBAQEBAUEBAQEBAQEBAQEBAQABAAGBwQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEAAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAADAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwAAAAAAAAMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAABAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAIAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMAAAAAAAADAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABOT1VOQ0VFQ0tPVVRORUNURVRFQ1JJQkVMVVNIRVRFQURTRUFSQ0hSR0VDVElWSVRZTEVOREFSVkVPVElGWVBUSU9OU0NIU0VBWVNUQVRDSEdFT1JESVJFQ1RPUlRSQ0hQQVJBTUVURVJVUkNFQlNDUklCRUFSRE9XTkFDRUlORE5LQ0tVQlNDUklCRUhUVFAvQURUUC8=", "base64"); - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/llhttp/llhttp_simd-wasm.js -var require_llhttp_simd_wasm = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/llhttp/llhttp_simd-wasm.js"(exports, module2) { - var { Buffer: Buffer2 } = require("node:buffer"); - module2.exports = 
Buffer2.from("AGFzbQEAAAABMAhgAX8Bf2ADf39/AX9gBH9/f38Bf2AAAGADf39/AGABfwBgAn9/AGAGf39/f39/AALLAQgDZW52GHdhc21fb25faGVhZGVyc19jb21wbGV0ZQACA2VudhV3YXNtX29uX21lc3NhZ2VfYmVnaW4AAANlbnYLd2FzbV9vbl91cmwAAQNlbnYOd2FzbV9vbl9zdGF0dXMAAQNlbnYUd2FzbV9vbl9oZWFkZXJfZmllbGQAAQNlbnYUd2FzbV9vbl9oZWFkZXJfdmFsdWUAAQNlbnYMd2FzbV9vbl9ib2R5AAEDZW52GHdhc21fb25fbWVzc2FnZV9jb21wbGV0ZQAAA0ZFAwMEAAAFAAAAAAAABQEFAAUFBQAABgAAAAAGBgYGAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAAABAQcAAAUFAwABBAUBcAESEgUDAQACBggBfwFBgNQECwfRBSIGbWVtb3J5AgALX2luaXRpYWxpemUACRlfX2luZGlyZWN0X2Z1bmN0aW9uX3RhYmxlAQALbGxodHRwX2luaXQAChhsbGh0dHBfc2hvdWxkX2tlZXBfYWxpdmUAQQxsbGh0dHBfYWxsb2MADAZtYWxsb2MARgtsbGh0dHBfZnJlZQANBGZyZWUASA9sbGh0dHBfZ2V0X3R5cGUADhVsbGh0dHBfZ2V0X2h0dHBfbWFqb3IADxVsbGh0dHBfZ2V0X2h0dHBfbWlub3IAEBFsbGh0dHBfZ2V0X21ldGhvZAARFmxsaHR0cF9nZXRfc3RhdHVzX2NvZGUAEhJsbGh0dHBfZ2V0X3VwZ3JhZGUAEwxsbGh0dHBfcmVzZXQAFA5sbGh0dHBfZXhlY3V0ZQAVFGxsaHR0cF9zZXR0aW5nc19pbml0ABYNbGxodHRwX2ZpbmlzaAAXDGxsaHR0cF9wYXVzZQAYDWxsaHR0cF9yZXN1bWUAGRtsbGh0dHBfcmVzdW1lX2FmdGVyX3VwZ3JhZGUAGhBsbGh0dHBfZ2V0X2Vycm5vABsXbGxodHRwX2dldF9lcnJvcl9yZWFzb24AHBdsbGh0dHBfc2V0X2Vycm9yX3JlYXNvbgAdFGxsaHR0cF9nZXRfZXJyb3JfcG9zAB4RbGxodHRwX2Vycm5vX25hbWUAHxJsbGh0dHBfbWV0aG9kX25hbWUAIBJsbGh0dHBfc3RhdHVzX25hbWUAIRpsbGh0dHBfc2V0X2xlbmllbnRfaGVhZGVycwAiIWxsaHR0cF9zZXRfbGVuaWVudF9jaHVua2VkX2xlbmd0aAAjHWxsaHR0cF9zZXRfbGVuaWVudF9rZWVwX2FsaXZlACQkbGxodHRwX3NldF9sZW5pZW50X3RyYW5zZmVyX2VuY29kaW5nACUYbGxodHRwX21lc3NhZ2VfbmVlZHNfZW9mAD8JFwEAQQELEQECAwQFCwYHNTk3MS8tJyspCrLgAkUCAAsIABCIgICAAAsZACAAEMKAgIAAGiAAIAI2AjggACABOgAoCxwAIAAgAC8BMiAALQAuIAAQwYCAgAAQgICAgAALKgEBf0HAABDGgICAACIBEMKAgIAAGiABQYCIgIAANgI4IAEgADoAKCABCwoAIAAQyICAgAALBwAgAC0AKAsHACAALQAqCwcAIAAtACsLBwAgAC0AKQsHACAALwEyCwcAIAAtAC4LRQEEfyAAKAIYIQEgAC0ALSECIAAtACghAyAAKAI4IQQgABDCgICAABogACAENgI4IAAgAzoAKCAAIAI6AC0gACABNgIYCxEAIAAgASABIAJqEMOAgIAACxAAIABBAEHcABDMgICAABoLZwEBf0EAIQECQCAAKAIMDQACQAJAAkACQCAALQAvDgMBAAMCCyAAKAI4IgFFDQAgASgCLCIBRQ0AIAAgARGAgICAAAAiAQ0DC0EADwsQyoCAgAAACyAAQcOWgIAANgIQQQ4hAQsgAQseAAJAIAAoAgwNACAAQdGbgIAANgIQIABBFTYCDAsLFgACQCAAKAIMQRVHDQAgAEEANgIMCwsWAAJAIAAoAgxBFkcNACAAQQA2AgwLCwcAIAAoAgwLBwAgACgCEAsJACAAIAE2AhALBwAgACgCFAsiAAJAIABBJEkNABDKgICAAAALIABBAnRBoLOAgABqKAIACyIAAkAgAEEuSQ0AEMqAgIAAAAsgAEECdEGwtICAAGooAgAL7gsBAX9B66iAgAAhAQJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAIABBnH9qDvQDY2IAAWFhYWFhYQIDBAVhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhBgcICQoLDA0OD2FhYWFhEGFhYWFhYWFhYWFhEWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYRITFBUWFxgZGhthYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhHB0eHyAhIiMkJSYnKCkqKywtLi8wMTIzNDU2YTc4OTphYWFhYWFhYTthYWE8YWFhYT0+P2FhYWFhYWFhQGFhQWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYUJDREVGR0hJSktMTU5PUFFSU2FhYWFhYWFhVFVWV1hZWlthXF1hYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFeYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhX2BhC0Hhp4CAAA8LQaShgIAADwtBy6yAgAAPC0H+sYCAAA8LQcCkgIAADwtBq6SAgAAPC0GNqICAAA8LQeKmgIAADwtBgLCAgAAPC0G5r4CAAA8LQdekgIAADwtB75+AgAAPC0Hhn4CAAA8LQfqfgIAADwtB8qCAgAAPC0Gor4CAAA8LQa6ygIAADwtBiLCAgAAPC0Hsp4CAAA8LQYKigIAADwtBjp2AgAAPC0HQroCAAA8LQcqjgIAADwtBxbKAgAAPC0HfnICAAA8LQdKcgIAADwtBxKCAgAAPC0HXoICAAA8LQaKfgIAADwtB7a6AgAAPC0GrsICAAA8LQdSlgIAADwtBzK6Ag
AAPC0H6roCAAA8LQfyrgIAADwtB0rCAgAAPC0HxnYCAAA8LQbuggIAADwtB96uAgAAPC0GQsYCAAA8LQdexgIAADwtBoq2AgAAPC0HUp4CAAA8LQeCrgIAADwtBn6yAgAAPC0HrsYCAAA8LQdWfgIAADwtByrGAgAAPC0HepYCAAA8LQdSegIAADwtB9JyAgAAPC0GnsoCAAA8LQbGdgIAADwtBoJ2AgAAPC0G5sYCAAA8LQbywgIAADwtBkqGAgAAPC0GzpoCAAA8LQemsgIAADwtBrJ6AgAAPC0HUq4CAAA8LQfemgIAADwtBgKaAgAAPC0GwoYCAAA8LQf6egIAADwtBjaOAgAAPC0GJrYCAAA8LQfeigIAADwtBoLGAgAAPC0Gun4CAAA8LQcalgIAADwtB6J6AgAAPC0GTooCAAA8LQcKvgIAADwtBw52AgAAPC0GLrICAAA8LQeGdgIAADwtBja+AgAAPC0HqoYCAAA8LQbStgIAADwtB0q+AgAAPC0HfsoCAAA8LQdKygIAADwtB8LCAgAAPC0GpooCAAA8LQfmjgIAADwtBmZ6AgAAPC0G1rICAAA8LQZuwgIAADwtBkrKAgAAPC0G2q4CAAA8LQcKigIAADwtB+LKAgAAPC0GepYCAAA8LQdCigIAADwtBup6AgAAPC0GBnoCAAA8LEMqAgIAAAAtB1qGAgAAhAQsgAQsWACAAIAAtAC1B/gFxIAFBAEdyOgAtCxkAIAAgAC0ALUH9AXEgAUEAR0EBdHI6AC0LGQAgACAALQAtQfsBcSABQQBHQQJ0cjoALQsZACAAIAAtAC1B9wFxIAFBAEdBA3RyOgAtCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAgAiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCBCIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQcaRgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIwIgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAggiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEH2ioCAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCNCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIMIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABB7ZqAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAjgiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCECIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQZWQgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAI8IgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAhQiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEGqm4CAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCQCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIYIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABB7ZOAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAkQiBEUNACAAIAQRgICAgAAAIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCJCIERQ0AIAAgBBGAgICAAAAhAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIsIgRFDQAgACAEEYCAgIAAACEDCyADC0kBAn9BACEDAkAgACgCOCIERQ0AIAQoAigiBEUNACAAIAEgAiABayAEEYGAgIAAACIDQX9HDQAgAEH2iICAADYCEEEYIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCUCIERQ0AIAAgBBGAgICAAAAhAwsgAwtJAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAIcIgRFDQAgACABIAIgAWsgBBGBgICAAAAiA0F/Rw0AIABBwpmAgAA2AhBBGCEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAkgiBEUNACAAIAQRgICAgAAAIQMLIAMLSQECf0EAIQMCQCAAKAI4IgRFDQAgBCgCICIERQ0AIAAgASACIAFrIAQRgYCAgAAAIgNBf0cNACAAQZSUgIAANgIQQRghAwsgAwsuAQJ/QQAhAwJAIAAoAjgiBEUNACAEKAJMIgRFDQAgACAEEYCAgIAAACEDCyADCy4BAn9BACEDAkAgACgCOCIERQ0AIAQoAlQiBEUNACAAIAQRgICAgAAAIQMLIAMLLgECf0EAIQMCQCAAKAI4IgRFDQAgBCgCWCIERQ0AIAAgBBGAgICAAAAhAwsgAwtFAQF/AkACQCAALwEwQRRxQRRHDQBBASEDIAAtAChBAUYNASAALwEyQeUARiEDDAELIAAtAClBBUYhAwsgACADOgAuQQAL/gEBA39BASEDAkAgAC8BMCIEQQhxDQAgACkDIEIAUiEDCwJAAkAgAC0ALkUNAEEBIQUgAC0AKUEFRg0BQQEhBSAEQcAAcUUgA3FBAUcNAQtBACEFIARBwABxDQBBAiEFIARB//8DcSIDQQhxDQACQCADQYAEcUUNAAJAIAAtAChBAUcNACAALQAtQQpxDQBBBQ8LQQQPCwJAIANBIHENAAJAIAAtAChBAUYNACAALwEyQf//A3EiAEGcf2pB5ABJDQAgAEHMAUYNACAAQbACRg0AQQQhBSAEQShxRQ0CIANBiARxQYAERg0CC0EADwtBAEEDIAApAyBQGyEFCyAFC2IBAn9BACEBAkAgAC0AKEEBRg0AIAAvATJB//8DcSICQZx/akHkAEkNACACQcwBRg0AIAJBsAJGDQAgAC8BMCIAQcAAcQ0AQQEhASAAQYgEcUGABEYNACAAQShxRSEBCyABC6cBAQN/AkACQAJAIAAtACpFDQAgAC0AK0UNAEEAIQMgAC8BMCIEQQJxRQ0BDAILQQAhAyAALwEwIgRBAXFFDQELQQEhAyAALQAoQQFGDQAgAC8BMkH//wNxIgVBnH9qQeQASQ0AIAVBzAFGDQAgBUGwAkYNACAEQcAAcQ0AQQAhAyAEQYgEcUGABEYNACAEQShxQQBHIQMLIABBADsBMCAAQQA6AC8gAwuZAQECfwJAAkACQCAALQAqRQ0AIAAtACtFDQBBACEBIAAvATAiAkECcUUNAQwCC0EAIQEgAC8BMCICQQFxRQ0BC0EBIQEgAC0AKEEBRg0AIAAvATJB//8DcSIAQZx/akHkAEkNACAAQcwBRg0AIABB
sAJGDQAgAkHAAHENAEEAIQEgAkGIBHFBgARGDQAgAkEocUEARyEBCyABC0kBAXsgAEEQav0MAAAAAAAAAAAAAAAAAAAAACIB/QsDACAAIAH9CwMAIABBMGogAf0LAwAgAEEgaiAB/QsDACAAQd0BNgIcQQALewEBfwJAIAAoAgwiAw0AAkAgACgCBEUNACAAIAE2AgQLAkAgACABIAIQxICAgAAiAw0AIAAoAgwPCyAAIAM2AhxBACEDIAAoAgQiAUUNACAAIAEgAiAAKAIIEYGAgIAAACIBRQ0AIAAgAjYCFCAAIAE2AgwgASEDCyADC+TzAQMOfwN+BH8jgICAgABBEGsiAySAgICAACABIQQgASEFIAEhBiABIQcgASEIIAEhCSABIQogASELIAEhDCABIQ0gASEOIAEhDwJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAAKAIcIhBBf2oO3QHaAQHZAQIDBAUGBwgJCgsMDQ7YAQ8Q1wEREtYBExQVFhcYGRob4AHfARwdHtUBHyAhIiMkJdQBJicoKSorLNMB0gEtLtEB0AEvMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUbbAUdISUrPAc4BS80BTMwBTU5PUFFSU1RVVldYWVpbXF1eX2BhYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ent8fX5/gAGBAYIBgwGEAYUBhgGHAYgBiQGKAYsBjAGNAY4BjwGQAZEBkgGTAZQBlQGWAZcBmAGZAZoBmwGcAZ0BngGfAaABoQGiAaMBpAGlAaYBpwGoAakBqgGrAawBrQGuAa8BsAGxAbIBswG0AbUBtgG3AcsBygG4AckBuQHIAboBuwG8Ab0BvgG/AcABwQHCAcMBxAHFAcYBANwBC0EAIRAMxgELQQ4hEAzFAQtBDSEQDMQBC0EPIRAMwwELQRAhEAzCAQtBEyEQDMEBC0EUIRAMwAELQRUhEAy/AQtBFiEQDL4BC0EXIRAMvQELQRghEAy8AQtBGSEQDLsBC0EaIRAMugELQRshEAy5AQtBHCEQDLgBC0EIIRAMtwELQR0hEAy2AQtBICEQDLUBC0EfIRAMtAELQQchEAyzAQtBISEQDLIBC0EiIRAMsQELQR4hEAywAQtBIyEQDK8BC0ESIRAMrgELQREhEAytAQtBJCEQDKwBC0ElIRAMqwELQSYhEAyqAQtBJyEQDKkBC0HDASEQDKgBC0EpIRAMpwELQSshEAymAQtBLCEQDKUBC0EtIRAMpAELQS4hEAyjAQtBLyEQDKIBC0HEASEQDKEBC0EwIRAMoAELQTQhEAyfAQtBDCEQDJ4BC0ExIRAMnQELQTIhEAycAQtBMyEQDJsBC0E5IRAMmgELQTUhEAyZAQtBxQEhEAyYAQtBCyEQDJcBC0E6IRAMlgELQTYhEAyVAQtBCiEQDJQBC0E3IRAMkwELQTghEAySAQtBPCEQDJEBC0E7IRAMkAELQT0hEAyPAQtBCSEQDI4BC0EoIRAMjQELQT4hEAyMAQtBPyEQDIsBC0HAACEQDIoBC0HBACEQDIkBC0HCACEQDIgBC0HDACEQDIcBC0HEACEQDIYBC0HFACEQDIUBC0HGACEQDIQBC0EqIRAMgwELQccAIRAMggELQcgAIRAMgQELQckAIRAMgAELQcoAIRAMfwtBywAhEAx+C0HNACEQDH0LQcwAIRAMfAtBzgAhEAx7C0HPACEQDHoLQdAAIRAMeQtB0QAhEAx4C0HSACEQDHcLQdMAIRAMdgtB1AAhEAx1C0HWACEQDHQLQdUAIRAMcwtBBiEQDHILQdcAIRAMcQtBBSEQDHALQdgAIRAMbwtBBCEQDG4LQdkAIRAMbQtB2gAhEAxsC0HbACEQDGsLQdwAIRAMagtBAyEQDGkLQd0AIRAMaAtB3gAhEAxnC0HfACEQDGYLQeEAIRAMZQtB4AAhEAxkC0HiACEQDGMLQeMAIRAMYgtBAiEQDGELQeQAIRAMYAtB5QAhEAxfC0HmACEQDF4LQecAIRAMXQtB6AAhEAxcC0HpACEQDFsLQeoAIRAMWgtB6wAhEAxZC0HsACEQDFgLQe0AIRAMVwtB7gAhEAxWC0HvACEQDFULQfAAIRAMVAtB8QAhEAxTC0HyACEQDFILQfMAIRAMUQtB9AAhEAxQC0H1ACEQDE8LQfYAIRAMTgtB9wAhEAxNC0H4ACEQDEwLQfkAIRAMSwtB+gAhEAxKC0H7ACEQDEkLQfwAIRAMSAtB/QAhEAxHC0H+ACEQDEYLQf8AIRAMRQtBgAEhEAxEC0GBASEQDEMLQYIBIRAMQgtBgwEhEAxBC0GEASEQDEALQYUBIRAMPwtBhgEhEAw+C0GHASEQDD0LQYgBIRAMPAtBiQEhEAw7C0GKASEQDDoLQYsBIRAMOQtBjAEhEAw4C0GNASEQDDcLQY4BIRAMNgtBjwEhEAw1C0GQASEQDDQLQZEBIRAMMwtBkgEhEAwyC0GTASEQDDELQZQBIRAMMAtBlQEhEAwvC0GWASEQDC4LQZcBIRAMLQtBmAEhEAwsC0GZASEQDCsLQZoBIRAMKgtBmwEhEAwpC0GcASEQDCgLQZ0BIRAMJwtBngEhEAwmC0GfASEQDCULQaABIRAMJAtBoQEhEAwjC0GiASEQDCILQaMBIRAMIQtBpAEhEAwgC0GlASEQDB8LQaYBIRAMHgtBpwEhEAwdC0GoASEQDBwLQakBIRAMGwtBqgEhEAwaC0GrASEQDBkLQawBIRAMGAtBrQEhEAwXC0GuASEQDBYLQQEhEAwVC0GvASEQDBQLQbABIRAMEwtBsQEhEAwSC0GzASEQDBELQbIBIRAMEAtBtAEhEAwPC0G1ASEQDA4LQbYBIRAMDQtBtwEhEAwMC0G4ASEQDAsLQbkBIRAMCgtBugEhEAwJC0G7ASEQDAgLQcYBIRAMBwtBvAEhEAwGC0G9ASEQDAULQb4BIRAMBAtBvwE
hEAwDC0HAASEQDAILQcIBIRAMAQtBwQEhEAsDQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAIBAOxwEAAQIDBAUGBwgJCgsMDQ4PEBESExQVFhcYGRobHB4fICEjJSg/QEFERUZHSElKS0xNT1BRUlPeA1dZW1xdYGJlZmdoaWprbG1vcHFyc3R1dnd4eXp7fH1+gAGCAYUBhgGHAYkBiwGMAY0BjgGPAZABkQGUAZUBlgGXAZgBmQGaAZsBnAGdAZ4BnwGgAaEBogGjAaQBpQGmAacBqAGpAaoBqwGsAa0BrgGvAbABsQGyAbMBtAG1AbYBtwG4AbkBugG7AbwBvQG+Ab8BwAHBAcIBwwHEAcUBxgHHAcgByQHKAcsBzAHNAc4BzwHQAdEB0gHTAdQB1QHWAdcB2AHZAdoB2wHcAd0B3gHgAeEB4gHjAeQB5QHmAecB6AHpAeoB6wHsAe0B7gHvAfAB8QHyAfMBmQKkArAC/gL+AgsgASIEIAJHDfMBQd0BIRAM/wMLIAEiECACRw3dAUHDASEQDP4DCyABIgEgAkcNkAFB9wAhEAz9AwsgASIBIAJHDYYBQe8AIRAM/AMLIAEiASACRw1/QeoAIRAM+wMLIAEiASACRw17QegAIRAM+gMLIAEiASACRw14QeYAIRAM+QMLIAEiASACRw0aQRghEAz4AwsgASIBIAJHDRRBEiEQDPcDCyABIgEgAkcNWUHFACEQDPYDCyABIgEgAkcNSkE/IRAM9QMLIAEiASACRw1IQTwhEAz0AwsgASIBIAJHDUFBMSEQDPMDCyAALQAuQQFGDesDDIcCCyAAIAEiASACEMCAgIAAQQFHDeYBIABCADcDIAznAQsgACABIgEgAhC0gICAACIQDecBIAEhAQz1AgsCQCABIgEgAkcNAEEGIRAM8AMLIAAgAUEBaiIBIAIQu4CAgAAiEA3oASABIQEMMQsgAEIANwMgQRIhEAzVAwsgASIQIAJHDStBHSEQDO0DCwJAIAEiASACRg0AIAFBAWohAUEQIRAM1AMLQQchEAzsAwsgAEIAIAApAyAiESACIAEiEGutIhJ9IhMgEyARVhs3AyAgESASViIURQ3lAUEIIRAM6wMLAkAgASIBIAJGDQAgAEGJgICAADYCCCAAIAE2AgQgASEBQRQhEAzSAwtBCSEQDOoDCyABIQEgACkDIFAN5AEgASEBDPICCwJAIAEiASACRw0AQQshEAzpAwsgACABQQFqIgEgAhC2gICAACIQDeUBIAEhAQzyAgsgACABIgEgAhC4gICAACIQDeUBIAEhAQzyAgsgACABIgEgAhC4gICAACIQDeYBIAEhAQwNCyAAIAEiASACELqAgIAAIhAN5wEgASEBDPACCwJAIAEiASACRw0AQQ8hEAzlAwsgAS0AACIQQTtGDQggEEENRw3oASABQQFqIQEM7wILIAAgASIBIAIQuoCAgAAiEA3oASABIQEM8gILA0ACQCABLQAAQfC1gIAAai0AACIQQQFGDQAgEEECRw3rASAAKAIEIRAgAEEANgIEIAAgECABQQFqIgEQuYCAgAAiEA3qASABIQEM9AILIAFBAWoiASACRw0AC0ESIRAM4gMLIAAgASIBIAIQuoCAgAAiEA3pASABIQEMCgsgASIBIAJHDQZBGyEQDOADCwJAIAEiASACRw0AQRYhEAzgAwsgAEGKgICAADYCCCAAIAE2AgQgACABIAIQuICAgAAiEA3qASABIQFBICEQDMYDCwJAIAEiASACRg0AA0ACQCABLQAAQfC3gIAAai0AACIQQQJGDQACQCAQQX9qDgTlAewBAOsB7AELIAFBAWohAUEIIRAMyAMLIAFBAWoiASACRw0AC0EVIRAM3wMLQRUhEAzeAwsDQAJAIAEtAABB8LmAgABqLQAAIhBBAkYNACAQQX9qDgTeAewB4AHrAewBCyABQQFqIgEgAkcNAAtBGCEQDN0DCwJAIAEiASACRg0AIABBi4CAgAA2AgggACABNgIEIAEhAUEHIRAMxAMLQRkhEAzcAwsgAUEBaiEBDAILAkAgASIUIAJHDQBBGiEQDNsDCyAUIQECQCAULQAAQXNqDhTdAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gLuAu4C7gLuAgDuAgtBACEQIABBADYCHCAAQa+LgIAANgIQIABBAjYCDCAAIBRBAWo2AhQM2gMLAkAgAS0AACIQQTtGDQAgEEENRw3oASABQQFqIQEM5QILIAFBAWohAQtBIi
EQDL8DCwJAIAEiECACRw0AQRwhEAzYAwtCACERIBAhASAQLQAAQVBqDjfnAeYBAQIDBAUGBwgAAAAAAAAACQoLDA0OAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPEBESExQAC0EeIRAMvQMLQgIhEQzlAQtCAyERDOQBC0IEIREM4wELQgUhEQziAQtCBiERDOEBC0IHIREM4AELQgghEQzfAQtCCSERDN4BC0IKIREM3QELQgshEQzcAQtCDCERDNsBC0INIREM2gELQg4hEQzZAQtCDyERDNgBC0IKIREM1wELQgshEQzWAQtCDCERDNUBC0INIREM1AELQg4hEQzTAQtCDyERDNIBC0IAIRECQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAIBAtAABBUGoON+UB5AEAAQIDBAUGB+YB5gHmAeYB5gHmAeYBCAkKCwwN5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAeYB5gHmAQ4PEBESE+YBC0ICIREM5AELQgMhEQzjAQtCBCERDOIBC0IFIREM4QELQgYhEQzgAQtCByERDN8BC0IIIREM3gELQgkhEQzdAQtCCiERDNwBC0ILIREM2wELQgwhEQzaAQtCDSERDNkBC0IOIREM2AELQg8hEQzXAQtCCiERDNYBC0ILIREM1QELQgwhEQzUAQtCDSERDNMBC0IOIREM0gELQg8hEQzRAQsgAEIAIAApAyAiESACIAEiEGutIhJ9IhMgEyARVhs3AyAgESASViIURQ3SAUEfIRAMwAMLAkAgASIBIAJGDQAgAEGJgICAADYCCCAAIAE2AgQgASEBQSQhEAynAwtBICEQDL8DCyAAIAEiECACEL6AgIAAQX9qDgW2AQDFAgHRAdIBC0ERIRAMpAMLIABBAToALyAQIQEMuwMLIAEiASACRw3SAUEkIRAMuwMLIAEiDSACRw0eQcYAIRAMugMLIAAgASIBIAIQsoCAgAAiEA3UASABIQEMtQELIAEiECACRw0mQdAAIRAMuAMLAkAgASIBIAJHDQBBKCEQDLgDCyAAQQA2AgQgAEGMgICAADYCCCAAIAEgARCxgICAACIQDdMBIAEhAQzYAQsCQCABIhAgAkcNAEEpIRAMtwMLIBAtAAAiAUEgRg0UIAFBCUcN0wEgEEEBaiEBDBULAkAgASIBIAJGDQAgAUEBaiEBDBcLQSohEAy1AwsCQCABIhAgAkcNAEErIRAMtQMLAkAgEC0AACIBQQlGDQAgAUEgRw3VAQsgAC0ALEEIRg3TASAQIQEMkQMLAkAgASIBIAJHDQBBLCEQDLQDCyABLQAAQQpHDdUBIAFBAWohAQzJAgsgASIOIAJHDdUBQS8hEAyyAwsDQAJAIAEtAAAiEEEgRg0AAkAgEEF2ag4EANwB3AEA2gELIAEhAQzgAQsgAUEBaiIBIAJHDQALQTEhEAyxAwtBMiEQIAEiFCACRg2wAyACIBRrIAAoAgAiAWohFSAUIAFrQQNqIRYCQANAIBQtAAAiF0EgciAXIBdBv39qQf8BcUEaSRtB/wFxIAFB8LuAgABqLQAARw0BAkAgAUEDRw0AQQYhAQyWAwsgAUEBaiEBIBRBAWoiFCACRw0ACyAAIBU2AgAMsQMLIABBADYCACAUIQEM2QELQTMhECABIhQgAkYNrwMgAiAUayAAKAIAIgFqIRUgFCABa0EIaiEWAkADQCAULQAAIhdBIHIgFyAXQb9/akH/AXFBGkkbQf8BcSABQfS7gIAAai0AAEcNAQJAIAFBCEcNAEEFIQEMlQMLIAFBAWohASAUQQFqIhQgAkcNAAsgACAVNgIADLADCyAAQQA2AgAgFCEBDNgBC0E0IRAgASIUIAJGDa4DIAIgFGsgACgCACIBaiEVIBQgAWtBBWohFgJAA0AgFC0AACIXQSByIBcgF0G/f2pB/wFxQRpJG0H/AXEgAUHQwoCAAGotAABHDQECQCABQQVHDQBBByEBDJQDCyABQQFqIQEgFEEBaiIUIAJHDQALIAAgFTYCAAyvAwsgAEEANgIAIBQhAQzXAQsCQCABIgEgAkYNAANAAkAgAS0AAEGAvoCAAGotAAAiEEEBRg0AIBBBAkYNCiABIQEM3QELIAFBAWoiASACRw0AC0EwIRAMrgMLQTAhEAytAwsCQCABIgEgAkYNAANAAkAgAS0AACIQQSBGDQAgEEF2ag4E2QHaAdoB2QHaAQsgAUEBaiIBIAJHDQALQTghEAytAwtBOCEQDKwDCwNAAkAgAS0AACIQQSBGDQAgEEEJRw0DCyABQQFqIgEgAkcNAAtBPCEQDKsDCwNAAkAgAS0AACIQQSBGDQACQAJAIBBBdmoOBNoBAQHaAQALIBBBLEYN2wELIAEhAQwECyABQQFqIgEgAkcNAAtBPyEQDKoDCyABIQEM2wELQcAAIRAgASIUIAJGDagDIAIgFGsgACgCACIBaiEWIBQgAWtBBmohFwJAA0AgFC0AAEEgciABQYDAgIAAai0AAEcNASABQQZGDY4DIAFBAWohASAUQQFqIhQgAkcNAAsgACAWNgIADKkDCyAAQQA2AgAgFCEBC0E2IRAMjgMLAkAgASIPIAJHDQBBwQAhEAynAwsgAEGMgICAADYCCCAAIA82AgQgDyEBIAAtACxBf2oOBM0B1QHXAdkBhwMLIAFBAWohAQzMAQsCQCABIgEgAkYNAANAAkAgAS0AACIQQSByIBAgEEG/f2pB/wFxQRpJG0H/AXEiEEEJRg0AIBBBIEYNAAJAAkACQAJAIBBBnX9qDhMAAwMDAwMDAwEDAwMDAwMDAwMCAwsgAUEBaiEBQTEhEAyRAwsgAUEBaiEBQTIhEAyQAwsgAUEBaiEBQTMhEAyPAwsgASEBDNABCyABQQFqIgEgAkcNAAtBNSEQDKUDC0E1IRAMpAMLAkAgASIBIAJGDQADQAJAIAEtAABBgLyAgABqLQAAQQFGDQAgASEBDNMBCyABQQFqIgEgAkcNAAtBPSEQDKQDC0E9IRAMowMLIAAgASIBIAIQsICAgAAiEA3WASABIQEMAQsgEEEBaiEBC0E8IRAMhwMLAkAgASIBIAJHDQBBwgAhEAygAwsCQANAAkAgAS0AAEF3ag4YAAL+Av4ChAP+Av4C/gL+Av4C/gL+Av4C/gL+Av4C/gL+Av4C/gL+Av4C/gIA/gILIAFBAWoiASACRw0AC0HCACEQDKADCyABQQFqIQEgAC0ALUEBcUUNvQEgASEBC0EsIRAMhQMLIAEiASACRw3TAUHEACEQDJ0DCwNAAkAgAS0AAEGQwICAAGotAABBAUYNACABIQEMtwILIAFBAWoiASACRw0AC0HFACEQDJwDCyANLQAAIhBBIEYNswEgEEE6Rw2BAyAAKAIEIQEgAEEANgIEIAAgASANEK+AgIAAIgEN0AEgDUEBaiEBDLMCC0HHACEQIAEiDSACRg2aAyACIA1rIAAoAgAiAWohFiANI
AFrQQVqIRcDQCANLQAAIhRBIHIgFCAUQb9/akH/AXFBGkkbQf8BcSABQZDCgIAAai0AAEcNgAMgAUEFRg30AiABQQFqIQEgDUEBaiINIAJHDQALIAAgFjYCAAyaAwtByAAhECABIg0gAkYNmQMgAiANayAAKAIAIgFqIRYgDSABa0EJaiEXA0AgDS0AACIUQSByIBQgFEG/f2pB/wFxQRpJG0H/AXEgAUGWwoCAAGotAABHDf8CAkAgAUEJRw0AQQIhAQz1AgsgAUEBaiEBIA1BAWoiDSACRw0ACyAAIBY2AgAMmQMLAkAgASINIAJHDQBByQAhEAyZAwsCQAJAIA0tAAAiAUEgciABIAFBv39qQf8BcUEaSRtB/wFxQZJ/ag4HAIADgAOAA4ADgAMBgAMLIA1BAWohAUE+IRAMgAMLIA1BAWohAUE/IRAM/wILQcoAIRAgASINIAJGDZcDIAIgDWsgACgCACIBaiEWIA0gAWtBAWohFwNAIA0tAAAiFEEgciAUIBRBv39qQf8BcUEaSRtB/wFxIAFBoMKAgABqLQAARw39AiABQQFGDfACIAFBAWohASANQQFqIg0gAkcNAAsgACAWNgIADJcDC0HLACEQIAEiDSACRg2WAyACIA1rIAAoAgAiAWohFiANIAFrQQ5qIRcDQCANLQAAIhRBIHIgFCAUQb9/akH/AXFBGkkbQf8BcSABQaLCgIAAai0AAEcN/AIgAUEORg3wAiABQQFqIQEgDUEBaiINIAJHDQALIAAgFjYCAAyWAwtBzAAhECABIg0gAkYNlQMgAiANayAAKAIAIgFqIRYgDSABa0EPaiEXA0AgDS0AACIUQSByIBQgFEG/f2pB/wFxQRpJG0H/AXEgAUHAwoCAAGotAABHDfsCAkAgAUEPRw0AQQMhAQzxAgsgAUEBaiEBIA1BAWoiDSACRw0ACyAAIBY2AgAMlQMLQc0AIRAgASINIAJGDZQDIAIgDWsgACgCACIBaiEWIA0gAWtBBWohFwNAIA0tAAAiFEEgciAUIBRBv39qQf8BcUEaSRtB/wFxIAFB0MKAgABqLQAARw36AgJAIAFBBUcNAEEEIQEM8AILIAFBAWohASANQQFqIg0gAkcNAAsgACAWNgIADJQDCwJAIAEiDSACRw0AQc4AIRAMlAMLAkACQAJAAkAgDS0AACIBQSByIAEgAUG/f2pB/wFxQRpJG0H/AXFBnX9qDhMA/QL9Av0C/QL9Av0C/QL9Av0C/QL9Av0CAf0C/QL9AgID/QILIA1BAWohAUHBACEQDP0CCyANQQFqIQFBwgAhEAz8AgsgDUEBaiEBQcMAIRAM+wILIA1BAWohAUHEACEQDPoCCwJAIAEiASACRg0AIABBjYCAgAA2AgggACABNgIEIAEhAUHFACEQDPoCC0HPACEQDJIDCyAQIQECQAJAIBAtAABBdmoOBAGoAqgCAKgCCyAQQQFqIQELQSchEAz4AgsCQCABIgEgAkcNAEHRACEQDJEDCwJAIAEtAABBIEYNACABIQEMjQELIAFBAWohASAALQAtQQFxRQ3HASABIQEMjAELIAEiFyACRw3IAUHSACEQDI8DC0HTACEQIAEiFCACRg2OAyACIBRrIAAoAgAiAWohFiAUIAFrQQFqIRcDQCAULQAAIAFB1sKAgABqLQAARw3MASABQQFGDccBIAFBAWohASAUQQFqIhQgAkcNAAsgACAWNgIADI4DCwJAIAEiASACRw0AQdUAIRAMjgMLIAEtAABBCkcNzAEgAUEBaiEBDMcBCwJAIAEiASACRw0AQdYAIRAMjQMLAkACQCABLQAAQXZqDgQAzQHNAQHNAQsgAUEBaiEBDMcBCyABQQFqIQFBygAhEAzzAgsgACABIgEgAhCugICAACIQDcsBIAEhAUHNACEQDPICCyAALQApQSJGDYUDDKYCCwJAIAEiASACRw0AQdsAIRAMigMLQQAhFEEBIRdBASEWQQAhEAJAAkACQAJAAkACQAJAAkACQCABLQAAQVBqDgrUAdMBAAECAwQFBgjVAQtBAiEQDAYLQQMhEAwFC0EEIRAMBAtBBSEQDAMLQQYhEAwCC0EHIRAMAQtBCCEQC0EAIRdBACEWQQAhFAzMAQtBCSEQQQEhFEEAIRdBACEWDMsBCwJAIAEiASACRw0AQd0AIRAMiQMLIAEtAABBLkcNzAEgAUEBaiEBDKYCCyABIgEgAkcNzAFB3wAhEAyHAwsCQCABIgEgAkYNACAAQY6AgIAANgIIIAAgATYCBCABIQFB0AAhEAzuAgtB4AAhEAyGAwtB4QAhECABIgEgAkYNhQMgAiABayAAKAIAIhRqIRYgASAUa0EDaiEXA0AgAS0AACAUQeLCgIAAai0AAEcNzQEgFEEDRg3MASAUQQFqIRQgAUEBaiIBIAJHDQALIAAgFjYCAAyFAwtB4gAhECABIgEgAkYNhAMgAiABayAAKAIAIhRqIRYgASAUa0ECaiEXA0AgAS0AACAUQebCgIAAai0AAEcNzAEgFEECRg3OASAUQQFqIRQgAUEBaiIBIAJHDQALIAAgFjYCAAyEAwtB4wAhECABIgEgAkYNgwMgAiABayAAKAIAIhRqIRYgASAUa0EDaiEXA0AgAS0AACAUQenCgIAAai0AAEcNywEgFEEDRg3OASAUQQFqIRQgAUEBaiIBIAJHDQALIAAgFjYCAAyDAwsCQCABIgEgAkcNAEHlACEQDIMDCyAAIAFBAWoiASACEKiAgIAAIhANzQEgASEBQdYAIRAM6QILAkAgASIBIAJGDQADQAJAIAEtAAAiEEEgRg0AAkACQAJAIBBBuH9qDgsAAc8BzwHPAc8BzwHPAc8BzwECzwELIAFBAWohAUHSACEQDO0CCyABQQFqIQFB0wAhEAzsAgsgAUEBaiEBQdQAIRAM6wILIAFBAWoiASACRw0AC0HkACEQDIIDC0HkACEQDIEDCwNAAkAgAS0AAEHwwoCAAGotAAAiEEEBRg0AIBBBfmoOA88B0AHRAdIBCyABQQFqIgEgAkcNAAtB5gAhEAyAAwsCQCABIgEgAkYNACABQQFqIQEMAwtB5wAhEAz/AgsDQAJAIAEtAABB8MSAgABqLQAAIhBBAUYNAAJAIBBBfmoOBNIB0wHUAQDVAQsgASEBQdcAIRAM5wILIAFBAWoiASACRw0AC0HoACEQDP4CCwJAIAEiASACRw0AQekAIRAM/gILAkAgAS0AACIQQXZqDhq6AdUB1QG8AdUB1QHVAdUB1QHVAdUB1QHVAdUB1QHVAdUB1QHVAdUB1QHVAcoB1QHVAQDTAQsgAUEBaiEBC0EGIRAM4wILA0ACQCABLQAAQfDGgIAAai0AAEEBRg0AIAEhAQyeAgsgAUEBaiIBIAJHDQALQeoAIRAM+wILAkAgASIBIAJGDQAgAUEBaiEBDAMLQesAIRAM+gILAkAgASIBIAJHDQBB7AAhEAz6AgsgAUEBaiEBDAELAkAgASIBIAJHDQBB7QAhEAz5AgsgAUEBaiEBC0EE
IRAM3gILAkAgASIUIAJHDQBB7gAhEAz3AgsgFCEBAkACQAJAIBQtAABB8MiAgABqLQAAQX9qDgfUAdUB1gEAnAIBAtcBCyAUQQFqIQEMCgsgFEEBaiEBDM0BC0EAIRAgAEEANgIcIABBm5KAgAA2AhAgAEEHNgIMIAAgFEEBajYCFAz2AgsCQANAAkAgAS0AAEHwyICAAGotAAAiEEEERg0AAkACQCAQQX9qDgfSAdMB1AHZAQAEAdkBCyABIQFB2gAhEAzgAgsgAUEBaiEBQdwAIRAM3wILIAFBAWoiASACRw0AC0HvACEQDPYCCyABQQFqIQEMywELAkAgASIUIAJHDQBB8AAhEAz1AgsgFC0AAEEvRw3UASAUQQFqIQEMBgsCQCABIhQgAkcNAEHxACEQDPQCCwJAIBQtAAAiAUEvRw0AIBRBAWohAUHdACEQDNsCCyABQXZqIgRBFksN0wFBASAEdEGJgIACcUUN0wEMygILAkAgASIBIAJGDQAgAUEBaiEBQd4AIRAM2gILQfIAIRAM8gILAkAgASIUIAJHDQBB9AAhEAzyAgsgFCEBAkAgFC0AAEHwzICAAGotAABBf2oOA8kClAIA1AELQeEAIRAM2AILAkAgASIUIAJGDQADQAJAIBQtAABB8MqAgABqLQAAIgFBA0YNAAJAIAFBf2oOAssCANUBCyAUIQFB3wAhEAzaAgsgFEEBaiIUIAJHDQALQfMAIRAM8QILQfMAIRAM8AILAkAgASIBIAJGDQAgAEGPgICAADYCCCAAIAE2AgQgASEBQeAAIRAM1wILQfUAIRAM7wILAkAgASIBIAJHDQBB9gAhEAzvAgsgAEGPgICAADYCCCAAIAE2AgQgASEBC0EDIRAM1AILA0AgAS0AAEEgRw3DAiABQQFqIgEgAkcNAAtB9wAhEAzsAgsCQCABIgEgAkcNAEH4ACEQDOwCCyABLQAAQSBHDc4BIAFBAWohAQzvAQsgACABIgEgAhCsgICAACIQDc4BIAEhAQyOAgsCQCABIgQgAkcNAEH6ACEQDOoCCyAELQAAQcwARw3RASAEQQFqIQFBEyEQDM8BCwJAIAEiBCACRw0AQfsAIRAM6QILIAIgBGsgACgCACIBaiEUIAQgAWtBBWohEANAIAQtAAAgAUHwzoCAAGotAABHDdABIAFBBUYNzgEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBB+wAhEAzoAgsCQCABIgQgAkcNAEH8ACEQDOgCCwJAAkAgBC0AAEG9f2oODADRAdEB0QHRAdEB0QHRAdEB0QHRAQHRAQsgBEEBaiEBQeYAIRAMzwILIARBAWohAUHnACEQDM4CCwJAIAEiBCACRw0AQf0AIRAM5wILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQe3PgIAAai0AAEcNzwEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQf0AIRAM5wILIABBADYCACAQQQFqIQFBECEQDMwBCwJAIAEiBCACRw0AQf4AIRAM5gILIAIgBGsgACgCACIBaiEUIAQgAWtBBWohEAJAA0AgBC0AACABQfbOgIAAai0AAEcNzgEgAUEFRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQf4AIRAM5gILIABBADYCACAQQQFqIQFBFiEQDMsBCwJAIAEiBCACRw0AQf8AIRAM5QILIAIgBGsgACgCACIBaiEUIAQgAWtBA2ohEAJAA0AgBC0AACABQfzOgIAAai0AAEcNzQEgAUEDRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQf8AIRAM5QILIABBADYCACAQQQFqIQFBBSEQDMoBCwJAIAEiBCACRw0AQYABIRAM5AILIAQtAABB2QBHDcsBIARBAWohAUEIIRAMyQELAkAgASIEIAJHDQBBgQEhEAzjAgsCQAJAIAQtAABBsn9qDgMAzAEBzAELIARBAWohAUHrACEQDMoCCyAEQQFqIQFB7AAhEAzJAgsCQCABIgQgAkcNAEGCASEQDOICCwJAAkAgBC0AAEG4f2oOCADLAcsBywHLAcsBywEBywELIARBAWohAUHqACEQDMkCCyAEQQFqIQFB7QAhEAzIAgsCQCABIgQgAkcNAEGDASEQDOECCyACIARrIAAoAgAiAWohECAEIAFrQQJqIRQCQANAIAQtAAAgAUGAz4CAAGotAABHDckBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgEDYCAEGDASEQDOECC0EAIRAgAEEANgIAIBRBAWohAQzGAQsCQCABIgQgAkcNAEGEASEQDOACCyACIARrIAAoAgAiAWohFCAEIAFrQQRqIRACQANAIAQtAAAgAUGDz4CAAGotAABHDcgBIAFBBEYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGEASEQDOACCyAAQQA2AgAgEEEBaiEBQSMhEAzFAQsCQCABIgQgAkcNAEGFASEQDN8CCwJAAkAgBC0AAEG0f2oOCADIAcgByAHIAcgByAEByAELIARBAWohAUHvACEQDMYCCyAEQQFqIQFB8AAhEAzFAgsCQCABIgQgAkcNAEGGASEQDN4CCyAELQAAQcUARw3FASAEQQFqIQEMgwILAkAgASIEIAJHDQBBhwEhEAzdAgsgAiAEayAAKAIAIgFqIRQgBCABa0EDaiEQAkADQCAELQAAIAFBiM+AgABqLQAARw3FASABQQNGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBhwEhEAzdAgsgAEEANgIAIBBBAWohAUEtIRAMwgELAkAgASIEIAJHDQBBiAEhEAzcAgsgAiAEayAAKAIAIgFqIRQgBCABa0EIaiEQAkADQCAELQAAIAFB0M+AgABqLQAARw3EASABQQhGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBiAEhEAzcAgsgAEEANgIAIBBBAWohAUEpIRAMwQELAkAgASIBIAJHDQBBiQEhEAzbAgtBASEQIAEtAABB3wBHDcABIAFBAWohAQyBAgsCQCABIgQgAkcNAEGKASEQDNoCCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRADQCAELQAAIAFBjM+AgABqLQAARw3BASABQQFGDa8CIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQYoBIRAM2QILAkAgASIEIAJHDQBBiwEhEAzZAgsgAiAEayAAKAIAIgFqIRQgBCABa0ECaiEQAkADQCAELQAAIAFBjs+AgABqLQAARw3BASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBiwEhEAzZAgsgAEEANgIAIBBBAWohAUECIRAMvgELAkAgASIEIAJHDQBBjAEhEAzYAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFB8M+AgABqLQAARw3AASA
BQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBjAEhEAzYAgsgAEEANgIAIBBBAWohAUEfIRAMvQELAkAgASIEIAJHDQBBjQEhEAzXAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFB8s+AgABqLQAARw2/ASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBjQEhEAzXAgsgAEEANgIAIBBBAWohAUEJIRAMvAELAkAgASIEIAJHDQBBjgEhEAzWAgsCQAJAIAQtAABBt39qDgcAvwG/Ab8BvwG/AQG/AQsgBEEBaiEBQfgAIRAMvQILIARBAWohAUH5ACEQDLwCCwJAIAEiBCACRw0AQY8BIRAM1QILIAIgBGsgACgCACIBaiEUIAQgAWtBBWohEAJAA0AgBC0AACABQZHPgIAAai0AAEcNvQEgAUEFRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQY8BIRAM1QILIABBADYCACAQQQFqIQFBGCEQDLoBCwJAIAEiBCACRw0AQZABIRAM1AILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQZfPgIAAai0AAEcNvAEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZABIRAM1AILIABBADYCACAQQQFqIQFBFyEQDLkBCwJAIAEiBCACRw0AQZEBIRAM0wILIAIgBGsgACgCACIBaiEUIAQgAWtBBmohEAJAA0AgBC0AACABQZrPgIAAai0AAEcNuwEgAUEGRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZEBIRAM0wILIABBADYCACAQQQFqIQFBFSEQDLgBCwJAIAEiBCACRw0AQZIBIRAM0gILIAIgBGsgACgCACIBaiEUIAQgAWtBBWohEAJAA0AgBC0AACABQaHPgIAAai0AAEcNugEgAUEFRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZIBIRAM0gILIABBADYCACAQQQFqIQFBHiEQDLcBCwJAIAEiBCACRw0AQZMBIRAM0QILIAQtAABBzABHDbgBIARBAWohAUEKIRAMtgELAkAgBCACRw0AQZQBIRAM0AILAkACQCAELQAAQb9/ag4PALkBuQG5AbkBuQG5AbkBuQG5AbkBuQG5AbkBAbkBCyAEQQFqIQFB/gAhEAy3AgsgBEEBaiEBQf8AIRAMtgILAkAgBCACRw0AQZUBIRAMzwILAkACQCAELQAAQb9/ag4DALgBAbgBCyAEQQFqIQFB/QAhEAy2AgsgBEEBaiEEQYABIRAMtQILAkAgBCACRw0AQZYBIRAMzgILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQafPgIAAai0AAEcNtgEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZYBIRAMzgILIABBADYCACAQQQFqIQFBCyEQDLMBCwJAIAQgAkcNAEGXASEQDM0CCwJAAkACQAJAIAQtAABBU2oOIwC4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBuAG4AbgBAbgBuAG4AbgBuAECuAG4AbgBA7gBCyAEQQFqIQFB+wAhEAy2AgsgBEEBaiEBQfwAIRAMtQILIARBAWohBEGBASEQDLQCCyAEQQFqIQRBggEhEAyzAgsCQCAEIAJHDQBBmAEhEAzMAgsgAiAEayAAKAIAIgFqIRQgBCABa0EEaiEQAkADQCAELQAAIAFBqc+AgABqLQAARw20ASABQQRGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBmAEhEAzMAgsgAEEANgIAIBBBAWohAUEZIRAMsQELAkAgBCACRw0AQZkBIRAMywILIAIgBGsgACgCACIBaiEUIAQgAWtBBWohEAJAA0AgBC0AACABQa7PgIAAai0AAEcNswEgAUEFRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZkBIRAMywILIABBADYCACAQQQFqIQFBBiEQDLABCwJAIAQgAkcNAEGaASEQDMoCCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRACQANAIAQtAAAgAUG0z4CAAGotAABHDbIBIAFBAUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGaASEQDMoCCyAAQQA2AgAgEEEBaiEBQRwhEAyvAQsCQCAEIAJHDQBBmwEhEAzJAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFBts+AgABqLQAARw2xASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBmwEhEAzJAgsgAEEANgIAIBBBAWohAUEnIRAMrgELAkAgBCACRw0AQZwBIRAMyAILAkACQCAELQAAQax/ag4CAAGxAQsgBEEBaiEEQYYBIRAMrwILIARBAWohBEGHASEQDK4CCwJAIAQgAkcNAEGdASEQDMcCCyACIARrIAAoAgAiAWohFCAEIAFrQQFqIRACQANAIAQtAAAgAUG4z4CAAGotAABHDa8BIAFBAUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGdASEQDMcCCyAAQQA2AgAgEEEBaiEBQSYhEAysAQsCQCAEIAJHDQBBngEhEAzGAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFBus+AgABqLQAARw2uASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBngEhEAzGAgsgAEEANgIAIBBBAWohAUEDIRAMqwELAkAgBCACRw0AQZ8BIRAMxQILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQe3PgIAAai0AAEcNrQEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQZ8BIRAMxQILIABBADYCACAQQQFqIQFBDCEQDKoBCwJAIAQgAkcNAEGgASEQDMQCCyACIARrIAAoAgAiAWohFCAEIAFrQQNqIRACQANAIAQtAAAgAUG8z4CAAGotAABHDawBIAFBA0YNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGgASEQDMQCCyAAQQA2AgAgEEEBaiEBQQ0hEAypAQsCQCAEIAJHDQBBoQEhEAzDAgsCQAJAIAQtAABBun9qDgsArAGsAawBrAGsAawBrAGsAawBAawBCyAEQQFqIQRBiwEhEAyqAgsgBEEBaiEEQYwBIRAMqQILAkAgBCACRw0AQaIBIRAMwgILIAQtAABB0ABHDakBIARBAWohBAzpAQsCQCAEIAJHDQBBowEhEAzBAgsCQAJAIAQtAABBt39qDgcBqgGqAaoBqgGqAQCqAQsgBEEBaiEEQY4BIRAMqAILIA
RBAWohAUEiIRAMpgELAkAgBCACRw0AQaQBIRAMwAILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQcDPgIAAai0AAEcNqAEgAUEBRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQaQBIRAMwAILIABBADYCACAQQQFqIQFBHSEQDKUBCwJAIAQgAkcNAEGlASEQDL8CCwJAAkAgBC0AAEGuf2oOAwCoAQGoAQsgBEEBaiEEQZABIRAMpgILIARBAWohAUEEIRAMpAELAkAgBCACRw0AQaYBIRAMvgILAkACQAJAAkACQCAELQAAQb9/ag4VAKoBqgGqAaoBqgGqAaoBqgGqAaoBAaoBqgECqgGqAQOqAaoBBKoBCyAEQQFqIQRBiAEhEAyoAgsgBEEBaiEEQYkBIRAMpwILIARBAWohBEGKASEQDKYCCyAEQQFqIQRBjwEhEAylAgsgBEEBaiEEQZEBIRAMpAILAkAgBCACRw0AQacBIRAMvQILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQe3PgIAAai0AAEcNpQEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQacBIRAMvQILIABBADYCACAQQQFqIQFBESEQDKIBCwJAIAQgAkcNAEGoASEQDLwCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHCz4CAAGotAABHDaQBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGoASEQDLwCCyAAQQA2AgAgEEEBaiEBQSwhEAyhAQsCQCAEIAJHDQBBqQEhEAy7AgsgAiAEayAAKAIAIgFqIRQgBCABa0EEaiEQAkADQCAELQAAIAFBxc+AgABqLQAARw2jASABQQRGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBqQEhEAy7AgsgAEEANgIAIBBBAWohAUErIRAMoAELAkAgBCACRw0AQaoBIRAMugILIAIgBGsgACgCACIBaiEUIAQgAWtBAmohEAJAA0AgBC0AACABQcrPgIAAai0AAEcNogEgAUECRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQaoBIRAMugILIABBADYCACAQQQFqIQFBFCEQDJ8BCwJAIAQgAkcNAEGrASEQDLkCCwJAAkACQAJAIAQtAABBvn9qDg8AAQKkAaQBpAGkAaQBpAGkAaQBpAGkAaQBA6QBCyAEQQFqIQRBkwEhEAyiAgsgBEEBaiEEQZQBIRAMoQILIARBAWohBEGVASEQDKACCyAEQQFqIQRBlgEhEAyfAgsCQCAEIAJHDQBBrAEhEAy4AgsgBC0AAEHFAEcNnwEgBEEBaiEEDOABCwJAIAQgAkcNAEGtASEQDLcCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHNz4CAAGotAABHDZ8BIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEGtASEQDLcCCyAAQQA2AgAgEEEBaiEBQQ4hEAycAQsCQCAEIAJHDQBBrgEhEAy2AgsgBC0AAEHQAEcNnQEgBEEBaiEBQSUhEAybAQsCQCAEIAJHDQBBrwEhEAy1AgsgAiAEayAAKAIAIgFqIRQgBCABa0EIaiEQAkADQCAELQAAIAFB0M+AgABqLQAARw2dASABQQhGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBrwEhEAy1AgsgAEEANgIAIBBBAWohAUEqIRAMmgELAkAgBCACRw0AQbABIRAMtAILAkACQCAELQAAQat/ag4LAJ0BnQGdAZ0BnQGdAZ0BnQGdAQGdAQsgBEEBaiEEQZoBIRAMmwILIARBAWohBEGbASEQDJoCCwJAIAQgAkcNAEGxASEQDLMCCwJAAkAgBC0AAEG/f2oOFACcAZwBnAGcAZwBnAGcAZwBnAGcAZwBnAGcAZwBnAGcAZwBnAEBnAELIARBAWohBEGZASEQDJoCCyAEQQFqIQRBnAEhEAyZAgsCQCAEIAJHDQBBsgEhEAyyAgsgAiAEayAAKAIAIgFqIRQgBCABa0EDaiEQAkADQCAELQAAIAFB2c+AgABqLQAARw2aASABQQNGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBsgEhEAyyAgsgAEEANgIAIBBBAWohAUEhIRAMlwELAkAgBCACRw0AQbMBIRAMsQILIAIgBGsgACgCACIBaiEUIAQgAWtBBmohEAJAA0AgBC0AACABQd3PgIAAai0AAEcNmQEgAUEGRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQbMBIRAMsQILIABBADYCACAQQQFqIQFBGiEQDJYBCwJAIAQgAkcNAEG0ASEQDLACCwJAAkACQCAELQAAQbt/ag4RAJoBmgGaAZoBmgGaAZoBmgGaAQGaAZoBmgGaAZoBApoBCyAEQQFqIQRBnQEhEAyYAgsgBEEBaiEEQZ4BIRAMlwILIARBAWohBEGfASEQDJYCCwJAIAQgAkcNAEG1ASEQDK8CCyACIARrIAAoAgAiAWohFCAEIAFrQQVqIRACQANAIAQtAAAgAUHkz4CAAGotAABHDZcBIAFBBUYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEG1ASEQDK8CCyAAQQA2AgAgEEEBaiEBQSghEAyUAQsCQCAEIAJHDQBBtgEhEAyuAgsgAiAEayAAKAIAIgFqIRQgBCABa0ECaiEQAkADQCAELQAAIAFB6s+AgABqLQAARw2WASABQQJGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBtgEhEAyuAgsgAEEANgIAIBBBAWohAUEHIRAMkwELAkAgBCACRw0AQbcBIRAMrQILAkACQCAELQAAQbt/ag4OAJYBlgGWAZYBlgGWAZYBlgGWAZYBlgGWAQGWAQsgBEEBaiEEQaEBIRAMlAILIARBAWohBEGiASEQDJMCCwJAIAQgAkcNAEG4ASEQDKwCCyACIARrIAAoAgAiAWohFCAEIAFrQQJqIRACQANAIAQtAAAgAUHtz4CAAGotAABHDZQBIAFBAkYNASABQQFqIQEgBEEBaiIEIAJHDQALIAAgFDYCAEG4ASEQDKwCCyAAQQA2AgAgEEEBaiEBQRIhEAyRAQsCQCAEIAJHDQBBuQEhEAyrAgsgAiAEayAAKAIAIgFqIRQgBCABa0EBaiEQAkADQCAELQAAIAFB8M+AgABqLQAARw2TASABQQFGDQEgAUEBaiEBIARBAWoiBCACRw0ACyAAIBQ2AgBBuQEhEAyrAgsgAEEANgIAIBBBAWohAUEgIRAMkAELAkAgBCACRw0AQboBIRAMqgILIAIgBGsgACgCACIBaiEUIAQgAWtBAWohEAJAA0AgBC0AACABQfLPgIAAai0AAEcNkgEgAUEBR
g0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQboBIRAMqgILIABBADYCACAQQQFqIQFBDyEQDI8BCwJAIAQgAkcNAEG7ASEQDKkCCwJAAkAgBC0AAEG3f2oOBwCSAZIBkgGSAZIBAZIBCyAEQQFqIQRBpQEhEAyQAgsgBEEBaiEEQaYBIRAMjwILAkAgBCACRw0AQbwBIRAMqAILIAIgBGsgACgCACIBaiEUIAQgAWtBB2ohEAJAA0AgBC0AACABQfTPgIAAai0AAEcNkAEgAUEHRg0BIAFBAWohASAEQQFqIgQgAkcNAAsgACAUNgIAQbwBIRAMqAILIABBADYCACAQQQFqIQFBGyEQDI0BCwJAIAQgAkcNAEG9ASEQDKcCCwJAAkACQCAELQAAQb5/ag4SAJEBkQGRAZEBkQGRAZEBkQGRAQGRAZEBkQGRAZEBkQECkQELIARBAWohBEGkASEQDI8CCyAEQQFqIQRBpwEhEAyOAgsgBEEBaiEEQagBIRAMjQILAkAgBCACRw0AQb4BIRAMpgILIAQtAABBzgBHDY0BIARBAWohBAzPAQsCQCAEIAJHDQBBvwEhEAylAgsCQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAELQAAQb9/ag4VAAECA5wBBAUGnAGcAZwBBwgJCgucAQwNDg+cAQsgBEEBaiEBQegAIRAMmgILIARBAWohAUHpACEQDJkCCyAEQQFqIQFB7gAhEAyYAgsgBEEBaiEBQfIAIRAMlwILIARBAWohAUHzACEQDJYCCyAEQQFqIQFB9gAhEAyVAgsgBEEBaiEBQfcAIRAMlAILIARBAWohAUH6ACEQDJMCCyAEQQFqIQRBgwEhEAySAgsgBEEBaiEEQYQBIRAMkQILIARBAWohBEGFASEQDJACCyAEQQFqIQRBkgEhEAyPAgsgBEEBaiEEQZgBIRAMjgILIARBAWohBEGgASEQDI0CCyAEQQFqIQRBowEhEAyMAgsgBEEBaiEEQaoBIRAMiwILAkAgBCACRg0AIABBkICAgAA2AgggACAENgIEQasBIRAMiwILQcABIRAMowILIAAgBSACEKqAgIAAIgENiwEgBSEBDFwLAkAgBiACRg0AIAZBAWohBQyNAQtBwgEhEAyhAgsDQAJAIBAtAABBdmoOBIwBAACPAQALIBBBAWoiECACRw0AC0HDASEQDKACCwJAIAcgAkYNACAAQZGAgIAANgIIIAAgBzYCBCAHIQFBASEQDIcCC0HEASEQDJ8CCwJAIAcgAkcNAEHFASEQDJ8CCwJAAkAgBy0AAEF2ag4EAc4BzgEAzgELIAdBAWohBgyNAQsgB0EBaiEFDIkBCwJAIAcgAkcNAEHGASEQDJ4CCwJAAkAgBy0AAEF2ag4XAY8BjwEBjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BAI8BCyAHQQFqIQcLQbABIRAMhAILAkAgCCACRw0AQcgBIRAMnQILIAgtAABBIEcNjQEgAEEAOwEyIAhBAWohAUGzASEQDIMCCyABIRcCQANAIBciByACRg0BIActAABBUGpB/wFxIhBBCk8NzAECQCAALwEyIhRBmTNLDQAgACAUQQpsIhQ7ATIgEEH//wNzIBRB/v8DcUkNACAHQQFqIRcgACAUIBBqIhA7ATIgEEH//wNxQegHSQ0BCwtBACEQIABBADYCHCAAQcGJgIAANgIQIABBDTYCDCAAIAdBAWo2AhQMnAILQccBIRAMmwILIAAgCCACEK6AgIAAIhBFDcoBIBBBFUcNjAEgAEHIATYCHCAAIAg2AhQgAEHJl4CAADYCECAAQRU2AgxBACEQDJoCCwJAIAkgAkcNAEHMASEQDJoCC0EAIRRBASEXQQEhFkEAIRACQAJAAkACQAJAAkACQAJAAkAgCS0AAEFQag4KlgGVAQABAgMEBQYIlwELQQIhEAwGC0EDIRAMBQtBBCEQDAQLQQUhEAwDC0EGIRAMAgtBByEQDAELQQghEAtBACEXQQAhFkEAIRQMjgELQQkhEEEBIRRBACEXQQAhFgyNAQsCQCAKIAJHDQBBzgEhEAyZAgsgCi0AAEEuRw2OASAKQQFqIQkMygELIAsgAkcNjgFB0AEhEAyXAgsCQCALIAJGDQAgAEGOgICAADYCCCAAIAs2AgRBtwEhEAz+AQtB0QEhEAyWAgsCQCAEIAJHDQBB0gEhEAyWAgsgAiAEayAAKAIAIhBqIRQgBCAQa0EEaiELA0AgBC0AACAQQfzPgIAAai0AAEcNjgEgEEEERg3pASAQQQFqIRAgBEEBaiIEIAJHDQALIAAgFDYCAEHSASEQDJUCCyAAIAwgAhCsgICAACIBDY0BIAwhAQy4AQsCQCAEIAJHDQBB1AEhEAyUAgsgAiAEayAAKAIAIhBqIRQgBCAQa0EBaiEMA0AgBC0AACAQQYHQgIAAai0AAEcNjwEgEEEBRg2OASAQQQFqIRAgBEEBaiIEIAJHDQALIAAgFDYCAEHUASEQDJMCCwJAIAQgAkcNAEHWASEQDJMCCyACIARrIAAoAgAiEGohFCAEIBBrQQJqIQsDQCAELQAAIBBBg9CAgABqLQAARw2OASAQQQJGDZABIBBBAWohECAEQQFqIgQgAkcNAAsgACAUNgIAQdYBIRAMkgILAkAgBCACRw0AQdcBIRAMkgILAkACQCAELQAAQbt/ag4QAI8BjwGPAY8BjwGPAY8BjwGPAY8BjwGPAY8BjwEBjwELIARBAWohBEG7ASEQDPkBCyAEQQFqIQRBvAEhEAz4AQsCQCAEIAJHDQBB2AEhEAyRAgsgBC0AAEHIAEcNjAEgBEEBaiEEDMQBCwJAIAQgAkYNACAAQZCAgIAANgIIIAAgBDYCBEG+ASEQDPcBC0HZASEQDI8CCwJAIAQgAkcNAEHaASEQDI8CCyAELQAAQcgARg3DASAAQQE6ACgMuQELIABBAjoALyAAIAQgAhCmgICAACIQDY0BQcIBIRAM9AELIAAtAChBf2oOArcBuQG4AQsDQAJAIAQtAABBdmoOBACOAY4BAI4BCyAEQQFqIgQgAkcNAAtB3QEhEAyLAgsgAEEAOgAvIAAtAC1BBHFFDYQCCyAAQQA6AC8gAEEBOgA0IAEhAQyMAQsgEEEVRg3aASAAQQA2AhwgACABNgIUIABBp46AgAA2AhAgAEESNgIMQQAhEAyIAgsCQCAAIBAgAhC0gICAACIEDQAgECEBDIECCwJAIARBFUcNACAAQQM2AhwgACAQNgIUIABBsJiAgAA2AhAgAEEVNgIMQQAhEAyIAgsgAEEANgIcIAAgEDYCFCAAQaeOgIAANgIQIABBEjYCDEEAIRAMhwILIBBBFUYN1gEgAEEANgIcIAAgATYCFCAAQdqNgIAANgIQIABBFDYCDEEAIRAMhgILIAAoAgQhFyAAQQA2AgQgECARp2oiFiEBIAAgFyAQIBYgFBsiEBC1gICAACIURQ2NASAAQQc2AhwgACAQNgIU
IAAgFDYCDEEAIRAMhQILIAAgAC8BMEGAAXI7ATAgASEBC0EqIRAM6gELIBBBFUYN0QEgAEEANgIcIAAgATYCFCAAQYOMgIAANgIQIABBEzYCDEEAIRAMggILIBBBFUYNzwEgAEEANgIcIAAgATYCFCAAQZqPgIAANgIQIABBIjYCDEEAIRAMgQILIAAoAgQhECAAQQA2AgQCQCAAIBAgARC3gICAACIQDQAgAUEBaiEBDI0BCyAAQQw2AhwgACAQNgIMIAAgAUEBajYCFEEAIRAMgAILIBBBFUYNzAEgAEEANgIcIAAgATYCFCAAQZqPgIAANgIQIABBIjYCDEEAIRAM/wELIAAoAgQhECAAQQA2AgQCQCAAIBAgARC3gICAACIQDQAgAUEBaiEBDIwBCyAAQQ02AhwgACAQNgIMIAAgAUEBajYCFEEAIRAM/gELIBBBFUYNyQEgAEEANgIcIAAgATYCFCAAQcaMgIAANgIQIABBIzYCDEEAIRAM/QELIAAoAgQhECAAQQA2AgQCQCAAIBAgARC5gICAACIQDQAgAUEBaiEBDIsBCyAAQQ42AhwgACAQNgIMIAAgAUEBajYCFEEAIRAM/AELIABBADYCHCAAIAE2AhQgAEHAlYCAADYCECAAQQI2AgxBACEQDPsBCyAQQRVGDcUBIABBADYCHCAAIAE2AhQgAEHGjICAADYCECAAQSM2AgxBACEQDPoBCyAAQRA2AhwgACABNgIUIAAgEDYCDEEAIRAM+QELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARC5gICAACIEDQAgAUEBaiEBDPEBCyAAQRE2AhwgACAENgIMIAAgAUEBajYCFEEAIRAM+AELIBBBFUYNwQEgAEEANgIcIAAgATYCFCAAQcaMgIAANgIQIABBIzYCDEEAIRAM9wELIAAoAgQhECAAQQA2AgQCQCAAIBAgARC5gICAACIQDQAgAUEBaiEBDIgBCyAAQRM2AhwgACAQNgIMIAAgAUEBajYCFEEAIRAM9gELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARC5gICAACIEDQAgAUEBaiEBDO0BCyAAQRQ2AhwgACAENgIMIAAgAUEBajYCFEEAIRAM9QELIBBBFUYNvQEgAEEANgIcIAAgATYCFCAAQZqPgIAANgIQIABBIjYCDEEAIRAM9AELIAAoAgQhECAAQQA2AgQCQCAAIBAgARC3gICAACIQDQAgAUEBaiEBDIYBCyAAQRY2AhwgACAQNgIMIAAgAUEBajYCFEEAIRAM8wELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARC3gICAACIEDQAgAUEBaiEBDOkBCyAAQRc2AhwgACAENgIMIAAgAUEBajYCFEEAIRAM8gELIABBADYCHCAAIAE2AhQgAEHNk4CAADYCECAAQQw2AgxBACEQDPEBC0IBIRELIBBBAWohAQJAIAApAyAiEkL//////////w9WDQAgACASQgSGIBGENwMgIAEhAQyEAQsgAEEANgIcIAAgATYCFCAAQa2JgIAANgIQIABBDDYCDEEAIRAM7wELIABBADYCHCAAIBA2AhQgAEHNk4CAADYCECAAQQw2AgxBACEQDO4BCyAAKAIEIRcgAEEANgIEIBAgEadqIhYhASAAIBcgECAWIBQbIhAQtYCAgAAiFEUNcyAAQQU2AhwgACAQNgIUIAAgFDYCDEEAIRAM7QELIABBADYCHCAAIBA2AhQgAEGqnICAADYCECAAQQ82AgxBACEQDOwBCyAAIBAgAhC0gICAACIBDQEgECEBC0EOIRAM0QELAkAgAUEVRw0AIABBAjYCHCAAIBA2AhQgAEGwmICAADYCECAAQRU2AgxBACEQDOoBCyAAQQA2AhwgACAQNgIUIABBp46AgAA2AhAgAEESNgIMQQAhEAzpAQsgAUEBaiEQAkAgAC8BMCIBQYABcUUNAAJAIAAgECACELuAgIAAIgENACAQIQEMcAsgAUEVRw26ASAAQQU2AhwgACAQNgIUIABB+ZeAgAA2AhAgAEEVNgIMQQAhEAzpAQsCQCABQaAEcUGgBEcNACAALQAtQQJxDQAgAEEANgIcIAAgEDYCFCAAQZaTgIAANgIQIABBBDYCDEEAIRAM6QELIAAgECACEL2AgIAAGiAQIQECQAJAAkACQAJAIAAgECACELOAgIAADhYCAQAEBAQEBAQEBAQEBAQEBAQEBAQDBAsgAEEBOgAuCyAAIAAvATBBwAByOwEwIBAhAQtBJiEQDNEBCyAAQSM2AhwgACAQNgIUIABBpZaAgAA2AhAgAEEVNgIMQQAhEAzpAQsgAEEANgIcIAAgEDYCFCAAQdWLgIAANgIQIABBETYCDEEAIRAM6AELIAAtAC1BAXFFDQFBwwEhEAzOAQsCQCANIAJGDQADQAJAIA0tAABBIEYNACANIQEMxAELIA1BAWoiDSACRw0AC0ElIRAM5wELQSUhEAzmAQsgACgCBCEEIABBADYCBCAAIAQgDRCvgICAACIERQ2tASAAQSY2AhwgACAENgIMIAAgDUEBajYCFEEAIRAM5QELIBBBFUYNqwEgAEEANgIcIAAgATYCFCAAQf2NgIAANgIQIABBHTYCDEEAIRAM5AELIABBJzYCHCAAIAE2AhQgACAQNgIMQQAhEAzjAQsgECEBQQEhFAJAAkACQAJAAkACQAJAIAAtACxBfmoOBwYFBQMBAgAFCyAAIAAvATBBCHI7ATAMAwtBAiEUDAELQQQhFAsgAEEBOgAsIAAgAC8BMCAUcjsBMAsgECEBC0ErIRAMygELIABBADYCHCAAIBA2AhQgAEGrkoCAADYCECAAQQs2AgxBACEQDOIBCyAAQQA2AhwgACABNgIUIABB4Y+AgAA2AhAgAEEKNgIMQQAhEAzhAQsgAEEAOgAsIBAhAQy9AQsgECEBQQEhFAJAAkACQAJAAkAgAC0ALEF7ag4EAwECAAULIAAgAC8BMEEIcjsBMAwDC0ECIRQMAQtBBCEUCyAAQQE6ACwgACAALwEwIBRyOwEwCyAQIQELQSkhEAzFAQsgAEEANgIcIAAgATYCFCAAQfCUgIAANgIQIABBAzYCDEEAIRAM3QELAkAgDi0AAEENRw0AIAAoAgQhASAAQQA2AgQCQCAAIAEgDhCxgICAACIBDQAgDkEBaiEBDHULIABBLDYCHCAAIAE2AgwgACAOQQFqNgIUQQAhEAzdAQsgAC0ALUEBcUUNAUHEASEQDMMBCwJAIA4gAkcNAEEtIRAM3AELAkACQANAAkAgDi0AAEF2ag4EAgAAAwALIA5BAWoiDiACRw0AC0EtIRAM3QELIAAoAgQhASAAQQA2AgQCQCAAIAEgDhCxgICAACIBDQAgDiEBDHQLIABBLDYCHCAAIA42AhQgACABNgIMQQAhEAzcAQsgACgCBCEBIABBADYCBAJAIAAgASAOELGAgIAAIgENACAOQQFqIQEMcwsgAEEsNgIcIAAgATYCDCAAIA5BAWo2AhRBACEQDNsBCyAAKAI
EIQQgAEEANgIEIAAgBCAOELGAgIAAIgQNoAEgDiEBDM4BCyAQQSxHDQEgAUEBaiEQQQEhAQJAAkACQAJAAkAgAC0ALEF7ag4EAwECBAALIBAhAQwEC0ECIQEMAQtBBCEBCyAAQQE6ACwgACAALwEwIAFyOwEwIBAhAQwBCyAAIAAvATBBCHI7ATAgECEBC0E5IRAMvwELIABBADoALCABIQELQTQhEAy9AQsgACAALwEwQSByOwEwIAEhAQwCCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQsYCAgAAiBA0AIAEhAQzHAQsgAEE3NgIcIAAgATYCFCAAIAQ2AgxBACEQDNQBCyAAQQg6ACwgASEBC0EwIRAMuQELAkAgAC0AKEEBRg0AIAEhAQwECyAALQAtQQhxRQ2TASABIQEMAwsgAC0AMEEgcQ2UAUHFASEQDLcBCwJAIA8gAkYNAAJAA0ACQCAPLQAAQVBqIgFB/wFxQQpJDQAgDyEBQTUhEAy6AQsgACkDICIRQpmz5syZs+bMGVYNASAAIBFCCn4iETcDICARIAGtQv8BgyISQn+FVg0BIAAgESASfDcDICAPQQFqIg8gAkcNAAtBOSEQDNEBCyAAKAIEIQIgAEEANgIEIAAgAiAPQQFqIgQQsYCAgAAiAg2VASAEIQEMwwELQTkhEAzPAQsCQCAALwEwIgFBCHFFDQAgAC0AKEEBRw0AIAAtAC1BCHFFDZABCyAAIAFB9/sDcUGABHI7ATAgDyEBC0E3IRAMtAELIAAgAC8BMEEQcjsBMAyrAQsgEEEVRg2LASAAQQA2AhwgACABNgIUIABB8I6AgAA2AhAgAEEcNgIMQQAhEAzLAQsgAEHDADYCHCAAIAE2AgwgACANQQFqNgIUQQAhEAzKAQsCQCABLQAAQTpHDQAgACgCBCEQIABBADYCBAJAIAAgECABEK+AgIAAIhANACABQQFqIQEMYwsgAEHDADYCHCAAIBA2AgwgACABQQFqNgIUQQAhEAzKAQsgAEEANgIcIAAgATYCFCAAQbGRgIAANgIQIABBCjYCDEEAIRAMyQELIABBADYCHCAAIAE2AhQgAEGgmYCAADYCECAAQR42AgxBACEQDMgBCyAAQQA2AgALIABBgBI7ASogACAXQQFqIgEgAhCogICAACIQDQEgASEBC0HHACEQDKwBCyAQQRVHDYMBIABB0QA2AhwgACABNgIUIABB45eAgAA2AhAgAEEVNgIMQQAhEAzEAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMXgsgAEHSADYCHCAAIAE2AhQgACAQNgIMQQAhEAzDAQsgAEEANgIcIAAgFDYCFCAAQcGogIAANgIQIABBBzYCDCAAQQA2AgBBACEQDMIBCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxdCyAAQdMANgIcIAAgATYCFCAAIBA2AgxBACEQDMEBC0EAIRAgAEEANgIcIAAgATYCFCAAQYCRgIAANgIQIABBCTYCDAzAAQsgEEEVRg19IABBADYCHCAAIAE2AhQgAEGUjYCAADYCECAAQSE2AgxBACEQDL8BC0EBIRZBACEXQQAhFEEBIRALIAAgEDoAKyABQQFqIQECQAJAIAAtAC1BEHENAAJAAkACQCAALQAqDgMBAAIECyAWRQ0DDAILIBQNAQwCCyAXRQ0BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQrYCAgAAiEA0AIAEhAQxcCyAAQdgANgIcIAAgATYCFCAAIBA2AgxBACEQDL4BCyAAKAIEIQQgAEEANgIEAkAgACAEIAEQrYCAgAAiBA0AIAEhAQytAQsgAEHZADYCHCAAIAE2AhQgACAENgIMQQAhEAy9AQsgACgCBCEEIABBADYCBAJAIAAgBCABEK2AgIAAIgQNACABIQEMqwELIABB2gA2AhwgACABNgIUIAAgBDYCDEEAIRAMvAELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARCtgICAACIEDQAgASEBDKkBCyAAQdwANgIcIAAgATYCFCAAIAQ2AgxBACEQDLsBCwJAIAEtAABBUGoiEEH/AXFBCk8NACAAIBA6ACogAUEBaiEBQc8AIRAMogELIAAoAgQhBCAAQQA2AgQCQCAAIAQgARCtgICAACIEDQAgASEBDKcBCyAAQd4ANgIcIAAgATYCFCAAIAQ2AgxBACEQDLoBCyAAQQA2AgAgF0EBaiEBAkAgAC0AKUEjTw0AIAEhAQxZCyAAQQA2AhwgACABNgIUIABB04mAgAA2AhAgAEEINgIMQQAhEAy5AQsgAEEANgIAC0EAIRAgAEEANgIcIAAgATYCFCAAQZCzgIAANgIQIABBCDYCDAy3AQsgAEEANgIAIBdBAWohAQJAIAAtAClBIUcNACABIQEMVgsgAEEANgIcIAAgATYCFCAAQZuKgIAANgIQIABBCDYCDEEAIRAMtgELIABBADYCACAXQQFqIQECQCAALQApIhBBXWpBC08NACABIQEMVQsCQCAQQQZLDQBBASAQdEHKAHFFDQAgASEBDFULQQAhECAAQQA2AhwgACABNgIUIABB94mAgAA2AhAgAEEINgIMDLUBCyAQQRVGDXEgAEEANgIcIAAgATYCFCAAQbmNgIAANgIQIABBGjYCDEEAIRAMtAELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDFQLIABB5QA2AhwgACABNgIUIAAgEDYCDEEAIRAMswELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDE0LIABB0gA2AhwgACABNgIUIAAgEDYCDEEAIRAMsgELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDE0LIABB0wA2AhwgACABNgIUIAAgEDYCDEEAIRAMsQELIAAoAgQhECAAQQA2AgQCQCAAIBAgARCngICAACIQDQAgASEBDFELIABB5QA2AhwgACABNgIUIAAgEDYCDEEAIRAMsAELIABBADYCHCAAIAE2AhQgAEHGioCAADYCECAAQQc2AgxBACEQDK8BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxJCyAAQdIANgIcIAAgATYCFCAAIBA2AgxBACEQDK4BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxJCyAAQdMANgIcIAAgATYCFCAAIBA2AgxBACEQDK0BCyAAKAIEIRAgAEEANgIEAkAgACAQIAEQp4CAgAAiEA0AIAEhAQxNCyAAQeUANgIcIAAgATYCFCAAIBA2AgxBACEQDKwBCyAAQQA2AhwgACABNgIUIABB3IiAgAA2AhAgAEEHNgIMQQAhEAyrAQsgEEE/Rw0BIAFBAWohAQtBBSEQDJABC0EAIRAgAEEANgIcIAAgATYCFCAAQf2SgIAANgIQIABBBzYCDA
yoAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMQgsgAEHSADYCHCAAIAE2AhQgACAQNgIMQQAhEAynAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMQgsgAEHTADYCHCAAIAE2AhQgACAQNgIMQQAhEAymAQsgACgCBCEQIABBADYCBAJAIAAgECABEKeAgIAAIhANACABIQEMRgsgAEHlADYCHCAAIAE2AhQgACAQNgIMQQAhEAylAQsgACgCBCEBIABBADYCBAJAIAAgASAUEKeAgIAAIgENACAUIQEMPwsgAEHSADYCHCAAIBQ2AhQgACABNgIMQQAhEAykAQsgACgCBCEBIABBADYCBAJAIAAgASAUEKeAgIAAIgENACAUIQEMPwsgAEHTADYCHCAAIBQ2AhQgACABNgIMQQAhEAyjAQsgACgCBCEBIABBADYCBAJAIAAgASAUEKeAgIAAIgENACAUIQEMQwsgAEHlADYCHCAAIBQ2AhQgACABNgIMQQAhEAyiAQsgAEEANgIcIAAgFDYCFCAAQcOPgIAANgIQIABBBzYCDEEAIRAMoQELIABBADYCHCAAIAE2AhQgAEHDj4CAADYCECAAQQc2AgxBACEQDKABC0EAIRAgAEEANgIcIAAgFDYCFCAAQYycgIAANgIQIABBBzYCDAyfAQsgAEEANgIcIAAgFDYCFCAAQYycgIAANgIQIABBBzYCDEEAIRAMngELIABBADYCHCAAIBQ2AhQgAEH+kYCAADYCECAAQQc2AgxBACEQDJ0BCyAAQQA2AhwgACABNgIUIABBjpuAgAA2AhAgAEEGNgIMQQAhEAycAQsgEEEVRg1XIABBADYCHCAAIAE2AhQgAEHMjoCAADYCECAAQSA2AgxBACEQDJsBCyAAQQA2AgAgEEEBaiEBQSQhEAsgACAQOgApIAAoAgQhECAAQQA2AgQgACAQIAEQq4CAgAAiEA1UIAEhAQw+CyAAQQA2AgALQQAhECAAQQA2AhwgACAENgIUIABB8ZuAgAA2AhAgAEEGNgIMDJcBCyABQRVGDVAgAEEANgIcIAAgBTYCFCAAQfCMgIAANgIQIABBGzYCDEEAIRAMlgELIAAoAgQhBSAAQQA2AgQgACAFIBAQqYCAgAAiBQ0BIBBBAWohBQtBrQEhEAx7CyAAQcEBNgIcIAAgBTYCDCAAIBBBAWo2AhRBACEQDJMBCyAAKAIEIQYgAEEANgIEIAAgBiAQEKmAgIAAIgYNASAQQQFqIQYLQa4BIRAMeAsgAEHCATYCHCAAIAY2AgwgACAQQQFqNgIUQQAhEAyQAQsgAEEANgIcIAAgBzYCFCAAQZeLgIAANgIQIABBDTYCDEEAIRAMjwELIABBADYCHCAAIAg2AhQgAEHjkICAADYCECAAQQk2AgxBACEQDI4BCyAAQQA2AhwgACAINgIUIABBlI2AgAA2AhAgAEEhNgIMQQAhEAyNAQtBASEWQQAhF0EAIRRBASEQCyAAIBA6ACsgCUEBaiEIAkACQCAALQAtQRBxDQACQAJAAkAgAC0AKg4DAQACBAsgFkUNAwwCCyAUDQEMAgsgF0UNAQsgACgCBCEQIABBADYCBCAAIBAgCBCtgICAACIQRQ09IABByQE2AhwgACAINgIUIAAgEDYCDEEAIRAMjAELIAAoAgQhBCAAQQA2AgQgACAEIAgQrYCAgAAiBEUNdiAAQcoBNgIcIAAgCDYCFCAAIAQ2AgxBACEQDIsBCyAAKAIEIQQgAEEANgIEIAAgBCAJEK2AgIAAIgRFDXQgAEHLATYCHCAAIAk2AhQgACAENgIMQQAhEAyKAQsgACgCBCEEIABBADYCBCAAIAQgChCtgICAACIERQ1yIABBzQE2AhwgACAKNgIUIAAgBDYCDEEAIRAMiQELAkAgCy0AAEFQaiIQQf8BcUEKTw0AIAAgEDoAKiALQQFqIQpBtgEhEAxwCyAAKAIEIQQgAEEANgIEIAAgBCALEK2AgIAAIgRFDXAgAEHPATYCHCAAIAs2AhQgACAENgIMQQAhEAyIAQsgAEEANgIcIAAgBDYCFCAAQZCzgIAANgIQIABBCDYCDCAAQQA2AgBBACEQDIcBCyABQRVGDT8gAEEANgIcIAAgDDYCFCAAQcyOgIAANgIQIABBIDYCDEEAIRAMhgELIABBgQQ7ASggACgCBCEQIABCADcDACAAIBAgDEEBaiIMEKuAgIAAIhBFDTggAEHTATYCHCAAIAw2AhQgACAQNgIMQQAhEAyFAQsgAEEANgIAC0EAIRAgAEEANgIcIAAgBDYCFCAAQdibgIAANgIQIABBCDYCDAyDAQsgACgCBCEQIABCADcDACAAIBAgC0EBaiILEKuAgIAAIhANAUHGASEQDGkLIABBAjoAKAxVCyAAQdUBNgIcIAAgCzYCFCAAIBA2AgxBACEQDIABCyAQQRVGDTcgAEEANgIcIAAgBDYCFCAAQaSMgIAANgIQIABBEDYCDEEAIRAMfwsgAC0ANEEBRw00IAAgBCACELyAgIAAIhBFDTQgEEEVRw01IABB3AE2AhwgACAENgIUIABB1ZaAgAA2AhAgAEEVNgIMQQAhEAx+C0EAIRAgAEEANgIcIABBr4uAgAA2AhAgAEECNgIMIAAgFEEBajYCFAx9C0EAIRAMYwtBAiEQDGILQQ0hEAxhC0EPIRAMYAtBJSEQDF8LQRMhEAxeC0EVIRAMXQtBFiEQDFwLQRchEAxbC0EYIRAMWgtBGSEQDFkLQRohEAxYC0EbIRAMVwtBHCEQDFYLQR0hEAxVC0EfIRAMVAtBISEQDFMLQSMhEAxSC0HGACEQDFELQS4hEAxQC0EvIRAMTwtBOyEQDE4LQT0hEAxNC0HIACEQDEwLQckAIRAMSwtBywAhEAxKC0HMACEQDEkLQc4AIRAMSAtB0QAhEAxHC0HVACEQDEYLQdgAIRAMRQtB2QAhEAxEC0HbACEQDEMLQeQAIRAMQgtB5QAhEAxBC0HxACEQDEALQfQAIRAMPwtBjQEhEAw+C0GXASEQDD0LQakBIRAMPAtBrAEhEAw7C0HAASEQDDoLQbkBIRAMOQtBrwEhEAw4C0GxASEQDDcLQbIBIRAMNgtBtAEhEAw1C0G1ASEQDDQLQboBIRAMMwtBvQEhEAwyC0G/ASEQDDELQcEBIRAMMAsgAEEANgIcIAAgBDYCFCAAQemLgIAANgIQIABBHzYCDEEAIRAMSAsgAEHbATYCHCAAIAQ2AhQgAEH6loCAADYCECAAQRU2AgxBACEQDEcLIABB+AA2AhwgACAMNgIUIABBypiAgAA2AhAgAEEVNgIMQQAhEAxGCyAAQdEANgIcIAAgBTYCFCAAQbCXgIAANgIQIABBFTYCDEEAIRAMRQsgAEH5ADYCHCAAIAE2AhQgACAQNgIMQQAhEAxECyAAQfgANgIcIAAgATYCFCAAQcqYgIAANgIQIABBFTYCDEEAIRAMQwsgAEHkADYCHCAAI
AE2AhQgAEHjl4CAADYCECAAQRU2AgxBACEQDEILIABB1wA2AhwgACABNgIUIABByZeAgAA2AhAgAEEVNgIMQQAhEAxBCyAAQQA2AhwgACABNgIUIABBuY2AgAA2AhAgAEEaNgIMQQAhEAxACyAAQcIANgIcIAAgATYCFCAAQeOYgIAANgIQIABBFTYCDEEAIRAMPwsgAEEANgIEIAAgDyAPELGAgIAAIgRFDQEgAEE6NgIcIAAgBDYCDCAAIA9BAWo2AhRBACEQDD4LIAAoAgQhBCAAQQA2AgQCQCAAIAQgARCxgICAACIERQ0AIABBOzYCHCAAIAQ2AgwgACABQQFqNgIUQQAhEAw+CyABQQFqIQEMLQsgD0EBaiEBDC0LIABBADYCHCAAIA82AhQgAEHkkoCAADYCECAAQQQ2AgxBACEQDDsLIABBNjYCHCAAIAQ2AhQgACACNgIMQQAhEAw6CyAAQS42AhwgACAONgIUIAAgBDYCDEEAIRAMOQsgAEHQADYCHCAAIAE2AhQgAEGRmICAADYCECAAQRU2AgxBACEQDDgLIA1BAWohAQwsCyAAQRU2AhwgACABNgIUIABBgpmAgAA2AhAgAEEVNgIMQQAhEAw2CyAAQRs2AhwgACABNgIUIABBkZeAgAA2AhAgAEEVNgIMQQAhEAw1CyAAQQ82AhwgACABNgIUIABBkZeAgAA2AhAgAEEVNgIMQQAhEAw0CyAAQQs2AhwgACABNgIUIABBkZeAgAA2AhAgAEEVNgIMQQAhEAwzCyAAQRo2AhwgACABNgIUIABBgpmAgAA2AhAgAEEVNgIMQQAhEAwyCyAAQQs2AhwgACABNgIUIABBgpmAgAA2AhAgAEEVNgIMQQAhEAwxCyAAQQo2AhwgACABNgIUIABB5JaAgAA2AhAgAEEVNgIMQQAhEAwwCyAAQR42AhwgACABNgIUIABB+ZeAgAA2AhAgAEEVNgIMQQAhEAwvCyAAQQA2AhwgACAQNgIUIABB2o2AgAA2AhAgAEEUNgIMQQAhEAwuCyAAQQQ2AhwgACABNgIUIABBsJiAgAA2AhAgAEEVNgIMQQAhEAwtCyAAQQA2AgAgC0EBaiELC0G4ASEQDBILIABBADYCACAQQQFqIQFB9QAhEAwRCyABIQECQCAALQApQQVHDQBB4wAhEAwRC0HiACEQDBALQQAhECAAQQA2AhwgAEHkkYCAADYCECAAQQc2AgwgACAUQQFqNgIUDCgLIABBADYCACAXQQFqIQFBwAAhEAwOC0EBIQELIAAgAToALCAAQQA2AgAgF0EBaiEBC0EoIRAMCwsgASEBC0E4IRAMCQsCQCABIg8gAkYNAANAAkAgDy0AAEGAvoCAAGotAAAiAUEBRg0AIAFBAkcNAyAPQQFqIQEMBAsgD0EBaiIPIAJHDQALQT4hEAwiC0E+IRAMIQsgAEEAOgAsIA8hAQwBC0ELIRAMBgtBOiEQDAULIAFBAWohAUEtIRAMBAsgACABOgAsIABBADYCACAWQQFqIQFBDCEQDAMLIABBADYCACAXQQFqIQFBCiEQDAILIABBADYCAAsgAEEAOgAsIA0hAUEJIRAMAAsLQQAhECAAQQA2AhwgACALNgIUIABBzZCAgAA2AhAgAEEJNgIMDBcLQQAhECAAQQA2AhwgACAKNgIUIABB6YqAgAA2AhAgAEEJNgIMDBYLQQAhECAAQQA2AhwgACAJNgIUIABBt5CAgAA2AhAgAEEJNgIMDBULQQAhECAAQQA2AhwgACAINgIUIABBnJGAgAA2AhAgAEEJNgIMDBQLQQAhECAAQQA2AhwgACABNgIUIABBzZCAgAA2AhAgAEEJNgIMDBMLQQAhECAAQQA2AhwgACABNgIUIABB6YqAgAA2AhAgAEEJNgIMDBILQQAhECAAQQA2AhwgACABNgIUIABBt5CAgAA2AhAgAEEJNgIMDBELQQAhECAAQQA2AhwgACABNgIUIABBnJGAgAA2AhAgAEEJNgIMDBALQQAhECAAQQA2AhwgACABNgIUIABBl5WAgAA2AhAgAEEPNgIMDA8LQQAhECAAQQA2AhwgACABNgIUIABBl5WAgAA2AhAgAEEPNgIMDA4LQQAhECAAQQA2AhwgACABNgIUIABBwJKAgAA2AhAgAEELNgIMDA0LQQAhECAAQQA2AhwgACABNgIUIABBlYmAgAA2AhAgAEELNgIMDAwLQQAhECAAQQA2AhwgACABNgIUIABB4Y+AgAA2AhAgAEEKNgIMDAsLQQAhECAAQQA2AhwgACABNgIUIABB+4+AgAA2AhAgAEEKNgIMDAoLQQAhECAAQQA2AhwgACABNgIUIABB8ZmAgAA2AhAgAEECNgIMDAkLQQAhECAAQQA2AhwgACABNgIUIABBxJSAgAA2AhAgAEECNgIMDAgLQQAhECAAQQA2AhwgACABNgIUIABB8pWAgAA2AhAgAEECNgIMDAcLIABBAjYCHCAAIAE2AhQgAEGcmoCAADYCECAAQRY2AgxBACEQDAYLQQEhEAwFC0HUACEQIAEiBCACRg0EIANBCGogACAEIAJB2MKAgABBChDFgICAACADKAIMIQQgAygCCA4DAQQCAAsQyoCAgAAACyAAQQA2AhwgAEG1moCAADYCECAAQRc2AgwgACAEQQFqNgIUQQAhEAwCCyAAQQA2AhwgACAENgIUIABBypqAgAA2AhAgAEEJNgIMQQAhEAwBCwJAIAEiBCACRw0AQSIhEAwBCyAAQYmAgIAANgIIIAAgBDYCBEEhIRALIANBEGokgICAgAAgEAuvAQECfyABKAIAIQYCQAJAIAIgA0YNACAEIAZqIQQgBiADaiACayEHIAIgBkF/cyAFaiIGaiEFA0ACQCACLQAAIAQtAABGDQBBAiEEDAMLAkAgBg0AQQAhBCAFIQIMAwsgBkF/aiEGIARBAWohBCACQQFqIgIgA0cNAAsgByEGIAMhAgsgAEEBNgIAIAEgBjYCACAAIAI2AgQPCyABQQA2AgAgACAENgIAIAAgAjYCBAsKACAAEMeAgIAAC/I2AQt/I4CAgIAAQRBrIgEkgICAgAACQEEAKAKg0ICAAA0AQQAQy4CAgABBgNSEgABrIgJB2QBJDQBBACEDAkBBACgC4NOAgAAiBA0AQQBCfzcC7NOAgABBAEKAgISAgIDAADcC5NOAgABBACABQQhqQXBxQdiq1aoFcyIENgLg04CAAEEAQQA2AvTTgIAAQQBBADYCxNOAgAALQQAgAjYCzNOAgABBAEGA1ISAADYCyNOAgABBAEGA1ISAADYCmNCAgABBACAENgKs0ICAAEEAQX82AqjQgIAAA0AgA0HE0ICAAGogA0G40ICAAGoiBDYCACAEIANBsNCAgABqIgU2AgAgA0G80ICAAGogBTYCACADQczQgIAAaiADQcDQgIAAaiIFNgIAIAUgBDYCACADQdTQgIAAaiADQcjQgIAAaiIENgIAIAQgBTYCACADQdDQgIAAaiAENgIAIANBIGoiA0GAAkcN
AAtBgNSEgABBeEGA1ISAAGtBD3FBAEGA1ISAAEEIakEPcRsiA2oiBEEEaiACQUhqIgUgA2siA0EBcjYCAEEAQQAoAvDTgIAANgKk0ICAAEEAIAM2ApTQgIAAQQAgBDYCoNCAgABBgNSEgAAgBWpBODYCBAsCQAJAAkACQAJAAkACQAJAAkACQAJAAkAgAEHsAUsNAAJAQQAoAojQgIAAIgZBECAAQRNqQXBxIABBC0kbIgJBA3YiBHYiA0EDcUUNAAJAAkAgA0EBcSAEckEBcyIFQQN0IgRBsNCAgABqIgMgBEG40ICAAGooAgAiBCgCCCICRw0AQQAgBkF+IAV3cTYCiNCAgAAMAQsgAyACNgIIIAIgAzYCDAsgBEEIaiEDIAQgBUEDdCIFQQNyNgIEIAQgBWoiBCAEKAIEQQFyNgIEDAwLIAJBACgCkNCAgAAiB00NAQJAIANFDQACQAJAIAMgBHRBAiAEdCIDQQAgA2tycSIDQQAgA2txQX9qIgMgA0EMdkEQcSIDdiIEQQV2QQhxIgUgA3IgBCAFdiIDQQJ2QQRxIgRyIAMgBHYiA0EBdkECcSIEciADIAR2IgNBAXZBAXEiBHIgAyAEdmoiBEEDdCIDQbDQgIAAaiIFIANBuNCAgABqKAIAIgMoAggiAEcNAEEAIAZBfiAEd3EiBjYCiNCAgAAMAQsgBSAANgIIIAAgBTYCDAsgAyACQQNyNgIEIAMgBEEDdCIEaiAEIAJrIgU2AgAgAyACaiIAIAVBAXI2AgQCQCAHRQ0AIAdBeHFBsNCAgABqIQJBACgCnNCAgAAhBAJAAkAgBkEBIAdBA3Z0IghxDQBBACAGIAhyNgKI0ICAACACIQgMAQsgAigCCCEICyAIIAQ2AgwgAiAENgIIIAQgAjYCDCAEIAg2AggLIANBCGohA0EAIAA2ApzQgIAAQQAgBTYCkNCAgAAMDAtBACgCjNCAgAAiCUUNASAJQQAgCWtxQX9qIgMgA0EMdkEQcSIDdiIEQQV2QQhxIgUgA3IgBCAFdiIDQQJ2QQRxIgRyIAMgBHYiA0EBdkECcSIEciADIAR2IgNBAXZBAXEiBHIgAyAEdmpBAnRBuNKAgABqKAIAIgAoAgRBeHEgAmshBCAAIQUCQANAAkAgBSgCECIDDQAgBUEUaigCACIDRQ0CCyADKAIEQXhxIAJrIgUgBCAFIARJIgUbIQQgAyAAIAUbIQAgAyEFDAALCyAAKAIYIQoCQCAAKAIMIgggAEYNACAAKAIIIgNBACgCmNCAgABJGiAIIAM2AgggAyAINgIMDAsLAkAgAEEUaiIFKAIAIgMNACAAKAIQIgNFDQMgAEEQaiEFCwNAIAUhCyADIghBFGoiBSgCACIDDQAgCEEQaiEFIAgoAhAiAw0ACyALQQA2AgAMCgtBfyECIABBv39LDQAgAEETaiIDQXBxIQJBACgCjNCAgAAiB0UNAEEAIQsCQCACQYACSQ0AQR8hCyACQf///wdLDQAgA0EIdiIDIANBgP4/akEQdkEIcSIDdCIEIARBgOAfakEQdkEEcSIEdCIFIAVBgIAPakEQdkECcSIFdEEPdiADIARyIAVyayIDQQF0IAIgA0EVanZBAXFyQRxqIQsLQQAgAmshBAJAAkACQAJAIAtBAnRBuNKAgABqKAIAIgUNAEEAIQNBACEIDAELQQAhAyACQQBBGSALQQF2ayALQR9GG3QhAEEAIQgDQAJAIAUoAgRBeHEgAmsiBiAETw0AIAYhBCAFIQggBg0AQQAhBCAFIQggBSEDDAMLIAMgBUEUaigCACIGIAYgBSAAQR12QQRxakEQaigCACIFRhsgAyAGGyEDIABBAXQhACAFDQALCwJAIAMgCHINAEEAIQhBAiALdCIDQQAgA2tyIAdxIgNFDQMgA0EAIANrcUF/aiIDIANBDHZBEHEiA3YiBUEFdkEIcSIAIANyIAUgAHYiA0ECdkEEcSIFciADIAV2IgNBAXZBAnEiBXIgAyAFdiIDQQF2QQFxIgVyIAMgBXZqQQJ0QbjSgIAAaigCACEDCyADRQ0BCwNAIAMoAgRBeHEgAmsiBiAESSEAAkAgAygCECIFDQAgA0EUaigCACEFCyAGIAQgABshBCADIAggABshCCAFIQMgBQ0ACwsgCEUNACAEQQAoApDQgIAAIAJrTw0AIAgoAhghCwJAIAgoAgwiACAIRg0AIAgoAggiA0EAKAKY0ICAAEkaIAAgAzYCCCADIAA2AgwMCQsCQCAIQRRqIgUoAgAiAw0AIAgoAhAiA0UNAyAIQRBqIQULA0AgBSEGIAMiAEEUaiIFKAIAIgMNACAAQRBqIQUgACgCECIDDQALIAZBADYCAAwICwJAQQAoApDQgIAAIgMgAkkNAEEAKAKc0ICAACEEAkACQCADIAJrIgVBEEkNACAEIAJqIgAgBUEBcjYCBEEAIAU2ApDQgIAAQQAgADYCnNCAgAAgBCADaiAFNgIAIAQgAkEDcjYCBAwBCyAEIANBA3I2AgQgBCADaiIDIAMoAgRBAXI2AgRBAEEANgKc0ICAAEEAQQA2ApDQgIAACyAEQQhqIQMMCgsCQEEAKAKU0ICAACIAIAJNDQBBACgCoNCAgAAiAyACaiIEIAAgAmsiBUEBcjYCBEEAIAU2ApTQgIAAQQAgBDYCoNCAgAAgAyACQQNyNgIEIANBCGohAwwKCwJAAkBBACgC4NOAgABFDQBBACgC6NOAgAAhBAwBC0EAQn83AuzTgIAAQQBCgICEgICAwAA3AuTTgIAAQQAgAUEMakFwcUHYqtWqBXM2AuDTgIAAQQBBADYC9NOAgABBAEEANgLE04CAAEGAgAQhBAtBACEDAkAgBCACQccAaiIHaiIGQQAgBGsiC3EiCCACSw0AQQBBMDYC+NOAgAAMCgsCQEEAKALA04CAACIDRQ0AAkBBACgCuNOAgAAiBCAIaiIFIARNDQAgBSADTQ0BC0EAIQNBAEEwNgL404CAAAwKC0EALQDE04CAAEEEcQ0EAkACQAJAQQAoAqDQgIAAIgRFDQBByNOAgAAhAwNAAkAgAygCACIFIARLDQAgBSADKAIEaiAESw0DCyADKAIIIgMNAAsLQQAQy4CAgAAiAEF/Rg0FIAghBgJAQQAoAuTTgIAAIgNBf2oiBCAAcUUNACAIIABrIAQgAGpBACADa3FqIQYLIAYgAk0NBSAGQf7///8HSw0FAkBBACgCwNOAgAAiA0UNAEEAKAK404CAACIEIAZqIgUgBE0NBiAFIANLDQYLIAYQy4CAgAAiAyAARw0BDAcLIAYgAGsgC3EiBkH+////B0sNBCAGEMuAgIAAIgAgAygCACADKAIEakYNAyAAIQMLAkAgA0F/Rg0AIAJByABqIAZNDQACQCAHIAZrQQAoAujTgIAAIgRqQQAgBGtxIgRB/v///wdNDQAgAyEADAcLAkAgBBDLgICAAEF/Rg0AIAQgBmohBiADIQAMBwtBACAGaxDLgICAABoMBAsgAyEAIANBf0cNBQwDC0EAIQgMBwt
BACEADAULIABBf0cNAgtBAEEAKALE04CAAEEEcjYCxNOAgAALIAhB/v///wdLDQEgCBDLgICAACEAQQAQy4CAgAAhAyAAQX9GDQEgA0F/Rg0BIAAgA08NASADIABrIgYgAkE4ak0NAQtBAEEAKAK404CAACAGaiIDNgK404CAAAJAIANBACgCvNOAgABNDQBBACADNgK804CAAAsCQAJAAkACQEEAKAKg0ICAACIERQ0AQcjTgIAAIQMDQCAAIAMoAgAiBSADKAIEIghqRg0CIAMoAggiAw0ADAMLCwJAAkBBACgCmNCAgAAiA0UNACAAIANPDQELQQAgADYCmNCAgAALQQAhA0EAIAY2AszTgIAAQQAgADYCyNOAgABBAEF/NgKo0ICAAEEAQQAoAuDTgIAANgKs0ICAAEEAQQA2AtTTgIAAA0AgA0HE0ICAAGogA0G40ICAAGoiBDYCACAEIANBsNCAgABqIgU2AgAgA0G80ICAAGogBTYCACADQczQgIAAaiADQcDQgIAAaiIFNgIAIAUgBDYCACADQdTQgIAAaiADQcjQgIAAaiIENgIAIAQgBTYCACADQdDQgIAAaiAENgIAIANBIGoiA0GAAkcNAAsgAEF4IABrQQ9xQQAgAEEIakEPcRsiA2oiBCAGQUhqIgUgA2siA0EBcjYCBEEAQQAoAvDTgIAANgKk0ICAAEEAIAM2ApTQgIAAQQAgBDYCoNCAgAAgACAFakE4NgIEDAILIAMtAAxBCHENACAEIAVJDQAgBCAATw0AIARBeCAEa0EPcUEAIARBCGpBD3EbIgVqIgBBACgClNCAgAAgBmoiCyAFayIFQQFyNgIEIAMgCCAGajYCBEEAQQAoAvDTgIAANgKk0ICAAEEAIAU2ApTQgIAAQQAgADYCoNCAgAAgBCALakE4NgIEDAELAkAgAEEAKAKY0ICAACIITw0AQQAgADYCmNCAgAAgACEICyAAIAZqIQVByNOAgAAhAwJAAkACQAJAAkACQAJAA0AgAygCACAFRg0BIAMoAggiAw0ADAILCyADLQAMQQhxRQ0BC0HI04CAACEDA0ACQCADKAIAIgUgBEsNACAFIAMoAgRqIgUgBEsNAwsgAygCCCEDDAALCyADIAA2AgAgAyADKAIEIAZqNgIEIABBeCAAa0EPcUEAIABBCGpBD3EbaiILIAJBA3I2AgQgBUF4IAVrQQ9xQQAgBUEIakEPcRtqIgYgCyACaiICayEDAkAgBiAERw0AQQAgAjYCoNCAgABBAEEAKAKU0ICAACADaiIDNgKU0ICAACACIANBAXI2AgQMAwsCQCAGQQAoApzQgIAARw0AQQAgAjYCnNCAgABBAEEAKAKQ0ICAACADaiIDNgKQ0ICAACACIANBAXI2AgQgAiADaiADNgIADAMLAkAgBigCBCIEQQNxQQFHDQAgBEF4cSEHAkACQCAEQf8BSw0AIAYoAggiBSAEQQN2IghBA3RBsNCAgABqIgBGGgJAIAYoAgwiBCAFRw0AQQBBACgCiNCAgABBfiAId3E2AojQgIAADAILIAQgAEYaIAQgBTYCCCAFIAQ2AgwMAQsgBigCGCEJAkACQCAGKAIMIgAgBkYNACAGKAIIIgQgCEkaIAAgBDYCCCAEIAA2AgwMAQsCQCAGQRRqIgQoAgAiBQ0AIAZBEGoiBCgCACIFDQBBACEADAELA0AgBCEIIAUiAEEUaiIEKAIAIgUNACAAQRBqIQQgACgCECIFDQALIAhBADYCAAsgCUUNAAJAAkAgBiAGKAIcIgVBAnRBuNKAgABqIgQoAgBHDQAgBCAANgIAIAANAUEAQQAoAozQgIAAQX4gBXdxNgKM0ICAAAwCCyAJQRBBFCAJKAIQIAZGG2ogADYCACAARQ0BCyAAIAk2AhgCQCAGKAIQIgRFDQAgACAENgIQIAQgADYCGAsgBigCFCIERQ0AIABBFGogBDYCACAEIAA2AhgLIAcgA2ohAyAGIAdqIgYoAgQhBAsgBiAEQX5xNgIEIAIgA2ogAzYCACACIANBAXI2AgQCQCADQf8BSw0AIANBeHFBsNCAgABqIQQCQAJAQQAoAojQgIAAIgVBASADQQN2dCIDcQ0AQQAgBSADcjYCiNCAgAAgBCEDDAELIAQoAgghAwsgAyACNgIMIAQgAjYCCCACIAQ2AgwgAiADNgIIDAMLQR8hBAJAIANB////B0sNACADQQh2IgQgBEGA/j9qQRB2QQhxIgR0IgUgBUGA4B9qQRB2QQRxIgV0IgAgAEGAgA9qQRB2QQJxIgB0QQ92IAQgBXIgAHJrIgRBAXQgAyAEQRVqdkEBcXJBHGohBAsgAiAENgIcIAJCADcCECAEQQJ0QbjSgIAAaiEFAkBBACgCjNCAgAAiAEEBIAR0IghxDQAgBSACNgIAQQAgACAIcjYCjNCAgAAgAiAFNgIYIAIgAjYCCCACIAI2AgwMAwsgA0EAQRkgBEEBdmsgBEEfRht0IQQgBSgCACEAA0AgACIFKAIEQXhxIANGDQIgBEEddiEAIARBAXQhBCAFIABBBHFqQRBqIggoAgAiAA0ACyAIIAI2AgAgAiAFNgIYIAIgAjYCDCACIAI2AggMAgsgAEF4IABrQQ9xQQAgAEEIakEPcRsiA2oiCyAGQUhqIgggA2siA0EBcjYCBCAAIAhqQTg2AgQgBCAFQTcgBWtBD3FBACAFQUlqQQ9xG2pBQWoiCCAIIARBEGpJGyIIQSM2AgRBAEEAKALw04CAADYCpNCAgABBACADNgKU0ICAAEEAIAs2AqDQgIAAIAhBEGpBACkC0NOAgAA3AgAgCEEAKQLI04CAADcCCEEAIAhBCGo2AtDTgIAAQQAgBjYCzNOAgABBACAANgLI04CAAEEAQQA2AtTTgIAAIAhBJGohAwNAIANBBzYCACADQQRqIgMgBUkNAAsgCCAERg0DIAggCCgCBEF+cTYCBCAIIAggBGsiADYCACAEIABBAXI2AgQCQCAAQf8BSw0AIABBeHFBsNCAgABqIQMCQAJAQQAoAojQgIAAIgVBASAAQQN2dCIAcQ0AQQAgBSAAcjYCiNCAgAAgAyEFDAELIAMoAgghBQsgBSAENgIMIAMgBDYCCCAEIAM2AgwgBCAFNgIIDAQLQR8hAwJAIABB////B0sNACAAQQh2IgMgA0GA/j9qQRB2QQhxIgN0IgUgBUGA4B9qQRB2QQRxIgV0IgggCEGAgA9qQRB2QQJxIgh0QQ92IAMgBXIgCHJrIgNBAXQgACADQRVqdkEBcXJBHGohAwsgBCADNgIcIARCADcCECADQQJ0QbjSgIAAaiEFAkBBACgCjNCAgAAiCEEBIAN0IgZxDQAgBSAENgIAQQAgCCAGcjYCjNCAgAAgBCAFNgIYIAQgBDYCCCAEIAQ2AgwMBAsgAEEAQRkgA0EBdmsgA0EfRht0IQMgBSgCACEIA0AgCCIFKAIEQXhxIABGDQMgA0EddiEIIANBAXQhAyAFIAhBBHFqQRBqIgYoAgAiCA0ACyAGIAQ2Ag
AgBCAFNgIYIAQgBDYCDCAEIAQ2AggMAwsgBSgCCCIDIAI2AgwgBSACNgIIIAJBADYCGCACIAU2AgwgAiADNgIICyALQQhqIQMMBQsgBSgCCCIDIAQ2AgwgBSAENgIIIARBADYCGCAEIAU2AgwgBCADNgIIC0EAKAKU0ICAACIDIAJNDQBBACgCoNCAgAAiBCACaiIFIAMgAmsiA0EBcjYCBEEAIAM2ApTQgIAAQQAgBTYCoNCAgAAgBCACQQNyNgIEIARBCGohAwwDC0EAIQNBAEEwNgL404CAAAwCCwJAIAtFDQACQAJAIAggCCgCHCIFQQJ0QbjSgIAAaiIDKAIARw0AIAMgADYCACAADQFBACAHQX4gBXdxIgc2AozQgIAADAILIAtBEEEUIAsoAhAgCEYbaiAANgIAIABFDQELIAAgCzYCGAJAIAgoAhAiA0UNACAAIAM2AhAgAyAANgIYCyAIQRRqKAIAIgNFDQAgAEEUaiADNgIAIAMgADYCGAsCQAJAIARBD0sNACAIIAQgAmoiA0EDcjYCBCAIIANqIgMgAygCBEEBcjYCBAwBCyAIIAJqIgAgBEEBcjYCBCAIIAJBA3I2AgQgACAEaiAENgIAAkAgBEH/AUsNACAEQXhxQbDQgIAAaiEDAkACQEEAKAKI0ICAACIFQQEgBEEDdnQiBHENAEEAIAUgBHI2AojQgIAAIAMhBAwBCyADKAIIIQQLIAQgADYCDCADIAA2AgggACADNgIMIAAgBDYCCAwBC0EfIQMCQCAEQf///wdLDQAgBEEIdiIDIANBgP4/akEQdkEIcSIDdCIFIAVBgOAfakEQdkEEcSIFdCICIAJBgIAPakEQdkECcSICdEEPdiADIAVyIAJyayIDQQF0IAQgA0EVanZBAXFyQRxqIQMLIAAgAzYCHCAAQgA3AhAgA0ECdEG40oCAAGohBQJAIAdBASADdCICcQ0AIAUgADYCAEEAIAcgAnI2AozQgIAAIAAgBTYCGCAAIAA2AgggACAANgIMDAELIARBAEEZIANBAXZrIANBH0YbdCEDIAUoAgAhAgJAA0AgAiIFKAIEQXhxIARGDQEgA0EddiECIANBAXQhAyAFIAJBBHFqQRBqIgYoAgAiAg0ACyAGIAA2AgAgACAFNgIYIAAgADYCDCAAIAA2AggMAQsgBSgCCCIDIAA2AgwgBSAANgIIIABBADYCGCAAIAU2AgwgACADNgIICyAIQQhqIQMMAQsCQCAKRQ0AAkACQCAAIAAoAhwiBUECdEG40oCAAGoiAygCAEcNACADIAg2AgAgCA0BQQAgCUF+IAV3cTYCjNCAgAAMAgsgCkEQQRQgCigCECAARhtqIAg2AgAgCEUNAQsgCCAKNgIYAkAgACgCECIDRQ0AIAggAzYCECADIAg2AhgLIABBFGooAgAiA0UNACAIQRRqIAM2AgAgAyAINgIYCwJAAkAgBEEPSw0AIAAgBCACaiIDQQNyNgIEIAAgA2oiAyADKAIEQQFyNgIEDAELIAAgAmoiBSAEQQFyNgIEIAAgAkEDcjYCBCAFIARqIAQ2AgACQCAHRQ0AIAdBeHFBsNCAgABqIQJBACgCnNCAgAAhAwJAAkBBASAHQQN2dCIIIAZxDQBBACAIIAZyNgKI0ICAACACIQgMAQsgAigCCCEICyAIIAM2AgwgAiADNgIIIAMgAjYCDCADIAg2AggLQQAgBTYCnNCAgABBACAENgKQ0ICAAAsgAEEIaiEDCyABQRBqJICAgIAAIAMLCgAgABDJgICAAAviDQEHfwJAIABFDQAgAEF4aiIBIABBfGooAgAiAkF4cSIAaiEDAkAgAkEBcQ0AIAJBA3FFDQEgASABKAIAIgJrIgFBACgCmNCAgAAiBEkNASACIABqIQACQCABQQAoApzQgIAARg0AAkAgAkH/AUsNACABKAIIIgQgAkEDdiIFQQN0QbDQgIAAaiIGRhoCQCABKAIMIgIgBEcNAEEAQQAoAojQgIAAQX4gBXdxNgKI0ICAAAwDCyACIAZGGiACIAQ2AgggBCACNgIMDAILIAEoAhghBwJAAkAgASgCDCIGIAFGDQAgASgCCCICIARJGiAGIAI2AgggAiAGNgIMDAELAkAgAUEUaiICKAIAIgQNACABQRBqIgIoAgAiBA0AQQAhBgwBCwNAIAIhBSAEIgZBFGoiAigCACIEDQAgBkEQaiECIAYoAhAiBA0ACyAFQQA2AgALIAdFDQECQAJAIAEgASgCHCIEQQJ0QbjSgIAAaiICKAIARw0AIAIgBjYCACAGDQFBAEEAKAKM0ICAAEF+IAR3cTYCjNCAgAAMAwsgB0EQQRQgBygCECABRhtqIAY2AgAgBkUNAgsgBiAHNgIYAkAgASgCECICRQ0AIAYgAjYCECACIAY2AhgLIAEoAhQiAkUNASAGQRRqIAI2AgAgAiAGNgIYDAELIAMoAgQiAkEDcUEDRw0AIAMgAkF+cTYCBEEAIAA2ApDQgIAAIAEgAGogADYCACABIABBAXI2AgQPCyABIANPDQAgAygCBCICQQFxRQ0AAkACQCACQQJxDQACQCADQQAoAqDQgIAARw0AQQAgATYCoNCAgABBAEEAKAKU0ICAACAAaiIANgKU0ICAACABIABBAXI2AgQgAUEAKAKc0ICAAEcNA0EAQQA2ApDQgIAAQQBBADYCnNCAgAAPCwJAIANBACgCnNCAgABHDQBBACABNgKc0ICAAEEAQQAoApDQgIAAIABqIgA2ApDQgIAAIAEgAEEBcjYCBCABIABqIAA2AgAPCyACQXhxIABqIQACQAJAIAJB/wFLDQAgAygCCCIEIAJBA3YiBUEDdEGw0ICAAGoiBkYaAkAgAygCDCICIARHDQBBAEEAKAKI0ICAAEF+IAV3cTYCiNCAgAAMAgsgAiAGRhogAiAENgIIIAQgAjYCDAwBCyADKAIYIQcCQAJAIAMoAgwiBiADRg0AIAMoAggiAkEAKAKY0ICAAEkaIAYgAjYCCCACIAY2AgwMAQsCQCADQRRqIgIoAgAiBA0AIANBEGoiAigCACIEDQBBACEGDAELA0AgAiEFIAQiBkEUaiICKAIAIgQNACAGQRBqIQIgBigCECIEDQALIAVBADYCAAsgB0UNAAJAAkAgAyADKAIcIgRBAnRBuNKAgABqIgIoAgBHDQAgAiAGNgIAIAYNAUEAQQAoAozQgIAAQX4gBHdxNgKM0ICAAAwCCyAHQRBBFCAHKAIQIANGG2ogBjYCACAGRQ0BCyAGIAc2AhgCQCADKAIQIgJFDQAgBiACNgIQIAIgBjYCGAsgAygCFCICRQ0AIAZBFGogAjYCACACIAY2AhgLIAEgAGogADYCACABIABBAXI2AgQgAUEAKAKc0ICAAEcNAUEAIAA2ApDQgIAADwsgAyACQX5xNgIEIAEgAGogADYCACABIABBAXI2AgQLAkAgAEH/AUsNACAAQXhxQbDQgIAAaiECAkACQEEAKAKI0ICAACIEQQEgAEEDdnQiAHENAEEAIAQgA
HI2AojQgIAAIAIhAAwBCyACKAIIIQALIAAgATYCDCACIAE2AgggASACNgIMIAEgADYCCA8LQR8hAgJAIABB////B0sNACAAQQh2IgIgAkGA/j9qQRB2QQhxIgJ0IgQgBEGA4B9qQRB2QQRxIgR0IgYgBkGAgA9qQRB2QQJxIgZ0QQ92IAIgBHIgBnJrIgJBAXQgACACQRVqdkEBcXJBHGohAgsgASACNgIcIAFCADcCECACQQJ0QbjSgIAAaiEEAkACQEEAKAKM0ICAACIGQQEgAnQiA3ENACAEIAE2AgBBACAGIANyNgKM0ICAACABIAQ2AhggASABNgIIIAEgATYCDAwBCyAAQQBBGSACQQF2ayACQR9GG3QhAiAEKAIAIQYCQANAIAYiBCgCBEF4cSAARg0BIAJBHXYhBiACQQF0IQIgBCAGQQRxakEQaiIDKAIAIgYNAAsgAyABNgIAIAEgBDYCGCABIAE2AgwgASABNgIIDAELIAQoAggiACABNgIMIAQgATYCCCABQQA2AhggASAENgIMIAEgADYCCAtBAEEAKAKo0ICAAEF/aiIBQX8gARs2AqjQgIAACwsEAAAAC04AAkAgAA0APwBBEHQPCwJAIABB//8DcQ0AIABBf0wNAAJAIABBEHZAACIAQX9HDQBBAEEwNgL404CAAEF/DwsgAEEQdA8LEMqAgIAAAAvyAgIDfwF+AkAgAkUNACAAIAE6AAAgAiAAaiIDQX9qIAE6AAAgAkEDSQ0AIAAgAToAAiAAIAE6AAEgA0F9aiABOgAAIANBfmogAToAACACQQdJDQAgACABOgADIANBfGogAToAACACQQlJDQAgAEEAIABrQQNxIgRqIgMgAUH/AXFBgYKECGwiATYCACADIAIgBGtBfHEiBGoiAkF8aiABNgIAIARBCUkNACADIAE2AgggAyABNgIEIAJBeGogATYCACACQXRqIAE2AgAgBEEZSQ0AIAMgATYCGCADIAE2AhQgAyABNgIQIAMgATYCDCACQXBqIAE2AgAgAkFsaiABNgIAIAJBaGogATYCACACQWRqIAE2AgAgBCADQQRxQRhyIgVrIgJBIEkNACABrUKBgICAEH4hBiADIAVqIQEDQCABIAY3AxggASAGNwMQIAEgBjcDCCABIAY3AwAgAUEgaiEBIAJBYGoiAkEfSw0ACwsgAAsLjkgBAEGACAuGSAEAAAACAAAAAwAAAAAAAAAAAAAABAAAAAUAAAAAAAAAAAAAAAYAAAAHAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAASW52YWxpZCBjaGFyIGluIHVybCBxdWVyeQBTcGFuIGNhbGxiYWNrIGVycm9yIGluIG9uX2JvZHkAQ29udGVudC1MZW5ndGggb3ZlcmZsb3cAQ2h1bmsgc2l6ZSBvdmVyZmxvdwBSZXNwb25zZSBvdmVyZmxvdwBJbnZhbGlkIG1ldGhvZCBmb3IgSFRUUC94LnggcmVxdWVzdABJbnZhbGlkIG1ldGhvZCBmb3IgUlRTUC94LnggcmVxdWVzdABFeHBlY3RlZCBTT1VSQ0UgbWV0aG9kIGZvciBJQ0UveC54IHJlcXVlc3QASW52YWxpZCBjaGFyIGluIHVybCBmcmFnbWVudCBzdGFydABFeHBlY3RlZCBkb3QAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9zdGF0dXMASW52YWxpZCByZXNwb25zZSBzdGF0dXMASW52YWxpZCBjaGFyYWN0ZXIgaW4gY2h1bmsgZXh0ZW5zaW9ucwBVc2VyIGNhbGxiYWNrIGVycm9yAGBvbl9yZXNldGAgY2FsbGJhY2sgZXJyb3IAYG9uX2NodW5rX2hlYWRlcmAgY2FsbGJhY2sgZXJyb3IAYG9uX21lc3NhZ2VfYmVnaW5gIGNhbGxiYWNrIGVycm9yAGBvbl9jaHVua19leHRlbnNpb25fdmFsdWVgIGNhbGxiYWNrIGVycm9yAGBvbl9zdGF0dXNfY29tcGxldGVgIGNhbGxiYWNrIGVycm9yAGBvbl92ZXJzaW9uX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fdXJsX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fY2h1bmtfY29tcGxldGVgIGNhbGxiYWNrIGVycm9yAGBvbl9oZWFkZXJfdmFsdWVfY29tcGxldGVgIGNhbGxiYWNrIGVycm9yAGBvbl9tZXNzYWdlX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fbWV0aG9kX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25faGVhZGVyX2ZpZWxkX2NvbXBsZXRlYCBjYWxsYmFjayBlcnJvcgBgb25fY2h1bmtfZXh0ZW5zaW9uX25hbWVgIGNhbGxiYWNrIGVycm9yAFVuZXhwZWN0ZWQgY2hhciBpbiB1cmwgc2VydmVyAEludmFsaWQgaGVhZGVyIHZhbHVlIGNoYXIASW52YWxpZCBoZWFkZXIgZmllbGQgY2hhcgBTcGFuIGNhbGxiYWNrIGVycm9yIGluIG9uX3ZlcnNpb24ASW52YWxpZCBtaW5vciB2ZXJzaW9uAEludmFsaWQgbWFqb3IgdmVyc2lvbgBFeHBlY3RlZCBzcGFjZSBhZnRlciB2ZXJzaW9uAEV4cGVjdGVkIENSTEYgYWZ0ZXIgdmVyc2lvbgBJbnZhbGlkIEhUVFAgdmVyc2lvbgBJbnZhbGlkIGhlYWRlciB0b2tlbgBTcGFuIGNhbGxiYWNrIGVycm9yIGluIG9uX3VybABJbnZhbGlkIGNoYXJhY3RlcnMgaW4gdXJsAFVuZXhwZWN0ZWQgc3RhcnQgY2hhciBpbiB1cmwARG91YmxlIEAgaW4gdXJsAEVtcHR5IENvbnRlbnQtTGVuZ3RoAEludmFsaWQgY2hhcmFjdGVyIGluIENvbnRlbnQtTGVuZ3RoAER1cGxpY2F0ZSBDb250ZW50LUxlbmd0aABJbnZhbGlkIGNoYXIgaW4gdXJsIHBhdGgAQ29udGVudC1MZW5ndGggY2FuJ3QgYmUgcHJlc2VudCB3aXRoIFRyYW5zZmVyLUVuY29kaW5nAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5rIHNpemUAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9oZWFkZXJfdmFsdWUAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9jaHVua19leHRlbnNpb25fdmFsdWUASW52YWxpZCBjaGFyYWN0ZXIgaW4gY2h1bmsgZXh0ZW5zaW9ucyB2YWx1ZQBNaXNzaW5nIGV4cGVjdGVkIExGIGFmdGVyIGhlYWRlciB2YWx1ZQBJbnZhbGlkIGBUcmFuc2Zlci1FbmNvZGluZ2AgaGVhZGVyIHZhbHVlAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5r
IGV4dGVuc2lvbnMgcXVvdGUgdmFsdWUASW52YWxpZCBjaGFyYWN0ZXIgaW4gY2h1bmsgZXh0ZW5zaW9ucyBxdW90ZWQgdmFsdWUAUGF1c2VkIGJ5IG9uX2hlYWRlcnNfY29tcGxldGUASW52YWxpZCBFT0Ygc3RhdGUAb25fcmVzZXQgcGF1c2UAb25fY2h1bmtfaGVhZGVyIHBhdXNlAG9uX21lc3NhZ2VfYmVnaW4gcGF1c2UAb25fY2h1bmtfZXh0ZW5zaW9uX3ZhbHVlIHBhdXNlAG9uX3N0YXR1c19jb21wbGV0ZSBwYXVzZQBvbl92ZXJzaW9uX2NvbXBsZXRlIHBhdXNlAG9uX3VybF9jb21wbGV0ZSBwYXVzZQBvbl9jaHVua19jb21wbGV0ZSBwYXVzZQBvbl9oZWFkZXJfdmFsdWVfY29tcGxldGUgcGF1c2UAb25fbWVzc2FnZV9jb21wbGV0ZSBwYXVzZQBvbl9tZXRob2RfY29tcGxldGUgcGF1c2UAb25faGVhZGVyX2ZpZWxkX2NvbXBsZXRlIHBhdXNlAG9uX2NodW5rX2V4dGVuc2lvbl9uYW1lIHBhdXNlAFVuZXhwZWN0ZWQgc3BhY2UgYWZ0ZXIgc3RhcnQgbGluZQBTcGFuIGNhbGxiYWNrIGVycm9yIGluIG9uX2NodW5rX2V4dGVuc2lvbl9uYW1lAEludmFsaWQgY2hhcmFjdGVyIGluIGNodW5rIGV4dGVuc2lvbnMgbmFtZQBQYXVzZSBvbiBDT05ORUNUL1VwZ3JhZGUAUGF1c2Ugb24gUFJJL1VwZ3JhZGUARXhwZWN0ZWQgSFRUUC8yIENvbm5lY3Rpb24gUHJlZmFjZQBTcGFuIGNhbGxiYWNrIGVycm9yIGluIG9uX21ldGhvZABFeHBlY3RlZCBzcGFjZSBhZnRlciBtZXRob2QAU3BhbiBjYWxsYmFjayBlcnJvciBpbiBvbl9oZWFkZXJfZmllbGQAUGF1c2VkAEludmFsaWQgd29yZCBlbmNvdW50ZXJlZABJbnZhbGlkIG1ldGhvZCBlbmNvdW50ZXJlZABVbmV4cGVjdGVkIGNoYXIgaW4gdXJsIHNjaGVtYQBSZXF1ZXN0IGhhcyBpbnZhbGlkIGBUcmFuc2Zlci1FbmNvZGluZ2AAU1dJVENIX1BST1hZAFVTRV9QUk9YWQBNS0FDVElWSVRZAFVOUFJPQ0VTU0FCTEVfRU5USVRZAENPUFkATU9WRURfUEVSTUFORU5UTFkAVE9PX0VBUkxZAE5PVElGWQBGQUlMRURfREVQRU5ERU5DWQBCQURfR0FURVdBWQBQTEFZAFBVVABDSEVDS09VVABHQVRFV0FZX1RJTUVPVVQAUkVRVUVTVF9USU1FT1VUAE5FVFdPUktfQ09OTkVDVF9USU1FT1VUAENPTk5FQ1RJT05fVElNRU9VVABMT0dJTl9USU1FT1VUAE5FVFdPUktfUkVBRF9USU1FT1VUAFBPU1QATUlTRElSRUNURURfUkVRVUVTVABDTElFTlRfQ0xPU0VEX1JFUVVFU1QAQ0xJRU5UX0NMT1NFRF9MT0FEX0JBTEFOQ0VEX1JFUVVFU1QAQkFEX1JFUVVFU1QASFRUUF9SRVFVRVNUX1NFTlRfVE9fSFRUUFNfUE9SVABSRVBPUlQASU1fQV9URUFQT1QAUkVTRVRfQ09OVEVOVABOT19DT05URU5UAFBBUlRJQUxfQ09OVEVOVABIUEVfSU5WQUxJRF9DT05TVEFOVABIUEVfQ0JfUkVTRVQAR0VUAEhQRV9TVFJJQ1QAQ09ORkxJQ1QAVEVNUE9SQVJZX1JFRElSRUNUAFBFUk1BTkVOVF9SRURJUkVDVABDT05ORUNUAE1VTFRJX1NUQVRVUwBIUEVfSU5WQUxJRF9TVEFUVVMAVE9PX01BTllfUkVRVUVTVFMARUFSTFlfSElOVFMAVU5BVkFJTEFCTEVfRk9SX0xFR0FMX1JFQVNPTlMAT1BUSU9OUwBTV0lUQ0hJTkdfUFJPVE9DT0xTAFZBUklBTlRfQUxTT19ORUdPVElBVEVTAE1VTFRJUExFX0NIT0lDRVMASU5URVJOQUxfU0VSVkVSX0VSUk9SAFdFQl9TRVJWRVJfVU5LTk9XTl9FUlJPUgBSQUlMR1VOX0VSUk9SAElERU5USVRZX1BST1ZJREVSX0FVVEhFTlRJQ0FUSU9OX0VSUk9SAFNTTF9DRVJUSUZJQ0FURV9FUlJPUgBJTlZBTElEX1hfRk9SV0FSREVEX0ZPUgBTRVRfUEFSQU1FVEVSAEdFVF9QQVJBTUVURVIASFBFX1VTRVIAU0VFX09USEVSAEhQRV9DQl9DSFVOS19IRUFERVIATUtDQUxFTkRBUgBTRVRVUABXRUJfU0VSVkVSX0lTX0RPV04AVEVBUkRPV04ASFBFX0NMT1NFRF9DT05ORUNUSU9OAEhFVVJJU1RJQ19FWFBJUkFUSU9OAERJU0NPTk5FQ1RFRF9PUEVSQVRJT04ATk9OX0FVVEhPUklUQVRJVkVfSU5GT1JNQVRJT04ASFBFX0lOVkFMSURfVkVSU0lPTgBIUEVfQ0JfTUVTU0FHRV9CRUdJTgBTSVRFX0lTX0ZST1pFTgBIUEVfSU5WQUxJRF9IRUFERVJfVE9LRU4ASU5WQUxJRF9UT0tFTgBGT1JCSURERU4ARU5IQU5DRV9ZT1VSX0NBTE0ASFBFX0lOVkFMSURfVVJMAEJMT0NLRURfQllfUEFSRU5UQUxfQ09OVFJPTABNS0NPTABBQ0wASFBFX0lOVEVSTkFMAFJFUVVFU1RfSEVBREVSX0ZJRUxEU19UT09fTEFSR0VfVU5PRkZJQ0lBTABIUEVfT0sAVU5MSU5LAFVOTE9DSwBQUkkAUkVUUllfV0lUSABIUEVfSU5WQUxJRF9DT05URU5UX0xFTkdUSABIUEVfVU5FWFBFQ1RFRF9DT05URU5UX0xFTkdUSABGTFVTSABQUk9QUEFUQ0gATS1TRUFSQ0gAVVJJX1RPT19MT05HAFBST0NFU1NJTkcATUlTQ0VMTEFORU9VU19QRVJTSVNURU5UX1dBUk5JTkcATUlTQ0VMTEFORU9VU19XQVJOSU5HAEhQRV9JTlZBTElEX1RSQU5TRkVSX0VOQ09ESU5HAEV4cGVjdGVkIENSTEYASFBFX0lOVkFMSURfQ0hVTktfU0laRQBNT1ZFAENPTlRJTlVFAEhQRV9DQl9TVEFUVVNfQ09NUExFVEUASFBFX0NCX0hFQURFUlNfQ09NUExFVEUASFBFX0NCX1ZFUlNJT05fQ09NUExFVEUASFBFX0NCX1VSTF9DT01QTEVURQBIUEVfQ0JfQ0hVTktfQ09NUExFVEUASFBFX0NCX0hFQURFUl9WQUxVRV9DT01QTEVURQBIUEVfQ0JfQ0hVTktfRVhURU5TSU9OX1ZBTFVFX0NPTVBMRVRFAEhQRV9DQl9DSFVOS19FWFRFTlNJT05fTkFNRV9
DT01QTEVURQBIUEVfQ0JfTUVTU0FHRV9DT01QTEVURQBIUEVfQ0JfTUVUSE9EX0NPTVBMRVRFAEhQRV9DQl9IRUFERVJfRklFTERfQ09NUExFVEUAREVMRVRFAEhQRV9JTlZBTElEX0VPRl9TVEFURQBJTlZBTElEX1NTTF9DRVJUSUZJQ0FURQBQQVVTRQBOT19SRVNQT05TRQBVTlNVUFBPUlRFRF9NRURJQV9UWVBFAEdPTkUATk9UX0FDQ0VQVEFCTEUAU0VSVklDRV9VTkFWQUlMQUJMRQBSQU5HRV9OT1RfU0FUSVNGSUFCTEUAT1JJR0lOX0lTX1VOUkVBQ0hBQkxFAFJFU1BPTlNFX0lTX1NUQUxFAFBVUkdFAE1FUkdFAFJFUVVFU1RfSEVBREVSX0ZJRUxEU19UT09fTEFSR0UAUkVRVUVTVF9IRUFERVJfVE9PX0xBUkdFAFBBWUxPQURfVE9PX0xBUkdFAElOU1VGRklDSUVOVF9TVE9SQUdFAEhQRV9QQVVTRURfVVBHUkFERQBIUEVfUEFVU0VEX0gyX1VQR1JBREUAU09VUkNFAEFOTk9VTkNFAFRSQUNFAEhQRV9VTkVYUEVDVEVEX1NQQUNFAERFU0NSSUJFAFVOU1VCU0NSSUJFAFJFQ09SRABIUEVfSU5WQUxJRF9NRVRIT0QATk9UX0ZPVU5EAFBST1BGSU5EAFVOQklORABSRUJJTkQAVU5BVVRIT1JJWkVEAE1FVEhPRF9OT1RfQUxMT1dFRABIVFRQX1ZFUlNJT05fTk9UX1NVUFBPUlRFRABBTFJFQURZX1JFUE9SVEVEAEFDQ0VQVEVEAE5PVF9JTVBMRU1FTlRFRABMT09QX0RFVEVDVEVEAEhQRV9DUl9FWFBFQ1RFRABIUEVfTEZfRVhQRUNURUQAQ1JFQVRFRABJTV9VU0VEAEhQRV9QQVVTRUQAVElNRU9VVF9PQ0NVUkVEAFBBWU1FTlRfUkVRVUlSRUQAUFJFQ09ORElUSU9OX1JFUVVJUkVEAFBST1hZX0FVVEhFTlRJQ0FUSU9OX1JFUVVJUkVEAE5FVFdPUktfQVVUSEVOVElDQVRJT05fUkVRVUlSRUQATEVOR1RIX1JFUVVJUkVEAFNTTF9DRVJUSUZJQ0FURV9SRVFVSVJFRABVUEdSQURFX1JFUVVJUkVEAFBBR0VfRVhQSVJFRABQUkVDT05ESVRJT05fRkFJTEVEAEVYUEVDVEFUSU9OX0ZBSUxFRABSRVZBTElEQVRJT05fRkFJTEVEAFNTTF9IQU5EU0hBS0VfRkFJTEVEAExPQ0tFRABUUkFOU0ZPUk1BVElPTl9BUFBMSUVEAE5PVF9NT0RJRklFRABOT1RfRVhURU5ERUQAQkFORFdJRFRIX0xJTUlUX0VYQ0VFREVEAFNJVEVfSVNfT1ZFUkxPQURFRABIRUFEAEV4cGVjdGVkIEhUVFAvAABeEwAAJhMAADAQAADwFwAAnRMAABUSAAA5FwAA8BIAAAoQAAB1EgAArRIAAIITAABPFAAAfxAAAKAVAAAjFAAAiRIAAIsUAABNFQAA1BEAAM8UAAAQGAAAyRYAANwWAADBEQAA4BcAALsUAAB0FAAAfBUAAOUUAAAIFwAAHxAAAGUVAACjFAAAKBUAAAIVAACZFQAALBAAAIsZAABPDwAA1A4AAGoQAADOEAAAAhcAAIkOAABuEwAAHBMAAGYUAABWFwAAwRMAAM0TAABsEwAAaBcAAGYXAABfFwAAIhMAAM4PAABpDgAA2A4AAGMWAADLEwAAqg4AACgXAAAmFwAAxRMAAF0WAADoEQAAZxMAAGUTAADyFgAAcxMAAB0XAAD5FgAA8xEAAM8OAADOFQAADBIAALMRAAClEQAAYRAAADIXAAC7EwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEBAgEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAgMCAgICAgAAAgIAAgIAAgICAgICAgICAgAEAAAAAAACAgICAgICAgICAgICAgICAgICAgICAgICAgAAAAICAgICAgICAgICAgICAgICAgICAgICAgICAgICAAIAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAIAAgICAgIAAAICAAICAAICAgICAgICAgIAAwAEAAAAAgICAgICAgICAgICAgICAgICAgICAgICAgIAAAACAgICAgICAgICAgICAgICAgICAgICAgICAgICAgACAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABsb3NlZWVwLWFsaXZlAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQEBAQEBAQEBAQEBAgEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQ
EBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQFjaHVua2VkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAQABAQEBAQAAAQEAAQEAAQEBAQEBAQEBAQAAAAAAAAABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGVjdGlvbmVudC1sZW5ndGhvbnJveHktY29ubmVjdGlvbgAAAAAAAAAAAAAAAAAAAHJhbnNmZXItZW5jb2RpbmdwZ3JhZGUNCg0KDQpTTQ0KDQpUVFAvQ0UvVFNQLwAAAAAAAAAAAAAAAAECAAEDAAAAAAAAAAAAAAAAAAAAAAAABAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAAAAAAAAAAABAgABAwAAAAAAAAAAAAAAAAAAAAAAAAQBAQUBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAAAAAAAAAAAAQAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAQEAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAAAAAAAAAABAAACAAAAAAAAAAAAAAAAAAAAAAAAAwQAAAQEBAQEBAQEBAQEBQQEBAQEBAQEBAQEBAAEAAYHBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEAAQABAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAAAAAAAAAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAgAAAAACAAAAAAAAAAAAAAAAAAAAAAADAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwAAAAAAAAMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAE5PVU5DRUVDS09VVE5FQ1RFVEVDUklCRUxVU0hFVEVBRFNFQVJDSFJHRUNUSVZJVFlMRU5EQVJWRU9USUZZUFRJT05TQ0hTRUFZU1RBVENIR0VPUkRJUkVDVE9SVFJDSFBBUkFNRVRFUlVSQ0VCU0NSSUJFQVJET1dOQUNFSU5ETktDS1VCU0NSSUJFSFRUUC9BRFRQLw==", "base64"); - } -}); - -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/client.js -var require_client = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/client.js"(exports, module2) { - "use strict"; - var assert3 = require("node:assert"); - var net = require("node:net"); - var http = require("node:http"); - var { pipeline } = require("node:stream"); - var util = require_util(); - var { channels } = require_diagnostics(); - var timers = require_timers(); - var Request = require_request(); - var DispatcherBase = require_dispatcher_base(); - var { - RequestContentLengthMismatchError, - 
ResponseContentLengthMismatchError, - InvalidArgumentError, - RequestAbortedError, - HeadersTimeoutError, - HeadersOverflowError, - SocketError, - InformationalError, - BodyTimeoutError, - HTTPParserError, - ResponseExceededMaxSizeError, - ClientDestroyedError - } = require_errors(); - var buildConnector = require_connect(); - var { - kUrl, - kReset, - kServerName, - kClient, - kBusy, - kParser, - kConnect, - kBlocking, - kResuming, - kRunning, - kPending, - kSize, - kWriting, - kQueue, - kConnected, - kConnecting, - kNeedDrain, - kNoRef, - kKeepAliveDefaultTimeout, - kHostHeader, - kPendingIdx, - kRunningIdx, - kError, - kPipelining, - kSocket, - kKeepAliveTimeoutValue, - kMaxHeadersSize, - kKeepAliveMaxTimeout, - kKeepAliveTimeoutThreshold, - kHeadersTimeout, - kBodyTimeout, - kStrictContentLength, - kConnector, - kMaxRedirections, - kMaxRequests, - kCounter, - kClose, - kDestroy, - kDispatch, - kInterceptors, - kLocalAddress, - kMaxResponseSize, - kHTTPConnVersion, - // HTTP2 - kHost, - kHTTP2Session, - kHTTP2SessionState, - kHTTP2BuildRequest, - kHTTP2CopyHeaders, - kHTTP1BuildRequest - } = require_symbols(); - var http2; - try { - http2 = require("node:http2"); - } catch { - http2 = { constants: {} }; - } - var { - constants: { - HTTP2_HEADER_AUTHORITY, - HTTP2_HEADER_METHOD, - HTTP2_HEADER_PATH, - HTTP2_HEADER_SCHEME, - HTTP2_HEADER_CONTENT_LENGTH, - HTTP2_HEADER_EXPECT, - HTTP2_HEADER_STATUS - } - } = http2; - var h2ExperimentalWarned = false; - var FastBuffer = Buffer[Symbol.species]; - var kClosedResolve = Symbol("kClosedResolve"); - var Client = class extends DispatcherBase { - /** - * - * @param {string|URL} url - * @param {import('../types/client').Client.Options} options - */ - constructor(url, { - interceptors, - maxHeaderSize, - headersTimeout, - socketTimeout, - requestTimeout, - connectTimeout, - bodyTimeout, - idleTimeout, - keepAlive, - keepAliveTimeout, - maxKeepAliveTimeout, - keepAliveMaxTimeout, - keepAliveTimeoutThreshold, - socketPath, - pipelining, - tls, - strictContentLength, - maxCachedSessions, - maxRedirections, - connect: connect2, - maxRequestsPerClient, - localAddress, - maxResponseSize, - autoSelectFamily, - autoSelectFamilyAttemptTimeout, - // h2 - allowH2, - maxConcurrentStreams - } = {}) { - super(); - if (keepAlive !== void 0) { - throw new InvalidArgumentError("unsupported keepAlive, use pipelining=0 instead"); + if (upgrade) { + assert3(client[kRunning] === 1); + this.upgrade = true; + return 2; + } + assert3(this.headers.length % 2 === 0); + this.headers = []; + this.headersSize = 0; + if (this.shouldKeepAlive && client[kPipelining]) { + const keepAliveTimeout = this.keepAlive ? 
util.parseKeepAliveTimeout(this.keepAlive) : null; + if (keepAliveTimeout != null) { + const timeout = Math.min( + keepAliveTimeout - client[kKeepAliveTimeoutThreshold], + client[kKeepAliveMaxTimeout] + ); + if (timeout <= 0) { + socket[kReset] = true; + } else { + client[kKeepAliveTimeoutValue] = timeout; + } + } else { + client[kKeepAliveTimeoutValue] = client[kKeepAliveDefaultTimeout]; + } + } else { + socket[kReset] = true; } - if (socketTimeout !== void 0) { - throw new InvalidArgumentError("unsupported socketTimeout, use headersTimeout & bodyTimeout instead"); + const pause = request.onHeaders(statusCode, headers, this.resume, statusText) === false; + if (request.aborted) { + return -1; } - if (requestTimeout !== void 0) { - throw new InvalidArgumentError("unsupported requestTimeout, use headersTimeout & bodyTimeout instead"); + if (request.method === "HEAD") { + return 1; } - if (idleTimeout !== void 0) { - throw new InvalidArgumentError("unsupported idleTimeout, use keepAliveTimeout instead"); + if (statusCode < 200) { + return 1; } - if (maxKeepAliveTimeout !== void 0) { - throw new InvalidArgumentError("unsupported maxKeepAliveTimeout, use keepAliveMaxTimeout instead"); + if (socket[kBlocking]) { + socket[kBlocking] = false; + client[kResume](); } - if (maxHeaderSize != null && !Number.isFinite(maxHeaderSize)) { - throw new InvalidArgumentError("invalid maxHeaderSize"); + return pause ? constants.ERROR.PAUSED : 0; + } + onBody(buf) { + const { client, socket, statusCode, maxResponseSize } = this; + if (socket.destroyed) { + return -1; } - if (socketPath != null && typeof socketPath !== "string") { - throw new InvalidArgumentError("invalid socketPath"); + const request = client[kQueue][client[kRunningIdx]]; + assert3(request); + assert3.strictEqual(this.timeoutType, TIMEOUT_BODY); + if (this.timeout) { + if (this.timeout.refresh) { + this.timeout.refresh(); + } } - if (connectTimeout != null && (!Number.isFinite(connectTimeout) || connectTimeout < 0)) { - throw new InvalidArgumentError("invalid connectTimeout"); + assert3(statusCode >= 200); + if (maxResponseSize > -1 && this.bytesRead + buf.length > maxResponseSize) { + util.destroy(socket, new ResponseExceededMaxSizeError()); + return -1; } - if (keepAliveTimeout != null && (!Number.isFinite(keepAliveTimeout) || keepAliveTimeout <= 0)) { - throw new InvalidArgumentError("invalid keepAliveTimeout"); + this.bytesRead += buf.length; + if (request.onData(buf) === false) { + return constants.ERROR.PAUSED; } - if (keepAliveMaxTimeout != null && (!Number.isFinite(keepAliveMaxTimeout) || keepAliveMaxTimeout <= 0)) { - throw new InvalidArgumentError("invalid keepAliveMaxTimeout"); + } + onMessageComplete() { + const { client, socket, statusCode, upgrade, headers, contentLength, bytesRead, shouldKeepAlive } = this; + if (socket.destroyed && (!statusCode || shouldKeepAlive)) { + return -1; } - if (keepAliveTimeoutThreshold != null && !Number.isFinite(keepAliveTimeoutThreshold)) { - throw new InvalidArgumentError("invalid keepAliveTimeoutThreshold"); + if (upgrade) { + return; } - if (headersTimeout != null && (!Number.isInteger(headersTimeout) || headersTimeout < 0)) { - throw new InvalidArgumentError("headersTimeout must be a positive integer or zero"); + const request = client[kQueue][client[kRunningIdx]]; + assert3(request); + assert3(statusCode >= 100); + this.statusCode = null; + this.statusText = ""; + this.bytesRead = 0; + this.contentLength = ""; + this.keepAlive = ""; + this.connection = ""; + assert3(this.headers.length % 2 === 
0); + this.headers = []; + this.headersSize = 0; + if (statusCode < 200) { + return; } - if (bodyTimeout != null && (!Number.isInteger(bodyTimeout) || bodyTimeout < 0)) { - throw new InvalidArgumentError("bodyTimeout must be a positive integer or zero"); + if (request.method !== "HEAD" && contentLength && bytesRead !== parseInt(contentLength, 10)) { + util.destroy(socket, new ResponseContentLengthMismatchError()); + return -1; } - if (connect2 != null && typeof connect2 !== "function" && typeof connect2 !== "object") { - throw new InvalidArgumentError("connect must be a function or an object"); + request.onComplete(headers); + client[kQueue][client[kRunningIdx]++] = null; + if (socket[kWriting]) { + assert3.strictEqual(client[kRunning], 0); + util.destroy(socket, new InformationalError("reset")); + return constants.ERROR.PAUSED; + } else if (!shouldKeepAlive) { + util.destroy(socket, new InformationalError("reset")); + return constants.ERROR.PAUSED; + } else if (socket[kReset] && client[kRunning] === 0) { + util.destroy(socket, new InformationalError("reset")); + return constants.ERROR.PAUSED; + } else if (client[kPipelining] == null || client[kPipelining] === 1) { + setImmediate(() => client[kResume]()); + } else { + client[kResume](); } - if (maxRedirections != null && (!Number.isInteger(maxRedirections) || maxRedirections < 0)) { - throw new InvalidArgumentError("maxRedirections must be a positive number"); + } + }; + function onParserTimeout(parser) { + const { socket, timeoutType, client } = parser; + if (timeoutType === TIMEOUT_HEADERS) { + if (!socket[kWriting] || socket.writableNeedDrain || client[kRunning] > 1) { + assert3(!parser.paused, "cannot be paused while waiting for headers"); + util.destroy(socket, new HeadersTimeoutError()); } - if (maxRequestsPerClient != null && (!Number.isInteger(maxRequestsPerClient) || maxRequestsPerClient < 0)) { - throw new InvalidArgumentError("maxRequestsPerClient must be a positive number"); + } else if (timeoutType === TIMEOUT_BODY) { + if (!parser.paused) { + util.destroy(socket, new BodyTimeoutError()); } - if (localAddress != null && (typeof localAddress !== "string" || net.isIP(localAddress) === 0)) { - throw new InvalidArgumentError("localAddress must be valid string IP address"); + } else if (timeoutType === TIMEOUT_IDLE) { + assert3(client[kRunning] === 0 && client[kKeepAliveTimeoutValue]); + util.destroy(socket, new InformationalError("socket idle timeout")); + } + } + async function connectH1(client, socket) { + client[kSocket] = socket; + if (!llhttpInstance) { + llhttpInstance = await llhttpPromise; + llhttpPromise = null; + } + socket[kNoRef] = false; + socket[kWriting] = false; + socket[kReset] = false; + socket[kBlocking] = false; + socket[kParser] = new Parser(client, socket, llhttpInstance); + addListener(socket, "error", function(err) { + const parser = this[kParser]; + assert3(err.code !== "ERR_TLS_CERT_ALTNAME_INVALID"); + if (err.code === "ECONNRESET" && parser.statusCode && !parser.shouldKeepAlive) { + parser.onMessageComplete(); + return; } - if (maxResponseSize != null && (!Number.isInteger(maxResponseSize) || maxResponseSize < -1)) { - throw new InvalidArgumentError("maxResponseSize must be a positive number"); + this[kError] = err; + this[kClient][kOnError](err); + }); + addListener(socket, "readable", function() { + const parser = this[kParser]; + if (parser) { + parser.readMore(); } - if (autoSelectFamilyAttemptTimeout != null && (!Number.isInteger(autoSelectFamilyAttemptTimeout) || autoSelectFamilyAttemptTimeout < 
-1)) { - throw new InvalidArgumentError("autoSelectFamilyAttemptTimeout must be a positive number"); + }); + addListener(socket, "end", function() { + const parser = this[kParser]; + if (parser.statusCode && !parser.shouldKeepAlive) { + parser.onMessageComplete(); + return; } - if (allowH2 != null && typeof allowH2 !== "boolean") { - throw new InvalidArgumentError("allowH2 must be a valid boolean value"); + util.destroy(this, new SocketError("other side closed", util.getSocketInfo(this))); + }); + addListener(socket, "close", function() { + const client2 = this[kClient]; + const parser = this[kParser]; + if (parser) { + if (!this[kError] && parser.statusCode && !parser.shouldKeepAlive) { + parser.onMessageComplete(); + } + this[kParser].destroy(); + this[kParser] = null; + } + const err = this[kError] || new SocketError("closed", util.getSocketInfo(this)); + client2[kSocket] = null; + client2[kHTTPContext] = null; + if (client2.destroyed) { + assert3(client2[kPending] === 0); + const requests = client2[kQueue].splice(client2[kRunningIdx]); + for (let i = 0; i < requests.length; i++) { + const request = requests[i]; + util.errorRequest(client2, request, err); + } + } else if (client2[kRunning] > 0 && err.code !== "UND_ERR_INFO") { + const request = client2[kQueue][client2[kRunningIdx]]; + client2[kQueue][client2[kRunningIdx]++] = null; + util.errorRequest(client2, request, err); } - if (maxConcurrentStreams != null && (typeof maxConcurrentStreams !== "number" || maxConcurrentStreams < 1)) { - throw new InvalidArgumentError("maxConcurrentStreams must be a positive integer, greater than 0"); + client2[kPendingIdx] = client2[kRunningIdx]; + assert3(client2[kRunning] === 0); + client2.emit("disconnect", client2[kUrl], [client2], err); + client2[kResume](); + }); + let closed = false; + socket.on("close", () => { + closed = true; + }); + return { + version: "h1", + defaultPipelining: 1, + write(...args) { + return writeH1(client, ...args); + }, + resume() { + resumeH1(client); + }, + destroy(err, callback) { + if (closed) { + queueMicrotask(callback); + } else { + socket.destroy(err).on("close", callback); + } + }, + get destroyed() { + return socket.destroyed; + }, + busy(request) { + if (socket[kWriting] || socket[kReset] || socket[kBlocking]) { + return true; + } + if (request) { + if (client[kRunning] > 0 && !request.idempotent) { + return true; + } + if (client[kRunning] > 0 && (request.upgrade || request.method === "CONNECT")) { + return true; + } + if (client[kRunning] > 0 && util.bodyLength(request.body) !== 0 && (util.isStream(request.body) || util.isAsyncIterable(request.body) || util.isFormDataLike(request.body))) { + return true; + } + } + return false; } - if (typeof connect2 !== "function") { - connect2 = buildConnector({ - ...tls, - maxCachedSessions, - allowH2, - socketPath, - timeout: connectTimeout, - ...util.nodeHasAutoSelectFamily && autoSelectFamily ? 
{ autoSelectFamily, autoSelectFamilyAttemptTimeout } : void 0, - ...connect2 - }); + }; + } + function resumeH1(client) { + const socket = client[kSocket]; + if (socket && !socket.destroyed) { + if (client[kSize] === 0) { + if (!socket[kNoRef] && socket.unref) { + socket.unref(); + socket[kNoRef] = true; + } + } else if (socket[kNoRef] && socket.ref) { + socket.ref(); + socket[kNoRef] = false; + } + if (client[kSize] === 0) { + if (socket[kParser].timeoutType !== TIMEOUT_IDLE) { + socket[kParser].setTimeout(client[kKeepAliveTimeoutValue], TIMEOUT_IDLE); + } + } else if (client[kRunning] > 0 && socket[kParser].statusCode < 200) { + if (socket[kParser].timeoutType !== TIMEOUT_HEADERS) { + const request = client[kQueue][client[kRunningIdx]]; + const headersTimeout = request.headersTimeout != null ? request.headersTimeout : client[kHeadersTimeout]; + socket[kParser].setTimeout(headersTimeout, TIMEOUT_HEADERS); + } } - this[kInterceptors] = interceptors?.Client && Array.isArray(interceptors.Client) ? interceptors.Client : [createRedirectInterceptor({ maxRedirections })]; - this[kUrl] = util.parseOrigin(url); - this[kConnector] = connect2; - this[kSocket] = null; - this[kPipelining] = pipelining != null ? pipelining : 1; - this[kMaxHeadersSize] = maxHeaderSize || http.maxHeaderSize; - this[kKeepAliveDefaultTimeout] = keepAliveTimeout == null ? 4e3 : keepAliveTimeout; - this[kKeepAliveMaxTimeout] = keepAliveMaxTimeout == null ? 6e5 : keepAliveMaxTimeout; - this[kKeepAliveTimeoutThreshold] = keepAliveTimeoutThreshold == null ? 1e3 : keepAliveTimeoutThreshold; - this[kKeepAliveTimeoutValue] = this[kKeepAliveDefaultTimeout]; - this[kServerName] = null; - this[kLocalAddress] = localAddress != null ? localAddress : null; - this[kResuming] = 0; - this[kNeedDrain] = 0; - this[kHostHeader] = `host: ${this[kUrl].hostname}${this[kUrl].port ? `:${this[kUrl].port}` : ""}\r -`; - this[kBodyTimeout] = bodyTimeout != null ? bodyTimeout : 3e5; - this[kHeadersTimeout] = headersTimeout != null ? headersTimeout : 3e5; - this[kStrictContentLength] = strictContentLength == null ? true : strictContentLength; - this[kMaxRedirections] = maxRedirections; - this[kMaxRequests] = maxRequestsPerClient; - this[kClosedResolve] = null; - this[kMaxResponseSize] = maxResponseSize > -1 ? maxResponseSize : -1; - this[kHTTPConnVersion] = "h1"; - this[kHTTP2Session] = null; - this[kHTTP2SessionState] = !allowH2 ? null : { - // streams: null, // Fixed queue of streams - For future support of `push` - openStreams: 0, - // Keep track of them to decide whether or not unref the session - maxConcurrentStreams: maxConcurrentStreams != null ? maxConcurrentStreams : 100 - // Max peerConcurrentStreams for a Node h2 server - }; - this[kHost] = `${this[kUrl].hostname}${this[kUrl].port ? 
`:${this[kUrl].port}` : ""}`; - this[kQueue] = []; - this[kRunningIdx] = 0; - this[kPendingIdx] = 0; } - get pipelining() { - return this[kPipelining]; + } + function shouldSendContentLength(method) { + return method !== "GET" && method !== "HEAD" && method !== "OPTIONS" && method !== "TRACE" && method !== "CONNECT"; + } + function writeH1(client, request) { + const { method, path: path10, host, upgrade, blocking, reset } = request; + let { body, headers, contentLength } = request; + const expectsPayload = method === "PUT" || method === "POST" || method === "PATCH"; + if (util.isFormDataLike(body)) { + if (!extractBody) { + extractBody = require_body().extractBody; + } + const [bodyStream, contentType] = extractBody(body); + if (request.contentType == null) { + headers.push("content-type", contentType); + } + body = bodyStream.stream; + contentLength = bodyStream.length; + } else if (util.isBlobLike(body) && request.contentType == null && body.type) { + headers.push("content-type", body.type); } - set pipelining(value) { - this[kPipelining] = value; - resume(this, true); + if (body && typeof body.read === "function") { + body.read(0); + } + const bodyLength = util.bodyLength(body); + contentLength = bodyLength ?? contentLength; + if (contentLength === null) { + contentLength = request.contentLength; + } + if (contentLength === 0 && !expectsPayload) { + contentLength = null; + } + if (shouldSendContentLength(method) && contentLength > 0 && request.contentLength !== null && request.contentLength !== contentLength) { + if (client[kStrictContentLength]) { + util.errorRequest(client, request, new RequestContentLengthMismatchError()); + return false; + } + process.emitWarning(new RequestContentLengthMismatchError()); + } + const socket = client[kSocket]; + const abort = (err) => { + if (request.aborted || request.completed) { + return; + } + util.errorRequest(client, request, err || new RequestAbortedError()); + util.destroy(body); + util.destroy(socket, new InformationalError("aborted")); + }; + try { + request.onConnect(abort); + } catch (err) { + util.errorRequest(client, request, err); + } + if (request.aborted) { + return false; + } + if (method === "HEAD") { + socket[kReset] = true; + } + if (upgrade || method === "CONNECT") { + socket[kReset] = true; + } + if (reset != null) { + socket[kReset] = reset; } - get [kPending]() { - return this[kQueue].length - this[kPendingIdx]; + if (client[kMaxRequests] && socket[kCounter]++ >= client[kMaxRequests]) { + socket[kReset] = true; } - get [kRunning]() { - return this[kPendingIdx] - this[kRunningIdx]; + if (blocking) { + socket[kBlocking] = true; } - get [kSize]() { - return this[kQueue].length - this[kRunningIdx]; + let header = `${method} ${path10} HTTP/1.1\r +`; + if (typeof host === "string") { + header += `host: ${host}\r +`; + } else { + header += client[kHostHeader]; } - get [kConnected]() { - return !!this[kSocket] && !this[kConnecting] && !this[kSocket].destroyed; + if (upgrade) { + header += `connection: upgrade\r +upgrade: ${upgrade}\r +`; + } else if (client[kPipelining] && !socket[kReset]) { + header += "connection: keep-alive\r\n"; + } else { + header += "connection: close\r\n"; } - get [kBusy]() { - const socket = this[kSocket]; - return socket && (socket[kReset] || socket[kWriting] || socket[kBlocking]) || this[kSize] >= (this[kPipelining] || 1) || this[kPending] > 0; + if (Array.isArray(headers)) { + for (let n = 0; n < headers.length; n += 2) { + const key = headers[n + 0]; + const val = headers[n + 1]; + if (Array.isArray(val)) 
{ + for (let i = 0; i < val.length; i++) { + header += `${key}: ${val[i]}\r +`; + } + } else { + header += `${key}: ${val}\r +`; + } + } } - /* istanbul ignore: only used for test */ - [kConnect](cb) { - connect(this); - this.once("connect", cb); + if (channels.sendHeaders.hasSubscribers) { + channels.sendHeaders.publish({ request, headers: header, socket }); } - [kDispatch](opts, handler) { - const origin = opts.origin || this[kUrl].origin; - const request = this[kHTTPConnVersion] === "h2" ? Request[kHTTP2BuildRequest](origin, opts, handler) : Request[kHTTP1BuildRequest](origin, opts, handler); - this[kQueue].push(request); - if (this[kResuming]) { - } else if (util.bodyLength(request.body) == null && util.isIterable(request.body)) { - this[kResuming] = 1; - process.nextTick(resume, this); + if (!body || bodyLength === 0) { + writeBuffer({ abort, body: null, client, request, socket, contentLength, header, expectsPayload }); + } else if (util.isBuffer(body)) { + writeBuffer({ abort, body, client, request, socket, contentLength, header, expectsPayload }); + } else if (util.isBlobLike(body)) { + if (typeof body.stream === "function") { + writeIterable({ abort, body: body.stream(), client, request, socket, contentLength, header, expectsPayload }); } else { - resume(this, true); - } - if (this[kResuming] && this[kNeedDrain] !== 2 && this[kBusy]) { - this[kNeedDrain] = 2; + writeBlob({ abort, body, client, request, socket, contentLength, header, expectsPayload }); } - return this[kNeedDrain] < 2; + } else if (util.isStream(body)) { + writeStream({ abort, body, client, request, socket, contentLength, header, expectsPayload }); + } else if (util.isIterable(body)) { + writeIterable({ abort, body, client, request, socket, contentLength, header, expectsPayload }); + } else { + assert3(false); } - async [kClose]() { - return new Promise((resolve) => { - if (this[kSize]) { - this[kClosedResolve] = resolve; - } else { - resolve(null); + return true; + } + function writeStream({ abort, body, client, request, socket, contentLength, header, expectsPayload }) { + assert3(contentLength !== 0 || client[kRunning] === 0, "stream body cannot be pipelined"); + let finished = false; + const writer = new AsyncWriter({ abort, socket, request, contentLength, client, expectsPayload, header }); + const onData = function(chunk) { + if (finished) { + return; + } + try { + if (!writer.write(chunk) && this.pause) { + this.pause(); } + } catch (err) { + util.destroy(this, err); + } + }; + const onDrain = function() { + if (finished) { + return; + } + if (body.resume) { + body.resume(); + } + }; + const onClose = function() { + queueMicrotask(() => { + body.removeListener("error", onFinished); }); - } - async [kDestroy](err) { - return new Promise((resolve) => { - const requests = this[kQueue].splice(this[kPendingIdx]); - for (let i = 0; i < requests.length; i++) { - const request = requests[i]; - errorRequest(this, request, err); - } - const callback = () => { - if (this[kClosedResolve]) { - this[kClosedResolve](); - this[kClosedResolve] = null; - } - resolve(); - }; - if (this[kHTTP2Session] != null) { - util.destroy(this[kHTTP2Session], err); - this[kHTTP2Session] = null; - this[kHTTP2SessionState] = null; + if (!finished) { + const err = new RequestAbortedError(); + queueMicrotask(() => onFinished(err)); + } + }; + const onFinished = function(err) { + if (finished) { + return; + } + finished = true; + assert3(socket.destroyed || socket[kWriting] && client[kRunning] <= 1); + socket.off("drain", onDrain).off("error", 
onFinished); + body.removeListener("data", onData).removeListener("end", onFinished).removeListener("close", onClose); + if (!err) { + try { + writer.end(); + } catch (er) { + err = er; } - if (this[kSocket]) { - util.destroy(this[kSocket].on("close", callback), err); + } + writer.destroy(err); + if (err && (err.code !== "UND_ERR_INFO" || err.message !== "reset")) { + util.destroy(body, err); + } else { + util.destroy(body); + } + }; + body.on("data", onData).on("end", onFinished).on("error", onFinished).on("close", onClose); + if (body.resume) { + body.resume(); + } + socket.on("drain", onDrain).on("error", onFinished); + if (body.errorEmitted ?? body.errored) { + setImmediate(() => onFinished(body.errored)); + } else if (body.endEmitted ?? body.readableEnded) { + setImmediate(() => onFinished(null)); + } + if (body.closeEmitted ?? body.closed) { + setImmediate(onClose); + } + } + async function writeBuffer({ abort, body, client, request, socket, contentLength, header, expectsPayload }) { + try { + if (!body) { + if (contentLength === 0) { + socket.write(`${header}content-length: 0\r +\r +`, "latin1"); } else { - queueMicrotask(callback); + assert3(contentLength === null, "no body must not have content length"); + socket.write(`${header}\r +`, "latin1"); } - resume(this); - }); + } else if (util.isBuffer(body)) { + assert3(contentLength === body.byteLength, "buffer body must have content length"); + socket.cork(); + socket.write(`${header}content-length: ${contentLength}\r +\r +`, "latin1"); + socket.write(body); + socket.uncork(); + request.onBodySent(body); + if (!expectsPayload) { + socket[kReset] = true; + } + } + request.onRequestSent(); + client[kResume](); + } catch (err) { + abort(err); } - }; - function onHttp2SessionError(err) { - assert3(err.code !== "ERR_TLS_CERT_ALTNAME_INVALID"); - this[kSocket][kError] = err; - onError(this[kClient], err); } - function onHttp2FrameError(type, code, id) { - const err = new InformationalError(`HTTP/2: "frameError" received - type ${type}, code ${code}`); - if (id === 0) { - this[kSocket][kError] = err; - onError(this[kClient], err); + async function writeBlob({ abort, body, client, request, socket, contentLength, header, expectsPayload }) { + assert3(contentLength === body.size, "blob body must have content length"); + try { + if (contentLength != null && contentLength !== body.size) { + throw new RequestContentLengthMismatchError(); + } + const buffer = Buffer.from(await body.arrayBuffer()); + socket.cork(); + socket.write(`${header}content-length: ${contentLength}\r +\r +`, "latin1"); + socket.write(buffer); + socket.uncork(); + request.onBodySent(buffer); + request.onRequestSent(); + if (!expectsPayload) { + socket[kReset] = true; + } + client[kResume](); + } catch (err) { + abort(err); } } - function onHttp2SessionEnd() { - util.destroy(this, new SocketError("other side closed")); - util.destroy(this[kSocket], new SocketError("other side closed")); - } - function onHTTP2GoAway(code) { - const client = this[kClient]; - const err = new InformationalError(`HTTP/2: "GOAWAY" frame received with code ${code}`); - client[kSocket] = null; - client[kHTTP2Session] = null; - if (client.destroyed) { - assert3(this[kPending] === 0); - const requests = client[kQueue].splice(client[kRunningIdx]); - for (let i = 0; i < requests.length; i++) { - const request = requests[i]; - errorRequest(this, request, err); + async function writeIterable({ abort, body, client, request, socket, contentLength, header, expectsPayload }) { + assert3(contentLength !== 0 || 
client[kRunning] === 0, "iterator body cannot be pipelined"); + let callback = null; + function onDrain() { + if (callback) { + const cb = callback; + callback = null; + cb(); } - } else if (client[kRunning] > 0) { - const request = client[kQueue][client[kRunningIdx]]; - client[kQueue][client[kRunningIdx]++] = null; - errorRequest(client, request, err); - } - client[kPendingIdx] = client[kRunningIdx]; - assert3(client[kRunning] === 0); - client.emit( - "disconnect", - client[kUrl], - [client], - err - ); - resume(client); - } - var constants = require_constants4(); - var createRedirectInterceptor = require_redirectInterceptor(); - var EMPTY_BUF = Buffer.alloc(0); - async function lazyllhttp() { - const llhttpWasmData = process.env.JEST_WORKER_ID ? require_llhttp_wasm() : void 0; - let mod; - try { - mod = await WebAssembly.compile(require_llhttp_simd_wasm()); - } catch (e) { - mod = await WebAssembly.compile(llhttpWasmData || require_llhttp_wasm()); } - return await WebAssembly.instantiate(mod, { - env: { - /* eslint-disable camelcase */ - wasm_on_url: (p, at, len) => { - return 0; - }, - wasm_on_status: (p, at, len) => { - assert3.strictEqual(currentParser.ptr, p); - const start = at - currentBufferPtr + currentBufferRef.byteOffset; - return currentParser.onStatus(new FastBuffer(currentBufferRef.buffer, start, len)) || 0; - }, - wasm_on_message_begin: (p) => { - assert3.strictEqual(currentParser.ptr, p); - return currentParser.onMessageBegin() || 0; - }, - wasm_on_header_field: (p, at, len) => { - assert3.strictEqual(currentParser.ptr, p); - const start = at - currentBufferPtr + currentBufferRef.byteOffset; - return currentParser.onHeaderField(new FastBuffer(currentBufferRef.buffer, start, len)) || 0; - }, - wasm_on_header_value: (p, at, len) => { - assert3.strictEqual(currentParser.ptr, p); - const start = at - currentBufferPtr + currentBufferRef.byteOffset; - return currentParser.onHeaderValue(new FastBuffer(currentBufferRef.buffer, start, len)) || 0; - }, - wasm_on_headers_complete: (p, statusCode, upgrade, shouldKeepAlive) => { - assert3.strictEqual(currentParser.ptr, p); - return currentParser.onHeadersComplete(statusCode, Boolean(upgrade), Boolean(shouldKeepAlive)) || 0; - }, - wasm_on_body: (p, at, len) => { - assert3.strictEqual(currentParser.ptr, p); - const start = at - currentBufferPtr + currentBufferRef.byteOffset; - return currentParser.onBody(new FastBuffer(currentBufferRef.buffer, start, len)) || 0; - }, - wasm_on_message_complete: (p) => { - assert3.strictEqual(currentParser.ptr, p); - return currentParser.onMessageComplete() || 0; + const waitForDrain = () => new Promise((resolve, reject) => { + assert3(callback === null); + if (socket[kError]) { + reject(socket[kError]); + } else { + callback = resolve; + } + }); + socket.on("close", onDrain).on("drain", onDrain); + const writer = new AsyncWriter({ abort, socket, request, contentLength, client, expectsPayload, header }); + try { + for await (const chunk of body) { + if (socket[kError]) { + throw socket[kError]; + } + if (!writer.write(chunk)) { + await waitForDrain(); } - /* eslint-enable camelcase */ } - }); + writer.end(); + } catch (err) { + writer.destroy(err); + } finally { + socket.off("close", onDrain).off("drain", onDrain); + } } - var llhttpInstance = null; - var llhttpPromise = lazyllhttp(); - llhttpPromise.catch(); - var currentParser = null; - var currentBufferRef = null; - var currentBufferSize = 0; - var currentBufferPtr = null; - var TIMEOUT_HEADERS = 1; - var TIMEOUT_BODY = 2; - var TIMEOUT_IDLE = 3; - 
var Parser = class { - constructor(client, socket, { exports: exports2 }) { - assert3(Number.isFinite(client[kMaxHeadersSize]) && client[kMaxHeadersSize] > 0); - this.llhttp = exports2; - this.ptr = this.llhttp.llhttp_alloc(constants.TYPE.RESPONSE); - this.client = client; + var AsyncWriter = class { + constructor({ abort, socket, request, contentLength, client, expectsPayload, header }) { this.socket = socket; - this.timeout = null; - this.timeoutValue = null; - this.timeoutType = null; - this.statusCode = null; - this.statusText = ""; - this.upgrade = false; - this.headers = []; - this.headersSize = 0; - this.headersMaxSize = client[kMaxHeadersSize]; - this.shouldKeepAlive = false; - this.paused = false; - this.resume = this.resume.bind(this); - this.bytesRead = 0; - this.keepAlive = ""; - this.contentLength = ""; - this.connection = ""; - this.maxResponseSize = client[kMaxResponseSize]; + this.request = request; + this.contentLength = contentLength; + this.client = client; + this.bytesWritten = 0; + this.expectsPayload = expectsPayload; + this.header = header; + this.abort = abort; + socket[kWriting] = true; } - setTimeout(value, type) { - this.timeoutType = type; - if (value !== this.timeoutValue) { - timers.clearTimeout(this.timeout); - if (value) { - this.timeout = timers.setTimeout(onParserTimeout, value, this); - if (this.timeout.unref) { - this.timeout.unref(); - } + write(chunk) { + const { socket, request, contentLength, client, bytesWritten, expectsPayload, header } = this; + if (socket[kError]) { + throw socket[kError]; + } + if (socket.destroyed) { + return false; + } + const len = Buffer.byteLength(chunk); + if (!len) { + return true; + } + if (contentLength !== null && bytesWritten + len > contentLength) { + if (client[kStrictContentLength]) { + throw new RequestContentLengthMismatchError(); + } + process.emitWarning(new RequestContentLengthMismatchError()); + } + socket.cork(); + if (bytesWritten === 0) { + if (!expectsPayload) { + socket[kReset] = true; + } + if (contentLength === null) { + socket.write(`${header}transfer-encoding: chunked\r +`, "latin1"); } else { - this.timeout = null; + socket.write(`${header}content-length: ${contentLength}\r +\r +`, "latin1"); } - this.timeoutValue = value; - } else if (this.timeout) { - if (this.timeout.refresh) { - this.timeout.refresh(); + } + if (contentLength === null) { + socket.write(`\r +${len.toString(16)}\r +`, "latin1"); + } + this.bytesWritten += len; + const ret = socket.write(chunk); + socket.uncork(); + request.onBodySent(chunk); + if (!ret) { + if (socket[kParser].timeout && socket[kParser].timeoutType === TIMEOUT_HEADERS) { + if (socket[kParser].timeout.refresh) { + socket[kParser].timeout.refresh(); + } } } + return ret; } - resume() { - if (this.socket.destroyed || !this.paused) { + end() { + const { socket, contentLength, client, bytesWritten, expectsPayload, header, request } = this; + request.onRequestSent(); + socket[kWriting] = false; + if (socket[kError]) { + throw socket[kError]; + } + if (socket.destroyed) { return; } - assert3(this.ptr != null); - assert3(currentParser == null); - this.llhttp.llhttp_resume(this.ptr); - assert3(this.timeoutType === TIMEOUT_BODY); - if (this.timeout) { - if (this.timeout.refresh) { - this.timeout.refresh(); + if (bytesWritten === 0) { + if (expectsPayload) { + socket.write(`${header}content-length: 0\r +\r +`, "latin1"); + } else { + socket.write(`${header}\r +`, "latin1"); } + } else if (contentLength === null) { + socket.write("\r\n0\r\n\r\n", "latin1"); } - this.paused = 
false; - this.execute(this.socket.read() || EMPTY_BUF); - this.readMore(); - } - readMore() { - while (!this.paused && this.ptr) { - const chunk = this.socket.read(); - if (chunk === null) { - break; + if (contentLength !== null && bytesWritten !== contentLength) { + if (client[kStrictContentLength]) { + throw new RequestContentLengthMismatchError(); + } else { + process.emitWarning(new RequestContentLengthMismatchError()); } - this.execute(chunk); } - } - execute(data) { - assert3(this.ptr != null); - assert3(currentParser == null); - assert3(!this.paused); - const { socket, llhttp } = this; - if (data.length > currentBufferSize) { - if (currentBufferPtr) { - llhttp.free(currentBufferPtr); + if (socket[kParser].timeout && socket[kParser].timeoutType === TIMEOUT_HEADERS) { + if (socket[kParser].timeout.refresh) { + socket[kParser].timeout.refresh(); } - currentBufferSize = Math.ceil(data.length / 4096) * 4096; - currentBufferPtr = llhttp.malloc(currentBufferSize); } - new Uint8Array(llhttp.memory.buffer, currentBufferPtr, currentBufferSize).set(data); - try { - let ret; - try { - currentBufferRef = data; - currentParser = this; - ret = llhttp.llhttp_execute(this.ptr, currentBufferPtr, data.length); - } catch (err) { - throw err; - } finally { - currentParser = null; - currentBufferRef = null; - } - const offset = llhttp.llhttp_get_error_pos(this.ptr) - currentBufferPtr; - if (ret === constants.ERROR.PAUSED_UPGRADE) { - this.onUpgrade(data.slice(offset)); - } else if (ret === constants.ERROR.PAUSED) { - this.paused = true; - socket.unshift(data.slice(offset)); - } else if (ret !== constants.ERROR.OK) { - const ptr = llhttp.llhttp_get_error_reason(this.ptr); - let message = ""; - if (ptr) { - const len = new Uint8Array(llhttp.memory.buffer, ptr).indexOf(0); - message = "Response does not match the HTTP/1.1 protocol (" + Buffer.from(llhttp.memory.buffer, ptr, len).toString() + ")"; - } - throw new HTTPParserError(message, constants.ERROR[ret], data.slice(offset)); + client[kResume](); + } + destroy(err) { + const { socket, client, abort } = this; + socket[kWriting] = false; + if (err) { + assert3(client[kRunning] <= 1, "pipeline should only contain this request"); + abort(err); + } + } + }; + module2.exports = connectH1; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/client-h2.js +var require_client_h2 = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/client-h2.js"(exports, module2) { + "use strict"; + var assert3 = require("node:assert"); + var { pipeline } = require("node:stream"); + var util = require_util(); + var { + RequestContentLengthMismatchError, + RequestAbortedError, + SocketError, + InformationalError + } = require_errors(); + var { + kUrl, + kReset, + kClient, + kRunning, + kPending, + kQueue, + kPendingIdx, + kRunningIdx, + kError, + kSocket, + kStrictContentLength, + kOnError, + kMaxConcurrentStreams, + kHTTP2Session, + kResume + } = require_symbols(); + var kOpenStreams = Symbol("open streams"); + var h2ExperimentalWarned = false; + var http2; + try { + http2 = require("node:http2"); + } catch { + http2 = { constants: {} }; + } + var { + constants: { + HTTP2_HEADER_AUTHORITY, + HTTP2_HEADER_METHOD, + HTTP2_HEADER_PATH, + HTTP2_HEADER_SCHEME, + HTTP2_HEADER_CONTENT_LENGTH, + HTTP2_HEADER_EXPECT, + HTTP2_HEADER_STATUS + } + } = http2; + function parseH2Headers(headers) { + const result = []; + for (const [name, value] of Object.entries(headers)) { + if 
(Array.isArray(value)) { + for (const subvalue of value) { + result.push(Buffer.from(name), Buffer.from(subvalue)); } - } catch (err) { - util.destroy(socket, err); + } else { + result.push(Buffer.from(name), Buffer.from(value)); } } - destroy() { - assert3(this.ptr != null); - assert3(currentParser == null); - this.llhttp.llhttp_free(this.ptr); - this.ptr = null; - timers.clearTimeout(this.timeout); - this.timeout = null; - this.timeoutValue = null; - this.timeoutType = null; - this.paused = false; - } - onStatus(buf) { - this.statusText = buf.toString(); + return result; + } + async function connectH2(client, socket) { + client[kSocket] = socket; + if (!h2ExperimentalWarned) { + h2ExperimentalWarned = true; + process.emitWarning("H2 support is experimental, expect them to change at any time.", { + code: "UNDICI-H2" + }); } - onMessageBegin() { - const { socket, client } = this; - if (socket.destroyed) { - return -1; + const session = http2.connect(client[kUrl], { + createConnection: () => socket, + peerMaxConcurrentStreams: client[kMaxConcurrentStreams] + }); + session[kOpenStreams] = 0; + session[kClient] = client; + session[kSocket] = socket; + util.addListener(session, "error", onHttp2SessionError); + util.addListener(session, "frameError", onHttp2FrameError); + util.addListener(session, "end", onHttp2SessionEnd); + util.addListener(session, "goaway", onHTTP2GoAway); + util.addListener(session, "close", function() { + const { [kClient]: client2 } = this; + const { [kSocket]: socket2 } = client2; + const err = this[kSocket][kError] || this[kError] || new SocketError("closed", util.getSocketInfo(socket2)); + client2[kHTTP2Session] = null; + if (client2.destroyed) { + assert3(client2[kPending] === 0); + const requests = client2[kQueue].splice(client2[kRunningIdx]); + for (let i = 0; i < requests.length; i++) { + const request = requests[i]; + util.errorRequest(client2, request, err); + } } - const request = client[kQueue][client[kRunningIdx]]; - if (!request) { - return -1; + }); + session.unref(); + client[kHTTP2Session] = session; + socket[kHTTP2Session] = session; + util.addListener(socket, "error", function(err) { + assert3(err.code !== "ERR_TLS_CERT_ALTNAME_INVALID"); + this[kError] = err; + this[kClient][kOnError](err); + }); + util.addListener(socket, "end", function() { + util.destroy(this, new SocketError("other side closed", util.getSocketInfo(this))); + }); + util.addListener(socket, "close", function() { + const err = this[kError] || new SocketError("closed", util.getSocketInfo(this)); + client[kSocket] = null; + if (this[kHTTP2Session] != null) { + this[kHTTP2Session].destroy(err); } - request.onResponseStarted(); + client[kPendingIdx] = client[kRunningIdx]; + assert3(client[kRunning] === 0); + client.emit("disconnect", client[kUrl], [client], err); + client[kResume](); + }); + let closed = false; + socket.on("close", () => { + closed = true; + }); + return { + version: "h2", + defaultPipelining: Infinity, + write(...args) { + writeH2(client, ...args); + }, + resume() { + }, + destroy(err, callback) { + if (closed) { + queueMicrotask(callback); + } else { + socket.destroy(err).on("close", callback); + } + }, + get destroyed() { + return socket.destroyed; + }, + busy() { + return false; + } + }; + } + function onHttp2SessionError(err) { + assert3(err.code !== "ERR_TLS_CERT_ALTNAME_INVALID"); + this[kSocket][kError] = err; + this[kClient][kOnError](err); + } + function onHttp2FrameError(type, code, id) { + if (id === 0) { + const err = new InformationalError(`HTTP/2: 
"frameError" received - type ${type}, code ${code}`); + this[kSocket][kError] = err; + this[kClient][kOnError](err); } - onHeaderField(buf) { - const len = this.headers.length; - if ((len & 1) === 0) { - this.headers.push(buf); + } + function onHttp2SessionEnd() { + const err = new SocketError("other side closed", util.getSocketInfo(this[kSocket])); + this.destroy(err); + util.destroy(this[kSocket], err); + } + function onHTTP2GoAway(code) { + const err = new InformationalError(`HTTP/2: "GOAWAY" frame received with code ${code}`); + this[kSocket][kError] = err; + this[kClient][kOnError](err); + this.unref(); + this.destroy(); + util.destroy(this[kSocket], err); + } + function shouldSendContentLength(method) { + return method !== "GET" && method !== "HEAD" && method !== "OPTIONS" && method !== "TRACE" && method !== "CONNECT"; + } + function writeH2(client, request) { + const session = client[kHTTP2Session]; + const { body, method, path: path10, host, upgrade, expectContinue, signal, headers: reqHeaders } = request; + if (upgrade) { + util.errorRequest(client, request, new Error("Upgrade not supported for H2")); + return false; + } + if (request.aborted) { + return false; + } + const headers = {}; + for (let n = 0; n < reqHeaders.length; n += 2) { + const key = reqHeaders[n + 0]; + const val = reqHeaders[n + 1]; + if (Array.isArray(val)) { + for (let i = 0; i < val.length; i++) { + if (headers[key]) { + headers[key] += `,${val[i]}`; + } else { + headers[key] = val[i]; + } + } } else { - this.headers[len - 1] = Buffer.concat([this.headers[len - 1], buf]); + headers[key] = val; } - this.trackHeader(buf.length); } - onHeaderValue(buf) { - let len = this.headers.length; - if ((len & 1) === 1) { - this.headers.push(buf); - len += 1; - } else { - this.headers[len - 1] = Buffer.concat([this.headers[len - 1], buf]); + let stream; + const { hostname, port } = client[kUrl]; + headers[HTTP2_HEADER_AUTHORITY] = host || `${hostname}${port ? 
`:${port}` : ""}`; + headers[HTTP2_HEADER_METHOD] = method; + const abort = (err) => { + if (request.aborted || request.completed) { + return; } - const key = this.headers[len - 2]; - if (key.length === 10) { - const headerName = util.bufferToLowerCasedHeaderName(key); - if (headerName === "keep-alive") { - this.keepAlive += buf.toString(); - } else if (headerName === "connection") { - this.connection += buf.toString(); - } - } else if (key.length === 14 && util.bufferToLowerCasedHeaderName(key) === "content-length") { - this.contentLength += buf.toString(); + err = err || new RequestAbortedError(); + util.errorRequest(client, request, err); + if (stream != null) { + util.destroy(stream, err); } - this.trackHeader(buf.length); + util.destroy(body, err); + }; + try { + request.onConnect(abort); + } catch (err) { + util.errorRequest(client, request, err); } - trackHeader(len) { - this.headersSize += len; - if (this.headersSize >= this.headersMaxSize) { - util.destroy(this.socket, new HeadersOverflowError()); + if (method === "CONNECT") { + session.ref(); + stream = session.request(headers, { endStream: false, signal }); + if (stream.id && !stream.pending) { + request.onUpgrade(null, null, stream); + ++session[kOpenStreams]; + } else { + stream.once("ready", () => { + request.onUpgrade(null, null, stream); + ++session[kOpenStreams]; + }); } + stream.once("close", () => { + session[kOpenStreams] -= 1; + if (session[kOpenStreams] === 0) + session.unref(); + }); + return true; } - onUpgrade(head) { - const { upgrade, client, socket, headers, statusCode } = this; - assert3(upgrade); - const request = client[kQueue][client[kRunningIdx]]; - assert3(request); - assert3(!socket.destroyed); - assert3(socket === client[kSocket]); - assert3(!this.paused); - assert3(request.upgrade || request.method === "CONNECT"); - this.statusCode = null; - this.statusText = ""; - this.shouldKeepAlive = null; - assert3(this.headers.length % 2 === 0); - this.headers = []; - this.headersSize = 0; - socket.unshift(head); - socket[kParser].destroy(); - socket[kParser] = null; - socket[kClient] = null; - socket[kError] = null; - socket.removeListener("error", onSocketError).removeListener("readable", onSocketReadable).removeListener("end", onSocketEnd).removeListener("close", onSocketClose); - client[kSocket] = null; - client[kQueue][client[kRunningIdx]++] = null; - client.emit("disconnect", client[kUrl], [client], new InformationalError("upgrade")); - try { - request.onUpgrade(statusCode, headers, socket); - } catch (err) { - util.destroy(socket, err); + headers[HTTP2_HEADER_PATH] = path10; + headers[HTTP2_HEADER_SCHEME] = "https"; + const expectsPayload = method === "PUT" || method === "POST" || method === "PATCH"; + if (body && typeof body.read === "function") { + body.read(0); + } + let contentLength = util.bodyLength(body); + if (contentLength == null) { + contentLength = request.contentLength; + } + if (contentLength === 0 || !expectsPayload) { + contentLength = null; + } + if (shouldSendContentLength(method) && contentLength > 0 && request.contentLength != null && request.contentLength !== contentLength) { + if (client[kStrictContentLength]) { + util.errorRequest(client, request, new RequestContentLengthMismatchError()); + return false; } - resume(client); + process.emitWarning(new RequestContentLengthMismatchError()); } - onHeadersComplete(statusCode, upgrade, shouldKeepAlive) { - const { client, socket, headers, statusText } = this; - if (socket.destroyed) { - return -1; + if (contentLength != null) { + 
assert3(body, "no body must not have content length"); + headers[HTTP2_HEADER_CONTENT_LENGTH] = `${contentLength}`; + } + session.ref(); + const shouldEndStream = method === "GET" || method === "HEAD" || body === null; + if (expectContinue) { + headers[HTTP2_HEADER_EXPECT] = "100-continue"; + stream = session.request(headers, { endStream: shouldEndStream, signal }); + stream.once("continue", writeBodyH2); + } else { + stream = session.request(headers, { + endStream: shouldEndStream, + signal + }); + writeBodyH2(); + } + ++session[kOpenStreams]; + stream.once("response", (headers2) => { + const { [HTTP2_HEADER_STATUS]: statusCode, ...realHeaders } = headers2; + request.onResponseStarted(); + if (request.aborted) { + const err = new RequestAbortedError(); + util.errorRequest(client, request, err); + util.destroy(stream, err); + return; } - const request = client[kQueue][client[kRunningIdx]]; - if (!request) { - return -1; + if (request.onHeaders(Number(statusCode), parseH2Headers(realHeaders), stream.resume.bind(stream), "") === false) { + stream.pause(); } - assert3(!this.upgrade); - assert3(this.statusCode < 200); - if (statusCode === 100) { - util.destroy(socket, new SocketError("bad response", util.getSocketInfo(socket))); - return -1; + stream.on("data", (chunk) => { + if (request.onData(chunk) === false) { + stream.pause(); + } + }); + }); + stream.once("end", () => { + if (stream.state?.state == null || stream.state.state < 6) { + request.onComplete([]); + return; } - if (upgrade && !request.upgrade) { - util.destroy(socket, new SocketError("bad upgrade", util.getSocketInfo(socket))); - return -1; + if (session[kOpenStreams] === 0) { + session.unref(); } - assert3.strictEqual(this.timeoutType, TIMEOUT_HEADERS); - this.statusCode = statusCode; - this.shouldKeepAlive = shouldKeepAlive || // Override llhttp value which does not allow keepAlive for HEAD. - request.method === "HEAD" && !socket[kReset] && this.connection.toLowerCase() === "keep-alive"; - if (this.statusCode >= 200) { - const bodyTimeout = request.bodyTimeout != null ? 
request.bodyTimeout : client[kBodyTimeout]; - this.setTimeout(bodyTimeout, TIMEOUT_BODY); - } else if (this.timeout) { - if (this.timeout.refresh) { - this.timeout.refresh(); + abort(new InformationalError("HTTP/2: stream half-closed (remote)")); + }); + stream.once("close", () => { + session[kOpenStreams] -= 1; + if (session[kOpenStreams] === 0) { + session.unref(); + } + }); + stream.once("error", function(err) { + abort(err); + }); + stream.once("frameError", (type, code) => { + abort(new InformationalError(`HTTP/2: "frameError" received - type ${type}, code ${code}`)); + }); + return true; + function writeBodyH2() { + if (!body || contentLength === 0) { + writeBuffer({ + abort, + client, + request, + contentLength, + expectsPayload, + h2stream: stream, + body: null, + socket: client[kSocket] + }); + } else if (util.isBuffer(body)) { + writeBuffer({ + abort, + client, + request, + contentLength, + body, + expectsPayload, + h2stream: stream, + socket: client[kSocket] + }); + } else if (util.isBlobLike(body)) { + if (typeof body.stream === "function") { + writeIterable({ + abort, + client, + request, + contentLength, + expectsPayload, + h2stream: stream, + body: body.stream(), + socket: client[kSocket] + }); + } else { + writeBlob({ + abort, + body, + client, + request, + contentLength, + expectsPayload, + h2stream: stream, + socket: client[kSocket] + }); } + } else if (util.isStream(body)) { + writeStream({ + body, + client, + request, + contentLength, + expectsPayload, + socket: client[kSocket], + h2stream: stream, + header: "" + }); + } else if (util.isIterable(body)) { + writeIterable({ + body, + client, + request, + contentLength, + expectsPayload, + header: "", + h2stream: stream, + socket: client[kSocket] + }); + } else { + assert3(false); } - if (request.method === "CONNECT") { - assert3(client[kRunning] === 1); - this.upgrade = true; - return 2; + } + } + function writeBuffer({ abort, h2stream, body, client, request, socket, contentLength, expectsPayload }) { + try { + if (body != null && util.isBuffer(body)) { + assert3(contentLength === body.byteLength, "buffer body must have content length"); + h2stream.cork(); + h2stream.write(body); + h2stream.uncork(); + h2stream.end(); + request.onBodySent(body); } - if (upgrade) { - assert3(client[kRunning] === 1); - this.upgrade = true; - return 2; + if (!expectsPayload) { + socket[kReset] = true; } - assert3(this.headers.length % 2 === 0); - this.headers = []; - this.headersSize = 0; - if (this.shouldKeepAlive && client[kPipelining]) { - const keepAliveTimeout = this.keepAlive ? 
util.parseKeepAliveTimeout(this.keepAlive) : null; - if (keepAliveTimeout != null) { - const timeout = Math.min( - keepAliveTimeout - client[kKeepAliveTimeoutThreshold], - client[kKeepAliveMaxTimeout] - ); - if (timeout <= 0) { + request.onRequestSent(); + client[kResume](); + } catch (error) { + abort(error); + } + } + function writeStream({ abort, socket, expectsPayload, h2stream, body, client, request, contentLength }) { + assert3(contentLength !== 0 || client[kRunning] === 0, "stream body cannot be pipelined"); + const pipe = pipeline( + body, + h2stream, + (err) => { + if (err) { + util.destroy(pipe, err); + abort(err); + } else { + util.removeAllListeners(pipe); + request.onRequestSent(); + if (!expectsPayload) { socket[kReset] = true; - } else { - client[kKeepAliveTimeoutValue] = timeout; } - } else { - client[kKeepAliveTimeoutValue] = client[kKeepAliveDefaultTimeout]; + client[kResume](); } - } else { - socket[kReset] = true; - } - const pause = request.onHeaders(statusCode, headers, this.resume, statusText) === false; - if (request.aborted) { - return -1; } - if (request.method === "HEAD") { - return 1; + ); + util.addListener(pipe, "data", onPipeData); + function onPipeData(chunk) { + request.onBodySent(chunk); + } + } + async function writeBlob({ abort, h2stream, body, client, request, socket, contentLength, expectsPayload }) { + assert3(contentLength === body.size, "blob body must have content length"); + try { + if (contentLength != null && contentLength !== body.size) { + throw new RequestContentLengthMismatchError(); } - if (statusCode < 200) { - return 1; + const buffer = Buffer.from(await body.arrayBuffer()); + h2stream.cork(); + h2stream.write(buffer); + h2stream.uncork(); + h2stream.end(); + request.onBodySent(buffer); + request.onRequestSent(); + if (!expectsPayload) { + socket[kReset] = true; } - if (socket[kBlocking]) { - socket[kBlocking] = false; - resume(client); + client[kResume](); + } catch (err) { + abort(err); + } + } + async function writeIterable({ abort, h2stream, body, client, request, socket, contentLength, expectsPayload }) { + assert3(contentLength !== 0 || client[kRunning] === 0, "iterator body cannot be pipelined"); + let callback = null; + function onDrain() { + if (callback) { + const cb = callback; + callback = null; + cb(); } - return pause ? 
constants.ERROR.PAUSED : 0; } - onBody(buf) { - const { client, socket, statusCode, maxResponseSize } = this; - if (socket.destroyed) { - return -1; + const waitForDrain = () => new Promise((resolve, reject) => { + assert3(callback === null); + if (socket[kError]) { + reject(socket[kError]); + } else { + callback = resolve; } - const request = client[kQueue][client[kRunningIdx]]; - assert3(request); - assert3.strictEqual(this.timeoutType, TIMEOUT_BODY); - if (this.timeout) { - if (this.timeout.refresh) { - this.timeout.refresh(); + }); + h2stream.on("close", onDrain).on("drain", onDrain); + try { + for await (const chunk of body) { + if (socket[kError]) { + throw socket[kError]; + } + const res = h2stream.write(chunk); + request.onBodySent(chunk); + if (!res) { + await waitForDrain(); } } - assert3(statusCode >= 200); - if (maxResponseSize > -1 && this.bytesRead + buf.length > maxResponseSize) { - util.destroy(socket, new ResponseExceededMaxSizeError()); - return -1; - } - this.bytesRead += buf.length; - if (request.onData(buf) === false) { - return constants.ERROR.PAUSED; + h2stream.end(); + request.onRequestSent(); + if (!expectsPayload) { + socket[kReset] = true; } + client[kResume](); + } catch (err) { + abort(err); + } finally { + h2stream.off("close", onDrain).off("drain", onDrain); } - onMessageComplete() { - const { client, socket, statusCode, upgrade, headers, contentLength, bytesRead, shouldKeepAlive } = this; - if (socket.destroyed && (!statusCode || shouldKeepAlive)) { - return -1; + } + module2.exports = connectH2; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/handler/redirect-handler.js +var require_redirect_handler = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/handler/redirect-handler.js"(exports, module2) { + "use strict"; + var util = require_util(); + var { kBodyUsed } = require_symbols(); + var assert3 = require("node:assert"); + var { InvalidArgumentError } = require_errors(); + var EE = require("node:events"); + var redirectableStatusCodes = [300, 301, 302, 303, 307, 308]; + var kBody = Symbol("body"); + var BodyAsyncIterable = class { + constructor(body) { + this[kBody] = body; + this[kBodyUsed] = false; + } + async *[Symbol.asyncIterator]() { + assert3(!this[kBodyUsed], "disturbed"); + this[kBodyUsed] = true; + yield* this[kBody]; + } + }; + var RedirectHandler = class { + constructor(dispatch, maxRedirections, opts, handler) { + if (maxRedirections != null && (!Number.isInteger(maxRedirections) || maxRedirections < 0)) { + throw new InvalidArgumentError("maxRedirections must be a positive number"); } - if (upgrade) { - return; + util.validateHandler(handler, opts.method, opts.upgrade); + this.dispatch = dispatch; + this.location = null; + this.abort = null; + this.opts = { ...opts, maxRedirections: 0 }; + this.maxRedirections = maxRedirections; + this.handler = handler; + this.history = []; + this.redirectionLimitReached = false; + if (util.isStream(this.opts.body)) { + if (util.bodyLength(this.opts.body) === 0) { + this.opts.body.on("data", function() { + assert3(false); + }); + } + if (typeof this.opts.body.readableDidRead !== "boolean") { + this.opts.body[kBodyUsed] = false; + EE.prototype.on.call(this.opts.body, "data", function() { + this[kBodyUsed] = true; + }); + } + } else if (this.opts.body && typeof this.opts.body.pipeTo === "function") { + this.opts.body = new BodyAsyncIterable(this.opts.body); + } else if (this.opts.body && typeof this.opts.body !== 
"string" && !ArrayBuffer.isView(this.opts.body) && util.isIterable(this.opts.body)) { + this.opts.body = new BodyAsyncIterable(this.opts.body); } - const request = client[kQueue][client[kRunningIdx]]; - assert3(request); - assert3(statusCode >= 100); - this.statusCode = null; - this.statusText = ""; - this.bytesRead = 0; - this.contentLength = ""; - this.keepAlive = ""; - this.connection = ""; - assert3(this.headers.length % 2 === 0); - this.headers = []; - this.headersSize = 0; - if (statusCode < 200) { + } + onConnect(abort) { + this.abort = abort; + this.handler.onConnect(abort, { history: this.history }); + } + onUpgrade(statusCode, headers, socket) { + this.handler.onUpgrade(statusCode, headers, socket); + } + onError(error) { + this.handler.onError(error); + } + onHeaders(statusCode, headers, resume, statusText) { + this.location = this.history.length >= this.maxRedirections || util.isDisturbed(this.opts.body) ? null : parseLocation(statusCode, headers); + if (this.opts.throwOnMaxRedirect && this.history.length >= this.maxRedirections) { + if (this.request) { + this.request.abort(new Error("max redirects")); + } + this.redirectionLimitReached = true; + this.abort(new Error("max redirects")); return; } - if (request.method !== "HEAD" && contentLength && bytesRead !== parseInt(contentLength, 10)) { - util.destroy(socket, new ResponseContentLengthMismatchError()); - return -1; + if (this.opts.origin) { + this.history.push(new URL(this.opts.path, this.opts.origin)); + } + if (!this.location) { + return this.handler.onHeaders(statusCode, headers, resume, statusText); + } + const { origin, pathname, search } = util.parseURL(new URL(this.location, this.opts.origin && new URL(this.opts.path, this.opts.origin))); + const path10 = search ? `${pathname}${search}` : pathname; + this.opts.headers = cleanRequestHeaders(this.opts.headers, statusCode === 303, this.opts.origin !== origin); + this.opts.path = path10; + this.opts.origin = origin; + this.opts.maxRedirections = 0; + this.opts.query = null; + if (statusCode === 303 && this.opts.method !== "HEAD") { + this.opts.method = "GET"; + this.opts.body = null; } - request.onComplete(headers); - client[kQueue][client[kRunningIdx]++] = null; - if (socket[kWriting]) { - assert3.strictEqual(client[kRunning], 0); - util.destroy(socket, new InformationalError("reset")); - return constants.ERROR.PAUSED; - } else if (!shouldKeepAlive) { - util.destroy(socket, new InformationalError("reset")); - return constants.ERROR.PAUSED; - } else if (socket[kReset] && client[kRunning] === 0) { - util.destroy(socket, new InformationalError("reset")); - return constants.ERROR.PAUSED; - } else if (client[kPipelining] === 1) { - setImmediate(resume, client); + } + onData(chunk) { + if (this.location) { } else { - resume(client); + return this.handler.onData(chunk); } } - }; - function onParserTimeout(parser) { - const { socket, timeoutType, client } = parser; - if (timeoutType === TIMEOUT_HEADERS) { - if (!socket[kWriting] || socket.writableNeedDrain || client[kRunning] > 1) { - assert3(!parser.paused, "cannot be paused while waiting for headers"); - util.destroy(socket, new HeadersTimeoutError()); + onComplete(trailers) { + if (this.location) { + this.location = null; + this.abort = null; + this.dispatch(this.opts, this); + } else { + this.handler.onComplete(trailers); } - } else if (timeoutType === TIMEOUT_BODY) { - if (!parser.paused) { - util.destroy(socket, new BodyTimeoutError()); + } + onBodySent(chunk) { + if (this.handler.onBodySent) { + 
this.handler.onBodySent(chunk); } - } else if (timeoutType === TIMEOUT_IDLE) { - assert3(client[kRunning] === 0 && client[kKeepAliveTimeoutValue]); - util.destroy(socket, new InformationalError("socket idle timeout")); } - } - function onSocketReadable() { - const { [kParser]: parser } = this; - if (parser) { - parser.readMore(); + }; + function parseLocation(statusCode, headers) { + if (redirectableStatusCodes.indexOf(statusCode) === -1) { + return null; } - } - function onSocketError(err) { - const { [kClient]: client, [kParser]: parser } = this; - assert3(err.code !== "ERR_TLS_CERT_ALTNAME_INVALID"); - if (client[kHTTPConnVersion] !== "h2") { - if (err.code === "ECONNRESET" && parser.statusCode && !parser.shouldKeepAlive) { - parser.onMessageComplete(); - return; + for (let i = 0; i < headers.length; i += 2) { + if (headers[i].length === 8 && util.headerNameToString(headers[i]) === "location") { + return headers[i + 1]; } } - this[kError] = err; - onError(this[kClient], err); } - function onError(client, err) { - if (client[kRunning] === 0 && err.code !== "UND_ERR_INFO" && err.code !== "UND_ERR_SOCKET") { - assert3(client[kPendingIdx] === client[kRunningIdx]); - const requests = client[kQueue].splice(client[kRunningIdx]); - for (let i = 0; i < requests.length; i++) { - const request = requests[i]; - errorRequest(client, request, err); - } - assert3(client[kSize] === 0); + function shouldRemoveHeader(header, removeContent, unknownOrigin) { + if (header.length === 4) { + return util.headerNameToString(header) === "host"; } - } - function onSocketEnd() { - const { [kParser]: parser, [kClient]: client } = this; - if (client[kHTTPConnVersion] !== "h2") { - if (parser.statusCode && !parser.shouldKeepAlive) { - parser.onMessageComplete(); - return; - } + if (removeContent && util.headerNameToString(header).startsWith("content-")) { + return true; } - util.destroy(this, new SocketError("other side closed", util.getSocketInfo(this))); + if (unknownOrigin && (header.length === 13 || header.length === 6 || header.length === 19)) { + const name = util.headerNameToString(header); + return name === "authorization" || name === "cookie" || name === "proxy-authorization"; + } + return false; } - function onSocketClose() { - const { [kClient]: client, [kParser]: parser } = this; - if (client[kHTTPConnVersion] === "h1" && parser) { - if (!this[kError] && parser.statusCode && !parser.shouldKeepAlive) { - parser.onMessageComplete(); + function cleanRequestHeaders(headers, removeContent, unknownOrigin) { + const ret = []; + if (Array.isArray(headers)) { + for (let i = 0; i < headers.length; i += 2) { + if (!shouldRemoveHeader(headers[i], removeContent, unknownOrigin)) { + ret.push(headers[i], headers[i + 1]); + } } - this[kParser].destroy(); - this[kParser] = null; - } - const err = this[kError] || new SocketError("closed", util.getSocketInfo(this)); - client[kSocket] = null; - if (client.destroyed) { - assert3(client[kPending] === 0); - const requests = client[kQueue].splice(client[kRunningIdx]); - for (let i = 0; i < requests.length; i++) { - const request = requests[i]; - errorRequest(client, request, err); + } else if (headers && typeof headers === "object") { + for (const key of Object.keys(headers)) { + if (!shouldRemoveHeader(key, removeContent, unknownOrigin)) { + ret.push(key, headers[key]); + } } - } else if (client[kRunning] > 0 && err.code !== "UND_ERR_INFO") { - const request = client[kQueue][client[kRunningIdx]]; - client[kQueue][client[kRunningIdx]++] = null; - errorRequest(client, request, 
err); + } else { + assert3(headers == null, "headers must be an object or an array"); } - client[kPendingIdx] = client[kRunningIdx]; - assert3(client[kRunning] === 0); - client.emit("disconnect", client[kUrl], [client], err); - resume(client); + return ret; } - async function connect(client) { - assert3(!client[kConnecting]); - assert3(!client[kSocket]); - let { host, hostname, protocol, port } = client[kUrl]; - if (hostname[0] === "[") { - const idx = hostname.indexOf("]"); - assert3(idx !== -1); - const ip = hostname.substring(1, idx); - assert3(net.isIP(ip)); - hostname = ip; - } - client[kConnecting] = true; - if (channels.beforeConnect.hasSubscribers) { - channels.beforeConnect.publish({ - connectParams: { - host, - hostname, - protocol, - port, - version: client[kHTTPConnVersion], - servername: client[kServerName], - localAddress: client[kLocalAddress] - }, - connector: client[kConnector] - }); - } - try { - const socket = await new Promise((resolve, reject) => { - client[kConnector]({ - host, - hostname, - protocol, - port, - servername: client[kServerName], - localAddress: client[kLocalAddress] - }, (err, socket2) => { - if (err) { - reject(err); - } else { - resolve(socket2); - } - }); - }); - if (client.destroyed) { - util.destroy(socket.on("error", () => { - }), new ClientDestroyedError()); - return; + module2.exports = RedirectHandler; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/interceptor/redirect-interceptor.js +var require_redirect_interceptor = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/interceptor/redirect-interceptor.js"(exports, module2) { + "use strict"; + var RedirectHandler = require_redirect_handler(); + function createRedirectInterceptor({ maxRedirections: defaultMaxRedirections }) { + return (dispatch) => { + return function Intercept(opts, handler) { + const { maxRedirections = defaultMaxRedirections } = opts; + if (!maxRedirections) { + return dispatch(opts, handler); + } + const redirectHandler = new RedirectHandler(dispatch, maxRedirections, opts, handler); + opts = { ...opts, maxRedirections: 0 }; + return dispatch(opts, redirectHandler); + }; + }; + } + module2.exports = createRedirectInterceptor; + } +}); + +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/client.js +var require_client = __commonJS({ + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/client.js"(exports, module2) { + "use strict"; + var assert3 = require("node:assert"); + var net = require("node:net"); + var http = require("node:http"); + var util = require_util(); + var { channels } = require_diagnostics(); + var Request = require_request(); + var DispatcherBase = require_dispatcher_base(); + var { + InvalidArgumentError, + InformationalError, + ClientDestroyedError + } = require_errors(); + var buildConnector = require_connect(); + var { + kUrl, + kServerName, + kClient, + kBusy, + kConnect, + kResuming, + kRunning, + kPending, + kSize, + kQueue, + kConnected, + kConnecting, + kNeedDrain, + kKeepAliveDefaultTimeout, + kHostHeader, + kPendingIdx, + kRunningIdx, + kError, + kPipelining, + kKeepAliveTimeoutValue, + kMaxHeadersSize, + kKeepAliveMaxTimeout, + kKeepAliveTimeoutThreshold, + kHeadersTimeout, + kBodyTimeout, + kStrictContentLength, + kConnector, + kMaxRedirections, + kMaxRequests, + kCounter, + kClose, + kDestroy, + kDispatch, + kInterceptors, + kLocalAddress, + kMaxResponseSize, + kOnError, + 
kHTTPContext, + kMaxConcurrentStreams, + kResume + } = require_symbols(); + var connectH1 = require_client_h1(); + var connectH2 = require_client_h2(); + var deprecatedInterceptorWarned = false; + var kClosedResolve = Symbol("kClosedResolve"); + function getPipelining(client) { + return client[kPipelining] ?? client[kHTTPContext]?.defaultPipelining ?? 1; + } + var Client = class extends DispatcherBase { + /** + * + * @param {string|URL} url + * @param {import('../../types/client.js').Client.Options} options + */ + constructor(url, { + interceptors, + maxHeaderSize, + headersTimeout, + socketTimeout, + requestTimeout, + connectTimeout, + bodyTimeout, + idleTimeout, + keepAlive, + keepAliveTimeout, + maxKeepAliveTimeout, + keepAliveMaxTimeout, + keepAliveTimeoutThreshold, + socketPath, + pipelining, + tls, + strictContentLength, + maxCachedSessions, + maxRedirections, + connect: connect2, + maxRequestsPerClient, + localAddress, + maxResponseSize, + autoSelectFamily, + autoSelectFamilyAttemptTimeout, + // h2 + maxConcurrentStreams, + allowH2 + } = {}) { + super(); + if (keepAlive !== void 0) { + throw new InvalidArgumentError("unsupported keepAlive, use pipelining=0 instead"); } - client[kConnecting] = false; - assert3(socket); - const isH2 = socket.alpnProtocol === "h2"; - if (isH2) { - if (!h2ExperimentalWarned) { - h2ExperimentalWarned = true; - process.emitWarning("H2 support is experimental, expect them to change at any time.", { - code: "UNDICI-H2" - }); - } - const session = http2.connect(client[kUrl], { - createConnection: () => socket, - peerMaxConcurrentStreams: client[kHTTP2SessionState].maxConcurrentStreams - }); - client[kHTTPConnVersion] = "h2"; - session[kClient] = client; - session[kSocket] = socket; - session.on("error", onHttp2SessionError); - session.on("frameError", onHttp2FrameError); - session.on("end", onHttp2SessionEnd); - session.on("goaway", onHTTP2GoAway); - session.on("close", onSocketClose); - session.unref(); - client[kHTTP2Session] = session; - socket[kHTTP2Session] = session; - } else { - if (!llhttpInstance) { - llhttpInstance = await llhttpPromise; - llhttpPromise = null; - } - socket[kNoRef] = false; - socket[kWriting] = false; - socket[kReset] = false; - socket[kBlocking] = false; - socket[kParser] = new Parser(client, socket, llhttpInstance); + if (socketTimeout !== void 0) { + throw new InvalidArgumentError("unsupported socketTimeout, use headersTimeout & bodyTimeout instead"); } - socket[kCounter] = 0; - socket[kMaxRequests] = client[kMaxRequests]; - socket[kClient] = client; - socket[kError] = null; - socket.on("error", onSocketError).on("readable", onSocketReadable).on("end", onSocketEnd).on("close", onSocketClose); - client[kSocket] = socket; - if (channels.connected.hasSubscribers) { - channels.connected.publish({ - connectParams: { - host, - hostname, - protocol, - port, - version: client[kHTTPConnVersion], - servername: client[kServerName], - localAddress: client[kLocalAddress] - }, - connector: client[kConnector], - socket - }); + if (requestTimeout !== void 0) { + throw new InvalidArgumentError("unsupported requestTimeout, use headersTimeout & bodyTimeout instead"); } - client.emit("connect", client[kUrl], [client]); - } catch (err) { - if (client.destroyed) { - return; + if (idleTimeout !== void 0) { + throw new InvalidArgumentError("unsupported idleTimeout, use keepAliveTimeout instead"); } - client[kConnecting] = false; - if (channels.connectError.hasSubscribers) { - channels.connectError.publish({ - connectParams: { - host, - hostname, - 
protocol, - port, - version: client[kHTTPConnVersion], - servername: client[kServerName], - localAddress: client[kLocalAddress] - }, - connector: client[kConnector], - error: err - }); + if (maxKeepAliveTimeout !== void 0) { + throw new InvalidArgumentError("unsupported maxKeepAliveTimeout, use keepAliveMaxTimeout instead"); } - if (err.code === "ERR_TLS_CERT_ALTNAME_INVALID") { - assert3(client[kRunning] === 0); - while (client[kPending] > 0 && client[kQueue][client[kPendingIdx]].servername === client[kServerName]) { - const request = client[kQueue][client[kPendingIdx]++]; - errorRequest(client, request, err); - } - } else { - onError(client, err); + if (maxHeaderSize != null && !Number.isFinite(maxHeaderSize)) { + throw new InvalidArgumentError("invalid maxHeaderSize"); } - client.emit("connectionError", client[kUrl], [client], err); - } - resume(client); - } - function emitDrain(client) { - client[kNeedDrain] = 0; - client.emit("drain", client[kUrl], [client]); - } - function resume(client, sync) { - if (client[kResuming] === 2) { - return; - } - client[kResuming] = 2; - _resume(client, sync); - client[kResuming] = 0; - if (client[kRunningIdx] > 256) { - client[kQueue].splice(0, client[kRunningIdx]); - client[kPendingIdx] -= client[kRunningIdx]; - client[kRunningIdx] = 0; - } - } - function _resume(client, sync) { - while (true) { - if (client.destroyed) { - assert3(client[kPending] === 0); - return; + if (socketPath != null && typeof socketPath !== "string") { + throw new InvalidArgumentError("invalid socketPath"); } - if (client[kClosedResolve] && !client[kSize]) { - client[kClosedResolve](); - client[kClosedResolve] = null; - return; + if (connectTimeout != null && (!Number.isFinite(connectTimeout) || connectTimeout < 0)) { + throw new InvalidArgumentError("invalid connectTimeout"); } - const socket = client[kSocket]; - if (socket && !socket.destroyed && socket.alpnProtocol !== "h2") { - if (client[kSize] === 0) { - if (!socket[kNoRef] && socket.unref) { - socket.unref(); - socket[kNoRef] = true; - } - } else if (socket[kNoRef] && socket.ref) { - socket.ref(); - socket[kNoRef] = false; - } - if (client[kSize] === 0) { - if (socket[kParser].timeoutType !== TIMEOUT_IDLE) { - socket[kParser].setTimeout(client[kKeepAliveTimeoutValue], TIMEOUT_IDLE); - } - } else if (client[kRunning] > 0 && socket[kParser].statusCode < 200) { - if (socket[kParser].timeoutType !== TIMEOUT_HEADERS) { - const request2 = client[kQueue][client[kRunningIdx]]; - const headersTimeout = request2.headersTimeout != null ? 
request2.headersTimeout : client[kHeadersTimeout]; - socket[kParser].setTimeout(headersTimeout, TIMEOUT_HEADERS); - } - } + if (keepAliveTimeout != null && (!Number.isFinite(keepAliveTimeout) || keepAliveTimeout <= 0)) { + throw new InvalidArgumentError("invalid keepAliveTimeout"); } - if (client[kBusy]) { - client[kNeedDrain] = 2; - } else if (client[kNeedDrain] === 2) { - if (sync) { - client[kNeedDrain] = 1; - process.nextTick(emitDrain, client); - } else { - emitDrain(client); - } - continue; + if (keepAliveMaxTimeout != null && (!Number.isFinite(keepAliveMaxTimeout) || keepAliveMaxTimeout <= 0)) { + throw new InvalidArgumentError("invalid keepAliveMaxTimeout"); } - if (client[kPending] === 0) { - return; + if (keepAliveTimeoutThreshold != null && !Number.isFinite(keepAliveTimeoutThreshold)) { + throw new InvalidArgumentError("invalid keepAliveTimeoutThreshold"); } - if (client[kRunning] >= (client[kPipelining] || 1)) { - return; + if (headersTimeout != null && (!Number.isInteger(headersTimeout) || headersTimeout < 0)) { + throw new InvalidArgumentError("headersTimeout must be a positive integer or zero"); } - const request = client[kQueue][client[kPendingIdx]]; - if (client[kUrl].protocol === "https:" && client[kServerName] !== request.servername) { - if (client[kRunning] > 0) { - return; - } - client[kServerName] = request.servername; - if (socket && socket.servername !== request.servername) { - util.destroy(socket, new InformationalError("servername changed")); - return; - } + if (bodyTimeout != null && (!Number.isInteger(bodyTimeout) || bodyTimeout < 0)) { + throw new InvalidArgumentError("bodyTimeout must be a positive integer or zero"); } - if (client[kConnecting]) { - return; + if (connect2 != null && typeof connect2 !== "function" && typeof connect2 !== "object") { + throw new InvalidArgumentError("connect must be a function or an object"); } - if (!socket && !client[kHTTP2Session]) { - connect(client); - return; + if (maxRedirections != null && (!Number.isInteger(maxRedirections) || maxRedirections < 0)) { + throw new InvalidArgumentError("maxRedirections must be a positive number"); } - if (socket.destroyed || socket[kWriting] || socket[kReset] || socket[kBlocking]) { - return; + if (maxRequestsPerClient != null && (!Number.isInteger(maxRequestsPerClient) || maxRequestsPerClient < 0)) { + throw new InvalidArgumentError("maxRequestsPerClient must be a positive number"); } - if (client[kRunning] > 0 && !request.idempotent) { - return; + if (localAddress != null && (typeof localAddress !== "string" || net.isIP(localAddress) === 0)) { + throw new InvalidArgumentError("localAddress must be valid string IP address"); } - if (client[kRunning] > 0 && (request.upgrade || request.method === "CONNECT")) { - return; + if (maxResponseSize != null && (!Number.isInteger(maxResponseSize) || maxResponseSize < -1)) { + throw new InvalidArgumentError("maxResponseSize must be a positive number"); } - if (client[kRunning] > 0 && util.bodyLength(request.body) !== 0 && (util.isStream(request.body) || util.isAsyncIterable(request.body))) { - return; + if (autoSelectFamilyAttemptTimeout != null && (!Number.isInteger(autoSelectFamilyAttemptTimeout) || autoSelectFamilyAttemptTimeout < -1)) { + throw new InvalidArgumentError("autoSelectFamilyAttemptTimeout must be a positive number"); } - if (!request.aborted && write(client, request)) { - client[kPendingIdx]++; - } else { - client[kQueue].splice(client[kPendingIdx], 1); + if (allowH2 != null && typeof allowH2 !== "boolean") { + throw new 
InvalidArgumentError("allowH2 must be a valid boolean value"); } - } - } - function shouldSendContentLength(method) { - return method !== "GET" && method !== "HEAD" && method !== "OPTIONS" && method !== "TRACE" && method !== "CONNECT"; - } - function write(client, request) { - if (client[kHTTPConnVersion] === "h2") { - writeH2(client, client[kHTTP2Session], request); - return; - } - const { body, method, path: path10, host, upgrade, headers, blocking, reset } = request; - const expectsPayload = method === "PUT" || method === "POST" || method === "PATCH"; - if (body && typeof body.read === "function") { - body.read(0); - } - const bodyLength = util.bodyLength(body); - let contentLength = bodyLength; - if (contentLength === null) { - contentLength = request.contentLength; - } - if (contentLength === 0 && !expectsPayload) { - contentLength = null; - } - if (shouldSendContentLength(method) && contentLength > 0 && request.contentLength !== null && request.contentLength !== contentLength) { - if (client[kStrictContentLength]) { - errorRequest(client, request, new RequestContentLengthMismatchError()); - return false; + if (maxConcurrentStreams != null && (typeof maxConcurrentStreams !== "number" || maxConcurrentStreams < 1)) { + throw new InvalidArgumentError("maxConcurrentStreams must be a positive integer, greater than 0"); } - process.emitWarning(new RequestContentLengthMismatchError()); - } - const socket = client[kSocket]; - try { - request.onConnect((err) => { - if (request.aborted || request.completed) { - return; + if (typeof connect2 !== "function") { + connect2 = buildConnector({ + ...tls, + maxCachedSessions, + allowH2, + socketPath, + timeout: connectTimeout, + ...util.nodeHasAutoSelectFamily && autoSelectFamily ? { autoSelectFamily, autoSelectFamilyAttemptTimeout } : void 0, + ...connect2 + }); + } + if (interceptors?.Client && Array.isArray(interceptors.Client)) { + this[kInterceptors] = interceptors.Client; + if (!deprecatedInterceptorWarned) { + deprecatedInterceptorWarned = true; + process.emitWarning("Client.Options#interceptor is deprecated. Use Dispatcher#compose instead.", { + code: "UNDICI-CLIENT-INTERCEPTOR-DEPRECATED" + }); } - errorRequest(client, request, err || new RequestAbortedError()); - util.destroy(socket, new InformationalError("aborted")); - }); - } catch (err) { - errorRequest(client, request, err); - } - if (request.aborted) { - return false; - } - if (method === "HEAD") { - socket[kReset] = true; + } else { + this[kInterceptors] = [createRedirectInterceptor({ maxRedirections })]; + } + this[kUrl] = util.parseOrigin(url); + this[kConnector] = connect2; + this[kPipelining] = pipelining != null ? pipelining : 1; + this[kMaxHeadersSize] = maxHeaderSize || http.maxHeaderSize; + this[kKeepAliveDefaultTimeout] = keepAliveTimeout == null ? 4e3 : keepAliveTimeout; + this[kKeepAliveMaxTimeout] = keepAliveMaxTimeout == null ? 6e5 : keepAliveMaxTimeout; + this[kKeepAliveTimeoutThreshold] = keepAliveTimeoutThreshold == null ? 1e3 : keepAliveTimeoutThreshold; + this[kKeepAliveTimeoutValue] = this[kKeepAliveDefaultTimeout]; + this[kServerName] = null; + this[kLocalAddress] = localAddress != null ? localAddress : null; + this[kResuming] = 0; + this[kNeedDrain] = 0; + this[kHostHeader] = `host: ${this[kUrl].hostname}${this[kUrl].port ? `:${this[kUrl].port}` : ""}\r +`; + this[kBodyTimeout] = bodyTimeout != null ? bodyTimeout : 3e5; + this[kHeadersTimeout] = headersTimeout != null ? headersTimeout : 3e5; + this[kStrictContentLength] = strictContentLength == null ? 
true : strictContentLength; + this[kMaxRedirections] = maxRedirections; + this[kMaxRequests] = maxRequestsPerClient; + this[kClosedResolve] = null; + this[kMaxResponseSize] = maxResponseSize > -1 ? maxResponseSize : -1; + this[kMaxConcurrentStreams] = maxConcurrentStreams != null ? maxConcurrentStreams : 100; + this[kHTTPContext] = null; + this[kQueue] = []; + this[kRunningIdx] = 0; + this[kPendingIdx] = 0; + this[kResume] = (sync) => resume(this, sync); + this[kOnError] = (err) => onError(this, err); } - if (upgrade || method === "CONNECT") { - socket[kReset] = true; + get pipelining() { + return this[kPipelining]; } - if (reset != null) { - socket[kReset] = reset; + set pipelining(value) { + this[kPipelining] = value; + this[kResume](true); } - if (client[kMaxRequests] && socket[kCounter]++ >= client[kMaxRequests]) { - socket[kReset] = true; + get [kPending]() { + return this[kQueue].length - this[kPendingIdx]; } - if (blocking) { - socket[kBlocking] = true; + get [kRunning]() { + return this[kPendingIdx] - this[kRunningIdx]; } - let header = `${method} ${path10} HTTP/1.1\r -`; - if (typeof host === "string") { - header += `host: ${host}\r -`; - } else { - header += client[kHostHeader]; + get [kSize]() { + return this[kQueue].length - this[kRunningIdx]; } - if (upgrade) { - header += `connection: upgrade\r -upgrade: ${upgrade}\r -`; - } else if (client[kPipelining] && !socket[kReset]) { - header += "connection: keep-alive\r\n"; - } else { - header += "connection: close\r\n"; + get [kConnected]() { + return !!this[kHTTPContext] && !this[kConnecting] && !this[kHTTPContext].destroyed; } - if (headers) { - header += headers; + get [kBusy]() { + return Boolean( + this[kHTTPContext]?.busy(null) || this[kSize] >= (getPipelining(this) || 1) || this[kPending] > 0 + ); } - if (channels.sendHeaders.hasSubscribers) { - channels.sendHeaders.publish({ request, headers: header, socket }); + /* istanbul ignore: only used for test */ + [kConnect](cb) { + connect(this); + this.once("connect", cb); } - if (!body || bodyLength === 0) { - if (contentLength === 0) { - socket.write(`${header}content-length: 0\r -\r -`, "latin1"); + [kDispatch](opts, handler) { + const origin = opts.origin || this[kUrl].origin; + const request = new Request(origin, opts, handler); + this[kQueue].push(request); + if (this[kResuming]) { + } else if (util.bodyLength(request.body) == null && util.isIterable(request.body)) { + this[kResuming] = 1; + queueMicrotask(() => resume(this)); } else { - assert3(contentLength === null, "no body must not have content length"); - socket.write(`${header}\r -`, "latin1"); - } - request.onRequestSent(); - } else if (util.isBuffer(body)) { - assert3(contentLength === body.byteLength, "buffer body must have content length"); - socket.cork(); - socket.write(`${header}content-length: ${contentLength}\r -\r -`, "latin1"); - socket.write(body); - socket.uncork(); - request.onBodySent(body); - request.onRequestSent(); - if (!expectsPayload) { - socket[kReset] = true; + this[kResume](true); } - } else if (util.isBlobLike(body)) { - if (typeof body.stream === "function") { - writeIterable({ body: body.stream(), client, request, socket, contentLength, header, expectsPayload }); - } else { - writeBlob({ body, client, request, socket, contentLength, header, expectsPayload }); + if (this[kResuming] && this[kNeedDrain] !== 2 && this[kBusy]) { + this[kNeedDrain] = 2; } - } else if (util.isStream(body)) { - writeStream({ body, client, request, socket, contentLength, header, expectsPayload }); - } else if 
(util.isIterable(body)) { - writeIterable({ body, client, request, socket, contentLength, header, expectsPayload }); - } else { - assert3(false); - } - return true; - } - function writeH2(client, session, request) { - const { body, method, path: path10, host, upgrade, expectContinue, signal, headers: reqHeaders } = request; - let headers; - if (typeof reqHeaders === "string") - headers = Request[kHTTP2CopyHeaders](reqHeaders.trim()); - else - headers = reqHeaders; - if (upgrade) { - errorRequest(client, request, new Error("Upgrade not supported for H2")); - return false; + return this[kNeedDrain] < 2; } - if (request.aborted) { - return false; + async [kClose]() { + return new Promise((resolve) => { + if (this[kSize]) { + this[kClosedResolve] = resolve; + } else { + resolve(null); + } + }); } - let stream; - const h2State = client[kHTTP2SessionState]; - headers[HTTP2_HEADER_AUTHORITY] = host || client[kHost]; - headers[HTTP2_HEADER_METHOD] = method; - try { - request.onConnect((err) => { - if (request.aborted || request.completed) { - return; + async [kDestroy](err) { + return new Promise((resolve) => { + const requests = this[kQueue].splice(this[kPendingIdx]); + for (let i = 0; i < requests.length; i++) { + const request = requests[i]; + util.errorRequest(this, request, err); } - err = err || new RequestAbortedError(); - if (stream != null) { - util.destroy(stream, err); - h2State.openStreams -= 1; - if (h2State.openStreams === 0) { - session.unref(); + const callback = () => { + if (this[kClosedResolve]) { + this[kClosedResolve](); + this[kClosedResolve] = null; } + resolve(null); + }; + if (this[kHTTPContext]) { + this[kHTTPContext].destroy(err, callback); + this[kHTTPContext] = null; + } else { + queueMicrotask(callback); } - errorRequest(client, request, err); - }); - } catch (err) { - errorRequest(client, request, err); - } - if (method === "CONNECT") { - session.ref(); - stream = session.request(headers, { endStream: false, signal }); - if (stream.id && !stream.pending) { - request.onUpgrade(null, null, stream); - ++h2State.openStreams; - } else { - stream.once("ready", () => { - request.onUpgrade(null, null, stream); - ++h2State.openStreams; - }); - } - stream.once("close", () => { - h2State.openStreams -= 1; - if (h2State.openStreams === 0) - session.unref(); + this[kResume](); }); - return true; - } - headers[HTTP2_HEADER_PATH] = path10; - headers[HTTP2_HEADER_SCHEME] = "https"; - const expectsPayload = method === "PUT" || method === "POST" || method === "PATCH"; - if (body && typeof body.read === "function") { - body.read(0); - } - let contentLength = util.bodyLength(body); - if (contentLength == null) { - contentLength = request.contentLength; - } - if (contentLength === 0 || !expectsPayload) { - contentLength = null; } - if (shouldSendContentLength(method) && contentLength > 0 && request.contentLength != null && request.contentLength !== contentLength) { - if (client[kStrictContentLength]) { - errorRequest(client, request, new RequestContentLengthMismatchError()); - return false; + }; + var createRedirectInterceptor = require_redirect_interceptor(); + function onError(client, err) { + if (client[kRunning] === 0 && err.code !== "UND_ERR_INFO" && err.code !== "UND_ERR_SOCKET") { + assert3(client[kPendingIdx] === client[kRunningIdx]); + const requests = client[kQueue].splice(client[kRunningIdx]); + for (let i = 0; i < requests.length; i++) { + const request = requests[i]; + util.errorRequest(client, request, err); } - process.emitWarning(new RequestContentLengthMismatchError()); 
+ assert3(client[kSize] === 0); } - if (contentLength != null) { - assert3(body, "no body must not have content length"); - headers[HTTP2_HEADER_CONTENT_LENGTH] = `${contentLength}`; + } + async function connect(client) { + assert3(!client[kConnecting]); + assert3(!client[kHTTPContext]); + let { host, hostname, protocol, port } = client[kUrl]; + if (hostname[0] === "[") { + const idx = hostname.indexOf("]"); + assert3(idx !== -1); + const ip = hostname.substring(1, idx); + assert3(net.isIP(ip)); + hostname = ip; } - session.ref(); - const shouldEndStream = method === "GET" || method === "HEAD" || body === null; - if (expectContinue) { - headers[HTTP2_HEADER_EXPECT] = "100-continue"; - stream = session.request(headers, { endStream: shouldEndStream, signal }); - stream.once("continue", writeBodyH2); - } else { - stream = session.request(headers, { - endStream: shouldEndStream, - signal + client[kConnecting] = true; + if (channels.beforeConnect.hasSubscribers) { + channels.beforeConnect.publish({ + connectParams: { + host, + hostname, + protocol, + port, + version: client[kHTTPContext]?.version, + servername: client[kServerName], + localAddress: client[kLocalAddress] + }, + connector: client[kConnector] }); - writeBodyH2(); - } - ++h2State.openStreams; - stream.once("response", (headers2) => { - const { [HTTP2_HEADER_STATUS]: statusCode, ...realHeaders } = headers2; - request.onResponseStarted(); - if (request.onHeaders(Number(statusCode), realHeaders, stream.resume.bind(stream), "") === false) { - stream.pause(); - } - }); - stream.once("end", () => { - request.onComplete([]); - }); - stream.on("data", (chunk) => { - if (request.onData(chunk) === false) { - stream.pause(); - } - }); - stream.once("close", () => { - h2State.openStreams -= 1; - if (h2State.openStreams === 0) { - session.unref(); - } - }); - stream.once("error", function(err) { - if (client[kHTTP2Session] && !client[kHTTP2Session].destroyed && !this.closed && !this.destroyed) { - h2State.streams -= 1; - util.destroy(stream, err); - } - }); - stream.once("frameError", (type, code) => { - const err = new InformationalError(`HTTP/2: "frameError" received - type ${type}, code ${code}`); - errorRequest(client, request, err); - if (client[kHTTP2Session] && !client[kHTTP2Session].destroyed && !this.closed && !this.destroyed) { - h2State.streams -= 1; - util.destroy(stream, err); - } - }); - return true; - function writeBodyH2() { - if (!body) { - request.onRequestSent(); - } else if (util.isBuffer(body)) { - assert3(contentLength === body.byteLength, "buffer body must have content length"); - stream.cork(); - stream.write(body); - stream.uncork(); - stream.end(); - request.onBodySent(body); - request.onRequestSent(); - } else if (util.isBlobLike(body)) { - if (typeof body.stream === "function") { - writeIterable({ - client, - request, - contentLength, - h2stream: stream, - expectsPayload, - body: body.stream(), - socket: client[kSocket], - header: "" - }); - } else { - writeBlob({ - body, - client, - request, - contentLength, - expectsPayload, - h2stream: stream, - header: "", - socket: client[kSocket] - }); - } - } else if (util.isStream(body)) { - writeStream({ - body, - client, - request, - contentLength, - expectsPayload, - socket: client[kSocket], - h2stream: stream, - header: "" - }); - } else if (util.isIterable(body)) { - writeIterable({ - body, - client, - request, - contentLength, - expectsPayload, - header: "", - h2stream: stream, - socket: client[kSocket] - }); - } else { - assert3(false); - } } - } - function 
writeStream({ h2stream, body, client, request, socket, contentLength, header, expectsPayload }) { - assert3(contentLength !== 0 || client[kRunning] === 0, "stream body cannot be pipelined"); - if (client[kHTTPConnVersion] === "h2") { - let onPipeData = function(chunk) { - request.onBodySent(chunk); - }; - const pipe = pipeline( - body, - h2stream, - (err) => { + try { + const socket = await new Promise((resolve, reject) => { + client[kConnector]({ + host, + hostname, + protocol, + port, + servername: client[kServerName], + localAddress: client[kLocalAddress] + }, (err, socket2) => { if (err) { - util.destroy(body, err); - util.destroy(h2stream, err); + reject(err); } else { - request.onRequestSent(); - } - } - ); - pipe.on("data", onPipeData); - pipe.once("end", () => { - pipe.removeListener("data", onPipeData); - util.destroy(pipe); + resolve(socket2); + } + }); }); - return; - } - let finished = false; - const writer = new AsyncWriter({ socket, request, contentLength, client, expectsPayload, header }); - const onData = function(chunk) { - if (finished) { + if (client.destroyed) { + util.destroy(socket.on("error", () => { + }), new ClientDestroyedError()); return; } + assert3(socket); try { - if (!writer.write(chunk) && this.pause) { - this.pause(); - } + client[kHTTPContext] = socket.alpnProtocol === "h2" ? await connectH2(client, socket) : await connectH1(client, socket); } catch (err) { - util.destroy(this, err); - } - }; - const onDrain = function() { - if (finished) { - return; - } - if (body.resume) { - body.resume(); + socket.destroy().on("error", () => { + }); + throw err; } - }; - const onClose = function() { - queueMicrotask(() => { - body.removeListener("error", onFinished); - }); - if (!finished) { - const err = new RequestAbortedError(); - queueMicrotask(() => onFinished(err)); + client[kConnecting] = false; + socket[kCounter] = 0; + socket[kMaxRequests] = client[kMaxRequests]; + socket[kClient] = client; + socket[kError] = null; + if (channels.connected.hasSubscribers) { + channels.connected.publish({ + connectParams: { + host, + hostname, + protocol, + port, + version: client[kHTTPContext]?.version, + servername: client[kServerName], + localAddress: client[kLocalAddress] + }, + connector: client[kConnector], + socket + }); } - }; - const onFinished = function(err) { - if (finished) { + client.emit("connect", client[kUrl], [client]); + } catch (err) { + if (client.destroyed) { return; } - finished = true; - assert3(socket.destroyed || socket[kWriting] && client[kRunning] <= 1); - socket.off("drain", onDrain).off("error", onFinished); - body.removeListener("data", onData).removeListener("end", onFinished).removeListener("close", onClose); - if (!err) { - try { - writer.end(); - } catch (er) { - err = er; - } + client[kConnecting] = false; + if (channels.connectError.hasSubscribers) { + channels.connectError.publish({ + connectParams: { + host, + hostname, + protocol, + port, + version: client[kHTTPContext]?.version, + servername: client[kServerName], + localAddress: client[kLocalAddress] + }, + connector: client[kConnector], + error: err + }); } - writer.destroy(err); - if (err && (err.code !== "UND_ERR_INFO" || err.message !== "reset")) { - util.destroy(body, err); + if (err.code === "ERR_TLS_CERT_ALTNAME_INVALID") { + assert3(client[kRunning] === 0); + while (client[kPending] > 0 && client[kQueue][client[kPendingIdx]].servername === client[kServerName]) { + const request = client[kQueue][client[kPendingIdx]++]; + util.errorRequest(client, request, err); + } } else { - 
util.destroy(body); + onError(client, err); } - }; - body.on("data", onData).on("end", onFinished).on("error", onFinished).on("close", onClose); - if (body.resume) { - body.resume(); + client.emit("connectionError", client[kUrl], [client], err); } - socket.on("drain", onDrain).on("error", onFinished); + client[kResume](); } - async function writeBlob({ h2stream, body, client, request, socket, contentLength, header, expectsPayload }) { - assert3(contentLength === body.size, "blob body must have content length"); - const isH2 = client[kHTTPConnVersion] === "h2"; - try { - if (contentLength != null && contentLength !== body.size) { - throw new RequestContentLengthMismatchError(); - } - const buffer = Buffer.from(await body.arrayBuffer()); - if (isH2) { - h2stream.cork(); - h2stream.write(buffer); - h2stream.uncork(); - } else { - socket.cork(); - socket.write(`${header}content-length: ${contentLength}\r -\r -`, "latin1"); - socket.write(buffer); - socket.uncork(); - } - request.onBodySent(buffer); - request.onRequestSent(); - if (!expectsPayload) { - socket[kReset] = true; - } - resume(client); - } catch (err) { - util.destroy(isH2 ? h2stream : socket, err); - } + function emitDrain(client) { + client[kNeedDrain] = 0; + client.emit("drain", client[kUrl], [client]); } - async function writeIterable({ h2stream, body, client, request, socket, contentLength, header, expectsPayload }) { - assert3(contentLength !== 0 || client[kRunning] === 0, "iterator body cannot be pipelined"); - let callback = null; - function onDrain() { - if (callback) { - const cb = callback; - callback = null; - cb(); - } - } - const waitForDrain = () => new Promise((resolve, reject) => { - assert3(callback === null); - if (socket[kError]) { - reject(socket[kError]); - } else { - callback = resolve; - } - }); - if (client[kHTTPConnVersion] === "h2") { - h2stream.on("close", onDrain).on("drain", onDrain); - try { - for await (const chunk of body) { - if (socket[kError]) { - throw socket[kError]; - } - const res = h2stream.write(chunk); - request.onBodySent(chunk); - if (!res) { - await waitForDrain(); - } - } - } catch (err) { - h2stream.destroy(err); - } finally { - request.onRequestSent(); - h2stream.end(); - h2stream.off("close", onDrain).off("drain", onDrain); - } + function resume(client, sync) { + if (client[kResuming] === 2) { return; } - socket.on("close", onDrain).on("drain", onDrain); - const writer = new AsyncWriter({ socket, request, contentLength, client, expectsPayload, header }); - try { - for await (const chunk of body) { - if (socket[kError]) { - throw socket[kError]; - } - if (!writer.write(chunk)) { - await waitForDrain(); - } - } - writer.end(); - } catch (err) { - writer.destroy(err); - } finally { - socket.off("close", onDrain).off("drain", onDrain); + client[kResuming] = 2; + _resume(client, sync); + client[kResuming] = 0; + if (client[kRunningIdx] > 256) { + client[kQueue].splice(0, client[kRunningIdx]); + client[kPendingIdx] -= client[kRunningIdx]; + client[kRunningIdx] = 0; } } - var AsyncWriter = class { - constructor({ socket, request, contentLength, client, expectsPayload, header }) { - this.socket = socket; - this.request = request; - this.contentLength = contentLength; - this.client = client; - this.bytesWritten = 0; - this.expectsPayload = expectsPayload; - this.header = header; - socket[kWriting] = true; - } - write(chunk) { - const { socket, request, contentLength, client, bytesWritten, expectsPayload, header } = this; - if (socket[kError]) { - throw socket[kError]; - } - if 
(socket.destroyed) { - return false; + function _resume(client, sync) { + while (true) { + if (client.destroyed) { + assert3(client[kPending] === 0); + return; } - const len = Buffer.byteLength(chunk); - if (!len) { - return true; + if (client[kClosedResolve] && !client[kSize]) { + client[kClosedResolve](); + client[kClosedResolve] = null; + return; } - if (contentLength !== null && bytesWritten + len > contentLength) { - if (client[kStrictContentLength]) { - throw new RequestContentLengthMismatchError(); - } - process.emitWarning(new RequestContentLengthMismatchError()); + if (client[kHTTPContext]) { + client[kHTTPContext].resume(); } - socket.cork(); - if (bytesWritten === 0) { - if (!expectsPayload) { - socket[kReset] = true; - } - if (contentLength === null) { - socket.write(`${header}transfer-encoding: chunked\r -`, "latin1"); + if (client[kBusy]) { + client[kNeedDrain] = 2; + } else if (client[kNeedDrain] === 2) { + if (sync) { + client[kNeedDrain] = 1; + queueMicrotask(() => emitDrain(client)); } else { - socket.write(`${header}content-length: ${contentLength}\r -\r -`, "latin1"); + emitDrain(client); } + continue; } - if (contentLength === null) { - socket.write(`\r -${len.toString(16)}\r -`, "latin1"); + if (client[kPending] === 0) { + return; } - this.bytesWritten += len; - const ret = socket.write(chunk); - socket.uncork(); - request.onBodySent(chunk); - if (!ret) { - if (socket[kParser].timeout && socket[kParser].timeoutType === TIMEOUT_HEADERS) { - if (socket[kParser].timeout.refresh) { - socket[kParser].timeout.refresh(); - } - } + if (client[kRunning] >= (getPipelining(client) || 1)) { + return; } - return ret; - } - end() { - const { socket, contentLength, client, bytesWritten, expectsPayload, header, request } = this; - request.onRequestSent(); - socket[kWriting] = false; - if (socket[kError]) { - throw socket[kError]; + const request = client[kQueue][client[kPendingIdx]]; + if (client[kUrl].protocol === "https:" && client[kServerName] !== request.servername) { + if (client[kRunning] > 0) { + return; + } + client[kServerName] = request.servername; + client[kHTTPContext]?.destroy(new InformationalError("servername changed"), () => { + client[kHTTPContext] = null; + resume(client); + }); } - if (socket.destroyed) { + if (client[kConnecting]) { return; } - if (bytesWritten === 0) { - if (expectsPayload) { - socket.write(`${header}content-length: 0\r -\r -`, "latin1"); - } else { - socket.write(`${header}\r -`, "latin1"); - } - } else if (contentLength === null) { - socket.write("\r\n0\r\n\r\n", "latin1"); + if (!client[kHTTPContext]) { + connect(client); + return; } - if (contentLength !== null && bytesWritten !== contentLength) { - if (client[kStrictContentLength]) { - throw new RequestContentLengthMismatchError(); - } else { - process.emitWarning(new RequestContentLengthMismatchError()); - } + if (client[kHTTPContext].destroyed) { + return; } - if (socket[kParser].timeout && socket[kParser].timeoutType === TIMEOUT_HEADERS) { - if (socket[kParser].timeout.refresh) { - socket[kParser].timeout.refresh(); - } + if (client[kHTTPContext].busy(request)) { + return; } - resume(client); - } - destroy(err) { - const { socket, client } = this; - socket[kWriting] = false; - if (err) { - assert3(client[kRunning] <= 1, "pipeline should only contain this request"); - util.destroy(socket, err); + if (!request.aborted && client[kHTTPContext].write(request)) { + client[kPendingIdx]++; + } else { + client[kQueue].splice(client[kPendingIdx], 1); } } - }; - function errorRequest(client, 
request, err) { - try { - request.onError(err); - assert3(request.aborted); - } catch (err2) { - client.emit("error", err2); - } } module2.exports = Client; } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/pool.js +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/pool.js var require_pool = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/pool.js"(exports, module2) { + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/pool.js"(exports, module2) { "use strict"; var { PoolBase, @@ -13177,9 +12954,9 @@ var require_pool = __commonJS({ } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/agent.js +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/agent.js var require_agent = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/agent.js"(exports, module2) { + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/agent.js"(exports, module2) { "use strict"; var { InvalidArgumentError } = require_errors(); var { kClients, kRunning, kClose, kDestroy, kDispatch, kInterceptors } = require_symbols(); @@ -13187,7 +12964,7 @@ var require_agent = __commonJS({ var Pool = require_pool(); var Client = require_client(); var util = require_util(); - var createRedirectInterceptor = require_redirectInterceptor(); + var createRedirectInterceptor = require_redirect_interceptor(); var kOnConnect = Symbol("onConnect"); var kOnDisconnect = Symbol("onDisconnect"); var kOnConnectionError = Symbol("onConnectionError"); @@ -13274,16 +13051,16 @@ var require_agent = __commonJS({ } }); -// .yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/proxy-agent.js +// .yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/proxy-agent.js var require_proxy_agent = __commonJS({ - ".yarn/cache/undici-npm-6.6.2-a0bd6785a6-c8c8a43605.zip/node_modules/undici/lib/proxy-agent.js"(exports, module2) { + ".yarn/cache/undici-npm-6.13.0-1545cd855e-b1b0456e7d.zip/node_modules/undici/lib/dispatcher/proxy-agent.js"(exports, module2) { "use strict"; var { kProxy, kClose, kDestroy, kInterceptors } = require_symbols(); var { URL: URL2 } = require("node:url"); var Agent = require_agent(); var Pool = require_pool(); var DispatcherBase = require_dispatcher_base(); - var { InvalidArgumentError, RequestAbortedError } = require_errors(); + var { InvalidArgumentError, RequestAbortedError, SecureProxyConnectionError } = require_errors(); var buildConnector = require_connect(); var kAgent = Symbol("proxy agent"); var kClient = Symbol("proxy client"); @@ -13294,42 +13071,26 @@ var require_proxy_agent = __commonJS({ function defaultProtocolPort(protocol) { return protocol === "https:" ? 
443 : 80; } - function buildProxyOptions(opts) { - if (typeof opts === "string") { - opts = { uri: opts }; - } - if (!opts || !opts.uri) { - throw new InvalidArgumentError("Proxy opts.uri is mandatory"); - } - return { - uri: opts.uri, - protocol: opts.protocol || "https" - }; - } function defaultFactory(origin, opts) { return new Pool(origin, opts); } - var ProxyAgent = class extends DispatcherBase { + var ProxyAgent2 = class extends DispatcherBase { constructor(opts) { - super(opts); - this[kProxy] = buildProxyOptions(opts); - this[kAgent] = new Agent(opts); - this[kInterceptors] = opts.interceptors?.ProxyAgent && Array.isArray(opts.interceptors.ProxyAgent) ? opts.interceptors.ProxyAgent : []; - if (typeof opts === "string") { - opts = { uri: opts }; - } - if (!opts || !opts.uri) { - throw new InvalidArgumentError("Proxy opts.uri is mandatory"); + super(); + if (!opts || typeof opts === "object" && !(opts instanceof URL2) && !opts.uri) { + throw new InvalidArgumentError("Proxy uri is mandatory"); } const { clientFactory = defaultFactory } = opts; if (typeof clientFactory !== "function") { throw new InvalidArgumentError("Proxy opts.clientFactory must be a function."); } + const url = this.#getUrl(opts); + const { href, origin, port, protocol, username, password, hostname: proxyHostname } = url; + this[kProxy] = { uri: href, protocol }; + this[kInterceptors] = opts.interceptors?.ProxyAgent && Array.isArray(opts.interceptors.ProxyAgent) ? opts.interceptors.ProxyAgent : []; this[kRequestTls] = opts.requestTls; this[kProxyTls] = opts.proxyTls; this[kProxyHeaders] = opts.headers || {}; - const resolvedUrl = new URL2(opts.uri); - const { origin, port, username, password } = resolvedUrl; if (opts.auth && opts.token) { throw new InvalidArgumentError("opts.auth cannot be used in combination with opts.token"); } else if (opts.auth) { @@ -13341,24 +13102,25 @@ var require_proxy_agent = __commonJS({ } const connect = buildConnector({ ...opts.proxyTls }); this[kConnectEndpoint] = buildConnector({ ...opts.requestTls }); - this[kClient] = clientFactory(resolvedUrl, { connect }); + this[kClient] = clientFactory(url, { connect }); this[kAgent] = new Agent({ ...opts, connect: async (opts2, callback) => { - let requestedHost = opts2.host; + let requestedPath = opts2.host; if (!opts2.port) { - requestedHost += `:${defaultProtocolPort(opts2.protocol)}`; + requestedPath += `:${defaultProtocolPort(opts2.protocol)}`; } try { const { socket, statusCode } = await this[kClient].connect({ origin, port, - path: requestedHost, + path: requestedPath, signal: opts2.signal, headers: { ...this[kProxyHeaders], - host: requestedHost - } + host: opts2.host + }, + servername: this[kProxyTls]?.servername || proxyHostname }); if (statusCode !== 200) { socket.on("error", () => { @@ -13377,26 +13139,43 @@ var require_proxy_agent = __commonJS({ } this[kConnectEndpoint]({ ...opts2, servername, httpSocket: socket }, callback); } catch (err) { - callback(err); + if (err.code === "ERR_TLS_CERT_ALTNAME_INVALID") { + callback(new SecureProxyConnectionError(err)); + } else { + callback(err); + } } } }); } dispatch(opts, handler) { - const { host } = new URL2(opts.origin); const headers = buildHeaders(opts.headers); throwIfProxyAuthIsSent(headers); + if (headers && !("host" in headers) && !("Host" in headers)) { + const { host } = new URL2(opts.origin); + headers.host = host; + } return this[kAgent].dispatch( { ...opts, - headers: { - ...headers, - host - } + headers }, handler ); } + /** + * @param 
{import('../types/proxy-agent').ProxyAgent.Options | string | URL} opts + * @returns {URL} + */ + #getUrl(opts) { + if (typeof opts === "string") { + return new URL2(opts); + } else if (opts instanceof URL2) { + return opts; + } else { + return new URL2(opts.uri); + } + } async [kClose]() { await this[kAgent].close(); await this[kClient].close(); @@ -13422,13 +13201,13 @@ var require_proxy_agent = __commonJS({ throw new InvalidArgumentError("Proxy-Authorization should be sent in ProxyAgent constructor"); } } - module2.exports = ProxyAgent; + module2.exports = ProxyAgent2; } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/high-level-opt.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/high-level-opt.js var require_high_level_opt = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/high-level-opt.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/high-level-opt.js"(exports, module2) { "use strict"; var argmap = /* @__PURE__ */ new Map([ ["C", "cwd"], @@ -14969,17 +14748,17 @@ var require_minizlib = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/normalize-windows-path.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/normalize-windows-path.js var require_normalize_windows_path = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/normalize-windows-path.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/normalize-windows-path.js"(exports, module2) { var platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform; module2.exports = platform !== "win32" ? 
(p) => p : (p) => p && p.replace(/\\/g, "/"); } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/read-entry.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/read-entry.js var require_read_entry = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/read-entry.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/read-entry.js"(exports, module2) { "use strict"; var { Minipass } = require_minipass(); var normPath = require_normalize_windows_path(); @@ -15071,9 +14850,9 @@ var require_read_entry = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/types.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/types.js var require_types = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/types.js"(exports) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/types.js"(exports) { "use strict"; exports.name = /* @__PURE__ */ new Map([ ["0", "File"], @@ -15118,9 +14897,9 @@ var require_types = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/large-numbers.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/large-numbers.js var require_large_numbers = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/large-numbers.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/large-numbers.js"(exports, module2) { "use strict"; var encode = (num, buf) => { if (!Number.isSafeInteger(num)) { @@ -15208,9 +14987,9 @@ var require_large_numbers = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/header.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/header.js var require_header = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/header.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/header.js"(exports, module2) { "use strict"; var types = require_types(); var pathModule = require("path").posix; @@ -15428,9 +15207,9 @@ var require_header = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/pax.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/pax.js var require_pax = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/pax.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/pax.js"(exports, module2) { "use strict"; var Header = require_header(); var path10 = require("path"); @@ -15529,9 +15308,9 @@ var require_pax = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/strip-trailing-slashes.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/strip-trailing-slashes.js var require_strip_trailing_slashes = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/strip-trailing-slashes.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/strip-trailing-slashes.js"(exports, module2) { module2.exports = (str) => { let i = str.length - 1; let slashesStart = -1; @@ -15544,9 +15323,9 @@ var require_strip_trailing_slashes = 
__commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/warn-mixin.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/warn-mixin.js var require_warn_mixin = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/warn-mixin.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/warn-mixin.js"(exports, module2) { "use strict"; module2.exports = (Base) => class extends Base { warn(code, message, data = {}) { @@ -15574,9 +15353,9 @@ var require_warn_mixin = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/winchars.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/winchars.js var require_winchars = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/winchars.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/winchars.js"(exports, module2) { "use strict"; var raw = [ "|", @@ -15595,9 +15374,9 @@ var require_winchars = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/strip-absolute-path.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/strip-absolute-path.js var require_strip_absolute_path = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/strip-absolute-path.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/strip-absolute-path.js"(exports, module2) { var { isAbsolute, parse } = require("path").win32; module2.exports = (path10) => { let r = ""; @@ -15613,9 +15392,9 @@ var require_strip_absolute_path = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/mode-fix.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/mode-fix.js var require_mode_fix = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/mode-fix.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/mode-fix.js"(exports, module2) { "use strict"; module2.exports = (mode, isDir, portable) => { mode &= 4095; @@ -15638,9 +15417,9 @@ var require_mode_fix = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/write-entry.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/write-entry.js var require_write_entry = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/write-entry.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/write-entry.js"(exports, module2) { "use strict"; var { Minipass } = require_minipass(); var Pax = require_pax(); @@ -16103,9 +15882,9 @@ var require_write_entry = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/pack.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/pack.js var require_pack = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/pack.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/pack.js"(exports, module2) { "use strict"; var PackJob = class { constructor(path11, absolute) { @@ -16847,9 +16626,9 @@ var require_fs_minipass = __commonJS({ } }); -// 
.yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/parse.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/parse.js var require_parse2 = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/parse.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/parse.js"(exports, module2) { "use strict"; var warner = require_warn_mixin(); var Header = require_header(); @@ -17271,9 +17050,9 @@ var require_parse2 = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/list.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/list.js var require_list = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/list.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/list.js"(exports, module2) { "use strict"; var hlo = require_high_level_opt(); var Parser = require_parse2(); @@ -17385,9 +17164,9 @@ var require_list = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/create.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/create.js var require_create = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/create.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/create.js"(exports, module2) { "use strict"; var hlo = require_high_level_opt(); var Pack = require_pack(); @@ -17479,9 +17258,9 @@ var require_create = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/replace.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/replace.js var require_replace = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/replace.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/replace.js"(exports, module2) { "use strict"; var hlo = require_high_level_opt(); var Pack = require_pack(); @@ -17695,9 +17474,9 @@ var require_replace = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/update.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/update.js var require_update = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/update.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/update.js"(exports, module2) { "use strict"; var hlo = require_high_level_opt(); var r = require_replace(); @@ -18082,9 +17861,9 @@ var require_chownr = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/mkdir.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/mkdir.js var require_mkdir = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/mkdir.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/mkdir.js"(exports, module2) { "use strict"; var mkdirp = require_mkdirp(); var fs8 = require("fs"); @@ -18274,9 +18053,9 @@ var require_mkdir = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/normalize-unicode.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/normalize-unicode.js 
var require_normalize_unicode = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/normalize-unicode.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/normalize-unicode.js"(exports, module2) { var normalizeCache = /* @__PURE__ */ Object.create(null); var { hasOwnProperty } = Object.prototype; module2.exports = (s) => { @@ -18288,9 +18067,9 @@ var require_normalize_unicode = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/path-reservations.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/path-reservations.js var require_path_reservations = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/path-reservations.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/path-reservations.js"(exports, module2) { var assert3 = require("assert"); var normalize = require_normalize_unicode(); var stripSlashes = require_strip_trailing_slashes(); @@ -18402,9 +18181,9 @@ var require_path_reservations = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/get-write-flag.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/get-write-flag.js var require_get_write_flag = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/get-write-flag.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/get-write-flag.js"(exports, module2) { var platform = process.env.__FAKE_PLATFORM__ || process.platform; var isWindows = platform === "win32"; var fs8 = global.__FAKE_TESTING_FS__ || require("fs"); @@ -18416,9 +18195,9 @@ var require_get_write_flag = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/unpack.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/unpack.js var require_unpack = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/unpack.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/unpack.js"(exports, module2) { "use strict"; var assert3 = require("assert"); var Parser = require_parse2(); @@ -18461,6 +18240,7 @@ var require_unpack = __commonJS({ var getFlag = require_get_write_flag(); var platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform; var isWindows = platform === "win32"; + var DEFAULT_MAX_DEPTH = 1024; var unlinkFile = (path11, cb) => { if (!isWindows) { return fs8.unlink(path11, cb); @@ -18539,6 +18319,7 @@ var require_unpack = __commonJS({ } this.processUid = (this.preserveOwner || this.setOwner) && process.getuid ? process.getuid() : null; this.processGid = (this.preserveOwner || this.setOwner) && process.getgid ? process.getgid() : null; + this.maxDepth = typeof opt.maxDepth === "number" ? 
opt.maxDepth : DEFAULT_MAX_DEPTH; this.forceChown = opt.forceChown === true; this.win32 = !!opt.win32 || isWindows; this.newer = !!opt.newer; @@ -18571,12 +18352,12 @@ var require_unpack = __commonJS({ } } [CHECKPATH](entry) { + const p = normPath(entry.path); + const parts = p.split("/"); if (this.strip) { - const parts = normPath(entry.path).split("/"); if (parts.length < this.strip) { return false; } - entry.path = parts.slice(this.strip).join("/"); if (entry.type === "Link") { const linkparts = normPath(entry.linkpath).split("/"); if (linkparts.length >= this.strip) { @@ -18585,10 +18366,19 @@ var require_unpack = __commonJS({ return false; } } + parts.splice(0, this.strip); + entry.path = parts.join("/"); + } + if (isFinite(this.maxDepth) && parts.length > this.maxDepth) { + this.warn("TAR_ENTRY_ERROR", "path excessively deep", { + entry, + path: p, + depth: parts.length, + maxDepth: this.maxDepth + }); + return false; } if (!this.preservePaths) { - const p = normPath(entry.path); - const parts = p.split("/"); if (parts.includes("..") || isWindows && /^[a-z]:\.\.$/i.test(parts[0])) { this.warn("TAR_ENTRY_ERROR", `path contains '..'`, { entry, @@ -19098,9 +18888,9 @@ var require_unpack = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/extract.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/extract.js var require_extract = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/lib/extract.js"(exports, module2) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/lib/extract.js"(exports, module2) { "use strict"; var hlo = require_high_level_opt(); var Unpack = require_unpack(); @@ -19183,9 +18973,9 @@ var require_extract = __commonJS({ } }); -// .yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/index.js +// .yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/index.js var require_tar = __commonJS({ - ".yarn/cache/tar-npm-6.2.0-3eb25205a7-02ca064a1a.zip/node_modules/tar/index.js"(exports) { + ".yarn/cache/tar-npm-6.2.1-237800bb20-a5eca3eb50.zip/node_modules/tar/index.js"(exports) { "use strict"; exports.c = exports.create = require_create(); exports.r = exports.replace = require_replace(); @@ -20629,7 +20419,7 @@ var require_cmd_shim = __commonJS({ return cmdShim2(src, to, opts).catch(() => { }); } - function rm2(path11, opts) { + function rm(path11, opts) { return opts.fs_.unlink(path11).catch(() => { }); } @@ -20653,7 +20443,7 @@ var require_cmd_shim = __commonJS({ return Promise.all(generatorAndExts.map((generatorAndExt) => writeShim(src, to + generatorAndExt.extension, srcRuntimeInfo, generatorAndExt.generator, opts_))); } function writeShimPre(target, opts) { - return rm2(target, opts); + return rm(target, opts); } function writeShimPost(target, opts) { return chmodShim(target, opts); @@ -22627,7 +22417,7 @@ function String2(descriptor, ...args) { } // package.json -var version = "0.26.0"; +var version = "0.28.0"; // sources/Engine.ts var import_fs4 = __toESM(require("fs")); @@ -22639,7 +22429,7 @@ var import_semver4 = __toESM(require_semver2()); var config_default = { definitions: { npm: { - default: "10.5.0+sha1.726f91df5b1b14d9637c8819d7e71cb873c395a1", + default: "10.5.2+sha1.0e9b72afaf5ecf8249b2abb4b7417db6739c1475", fetchLatestFrom: { type: "npm", package: "npm" @@ -22676,7 +22466,7 @@ var config_default = { } }, pnpm: { - default: "8.15.4+sha1.c85a4305534f76d461407b59277b954bac97b5c4", + default: 
"9.0.3+sha1.ff3ad37177cbd0843e533aab13d5e40a05803b47", fetchLatestFrom: { type: "npm", package: "pnpm" @@ -22734,7 +22524,7 @@ var config_default = { } }, yarn: { - default: "1.22.21+sha1.1959a18351b811cdeedbd484a8f86c3cc3bbaf72", + default: "1.22.22+sha1.ac34549e6aa8e7ead463a7407e1c7390f61a6610", fetchLatestFrom: { type: "npm", package: "yarn" @@ -22787,7 +22577,8 @@ var config_default = { }, npmRegistry: { type: "npm", - package: "@yarnpkg/cli-dist" + package: "@yarnpkg/cli-dist", + bin: "bin/yarn.js" }, commands: { use: [ @@ -22798,17 +22589,28 @@ var config_default = { } } } + }, + keys: { + npm: [ + { + expires: null, + keyid: "SHA256:jl3bwswu80PjjokCgh0o2w5c2U4LhQAE57gj9cz1kzA", + keytype: "ecdsa-sha2-nistp256", + scheme: "ecdsa-sha2-nistp256", + key: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE1Olb3zMAFFxXKHiIkQO5cJ3Yhl5i6UPp+IhuteBJbuHcA5UogKo0EWtlWwW6KSaKoTNEYL7JlCQiVnkhBktUgg==" + } + ] } }; // sources/corepackUtils.ts -var import_crypto = require("crypto"); +var import_crypto2 = require("crypto"); var import_events2 = require("events"); var import_fs2 = __toESM(require("fs")); var import_module = __toESM(require("module")); var import_path2 = __toESM(require("path")); var import_semver = __toESM(require_semver2()); -var import_promises2 = require("timers/promises"); +var import_promises = require("timers/promises"); // sources/debugUtils.ts var import_debug = __toESM(require_src()); @@ -22853,26 +22655,108 @@ function getTemporaryFolder(target = (0, import_os.tmpdir)()) { } } -// sources/fsUtils.ts -var import_promises = require("fs/promises"); -async function rimraf(path10) { - return (0, import_promises.rm)(path10, { recursive: true, force: true }); -} - // sources/httpUtils.ts var import_assert = __toESM(require("assert")); var import_events = require("events"); var import_process2 = require("process"); var import_stream = require("stream"); + +// sources/npmRegistryUtils.ts +var import_crypto = require("crypto"); +var DEFAULT_HEADERS = { + [`Accept`]: `application/vnd.npm.install-v1+json; q=1.0, application/json; q=0.8` +}; +var DEFAULT_NPM_REGISTRY_URL = `https://registry.npmjs.org`; +async function fetchAsJson2(packageName, version2) { + const npmRegistryUrl = process.env.COREPACK_NPM_REGISTRY || DEFAULT_NPM_REGISTRY_URL; + if (process.env.COREPACK_ENABLE_NETWORK === `0`) + throw new UsageError(`Network access disabled by the environment; can't reach npm repository ${npmRegistryUrl}`); + const headers = { ...DEFAULT_HEADERS }; + if (`COREPACK_NPM_TOKEN` in process.env) { + headers.authorization = `Bearer ${process.env.COREPACK_NPM_TOKEN}`; + } else if (`COREPACK_NPM_USERNAME` in process.env && `COREPACK_NPM_PASSWORD` in process.env) { + const encodedCreds = Buffer.from(`${process.env.COREPACK_NPM_USERNAME}:${process.env.COREPACK_NPM_PASSWORD}`, `utf8`).toString(`base64`); + headers.authorization = `Basic ${encodedCreds}`; + } + return fetchAsJson(`${npmRegistryUrl}/${packageName}${version2 ? `/${version2}` : ``}`, { headers }); +} +function verifySignature({ signatures, integrity, packageName, version: version2 }) { + const { npm: keys } = process.env.COREPACK_INTEGRITY_KEYS ? 
JSON.parse(process.env.COREPACK_INTEGRITY_KEYS) : config_default.keys; + const key = keys.find(({ keyid }) => signatures.some((s) => s.keyid === keyid)); + const signature = signatures.find(({ keyid }) => keyid === key?.keyid); + if (key == null || signature == null) + throw new Error(`Cannot find matching keyid: ${JSON.stringify({ signatures, keys })}`); + const verifier = (0, import_crypto.createVerify)(`SHA256`); + verifier.end(`${packageName}@${version2}:${integrity}`); + const valid = verifier.verify( + `-----BEGIN PUBLIC KEY----- +${key.key} +-----END PUBLIC KEY-----`, + signature.sig, + `base64` + ); + if (!valid) { + throw new Error(`Signature does not match`); + } +} +async function fetchLatestStableVersion(packageName) { + const metadata = await fetchAsJson2(packageName, `latest`); + const { version: version2, dist: { integrity, signatures } } = metadata; + if (process.env.COREPACK_INTEGRITY_KEYS !== ``) { + verifySignature({ + packageName, + version: version2, + integrity, + signatures + }); + } + return `${version2}+sha512.${Buffer.from(integrity.slice(7), `base64`).toString(`hex`)}`; +} +async function fetchAvailableTags(packageName) { + const metadata = await fetchAsJson2(packageName); + return metadata[`dist-tags`]; +} +async function fetchAvailableVersions(packageName) { + const metadata = await fetchAsJson2(packageName); + return Object.keys(metadata.versions); +} +async function fetchTarballURLAndSignature(packageName, version2) { + const versionMetadata = await fetchAsJson2(packageName, version2); + const { tarball, signatures, integrity } = versionMetadata.dist; + if (tarball === void 0 || !tarball.startsWith(`http`)) + throw new Error(`${packageName}@${version2} does not have a valid tarball.`); + return { tarball, signatures, integrity }; +} + +// sources/httpUtils.ts async function fetch(input, init) { if (process.env.COREPACK_ENABLE_NETWORK === `0`) throw new UsageError(`Network access disabled by the environment; can't reach ${input}`); const agent = await getProxyAgent(input); + if (typeof input === `string`) + input = new URL(input); + let headers = init?.headers; + const username = input.username ?? process.env.COREPACK_NPM_USERNAME; + const password = input.password ?? 
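
Illustrative sketch (not from the patch): verifySignature() above checks the registry's ECDSA signature over the string "<package>@<version>:<integrity>", and fetchLatestStableVersion() now pins "<version>+sha512.<hex>" derived from the SRI integrity string. The ternary above also shows that the trusted key set can be overridden, or verification skipped entirely with an empty string, through COREPACK_INTEGRITY_KEYS. A self-contained restatement of both steps, with placeholder values:

    const { createVerify } = require('crypto');

    // Same check as verifySignature() above: the registry signs
    // "<package>@<version>:<integrity>" with a published ECDSA P-256 key.
    function checkRegistrySignature({ packageName, version, integrity, key, sig }) {
      const verifier = createVerify('SHA256');
      verifier.end(`${packageName}@${version}:${integrity}`);
      const pem = `-----BEGIN PUBLIC KEY-----\n${key}\n-----END PUBLIC KEY-----`;
      return verifier.verify(pem, sig, 'base64');
    }

    // Same conversion as fetchLatestStableVersion() above: an SRI string
    // "sha512-<base64>" becomes the hex digest used in the version pin.
    const integrity = 'sha512-3q2+7w==';   // placeholder SRI value
    const hex = Buffer.from(integrity.slice('sha512-'.length), 'base64').toString('hex');
    console.log(hex);                       // "deadbeef" for this placeholder
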
process.env.COREPACK_NPM_PASSWORD; + if (username || password) { + headers = { + ...headers, + authorization: `Basic ${Buffer.from(`${username}:${password}`).toString(`base64`)}` + }; + input.username = input.password = ``; + } + if (input.origin === (process.env.COREPACK_NPM_REGISTRY || DEFAULT_NPM_REGISTRY_URL) && process.env.COREPACK_NPM_TOKEN) { + headers = { + ...headers, + authorization: `Bearer ${process.env.COREPACK_NPM_TOKEN}` + }; + } let response; try { response = await globalThis.fetch(input, { ...init, - dispatcher: agent + dispatcher: agent, + headers }); } catch (error) { throw new Error( @@ -22911,58 +22795,25 @@ async function fetchUrlStream(input, init) { const stream = import_stream.Readable.fromWeb(webStream); return stream; } +var ProxyAgent; async function getProxyAgent(input) { const { getProxyForUrl } = await Promise.resolve().then(() => __toESM(require_proxy_from_env())); const proxy = getProxyForUrl(input); if (!proxy) return void 0; - const { default: ProxyAgent } = await Promise.resolve().then(() => __toESM(require_proxy_agent())); - return new ProxyAgent(proxy); -} - -// sources/npmRegistryUtils.ts -var DEFAULT_HEADERS = { - [`Accept`]: `application/vnd.npm.install-v1+json; q=1.0, application/json; q=0.8` -}; -var DEFAULT_NPM_REGISTRY_URL = `https://registry.npmjs.org`; -async function fetchAsJson2(packageName) { - const npmRegistryUrl = process.env.COREPACK_NPM_REGISTRY || DEFAULT_NPM_REGISTRY_URL; - if (process.env.COREPACK_ENABLE_NETWORK === `0`) - throw new UsageError(`Network access disabled by the environment; can't reach npm repository ${npmRegistryUrl}`); - const headers = { ...DEFAULT_HEADERS }; - if (`COREPACK_NPM_TOKEN` in process.env) { - headers.authorization = `Bearer ${process.env.COREPACK_NPM_TOKEN}`; - } else if (`COREPACK_NPM_USERNAME` in process.env && `COREPACK_NPM_PASSWORD` in process.env) { - const encodedCreds = Buffer.from(`${process.env.COREPACK_NPM_USERNAME}:${process.env.COREPACK_NPM_PASSWORD}`, `utf8`).toString(`base64`); - headers.authorization = `Basic ${encodedCreds}`; + if (ProxyAgent == null) { + const [api, Dispatcher, _ProxyAgent] = await Promise.all([ + // @ts-expect-error internal module is untyped + Promise.resolve().then(() => __toESM(require_api())), + // @ts-expect-error internal module is untyped + Promise.resolve().then(() => __toESM(require_dispatcher())), + // @ts-expect-error internal module is untyped + Promise.resolve().then(() => __toESM(require_proxy_agent())) + ]); + Object.assign(Dispatcher.default.prototype, api.default); + ProxyAgent = _ProxyAgent.default; } - return fetchAsJson(`${npmRegistryUrl}/${packageName}`, { headers }); -} -async function fetchLatestStableVersion(packageName) { - const metadata = await fetchAsJson2(packageName); - const { latest } = metadata[`dist-tags`]; - if (latest === void 0) - throw new Error(`${packageName} does not have a "latest" tag.`); - const { shasum } = metadata.versions[latest].dist; - return `${latest}+sha1.${shasum}`; -} -async function fetchAvailableTags(packageName) { - const metadata = await fetchAsJson2(packageName); - return metadata[`dist-tags`]; -} -async function fetchAvailableVersions(packageName) { - const metadata = await fetchAsJson2(packageName); - return Object.keys(metadata.versions); -} -async function fetchTarballUrl(packageName, version2) { - const metadata = await fetchAsJson2(packageName); - const versionMetadata = metadata.versions?.[version2]; - if (versionMetadata === void 0) - throw new Error(`${packageName}@${version2} does not exist.`); - 
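
Illustrative sketch (not from the patch): the reworked getProxyAgent() above lazily assembles undici's dispatcher internals from the bundle, but the end result is the standard undici pattern of passing a ProxyAgent as the fetch dispatcher, roughly as below. The proxy URL is a placeholder; corepack resolves the real one with proxy-from-env.

    const { fetch, ProxyAgent } = require('undici');

    // fetch() routes the request through the proxy when a dispatcher is given,
    // mirroring the `dispatcher: agent` option used in the fetch() wrapper above.
    async function fetchThroughProxy(url) {
      const dispatcher = new ProxyAgent('http://proxy.example:8080'); // placeholder
      const response = await fetch(url, { dispatcher });
      return response.json();
    }
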
const { tarball } = versionMetadata.dist; - if (tarball === void 0 || !tarball.startsWith(`http`)) - throw new Error(`${packageName}@${version2} does not have a valid tarball.`); - return tarball; + return new ProxyAgent(proxy); } // sources/corepackUtils.ts @@ -23059,6 +22910,51 @@ function isValidBinList(x) { function isValidBinSpec(x) { return typeof x === `object` && x !== null && !Array.isArray(x) && Object.keys(x).length > 0; } +async function download(installTarget, url, algo, binPath = null) { + const tmpFolder = getTemporaryFolder(installTarget); + log(`Downloading to ${tmpFolder}`); + const stream = await fetchUrlStream(url); + const parsedUrl = new URL(url); + const ext = import_path2.default.posix.extname(parsedUrl.pathname); + let outputFile = null; + let sendTo; + if (ext === `.tgz`) { + const { default: tar } = await Promise.resolve().then(() => __toESM(require_tar())); + sendTo = tar.x({ + strip: 1, + cwd: tmpFolder, + filter: binPath ? (path10) => { + const pos = path10.indexOf(`/`); + return pos !== -1 && path10.slice(pos + 1) === binPath; + } : void 0 + }); + } else if (ext === `.js`) { + outputFile = import_path2.default.join(tmpFolder, import_path2.default.posix.basename(parsedUrl.pathname)); + sendTo = import_fs2.default.createWriteStream(outputFile); + } + stream.pipe(sendTo); + let hash = !binPath ? stream.pipe((0, import_crypto2.createHash)(algo)) : null; + await (0, import_events2.once)(sendTo, `finish`); + if (binPath) { + const downloadedBin = import_path2.default.join(tmpFolder, binPath); + outputFile = import_path2.default.join(tmpFolder, import_path2.default.basename(downloadedBin)); + try { + await renameSafe(downloadedBin, outputFile); + } catch (err) { + if (err?.code === `ENOENT`) + throw new Error(`Cannot locate '${binPath}' in downloaded tarball`, { cause: err }); + throw err; + } + const fileStream = import_fs2.default.createReadStream(outputFile); + hash = fileStream.pipe((0, import_crypto2.createHash)(algo)); + await (0, import_events2.once)(fileStream, `close`); + } + return { + tmpFolder, + outputFile, + hash: hash.digest(`hex`) + }; +} async function installVersion(installTarget, locator, { spec }) { const locatorIsASupportedPackageManager = isSupportedPackageManagerLocator(locator); const locatorReference = locatorIsASupportedPackageManager ? 
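
Illustrative sketch (not from the patch): the new download() helper above pipes one network stream both into its consumer (tar.x or a write stream) and into a crypto hash, then the digest is compared against the expected value. The core pattern in isolation, with placeholder file names:

    const fs = require('fs');
    const { createHash } = require('crypto');
    const { once } = require('events');

    // A crypto Hash is a duplex stream, so the same readable can feed the file
    // and the digest at once; the digest is read after the sink has finished.
    async function writeAndHash(stream, outputFile, algo = 'sha512') {
      const sink = fs.createWriteStream(outputFile);
      const hash = stream.pipe(createHash(algo));
      stream.pipe(sink);
      await once(sink, 'finish');
      return hash.digest('hex');
    }
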
import_semver.default.parse(locator.reference) : parseURLReference(locator); @@ -23080,12 +22976,18 @@ async function installVersion(installTarget, locator, { spec }) { } } let url; + let signatures; + let integrity; + let binPath = null; if (locatorIsASupportedPackageManager) { url = spec.url.replace(`{}`, version2); if (process.env.COREPACK_NPM_REGISTRY) { const registry = getRegistryFromPackageManagerSpec(spec); if (registry.type === `npm`) { - url = await fetchTarballUrl(registry.package, version2); + ({ tarball: url, signatures, integrity } = await fetchTarballURLAndSignature(registry.package, version2)); + if (registry.bin) { + binPath = registry.bin; + } } else { url = url.replace( DEFAULT_NPM_REGISTRY_URL, @@ -23095,25 +22997,16 @@ async function installVersion(installTarget, locator, { spec }) { } } else { url = decodeURIComponent(version2); + if (process.env.COREPACK_NPM_REGISTRY && url.startsWith(DEFAULT_NPM_REGISTRY_URL)) { + url = url.replace( + DEFAULT_NPM_REGISTRY_URL, + () => process.env.COREPACK_NPM_REGISTRY + ); + } } - const tmpFolder = getTemporaryFolder(installTarget); - log(`Installing ${locator.name}@${version2} from ${url} to ${tmpFolder}`); - const stream = await fetchUrlStream(url); - const parsedUrl = new URL(url); - const ext = import_path2.default.posix.extname(parsedUrl.pathname); - let outputFile = null; - let sendTo; - if (ext === `.tgz`) { - const { default: tar } = await Promise.resolve().then(() => __toESM(require_tar())); - sendTo = tar.x({ strip: 1, cwd: tmpFolder }); - } else if (ext === `.js`) { - outputFile = import_path2.default.join(tmpFolder, import_path2.default.posix.basename(parsedUrl.pathname)); - sendTo = import_fs2.default.createWriteStream(outputFile); - } - stream.pipe(sendTo); - const algo = build[0] ?? `sha256`; - const hash = stream.pipe((0, import_crypto.createHash)(algo)); - await (0, import_events2.once)(sendTo, `finish`); + log(`Installing ${locator.name}@${version2} from ${url}`); + const algo = build[0] ?? `sha512`; + const { tmpFolder, outputFile, hash: actualHash } = await download(installTarget, url, algo, binPath); let bin; const isSingleFile = outputFile !== null; if (isSingleFile) { @@ -23136,7 +23029,15 @@ async function installVersion(installTarget, locator, { spec }) { } } } - const actualHash = hash.digest(`hex`); + if (!build[1]) { + const registry = getRegistryFromPackageManagerSpec(spec); + if (registry.type === `npm` && !registry.bin && process.env.COREPACK_INTEGRITY_KEYS !== ``) { + if (signatures == null || integrity == null) + ({ signatures, integrity } = await fetchTarballURLAndSignature(registry.package, version2)); + verifySignature({ signatures, integrity, packageName: registry.package, version: version2 }); + build[1] = Buffer.from(integrity.slice(`sha512-`.length), `base64`).toString(`hex`); + } + } if (build[1] && actualHash !== build[1]) throw new Error(`Mismatch hashes. 
Expected ${build[1]}, got ${actualHash}`); const serializedHash = `${algo}.${actualHash}`; @@ -23156,30 +23057,20 @@ async function installVersion(installTarget, locator, { spec }) { if (err.code === `ENOTEMPTY` || // On Windows the error code is EPERM so we check if it is a directory err.code === `EPERM` && (await import_fs2.default.promises.stat(installFolder)).isDirectory()) { log(`Another instance of corepack installed ${locator.name}@${locator.reference}`); - await rimraf(tmpFolder); + await import_fs2.default.promises.rm(tmpFolder, { recursive: true, force: true }); } else { throw err; } } if (locatorIsASupportedPackageManager && process.env.COREPACK_DEFAULT_TO_LATEST !== `0`) { - let lastKnownGoodFile; - try { - lastKnownGoodFile = await getLastKnownGoodFile(`r+`); - const lastKnownGood = await getJSONFileContent(lastKnownGoodFile); - const defaultVersion = getLastKnownGoodFromFileContent(lastKnownGood, locator.name); - if (defaultVersion) { - const currentDefault = import_semver.default.parse(defaultVersion); - const downloadedVersion = locatorReference; - if (currentDefault.major === downloadedVersion.major && import_semver.default.lt(currentDefault, downloadedVersion)) { - await activatePackageManagerFromFileHandle(lastKnownGoodFile, lastKnownGood, locator); - } - } - } catch (err) { - if (err?.code !== `ENOENT`) { - throw err; + const lastKnownGood = await getLastKnownGood(); + const defaultVersion = getLastKnownGoodFromFileContent(lastKnownGood, locator.name); + if (defaultVersion) { + const currentDefault = import_semver.default.parse(defaultVersion); + const downloadedVersion = locatorReference; + if (currentDefault.major === downloadedVersion.major && import_semver.default.lt(currentDefault, downloadedVersion)) { + await activatePackageManager(lastKnownGood, locator); } - } finally { - await lastKnownGoodFile?.close(); } } log(`Install finished`); @@ -23189,6 +23080,13 @@ async function installVersion(installTarget, locator, { spec }) { hash: serializedHash }; } +async function renameSafe(oldPath, newPath) { + if (process.platform === `win32`) { + await renameUnderWindows(oldPath, newPath); + } else { + await import_fs2.default.promises.rename(oldPath, newPath); + } +} async function renameUnderWindows(oldPath, newPath) { const retries = 5; for (let i = 0; i < retries; i++) { @@ -23197,7 +23095,7 @@ async function renameUnderWindows(oldPath, newPath) { break; } catch (err) { if ((err.code === `ENOENT` || err.code === `EPERM`) && i < retries - 1) { - await (0, import_promises2.setTimeout)(100 * 2 ** i); + await (0, import_promises.setTimeout)(100 * 2 ** i); continue; } else { throw err; @@ -23239,13 +23137,6 @@ async function runVersion(locator, installSpec, binName, args) { process.nextTick(import_module.default.runMain, binPath); } -// sources/miscUtils.ts -var Cancellation = class extends Error { - constructor() { - super(`Cancelled operation`); - } -}; - // sources/semverUtils.ts var import_semver2 = __toESM(require_semver2()); function satisfiesWithPrereleases(version2, range, loose = false) { @@ -23424,42 +23315,53 @@ async function loadSpec(initialCwd) { } // sources/Engine.ts -function getLastKnownGoodFile(flag = `r`) { - return import_fs4.default.promises.open(import_path4.default.join(getCorepackHomeFolder(), `lastKnownGood.json`), flag); +function getLastKnownGoodFilePath() { + return import_path4.default.join(getCorepackHomeFolder(), `lastKnownGood.json`); } -async function createLastKnownGoodFile() { - await 
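
Illustrative sketch (not from the patch): renameSafe()/renameUnderWindows() above retry EPERM/ENOENT renames with exponential backoff, since Windows can briefly hold a handle on freshly written files. The same pattern on its own, with placeholder paths:

    const fs = require('fs/promises');
    const { setTimeout: delay } = require('timers/promises');

    // Retries wait 100ms, 200ms, 400ms, ... before giving up, as in the hunk above.
    async function renameWithRetry(oldPath, newPath, retries = 5) {
      for (let i = 0; i < retries; i++) {
        try {
          return await fs.rename(oldPath, newPath);
        } catch (err) {
          if ((err.code === 'EPERM' || err.code === 'ENOENT') && i < retries - 1) {
            await delay(100 * 2 ** i);
            continue;
          }
          throw err;
        }
      }
    }
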
import_fs4.default.promises.mkdir(getCorepackHomeFolder(), { recursive: true }); - return getLastKnownGoodFile(`w`); -} -async function getJSONFileContent(fh) { - let lastKnownGood; +async function getLastKnownGood() { + let raw; + try { + raw = await import_fs4.default.promises.readFile(getLastKnownGoodFilePath(), `utf8`); + } catch (err) { + if (err?.code === `ENOENT`) + return {}; + throw err; + } try { - lastKnownGood = JSON.parse(await fh.readFile(`utf8`)); + const parsed = JSON.parse(raw); + if (!parsed) + return {}; + if (typeof parsed !== `object`) + return {}; + Object.entries(parsed).forEach(([key, value]) => { + if (typeof value !== `string`) { + delete parsed[key]; + } + }); + return parsed; } catch { - return void 0; + return {}; } - return lastKnownGood; } -async function overwriteJSONFileContent(fh, content) { - await fh.truncate(0); - await fh.write(`${JSON.stringify(content, null, 2)} -`, 0); +async function createLastKnownGoodFile(lastKnownGood) { + const content = `${JSON.stringify(lastKnownGood, null, 2)} +`; + await import_fs4.default.promises.mkdir(getCorepackHomeFolder(), { recursive: true }); + await import_fs4.default.promises.writeFile(getLastKnownGoodFilePath(), content, `utf8`); } function getLastKnownGoodFromFileContent(lastKnownGood, packageManager) { - if (typeof lastKnownGood === `object` && lastKnownGood !== null && Object.hasOwn(lastKnownGood, packageManager)) { - const override = lastKnownGood[packageManager]; - if (typeof override === `string`) { - return override; - } - } + if (Object.hasOwn(lastKnownGood, packageManager)) + return lastKnownGood[packageManager]; return void 0; } -async function activatePackageManagerFromFileHandle(lastKnownGoodFile, lastKnownGood, locator) { - if (typeof lastKnownGood !== `object` || lastKnownGood === null) - lastKnownGood = {}; +async function activatePackageManager(lastKnownGood, locator) { + if (lastKnownGood[locator.name] === locator.reference) { + log(`${locator.name}@${locator.reference} is already Last Known Good version`); + return; + } lastKnownGood[locator.name] = locator.reference; log(`Setting ${locator.name}@${locator.reference} as Last Known Good version`); - await overwriteJSONFileContent(lastKnownGoodFile, lastKnownGood); + await createLastKnownGoodFile(lastKnownGood); } var Engine = class { constructor(config = config_default) { @@ -23522,46 +23424,25 @@ var Engine = class { const definition = this.config.definitions[packageManager]; if (typeof definition === `undefined`) throw new UsageError(`This package manager (${packageManager}) isn't supported by this corepack build`); - let lastKnownGoodFile = await getLastKnownGoodFile(`r+`).catch((err) => { - if (err?.code !== `ENOENT` && err?.code !== `EROFS`) { - throw err; - } - }); + const lastKnownGood = await getLastKnownGood(); + const lastKnownGoodForThisPackageManager = getLastKnownGoodFromFileContent(lastKnownGood, packageManager); + if (lastKnownGoodForThisPackageManager) + return lastKnownGoodForThisPackageManager; + if (import_process3.default.env.COREPACK_DEFAULT_TO_LATEST === `0`) + return definition.default; + const reference = await fetchLatestStableVersion2(definition.fetchLatestFrom); try { - const lastKnownGood = lastKnownGoodFile == null || await getJSONFileContent(lastKnownGoodFile); - const lastKnownGoodForThisPackageManager = getLastKnownGoodFromFileContent(lastKnownGood, packageManager); - if (lastKnownGoodForThisPackageManager) - return lastKnownGoodForThisPackageManager; - if (import_process3.default.env.COREPACK_DEFAULT_TO_LATEST === 
`0`) - return definition.default; - const reference = await fetchLatestStableVersion2(definition.fetchLatestFrom); - try { - lastKnownGoodFile ??= await createLastKnownGoodFile(); - await activatePackageManagerFromFileHandle(lastKnownGoodFile, lastKnownGood, { - name: packageManager, - reference - }); - } catch { - } - return reference; - } finally { - await lastKnownGoodFile?.close(); + await activatePackageManager(lastKnownGood, { + name: packageManager, + reference + }); + } catch { } + return reference; } async activatePackageManager(locator) { - let emptyFile = false; - const lastKnownGoodFile = await getLastKnownGoodFile(`r+`).catch((err) => { - if (err?.code === `ENOENT`) { - emptyFile = true; - return getLastKnownGoodFile(`w`); - } - throw err; - }); - try { - await activatePackageManagerFromFileHandle(lastKnownGoodFile, emptyFile || await getJSONFileContent(lastKnownGoodFile), locator); - } finally { - await lastKnownGoodFile.close(); - } + const lastKnownGood = await getLastKnownGood(); + await activatePackageManager(lastKnownGood, locator); } async ensurePackageManager(locator) { const spec = this.getPackageManagerSpecFor(locator); @@ -23625,7 +23506,7 @@ var Engine = class { if (transparent) { return fallbackDescriptor; } else { - throw new UsageError(`This project is configured to use ${result.spec.name}`); + throw new UsageError(`This project is configured to use ${result.spec.name} because ${result.target} has a "packageManager" field`); } } else { return result.spec; @@ -23641,7 +23522,7 @@ var Engine = class { }; let isTransparentCommand = false; if (packageManager != null) { - const defaultVersion = await this.getDefaultVersion(packageManager); + const defaultVersion = binaryVersion || await this.getDefaultVersion(packageManager); const definition = this.config.definitions[packageManager]; for (const transparentPath of definition.transparent.commands) { if (transparentPath[0] === binaryName && transparentPath.slice(1).every((segment, index) => segment === args[index])) { @@ -23655,16 +23536,7 @@ var Engine = class { reference: fallbackReference }; } - let descriptor; - try { - descriptor = await this.findProjectSpec(cwd, fallbackLocator, { transparent: isTransparentCommand }); - } catch (err) { - if (err instanceof Cancellation) { - return 1; - } else { - throw err; - } - } + const descriptor = await this.findProjectSpec(cwd, fallbackLocator, { transparent: isTransparentCommand }); if (binaryVersion) descriptor.range = binaryVersion; const resolved = await this.resolveDescriptor(descriptor, { allowTags: true }); @@ -24040,7 +23912,7 @@ var InstallLocalCommand = class extends BaseCommand { }; // sources/commands/Pack.ts -var import_promises3 = require("fs/promises"); +var import_promises2 = require("fs/promises"); var import_path8 = __toESM(require("path")); var PackCommand = class extends BaseCommand { static paths = [ @@ -24090,7 +23962,7 @@ var PackCommand = class extends BaseCommand { `); } const { default: tar } = await Promise.resolve().then(() => __toESM(require_tar())); - await (0, import_promises3.mkdir)(baseInstallFolder, { recursive: true }); + await (0, import_promises2.mkdir)(baseInstallFolder, { recursive: true }); await tar.c({ gzip: true, cwd: baseInstallFolder, file: import_path8.default.resolve(outputPath) }, installLocations.map((location) => { return import_path8.default.relative(baseInstallFolder, location); })); @@ -24181,7 +24053,7 @@ var UseCommand = class extends BaseCommand { }; // sources/commands/deprecated/Hydrate.ts -var import_promises4 = 
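
Illustrative sketch (not from the patch): the rewritten getDefaultVersion() above resolves a default in three steps: the lastKnownGood.json pin, then the version baked into corepack when COREPACK_DEFAULT_TO_LATEST is "0", then the registry's latest stable release. A condensed, self-contained restatement; the helper below is a placeholder, not corepack's API, and all inputs are passed explicitly.

    // Restates only the resolution order from getDefaultVersion() above.
    async function resolveDefault(name, { lastKnownGood, definition, fetchLatest }) {
      if (typeof lastKnownGood[name] === 'string')
        return lastKnownGood[name];                     // 1. pinned Last Known Good
      if (process.env.COREPACK_DEFAULT_TO_LATEST === '0')
        return definition.default;                      // 2. version bundled with corepack
      return fetchLatest(definition.fetchLatestFrom);   // 3. latest stable from the registry
    }
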
require("fs/promises"); +var import_promises3 = require("fs/promises"); var import_path9 = __toESM(require("path")); var HydrateCommand = class extends Command { static paths = [ @@ -24220,7 +24092,7 @@ var HydrateCommand = class extends Command { else this.context.stdout.write(`Hydrating ${name}@${reference}... `); - await (0, import_promises4.mkdir)(installFolder, { recursive: true }); + await (0, import_promises3.mkdir)(installFolder, { recursive: true }); await tar.x({ file: fileName, cwd: installFolder }, [`${name}/${reference}`]); if (this.activate) { await this.context.engine.activatePackageManager({ name, reference }); @@ -24233,7 +24105,7 @@ var HydrateCommand = class extends Command { }; // sources/commands/deprecated/Prepare.ts -var import_promises5 = require("fs/promises"); +var import_promises4 = require("fs/promises"); var import_path10 = __toESM(require("path")); var PrepareCommand = class extends Command { static paths = [ @@ -24293,7 +24165,7 @@ var PrepareCommand = class extends Command { this.context.stdout.write(`Packing the selected tools in ${import_path10.default.basename(outputPath)}... `); const { default: tar } = await Promise.resolve().then(() => __toESM(require_tar())); - await (0, import_promises5.mkdir)(baseInstallFolder, { recursive: true }); + await (0, import_promises4.mkdir)(baseInstallFolder, { recursive: true }); await tar.c({ gzip: true, cwd: baseInstallFolder, file: import_path10.default.resolve(outputPath) }, installLocations.map((location) => { return import_path10.default.relative(baseInstallFolder, location); })); @@ -24309,14 +24181,14 @@ var PrepareCommand = class extends Command { }; // sources/main.ts -function getPackageManagerRequestFromCli(parameter, context) { +function getPackageManagerRequestFromCli(parameter, engine) { if (!parameter) return null; const match = parameter.match(/^([^@]*)(?:@(.*))?$/); if (!match) return null; const [, binaryName, binaryVersion] = match; - const packageManager = context.engine.getPackageManagerFor(binaryName); + const packageManager = engine.getPackageManagerFor(binaryName); if (packageManager == null && binaryVersion == null) return null; return { @@ -24326,14 +24198,9 @@ function getPackageManagerRequestFromCli(parameter, context) { }; } async function runMain(argv) { - const context = { - ...Cli.defaultContext, - cwd: process.cwd(), - engine: new Engine() - }; + const engine = new Engine(); const [firstArg, ...restArgs] = argv; - const request = getPackageManagerRequestFromCli(firstArg, context); - let code; + const request = getPackageManagerRequestFromCli(firstArg, engine); if (!request) { const cli = new Cli({ binaryLabel: `Corepack`, @@ -24352,26 +24219,20 @@ async function runMain(argv) { cli.register(UseCommand); cli.register(HydrateCommand); cli.register(PrepareCommand); - code = await cli.run(argv, context); + const context = { + ...Cli.defaultContext, + cwd: process.cwd(), + engine + }; + const code = await cli.run(argv, context); + if (code !== 0) { + process.exitCode ??= code; + } } else { - const cli = new Cli({ - binaryLabel: `'${request.binaryName}', via Corepack`, - binaryName: request.binaryName, - binaryVersion: `corepack/${version}` + await engine.executePackageManagerRequest(request, { + cwd: process.cwd(), + args: restArgs }); - cli.register(class BinaryCommand extends Command { - proxy = options_exports.Proxy(); - async execute() { - return this.context.engine.executePackageManagerRequest(request, { - cwd: this.context.cwd, - args: this.proxy - }); - } - }); - code = await 
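
Illustrative sketch (not from the patch): getPackageManagerRequestFromCli() above splits the first CLI argument into a binary name and an optional version using the regex shown; for example:

    // Example behaviour of /^([^@]*)(?:@(.*))?$/ from the hunk above.
    const [, binaryName, binaryVersion] = 'yarn@1.22.22'.match(/^([^@]*)(?:@(.*))?$/);
    console.log(binaryName);    // 'yarn'
    console.log(binaryVersion); // '1.22.22'

    // A plain "yarn" leaves the version group undefined, so the default version
    // (or the project's "packageManager" field) decides what actually runs.
    const [, nameOnly, noVersion] = 'yarn'.match(/^([^@]*)(?:@(.*))?$/);
    console.log(nameOnly, noVersion); // 'yarn' undefined
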
cli.run(restArgs, context); - } - if (code !== 0) { - process.exitCode ??= code; } } // Annotate the CommonJS export names for ESM import in node: @@ -24380,7 +24241,7 @@ async function runMain(argv) { }); /*! Bundled license information: -undici/lib/fetch/body.js: +undici/lib/web/fetch/body.js: (*! formdata-polyfill. MIT License. Jimmy Wärting *) is-windows/index.js: diff --git a/deps/corepack/package.json b/deps/corepack/package.json index c3bc56c961bb35..5ca045e37041e0 100644 --- a/deps/corepack/package.json +++ b/deps/corepack/package.json @@ -1,6 +1,6 @@ { "name": "corepack", - "version": "0.26.0", + "version": "0.28.0", "homepage": "https://github.com/nodejs/corepack#readme", "bugs": { "url": "https://github.com/nodejs/corepack/issues" @@ -16,7 +16,7 @@ "./package.json": "./package.json" }, "license": "MIT", - "packageManager": "yarn@4.1.0+sha224.bc24d7f5afc738464f3d4e95f4e6e7829a35cee54a0fd527ea5baa83", + "packageManager": "yarn@4.1.1+sha224.00f08619463229f8ba40c4ee90e8c2e4ced1f11c3115c26f3b98432e", "devDependencies": { "@babel/core": "^7.14.3", "@babel/plugin-transform-modules-commonjs": "^7.14.0", @@ -45,13 +45,16 @@ "proxy-from-env": "^1.1.0", "semver": "^7.5.2", "supports-color": "^9.0.0", - "tar": "^6.0.1", + "tar": "^6.2.1", "ts-node": "^10.0.0", "typescript": "^5.3.3", "undici": "^6.6.1", "v8-compile-cache": "^2.3.0", "which": "^4.0.0" }, + "resolutions": { + "undici-types": "6.x" + }, "scripts": { "build": "rm -rf dist shims && run build:bundle && ts-node ./mkshims.ts", "build:bundle": "esbuild ./sources/_lib.ts --bundle --platform=node --target=node18.17.0 --external:corepack --outfile='./dist/lib/corepack.cjs' --resolve-extensions='.ts,.mjs,.js'", From e4ea2db58bfb8857b6d8af603469ced2e7f7c271 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Thu, 4 Apr 2024 16:10:39 +0300 Subject: [PATCH 32/41] deps: update c-ares to 1.28.1 PR-URL: https://github.com/nodejs/node/pull/52285 Reviewed-By: Luigi Pinca Reviewed-By: Antoine du Hamel --- deps/cares/CHANGES | 369 ++++++--- deps/cares/CMakeLists.txt | 29 +- deps/cares/Makefile.in | 1 + deps/cares/Makefile.msvc | 8 +- deps/cares/RELEASE-NOTES.md | 51 +- deps/cares/aminclude_static.am | 2 +- deps/cares/cares.gyp | 12 +- deps/cares/configure | 47 +- deps/cares/configure.ac | 5 +- deps/cares/docs/Makefile.in | 11 + deps/cares/docs/Makefile.inc | 7 + deps/cares/docs/adig.1 | 2 +- deps/cares/docs/ahost.1 | 2 +- deps/cares/docs/ares_dns_record.3 | 47 +- deps/cares/docs/ares_dns_record_duplicate.3 | 3 + .../docs/ares_dns_record_query_set_name.3 | 3 + .../docs/ares_dns_record_query_set_type.3 | 3 + .../cares/docs/ares_dns_record_rr_get_const.3 | 3 + deps/cares/docs/ares_dns_rr.3 | 34 +- deps/cares/docs/ares_init_options.3 | 2 +- deps/cares/docs/ares_query.3 | 90 ++- deps/cares/docs/ares_query_dnsrec.3 | 3 + deps/cares/docs/ares_queue.3 | 53 ++ deps/cares/docs/ares_queue_active_queries.3 | 3 + deps/cares/docs/ares_queue_wait_empty.3 | 3 + deps/cares/docs/ares_search.3 | 39 +- deps/cares/docs/ares_search_dnsrec.3 | 3 + deps/cares/docs/ares_send.3 | 129 +-- deps/cares/docs/ares_send_dnsrec.3 | 3 + deps/cares/include/Makefile.in | 1 + deps/cares/include/ares.h | 323 +++++--- deps/cares/include/ares_dns_record.h | 74 +- deps/cares/include/ares_version.h | 6 +- deps/cares/src/Makefile.in | 1 + deps/cares/src/lib/CMakeLists.txt | 19 +- deps/cares/src/lib/Makefile.in | 48 +- deps/cares/src/lib/Makefile.inc | 2 - deps/cares/src/lib/ares__buf.c | 227 ++++-- deps/cares/src/lib/ares__buf.h | 39 +- deps/cares/src/lib/ares__hosts_file.c | 91 +-- 
deps/cares/src/lib/ares__htable.h | 12 +- deps/cares/src/lib/ares__htable_asvp.h | 4 +- deps/cares/src/lib/ares__htable_strvp.h | 2 +- deps/cares/src/lib/ares__htable_szvp.h | 2 +- deps/cares/src/lib/ares__llist.h | 10 +- .../cares/src/lib/ares__parse_into_addrinfo.c | 11 +- deps/cares/src/lib/ares__read_line.c | 90 --- deps/cares/src/lib/ares__slist.h | 4 +- deps/cares/src/lib/ares__threads.c | 10 +- deps/cares/src/lib/ares__threads.h | 6 +- deps/cares/src/lib/ares_cancel.c | 2 +- deps/cares/src/lib/ares_config.h.cmake | 3 + deps/cares/src/lib/ares_config.h.in | 3 + deps/cares/src/lib/ares_create_query.c | 71 +- deps/cares/src/lib/ares_destroy.c | 2 +- deps/cares/src/lib/ares_dns_mapping.c | 34 + deps/cares/src/lib/ares_dns_private.h | 27 + deps/cares/src/lib/ares_dns_record.c | 135 +++- deps/cares/src/lib/ares_dns_write.c | 14 +- deps/cares/src/lib/ares_event.h | 8 +- deps/cares/src/lib/ares_event_poll.c | 7 +- deps/cares/src/lib/ares_getaddrinfo.c | 256 +++--- deps/cares/src/lib/ares_gethostbyaddr.c | 31 +- deps/cares/src/lib/ares_init.c | 32 +- deps/cares/src/lib/ares_ipv6.h | 4 + deps/cares/src/lib/ares_library_init.c | 4 +- deps/cares/src/lib/ares_mkquery.c | 35 - deps/cares/src/lib/ares_options.c | 2 +- deps/cares/src/lib/ares_parse_a_reply.c | 13 +- deps/cares/src/lib/ares_parse_aaaa_reply.c | 13 +- deps/cares/src/lib/ares_parse_ptr_reply.c | 63 +- deps/cares/src/lib/ares_private.h | 91 ++- deps/cares/src/lib/ares_process.c | 40 +- deps/cares/src/lib/ares_qcache.c | 57 +- deps/cares/src/lib/ares_query.c | 159 ++-- deps/cares/src/lib/ares_search.c | 712 +++++++++++------ deps/cares/src/lib/ares_send.c | 100 ++- deps/cares/src/lib/ares_str.c | 116 +++ deps/cares/src/lib/ares_str.h | 17 +- deps/cares/src/lib/ares_strsplit.c | 2 +- deps/cares/src/lib/ares_sysconfig.c | 6 +- deps/cares/src/lib/ares_sysconfig_files.c | 734 ++++++++++-------- deps/cares/src/lib/ares_update_servers.c | 12 +- deps/cares/src/lib/setup_once.h | 2 +- deps/cares/src/tools/CMakeLists.txt | 4 +- deps/cares/src/tools/Makefile.am | 3 +- deps/cares/src/tools/Makefile.in | 3 +- 87 files changed, 2903 insertions(+), 1863 deletions(-) create mode 100644 deps/cares/docs/ares_dns_record_duplicate.3 create mode 100644 deps/cares/docs/ares_dns_record_query_set_name.3 create mode 100644 deps/cares/docs/ares_dns_record_query_set_type.3 create mode 100644 deps/cares/docs/ares_dns_record_rr_get_const.3 create mode 100644 deps/cares/docs/ares_query_dnsrec.3 create mode 100644 deps/cares/docs/ares_queue.3 create mode 100644 deps/cares/docs/ares_queue_active_queries.3 create mode 100644 deps/cares/docs/ares_queue_wait_empty.3 create mode 100644 deps/cares/docs/ares_search_dnsrec.3 create mode 100644 deps/cares/docs/ares_send_dnsrec.3 delete mode 100644 deps/cares/src/lib/ares__read_line.c delete mode 100644 deps/cares/src/lib/ares_mkquery.c diff --git a/deps/cares/CHANGES b/deps/cares/CHANGES index 24a68a89849b67..ae56d4f24bc3d9 100644 --- a/deps/cares/CHANGES +++ b/deps/cares/CHANGES @@ -1,5 +1,250 @@ Changelog for the c-ares project. Generated with git2changes.pl +Version 1.28.1 (30 Mar 2024) + +GitHub (30 Mar 2024) +- [Brad House brought this change] + + release prep for 1.28.1 (#739) + +Brad House (30 Mar 2024) +- ares_search() and ares_getaddrinfo() resolution fails if no search domains + + Due to an error in creating the list of domains to search, if no search + domains were configured, resolution would fail. 
+ + Fixes Issue: #737 + Fix By: Brad House (@bradh352) + +- typo + +Version 1.28.0 (28 Mar 2024) + +GitHub (28 Mar 2024) +- [Brad House brought this change] + + Allow configuration value for NDots to be zero (#735) + + As per Issue #734 some people use `ndots:0` in their configuration which + is allowed by the system resolver but not by c-ares. Add support for + `ndots:0` and add a test case to validate this behavior. + + Fixes Issue: #734 + Fix By: Brad House (@bradh352) + +Brad House (27 Mar 2024) +- typo + +GitHub (27 Mar 2024) +- [Brad House brought this change] + + 1.28.0 release prep (#733) + +Brad House (27 Mar 2024) +- CMake: don't overwrite global required libraries/definitions/includes + + When chain building c-ares, global settings were being unset which + could lead to build problems. + + Fixes Issue: #729 + Fix By: Brad House (@bradh352) + +- remove tests that have been disabled forever + +- clang-format + +- ares_search_dnsrec() takes a const + +- sonarcloud: clean up some minor codesmells + +GitHub (26 Mar 2024) +- [Brad House brought this change] + + mark deprecated functions as such (#732) + + Multiple functions have been deprecated over the years, annotate them + with attribute deprecated. + + When possible show a message about their replacements. + + This is a continuation/completion of PR #706 + + Fix By: Cristian Rodríguez (@crrodriguez) + +Brad House (26 Mar 2024) +- silence clang static analyzer + +- silence coverity + +- coverity: fix mostly bogus warnings + +- fix missing doc + +GitHub (25 Mar 2024) +- [Brad House brought this change] + + Rework internals to pass around `ares_dns_record_t` instead of binary data (#730) + + c-ares has historically passed around raw dns packets in binary form. + Now that we have a new parser, and messages are already parsed + internally, lets pass around that parsed message rather than requiring + multiple parse attempts on the same message. Also add a new + `ares_send_dnsrec()` and `ares_query_dnsrec()` similar to + `ares_search_dnsrec()` added with PR #719 that can return the pointer to + the `ares_dns_record_t` to the caller enqueuing queries and rework + `ares_search_dnsrec()` to use `ares_send_dnsrec()` internally. + + Fix By: Brad House (@bradh352) + +Brad House (23 Mar 2024) +- tests: mockserver is local, shorten timeouts to make test cases run faster to use less CI resources + +- appveyor: disable UWP builds until MSVC version is updated in base image + +GitHub (21 Mar 2024) +- [Faraz brought this change] + + Include netinet6/in6.h (#728) + + On some platforms, "netinet6/in6.h" is not included by "netinet/in.h" + and needs to be included separately. + + Fix By: Faraz (@farazrbx) + +- [Oliver Welsh brought this change] + + Add function ares_search_dnrec() to search for records using the new DNS record parser (#719) + + This PR adds a new function `ares_search_dnsrec()` to search for records + using the new DNS record parser. + + The function takes an arbitrary DNS record object to search (that must + represent a query for a single name). The function takes a new callback + type, `ares_callback_dnsrec`, that is invoked with a parsed DNS record + object rather than the raw buffer(+length). 
+ + The original motivation for this change is to provide support for + [draft-kaplan-enum-sip-routing-04](https://datatracker.ietf.org/doc/html/draft-kaplan-enum-sip-routing-04); + when routing phone calls using an ENUM server, it can be useful to + include identifying source information in an OPT RR options value, to + help select the appropriate route for the call. The new function allows + for more customisable searches like this. + + **Summary of code changes** + + A new function `ares_search_dnsrec()` has been added and exposed. + Moreover, the entire `ares_search_int()` internal code flow has been + refactored to use parsed DNS record objects and the new DNS record + parser. The DNS record object is passed through the `search_query` + structure by encoding/decoding to/from a buffer (if multiple search + domains are used). A helper function `ares_dns_write_query_altname()` is + used to re-write the DNS record object with a new query name (used to + append search domains). + + `ares_search()` is now a wrapper around the new internal code, where the + DNS record object is created based on the name, class and type + parameters. + + The new function uses a new callback type, `ares_callback_dnsrec`. This + is invoked with a parsed DNS record object. For now, we convert from + `ares_callback` to this new type using `ares__dnsrec_convert_cb()`. + + Some functions that are common to both `ares_query()` and + `ares_search()` have been refactored using the new DNS record parser. + See `ares_dns_record_create_query()` and + `ares_dns_query_reply_tostatus()`. + + **Testing** + + A new FV has been added to test the new function, which searches for a + DNS record containing an OPT RR with custom options value. + + As part of this, I needed to enhance the mock DNS server to expect + request text (and assert that it matches actual request text). This is + because the FV needs to check that the request contains the correct OPT + RR. + + **Documentation** + + The man page docs have been updated to describe the new feature. + + **Futures** + + In the future, a new variant of `ares_send()` could be introduced in the + same vein (`ares_send_dnsrec()`). This could be used by + `ares_search_dnsrec()`. Moreover, we could migrate internal code to use + `ares_callback_dnsrec` as the default callback. + + This will help to make the new DNS record parser the norm in C-Ares. + + --------- + + Co-authored-by: Oliver Welsh (@oliverwelsh) + +- [Brad House brought this change] + + Replace configuration file parsers with memory-safe parser (#725) + + Rewrite configuration parsers using new memory safe parsing functions. + After CVE-2024-25629 its obvious that we need to prioritize again on + getting all the hand written parsers with direct pointer manipulation + replaced. They're just not safe and hard to audit. It was yet another + example of 20+yr old code having a memory safety issue just now coming + to light. + + Though these parsers are definitely less efficient, they're written with + memory safety in mind, and any performance difference is going to be + meaningless for something that only happens once a while. + + Fix By: Brad House (@bradh352) + +Brad House (12 Mar 2024) +- skip ubsan/asan on debian arm64 due to the compiler getting killed + +- ares_init potential memory leak + + If initializing using default settings fails, there may be a memory leak of + search domains that were set by system configuration. 
+ + Fixes Issue: #724 + Fix By: Brad House (@bradh352) + +GitHub (12 Mar 2024) +- [Faraz Fallahi brought this change] + + simple implementation for isascii where stdlib isascii is not available (#721) + + Some platforms don't have the isascii() function. Implement as a macro. + + Fix By: Faraz Fallahi (@fffaraz) + +Brad House (11 Mar 2024) +- Doxygen: fix typos + + Fix reported typos in doxygen-style comments. + + Fixes Issue: #722 + Credit: @dzalyalov88 + +- CI: update freebsd image + +- CMake: Fix Chain building if CMAKE runtime paths not set + + This fixes issues created by #708 + + Fix By: Brad House (@bradh352) + +- silence benign warnings + +- Remove acountry completely from code, including manpage + + Since acountry cannot be restored due to nerd.dk being decommissioned, + we should completely remove the manpage and source. This also + will resolve issue #718. + + Fixes Issue: #718 + Fix By: Brad House (@bradh352) + Version 1.27.0 (22 Feb 2024) GitHub (22 Feb 2024) @@ -6100,127 +6345,3 @@ Yang Tse (10 Mar 2013) Daniel Stenberg (9 Mar 2013) - ares.h: there is no ares_free_soa function - -Yang Tse (9 Mar 2013) -- Makefile.am: empty AM_LDFLAGS definition for automake 1.7 compatibility - -- ares_inet_ntop.3: s/socklen_t/ares_socklen_t - -- configure: use XC_LIBTOOL for portability across libtool versions - -- xc-lt-iface.m4: provide XC_LIBTOOL macro - -- Makefile.am: use AM_CPPFLAGS instead of INCLUDES - -- inet_ntop.c: s/socklen_t/ares_socklen_t - -- inet_ntop.c: s/socklen_t/ares_socklen_t for portability - -Daniel Stenberg (19 Feb 2013) -- ares.h: s/socklen_t/ares_socklen_t for portability - -- ares_inet_ntop.3: 4th argument is socklen_t! - -- spell inet correctly! - -- ares_inet_pton/ntop: cleanup - - Make sure that the symbols are always exported and present in c-ares. - - Make the headers prefixed with 'ares'. - - Removed the inet_ntop.h version as it no longer features any content. - -- ares_inet_ntop/ares_inet_pton: added man pages - -Yang Tse (15 Feb 2013) -- [Gisle Vanem brought this change] - - curl_setup_once.h: definition of HAVE_CLOSE_S defines sclose() to close_s() - -- [Gisle Vanem brought this change] - - config-dos.h: define HAVE_CLOSE_S for MSDOS/Watt-32 - -- [Gisle Vanem brought this change] - - config-dos.h: define strerror() to strerror_s_() for High-C - -Daniel Stenberg (13 Feb 2013) -- ares_get_datatype: removed unused function - - it was also wrongly named as internal functions require two underscores - -- ares__bitncmp: use two underscores for private functions - - It used a single one previously making it look like a public one - -- ares__generate_new_id: moved to ares_query.c - - ... and ares__rc4 is turned into a local static function. - -- ares__swap_lists: make private and static - - ... since there's only one user, make it static within ares_process.c - -Yang Tse (13 Feb 2013) -- Makefile.msvc: add four VS version strings - -Daniel Stenberg (13 Feb 2013) -- ares_expand_name.3: clarify how to free the data - -Yang Tse (30 Jan 2013) -- zz40-xc-ovr.m4: fix 'wc' detection - follow-up 2 - - - Fix a pair of single quotes to double quotes. - - URL: http://curl.haxx.se/mail/lib-2013-01/0355.html - Reported by: Tor Arntsen - -- zz40-xc-ovr.m4: fix 'wc' detection - follow-up - - - Take into account that 'wc' may return leading spaces and/or tabs. - - - Set initial IFS to space, tab and newline. - -- zz40-xc-ovr.m4: fix 'wc' detection - - - Take into account that 'wc' may return leading spaces. - - - Set internationalization behavior variables. 
- - Tor Arntsen analyzed and reported the issue. - - URL: http://curl.haxx.se/mail/lib-2013-01/0351.html - -- zz40-xc-ovr.m4: check another three basic utilities - -- zz40-xc-ovr.m4: 1.0 interface stabilization - - - Stabilization results in 4 public interface m4 macros: - XC_CONFIGURE_PREAMBLE - XC_CONFIGURE_PREAMBLE_VER_MAJOR - XC_CONFIGURE_PREAMBLE_VER_MINOR - XC_CHECK_PATH_SEPARATOR - - Avoid one level of internal indirection - - Update comments - - Drop XC_OVR_ZZ40 macro - -- zz40-xc-ovr.m4: emit witness message in configure BODY - - This avoids witness message in output when running configure --help, - while sending the message to config.log for other configure runs. - -- zz40-xc-ovr.m4: truly do version conditional overriding - - - version conditional overriding - - catch unexpanded XC macros - - fix double words in comments - -- zz40-xc-ovr.m4: fix variable assignment of subshell output bashism - - Tor Arntsen analyzed and reported the issue. - - URL: http://curl.haxx.se/mail/lib-2013-01/0306.html - -- zz40-xc-ovr.m4: reinstate strict AC_REQUIRE macro dependencies diff --git a/deps/cares/CMakeLists.txt b/deps/cares/CMakeLists.txt index e951cafd7b4068..2718ce52b73ff6 100644 --- a/deps/cares/CMakeLists.txt +++ b/deps/cares/CMakeLists.txt @@ -12,10 +12,10 @@ INCLUDE (CheckCSourceCompiles) INCLUDE (CheckStructHasMember) INCLUDE (CheckLibraryExists) -PROJECT (c-ares LANGUAGES C VERSION "1.27.0" ) +PROJECT (c-ares LANGUAGES C VERSION "1.28.1" ) # Set this version before release -SET (CARES_VERSION "1.27.0") +SET (CARES_VERSION "1.28.1") INCLUDE (GNUInstallDirs) # include this *AFTER* PROJECT(), otherwise paths are wrong. @@ -30,7 +30,7 @@ INCLUDE (GNUInstallDirs) # include this *AFTER* PROJECT(), otherwise paths are w # For example, a version of 4:0:2 would generate output such as: # libname.so -> libname.so.2 # libname.so.2 -> libname.so.2.2.0 -SET (CARES_LIB_VERSIONINFO "14:0:12") +SET (CARES_LIB_VERSIONINFO "15:1:13") OPTION (CARES_STATIC "Build as a static library" OFF) @@ -98,7 +98,7 @@ IF (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR) SET (CMAKE_RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}") SET (CMAKE_LIBRARY_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}") SET (CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}") - SET (PACKAGE_DIRECTORY ${PROJECT_BINARY_DIR}/package) + SET (CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}") ENDIF () # Destinations for installing different kinds of targets (pass to install command). @@ -113,11 +113,10 @@ SET (TARGETS_INST_DEST # CHECK_LIBRARY_EXISTS can't be used as it will return true if the function # is found in a different required/dependent library. 
MACRO (CARES_FUNCTION_IN_LIBRARY func lib var) - - SET (_ORIG_CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES}") + SET (_ORIG_FIL_CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES}") SET (CMAKE_REQUIRED_LIBRARIES ) CHECK_FUNCTION_EXISTS ("${func}" "_CARES_FUNC_IN_LIB_GLOBAL_${func}") - SET (CMAKE_REQUIRED_LIBRARIES "${_ORIG_CMAKE_REQUIRED_LIBRARIES}") + SET (CMAKE_REQUIRED_LIBRARIES "${_ORIG_FIL_CMAKE_REQUIRED_LIBRARIES}") IF ("${_CARES_FUNC_IN_LIB_GLOBAL_${func}}") SET (${var} FALSE) @@ -207,6 +206,7 @@ CHECK_INCLUDE_FILES (malloc.h HAVE_MALLOC_H) CHECK_INCLUDE_FILES (memory.h HAVE_MEMORY_H) CHECK_INCLUDE_FILES (netdb.h HAVE_NETDB_H) CHECK_INCLUDE_FILES (netinet/in.h HAVE_NETINET_IN_H) +CHECK_INCLUDE_FILES (netinet6/in6.h HAVE_NETINET6_IN6_H) # On old MacOS SDK versions, you must include sys/socket.h before net/if.h IF (HAVE_SYS_SOCKET_H) CHECK_INCLUDE_FILES ("sys/socket.h;net/if.h" HAVE_NET_IF_H) @@ -303,7 +303,11 @@ ENDIF () # headers, libraries, and definitions for the detection to work properly # CMAKE_REQUIRED_DEFINITIONS, CMAKE_REQUIRED_LIBRARIES, and # CMAKE_EXTRA_INCLUDE_FILES. When we're done with the detection, we'll -# unset them. +# restore them to their original values (otherwise a parent project +# that tries to set these won't be maintained, see Issue #729) +SET (ORIG_CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEEFINITIONS}) +SET (ORIG_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES}) +SET (ORIG_CMAKE_EXTRA_INCLUDE_FILES ${CMAKE_EXTRA_INCLUDE_FILES}) SET (CMAKE_REQUIRED_DEFINITIONS ${SYSFLAGS}) LIST (APPEND CMAKE_REQUIRED_LIBRARIES ${CARES_DEPENDENT_LIBS}) @@ -323,6 +327,7 @@ CARES_EXTRAINCLUDE_IFSET (HAVE_NETDB_H netdb.h) CARES_EXTRAINCLUDE_IFSET (HAVE_NET_IF_H net/if.h) CARES_EXTRAINCLUDE_IFSET (HAVE_IFADDRS_H ifaddrs.h) CARES_EXTRAINCLUDE_IFSET (HAVE_NETINET_IN_H netinet/in.h) +CARES_EXTRAINCLUDE_IFSET (HAVE_NETINET6_IN6_H netinet6/in6.h) CARES_EXTRAINCLUDE_IFSET (HAVE_NETINET_TCP_H netinet/tcp.h) CARES_EXTRAINCLUDE_IFSET (HAVE_SIGNAL_H signal.h) CARES_EXTRAINCLUDE_IFSET (HAVE_STDLIB_H stdlib.h) @@ -458,10 +463,10 @@ CHECK_SYMBOL_EXISTS (epoll_create1 "${CMAKE_EXTRA_INCLUDE_FILES}" HAVE_EPOLL) # from libc. We need to perform a link test instead of a header/symbol test. 
CHECK_FUNCTION_EXISTS (__system_property_get HAVE___SYSTEM_PROPERTY_GET) -# Unset temporary data -SET (CMAKE_EXTRA_INCLUDE_FILES) -SET (CMAKE_REQUIRED_DEFINITIONS) -SET (CMAKE_REQUIRED_LIBRARIES) +# Restore original values (as per Issue #729) +SET (CMAKE_REQUIRED_DEFINITIONS ${ORIG_CMAKE_REQUIRED_DEEFINITIONS}) +SET (CMAKE_REQUIRED_LIBRARIES ${ORIG_CMAKE_REQUIRED_LIBRARIES}) +SET (CMAKE_EXTRA_INCLUDE_FILES ${ORIG_CMAKE_EXTRA_INCLUDE_FILES}) ################################################################################ diff --git a/deps/cares/Makefile.in b/deps/cares/Makefile.in index d1c663d5488366..928cdc217ee6de 100644 --- a/deps/cares/Makefile.in +++ b/deps/cares/Makefile.in @@ -364,6 +364,7 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PKGCONFIG_CFLAGS = @PKGCONFIG_CFLAGS@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ diff --git a/deps/cares/Makefile.msvc b/deps/cares/Makefile.msvc index c89454ec155d94..f6e8f4e71f2e6f 100644 --- a/deps/cares/Makefile.msvc +++ b/deps/cares/Makefile.msvc @@ -229,7 +229,7 @@ LINK_CMD_EXE_DBG = $(LINK_CMD_EXE) /debug $(PDBTYPE_CONSOLIDATE) CARES_TARGET = $(STA_LIB_REL).lib CARES_CFLAGS = /DCARES_BUILDING_LIBRARY /DCARES_STATICLIB CARES_LFLAGS = -SPROG_CFLAGS = /DCARES_STATICLIB +SPROG_CFLAGS = /DCARES_STATICLIB /DCARES_NO_DEPRECATED SPROG_LFLAGS = /libpath:$(CARES_OUTDIR) $(EX_LIBS_REL) $(STA_LIB_REL).lib CARES_LINK = $(LINK_CMD_LIB) SPROG_LINK = $(LINK_CMD_EXE_REL) @@ -240,7 +240,7 @@ CC_CMD = $(CC_CMD_REL) CARES_TARGET = $(STA_LIB_DBG).lib CARES_CFLAGS = /DCARES_BUILDING_LIBRARY /DCARES_STATICLIB /DDEBUGBUILD CARES_LFLAGS = -SPROG_CFLAGS = /DCARES_STATICLIB +SPROG_CFLAGS = /DCARES_STATICLIB /DCARES_NO_DEPRECATED SPROG_LFLAGS = /libpath:$(CARES_OUTDIR) $(EX_LIBS_DBG) $(STA_LIB_DBG).lib CARES_LINK = $(LINK_CMD_LIB) SPROG_LINK = $(LINK_CMD_EXE_DBG) @@ -251,7 +251,7 @@ CC_CMD = $(CC_CMD_DBG) CARES_TARGET = $(DYN_LIB_REL).dll CARES_CFLAGS = /DCARES_BUILDING_LIBRARY CARES_LFLAGS = /release $(EX_LIBS_REL) /implib:$(CARES_OUTDIR)\$(IMP_LIB_REL).lib $(PDB_NONE) -SPROG_CFLAGS = +SPROG_CFLAGS = /DCARES_NO_DEPRECATED SPROG_LFLAGS = /libpath:$(CARES_OUTDIR) $(EX_LIBS_REL) $(IMP_LIB_REL).lib CARES_LINK = $(LINK_CMD_DLL) SPROG_LINK = $(LINK_CMD_EXE_REL) @@ -264,7 +264,7 @@ RC_CMD = $(RC_CMD_REL) CARES_TARGET = $(DYN_LIB_DBG).dll CARES_CFLAGS = /DCARES_BUILDING_LIBRARY /DDEBUGBUILD CARES_LFLAGS = /debug $(EX_LIBS_DBG) /implib:$(CARES_OUTDIR)\$(IMP_LIB_DBG).lib /pdb:$(CARES_OUTDIR)\$(DYN_LIB_DBG).pdb $(PDBTYPE_CONSOLIDATE) -SPROG_CFLAGS = +SPROG_CFLAGS = /DCARES_NO_DEPRECATED SPROG_LFLAGS = /libpath:$(CARES_OUTDIR) $(EX_LIBS_DBG) $(IMP_LIB_DBG).lib CARES_LINK = $(LINK_CMD_DLL) SPROG_LINK = $(LINK_CMD_EXE_DBG) diff --git a/deps/cares/RELEASE-NOTES.md b/deps/cares/RELEASE-NOTES.md index 0fdcbc6b968488..3a9b9dd9c35fa1 100644 --- a/deps/cares/RELEASE-NOTES.md +++ b/deps/cares/RELEASE-NOTES.md @@ -1,40 +1,49 @@ -## c-ares version 1.27.0 - Feb 23 2024 +## c-ares version 1.28.1 - Mar 30 2024 -This is a security, feature, and bugfix release. +This release contains a fix for a single significant regression introduced +in c-ares 1.28.0. -Security: +* `ares_search()` and `ares_getaddrinfo()` resolution fails if no search domains + are specified. [Issue #737](https://github.com/c-ares/c-ares/issues/737) -* Moderate. CVE-2024-25629. 
Reading malformatted `/etc/resolv.conf`, - `/etc/nsswitch.conf` or the `HOSTALIASES` file could result in a crash. - [GHSA-mg26-v6qh-x48q](https://github.com/c-ares/c-ares/security/advisories/GHSA-mg26-v6qh-x48q) + +## c-ares version 1.28.0 - Mar 29 2024 + +This is a feature and bugfix release. Features: -* New function `ares_queue_active_queries()` to retrieve number of in-flight - queries. [PR #712](https://github.com/c-ares/c-ares/pull/712) -* New function `ares_queue_wait_empty()` to wait for the number of in-flight - queries to reach zero. [PR #710](https://github.com/c-ares/c-ares/pull/710) -* New `ARES_FLAG_NO_DEFLT_SVR` for `ares_init_options()` to return a failure if - no DNS servers can be found rather than attempting to use `127.0.0.1`. This - also introduces a new ares status code of `ARES_ENOSERVER`. [PR #713](https://github.com/c-ares/c-ares/pull/713) +* Emit warnings when deprecated c-ares functions are used. This can be + disabled by passing a compiler definition of `CARES_NO_DEPRECATED`. [PR #732](https://github.com/c-ares/c-ares/pull/732) +* Add function `ares_search_dnsrec()` to search for records using the new DNS + record data structures. [PR #719](https://github.com/c-ares/c-ares/pull/719) +* Rework internals to pass around `ares_dns_record_t` instead of binary data, + this introduces new public functions of `ares_query_dnsrec()` and + `ares_send_dnsrec()`. [PR #730](https://github.com/c-ares/c-ares/pull/730) Changes: -* EDNS Packet size should be 1232 as per DNS Flag Day. [PR #705](https://github.com/c-ares/c-ares/pull/705) +* tests: when performing simulated queries, reduce timeouts to make tests run + faster +* Replace configuration file parsers with memory-safe parser. [PR #725](https://github.com/c-ares/c-ares/pull/725) +* Remove `acountry` completely, the manpage might still get installed otherwise. [Issue #718](https://github.com/c-ares/c-ares/pull/718) Bugfixes: -* Windows DNS suffix search list memory leak. [PR #711](https://github.com/c-ares/c-ares/pull/711) -* Fix warning due to ignoring return code of `write()`. [PR #709](https://github.com/c-ares/c-ares/pull/709) -* CMake: don't override target output locations if not top-level. [Issue #708](https://github.com/c-ares/c-ares/issues/708) -* Fix building c-ares without thread support. [PR #700](https://github.com/c-ares/c-ares/pull/700) +* CMake: don't overwrite global required libraries/definitions/includes which + could cause build errors for projects chain building c-ares. [Issue #729](https://github.com/c-ares/c-ares/issues/729) +* On some platforms, `netinet6/in6.h` is not included by `netinet/in.h` + and needs to be included separately. [PR #728](https://github.com/c-ares/c-ares/pull/728) +* Fix a potential memory leak in `ares_init()`. [Issue #724](https://github.com/c-ares/c-ares/issues/724) +* Some platforms don't have the `isascii()` function. Implement as a macro. [PR #721](https://github.com/c-ares/c-ares/pull/721) +* CMake: Fix Chain building if CMAKE runtime paths not set +* NDots configuration should allow a value of zero. 
[PR #735](https://github.com/c-ares/c-ares/pull/735) Thanks go to these friendly people for their efforts and contributions for this release: -* Anthony Alayo (@anthonyalayo) * Brad House (@bradh352) -* Cheng Zhao (@zcbenz) * Cristian Rodríguez (@crrodriguez) * Daniel Stenberg (@bagder) +* Faraz (@farazrbx) +* Faraz Fallahi (@fffaraz) * Oliver Welsh (@oliverwelsh) -* Vojtěch Vobr (@vojtechvobr) diff --git a/deps/cares/aminclude_static.am b/deps/cares/aminclude_static.am index e3fc636c7b51a4..6fa817a8346703 100644 --- a/deps/cares/aminclude_static.am +++ b/deps/cares/aminclude_static.am @@ -1,6 +1,6 @@ # aminclude_static.am generated automatically by Autoconf -# from AX_AM_MACROS_STATIC on Fri Feb 23 08:24:27 CET 2024 +# from AX_AM_MACROS_STATIC on Sat Mar 30 16:17:17 CET 2024 # Code coverage diff --git a/deps/cares/cares.gyp b/deps/cares/cares.gyp index e8ac0b75796e72..053cdf8c8286ad 100644 --- a/deps/cares/cares.gyp +++ b/deps/cares/cares.gyp @@ -26,7 +26,6 @@ 'src/lib/ares__llist.c', 'src/lib/ares__llist.h', 'src/lib/ares__parse_into_addrinfo.c', - 'src/lib/ares__read_line.c', 'src/lib/ares__slist.c', 'src/lib/ares__slist.h', 'src/lib/ares__socket.c', @@ -74,7 +73,6 @@ 'src/lib/ares_library_init.c', 'src/lib/ares_ipv6.h', 'src/lib/ares_math.c', - 'src/lib/ares_mkquery.c', 'src/lib/ares_options.c', 'src/lib/ares_parse_a_reply.c', 'src/lib/ares_parse_aaaa_reply.c', @@ -154,7 +152,15 @@ 'type': '<(library)', 'include_dirs': [ 'include' ], 'direct_dependent_settings': { - 'include_dirs': [ 'include' ] + 'include_dirs': [ 'include' ], + 'cflags': [ '-Wno-error=deprecated-declarations' ], + 'conditions': [ + [ 'OS=="mac"', { + 'xcode_settings': { + 'OTHER_CFLAGS': [ '-Wno-error=deprecated-declarations' ] + } + }] + ] }, 'sources': [ '<@(cares_sources_common)', diff --git a/deps/cares/configure b/deps/cares/configure index ac3c7b55db0565..656164f46e3ad8 100755 --- a/deps/cares/configure +++ b/deps/cares/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.71 for c-ares 1.27.0. +# Generated by GNU Autoconf 2.71 for c-ares 1.28.1. # # Report bugs to . # @@ -621,8 +621,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='c-ares' PACKAGE_TARNAME='c-ares' -PACKAGE_VERSION='1.27.0' -PACKAGE_STRING='c-ares 1.27.0' +PACKAGE_VERSION='1.28.1' +PACKAGE_STRING='c-ares 1.28.1' PACKAGE_BUGREPORT='c-ares mailing list: http://lists.haxx.se/listinfo/c-ares' PACKAGE_URL='' @@ -1420,7 +1420,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures c-ares 1.27.0 to adapt to many kinds of systems. +\`configure' configures c-ares 1.28.1 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1491,7 +1491,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of c-ares 1.27.0:";; + short | recursive ) echo "Configuration of c-ares 1.28.1:";; esac cat <<\_ACEOF @@ -1627,7 +1627,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -c-ares configure 1.27.0 +c-ares configure 1.28.1 generated by GNU Autoconf 2.71 Copyright (C) 2021 Free Software Foundation, Inc. @@ -2251,7 +2251,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. 
-It was created by c-ares $as_me 1.27.0, which was +It was created by c-ares $as_me 1.28.1, which was generated by GNU Autoconf 2.71. Invocation command line was $ $0$ac_configure_args_raw @@ -3225,7 +3225,7 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu -CARES_VERSION_INFO="14:0:12" +CARES_VERSION_INFO="15:1:13" @@ -5907,7 +5907,7 @@ fi # Define the identity of the package. PACKAGE='c-ares' - VERSION='1.27.0' + VERSION='1.28.1' printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h @@ -21681,6 +21681,31 @@ if test "x$ac_cv_header_netinet_in_h" = xyes then : printf "%s\n" "#define HAVE_NETINET_IN_H 1" >>confdefs.h +fi +ac_fn_c_check_header_compile "$LINENO" "netinet6/in6.h" "ac_cv_header_netinet6_in6_h" " +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_TIME_H +#include +#endif +#ifdef HAVE_ARPA_NAMESER_H +#include +#endif + +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +#ifdef HAVE_NETINET_IN_H +#include +#endif + + +" +if test "x$ac_cv_header_netinet6_in6_h" = xyes +then : + printf "%s\n" "#define HAVE_NETINET6_IN6_H 1" >>confdefs.h + fi ac_fn_c_check_header_compile "$LINENO" "netinet/tcp.h" "ac_cv_header_netinet_tcp_h" " #ifdef HAVE_SYS_TYPES_H @@ -25931,7 +25956,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by c-ares $as_me 1.27.0, which was +This file was extended by c-ares $as_me 1.28.1, which was generated by GNU Autoconf 2.71. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -25999,7 +26024,7 @@ ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config='$ac_cs_config_escaped' ac_cs_version="\\ -c-ares config.status 1.27.0 +c-ares config.status 1.28.1 configured by $0, generated by GNU Autoconf 2.71, with options \\"\$ac_cs_config\\" diff --git a/deps/cares/configure.ac b/deps/cares/configure.ac index 1a3ba8c3fc4c49..4d263a7f309017 100644 --- a/deps/cares/configure.ac +++ b/deps/cares/configure.ac @@ -2,10 +2,10 @@ dnl Copyright (C) The c-ares project and its contributors dnl SPDX-License-Identifier: MIT AC_PREREQ([2.69]) -AC_INIT([c-ares], [1.27.0], +AC_INIT([c-ares], [1.28.1], [c-ares mailing list: http://lists.haxx.se/listinfo/c-ares]) -CARES_VERSION_INFO="14:0:12" +CARES_VERSION_INFO="15:1:13" dnl This flag accepts an argument of the form current[:revision[:age]]. So, dnl passing -version-info 3:12:1 sets current to 3, revision to 12, and age to dnl 1. 
@@ -436,6 +436,7 @@ AC_CHECK_HEADERS( netioapi.h \ netdb.h \ netinet/in.h \ + netinet6/in6.h \ netinet/tcp.h \ net/if.h \ ifaddrs.h \ diff --git a/deps/cares/docs/Makefile.in b/deps/cares/docs/Makefile.in index 4f5bb62409c7ab..8cb46878fa59ee 100644 --- a/deps/cares/docs/Makefile.in +++ b/deps/cares/docs/Makefile.in @@ -263,6 +263,7 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PKGCONFIG_CFLAGS = @PKGCONFIG_CFLAGS@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ @@ -357,18 +358,22 @@ MANPAGES = ares_cancel.3 \ ares_dns_rcode_tostr.3 \ ares_dns_record.3 \ ares_dns_record_create.3 \ + ares_dns_record_duplicate.3 \ ares_dns_record_get_flags.3 \ ares_dns_record_get_id.3 \ ares_dns_record_get_opcode.3 \ ares_dns_record_get_rcode.3 \ ares_dns_record_destroy.3 \ ares_dns_record_query_add.3 \ + ares_dns_record_query_set_name.3 \ + ares_dns_record_query_set_type.3 \ ares_dns_record_query_cnt.3 \ ares_dns_record_query_get.3 \ ares_dns_record_rr_add.3 \ ares_dns_record_rr_cnt.3 \ ares_dns_record_rr_del.3 \ ares_dns_record_rr_get.3 \ + ares_dns_record_rr_get_const.3 \ ares_dns_rec_type_fromstr.3 \ ares_dns_rec_type_t.3 \ ares_dns_rr.3 \ @@ -442,10 +447,16 @@ MANPAGES = ares_cancel.3 \ ares_parse_uri_reply.3 \ ares_process.3 \ ares_query.3 \ + ares_query_dnsrec.3 \ + ares_queue.3 \ + ares_queue_active_queries.3 \ + ares_queue_wait_empty.3 \ ares_reinit.3 \ ares_save_options.3 \ ares_search.3 \ + ares_search_dnsrec.3 \ ares_send.3 \ + ares_send_dnsrec.3 \ ares_set_local_dev.3 \ ares_set_local_ip4.3 \ ares_set_local_ip6.3 \ diff --git a/deps/cares/docs/Makefile.inc b/deps/cares/docs/Makefile.inc index 3645a7fcddc58b..882bf2280446d5 100644 --- a/deps/cares/docs/Makefile.inc +++ b/deps/cares/docs/Makefile.inc @@ -20,18 +20,22 @@ MANPAGES = ares_cancel.3 \ ares_dns_rcode_tostr.3 \ ares_dns_record.3 \ ares_dns_record_create.3 \ + ares_dns_record_duplicate.3 \ ares_dns_record_get_flags.3 \ ares_dns_record_get_id.3 \ ares_dns_record_get_opcode.3 \ ares_dns_record_get_rcode.3 \ ares_dns_record_destroy.3 \ ares_dns_record_query_add.3 \ + ares_dns_record_query_set_name.3 \ + ares_dns_record_query_set_type.3 \ ares_dns_record_query_cnt.3 \ ares_dns_record_query_get.3 \ ares_dns_record_rr_add.3 \ ares_dns_record_rr_cnt.3 \ ares_dns_record_rr_del.3 \ ares_dns_record_rr_get.3 \ + ares_dns_record_rr_get_const.3 \ ares_dns_rec_type_fromstr.3 \ ares_dns_rec_type_t.3 \ ares_dns_rr.3 \ @@ -105,13 +109,16 @@ MANPAGES = ares_cancel.3 \ ares_parse_uri_reply.3 \ ares_process.3 \ ares_query.3 \ + ares_query_dnsrec.3 \ ares_queue.3 \ ares_queue_active_queries.3 \ ares_queue_wait_empty.3 \ ares_reinit.3 \ ares_save_options.3 \ ares_search.3 \ + ares_search_dnsrec.3 \ ares_send.3 \ + ares_send_dnsrec.3 \ ares_set_local_dev.3 \ ares_set_local_ip4.3 \ ares_set_local_ip6.3 \ diff --git a/deps/cares/docs/adig.1 b/deps/cares/docs/adig.1 index 48b491b593b73c..59923790587ddd 100644 --- a/deps/cares/docs/adig.1 +++ b/deps/cares/docs/adig.1 @@ -64,4 +64,4 @@ Report bugs to the c-ares mailing list: \fBhttps://lists.haxx.se/listinfo/c-ares\fR .SH "SEE ALSO" .PP -acountry(1), ahost(1). +ahost(1). 
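
Editor's note, not part of the patch: the 1.28.0 changelog above adds deprecation warnings for legacy entry points, and the cares.gyp change relaxes them to non-fatal for the embedded build. A minimal consumer-side sketch of opting out entirely, based on the CARES_NO_DEPRECATED guard added to ares.h later in this diff; the numeric class/type values are illustrative:

/* Defining CARES_NO_DEPRECATED before including ares.h (or passing
 * -DCARES_NO_DEPRECATED to the compiler, as the changelog suggests) turns the
 * CARES_DEPRECATED_FOR() attributes into no-ops. */
#define CARES_NO_DEPRECATED
#include <ares.h>

/* With the define above this compiles silently; without it, GCC/clang warn
 * that ares_query() is deprecated in favor of ares_query_dnsrec(). */
static void legacy_lookup(ares_channel_t *channel, ares_callback cb, void *arg)
{
  ares_query(channel, "example.com", 1 /* class IN */, 1 /* type A */, cb, arg);
}
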
diff --git a/deps/cares/docs/ahost.1 b/deps/cares/docs/ahost.1 index 5feed981ab62c1..e17057273f8285 100644 --- a/deps/cares/docs/ahost.1 +++ b/deps/cares/docs/ahost.1 @@ -47,4 +47,4 @@ Report bugs to the c-ares mailing list: \fBhttps://lists.haxx.se/listinfo/c-ares\fR .SH "SEE ALSO" .PP -acountry(1), adig(1) +adig(1) diff --git a/deps/cares/docs/ares_dns_record.3 b/deps/cares/docs/ares_dns_record.3 index fe23b5eece60e5..01ce7601aa3199 100644 --- a/deps/cares/docs/ares_dns_record.3 +++ b/deps/cares/docs/ares_dns_record.3 @@ -19,7 +19,7 @@ ares_status_t ares_dns_parse(const unsigned char *buf, size_t buf_len, unsigned int flags, ares_dns_record_t **dnsrec); -ares_status_t ares_dns_write(ares_dns_record_t *dnsrec, +ares_status_t ares_dns_write(const ares_dns_record_t *dnsrec, unsigned char **buf, size_t *buf_len); ares_status_t ares_dns_record_create(ares_dns_record_t **dnsrec, @@ -28,6 +28,8 @@ ares_status_t ares_dns_record_create(ares_dns_record_t **dnsrec, ares_dns_opcode_t opcode, ares_dns_rcode_t rcode); +ares_dns_record_t *ares_dns_record_duplicate(const ares_dns_record_t *dnsrec); + unsigned short ares_dns_record_get_id(const ares_dns_record_t *dnsrec); unsigned short ares_dns_record_get_flags(const ares_dns_record_t *dnsrec); @@ -41,6 +43,14 @@ ares_status_t ares_dns_record_query_add(ares_dns_record_t *dnsrec, ares_dns_rec_type_t qtype, ares_dns_class_t qclass); +ares_status_t ares_dns_record_query_set_name(ares_dns_record_t *dnsrec, + size_t idx, + const char *name); + +ares_status_t ares_dns_record_query_set_type(ares_dns_record_t *dnsrec, + size_t idx, + ares_dns_rec_type_t qtype); + size_t ares_dns_record_query_cnt(const ares_dns_record_t *dnsrec); ares_status_t ares_dns_record_query_get(const ares_dns_record_t *dnsrec, @@ -67,7 +77,7 @@ on requests, and some may only be valid on responses: .B ARES_REC_TYPE_SOA - Start of authority zone .br -.B ARES_REC_TYPE_PTR +.B ARES_REC_TYPE_PTR - Domain name pointer .br .B ARES_REC_TYPE_HINFO @@ -317,6 +327,13 @@ is meant mostly for responses and is passed in the .IR rcode parameter and is typically \fPARES_RCODE_NOERROR\fP. +The \fIares_dns_record_duplicate(3)\fP function duplicates an existing DNS +record structure. This may be useful if needing to save a result as retrieved +from \fIares_send_dnsrec(3)\fP or \fIares_search_dnsrec(3)\fP. The structure +to be duplicated is passed in the +.IR dnsrec +parameter, and the duplicated copy is returned, or NULL on error such as +out of memory. The \fIares_dns_record_get_id(3)\fP function is used to retrieve the DNS message id from the DNS record provided in the @@ -350,6 +367,29 @@ parameter and the question class (typically \fIARES_CLASS_IN\fP) in the .IR qclass parameter. +The \fIares_dns_record_query_set_name(3)\fP function is used to modify the +question name in the DNS record provided in the +.IR dnsrec +parameter. The index of the query, which must be less than +\fIares_dns_record_query_cnt(3)\fP, is provided in the +.IR idx +parameter. The new domain name is provided in the +.IR name +parameter. Care should be taken as this will cause invalidation of any +\fIname\fP pointer retrieved from \fIares_dns_Record_query_get(3)\fP. This +function is useful if sending multiple similar queries without re-creating +the entire DNS query. + +The \fIares_dns_record_query_set_type(3)\fP function is used to modify the +question type in the DNS record provided in the +.IR dnsrec +parameter. 
The index of the query, which must be less than +\fIares_dns_record_query_cnt(3)\fP, is provided in the +.IR idx +parameter. The new query type is provided in the +.IR qtype +parameter. + The \fIares_dns_record_query_cnt(3)\fP function is used to retrieve the number of DNS questions in the DNS record provided in the .IR dnsrec @@ -363,7 +403,8 @@ parameter. The index provided in the parameter must be less than the value returned from \fIares_dns_record_query_cnt(3)\fP. The DNS question name will be returned in the variable pointed to by the .IR name -parameter, this may be provided as NULL if the name is not needed. +parameter, this may be provided as NULL if the name is not needed. This pointer +will be invalided by any call to \fIares_dns_record_query_set_name(3)\fP. The DNS question type will be returned in the variable pointed to by the .IR qtype parameter, this may be provided as NULL if the type is not needed. diff --git a/deps/cares/docs/ares_dns_record_duplicate.3 b/deps/cares/docs/ares_dns_record_duplicate.3 new file mode 100644 index 00000000000000..4acc581d29789c --- /dev/null +++ b/deps/cares/docs/ares_dns_record_duplicate.3 @@ -0,0 +1,3 @@ +.\" Copyright (C) 2023 The c-ares project and its contributors. +.\" SPDX-License-Identifier: MIT +.so man3/ares_dns_record.3 diff --git a/deps/cares/docs/ares_dns_record_query_set_name.3 b/deps/cares/docs/ares_dns_record_query_set_name.3 new file mode 100644 index 00000000000000..4acc581d29789c --- /dev/null +++ b/deps/cares/docs/ares_dns_record_query_set_name.3 @@ -0,0 +1,3 @@ +.\" Copyright (C) 2023 The c-ares project and its contributors. +.\" SPDX-License-Identifier: MIT +.so man3/ares_dns_record.3 diff --git a/deps/cares/docs/ares_dns_record_query_set_type.3 b/deps/cares/docs/ares_dns_record_query_set_type.3 new file mode 100644 index 00000000000000..4acc581d29789c --- /dev/null +++ b/deps/cares/docs/ares_dns_record_query_set_type.3 @@ -0,0 +1,3 @@ +.\" Copyright (C) 2023 The c-ares project and its contributors. +.\" SPDX-License-Identifier: MIT +.so man3/ares_dns_record.3 diff --git a/deps/cares/docs/ares_dns_record_rr_get_const.3 b/deps/cares/docs/ares_dns_record_rr_get_const.3 new file mode 100644 index 00000000000000..b93e4cd4e37fa8 --- /dev/null +++ b/deps/cares/docs/ares_dns_record_rr_get_const.3 @@ -0,0 +1,3 @@ +.\" Copyright (C) 2023 The c-ares project and its contributors. 
+.\" SPDX-License-Identifier: MIT +.so man3/ares_dns_rr.3 diff --git a/deps/cares/docs/ares_dns_rr.3 b/deps/cares/docs/ares_dns_rr.3 index 2999d18e3aa6bb..290859e838e7ef 100644 --- a/deps/cares/docs/ares_dns_rr.3 +++ b/deps/cares/docs/ares_dns_rr.3 @@ -4,14 +4,15 @@ .TH ARES_DNS_RR 3 "12 November 2023" .SH NAME ares_dns_record_rr_add, ares_dns_record_rr_cnt, ares_dns_record_rr_del, -ares_dns_record_rr_get, ares_dns_rr_get_addr, ares_dns_rr_get_addr6, -ares_dns_rr_get_bin, ares_dns_rr_get_class, ares_dns_rr_get_name, -ares_dns_rr_get_opt, ares_dns_rr_get_opt_byid, ares_dns_rr_get_opt_cnt, -ares_dns_rr_get_str, ares_dns_rr_get_ttl, ares_dns_rr_get_type, -ares_dns_rr_get_u16, ares_dns_rr_get_u32, ares_dns_rr_get_u8, ares_dns_rr_key_t, -ares_dns_rr_set_addr, ares_dns_rr_set_addr6, ares_dns_rr_set_bin, -ares_dns_rr_set_opt, ares_dns_rr_set_str, ares_dns_rr_set_u16, -ares_dns_rr_set_u32, ares_dns_rr_set_u8, ares_dns_section_t, ares_tlsa_match_t, +ares_dns_record_rr_get, ares_dns_record_rr_get_const, ares_dns_rr_get_addr, +ares_dns_rr_get_addr6, ares_dns_rr_get_bin, ares_dns_rr_get_class, +ares_dns_rr_get_name, ares_dns_rr_get_opt, ares_dns_rr_get_opt_byid, +ares_dns_rr_get_opt_cnt, ares_dns_rr_get_str, ares_dns_rr_get_ttl, +ares_dns_rr_get_type, ares_dns_rr_get_u16, ares_dns_rr_get_u32, +ares_dns_rr_get_u8, ares_dns_rr_key_t, ares_dns_rr_set_addr, +ares_dns_rr_set_addr6, ares_dns_rr_set_bin, ares_dns_rr_set_opt, +ares_dns_rr_set_str, ares_dns_rr_set_u16, ares_dns_rr_set_u32, +ares_dns_rr_set_u8, ares_dns_section_t, ares_tlsa_match_t, ares_tlsa_selector_t, ares_tlsa_usage_t \- DNS Resource Record creating, reading, and writing functions. .SH SYNOPSIS @@ -33,6 +34,10 @@ ares_dns_rr_t *ares_dns_record_rr_get(ares_dns_record_t *dnsrec, ares_dns_section_t sect, size_t idx); +const ares_dns_rr_t *ares_dns_record_rr_get_const(const ares_dns_record_t *dnsrec, + ares_dns_section_t sect, + size_t idx); + ares_status_t ares_dns_record_rr_del(ares_dns_record_t *dnsrec, ares_dns_section_t sect, size_t idx); @@ -357,14 +362,17 @@ parameter, and the Time To Live (TTL) in the parameter. -The \fIares_dns_record_rr_get(3)\fP function is used to retrieve the resource -record pointer from the DNS record provided in the +The \fIares_dns_record_rr_get(3)\fP and \fIares_dns_record_rr_get_const(3)\fP +functions are used to retrieve the resource record pointer from the DNS record +provided in the .IR dnsrec parameter, for the resource record section provided in the .IR sect parameter, for the specified index in the .IR idx -parameter. The index must be less than \fIares_dns_record_rr_cnt(3)\fP. +parameter. The index must be less than \fIares_dns_record_rr_cnt(3)\fP. The +former returns a writable pointer to the resource record, while the latter +returns a read-only pointer to the resource record. The \fIares_dns_record_rr_del(3)\fP is used to delete a resource record from @@ -615,8 +623,8 @@ prescribed datatype values and in general can't fail except for misuse cases, in which a 0 (or NULL) may be returned, however 0 can also be a valid return value for most of these functions. -\fIares_dns_record_rr_get(3)\fP will return the requested resource record -pointer or NULL on failure (misuse). +\fIares_dns_record_rr_get(3)\fP and \fIares_dns_record_rr_get_const(3)\fP will +return the requested resource record pointer or NULL on failure (misuse). \fIares_dns_rr_get_opt_byid(3)\fP will return ARES_TRUE if the option was found, otherwise ARES_FALSE if not found (or misuse). 
diff --git a/deps/cares/docs/ares_init_options.3 b/deps/cares/docs/ares_init_options.3 index 000cc1d9a592b5..72889b5b4874ef 100644 --- a/deps/cares/docs/ares_init_options.3 +++ b/deps/cares/docs/ares_init_options.3 @@ -158,7 +158,7 @@ before giving up. The default is three tries. The number of dots which must be present in a domain name for it to be queried for "as is" prior to querying for it with the default domain extensions appended. The default value is 1 unless set otherwise by -resolv.conf or the RES_OPTIONS environment variable. +resolv.conf or the RES_OPTIONS environment variable. Valid range is 0-15. .TP 18 .B ARES_OPT_MAXTIMEOUTMS .B int \fImaxtimeout\fP; diff --git a/deps/cares/docs/ares_query.3 b/deps/cares/docs/ares_query.3 index 00e44f52594d90..24decf7009441b 100644 --- a/deps/cares/docs/ares_query.3 +++ b/deps/cares/docs/ares_query.3 @@ -9,19 +9,32 @@ ares_query \- Initiate a single-question DNS query .nf #include -typedef void (*ares_callback)(void *\fIarg\fP, int \fIstatus\fP, - int \fItimeouts\fP, unsigned char *\fIabuf\fP, - int \fIalen\fP) +typedef void (*ares_callback_dnsrec)(void *arg, ares_status_t status, + size_t timeouts, + const ares_dns_record_t *dnsrec); + +ares_status_t ares_query_dnsrec(ares_channel_t *channel, + const char *name, + ares_dns_class_t dnsclass, + ares_dns_rec_type_t type, + ares_callback_dnsrec callback, + void *arg, + unsigned short *qid); + +typedef void (*ares_callback)(void *arg, int status, + int timeouts, unsigned char *abuf, + int alen); + +void ares_query(ares_channel_t *channel, const char *name, + int dnsclass, int type, + ares_callback callback, void *arg); -void ares_query(ares_channel_t *\fIchannel\fP, const char *\fIname\fP, - int \fIdnsclass\fP, int \fItype\fP, - ares_callback \fIcallback\fP, void *\fIarg\fP) .fi + .SH DESCRIPTION -The -.B ares_query -function initiates a single-question DNS query on the name service -channel identified by + +The \fBares_query_dnsrec(3)\fP and \fBares_query(3)\fP functions initiate a +single-question DNS query on the name service channel identified by .IR channel . The parameter .I name @@ -31,27 +44,27 @@ a label must be escaped with a backslash. The parameters .I dnsclass and .I type -give the class and type of the query using the values defined in -.BR . +give the class and type of the query. + +\fBares_query_dnsrec(3)\fP uses the ares \fBares_dns_class_t\fP and +\fBares_dns_rec_type_t\fP defined types. However, \fBares_query(3)\fP uses +the values defined in \fB\fP. + When the query is complete or has failed, the ares library will invoke .IR callback . -Completion or failure of the query may happen immediately, or may -happen during a later call to -.BR ares_process (3) -or -.BR ares_destroy (3). -.PP +Completion or failure of the query may happen immediately (even before the +return of the function call), or may happen during a later call to +\fBares_process(3)\fP or \fBares_destroy(3)\fP. + If this is called from a thread other than which the main program event loop is running, care needs to be taken to ensure any file descriptor lists are updated immediately within the eventloop. When the associated callback is called, it is called with a channel lock so care must be taken to ensure any processing is minimal to prevent DNS channel stalls. -.PP + The callback argument .I arg -is copied from the -.B ares_query -argument +is copied from the \fBares_query_dnsrec(3)\fP or \fBares_query(3)\fP argument .IR arg . 
The callback argument .I status @@ -73,9 +86,7 @@ The query completed but the server claims to have experienced a failure. (This code can only occur if the .B ARES_FLAG_NOCHECKRESP flag was specified at channel initialization time; otherwise, such -responses are ignored at the -.BR ares_send (3) -level.) +responses are ignored at the \fBares_send_dnsrec(3)\fP level.) .TP 19 .B ARES_ENOTFOUND The query completed but the queried-for domain name was not found. @@ -85,18 +96,14 @@ The query completed but the server does not implement the operation requested by the query. (This code can only occur if the .B ARES_FLAG_NOCHECKRESP flag was specified at channel initialization time; otherwise, such -responses are ignored at the -.BR ares_send (3) -level.) +responses are ignored at the \fBares_send_dnsrec(3)\fP level.) .TP 19 .B ARES_EREFUSED The query completed but the server refused the query. (This code can only occur if the .B ARES_FLAG_NOCHECKRESP flag was specified at channel initialization time; otherwise, such -responses are ignored at the -.BR ares_send (3) -level.) +responses are ignored at the \fBares_send_dnsrec(3)\fP level.) .TP 19 .B ARES_EBADNAME The query name @@ -126,23 +133,26 @@ is being destroyed; the query will not be completed. The query will not be completed because no DNS servers were configured on the channel. .PP + The callback argument .I timeouts reports how many times a query timed out during the execution of the given request. -.PP + If the query completed (even if there was something wrong with it, as indicated by some of the above error codes), the callback argument +.I dnsrec +or .I abuf -points to a result buffer of length -.IR alen . -If the query did not complete, -.I abuf -will be NULL and -.I alen -will be 0. +will be non-NULL, otherwise they will be NULL. + +.SH AVAILABILITY +\fBares_query_dnsrec(3)\fP was introduced in c-ares 1.28.0. + .SH SEE ALSO -.BR ares_process (3) +.BR ares_process (3), +.BR ares_dns_record (3) + .SH AUTHOR Greg Hudson, MIT Information Systems .br diff --git a/deps/cares/docs/ares_query_dnsrec.3 b/deps/cares/docs/ares_query_dnsrec.3 new file mode 100644 index 00000000000000..b178e987646055 --- /dev/null +++ b/deps/cares/docs/ares_query_dnsrec.3 @@ -0,0 +1,3 @@ +.\" Copyright (C) 2023 The c-ares project and its contributors. +.\" SPDX-License-Identifier: MIT +.so man3/ares_query.3 diff --git a/deps/cares/docs/ares_queue.3 b/deps/cares/docs/ares_queue.3 new file mode 100644 index 00000000000000..1212e8d3f8c3cc --- /dev/null +++ b/deps/cares/docs/ares_queue.3 @@ -0,0 +1,53 @@ +.\" +.\" SPDX-License-Identifier: MIT +.\" +.TH ARES_QUEUE 3 "16 February 2024" +.SH NAME +ares_queue_wait_empty, ares_queue_active_queries \- Functions for checking the +c-ares queue status +.SH SYNOPSIS +.nf +#include + +size_t ares_queue_active_queries(ares_channel_t *channel); + +ares_status_t ares_queue_wait_empty(ares_channel_t *channel, + int timeout_ms); +.fi +.SH DESCRIPTION +The \fBares_queue_active_queries(3)\fP function retrieves the total number of +active queries pending answers from servers. Some c-ares requests may spawn +multiple queries, such as \fIares_getaddrinfo(3)\fP when using \fIAF_UNSPEC\fP, +which will be reflected in this number. The \fBchannel\fP parameter must be set +to an initialized channel. + +The \fBares_queue_wait_empty(3)\fP function blocks until notified that there are +no longer any queries in queue, or the specified timeout has expired. The +\fBchannel\fP parameter must be set to an initialized channel. 
The +\fBtimeout_ms\fP parameter is the number of milliseconds to wait for the queue +to be empty or -1 for Infinite. + +.SH RETURN VALUES +\fIares_queue_active_queries(3)\fP returns the active query count. + +\fIares_queue_wait_empty(3)\fP can return any of the following values: +.TP 14 +.B ARES_ENOTIMP +if not built with threading support +.TP 14 +.B ARES_ETIMEOUT +if requested timeout expired +.TP 14 +.B ARES_SUCCESS +when queue is empty. +.TP 14 + +.SH AVAILABILITY +This function was first introduced in c-ares version 1.27.0, and requires the +c-ares library to be built with threading support. + +.SH SEE ALSO +.BR ares_init_options (3), +.BR ares_threadsafety (3) +.SH AUTHOR +Copyright (C) 2024 The c-ares project and its members. diff --git a/deps/cares/docs/ares_queue_active_queries.3 b/deps/cares/docs/ares_queue_active_queries.3 new file mode 100644 index 00000000000000..c16c69ddcb9e02 --- /dev/null +++ b/deps/cares/docs/ares_queue_active_queries.3 @@ -0,0 +1,3 @@ +.\" Copyright (C) 2024 The c-ares project and its contributors. +.\" SPDX-License-Identifier: MIT +.so man3/ares_queue.3 diff --git a/deps/cares/docs/ares_queue_wait_empty.3 b/deps/cares/docs/ares_queue_wait_empty.3 new file mode 100644 index 00000000000000..c16c69ddcb9e02 --- /dev/null +++ b/deps/cares/docs/ares_queue_wait_empty.3 @@ -0,0 +1,3 @@ +.\" Copyright (C) 2024 The c-ares project and its contributors. +.\" SPDX-License-Identifier: MIT +.so man3/ares_queue.3 diff --git a/deps/cares/docs/ares_search.3 b/deps/cares/docs/ares_search.3 index 08246d349d02fd..1a324b0ff47db5 100644 --- a/deps/cares/docs/ares_search.3 +++ b/deps/cares/docs/ares_search.3 @@ -9,13 +9,23 @@ ares_search \- Initiate a DNS query with domain search .nf #include +typedef void (*ares_callback_dnsrec)(void *\fIarg\fP, + ares_status_t \fIstatus\fP, + size_t \fItimeouts\fP, + const ares_dns_record_t *\fIdnsrec\fP); + +void ares_search_dnsrec(ares_channel_t *\fIchannel\fP, + const ares_dns_record_t *\fIdnsrec\fP, + ares_callback_dnsrec \fIcallback\fP, void *\fIarg\fP); + typedef void (*ares_callback)(void *\fIarg\fP, int \fIstatus\fP, int \fItimeouts\fP, unsigned char *\fIabuf\fP, - int \fIalen\fP) + int \fIalen\fP); void ares_search(ares_channel_t *\fIchannel\fP, const char *\fIname\fP, int \fIdnsclass\fP, int \fItype\fP, - ares_callback \fIcallback\fP, void *\fIarg\fP) + ares_callback \fIcallback\fP, void *\fIarg\fP); + .fi .SH DESCRIPTION The @@ -142,9 +152,34 @@ will usually be NULL and will usually be 0, but in some cases an unsuccessful query result may be placed in .IR abuf . + +The \fIares_search_dnsrec(3)\fP function behaves identically to +\fIares_search(3)\fP, but takes an initialized and filled DNS record object to +use for queries as the second argument +.I dnsrec +instead of a name, class and type. This object is used as the base for the +queries and must itself represent a valid query for a single name. Note that +the search domains will only be appended to the name in the question section; +RRs on the DNS record object will not be affected. Moreover, the +.I callback +argument is of type \fIares_callback_dnsrec\fP. This callback behaves +identically to \fIares_callback\fP, but is invoked with a parsed DNS record +object +.I dnsrec +rather than a raw buffer with length. Note that this object is read-only. + +The \fIares_search_dnsrec(3)\fP function returns an \fIares_status_t\fP response +code. This may be useful to know that the query was enqueued properly. 
The +response code does not reflect the result of the query, just the result of the +enqueuing of the query. + +.SH AVAILABILITY +\fBares_search_dnsrec(3)\fP was introduced in c-ares 1.28.0. + .SH SEE ALSO .BR ares_process (3), .BR ares_dns_record (3) + .SH AUTHOR Greg Hudson, MIT Information Systems .br diff --git a/deps/cares/docs/ares_search_dnsrec.3 b/deps/cares/docs/ares_search_dnsrec.3 new file mode 100644 index 00000000000000..86c2317c071144 --- /dev/null +++ b/deps/cares/docs/ares_search_dnsrec.3 @@ -0,0 +1,3 @@ +.\" Copyright (C) 2023 The c-ares project and its contributors. +.\" SPDX-License-Identifier: MIT +.so man3/ares_search.3 diff --git a/deps/cares/docs/ares_send.3 b/deps/cares/docs/ares_send.3 index 1fe1c0273e7379..010bb2579174bd 100644 --- a/deps/cares/docs/ares_send.3 +++ b/deps/cares/docs/ares_send.3 @@ -9,46 +9,78 @@ ares_send \- Initiate a DNS query .nf #include -typedef void (*ares_callback)(void *\fIarg\fP, int \fIstatus\fP, - int \fItimeouts\fP, unsigned char *\fIabuf\fP, - int \fIalen\fP) +typedef void (*ares_callback_dnsrec)(void *arg, ares_status_t status, + size_t timeouts, + const ares_dns_record_t *dnsrec); + +ares_status_t ares_send_dnsrec(ares_channel_t *channel, + const ares_dns_record_t *dnsrec, + ares_callback_dnsrec callback, + void *arg, unsigned short *qid); + +typedef void (*ares_callback)(void *arg, int status, + int timeouts, unsigned char *abuf, + int alen); + +void ares_send(ares_channel_t *channel, const unsigned char *qbuf, + int qlen, ares_callback callback, void *arg); -void ares_send(ares_channel_t *\fIchannel\fP, const unsigned char *\fIqbuf\fP, - int \fIqlen\fP, ares_callback \fIcallback\fP, void *\fIarg\fP) .fi .SH DESCRIPTION -The -.B ares_send -function initiates a DNS query on the name service channel identified -by -.IR channel . -The parameters -.I qbuf +The \fIares_send_dnsrec(3)\fP function initiates a DNS query formatted using the +\fIares_dns_record_t *\fP data structure created via +\fIares_dns_record_create(3)\fP in the +.IR dnsrec +parameter. The supplied callback in the +.IR callback +parameter also returns the response using a +\fIares_dns_record_t *\fP data structure. + +The \fIares_send(3)\fP function similarly initiates a DNS query, but instead uses +raw binary buffers with fully formatted DNS messages passed in the request via the +.IR qbuf and -.I qlen -give the DNS query, which should already have been formatted according -to the DNS protocol. When the query is complete or has failed, the -ares library will invoke -.IR callback . -Completion or failure of the query may happen immediately, or may -happen later as network events are processed. -.PP +.IR qlen +parameters. The supplied callback in the +.IR callback +parameter also returns the raw binary DNS response in the +.IR abuf +and +.IR alen +parameters. This method should be considered deprecated in favor of +\fIares_send_dnsrec(3)\fP. + +Both functions take an initialized ares channel identified by +.IR channel . + +The \fIares_send_dnsrec(3)\fP also can be supplied an optional output parameter of +.IR qid +to populate the query id as it was placed on the wire. + +The \fIares_send_dnsrec(3)\fP function returns an \fIares_status_t\fP response +code. This may be useful to know that the query was enqueued properly. The +response code does not reflect the result of the query, just the result of the +enqueuing of the query. 
+ +Completion or failure of the query may happen immediately (even before the +function returning), or may happen later as network events are processed. + When the associated callback is called, it is called with a channel lock so care must be taken to ensure any processing is minimal to prevent DNS channel stalls. The callback may be triggered from a different thread than the one which -called \fIares_send(3)\fP. +called \fIares_send_dnsrec(3)\fP or \fIares_send(3)\fP. For integrators running their own event loops and not using \fBARES_OPT_EVENT_THREAD\fP, care needs to be taken to ensure any file descriptor lists are updated immediately within the eventloop when notified. -.PP + The callback argument -.I arg -is copied from the -.B ares_send -argument -.IR arg . +.IR arg +is copied from the \fIares_send_dnsrec(3)\fP or \fIares_send(3)\fP +.IR arg +parameter. + The callback argument .I status indicates whether the query succeeded and, if not, how it failed. It @@ -82,43 +114,44 @@ is being destroyed; the query will not be completed. The query will not be completed because no DNS servers were configured on the channel. .PP + The callback argument .I timeouts reports how many times a query timed out during the execution of the given request. -.PP + If the query completed, the callback argument -.I abuf -points to a result buffer of length -.IR alen . -If the query did not complete, -.I abuf -will be NULL and -.I alen -will be 0. -.PP +.IR dnsrec +for \fIares_send_dnsrec(3)\fP or +.IR abuf +and +.IR alen +for \fIares_send(3)\fP will be non-NULL. + Unless the flag .B ARES_FLAG_NOCHECKRESP -was set at channel initialization time, -.B ares_send -will normally ignore responses whose questions do not match the -questions in -.IR qbuf , -as well as responses with reply codes of +was set at channel initialization time, \fIares_send_dnsrec(3)\fP and +\fIares_send(3)\fP will normally ignore responses whose questions do not match +the supplied questions, as well as responses with reply codes of .BR SERVFAIL , .BR NOTIMP , and .BR REFUSED . Unlike other query functions in the ares library, however, -.B ares_send -does not inspect the header of the reply packet to determine the error -status, so a callback status of +\fIares_send_dnsrec(3)\fP and \fIares_send(3)\fP do not inspect the header of +the reply packet to determine the error status, so a callback status of .B ARES_SUCCESS -does not reflect as much about the response as for other query -functions. +does not reflect as much about the response as for other query functions. + +.SH AVAILABILITY +\fBares_send_dnsrec(3)\fP was introduced in c-ares 1.28.0. + .SH SEE ALSO +.BR ares_dns_record_create (3), .BR ares_process (3), +.BR ares_search (3), .BR ares_dns_record (3) + .SH AUTHOR Greg Hudson, MIT Information Systems .br diff --git a/deps/cares/docs/ares_send_dnsrec.3 b/deps/cares/docs/ares_send_dnsrec.3 new file mode 100644 index 00000000000000..f5596f5cf049bc --- /dev/null +++ b/deps/cares/docs/ares_send_dnsrec.3 @@ -0,0 +1,3 @@ +.\" Copyright (C) 2023 The c-ares project and its contributors. 
+.\" SPDX-License-Identifier: MIT +.so man3/ares_send.3 diff --git a/deps/cares/include/Makefile.in b/deps/cares/include/Makefile.in index cf8cb55170cc8b..6e06995bfc5a3b 100644 --- a/deps/cares/include/Makefile.in +++ b/deps/cares/include/Makefile.in @@ -274,6 +274,7 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PKGCONFIG_CFLAGS = @PKGCONFIG_CFLAGS@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ diff --git a/deps/cares/include/ares.h b/deps/cares/include/ares.h index acbd6583074a56..bc17230e47262f 100644 --- a/deps/cares/include/ares.h +++ b/deps/cares/include/ares.h @@ -119,6 +119,37 @@ extern "C" { # endif #endif +#ifdef __GNUC__ +# define CARES_GCC_VERSION \ + (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +#else +# define CARES_GCC_VERSION 0 +#endif + +#ifndef __has_attribute +# define __has_attribute(x) 0 +#endif + +#ifdef CARES_NO_DEPRECATED +# define CARES_DEPRECATED +# define CARES_DEPRECATED_FOR(f) +#else +# if CARES_GCC_VERSION >= 30200 || __has_attribute(__deprecated__) +# define CARES_DEPRECATED __attribute__((__deprecated__)) +# else +# define CARES_DEPRECATED +# endif + +# if CARES_GCC_VERSION >= 40500 || defined(__clang__) +# define CARES_DEPRECATED_FOR(f) \ + __attribute__((deprecated("Use " #f " instead"))) +# elif defined(_MSC_VER) +# define CARES_DEPRECATED_FOR(f) __declspec(deprecated("Use " #f " instead")) +# else +# define CARES_DEPRECATED_FOR(f) CARES_DEPRECATED +# endif +#endif + typedef enum { ARES_SUCCESS = 0, @@ -352,29 +383,57 @@ typedef struct ares_channeldata *ares_channel; /* Current main channel typedef */ typedef struct ares_channeldata ares_channel_t; +/* + * NOTE: before c-ares 1.7.0 we would most often use the system in6_addr + * struct below when ares itself was built, but many apps would use this + * private version since the header checked a HAVE_* define for it. Starting + * with 1.7.0 we always declare and use our own to stop relying on the + * system's one. 
+ */ +struct ares_in6_addr { + union { + unsigned char _S6_u8[16]; + } _S6_un; +}; + +struct ares_addr { + int family; + + union { + struct in_addr addr4; + struct ares_in6_addr addr6; + } addr; +}; + +/* DNS record parser, writer, and helpers */ +#include "ares_dns_record.h" -typedef void (*ares_callback)(void *arg, int status, int timeouts, +typedef void (*ares_callback)(void *arg, int status, int timeouts, unsigned char *abuf, int alen); -typedef void (*ares_host_callback)(void *arg, int status, int timeouts, +typedef void (*ares_callback_dnsrec)(void *arg, ares_status_t status, + size_t timeouts, + const ares_dns_record_t *dnsrec); + +typedef void (*ares_host_callback)(void *arg, int status, int timeouts, struct hostent *hostent); -typedef void (*ares_nameinfo_callback)(void *arg, int status, int timeouts, +typedef void (*ares_nameinfo_callback)(void *arg, int status, int timeouts, char *node, char *service); -typedef int (*ares_sock_create_callback)(ares_socket_t socket_fd, int type, +typedef int (*ares_sock_create_callback)(ares_socket_t socket_fd, int type, void *data); -typedef int (*ares_sock_config_callback)(ares_socket_t socket_fd, int type, +typedef int (*ares_sock_config_callback)(ares_socket_t socket_fd, int type, void *data); -typedef void (*ares_addrinfo_callback)(void *arg, int status, int timeouts, +typedef void (*ares_addrinfo_callback)(void *arg, int status, int timeouts, struct ares_addrinfo *res); CARES_EXTERN int ares_library_init(int flags); CARES_EXTERN int ares_library_init_mem(int flags, void *(*amalloc)(size_t size), - void (*afree)(void *ptr), + void (*afree)(void *ptr), void *(*arealloc)(void *ptr, size_t size)); @@ -384,13 +443,14 @@ CARES_EXTERN int ares_library_init_android(jobject connectivity_manager); CARES_EXTERN int ares_library_android_initialized(void); #endif -CARES_EXTERN int ares_library_initialized(void); +CARES_EXTERN int ares_library_initialized(void); -CARES_EXTERN void ares_library_cleanup(void); +CARES_EXTERN void ares_library_cleanup(void); -CARES_EXTERN const char *ares_version(int *version); +CARES_EXTERN const char *ares_version(int *version); -CARES_EXTERN int ares_init(ares_channel_t **channelptr); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_init_options) int ares_init( + ares_channel_t **channelptr); CARES_EXTERN int ares_init_options(ares_channel_t **channelptr, const struct ares_options *options, @@ -453,7 +513,7 @@ struct iovec; struct ares_socket_functions { ares_socket_t (*asocket)(int, int, int, void *); - int (*aclose)(ares_socket_t, void *); + int (*aclose)(ares_socket_t, void *); int (*aconnect)(ares_socket_t, const struct sockaddr *, ares_socklen_t, void *); ares_ssize_t (*arecvfrom)(ares_socket_t, void *, size_t, int, @@ -462,24 +522,76 @@ struct ares_socket_functions { }; CARES_EXTERN void - ares_set_socket_functions(ares_channel_t *channel, - const struct ares_socket_functions *funcs, - void *user_data); + ares_set_socket_functions(ares_channel_t *channel, + const struct ares_socket_functions *funcs, + void *user_data); + +CARES_EXTERN CARES_DEPRECATED_FOR(ares_send_dnsrec) void ares_send( + ares_channel_t *channel, const unsigned char *qbuf, int qlen, + ares_callback callback, void *arg); -CARES_EXTERN void ares_send(ares_channel_t *channel, const unsigned char *qbuf, - int qlen, ares_callback callback, void *arg); +/*! Send a DNS query as an ares_dns_record_t with a callback containing the + * parsed DNS record. + * + * \param[in] channel Pointer to channel on which queries will be sent. 
+ * \param[in] dnsrec DNS Record to send + * \param[in] callback Callback function invoked on completion or failure of + * the query sequence. + * \param[in] arg Additional argument passed to the callback function. + * \param[out] qid Query ID + * \return One of the c-ares status codes. + */ +CARES_EXTERN ares_status_t ares_send_dnsrec(ares_channel_t *channel, + const ares_dns_record_t *dnsrec, + ares_callback_dnsrec callback, + void *arg, unsigned short *qid); -CARES_EXTERN void ares_query(ares_channel_t *channel, const char *name, - int dnsclass, int type, ares_callback callback, - void *arg); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_query_dnsrec) void ares_query( + ares_channel_t *channel, const char *name, int dnsclass, int type, + ares_callback callback, void *arg); -CARES_EXTERN void ares_search(ares_channel_t *channel, const char *name, - int dnsclass, int type, ares_callback callback, - void *arg); +/*! Perform a DNS query with a callback containing the parsed DNS record. + * + * \param[in] channel Pointer to channel on which queries will be sent. + * \param[in] name Query name + * \param[in] dnsclass DNS Class + * \param[in] type DNS Record Type + * \param[in] callback Callback function invoked on completion or failure of + * the query sequence. + * \param[in] arg Additional argument passed to the callback function. + * \param[out] qid Query ID + * \return One of the c-ares status codes. + */ +CARES_EXTERN ares_status_t ares_query_dnsrec(ares_channel_t *channel, + const char *name, + ares_dns_class_t dnsclass, + ares_dns_rec_type_t type, + ares_callback_dnsrec callback, + void *arg, unsigned short *qid); + +CARES_EXTERN CARES_DEPRECATED_FOR(ares_search_dnsrec) void ares_search( + ares_channel_t *channel, const char *name, int dnsclass, int type, + ares_callback callback, void *arg); + +/*! Search for a complete DNS message. + * + * \param[in] channel Pointer to channel on which queries will be sent. + * \param[in] dnsrec Pointer to initialized and filled DNS record object. + * \param[in] callback Callback function invoked on completion or failure of + * the query sequence. + * \param[in] arg Additional argument passed to the callback function. + * \return One of the c-ares status codes. In all cases, except + * ARES_EFORMERR due to misuse, this error code will also be sent + * to the provided callback. 
+ */ +CARES_EXTERN ares_status_t ares_search_dnsrec(ares_channel_t *channel, + const ares_dns_record_t *dnsrec, + ares_callback_dnsrec callback, + void *arg); -CARES_EXTERN void ares_gethostbyname(ares_channel_t *channel, const char *name, - int family, ares_host_callback callback, - void *arg); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_getaddrinfo) void ares_gethostbyname( + ares_channel_t *channel, const char *name, int family, + ares_host_callback callback, void *arg); CARES_EXTERN int ares_gethostbyname_file(ares_channel_t *channel, const char *name, int family, @@ -494,61 +606,42 @@ CARES_EXTERN void ares_getnameinfo(ares_channel_t *channel, ares_socklen_t salen, int flags, ares_nameinfo_callback callback, void *arg); -CARES_EXTERN int ares_fds(ares_channel_t *channel, fd_set *read_fds, - fd_set *write_fds); +CARES_EXTERN CARES_DEPRECATED_FOR( + ARES_OPT_EVENT_THREAD or + ARES_OPT_SOCK_STATE_CB) int ares_fds(ares_channel_t *channel, + fd_set *read_fds, fd_set *write_fds); -CARES_EXTERN int ares_getsock(ares_channel_t *channel, ares_socket_t *socks, - int numsocks); +CARES_EXTERN CARES_DEPRECATED_FOR( + ARES_OPT_EVENT_THREAD or + ARES_OPT_SOCK_STATE_CB) int ares_getsock(ares_channel_t *channel, + ares_socket_t *socks, int numsocks); CARES_EXTERN struct timeval *ares_timeout(ares_channel_t *channel, struct timeval *maxtv, struct timeval *tv); -CARES_EXTERN void ares_process(ares_channel_t *channel, fd_set *read_fds, - fd_set *write_fds); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_process_fd) void ares_process( + ares_channel_t *channel, fd_set *read_fds, fd_set *write_fds); CARES_EXTERN void ares_process_fd(ares_channel_t *channel, ares_socket_t read_fd, ares_socket_t write_fd); -CARES_EXTERN int ares_create_query(const char *name, int dnsclass, int type, - unsigned short id, int rd, - unsigned char **buf, int *buflen, - int max_udp_size); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_record_create) int ares_create_query( + const char *name, int dnsclass, int type, unsigned short id, int rd, + unsigned char **buf, int *buflen, int max_udp_size); -CARES_EXTERN int ares_mkquery(const char *name, int dnsclass, int type, - unsigned short id, int rd, unsigned char **buf, - int *buflen); - -CARES_EXTERN int ares_expand_name(const unsigned char *encoded, - const unsigned char *abuf, int alen, char **s, - long *enclen); - -CARES_EXTERN int ares_expand_string(const unsigned char *encoded, - const unsigned char *abuf, int alen, - unsigned char **s, long *enclen); - -/* - * NOTE: before c-ares 1.7.0 we would most often use the system in6_addr - * struct below when ares itself was built, but many apps would use this - * private version since the header checked a HAVE_* define for it. Starting - * with 1.7.0 we always declare and use our own to stop relying on the - * system's one. 
- */ -struct ares_in6_addr { - union { - unsigned char _S6_u8[16]; - } _S6_un; -}; +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_record_create) int ares_mkquery( + const char *name, int dnsclass, int type, unsigned short id, int rd, + unsigned char **buf, int *buflen); -struct ares_addr { - int family; +CARES_EXTERN int ares_expand_name(const unsigned char *encoded, + const unsigned char *abuf, int alen, char **s, + long *enclen); - union { - struct in_addr addr4; - struct ares_in6_addr addr6; - } addr; -}; +CARES_EXTERN int ares_expand_string(const unsigned char *encoded, + const unsigned char *abuf, int alen, + unsigned char **s, long *enclen); struct ares_addrttl { struct in_addr ipaddr; @@ -675,52 +768,50 @@ struct ares_addrinfo_hints { ** so written. */ -CARES_EXTERN int ares_parse_a_reply(const unsigned char *abuf, int alen, - struct hostent **host, - struct ares_addrttl *addrttls, - int *naddrttls); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_a_reply( + const unsigned char *abuf, int alen, struct hostent **host, + struct ares_addrttl *addrttls, int *naddrttls); -CARES_EXTERN int ares_parse_aaaa_reply(const unsigned char *abuf, int alen, - struct hostent **host, - struct ares_addr6ttl *addrttls, - int *naddrttls); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_aaaa_reply( + const unsigned char *abuf, int alen, struct hostent **host, + struct ares_addr6ttl *addrttls, int *naddrttls); -CARES_EXTERN int ares_parse_caa_reply(const unsigned char *abuf, int alen, - struct ares_caa_reply **caa_out); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_caa_reply( + const unsigned char *abuf, int alen, struct ares_caa_reply **caa_out); -CARES_EXTERN int ares_parse_ptr_reply(const unsigned char *abuf, int alen, - const void *addr, int addrlen, int family, - struct hostent **host); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_ptr_reply( + const unsigned char *abuf, int alen, const void *addr, int addrlen, + int family, struct hostent **host); -CARES_EXTERN int ares_parse_ns_reply(const unsigned char *abuf, int alen, - struct hostent **host); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_ns_reply( + const unsigned char *abuf, int alen, struct hostent **host); -CARES_EXTERN int ares_parse_srv_reply(const unsigned char *abuf, int alen, - struct ares_srv_reply **srv_out); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_srv_reply( + const unsigned char *abuf, int alen, struct ares_srv_reply **srv_out); -CARES_EXTERN int ares_parse_mx_reply(const unsigned char *abuf, int alen, - struct ares_mx_reply **mx_out); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_mx_reply( + const unsigned char *abuf, int alen, struct ares_mx_reply **mx_out); -CARES_EXTERN int ares_parse_txt_reply(const unsigned char *abuf, int alen, - struct ares_txt_reply **txt_out); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_txt_reply( + const unsigned char *abuf, int alen, struct ares_txt_reply **txt_out); -CARES_EXTERN int ares_parse_txt_reply_ext(const unsigned char *abuf, int alen, - struct ares_txt_ext **txt_out); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_txt_reply_ext( + const unsigned char *abuf, int alen, struct ares_txt_ext **txt_out); -CARES_EXTERN int ares_parse_naptr_reply(const unsigned char *abuf, int alen, - struct ares_naptr_reply **naptr_out); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_naptr_reply( + const unsigned char 
*abuf, int alen, struct ares_naptr_reply **naptr_out); -CARES_EXTERN int ares_parse_soa_reply(const unsigned char *abuf, int alen, - struct ares_soa_reply **soa_out); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_soa_reply( + const unsigned char *abuf, int alen, struct ares_soa_reply **soa_out); -CARES_EXTERN int ares_parse_uri_reply(const unsigned char *abuf, int alen, - struct ares_uri_reply **uri_out); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_dns_parse) int ares_parse_uri_reply( + const unsigned char *abuf, int alen, struct ares_uri_reply **uri_out); -CARES_EXTERN void ares_free_string(void *str); +CARES_EXTERN void ares_free_string(void *str); -CARES_EXTERN void ares_free_hostent(struct hostent *host); +CARES_EXTERN void ares_free_hostent(struct hostent *host); -CARES_EXTERN void ares_free_data(void *dataptr); +CARES_EXTERN void ares_free_data(void *dataptr); CARES_EXTERN const char *ares_strerror(int code); @@ -747,23 +838,26 @@ struct ares_addr_port_node { int tcp_port; }; -CARES_EXTERN int ares_set_servers(ares_channel_t *channel, - const struct ares_addr_node *servers); -CARES_EXTERN int - ares_set_servers_ports(ares_channel_t *channel, - const struct ares_addr_port_node *servers); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_set_servers_csv) int ares_set_servers( + ares_channel_t *channel, const struct ares_addr_node *servers); + +CARES_EXTERN + CARES_DEPRECATED_FOR(ares_set_servers_ports_csv) int ares_set_servers_ports( + ares_channel_t *channel, const struct ares_addr_port_node *servers); /* Incoming string format: host[:port][,host[:port]]... */ -CARES_EXTERN int ares_set_servers_csv(ares_channel_t *channel, - const char *servers); -CARES_EXTERN int ares_set_servers_ports_csv(ares_channel_t *channel, - const char *servers); -CARES_EXTERN char *ares_get_servers_csv(ares_channel_t *channel); +CARES_EXTERN int ares_set_servers_csv(ares_channel_t *channel, + const char *servers); +CARES_EXTERN int ares_set_servers_ports_csv(ares_channel_t *channel, + const char *servers); +CARES_EXTERN char *ares_get_servers_csv(ares_channel_t *channel); -CARES_EXTERN int ares_get_servers(ares_channel_t *channel, - struct ares_addr_node **servers); -CARES_EXTERN int ares_get_servers_ports(ares_channel_t *channel, - struct ares_addr_port_node **servers); +CARES_EXTERN CARES_DEPRECATED_FOR(ares_get_servers_csv) int ares_get_servers( + ares_channel_t *channel, struct ares_addr_node **servers); + +CARES_EXTERN + CARES_DEPRECATED_FOR(ares_get_servers_ports_csv) int ares_get_servers_ports( + ares_channel_t *channel, struct ares_addr_port_node **servers); CARES_EXTERN const char *ares_inet_ntop(int af, const void *src, char *dst, ares_socklen_t size); @@ -803,7 +897,4 @@ CARES_EXTERN size_t ares_queue_active_queries(ares_channel_t *channel); } #endif -/* DNS record parser, writer, and helpers */ -#include "ares_dns_record.h" - #endif /* ARES__H */ diff --git a/deps/cares/include/ares_dns_record.h b/deps/cares/include/ares_dns_record.h index 3f802aefa3231e..8d09bd0a464254 100644 --- a/deps/cares/include/ares_dns_record.h +++ b/deps/cares/include/ares_dns_record.h @@ -393,11 +393,11 @@ typedef enum { /*! Parse Additional from RFC 1035 that allow name compression as RAW */ ARES_DNS_PARSE_AR_BASE_RAW = 1 << 2, /*! Parse Answers from later RFCs (no name compression) RAW */ - ARES_DNS_PARSE_AN_EXT_RAW = 1 << 3, + ARES_DNS_PARSE_AN_EXT_RAW = 1 << 3, /*! 
Parse Authority from later RFCs (no name compression) as RAW */ - ARES_DNS_PARSE_NS_EXT_RAW = 1 << 4, - /*< Parse Additional from later RFCs (no name compression) as RAW */ - ARES_DNS_PARSE_AR_EXT_RAW = 1 << 5 + ARES_DNS_PARSE_NS_EXT_RAW = 1 << 4, + /*! Parse Additional from later RFCs (no name compression) as RAW */ + ARES_DNS_PARSE_AR_EXT_RAW = 1 << 5 } ares_dns_parse_flags_t; /*! String representation of DNS Record Type @@ -468,7 +468,7 @@ CARES_EXTERN const char *ares_dns_rcode_tostr(ares_dns_rcode_t rcode); * \param[in] ipaddr ASCII string form of the ip address * \param[in,out] addr Must set "family" member to one of AF_UNSPEC, * AF_INET, AF_INET6 on input. - * \param[out] ptr_len Length of binary form address + * \param[out] out_len Length of binary form address * \return Pointer to start of binary address or NULL on error. */ CARES_EXTERN const void *ares_dns_pton(const char *ipaddr, @@ -619,6 +619,32 @@ CARES_EXTERN ares_status_t ares_dns_record_query_add(ares_dns_record_t *dnsrec, ares_dns_rec_type_t qtype, ares_dns_class_t qclass); +/*! Replace the question name with a new name. This may be used when performing + * a search with aliases. + * + * Note that this will invalidate the name pointer returned from + * ares_dns_record_query_get(). + * + * \param[in] dnsrec Initialized record object + * \param[in] idx Index of question (typically 0) + * \param[in] name Name to use as replacement. + * \return ARES_SUCCESS on success + */ +CARES_EXTERN ares_status_t ares_dns_record_query_set_name( + ares_dns_record_t *dnsrec, size_t idx, const char *name); + + +/*! Replace the question type with a different type. This may be used when + * needing to query more than one address class (e.g. A and AAAA) + * + * \param[in] dnsrec Initialized record object + * \param[in] idx Index of question (typically 0) + * \param[in] qtype Record Type to use as replacement. + * \return ARES_SUCCESS on success + */ +CARES_EXTERN ares_status_t ares_dns_record_query_set_type( + ares_dns_record_t *dnsrec, size_t idx, ares_dns_rec_type_t qtype); + /*! Get the count of queries in the DNS Record * * \param[in] dnsrec Initialized record object @@ -631,6 +657,8 @@ CARES_EXTERN size_t ares_dns_record_query_cnt(const ares_dns_record_t *dnsrec); * \param[in] dnsrec Initialized record object * \param[in] idx Index of query * \param[out] name Optional. Returns name, may pass NULL if not desired. + * This pointer will be invalided by any call to + * ares_dns_record_query_set_name(). * \param[out] qtype Optional. Returns record type, may pass NULL. * \param[out] qclass Optional. Returns class, may pass NULL. * \return ARES_SUCCESS on success @@ -667,17 +695,28 @@ CARES_EXTERN ares_status_t ares_dns_record_rr_add( const char *name, ares_dns_rec_type_t type, ares_dns_class_t rclass, unsigned int ttl); -/*! Fetch a resource record based on the section and index. +/*! Fetch a writable resource record based on the section and index. * * \param[in] dnsrec Initialized record object * \param[in] sect Section for resource record * \param[in] idx Index of resource record in section - * \return NULL on misuse, otherwise a pointer to the resource record + * \return NULL on misuse, otherwise a writable pointer to the resource record */ CARES_EXTERN ares_dns_rr_t *ares_dns_record_rr_get(ares_dns_record_t *dnsrec, ares_dns_section_t sect, size_t idx); +/*! Fetch a non-writeable resource record based on the section and index. 
+ * + * \param[in] dnsrec Initialized record object + * \param[in] sect Section for resource record + * \param[in] idx Index of resource record in section + * \return NULL on misuse, otherwise a const pointer to the resource record + */ +CARES_EXTERN const ares_dns_rr_t * + ares_dns_record_rr_get_const(const ares_dns_record_t *dnsrec, + ares_dns_section_t sect, size_t idx); + /*! Remove the resource record based on the section and index * @@ -686,9 +725,9 @@ CARES_EXTERN ares_dns_rr_t *ares_dns_record_rr_get(ares_dns_record_t *dnsrec, * \param[in] idx Index of resource record in section * \return ARES_SUCCESS on success, otherwise an error code. */ -CARES_EXTERN ares_status_t ares_dns_record_rr_del(ares_dns_record_t *dnsrec, - ares_dns_section_t sect, - size_t idx); +CARES_EXTERN ares_status_t ares_dns_record_rr_del(ares_dns_record_t *dnsrec, + ares_dns_section_t sect, + size_t idx); /*! Retrieve the resource record Name/Hostname @@ -696,7 +735,7 @@ CARES_EXTERN ares_status_t ares_dns_record_rr_del(ares_dns_record_t *dnsrec, * \param[in] rr Pointer to resource record * \return Name */ -CARES_EXTERN const char *ares_dns_rr_get_name(const ares_dns_rr_t *rr); +CARES_EXTERN const char *ares_dns_rr_get_name(const ares_dns_rr_t *rr); /*! Retrieve the resource record type * @@ -959,8 +998,19 @@ CARES_EXTERN ares_status_t ares_dns_parse(const unsigned char *buf, * \param[out] buf_len Length of returned buffer containing DNS message. * \return ARES_SUCCESS on success */ -CARES_EXTERN ares_status_t ares_dns_write(ares_dns_record_t *dnsrec, +CARES_EXTERN ares_status_t ares_dns_write(const ares_dns_record_t *dnsrec, unsigned char **buf, size_t *buf_len); + + +/*! Duplicate a complete DNS message. This does not copy internal members + * (such as the ttl decrement capability). + * + * \param[in] dnsrec Pointer to initialized and filled DNS record object. + * \return duplicted DNS record object, or NULL on out of memory. + */ +CARES_EXTERN ares_dns_record_t * + ares_dns_record_duplicate(const ares_dns_record_t *dnsrec); + /*! @} */ #ifdef __cplusplus diff --git a/deps/cares/include/ares_version.h b/deps/cares/include/ares_version.h index 44dbdef161ac35..0e94a98be8f280 100644 --- a/deps/cares/include/ares_version.h +++ b/deps/cares/include/ares_version.h @@ -31,12 +31,12 @@ #define ARES_COPYRIGHT "2004 - 2024 Daniel Stenberg, ." 
#define ARES_VERSION_MAJOR 1 -#define ARES_VERSION_MINOR 27 -#define ARES_VERSION_PATCH 0 +#define ARES_VERSION_MINOR 28 +#define ARES_VERSION_PATCH 1 #define ARES_VERSION \ ((ARES_VERSION_MAJOR << 16) | (ARES_VERSION_MINOR << 8) | \ (ARES_VERSION_PATCH)) -#define ARES_VERSION_STR "1.27.0" +#define ARES_VERSION_STR "1.28.1" #if (ARES_VERSION >= 0x010700) # define CARES_HAVE_ARES_LIBRARY_INIT 1 diff --git a/deps/cares/src/Makefile.in b/deps/cares/src/Makefile.in index 040373fe95a247..f657ef0d43e7b9 100644 --- a/deps/cares/src/Makefile.in +++ b/deps/cares/src/Makefile.in @@ -285,6 +285,7 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PKGCONFIG_CFLAGS = @PKGCONFIG_CFLAGS@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ diff --git a/deps/cares/src/lib/CMakeLists.txt b/deps/cares/src/lib/CMakeLists.txt index 015e57f8193ebd..de73f712f1d1ce 100644 --- a/deps/cares/src/lib/CMakeLists.txt +++ b/deps/cares/src/lib/CMakeLists.txt @@ -31,7 +31,6 @@ IF (CARES_SHARED) EXPORT_NAME cares OUTPUT_NAME cares COMPILE_PDB_NAME cares - COMPILE_PDB_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} SOVERSION ${CARES_LIB_VERSION_MAJOR} VERSION "${CARES_LIB_VERSION_MAJOR}.${CARES_LIB_VERSION_MINOR}.${CARES_LIB_VERSION_RELEASE}" C_STANDARD 90 @@ -65,11 +64,13 @@ IF (CARES_SHARED) COMPONENT Library ${TARGETS_INST_DEST} ) - INSTALL(FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/cares.pdb - DESTINATION ${CMAKE_INSTALL_BINDIR} - COMPONENT Library - OPTIONAL - ) + IF (MSVC) + INSTALL(FILES $ + DESTINATION ${CMAKE_INSTALL_BINDIR} + COMPONENT Library + OPTIONAL + ) + ENDIF () ENDIF () SET (STATIC_SUFFIX "_static") @@ -88,7 +89,6 @@ IF (CARES_STATIC) EXPORT_NAME cares${STATIC_SUFFIX} OUTPUT_NAME cares${STATIC_SUFFIX} COMPILE_PDB_NAME cares${STATIC_SUFFIX} - COMPILE_PDB_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} C_STANDARD 90 ) @@ -116,11 +116,6 @@ IF (CARES_STATIC) INSTALL (TARGETS ${LIBNAME} EXPORT ${PROJECT_NAME}-targets COMPONENT Devel ${TARGETS_INST_DEST} ) - INSTALL(FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/cares${STATIC_SUFFIX}.pdb - DESTINATION ${CMAKE_INSTALL_BINDIR} - COMPONENT Library - OPTIONAL - ) ENDIF () # For chain building: add alias targets that look like import libs that would be returned by find_package(c-ares). 
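
Editor's sketch, not part of the patch: issuing a query through the ares_query_dnsrec()/ares_callback_dnsrec API declared in the ares.h changes above and draining the queue with ares_queue_wait_empty(). It assumes a c-ares build with threading support; the event-thread option fields (evsys, ARES_EVSYS_DEFAULT) come from the wider c-ares options API rather than this diff:

#include <ares.h>
#include <stdio.h>
#include <string.h>

static void on_result(void *arg, ares_status_t status, size_t timeouts,
                      const ares_dns_record_t *dnsrec)
{
  (void)arg;
  (void)timeouts;
  if (status != ARES_SUCCESS || dnsrec == NULL) {
    fprintf(stderr, "query failed: %s\n", ares_strerror((int)status));
    return;
  }
  printf("answers: %u\n",
         (unsigned)ares_dns_record_rr_cnt(dnsrec, ARES_SECTION_ANSWER));
}

int main(void)
{
  ares_channel_t     *channel = NULL;
  struct ares_options opts;
  int                 optmask = ARES_OPT_EVENT_THREAD;

  memset(&opts, 0, sizeof(opts));
  opts.evsys = ARES_EVSYS_DEFAULT;  /* let c-ares run its own event thread */

  ares_library_init(ARES_LIB_INIT_ALL);
  if (ares_init_options(&channel, &opts, optmask) != ARES_SUCCESS) {
    return 1;
  }

  /* The return code only reports whether enqueuing succeeded; the query
   * result arrives via the callback. */
  ares_query_dnsrec(channel, "example.com", ARES_CLASS_IN, ARES_REC_TYPE_A,
                    on_result, NULL, NULL /* qid not needed */);

  /* Block until no queries remain in flight (-1 = no timeout). */
  ares_queue_wait_empty(channel, -1);

  ares_destroy(channel);
  ares_library_cleanup();
  return 0;
}
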
diff --git a/deps/cares/src/lib/Makefile.in b/deps/cares/src/lib/Makefile.in index c516cba2cf46d2..0060295c21e8ef 100644 --- a/deps/cares/src/lib/Makefile.in +++ b/deps/cares/src/lib/Makefile.in @@ -15,7 +15,7 @@ @SET_MAKE@ # aminclude_static.am generated automatically by Autoconf -# from AX_AM_MACROS_STATIC on Fri Jan 26 17:16:19 CET 2024 +# from AX_AM_MACROS_STATIC on Sat Mar 30 16:15:43 CET 2024 # Copyright (C) The c-ares project and its contributors # SPDX-License-Identifier: MIT @@ -169,14 +169,14 @@ am__objects_1 = libcares_la-ares__addrinfo2hostent.lo \ libcares_la-ares__htable_szvp.lo \ libcares_la-ares__iface_ips.lo libcares_la-ares__llist.lo \ libcares_la-ares__parse_into_addrinfo.lo \ - libcares_la-ares__read_line.lo libcares_la-ares__slist.lo \ - libcares_la-ares__socket.lo libcares_la-ares__sortaddrinfo.lo \ - libcares_la-ares__threads.lo libcares_la-ares__timeval.lo \ - libcares_la-ares_android.lo libcares_la-ares_cancel.lo \ - libcares_la-ares_data.lo libcares_la-ares_destroy.lo \ - libcares_la-ares_dns_mapping.lo libcares_la-ares_dns_name.lo \ - libcares_la-ares_dns_parse.lo libcares_la-ares_dns_record.lo \ - libcares_la-ares_dns_write.lo libcares_la-ares_event_epoll.lo \ + libcares_la-ares__slist.lo libcares_la-ares__socket.lo \ + libcares_la-ares__sortaddrinfo.lo libcares_la-ares__threads.lo \ + libcares_la-ares__timeval.lo libcares_la-ares_android.lo \ + libcares_la-ares_cancel.lo libcares_la-ares_data.lo \ + libcares_la-ares_destroy.lo libcares_la-ares_dns_mapping.lo \ + libcares_la-ares_dns_name.lo libcares_la-ares_dns_parse.lo \ + libcares_la-ares_dns_record.lo libcares_la-ares_dns_write.lo \ + libcares_la-ares_event_epoll.lo \ libcares_la-ares_event_kqueue.lo \ libcares_la-ares_event_poll.lo \ libcares_la-ares_event_select.lo \ @@ -193,9 +193,8 @@ am__objects_1 = libcares_la-ares__addrinfo2hostent.lo \ libcares_la-ares_gethostbyname.lo \ libcares_la-ares_getnameinfo.lo libcares_la-ares_getsock.lo \ libcares_la-ares_init.lo libcares_la-ares_library_init.lo \ - libcares_la-ares_math.lo libcares_la-ares_mkquery.lo \ - libcares_la-ares_create_query.lo libcares_la-ares_options.lo \ - libcares_la-ares_parse_a_reply.lo \ + libcares_la-ares_math.lo libcares_la-ares_create_query.lo \ + libcares_la-ares_options.lo libcares_la-ares_parse_a_reply.lo \ libcares_la-ares_parse_aaaa_reply.lo \ libcares_la-ares_parse_caa_reply.lo \ libcares_la-ares_parse_mx_reply.lo \ @@ -254,7 +253,6 @@ am__depfiles_remade = \ ./$(DEPDIR)/libcares_la-ares__iface_ips.Plo \ ./$(DEPDIR)/libcares_la-ares__llist.Plo \ ./$(DEPDIR)/libcares_la-ares__parse_into_addrinfo.Plo \ - ./$(DEPDIR)/libcares_la-ares__read_line.Plo \ ./$(DEPDIR)/libcares_la-ares__slist.Plo \ ./$(DEPDIR)/libcares_la-ares__socket.Plo \ ./$(DEPDIR)/libcares_la-ares__sortaddrinfo.Plo \ @@ -292,7 +290,6 @@ am__depfiles_remade = \ ./$(DEPDIR)/libcares_la-ares_init.Plo \ ./$(DEPDIR)/libcares_la-ares_library_init.Plo \ ./$(DEPDIR)/libcares_la-ares_math.Plo \ - ./$(DEPDIR)/libcares_la-ares_mkquery.Plo \ ./$(DEPDIR)/libcares_la-ares_options.Plo \ ./$(DEPDIR)/libcares_la-ares_parse_a_reply.Plo \ ./$(DEPDIR)/libcares_la-ares_parse_aaaa_reply.Plo \ @@ -505,6 +502,7 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PKGCONFIG_CFLAGS = @PKGCONFIG_CFLAGS@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ @@ -628,7 +626,6 @@ CSOURCES = ares__addrinfo2hostent.c \ ares__iface_ips.c \ ares__llist.c \ 
ares__parse_into_addrinfo.c \ - ares__read_line.c \ ares__slist.c \ ares__socket.c \ ares__sortaddrinfo.c \ @@ -665,7 +662,6 @@ CSOURCES = ares__addrinfo2hostent.c \ ares_init.c \ ares_library_init.c \ ares_math.c \ - ares_mkquery.c \ ares_create_query.c \ ares_options.c \ ares_parse_a_reply.c \ @@ -834,7 +830,6 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares__iface_ips.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares__llist.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares__parse_into_addrinfo.Plo@am__quote@ # am--include-marker -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares__read_line.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares__slist.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares__socket.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares__sortaddrinfo.Plo@am__quote@ # am--include-marker @@ -872,7 +867,6 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares_init.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares_library_init.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares_math.Plo@am__quote@ # am--include-marker -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares_mkquery.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares_options.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares_parse_a_reply.Plo@am__quote@ # am--include-marker @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libcares_la-ares_parse_aaaa_reply.Plo@am__quote@ # am--include-marker @@ -1019,13 +1013,6 @@ libcares_la-ares__parse_into_addrinfo.lo: ares__parse_into_addrinfo.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcares_la_CPPFLAGS) $(CPPFLAGS) $(libcares_la_CFLAGS) $(CFLAGS) -c -o libcares_la-ares__parse_into_addrinfo.lo `test -f 'ares__parse_into_addrinfo.c' || echo '$(srcdir)/'`ares__parse_into_addrinfo.c -libcares_la-ares__read_line.lo: ares__read_line.c -@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcares_la_CPPFLAGS) $(CPPFLAGS) $(libcares_la_CFLAGS) $(CFLAGS) -MT libcares_la-ares__read_line.lo -MD -MP -MF $(DEPDIR)/libcares_la-ares__read_line.Tpo -c -o libcares_la-ares__read_line.lo `test -f 'ares__read_line.c' || echo '$(srcdir)/'`ares__read_line.c -@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcares_la-ares__read_line.Tpo $(DEPDIR)/libcares_la-ares__read_line.Plo -@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='ares__read_line.c' object='libcares_la-ares__read_line.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcares_la_CPPFLAGS) $(CPPFLAGS) 
$(libcares_la_CFLAGS) $(CFLAGS) -c -o libcares_la-ares__read_line.lo `test -f 'ares__read_line.c' || echo '$(srcdir)/'`ares__read_line.c - libcares_la-ares__slist.lo: ares__slist.c @am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcares_la_CPPFLAGS) $(CPPFLAGS) $(libcares_la_CFLAGS) $(CFLAGS) -MT libcares_la-ares__slist.lo -MD -MP -MF $(DEPDIR)/libcares_la-ares__slist.Tpo -c -o libcares_la-ares__slist.lo `test -f 'ares__slist.c' || echo '$(srcdir)/'`ares__slist.c @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcares_la-ares__slist.Tpo $(DEPDIR)/libcares_la-ares__slist.Plo @@ -1278,13 +1265,6 @@ libcares_la-ares_math.lo: ares_math.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcares_la_CPPFLAGS) $(CPPFLAGS) $(libcares_la_CFLAGS) $(CFLAGS) -c -o libcares_la-ares_math.lo `test -f 'ares_math.c' || echo '$(srcdir)/'`ares_math.c -libcares_la-ares_mkquery.lo: ares_mkquery.c -@am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcares_la_CPPFLAGS) $(CPPFLAGS) $(libcares_la_CFLAGS) $(CFLAGS) -MT libcares_la-ares_mkquery.lo -MD -MP -MF $(DEPDIR)/libcares_la-ares_mkquery.Tpo -c -o libcares_la-ares_mkquery.lo `test -f 'ares_mkquery.c' || echo '$(srcdir)/'`ares_mkquery.c -@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcares_la-ares_mkquery.Tpo $(DEPDIR)/libcares_la-ares_mkquery.Plo -@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='ares_mkquery.c' object='libcares_la-ares_mkquery.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcares_la_CPPFLAGS) $(CPPFLAGS) $(libcares_la_CFLAGS) $(CFLAGS) -c -o libcares_la-ares_mkquery.lo `test -f 'ares_mkquery.c' || echo '$(srcdir)/'`ares_mkquery.c - libcares_la-ares_create_query.lo: ares_create_query.c @am__fastdepCC_TRUE@ $(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libcares_la_CPPFLAGS) $(CPPFLAGS) $(libcares_la_CFLAGS) $(CFLAGS) -MT libcares_la-ares_create_query.lo -MD -MP -MF $(DEPDIR)/libcares_la-ares_create_query.Tpo -c -o libcares_la-ares_create_query.lo `test -f 'ares_create_query.c' || echo '$(srcdir)/'`ares_create_query.c @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/libcares_la-ares_create_query.Tpo $(DEPDIR)/libcares_la-ares_create_query.Plo @@ -1728,7 +1708,6 @@ distclean: distclean-recursive -rm -f ./$(DEPDIR)/libcares_la-ares__iface_ips.Plo -rm -f ./$(DEPDIR)/libcares_la-ares__llist.Plo -rm -f ./$(DEPDIR)/libcares_la-ares__parse_into_addrinfo.Plo - -rm -f ./$(DEPDIR)/libcares_la-ares__read_line.Plo -rm -f ./$(DEPDIR)/libcares_la-ares__slist.Plo -rm -f ./$(DEPDIR)/libcares_la-ares__socket.Plo -rm -f ./$(DEPDIR)/libcares_la-ares__sortaddrinfo.Plo @@ -1766,7 +1745,6 @@ distclean: distclean-recursive -rm -f ./$(DEPDIR)/libcares_la-ares_init.Plo -rm -f ./$(DEPDIR)/libcares_la-ares_library_init.Plo -rm -f ./$(DEPDIR)/libcares_la-ares_math.Plo - -rm -f 
./$(DEPDIR)/libcares_la-ares_mkquery.Plo -rm -f ./$(DEPDIR)/libcares_la-ares_options.Plo -rm -f ./$(DEPDIR)/libcares_la-ares_parse_a_reply.Plo -rm -f ./$(DEPDIR)/libcares_la-ares_parse_aaaa_reply.Plo @@ -1855,7 +1833,6 @@ maintainer-clean: maintainer-clean-recursive -rm -f ./$(DEPDIR)/libcares_la-ares__iface_ips.Plo -rm -f ./$(DEPDIR)/libcares_la-ares__llist.Plo -rm -f ./$(DEPDIR)/libcares_la-ares__parse_into_addrinfo.Plo - -rm -f ./$(DEPDIR)/libcares_la-ares__read_line.Plo -rm -f ./$(DEPDIR)/libcares_la-ares__slist.Plo -rm -f ./$(DEPDIR)/libcares_la-ares__socket.Plo -rm -f ./$(DEPDIR)/libcares_la-ares__sortaddrinfo.Plo @@ -1893,7 +1870,6 @@ maintainer-clean: maintainer-clean-recursive -rm -f ./$(DEPDIR)/libcares_la-ares_init.Plo -rm -f ./$(DEPDIR)/libcares_la-ares_library_init.Plo -rm -f ./$(DEPDIR)/libcares_la-ares_math.Plo - -rm -f ./$(DEPDIR)/libcares_la-ares_mkquery.Plo -rm -f ./$(DEPDIR)/libcares_la-ares_options.Plo -rm -f ./$(DEPDIR)/libcares_la-ares_parse_a_reply.Plo -rm -f ./$(DEPDIR)/libcares_la-ares_parse_aaaa_reply.Plo diff --git a/deps/cares/src/lib/Makefile.inc b/deps/cares/src/lib/Makefile.inc index 29a65fd35b1dd0..38f7a115fe3598 100644 --- a/deps/cares/src/lib/Makefile.inc +++ b/deps/cares/src/lib/Makefile.inc @@ -13,7 +13,6 @@ CSOURCES = ares__addrinfo2hostent.c \ ares__iface_ips.c \ ares__llist.c \ ares__parse_into_addrinfo.c \ - ares__read_line.c \ ares__slist.c \ ares__socket.c \ ares__sortaddrinfo.c \ @@ -50,7 +49,6 @@ CSOURCES = ares__addrinfo2hostent.c \ ares_init.c \ ares_library_init.c \ ares_math.c \ - ares_mkquery.c \ ares_create_query.c \ ares_options.c \ ares_parse_a_reply.c \ diff --git a/deps/cares/src/lib/ares__buf.c b/deps/cares/src/lib/ares__buf.c index 8f9f32d71867ff..0663383df9e42e 100644 --- a/deps/cares/src/lib/ares__buf.c +++ b/deps/cares/src/lib/ares__buf.c @@ -45,47 +45,6 @@ struct ares__buf { * SIZE_MAX if not set. */ }; -ares_bool_t ares__isprint(int ch) -{ - if (ch >= 0x20 && ch <= 0x7E) { - return ARES_TRUE; - } - return ARES_FALSE; -} - -/* Character set allowed by hostnames. This is to include the normal - * domain name character set plus: - * - underscores which are used in SRV records. - * - Forward slashes such as are used for classless in-addr.arpa - * delegation (CNAMEs) - * - Asterisks may be used for wildcard domains in CNAMEs as seen in the - * real world. - * While RFC 2181 section 11 does state not to do validation, - * that applies to servers, not clients. Vulnerabilities have been - * reported when this validation is not performed. Security is more - * important than edge-case compatibility (which is probably invalid - * anyhow). */ -ares_bool_t ares__is_hostnamech(int ch) -{ - /* [A-Za-z0-9-*._/] - * Don't use isalnum() as it is locale-specific - */ - if (ch >= 'A' && ch <= 'Z') { - return ARES_TRUE; - } - if (ch >= 'a' && ch <= 'z') { - return ARES_TRUE; - } - if (ch >= '0' && ch <= '9') { - return ARES_TRUE; - } - if (ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '*') { - return ARES_TRUE; - } - - return ARES_FALSE; -} - ares__buf_t *ares__buf_create(void) { ares__buf_t *buf = ares_malloc_zero(sizeof(*buf)); @@ -630,6 +589,24 @@ ares_status_t ares__buf_fetch_bytes_into_buf(ares__buf_t *buf, return ares__buf_consume(buf, len); } +static ares_bool_t ares__is_whitespace(unsigned char c, + ares_bool_t include_linefeed) +{ + switch (c) { + case '\r': + case '\t': + case ' ': + case '\v': + case '\f': + return ARES_TRUE; + case '\n': + return include_linefeed; + default: + break; + } + return ARES_FALSE; +} + size_t ares__buf_consume_whitespace(ares__buf_t *buf, ares_bool_t include_linefeed) { @@ -642,24 +619,11 @@ size_t ares__buf_consume_whitespace(ares__buf_t *buf, } for (i = 0; i < remaining_len; i++) { - switch (ptr[i]) { - case '\r': - case '\t': - case ' ': - case '\v': - case '\f': - break; - case '\n': - if (!include_linefeed) { - goto done; - } - break; - default: - goto done; + if (!ares__is_whitespace(ptr[i], include_linefeed)) { + break; } } -done: if (i > 0) { ares__buf_consume(buf, i); } @@ -677,20 +641,11 @@ size_t ares__buf_consume_nonwhitespace(ares__buf_t *buf) } for (i = 0; i < remaining_len; i++) { - switch (ptr[i]) { - case '\r': - case '\t': - case ' ': - case '\v': - case '\f': - case '\n': - goto done; - default: - break; + if (ares__is_whitespace(ptr[i], ARES_TRUE)) { + break; } } -done: if (i > 0) { ares__buf_consume(buf, i); } @@ -826,7 +781,7 @@ static ares_bool_t ares__buf_split_isduplicate(ares__llist_t *list, ares_status_t ares__buf_split(ares__buf_t *buf, const unsigned char *delims, size_t delims_len, ares__buf_split_t flags, - ares__llist_t **list) + size_t max_sections, ares__llist_t **list) { ares_status_t status = ARES_SUCCESS; ares_bool_t first = ARES_TRUE; @@ -842,20 +797,57 @@ ares_status_t ares__buf_split(ares__buf_t *buf, const unsigned char *delims, } while (ares__buf_len(buf)) { - size_t len; + size_t len = 0; + const unsigned char *ptr; + + if (first) { + /* No delimiter yet, just tag the start */ + ares__buf_tag(buf); + } else { + if (flags & ARES_BUF_SPLIT_DONT_CONSUME_DELIMS) { + /* tag then eat delimiter so its first byte in buffer */ + ares__buf_tag(buf); + ares__buf_consume(buf, 1); + } else { + /* throw away delimiter */ + ares__buf_consume(buf, 1); + ares__buf_tag(buf); + } + } + + if (max_sections && ares__llist_len(*list) >= max_sections - 1) { + ares__buf_consume(buf, ares__buf_len(buf)); + } else { + ares__buf_consume_until_charset(buf, delims, delims_len, ARES_FALSE); + } - ares__buf_tag(buf); + ptr = ares__buf_tag_fetch(buf, &len); - len = ares__buf_consume_until_charset(buf, delims, delims_len, ARES_FALSE); + /* Shouldn't be possible */ + if (ptr == NULL) { + status = ARES_EFORMERR; + goto done; + } + + if (flags & ARES_BUF_SPLIT_LTRIM) { + size_t i; + for (i = 0; i < len; i++) { + if (!ares__is_whitespace(ptr[i], ARES_TRUE)) { + break; + } + } + ptr += i; + len -= i; + } - /* Don't treat a delimiter as part of the length */ - if (!first && len && flags & ARES_BUF_SPLIT_DONT_CONSUME_DELIMS) { - len--; + if (flags & ARES_BUF_SPLIT_RTRIM) { + while (len && ares__is_whitespace(ptr[len - 1], ARES_TRUE)) { + len--; + } } if (len != 0 || flags & ARES_BUF_SPLIT_ALLOW_BLANK) { - const unsigned char *ptr = ares__buf_tag_fetch(buf, &len); - ares__buf_t *data; + ares__buf_t *data; if (!(flags & ARES_BUF_SPLIT_NO_DUPLICATES) || !ares__buf_split_isduplicate(*list, ptr, len, flags)) { @@ -880,12 +872,6 @@ ares_status_t ares__buf_split(ares__buf_t *buf, const unsigned char 
*delims, } } - if (!(flags & ARES_BUF_SPLIT_DONT_CONSUME_DELIMS) && - ares__buf_len(buf) != 0) { - /* Consume delimiter */ - ares__buf_consume(buf, 1); - } - first = ARES_FALSE; } @@ -1150,3 +1136,80 @@ ares_status_t ares__buf_hexdump(ares__buf_t *buf, const unsigned char *data, return ARES_SUCCESS; } + +ares_status_t ares__buf_load_file(const char *filename, ares__buf_t *buf) +{ + FILE *fp = NULL; + unsigned char *ptr = NULL; + size_t len = 0; + size_t ptr_len = 0; + long ftell_len = 0; + ares_status_t status; + + if (filename == NULL || buf == NULL) { + return ARES_EFORMERR; + } + + fp = fopen(filename, "rb"); + if (fp == NULL) { + int error = ERRNO; + switch (error) { + case ENOENT: + case ESRCH: + status = ARES_ENOTFOUND; + goto done; + default: + DEBUGF(fprintf(stderr, "fopen() failed with error: %d %s\n", error, + strerror(error))); + DEBUGF(fprintf(stderr, "Error opening file: %s\n", filename)); + status = ARES_EFILE; + goto done; + } + } + + /* Get length portably, fstat() is POSIX, not C */ + if (fseek(fp, 0, SEEK_END) != 0) { + status = ARES_EFILE; + goto done; + } + + ftell_len = ftell(fp); + if (ftell_len < 0) { + status = ARES_EFILE; + goto done; + } + len = (size_t)ftell_len; + + if (fseek(fp, 0, SEEK_SET) != 0) { + status = ARES_EFILE; + goto done; + } + + if (len == 0) { + status = ARES_SUCCESS; + goto done; + } + + /* Read entire data into buffer */ + ptr_len = len; + ptr = ares__buf_append_start(buf, &ptr_len); + if (ptr == NULL) { + status = ARES_ENOMEM; + goto done; + } + + ptr_len = fread(ptr, 1, len, fp); + if (ptr_len != len) { + status = ARES_EFILE; + goto done; + } + + ares__buf_append_finish(buf, len); + status = ARES_SUCCESS; + +done: + if (fp != NULL) { + fclose(fp); + } + return status; +} diff --git a/deps/cares/src/lib/ares__buf.h b/deps/cares/src/lib/ares__buf.h index 52054a0b33c658..4298814f7b4396 100644 --- a/deps/cares/src/lib/ares__buf.h +++ b/deps/cares/src/lib/ares__buf.h @@ -177,7 +177,7 @@ void ares__buf_append_finish(ares__buf_t *buf, size_t len); * * \param[in] buf Initialized buffer object. * \param[in] data Data to hex dump - * \param[in] data_len Length of data to hexdump + * \param[in] len Length of data to hexdump * \return ARES_SUCCESS on success. */ ares_status_t ares__buf_hexdump(ares__buf_t *buf, const unsigned char *data, @@ -373,7 +373,8 @@ size_t ares__buf_consume_whitespace(ares__buf_t *buf, size_t ares__buf_consume_nonwhitespace(ares__buf_t *buf); -/*! Consume until a character in the character set provided is reached +/*! Consume until a character in the character set provided is reached. Does + * not include the character from the charset at the end. * * \param[in] buf Initialized buffer object * \param[in] charset character set @@ -414,7 +415,9 @@ typedef enum { /*! No flags */ ARES_BUF_SPLIT_NONE = 0, /*! The delimiter will be the first character in the buffer, except the - * first buffer since the start doesn't have a delimiter + * first buffer since the start doesn't have a delimiter. This option is + * incompatible with ARES_BUF_SPLIT_LTRIM since the delimiter is always + * the first character. */ ARES_BUF_SPLIT_DONT_CONSUME_DELIMS = 1 << 0, /*! Allow blank sections, by default blank sections are not emitted. If using @@ -424,7 +427,13 @@ typedef enum { /*! Remove duplicate entries */ ARES_BUF_SPLIT_NO_DUPLICATES = 1 << 2, /*! Perform case-insensitive matching when comparing values */ - ARES_BUF_SPLIT_CASE_INSENSITIVE = 1 << 3 + ARES_BUF_SPLIT_CASE_INSENSITIVE = 1 << 3, + /*! 
Trim leading whitespace from buffer */ + ARES_BUF_SPLIT_LTRIM = 1 << 4, + /*! Trim trailing whitespace from buffer */ + ARES_BUF_SPLIT_RTRIM = 1 << 5, + /*! Trim leading and trailing whitespace from buffer */ + ARES_BUF_SPLIT_TRIM = (ARES_BUF_SPLIT_LTRIM | ARES_BUF_SPLIT_RTRIM) } ares__buf_split_t; /*! Split the provided buffer into multiple sub-buffers stored in the variable @@ -435,6 +444,12 @@ typedef enum { * \param[in] delims Possible delimiters * \param[in] delims_len Length of possible delimiters * \param[in] flags One more more flags + * \param[in] max_sections Maximum number of sections. Use 0 for + * unlimited. Useful for splitting key/value + * pairs where the delimiter may be a valid + * character in the value. A value of 1 would + * have little usefulness and would effectively + * ignore the delimiter itself. * \param[out] list Result. Depending on flags, this may be a * valid list with no elements. Use * ares__llist_destroy() to free the memory which @@ -444,7 +459,7 @@ typedef enum { */ ares_status_t ares__buf_split(ares__buf_t *buf, const unsigned char *delims, size_t delims_len, ares__buf_split_t flags, - ares__llist_t **list); + size_t max_sections, ares__llist_t **list); /*! Check the unprocessed buffer to see if it begins with the sequence of @@ -536,7 +551,7 @@ size_t ares__buf_get_position(const ares__buf_t *buf); * \param[in] remaining_len maximum length that should be used for parsing * the string, this is often less than the remaining * buffer and is based on the RR record length. - * \param[out] str Pointer passed by reference to be filled in with + * \param[out] name Pointer passed by reference to be filled in with * allocated string of the parsed that must be * ares_free()'d by the caller. * \param[in] allow_multiple ARES_TRUE if it should attempt to parse multiple @@ -567,6 +582,18 @@ ares_status_t ares__buf_parse_dns_str(ares__buf_t *buf, size_t remaining_len, ares_status_t ares__buf_parse_dns_binstr(ares__buf_t *buf, size_t remaining_len, unsigned char **bin, size_t *bin_len, ares_bool_t allow_multiple); + +/*! Load data from specified file path into provided buffer. The entire file + * is loaded into memory. + * + * \param[in] filename complete path to file + * \param[in,out] buf Initialized (non-const) buffer object to load data + * into + * \return ARES_ENOTFOUND if file not found, ARES_EFILE if issues reading + * file, ARES_ENOMEM if out of memory, ARES_SUCCESS on success. + */ +ares_status_t ares__buf_load_file(const char *filename, ares__buf_t *buf); + /*! 
@} */ #endif /* __ARES__BUF_H */ diff --git a/deps/cares/src/lib/ares__hosts_file.c b/deps/cares/src/lib/ares__hosts_file.c index c6fe63a429d269..e279623de37e64 100644 --- a/deps/cares/src/lib/ares__hosts_file.c +++ b/deps/cares/src/lib/ares__hosts_file.c @@ -98,95 +98,6 @@ struct ares_hosts_entry { ares__llist_t *hosts; }; -static ares_status_t ares__read_file_into_buf(const char *filename, - ares__buf_t *buf) -{ - FILE *fp = NULL; - unsigned char *ptr = NULL; - size_t len = 0; - size_t ptr_len = 0; - long ftell_len = 0; - ares_status_t status; - - if (filename == NULL || buf == NULL) { - return ARES_EFORMERR; - } - - fp = fopen(filename, "rb"); - if (fp == NULL) { - int error = ERRNO; - switch (error) { - case ENOENT: - case ESRCH: - status = ARES_ENOTFOUND; - goto done; - default: - DEBUGF(fprintf(stderr, "fopen() failed with error: %d %s\n", error, - strerror(error))); - DEBUGF(fprintf(stderr, "Error opening file: %s\n", filename)); - status = ARES_EFILE; - goto done; - } - } - - /* Get length portably, fstat() is POSIX, not C */ - if (fseek(fp, 0, SEEK_END) != 0) { - status = ARES_EFILE; - goto done; - } - - ftell_len = ftell(fp); - if (ftell_len < 0) { - status = ARES_EFILE; - goto done; - } - len = (size_t)ftell_len; - - if (fseek(fp, 0, SEEK_SET) != 0) { - status = ARES_EFILE; - goto done; - } - - if (len == 0) { - status = ARES_SUCCESS; - goto done; - } - - /* Read entire data into buffer */ - ptr_len = len; - ptr = ares__buf_append_start(buf, &ptr_len); - if (ptr == NULL) { - status = ARES_ENOMEM; - goto done; - } - - ptr_len = fread(ptr, 1, len, fp); - if (ptr_len != len) { - status = ARES_EFILE; - goto done; - } - - ares__buf_append_finish(buf, len); - status = ARES_SUCCESS; - -done: - if (fp != NULL) { - fclose(fp); - } - return status; -} - -static ares_bool_t ares__is_hostname(const char *str) -{ - size_t i; - for (i = 0; str[i] != 0; i++) { - if (!ares__is_hostnamech(str[i])) { - return ARES_FALSE; - } - } - return ARES_TRUE; -} - const void *ares_dns_pton(const char *ipaddr, struct ares_addr *addr, size_t *out_len) { @@ -605,7 +516,7 @@ static ares_status_t ares__parse_hosts(const char *filename, goto done; } - status = ares__read_file_into_buf(filename, buf); + status = ares__buf_load_file(filename, buf); if (status != ARES_SUCCESS) { goto done; } diff --git a/deps/cares/src/lib/ares__htable.h b/deps/cares/src/lib/ares__htable.h index fd1c0a2366022f..d09c865977cdae 100644 --- a/deps/cares/src/lib/ares__htable.h +++ b/deps/cares/src/lib/ares__htable.h @@ -58,21 +58,21 @@ typedef struct ares__htable ares__htable_t; * but otherwise will not change between calls. * \return hash */ -typedef unsigned int (*ares__htable_hashfunc_t)(const void *key, +typedef unsigned int (*ares__htable_hashfunc_t)(const void *key, unsigned int seed); /*! Callback to free the bucket * * \param[in] bucket user provided bucket */ -typedef void (*ares__htable_bucket_free_t)(void *bucket); +typedef void (*ares__htable_bucket_free_t)(void *bucket); /*! Callback to extract the key from the user-provided bucket * * \param[in] bucket user provided bucket * \return pointer to key held in bucket */ -typedef const void *(*ares__htable_bucket_key_t)(const void *bucket); +typedef const void *(*ares__htable_bucket_key_t)(const void *bucket); /*! 
Callback to compare two keys for equality * @@ -80,15 +80,15 @@ typedef const void *(*ares__htable_bucket_key_t)(const void *bucket); * \param[in] key2 second key * \return ARES_TRUE if equal, ARES_FALSE if not */ -typedef ares_bool_t (*ares__htable_key_eq_t)(const void *key1, +typedef ares_bool_t (*ares__htable_key_eq_t)(const void *key1, const void *key2); /*! Destroy the initialized hashtable * - * \param[in] initialized hashtable + * \param[in] htable initialized hashtable */ -void ares__htable_destroy(ares__htable_t *htable); +void ares__htable_destroy(ares__htable_t *htable); /*! Create a new hashtable * diff --git a/deps/cares/src/lib/ares__htable_asvp.h b/deps/cares/src/lib/ares__htable_asvp.h index ee253455b2690c..49a766d023091e 100644 --- a/deps/cares/src/lib/ares__htable_asvp.h +++ b/deps/cares/src/lib/ares__htable_asvp.h @@ -51,7 +51,7 @@ typedef struct ares__htable_asvp ares__htable_asvp_t; * * \param[in] val user-supplied value */ -typedef void (*ares__htable_asvp_val_free_t)(void *val); +typedef void (*ares__htable_asvp_val_free_t)(void *val); /*! Destroy hashtable * @@ -71,7 +71,7 @@ ares__htable_asvp_t * /*! Retrieve an array of keys from the hashtable. * * \param[in] htable Initialized hashtable - * \param[out] num_keys Count of returned keys + * \param[out] num Count of returned keys * \return Array of keys in the hashtable. Must be free'd with ares_free(). */ ares_socket_t *ares__htable_asvp_keys(const ares__htable_asvp_t *htable, diff --git a/deps/cares/src/lib/ares__htable_strvp.h b/deps/cares/src/lib/ares__htable_strvp.h index 80d375c06804a5..25dd2b90777d8d 100644 --- a/deps/cares/src/lib/ares__htable_strvp.h +++ b/deps/cares/src/lib/ares__htable_strvp.h @@ -49,7 +49,7 @@ typedef struct ares__htable_strvp ares__htable_strvp_t; * * \param[in] val user-supplied value */ -typedef void (*ares__htable_strvp_val_free_t)(void *val); +typedef void (*ares__htable_strvp_val_free_t)(void *val); /*! Destroy hashtable * diff --git a/deps/cares/src/lib/ares__htable_szvp.h b/deps/cares/src/lib/ares__htable_szvp.h index 9857afe79604d3..62b1776be92b5b 100644 --- a/deps/cares/src/lib/ares__htable_szvp.h +++ b/deps/cares/src/lib/ares__htable_szvp.h @@ -49,7 +49,7 @@ typedef struct ares__htable_szvp ares__htable_szvp_t; * * \param[in] val user-supplied value */ -typedef void (*ares__htable_szvp_val_free_t)(void *val); +typedef void (*ares__htable_szvp_val_free_t)(void *val); /*! Destroy hashtable * diff --git a/deps/cares/src/lib/ares__llist.h b/deps/cares/src/lib/ares__llist.h index bd18bb9ec1d54c..7d57bdab3b077c 100644 --- a/deps/cares/src/lib/ares__llist.h +++ b/deps/cares/src/lib/ares__llist.h @@ -52,7 +52,7 @@ typedef struct ares__llist_node ares__llist_node_t; * * \param[in] data user supplied data */ -typedef void (*ares__llist_destructor_t)(void *data); +typedef void (*ares__llist_destructor_t)(void *data); /*! Create a linked list object * @@ -201,8 +201,8 @@ void ares__llist_destroy(ares__llist_t *list); /*! Detach node from the current list and re-attach it to the new list as the * last entry. * - * \param[in] node node to move - * \param[in] parent new list + * \param[in] node node to move + * \param[in] new_parent new list */ void ares__llist_node_move_parent_last(ares__llist_node_t *node, ares__llist_t *new_parent); @@ -210,8 +210,8 @@ void ares__llist_node_move_parent_last(ares__llist_node_t *node, /*! Detach node from the current list and re-attach it to the new list as the * first entry. 
* - * \param[in] node node to move - * \param[in] parent new list + * \param[in] node node to move + * \param[in] new_parent new list */ void ares__llist_node_move_parent_first(ares__llist_node_t *node, ares__llist_t *new_parent); diff --git a/deps/cares/src/lib/ares__parse_into_addrinfo.c b/deps/cares/src/lib/ares__parse_into_addrinfo.c index a5ce0c594fc3be..90e951c02f3f6d 100644 --- a/deps/cares/src/lib/ares__parse_into_addrinfo.c +++ b/deps/cares/src/lib/ares__parse_into_addrinfo.c @@ -47,13 +47,12 @@ #include "ares.h" #include "ares_private.h" -ares_status_t ares__parse_into_addrinfo(const unsigned char *abuf, size_t alen, +ares_status_t ares__parse_into_addrinfo(const ares_dns_record_t *dnsrec, ares_bool_t cname_only_is_enodata, unsigned short port, struct ares_addrinfo *ai) { ares_status_t status; - ares_dns_record_t *dnsrec = NULL; size_t i; size_t ancount; const char *hostname = NULL; @@ -63,11 +62,6 @@ ares_status_t ares__parse_into_addrinfo(const unsigned char *abuf, size_t alen, struct ares_addrinfo_cname *cnames = NULL; struct ares_addrinfo_node *nodes = NULL; - status = ares_dns_parse(abuf, alen, 0, &dnsrec); - if (status != ARES_SUCCESS) { - goto done; - } - /* Save question hostname */ status = ares_dns_record_query_get(dnsrec, 0, &hostname, NULL, NULL); if (status != ARES_SUCCESS) { @@ -83,7 +77,7 @@ ares_status_t ares__parse_into_addrinfo(const unsigned char *abuf, size_t alen, for (i = 0; i < ancount; i++) { ares_dns_rec_type_t rtype; const ares_dns_rr_t *rr = - ares_dns_record_rr_get(dnsrec, ARES_SECTION_ANSWER, i); + ares_dns_record_rr_get_const(dnsrec, ARES_SECTION_ANSWER, i); if (ares_dns_rr_get_class(rr) != ARES_CLASS_IN) { continue; @@ -177,7 +171,6 @@ ares_status_t ares__parse_into_addrinfo(const unsigned char *abuf, size_t alen, done: ares__freeaddrinfo_cnames(cnames); ares__freeaddrinfo_nodes(nodes); - ares_dns_record_destroy(dnsrec); /* compatibility */ if (status == ARES_EBADNAME) { diff --git a/deps/cares/src/lib/ares__read_line.c b/deps/cares/src/lib/ares__read_line.c deleted file mode 100644 index 018f55e8b2681f..00000000000000 --- a/deps/cares/src/lib/ares__read_line.c +++ /dev/null @@ -1,90 +0,0 @@ -/* MIT License - * - * Copyright (c) 1998 Massachusetts Institute of Technology - * Copyright (c) The c-ares project and its contributors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * SPDX-License-Identifier: MIT - */ - -#include "ares_setup.h" - -#include "ares.h" -#include "ares_private.h" - -/* This is an internal function. Its contract is to read a line from - * a file into a dynamically allocated buffer, zeroing the trailing - * newline if there is one. The calling routine may call - * ares__read_line multiple times with the same buf and bufsize - * pointers; *buf will be reallocated and *bufsize adjusted as - * appropriate. The initial value of *buf should be NULL. After the - * calling routine is done reading lines, it should free *buf. - */ -ares_status_t ares__read_line(FILE *fp, char **buf, size_t *bufsize) -{ - char *newbuf; - size_t offset = 0; - size_t len; - - if (*buf == NULL) { - *buf = ares_malloc(128); - if (!*buf) { - return ARES_ENOMEM; - } - *bufsize = 128; - } - - for (;;) { - int bytestoread = (int)(*bufsize - offset); - - if (!fgets(*buf + offset, bytestoread, fp)) { - return (offset != 0) ? 0 : (ferror(fp)) ? ARES_EFILE : ARES_EOF; - } - len = offset + ares_strlen(*buf + offset); - - /* Probably means there was an embedded NULL as the first character in - * the line, throw away line */ - if (len == 0) { - offset = 0; - continue; - } - - if ((*buf)[len - 1] == '\n') { - (*buf)[len - 1] = 0; - break; - } - offset = len; - if (len < *bufsize - 1) { - continue; - } - - /* Allocate more space. */ - newbuf = ares_realloc(*buf, *bufsize * 2); - if (!newbuf) { - ares_free(*buf); - *buf = NULL; - return ARES_ENOMEM; - } - *buf = newbuf; - *bufsize *= 2; - } - return ARES_SUCCESS; -} diff --git a/deps/cares/src/lib/ares__slist.h b/deps/cares/src/lib/ares__slist.h index 04cd50806ebc2d..26af88fa782499 100644 --- a/deps/cares/src/lib/ares__slist.h +++ b/deps/cares/src/lib/ares__slist.h @@ -63,7 +63,7 @@ typedef struct ares__slist_node ares__slist_node_t; * * \param[in] data User-defined data to destroy */ -typedef void (*ares__slist_destructor_t)(void *data); +typedef void (*ares__slist_destructor_t)(void *data); /*! SkipList comparison function * @@ -71,7 +71,7 @@ typedef void (*ares__slist_destructor_t)(void *data); * \param[in] data2 Second user-defined data object * \return < 0 if data1 < data1, > 0 if data1 > data2, 0 if data1 == data2 */ -typedef int (*ares__slist_cmp_t)(const void *data1, const void *data2); +typedef int (*ares__slist_cmp_t)(const void *data1, const void *data2); /*! 
Create SkipList * diff --git a/deps/cares/src/lib/ares__threads.c b/deps/cares/src/lib/ares__threads.c index 028790aead5abe..f6de8c698e373d 100644 --- a/deps/cares/src/lib/ares__threads.c +++ b/deps/cares/src/lib/ares__threads.c @@ -138,9 +138,9 @@ struct ares__thread { HANDLE thread; DWORD id; - void *(*func)(void *arg); - void *arg; - void *rv; + void *(*func)(void *arg); + void *arg; + void *rv; }; /* Wrap for pthread compatibility */ @@ -335,8 +335,8 @@ static void ares__timespec_timeout(struct timespec *ts, unsigned long add_ms) # error cannot determine current system time # endif - ts->tv_sec += add_ms / 1000; - ts->tv_nsec += (add_ms % 1000) * 1000000; + ts->tv_sec += (time_t)(add_ms / 1000); + ts->tv_nsec += (long)((add_ms % 1000) * 1000000); /* Normalize if needed */ if (ts->tv_nsec >= 1000000000) { diff --git a/deps/cares/src/lib/ares__threads.h b/deps/cares/src/lib/ares__threads.h index 39764296478a07..108354dfc1e17f 100644 --- a/deps/cares/src/lib/ares__threads.h +++ b/deps/cares/src/lib/ares__threads.h @@ -52,9 +52,9 @@ ares_status_t ares__thread_cond_timedwait(ares__thread_cond_t *cond, struct ares__thread; typedef struct ares__thread ares__thread_t; -typedef void *(*ares__thread_func_t)(void *arg); -ares_status_t ares__thread_create(ares__thread_t **thread, - ares__thread_func_t func, void *arg); +typedef void *(*ares__thread_func_t)(void *arg); +ares_status_t ares__thread_create(ares__thread_t **thread, + ares__thread_func_t func, void *arg); ares_status_t ares__thread_join(ares__thread_t *thread, void **rv); #endif diff --git a/deps/cares/src/lib/ares_cancel.c b/deps/cares/src/lib/ares_cancel.c index 0ee6124dd71440..5a9fb722cb7778 100644 --- a/deps/cares/src/lib/ares_cancel.c +++ b/deps/cares/src/lib/ares_cancel.c @@ -74,7 +74,7 @@ void ares_cancel(ares_channel_t *channel) query->node_all_queries = NULL; /* NOTE: its possible this may enqueue new queries */ - query->callback(query->arg, ARES_ECANCELLED, 0, NULL, 0); + query->callback(query->arg, ARES_ECANCELLED, 0, NULL); ares__free_query(query); /* See if the connection should be cleaned up */ diff --git a/deps/cares/src/lib/ares_config.h.cmake b/deps/cares/src/lib/ares_config.h.cmake index 01dcccaa17c2b4..10a1b7a971604b 100644 --- a/deps/cares/src/lib/ares_config.h.cmake +++ b/deps/cares/src/lib/ares_config.h.cmake @@ -212,6 +212,9 @@ /* Define to 1 if you have the header file. */ #cmakedefine HAVE_NETINET_IN_H +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_NETINET6_IN6_H + /* Define to 1 if you have the header file. */ #cmakedefine HAVE_NETINET_TCP_H diff --git a/deps/cares/src/lib/ares_config.h.in b/deps/cares/src/lib/ares_config.h.in index 4e07e58473a009..f486b6b4f000b9 100644 --- a/deps/cares/src/lib/ares_config.h.in +++ b/deps/cares/src/lib/ares_config.h.in @@ -186,6 +186,9 @@ /* Define to 1 if you have the header file. */ #undef HAVE_NETDB_H +/* Define to 1 if you have the header file. */ +#undef HAVE_NETINET6_IN6_H + /* Define to 1 if you have the header file. 
*/ #undef HAVE_NETINET_IN_H diff --git a/deps/cares/src/lib/ares_create_query.c b/deps/cares/src/lib/ares_create_query.c index f66b0ff6e0693d..a2f2caac6e95d9 100644 --- a/deps/cares/src/lib/ares_create_query.c +++ b/deps/cares/src/lib/ares_create_query.c @@ -28,13 +28,15 @@ #include "ares.h" #include "ares_private.h" -int ares_create_query(const char *name, int dnsclass, int type, - unsigned short id, int rd, unsigned char **bufp, - int *buflenp, int max_udp_size) +static int ares_create_query_int(const char *name, int dnsclass, int type, + unsigned short id, int rd, + unsigned char **bufp, int *buflenp, + int max_udp_size) { ares_status_t status; ares_dns_record_t *dnsrec = NULL; size_t len; + ares_dns_flags_t rd_flag = rd ? ARES_FLAG_RD : 0; if (name == NULL || bufp == NULL || buflenp == NULL) { status = ARES_EFORMERR; @@ -44,56 +46,13 @@ int ares_create_query(const char *name, int dnsclass, int type, *bufp = NULL; *buflenp = 0; - /* Per RFC 7686, reject queries for ".onion" domain names with NXDOMAIN. */ - if (ares__is_onion_domain(name)) { - status = ARES_ENOTFOUND; - goto done; - } - - status = ares_dns_record_create(&dnsrec, id, rd ? ARES_FLAG_RD : 0, - ARES_OPCODE_QUERY, ARES_RCODE_NOERROR); + status = ares_dns_record_create_query( + &dnsrec, name, (ares_dns_class_t)dnsclass, (ares_dns_rec_type_t)type, id, + rd_flag, (size_t)max_udp_size); if (status != ARES_SUCCESS) { goto done; } - status = ares_dns_record_query_add(dnsrec, name, (ares_dns_rec_type_t)type, - (ares_dns_class_t)dnsclass); - if (status != ARES_SUCCESS) { - goto done; - } - - /* max_udp_size > 0 indicates EDNS, so send OPT RR as an additional record */ - if (max_udp_size > 0) { - ares_dns_rr_t *rr = NULL; - - status = ares_dns_record_rr_add(&rr, dnsrec, ARES_SECTION_ADDITIONAL, "", - ARES_REC_TYPE_OPT, ARES_CLASS_IN, 0); - if (status != ARES_SUCCESS) { - goto done; - } - - if (max_udp_size > 65535) { - status = ARES_EFORMERR; - goto done; - } - - status = ares_dns_rr_set_u16(rr, ARES_RR_OPT_UDP_SIZE, - (unsigned short)max_udp_size); - if (status != ARES_SUCCESS) { - goto done; - } - - status = ares_dns_rr_set_u8(rr, ARES_RR_OPT_VERSION, 0); - if (status != ARES_SUCCESS) { - goto done; - } - - status = ares_dns_rr_set_u16(rr, ARES_RR_OPT_FLAGS, 0); - if (status != ARES_SUCCESS) { - goto done; - } - } - status = ares_dns_write(dnsrec, bufp, &len); if (status != ARES_SUCCESS) { goto done; @@ -105,3 +64,17 @@ int ares_create_query(const char *name, int dnsclass, int type, ares_dns_record_destroy(dnsrec); return (int)status; } + +int ares_create_query(const char *name, int dnsclass, int type, + unsigned short id, int rd, unsigned char **bufp, + int *buflenp, int max_udp_size) +{ + return ares_create_query_int(name, dnsclass, type, id, rd, bufp, buflenp, + max_udp_size); +} + +int ares_mkquery(const char *name, int dnsclass, int type, unsigned short id, + int rd, unsigned char **buf, int *buflen) +{ + return ares_create_query_int(name, dnsclass, type, id, rd, buf, buflen, 0); +} diff --git a/deps/cares/src/lib/ares_destroy.c b/deps/cares/src/lib/ares_destroy.c index 145084577f7fba..6965b601e76e07 100644 --- a/deps/cares/src/lib/ares_destroy.c +++ b/deps/cares/src/lib/ares_destroy.c @@ -51,7 +51,7 @@ void ares_destroy(ares_channel_t *channel) struct query *query = ares__llist_node_claim(node); query->node_all_queries = NULL; - query->callback(query->arg, ARES_EDESTRUCTION, 0, NULL, 0); + query->callback(query->arg, ARES_EDESTRUCTION, 0, NULL); ares__free_query(query); node = next; diff --git 
a/deps/cares/src/lib/ares_dns_mapping.c b/deps/cares/src/lib/ares_dns_mapping.c index 55f1af7939c32f..2b463fe83128a7 100644 --- a/deps/cares/src/lib/ares_dns_mapping.c +++ b/deps/cares/src/lib/ares_dns_mapping.c @@ -883,3 +883,37 @@ const char *ares_dns_rcode_tostr(ares_dns_rcode_t rcode) return "UNKNOWN"; } + +/* Convert an rcode and ancount from a query reply into an ares_status_t + * value. Used internally by ares_search() and ares_query(). + */ +ares_status_t ares_dns_query_reply_tostatus(ares_dns_rcode_t rcode, + size_t ancount) +{ + ares_status_t status = ARES_SUCCESS; + + switch (rcode) { + case ARES_RCODE_NOERROR: + status = (ancount > 0) ? ARES_SUCCESS : ARES_ENODATA; + break; + case ARES_RCODE_FORMERR: + status = ARES_EFORMERR; + break; + case ARES_RCODE_SERVFAIL: + status = ARES_ESERVFAIL; + break; + case ARES_RCODE_NXDOMAIN: + status = ARES_ENOTFOUND; + break; + case ARES_RCODE_NOTIMP: + status = ARES_ENOTIMP; + break; + case ARES_RCODE_REFUSED: + status = ARES_EREFUSED; + break; + default: + break; + } + + return status; +} diff --git a/deps/cares/src/lib/ares_dns_private.h b/deps/cares/src/lib/ares_dns_private.h index 91635e74cd8010..3af4b3c9926e42 100644 --- a/deps/cares/src/lib/ares_dns_private.h +++ b/deps/cares/src/lib/ares_dns_private.h @@ -49,6 +49,33 @@ ares_bool_t ares_dns_has_opt_rr(const ares_dns_record_t *rec); void ares_dns_record_write_ttl_decrement(ares_dns_record_t *dnsrec, unsigned int ttl_decrement); +/*! Create a DNS record object for a query. The arguments are the same as + * those for ares_create_query(). + * + * \param[out] dnsrec DNS record object to create. + * \param[in] name NUL-terminated name for the query. + * \param[in] dnsclass Class for the query. + * \param[in] type Type for the query. + * \param[in] id Identifier for the query. + * \param[in] flags Flags for the query. + * \param[in] max_udp_size Maximum size of a UDP packet for EDNS. + * \return ARES_SUCCESS on success, otherwise an error code. + */ +ares_status_t + ares_dns_record_create_query(ares_dns_record_t **dnsrec, const char *name, + ares_dns_class_t dnsclass, + ares_dns_rec_type_t type, unsigned short id, + ares_dns_flags_t flags, size_t max_udp_size); + +/*! Convert the RCODE and ANCOUNT from a DNS query reply into a status code. + * + * \param[in] rcode The RCODE from the reply. + * \param[in] ancount The ANCOUNT from the reply. + * \return An appropriate status code. 
+ */ +ares_status_t ares_dns_query_reply_tostatus(ares_dns_rcode_t rcode, + size_t ancount); + struct ares_dns_qd { char *name; ares_dns_rec_type_t qtype; diff --git a/deps/cares/src/lib/ares_dns_record.c b/deps/cares/src/lib/ares_dns_record.c index 30219003e24a57..ec7f7e734302de 100644 --- a/deps/cares/src/lib/ares_dns_record.c +++ b/deps/cares/src/lib/ares_dns_record.c @@ -276,6 +276,39 @@ ares_status_t ares_dns_record_query_add(ares_dns_record_t *dnsrec, return ARES_SUCCESS; } +ares_status_t ares_dns_record_query_set_name(ares_dns_record_t *dnsrec, + size_t idx, const char *name) +{ + char *orig_name = NULL; + + if (dnsrec == NULL || idx >= dnsrec->qdcount || name == NULL) { + return ARES_EFORMERR; + } + orig_name = dnsrec->qd[idx].name; + dnsrec->qd[idx].name = ares_strdup(name); + if (dnsrec->qd[idx].name == NULL) { + dnsrec->qd[idx].name = orig_name; + return ARES_ENOMEM; + } + + ares_free(orig_name); + return ARES_SUCCESS; +} + +ares_status_t ares_dns_record_query_set_type(ares_dns_record_t *dnsrec, + size_t idx, + ares_dns_rec_type_t qtype) +{ + if (dnsrec == NULL || idx >= dnsrec->qdcount || + !ares_dns_rec_type_isvalid(qtype, ARES_TRUE)) { + return ARES_EFORMERR; + } + + dnsrec->qd[idx].qtype = qtype; + + return ARES_SUCCESS; +} + ares_status_t ares_dns_record_query_get(const ares_dns_record_t *dnsrec, size_t idx, const char **name, ares_dns_rec_type_t *qtype, @@ -499,7 +532,7 @@ ares_dns_rr_t *ares_dns_record_rr_get(ares_dns_record_t *dnsrec, return &rr_ptr[idx]; } -static const ares_dns_rr_t * +const ares_dns_rr_t * ares_dns_record_rr_get_const(const ares_dns_record_t *dnsrec, ares_dns_section_t sect, size_t idx) { @@ -1314,3 +1347,103 @@ ares_bool_t ares_dns_has_opt_rr(const ares_dns_record_t *rec) } return ARES_FALSE; } + +/* Construct a DNS record for a name with given class and type. Used internally + * by ares_search() and ares_create_query(). 
+ */ +ares_status_t + ares_dns_record_create_query(ares_dns_record_t **dnsrec, const char *name, + ares_dns_class_t dnsclass, + ares_dns_rec_type_t type, unsigned short id, + ares_dns_flags_t flags, size_t max_udp_size) +{ + ares_status_t status; + ares_dns_rr_t *rr = NULL; + + if (dnsrec == NULL) { + return ARES_EFORMERR; + } + + *dnsrec = NULL; + + /* Per RFC 7686, reject queries for ".onion" domain names with NXDOMAIN */ + if (ares__is_onion_domain(name)) { + status = ARES_ENOTFOUND; + goto done; + } + + status = ares_dns_record_create(dnsrec, id, (unsigned short)flags, + ARES_OPCODE_QUERY, ARES_RCODE_NOERROR); + if (status != ARES_SUCCESS) { + goto done; + } + + status = ares_dns_record_query_add(*dnsrec, name, type, dnsclass); + if (status != ARES_SUCCESS) { + goto done; + } + + /* max_udp_size > 0 indicates EDNS, so send OPT RR as an additional record */ + if (max_udp_size > 0) { + /* max_udp_size must fit into a 16 bit unsigned integer field on the OPT + * RR, so check here that it fits + */ + if (max_udp_size > 65535) { + status = ARES_EFORMERR; + goto done; + } + + status = ares_dns_record_rr_add(&rr, *dnsrec, ARES_SECTION_ADDITIONAL, "", + ARES_REC_TYPE_OPT, ARES_CLASS_IN, 0); + if (status != ARES_SUCCESS) { + goto done; + } + + status = ares_dns_rr_set_u16(rr, ARES_RR_OPT_UDP_SIZE, + (unsigned short)max_udp_size); + if (status != ARES_SUCCESS) { + goto done; + } + + status = ares_dns_rr_set_u8(rr, ARES_RR_OPT_VERSION, 0); + if (status != ARES_SUCCESS) { + goto done; + } + + status = ares_dns_rr_set_u16(rr, ARES_RR_OPT_FLAGS, 0); + if (status != ARES_SUCCESS) { + goto done; + } + } + +done: + if (status != ARES_SUCCESS) { + ares_dns_record_destroy(*dnsrec); + *dnsrec = NULL; + } + return status; +} + +ares_dns_record_t *ares_dns_record_duplicate(const ares_dns_record_t *dnsrec) +{ + unsigned char *data = NULL; + size_t data_len = 0; + ares_dns_record_t *out = NULL; + ares_status_t status; + + if (dnsrec == NULL) { + return NULL; + } + + status = ares_dns_write(dnsrec, &data, &data_len); + if (status != ARES_SUCCESS) { + return NULL; + } + + status = ares_dns_parse(data, data_len, 0, &out); + ares_free(data); + if (status != ARES_SUCCESS) { + return NULL; + } + return out; +} diff --git a/deps/cares/src/lib/ares_dns_write.c b/deps/cares/src/lib/ares_dns_write.c index 2e99c5ba88aee7..b49ec07bcb9b6b 100644 --- a/deps/cares/src/lib/ares_dns_write.c +++ b/deps/cares/src/lib/ares_dns_write.c @@ -831,10 +831,10 @@ static ares_status_t ares_dns_write_rr_raw_rr(ares__buf_t *buf, return ares__buf_append(buf, data, data_len); } -static ares_status_t ares_dns_write_rr(ares_dns_record_t *dnsrec, - ares__llist_t **namelist, - ares_dns_section_t section, - ares__buf_t *buf) +static ares_status_t ares_dns_write_rr(const ares_dns_record_t *dnsrec, + ares__llist_t **namelist, + ares_dns_section_t section, + ares__buf_t *buf) { size_t i; @@ -849,7 +849,7 @@ static ares_status_t ares_dns_write_rr(ares_dns_record_t *dnsrec, size_t end_length; unsigned int ttl; - rr = ares_dns_record_rr_get(dnsrec, section, i); + rr = ares_dns_record_rr_get_const(dnsrec, section, i); if (rr == NULL) { return ARES_EFORMERR; } @@ -988,8 +988,8 @@ static ares_status_t ares_dns_write_rr(ares_dns_record_t *dnsrec, return ARES_SUCCESS; } -ares_status_t ares_dns_write(ares_dns_record_t *dnsrec, unsigned char **buf, - size_t *buf_len) +ares_status_t ares_dns_write(const ares_dns_record_t *dnsrec, + unsigned char **buf, size_t *buf_len) { ares__buf_t *b = NULL; ares_status_t status; diff --git a/deps/cares/src/lib/ares_event.h 
b/deps/cares/src/lib/ares_event.h index 9d01d75f372afe..23e9637924ba07 100644 --- a/deps/cares/src/lib/ares_event.h +++ b/deps/cares/src/lib/ares_event.h @@ -72,11 +72,11 @@ struct ares_event { typedef struct { const char *name; ares_bool_t (*init)(ares_event_thread_t *e); - void (*destroy)(ares_event_thread_t *e); + void (*destroy)(ares_event_thread_t *e); ares_bool_t (*event_add)(ares_event_t *event); - void (*event_del)(ares_event_t *event); - void (*event_mod)(ares_event_t *event, ares_event_flags_t new_flags); - size_t (*wait)(ares_event_thread_t *e, unsigned long timeout_ms); + void (*event_del)(ares_event_t *event); + void (*event_mod)(ares_event_t *event, ares_event_flags_t new_flags); + size_t (*wait)(ares_event_thread_t *e, unsigned long timeout_ms); } ares_event_sys_t; struct ares_event_thread { diff --git a/deps/cares/src/lib/ares_event_poll.c b/deps/cares/src/lib/ares_event_poll.c index c16b2824663544..33b1d6dfd58ec7 100644 --- a/deps/cares/src/lib/ares_event_poll.c +++ b/deps/cares/src/lib/ares_event_poll.c @@ -75,8 +75,11 @@ static size_t ares_evsys_poll_wait(ares_event_thread_t *e, size_t cnt = 0; size_t i; - if (num_fds) { + if (fdlist != NULL && num_fds) { pollfd = ares_malloc_zero(sizeof(*pollfd) * num_fds); + if (pollfd == NULL) { + goto done; + } for (i = 0; i < num_fds; i++) { const ares_event_t *ev = ares__htable_asvp_get_direct(e->ev_handles, fdlist[i]); @@ -96,7 +99,7 @@ static size_t ares_evsys_poll_wait(ares_event_thread_t *e, goto done; } - for (i = 0; i < num_fds; i++) { + for (i = 0; pollfd != NULL && i < num_fds; i++) { ares_event_t *ev; ares_event_flags_t flags = 0; diff --git a/deps/cares/src/lib/ares_getaddrinfo.c b/deps/cares/src/lib/ares_getaddrinfo.c index eaa4b422c06e9e..cfc889c70a84e2 100644 --- a/deps/cares/src/lib/ares_getaddrinfo.c +++ b/deps/cares/src/lib/ares_getaddrinfo.c @@ -79,15 +79,20 @@ struct host_query { char *lookups; /* Duplicate memory from channel because of ares_reinit() */ const char *remaining_lookups; /* types of lookup we need to perform ("fb" by default, file and dns respectively) */ - char **domains; /* duplicate from channel for ares_reinit() safety */ - size_t ndomains; - struct ares_addrinfo *ai; /* store results between lookups */ - unsigned short qid_a; /* qid for A request */ - unsigned short qid_aaaa; /* qid for AAAA request */ - size_t remaining; /* number of DNS answers waiting for */ - ares_ssize_t next_domain; /* next search domain to try */ - size_t - nodata_cnt; /* Track nodata responses to possibly override final result */ + + /* Search order for names */ + char **names; + size_t names_cnt; + size_t next_name_idx; /* next name index being attempted */ + + struct ares_addrinfo *ai; /* store results between lookups */ + unsigned short qid_a; /* qid for A request */ + unsigned short qid_aaaa; /* qid for AAAA request */ + + size_t remaining; /* number of DNS answers waiting for */ + + /* Track nodata responses to possibly override final result */ + size_t nodata_cnt; }; static const struct ares_addrinfo_hints default_hints = { @@ -98,10 +103,6 @@ static const struct ares_addrinfo_hints default_hints = { }; /* forward declarations */ -static void host_callback(void *arg, int status, int timeouts, - unsigned char *abuf, int alen); -static ares_bool_t as_is_first(const struct host_query *hquery); -static ares_bool_t as_is_only(const struct host_query *hquery); static ares_bool_t next_dns_lookup(struct host_query *hquery); struct ares_addrinfo_cname * @@ -324,6 +325,17 @@ static ares_bool_t fake_addrinfo(const char *name, 
unsigned short port, return ARES_TRUE; } +static void hquery_free(struct host_query *hquery, ares_bool_t cleanup_ai) +{ + if (cleanup_ai) { + ares_freeaddrinfo(hquery->ai); + } + ares__strsplit_free(hquery->names, hquery->names_cnt); + ares_free(hquery->name); + ares_free(hquery->lookups); + ares_free(hquery); +} + static void end_hquery(struct host_query *hquery, ares_status_t status) { struct ares_addrinfo_node sentinel; @@ -349,10 +361,7 @@ static void end_hquery(struct host_query *hquery, ares_status_t status) } hquery->callback(hquery->arg, (int)status, (int)hquery->timeouts, hquery->ai); - ares__strsplit_free(hquery->domains, hquery->ndomains); - ares_free(hquery->lookups); - ares_free(hquery->name); - ares_free(hquery); + hquery_free(hquery, ARES_FALSE); } ares_bool_t ares__is_localhost(const char *name) @@ -478,25 +487,23 @@ static void terminate_retries(const struct host_query *hquery, query->no_retries = ARES_TRUE; } -static void host_callback(void *arg, int status, int timeouts, - unsigned char *abuf, int alen) +static void host_callback(void *arg, ares_status_t status, size_t timeouts, + const ares_dns_record_t *dnsrec) { struct host_query *hquery = (struct host_query *)arg; ares_status_t addinfostatus = ARES_SUCCESS; - unsigned short qid = 0; - hquery->timeouts += (size_t)timeouts; + hquery->timeouts += timeouts; hquery->remaining--; if (status == ARES_SUCCESS) { - if (alen < 0) { + if (dnsrec == NULL) { addinfostatus = ARES_EBADRESP; } else { - addinfostatus = ares__parse_into_addrinfo(abuf, (size_t)alen, ARES_TRUE, - hquery->port, hquery->ai); + addinfostatus = + ares__parse_into_addrinfo(dnsrec, ARES_TRUE, hquery->port, hquery->ai); } - if (addinfostatus == ARES_SUCCESS && alen >= HFIXEDSZ) { - qid = DNS_HEADER_QID(abuf); /* Converts to host byte order */ - terminate_retries(hquery, qid); + if (addinfostatus == ARES_SUCCESS) { + terminate_retries(hquery, ares_dns_record_get_id(dnsrec)); } } @@ -505,7 +512,7 @@ static void host_callback(void *arg, int status, int timeouts, /* must make sure we don't do next_lookup() on destroy or cancel, * and return the appropriate status. We won't return a partial * result in this case. */ - end_hquery(hquery, (ares_status_t)status); + end_hquery(hquery, status); } else if (addinfostatus != ARES_SUCCESS && addinfostatus != ARES_ENODATA) { /* error in parsing result e.g. no memory */ if (addinfostatus == ARES_EBADRESP && hquery->ai->nodes) { @@ -523,10 +530,9 @@ static void host_callback(void *arg, int status, int timeouts, if (status == ARES_ENODATA || addinfostatus == ARES_ENODATA) { hquery->nodata_cnt++; } - next_lookup(hquery, - hquery->nodata_cnt ? ARES_ENODATA : (ares_status_t)status); + next_lookup(hquery, hquery->nodata_cnt ? 
ARES_ENODATA : status); } else { - end_hquery(hquery, (ares_status_t)status); + end_hquery(hquery, status); } } @@ -542,7 +548,6 @@ static void ares_getaddrinfo_int(ares_channel_t *channel, const char *name, unsigned short port = 0; int family; struct ares_addrinfo *ai; - char *alias_name = NULL; ares_status_t status; if (!hints) { @@ -563,25 +568,12 @@ static void ares_getaddrinfo_int(ares_channel_t *channel, const char *name, return; } - /* perform HOSTALIAS resolution (technically this function does some other - * things we are going to ignore) */ - status = ares__single_domain(channel, name, &alias_name); - if (status != ARES_SUCCESS) { - callback(arg, (int)status, 0, NULL); - return; - } - - if (alias_name) { - name = alias_name; - } - if (service) { if (hints->ai_flags & ARES_AI_NUMERICSERV) { unsigned long val; errno = 0; val = strtoul(service, NULL, 0); if ((val == 0 && errno != 0) || val > 65535) { - ares_free(alias_name); callback(arg, ARES_ESERVICE, 0, NULL); return; } @@ -593,7 +585,6 @@ static void ares_getaddrinfo_int(ares_channel_t *channel, const char *name, errno = 0; val = strtoul(service, NULL, 0); if ((val == 0 && errno != 0) || val > 65535) { - ares_free(alias_name); callback(arg, ARES_ESERVICE, 0, NULL); return; } @@ -604,66 +595,53 @@ static void ares_getaddrinfo_int(ares_channel_t *channel, const char *name, ai = ares_malloc_zero(sizeof(*ai)); if (!ai) { - ares_free(alias_name); callback(arg, ARES_ENOMEM, 0, NULL); return; } if (fake_addrinfo(name, port, hints, ai, callback, arg)) { - ares_free(alias_name); return; } /* Allocate and fill in the host query structure. */ hquery = ares_malloc_zero(sizeof(*hquery)); if (!hquery) { - ares_free(alias_name); ares_freeaddrinfo(ai); callback(arg, ARES_ENOMEM, 0, NULL); return; } - memset(hquery, 0, sizeof(*hquery)); - hquery->name = ares_strdup(name); - ares_free(alias_name); - if (!hquery->name) { - ares_free(hquery); - ares_freeaddrinfo(ai); + + hquery->port = port; + hquery->channel = channel; + hquery->hints = *hints; + hquery->sent_family = -1; /* nothing is sent yet */ + hquery->callback = callback; + hquery->arg = arg; + hquery->ai = ai; + hquery->name = ares_strdup(name); + if (hquery->name == NULL) { + hquery_free(hquery, ARES_TRUE); callback(arg, ARES_ENOMEM, 0, NULL); return; } - hquery->lookups = ares_strdup(channel->lookups); - if (!hquery->lookups) { - ares_free(hquery->name); - ares_free(hquery); - ares_freeaddrinfo(ai); - callback(arg, ARES_ENOMEM, 0, NULL); + + status = + ares__search_name_list(channel, name, &hquery->names, &hquery->names_cnt); + if (status != ARES_SUCCESS) { + hquery_free(hquery, ARES_TRUE); + callback(arg, (int)status, 0, NULL); return; } + hquery->next_name_idx = 0; - if (channel->ndomains) { - /* Duplicate for ares_reinit() safety */ - hquery->domains = - ares__strsplit_duplicate(channel->domains, channel->ndomains); - if (hquery->domains == NULL) { - ares_free(hquery->lookups); - ares_free(hquery->name); - ares_free(hquery); - ares_freeaddrinfo(ai); - callback(arg, ARES_ENOMEM, 0, NULL); - return; - } - hquery->ndomains = channel->ndomains; - } - hquery->port = port; - hquery->channel = channel; - hquery->hints = *hints; - hquery->sent_family = -1; /* nothing is sent yet */ - hquery->callback = callback; - hquery->arg = arg; + hquery->lookups = ares_strdup(channel->lookups); + if (hquery->lookups == NULL) { + hquery_free(hquery, ARES_TRUE); + callback(arg, ARES_ENOMEM, 0, NULL); + return; + } hquery->remaining_lookups = hquery->lookups; - hquery->ai = ai; - hquery->next_domain = -1; /* 
Start performing lookups according to channel->lookups. */ next_lookup(hquery, ARES_ECONNREFUSED /* initial error code */); @@ -684,93 +662,39 @@ void ares_getaddrinfo(ares_channel_t *channel, const char *name, static ares_bool_t next_dns_lookup(struct host_query *hquery) { - char *s = NULL; - ares_bool_t is_s_allocated = ARES_FALSE; - ares_status_t status; - - /* if next_domain == -1 and as_is_first is true, try hquery->name */ - if (hquery->next_domain == -1) { - if (as_is_first(hquery)) { - s = hquery->name; - } - hquery->next_domain = 0; - } - - /* if as_is_first is false, try hquery->name at last */ - if (!s && (size_t)hquery->next_domain == hquery->ndomains) { - if (!as_is_first(hquery)) { - s = hquery->name; - } - hquery->next_domain++; - } - - if (!s && (size_t)hquery->next_domain < hquery->ndomains && - !as_is_only(hquery)) { - status = ares__cat_domain(hquery->name, - hquery->domains[hquery->next_domain++], &s); - if (status == ARES_SUCCESS) { - is_s_allocated = ARES_TRUE; - } - } + const char *name = NULL; - if (s) { - /* NOTE: hquery may be invalidated during the call to ares_query_qid(), - * so should not be referenced after this point */ - switch (hquery->hints.ai_family) { - case AF_INET: - hquery->remaining += 1; - ares_query_qid(hquery->channel, s, C_IN, T_A, host_callback, hquery, - &hquery->qid_a); - break; - case AF_INET6: - hquery->remaining += 1; - ares_query_qid(hquery->channel, s, C_IN, T_AAAA, host_callback, hquery, - &hquery->qid_aaaa); - break; - case AF_UNSPEC: - hquery->remaining += 2; - ares_query_qid(hquery->channel, s, C_IN, T_A, host_callback, hquery, - &hquery->qid_a); - ares_query_qid(hquery->channel, s, C_IN, T_AAAA, host_callback, hquery, - &hquery->qid_aaaa); - break; - default: - break; - } - if (is_s_allocated) { - ares_free(s); - } - return ARES_TRUE; - } else { - assert(!hquery->ai->nodes); + if (hquery->next_name_idx >= hquery->names_cnt) { return ARES_FALSE; } -} -static ares_bool_t as_is_first(const struct host_query *hquery) -{ - const char *p; - size_t ndots = 0; - for (p = hquery->name; p && *p; p++) { - if (*p == '.') { - ndots++; - } - } - if (as_is_only(hquery)) { - /* prevent ARES_EBADNAME for valid FQDN, where ndots < channel->ndots */ - return ARES_TRUE; - } - return ndots >= hquery->channel->ndots ? 
ARES_TRUE : ARES_FALSE; -} + name = hquery->names[hquery->next_name_idx++]; -static ares_bool_t as_is_only(const struct host_query *hquery) -{ - size_t nname = ares_strlen(hquery->name); - if (hquery->channel->flags & ARES_FLAG_NOSEARCH) { - return ARES_TRUE; - } - if (hquery->name != NULL && nname && hquery->name[nname - 1] == '.') { - return ARES_TRUE; + /* NOTE: hquery may be invalidated during the call to ares_query_qid(), + * so should not be referenced after this point */ + switch (hquery->hints.ai_family) { + case AF_INET: + hquery->remaining += 1; + ares_query_dnsrec(hquery->channel, name, ARES_CLASS_IN, ARES_REC_TYPE_A, + host_callback, hquery, &hquery->qid_a); + break; + case AF_INET6: + hquery->remaining += 1; + ares_query_dnsrec(hquery->channel, name, ARES_CLASS_IN, + ARES_REC_TYPE_AAAA, host_callback, hquery, + &hquery->qid_aaaa); + break; + case AF_UNSPEC: + hquery->remaining += 2; + ares_query_dnsrec(hquery->channel, name, ARES_CLASS_IN, ARES_REC_TYPE_A, + host_callback, hquery, &hquery->qid_a); + ares_query_dnsrec(hquery->channel, name, ARES_CLASS_IN, + ARES_REC_TYPE_AAAA, host_callback, hquery, + &hquery->qid_aaaa); + break; + default: + break; } - return ARES_FALSE; + + return ARES_TRUE; } diff --git a/deps/cares/src/lib/ares_gethostbyaddr.c b/deps/cares/src/lib/ares_gethostbyaddr.c index ab54706ba96889..453673260dcee5 100644 --- a/deps/cares/src/lib/ares_gethostbyaddr.c +++ b/deps/cares/src/lib/ares_gethostbyaddr.c @@ -59,11 +59,11 @@ struct addr_query { size_t timeouts; }; -static void next_lookup(struct addr_query *aquery); -static void addr_callback(void *arg, int status, int timeouts, - unsigned char *abuf, int alen); -static void end_aquery(struct addr_query *aquery, ares_status_t status, - struct hostent *host); +static void next_lookup(struct addr_query *aquery); +static void addr_callback(void *arg, ares_status_t status, size_t timeouts, + const ares_dns_record_t *dnsrec); +static void end_aquery(struct addr_query *aquery, ares_status_t status, + struct hostent *host); static ares_status_t file_lookup(ares_channel_t *channel, const struct ares_addr *addr, struct hostent **host); @@ -138,7 +138,8 @@ static void next_lookup(struct addr_query *aquery) return; } aquery->remaining_lookups = p + 1; - ares_query(aquery->channel, name, C_IN, T_PTR, addr_callback, aquery); + ares_query_dnsrec(aquery->channel, name, ARES_CLASS_IN, + ARES_REC_TYPE_PTR, addr_callback, aquery, NULL); ares_free(name); return; case 'f': @@ -159,27 +160,27 @@ static void next_lookup(struct addr_query *aquery) end_aquery(aquery, ARES_ENOTFOUND, NULL); } -static void addr_callback(void *arg, int status, int timeouts, - unsigned char *abuf, int alen) +static void addr_callback(void *arg, ares_status_t status, size_t timeouts, + const ares_dns_record_t *dnsrec) { struct addr_query *aquery = (struct addr_query *)arg; struct hostent *host; size_t addrlen; - aquery->timeouts += (size_t)timeouts; + aquery->timeouts += timeouts; if (status == ARES_SUCCESS) { if (aquery->addr.family == AF_INET) { addrlen = sizeof(aquery->addr.addr.addr4); - status = ares_parse_ptr_reply(abuf, alen, &aquery->addr.addr.addr4, - (int)addrlen, AF_INET, &host); + status = ares_parse_ptr_reply_dnsrec(dnsrec, &aquery->addr.addr.addr4, + (int)addrlen, AF_INET, &host); } else { addrlen = sizeof(aquery->addr.addr.addr6); - status = ares_parse_ptr_reply(abuf, alen, &aquery->addr.addr.addr6, - (int)addrlen, AF_INET6, &host); + status = ares_parse_ptr_reply_dnsrec(dnsrec, &aquery->addr.addr.addr6, + (int)addrlen, AF_INET6, &host); } - 
end_aquery(aquery, (ares_status_t)status, host); + end_aquery(aquery, status, host); } else if (status == ARES_EDESTRUCTION || status == ARES_ECANCELLED) { - end_aquery(aquery, (ares_status_t)status, NULL); + end_aquery(aquery, status, NULL); } else { next_lookup(aquery); } diff --git a/deps/cares/src/lib/ares_init.c b/deps/cares/src/lib/ares_init.c index bae7c72fe2cf67..28a509ea48a5af 100644 --- a/deps/cares/src/lib/ares_init.c +++ b/deps/cares/src/lib/ares_init.c @@ -152,10 +152,6 @@ static ares_status_t init_by_defaults(ares_channel_t *channel) channel->tries = DEFAULT_TRIES; } - if (channel->ndots == 0) { - channel->ndots = 1; - } - if (ares__slist_len(channel->servers) == 0) { /* Add a default local named server to the channel unless configured not * to (in which case return an error). @@ -261,31 +257,6 @@ static ares_status_t init_by_defaults(ares_channel_t *channel) } error: - if (rc) { - if (channel->domains && channel->domains[0]) { - ares_free(channel->domains[0]); - } - if (channel->domains) { - ares_free(channel->domains); - channel->domains = NULL; - } - - if (channel->lookups) { - ares_free(channel->lookups); - channel->lookups = NULL; - } - - if (channel->resolvconf_path) { - ares_free(channel->resolvconf_path); - channel->resolvconf_path = NULL; - } - - if (channel->hosts_path) { - ares_free(channel->hosts_path); - channel->hosts_path = NULL; - } - } - if (hostname) { ares_free(hostname); } @@ -309,6 +280,9 @@ int ares_init_options(ares_channel_t **channelptr, return ARES_ENOMEM; } + /* One option where zero is valid, so set default value here */ + channel->ndots = 1; + status = ares__channel_threading_init(channel); if (status != ARES_SUCCESS) { goto done; diff --git a/deps/cares/src/lib/ares_ipv6.h b/deps/cares/src/lib/ares_ipv6.h index be8cbe989396c2..28d7851ff3f051 100644 --- a/deps/cares/src/lib/ares_ipv6.h +++ b/deps/cares/src/lib/ares_ipv6.h @@ -27,6 +27,10 @@ #ifndef ARES_IPV6_H #define ARES_IPV6_H +#ifdef HAVE_NETINET6_IN6_H +# include +#endif + #ifndef HAVE_PF_INET6 # define PF_INET6 AF_INET6 #endif diff --git a/deps/cares/src/lib/ares_library_init.c b/deps/cares/src/lib/ares_library_init.c index 5cd39dc244ba4b..2767f1f93c77e4 100644 --- a/deps/cares/src/lib/ares_library_init.c +++ b/deps/cares/src/lib/ares_library_init.c @@ -72,7 +72,7 @@ static void default_free(void *p) #endif void *(*ares_malloc)(size_t size) = default_malloc; void *(*ares_realloc)(void *ptr, size_t size) = default_realloc; -void (*ares_free)(void *ptr) = default_free; +void (*ares_free)(void *ptr) = default_free; void *ares_malloc_zero(size_t size) { @@ -114,7 +114,7 @@ int ares_library_init(int flags) } int ares_library_init_mem(int flags, void *(*amalloc)(size_t size), - void (*afree)(void *ptr), + void (*afree)(void *ptr), void *(*arealloc)(void *ptr, size_t size)) { if (amalloc) { diff --git a/deps/cares/src/lib/ares_mkquery.c b/deps/cares/src/lib/ares_mkquery.c deleted file mode 100644 index da1898e74cd951..00000000000000 --- a/deps/cares/src/lib/ares_mkquery.c +++ /dev/null @@ -1,35 +0,0 @@ -/* MIT License - * - * Copyright (c) 1998 Massachusetts Institute of Technology - * Copyright (c) The c-ares project and its contributors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to 
permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - * - * SPDX-License-Identifier: MIT - */ - -#include "ares_setup.h" -#include "ares.h" - -int ares_mkquery(const char *name, int dnsclass, int type, unsigned short id, - int rd, unsigned char **buf, int *buflen) -{ - return ares_create_query(name, dnsclass, type, id, rd, buf, buflen, 0); -} diff --git a/deps/cares/src/lib/ares_options.c b/deps/cares/src/lib/ares_options.c index 342d2ea1bec968..adc3e062ac437e 100644 --- a/deps/cares/src/lib/ares_options.c +++ b/deps/cares/src/lib/ares_options.c @@ -316,7 +316,7 @@ ares_status_t ares__init_by_options(ares_channel_t *channel, } if (optmask & ARES_OPT_NDOTS) { - if (options->ndots <= 0) { + if (options->ndots < 0) { optmask &= ~(ARES_OPT_NDOTS); } else { channel->ndots = (size_t)options->ndots; diff --git a/deps/cares/src/lib/ares_parse_a_reply.c b/deps/cares/src/lib/ares_parse_a_reply.c index f576575fe4b2fd..da841f0da9af36 100644 --- a/deps/cares/src/lib/ares_parse_a_reply.c +++ b/deps/cares/src/lib/ares_parse_a_reply.c @@ -59,6 +59,7 @@ int ares_parse_a_reply(const unsigned char *abuf, int alen, char *question_hostname = NULL; ares_status_t status; size_t req_naddrttls = 0; + ares_dns_record_t *dnsrec = NULL; if (alen < 0) { return ARES_EBADRESP; @@ -71,7 +72,12 @@ int ares_parse_a_reply(const unsigned char *abuf, int alen, memset(&ai, 0, sizeof(ai)); - status = ares__parse_into_addrinfo(abuf, (size_t)alen, 0, 0, &ai); + status = ares_dns_parse(abuf, (size_t)alen, 0, &dnsrec); + if (status != ARES_SUCCESS) { + goto fail; + } + + status = ares__parse_into_addrinfo(dnsrec, 0, 0, &ai); if (status != ARES_SUCCESS && status != ARES_ENODATA) { goto fail; } @@ -96,6 +102,11 @@ int ares_parse_a_reply(const unsigned char *abuf, int alen, ares__freeaddrinfo_nodes(ai.nodes); ares_free(ai.name); ares_free(question_hostname); + ares_dns_record_destroy(dnsrec); + + if (status == ARES_EBADNAME) { + status = ARES_EBADRESP; + } return (int)status; } diff --git a/deps/cares/src/lib/ares_parse_aaaa_reply.c b/deps/cares/src/lib/ares_parse_aaaa_reply.c index cef4ad7f80948f..b3eba166be6ad6 100644 --- a/deps/cares/src/lib/ares_parse_aaaa_reply.c +++ b/deps/cares/src/lib/ares_parse_aaaa_reply.c @@ -61,6 +61,7 @@ int ares_parse_aaaa_reply(const unsigned char *abuf, int alen, char *question_hostname = NULL; ares_status_t status; size_t req_naddrttls = 0; + ares_dns_record_t *dnsrec = NULL; if (alen < 0) { return ARES_EBADRESP; @@ -73,7 +74,12 @@ int ares_parse_aaaa_reply(const unsigned char *abuf, int alen, memset(&ai, 0, sizeof(ai)); - status = ares__parse_into_addrinfo(abuf, (size_t)alen, 0, 0, &ai); + status = ares_dns_parse(abuf, (size_t)alen, 0, &dnsrec); + if (status != ARES_SUCCESS) { + goto fail; + } + + status = ares__parse_into_addrinfo(dnsrec, 0, 0, &ai); if (status != ARES_SUCCESS && status != 
ARES_ENODATA) { goto fail; } @@ -97,6 +103,11 @@ int ares_parse_aaaa_reply(const unsigned char *abuf, int alen, ares__freeaddrinfo_nodes(ai.nodes); ares_free(question_hostname); ares_free(ai.name); + ares_dns_record_destroy(dnsrec); + + if (status == ARES_EBADNAME) { + status = ARES_EBADRESP; + } return (int)status; } diff --git a/deps/cares/src/lib/ares_parse_ptr_reply.c b/deps/cares/src/lib/ares_parse_ptr_reply.c index d8a29f272251cf..6ee20f722e3d01 100644 --- a/deps/cares/src/lib/ares_parse_ptr_reply.c +++ b/deps/cares/src/lib/ares_parse_ptr_reply.c @@ -36,33 +36,20 @@ #include "ares.h" #include "ares_private.h" -int ares_parse_ptr_reply(const unsigned char *abuf, int alen_int, - const void *addr, int addrlen, int family, - struct hostent **host) +ares_status_t ares_parse_ptr_reply_dnsrec(const ares_dns_record_t *dnsrec, + const void *addr, int addrlen, + int family, struct hostent **host) { - ares_status_t status; - size_t alen; - size_t ptrcount = 0; - struct hostent *hostent = NULL; - const char *hostname = NULL; - const char *ptrname = NULL; - ares_dns_record_t *dnsrec = NULL; - size_t i; - size_t ancount; + ares_status_t status; + size_t ptrcount = 0; + struct hostent *hostent = NULL; + const char *hostname = NULL; + const char *ptrname = NULL; + size_t i; + size_t ancount; *host = NULL; - if (alen_int < 0) { - return ARES_EBADRESP; - } - - alen = (size_t)alen_int; - - status = ares_dns_parse(abuf, alen, 0, &dnsrec); - if (status != ARES_SUCCESS) { - goto done; - } - /* Fetch name from query as we will use it to compare later on. Old code * did this check, so we'll retain it. */ status = ares_dns_record_query_get(dnsrec, 0, &ptrname, NULL, NULL); @@ -114,7 +101,7 @@ int ares_parse_ptr_reply(const unsigned char *abuf, int alen_int, /* Cycle through answers */ for (i = 0; i < ancount; i++) { const ares_dns_rr_t *rr = - ares_dns_record_rr_get(dnsrec, ARES_SECTION_ANSWER, i); + ares_dns_record_rr_get_const(dnsrec, ARES_SECTION_ANSWER, i); if (rr == NULL) { /* Shouldn't be possible */ @@ -195,6 +182,34 @@ int ares_parse_ptr_reply(const unsigned char *abuf, int alen_int, } else { *host = hostent; } + return status; +} + +int ares_parse_ptr_reply(const unsigned char *abuf, int alen_int, + const void *addr, int addrlen, int family, + struct hostent **host) +{ + size_t alen; + ares_dns_record_t *dnsrec = NULL; + ares_status_t status; + + if (alen_int < 0) { + return ARES_EBADRESP; + } + + alen = (size_t)alen_int; + + status = ares_dns_parse(abuf, alen, 0, &dnsrec); + if (status != ARES_SUCCESS) { + goto done; + } + + status = ares_parse_ptr_reply_dnsrec(dnsrec, addr, addrlen, family, host); + +done: ares_dns_record_destroy(dnsrec); + if (status == ARES_EBADNAME) { + status = ARES_EBADRESP; + } return (int)status; } diff --git a/deps/cares/src/lib/ares_private.h b/deps/cares/src/lib/ares_private.h index fd321b911c4a1c..6a9e04af2eb633 100644 --- a/deps/cares/src/lib/ares_private.h +++ b/deps/cares/src/lib/ares_private.h @@ -209,7 +209,7 @@ struct query { unsigned char *qbuf; size_t qlen; - ares_callback callback; + ares_callback_dnsrec callback; void *arg; /* Query status */ @@ -318,12 +318,12 @@ struct ares_channeldata { }; /* Does the domain end in ".onion" or ".onion."? Case-insensitive. 
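With the ndots handling changes above (the default of 1 is now applied in ares_init_options() before user options are read, and ares__init_by_options() only rejects negative values), an ndots of 0 becomes a configurable setting instead of being silently ignored. A minimal sketch, assuming the usual public ares_init_options() entry point; the helper name is illustrative:

#include <string.h>
#include "ares.h"

static int init_channel_with_ndots_zero(ares_channel_t **channel)
{
  struct ares_options opts;

  memset(&opts, 0, sizeof(opts));
  /* With ndots == 0 every name, even one without a dot, satisfies the
   * "try as-is first" rule during domain search. */
  opts.ndots = 0;
  return ares_init_options(channel, &opts, ARES_OPT_NDOTS);
}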
*/ -ares_bool_t ares__is_onion_domain(const char *name); +ares_bool_t ares__is_onion_domain(const char *name); /* Memory management functions */ -extern void *(*ares_malloc)(size_t size); -extern void *(*ares_realloc)(void *ptr, size_t size); -extern void (*ares_free)(void *ptr); +extern void *(*ares_malloc)(size_t size); +extern void *(*ares_realloc)(void *ptr, size_t size); +extern void (*ares_free)(void *ptr); void *ares_malloc_zero(size_t size); void *ares_realloc_zero(void *ptr, size_t orig_size, size_t new_size); @@ -335,23 +335,37 @@ ares_bool_t ares__timedout(const struct timeval *now, ares_status_t ares__send_query(struct query *query, struct timeval *now); ares_status_t ares__requeue_query(struct query *query, struct timeval *now); -/* Identical to ares_query, but returns a normal ares return code like - * ARES_SUCCESS, and can be passed the qid by reference which will be - * filled in on ARES_SUCCESS */ -ares_status_t ares_query_qid(ares_channel_t *channel, const char *name, - int dnsclass, int type, ares_callback callback, - void *arg, unsigned short *qid); -/* Identical to ares_send() except returns normal ares return codes like - * ARES_SUCCESS */ -ares_status_t ares_send_ex(ares_channel_t *channel, const unsigned char *qbuf, - size_t qlen, ares_callback callback, void *arg, - unsigned short *qid); -void ares__close_connection(struct server_connection *conn); -void ares__close_sockets(struct server_state *server); -void ares__check_cleanup_conn(const ares_channel_t *channel, - struct server_connection *conn); -ares_status_t ares__read_line(FILE *fp, char **buf, size_t *bufsize); -void ares__free_query(struct query *query); +/*! Retrieve a list of names to use for searching. The first successful + * query in the list wins. This function also uses the HOSTSALIASES file + * as well as uses channel configuration to determine the search order. + * + * \param[in] channel initialized ares channel + * \param[in] name initial name being searched + * \param[out] names array of names to attempt, use ares__strsplit_free() + * when no longer needed. + * \param[out] names_len number of names in array + * \return ARES_SUCCESS on success, otherwise one of the other error codes. + */ +ares_status_t ares__search_name_list(const ares_channel_t *channel, + const char *name, char ***names, + size_t *names_len); + +/*! Function to create callback arg for converting from ares_callback_dnsrec + * to ares_calback */ +void *ares__dnsrec_convert_arg(ares_callback callback, void *arg); + +/*! Callback function used to convert from the ares_callback_dnsrec prototype to + * the ares_callback prototype, by writing the result and passing that to + * the inner callback. 
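For the ares__search_name_list() helper documented above, a minimal usage sketch (this is an internal API, so the example is illustrative only; print_candidate_names() is a made-up name and ares__strsplit_free() comes from the library's internal string-split helpers):

#include <stdio.h>
#include "ares_private.h"

static void print_candidate_names(const ares_channel_t *channel,
                                  const char *name)
{
  char        **names     = NULL;
  size_t        names_cnt = 0;
  size_t        i;
  ares_status_t status;

  status = ares__search_name_list(channel, name, &names, &names_cnt);
  if (status != ARES_SUCCESS) {
    return;
  }
  for (i = 0; i < names_cnt; i++) {
    printf("%u: %s\n", (unsigned int)i, names[i]);
  }
  /* The list and its entries are owned by the caller. */
  ares__strsplit_free(names, names_cnt);
}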
+ */ +void ares__dnsrec_convert_cb(void *arg, ares_status_t status, size_t timeouts, + const ares_dns_record_t *dnsrec); + +void ares__close_connection(struct server_connection *conn); +void ares__close_sockets(struct server_state *server); +void ares__check_cleanup_conn(const ares_channel_t *channel, + struct server_connection *conn); +void ares__free_query(struct query *query); ares_rand_state *ares__init_rand_state(void); void ares__destroy_rand_state(ares_rand_state *state); @@ -391,6 +405,7 @@ typedef struct { size_t tries; ares_bool_t rotate; size_t timeout_ms; + ares_bool_t usevc; } ares_sysconfig_t; ares_status_t ares__init_by_environment(ares_sysconfig_t *sysconfig); @@ -401,8 +416,13 @@ ares_status_t ares__parse_sortlist(struct apattern **sortlist, size_t *nsort, const char *str); void ares__destroy_servers_state(ares_channel_t *channel); -ares_status_t ares__single_domain(const ares_channel_t *channel, - const char *name, char **s); + +/* Returns ARES_SUCCESS if alias found, alias is set. Returns ARES_ENOTFOUND + * if not alias found. Returns other errors on critical failure like + * ARES_ENOMEM */ +ares_status_t ares__lookup_hostaliases(const ares_channel_t *channel, + const char *name, char **alias); + ares_status_t ares__cat_domain(const char *name, const char *domain, char **s); ares_status_t ares__sortaddrinfo(ares_channel_t *channel, struct ares_addrinfo_node *ai_node); @@ -427,10 +447,13 @@ ares_status_t ares_append_ai_node(int aftype, unsigned short port, void ares__addrinfo_cat_cnames(struct ares_addrinfo_cname **head, struct ares_addrinfo_cname *tail); -ares_status_t ares__parse_into_addrinfo(const unsigned char *abuf, size_t alen, +ares_status_t ares__parse_into_addrinfo(const ares_dns_record_t *dnsrec, ares_bool_t cname_only_is_enodata, unsigned short port, struct ares_addrinfo *ai); +ares_status_t ares_parse_ptr_reply_dnsrec(const ares_dns_record_t *dnsrec, + const void *addr, int addrlen, + int family, struct hostent **host); ares_status_t ares__addrinfo2hostent(const struct ares_addrinfo *ai, int family, struct hostent **host); @@ -456,10 +479,9 @@ ares_ssize_t ares__socket_recvfrom(ares_channel_t *channel, ares_socket_t s, ares_ssize_t ares__socket_recv(ares_channel_t *channel, ares_socket_t s, void *data, size_t data_len); void ares__close_socket(ares_channel, ares_socket_t); -int ares__connect_socket(ares_channel_t *channel, ares_socket_t sockfd, - const struct sockaddr *addr, ares_socklen_t addrlen); -ares_bool_t ares__is_hostnamech(int ch); -void ares__destroy_server(struct server_state *server); +int ares__connect_socket(ares_channel_t *channel, ares_socket_t sockfd, + const struct sockaddr *addr, ares_socklen_t addrlen); +void ares__destroy_server(struct server_state *server); ares_status_t ares__servers_update(ares_channel_t *channel, ares__llist_t *server_list, @@ -494,7 +516,6 @@ ares_status_t ares__hosts_entry_to_addrinfo(const ares_hosts_entry_t *entry, unsigned short port, ares_bool_t want_cnames, struct ares_addrinfo *ai); -ares_bool_t ares__isprint(int ch); /*! 
Parse a compressed DNS name as defined in RFC1035 starting at the current @@ -560,7 +581,7 @@ void ares_queue_notify_empty(ares_channel_t *channel); } while (0) #define ARES_CONFIG_CHECK(x) \ - (x && x->lookups && ares__slist_len(x->servers) > 0 && x->ndots > 0 && \ + (x && x->lookups && ares__slist_len(x->servers) > 0 && \ x->timeout > 0 && x->tries > 0) ares_bool_t ares__subnet_match(const struct ares_addr *addr, @@ -583,10 +604,10 @@ ares_status_t ares_qcache_insert(ares_channel_t *channel, const struct timeval *now, const struct query *query, ares_dns_record_t *dnsrec); -ares_status_t ares_qcache_fetch(ares_channel_t *channel, - const struct timeval *now, - const unsigned char *qbuf, size_t qlen, - unsigned char **abuf, size_t *alen); +ares_status_t ares_qcache_fetch(ares_channel_t *channel, + const struct timeval *now, + const ares_dns_record_t *dnsrec, + const ares_dns_record_t **dnsrec_resp); ares_status_t ares__channel_threading_init(ares_channel_t *channel); void ares__channel_threading_destroy(ares_channel_t *channel); diff --git a/deps/cares/src/lib/ares_process.c b/deps/cares/src/lib/ares_process.c index bd84d09e134805..b9705ae882b9ff 100644 --- a/deps/cares/src/lib/ares_process.c +++ b/deps/cares/src/lib/ares_process.c @@ -68,8 +68,7 @@ static ares_bool_t same_questions(const ares_dns_record_t *qrec, static ares_bool_t same_address(const struct sockaddr *sa, const struct ares_addr *aa); static void end_query(ares_channel_t *channel, struct query *query, - ares_status_t status, const unsigned char *abuf, - size_t alen); + ares_status_t status, const ares_dns_record_t *dnsrec); static void server_increment_failures(struct server_state *server) { @@ -625,6 +624,7 @@ static ares_status_t process_answer(ares_channel_t *channel, ares_dns_record_t *rdnsrec = NULL; ares_dns_record_t *qdnsrec = NULL; ares_status_t status; + ares_bool_t is_cached = ARES_FALSE; /* Parse the response */ status = ares_dns_parse(abuf, alen, 0, &rdnsrec); @@ -648,7 +648,7 @@ static ares_status_t process_answer(ares_channel_t *channel, /* Parse the question we sent as we use it to compare */ status = ares_dns_parse(query->qbuf, query->qlen, 0, &qdnsrec); if (status != ARES_SUCCESS) { - end_query(channel, query, status, NULL, 0); + end_query(channel, query, status, NULL); goto cleanup; } @@ -674,7 +674,7 @@ static ares_status_t process_answer(ares_channel_t *channel, ares_dns_has_opt_rr(qdnsrec) && !ares_dns_has_opt_rr(rdnsrec)) { status = rewrite_without_edns(qdnsrec, query); if (status != ARES_SUCCESS) { - end_query(channel, query, status, NULL, 0); + end_query(channel, query, status, NULL); goto cleanup; } @@ -729,16 +729,20 @@ static ares_status_t process_answer(ares_channel_t *channel, /* If cache insertion was successful, it took ownership. We ignore * other cache insertion failures. 
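The reworked ares_qcache_fetch() prototype above returns a borrowed pointer to the cached record instead of writing out a wire-format buffer. A simplified sketch of how it is consumed, mirroring ares_send_dnsrec_int() later in ares_send.c (the wrapper name is made up):

#include "ares_private.h"

static ares_status_t answer_from_cache(ares_channel_t *channel,
                                       const struct timeval *now,
                                       const ares_dns_record_t *qdnsrec,
                                       ares_callback_dnsrec callback,
                                       void *arg)
{
  const ares_dns_record_t *cached = NULL;
  ares_status_t status = ares_qcache_fetch(channel, now, qdnsrec, &cached);

  if (status != ARES_ENOTFOUND) {
    /* Cache hit (ARES_SUCCESS) or critical failure: either way the request
     * ends here. The cached record remains owned by the cache, so it is not
     * destroyed by the caller. */
    callback(arg, status, 0, cached);
  }
  return status;
}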
*/ if (ares_qcache_insert(channel, now, query, rdnsrec) == ARES_SUCCESS) { - rdnsrec = NULL; + is_cached = ARES_TRUE; } server_set_good(server); - end_query(channel, query, ARES_SUCCESS, abuf, alen); + end_query(channel, query, ARES_SUCCESS, rdnsrec); status = ARES_SUCCESS; cleanup: - ares_dns_record_destroy(rdnsrec); + /* Don't cleanup the cached pointer to the dns response */ + if (!is_cached) { + ares_dns_record_destroy(rdnsrec); + } + ares_dns_record_destroy(qdnsrec); return status; } @@ -774,7 +778,7 @@ ares_status_t ares__requeue_query(struct query *query, struct timeval *now) query->error_status = ARES_ETIMEOUT; } - end_query(channel, query, query->error_status, NULL, 0); + end_query(channel, query, query->error_status, NULL); return ARES_ETIMEOUT; } @@ -893,7 +897,7 @@ ares_status_t ares__send_query(struct query *query, struct timeval *now) } if (server == NULL) { - end_query(channel, query, ARES_ENOSERVER /* ? */, NULL, 0); + end_query(channel, query, ARES_ENOSERVER /* ? */, NULL); return ARES_ENOSERVER; } @@ -920,7 +924,7 @@ ares_status_t ares__send_query(struct query *query, struct timeval *now) /* Anything else is not retryable, likely ENOMEM */ default: - end_query(channel, query, status, NULL, 0); + end_query(channel, query, status, NULL); return status; } } @@ -931,7 +935,7 @@ ares_status_t ares__send_query(struct query *query, struct timeval *now) status = ares__append_tcpbuf(server, query); if (status != ARES_SUCCESS) { - end_query(channel, query, status, NULL, 0); + end_query(channel, query, status, NULL); /* Only safe to kill connection if it was new, otherwise it should be * cleaned up by another process later */ @@ -979,7 +983,7 @@ ares_status_t ares__send_query(struct query *query, struct timeval *now) /* Anything else is not retryable, likely ENOMEM */ default: - end_query(channel, query, status, NULL, 0); + end_query(channel, query, status, NULL); return status; } node = ares__llist_node_first(server->connections); @@ -1011,7 +1015,7 @@ ares_status_t ares__send_query(struct query *query, struct timeval *now) query->node_queries_by_timeout = ares__slist_insert(channel->queries_by_timeout, query); if (!query->node_queries_by_timeout) { - end_query(channel, query, ARES_ENOMEM, NULL, 0); + end_query(channel, query, ARES_ENOMEM, NULL); /* Only safe to kill connection if it was new, otherwise it should be * cleaned up by another process later */ if (new_connection) { @@ -1027,7 +1031,7 @@ ares_status_t ares__send_query(struct query *query, struct timeval *now) ares__llist_insert_last(conn->queries_to_conn, query); if (query->node_queries_to_conn == NULL) { - end_query(channel, query, ARES_ENOMEM, NULL, 0); + end_query(channel, query, ARES_ENOMEM, NULL); /* Only safe to kill connection if it was new, otherwise it should be * cleaned up by another process later */ if (new_connection) { @@ -1124,14 +1128,10 @@ static void ares_detach_query(struct query *query) } static void end_query(ares_channel_t *channel, struct query *query, - ares_status_t status, const unsigned char *abuf, - size_t alen) + ares_status_t status, const ares_dns_record_t *dnsrec) { /* Invoke the callback. */ - query->callback(query->arg, (int)status, (int)query->timeouts, - /* due to prior design flaws, abuf isn't meant to be modified, - * but bad prototypes, ugh. Lets cast off constfor compat. 
*/ - (unsigned char *)((void *)((size_t)abuf)), (int)alen); + query->callback(query->arg, status, query->timeouts, dnsrec); ares__free_query(query); /* Check and notify if no other queries are enqueued on the channel. This diff --git a/deps/cares/src/lib/ares_qcache.c b/deps/cares/src/lib/ares_qcache.c index bab8781850789a..2af1125a0d299f 100644 --- a/deps/cares/src/lib/ares_qcache.c +++ b/deps/cares/src/lib/ares_qcache.c @@ -81,6 +81,7 @@ static char *ares__qcache_calc_key(const ares_dns_record_t *dnsrec) for (i = 0; i < ares_dns_record_query_cnt(dnsrec); i++) { const char *name; + size_t name_len; ares_dns_rec_type_t qtype; ares_dns_class_t qclass; @@ -114,7 +115,15 @@ static char *ares__qcache_calc_key(const ares_dns_record_t *dnsrec) goto fail; } - status = ares__buf_append_str(buf, name); + /* On queries, a '.' may be appended to the name to indicate an explicit + * name lookup without performing a search. Strip this since its not part + * of a cached response. */ + name_len = ares_strlen(name); + if (name_len && name[name_len - 1] == '.') { + name_len--; + } + + status = ares__buf_append(buf, (const unsigned char *)name, name_len); if (status != ARES_SUCCESS) { goto fail; } @@ -384,20 +393,24 @@ static ares_status_t ares__qcache_insert(ares__qcache_t *qcache, return ARES_ENOMEM; } -static ares_status_t ares__qcache_fetch(ares__qcache_t *qcache, - const ares_dns_record_t *dnsrec, - const struct timeval *now, - unsigned char **buf, size_t *buf_len) +ares_status_t ares_qcache_fetch(ares_channel_t *channel, + const struct timeval *now, + const ares_dns_record_t *dnsrec, + const ares_dns_record_t **dnsrec_resp) { char *key = NULL; ares__qcache_entry_t *entry; - ares_status_t status; + ares_status_t status = ARES_SUCCESS; - if (qcache == NULL || dnsrec == NULL) { + if (channel == NULL || dnsrec == NULL || dnsrec_resp == NULL) { return ARES_EFORMERR; } - ares__qcache_expire(qcache, now); + if (channel->qcache == NULL) { + return ARES_ENOTFOUND; + } + + ares__qcache_expire(channel->qcache, now); key = ares__qcache_calc_key(dnsrec); if (key == NULL) { @@ -405,7 +418,7 @@ static ares_status_t ares__qcache_fetch(ares__qcache_t *qcache, goto done; } - entry = ares__htable_strvp_get_direct(qcache->cache, key); + entry = ares__htable_strvp_get_direct(channel->qcache->cache, key); if (entry == NULL) { status = ARES_ENOTFOUND; goto done; @@ -414,7 +427,7 @@ static ares_status_t ares__qcache_fetch(ares__qcache_t *qcache, ares_dns_record_write_ttl_decrement( entry->dnsrec, (unsigned int)(now->tv_sec - entry->insert_ts)); - status = ares_dns_write(entry->dnsrec, buf, buf_len); + *dnsrec_resp = entry->dnsrec; done: ares_free(key); @@ -429,27 +442,3 @@ ares_status_t ares_qcache_insert(ares_channel_t *channel, return ares__qcache_insert(channel->qcache, dnsrec, query->qbuf, query->qlen, now); } - -ares_status_t ares_qcache_fetch(ares_channel_t *channel, - const struct timeval *now, - const unsigned char *qbuf, size_t qlen, - unsigned char **abuf, size_t *alen) -{ - ares_status_t status; - ares_dns_record_t *dnsrec = NULL; - - if (channel->qcache == NULL) { - return ARES_ENOTFOUND; - } - - status = ares_dns_parse(qbuf, qlen, 0, &dnsrec); - if (status != ARES_SUCCESS) { - goto done; - } - - status = ares__qcache_fetch(channel->qcache, dnsrec, now, abuf, alen); - -done: - ares_dns_record_destroy(dnsrec); - return status; -} diff --git a/deps/cares/src/lib/ares_query.c b/deps/cares/src/lib/ares_query.c index 098e6789471809..0eea80e7fc1e59 100644 --- a/deps/cares/src/lib/ares_query.c +++ 
b/deps/cares/src/lib/ares_query.c @@ -37,103 +37,116 @@ #include "ares_dns.h" #include "ares_private.h" -struct qquery { - ares_callback callback; - void *arg; -}; +typedef struct { + ares_callback_dnsrec callback; + void *arg; +} ares_query_dnsrec_arg_t; -static void qcallback(void *arg, int status, int timeouts, unsigned char *abuf, - int alen); - -ares_status_t ares_query_qid(ares_channel_t *channel, const char *name, - int dnsclass, int type, ares_callback callback, - void *arg, unsigned short *qid) +static void ares_query_dnsrec_cb(void *arg, ares_status_t status, + size_t timeouts, + const ares_dns_record_t *dnsrec) { - struct qquery *qquery; - unsigned char *qbuf; - int qlen; - int rd; - ares_status_t status; - - /* Compose the query. */ - rd = !(channel->flags & ARES_FLAG_NORECURSE); - status = (ares_status_t)ares_create_query( - name, dnsclass, type, 0, rd, &qbuf, &qlen, - (channel->flags & ARES_FLAG_EDNS) ? (int)channel->ednspsz : 0); + ares_query_dnsrec_arg_t *qquery = arg; + if (status != ARES_SUCCESS) { - if (qbuf != NULL) { - ares_free(qbuf); + qquery->callback(qquery->arg, status, timeouts, dnsrec); + } else { + size_t ancount; + ares_dns_rcode_t rcode; + /* Pull the response code and answer count from the packet and convert any + * errors. + */ + rcode = ares_dns_record_get_rcode(dnsrec); + ancount = ares_dns_record_rr_cnt(dnsrec, ARES_SECTION_ANSWER); + status = ares_dns_query_reply_tostatus(rcode, ancount); + qquery->callback(qquery->arg, status, timeouts, dnsrec); + } + ares_free(qquery); +} + +static ares_status_t ares_query_int(ares_channel_t *channel, const char *name, + ares_dns_class_t dnsclass, + ares_dns_rec_type_t type, + ares_callback_dnsrec callback, void *arg, + unsigned short *qid) +{ + ares_status_t status; + ares_dns_record_t *dnsrec = NULL; + ares_dns_flags_t flags = 0; + ares_query_dnsrec_arg_t *qquery = NULL; + + if (channel == NULL || name == NULL || callback == NULL) { + status = ARES_EFORMERR; + if (callback != NULL) { + callback(arg, status, 0, NULL); } - callback(arg, (int)status, 0, NULL, 0); return status; } - /* Allocate and fill in the query structure. */ - qquery = ares_malloc(sizeof(struct qquery)); - if (!qquery) { - ares_free_string(qbuf); - callback(arg, ARES_ENOMEM, 0, NULL, 0); - return ARES_ENOMEM; + if (!(channel->flags & ARES_FLAG_NORECURSE)) { + flags |= ARES_FLAG_RD; } + + status = ares_dns_record_create_query( + &dnsrec, name, dnsclass, type, 0, flags, + (size_t)(channel->flags & ARES_FLAG_EDNS) ? channel->ednspsz : 0); + if (status != ARES_SUCCESS) { + callback(arg, status, 0, NULL); + return status; + } + + qquery = ares_malloc(sizeof(*qquery)); + if (qquery == NULL) { + status = ARES_ENOMEM; + callback(arg, status, 0, NULL); + ares_dns_record_destroy(dnsrec); + return status; + } + qquery->callback = callback; qquery->arg = arg; /* Send it off. qcallback will be called when we get an answer. 
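The record-based query path being introduced in ares_query.c hands the parsed response straight to an ares_callback_dnsrec callback instead of a raw (abuf, alen) pair. A minimal caller sketch, assuming ares_query_dnsrec() is visible to the caller at this stage of the patch; the hostname and function names are illustrative:

#include <stdio.h>
#include "ares.h"

static void on_a_reply(void *arg, ares_status_t status, size_t timeouts,
                       const ares_dns_record_t *dnsrec)
{
  (void)arg;
  (void)timeouts;
  if (status != ARES_SUCCESS || dnsrec == NULL) {
    fprintf(stderr, "lookup failed: %s\n", ares_strerror((int)status));
    return;
  }
  printf("answer records: %u\n",
         (unsigned int)ares_dns_record_rr_cnt(dnsrec, ARES_SECTION_ANSWER));
}

static ares_status_t start_a_query(ares_channel_t *channel)
{
  return ares_query_dnsrec(channel, "www.example.com", ARES_CLASS_IN,
                           ARES_REC_TYPE_A, on_a_reply, NULL, NULL);
}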
*/ - status = ares_send_ex(channel, qbuf, (size_t)qlen, qcallback, qquery, qid); - ares_free_string(qbuf); + status = ares_send_dnsrec(channel, dnsrec, ares_query_dnsrec_cb, qquery, qid); + ares_dns_record_destroy(dnsrec); return status; } -void ares_query(ares_channel_t *channel, const char *name, int dnsclass, - int type, ares_callback callback, void *arg) +ares_status_t ares_query_dnsrec(ares_channel_t *channel, const char *name, + ares_dns_class_t dnsclass, + ares_dns_rec_type_t type, + ares_callback_dnsrec callback, void *arg, + unsigned short *qid) { + ares_status_t status; + if (channel == NULL) { - return; + return ARES_EFORMERR; } + ares__channel_lock(channel); - ares_query_qid(channel, name, dnsclass, type, callback, arg, NULL); + status = ares_query_int(channel, name, dnsclass, type, callback, arg, qid); ares__channel_unlock(channel); + return status; } -static void qcallback(void *arg, int status, int timeouts, unsigned char *abuf, - int alen) +void ares_query(ares_channel_t *channel, const char *name, int dnsclass, + int type, ares_callback callback, void *arg) { - struct qquery *qquery = (struct qquery *)arg; - size_t ancount; - int rcode; + void *carg = NULL; - if (status != ARES_SUCCESS) { - qquery->callback(qquery->arg, status, timeouts, abuf, alen); - } else { - /* Pull the response code and answer count from the packet. */ - rcode = DNS_HEADER_RCODE(abuf); - ancount = DNS_HEADER_ANCOUNT(abuf); - - /* Convert errors. */ - switch (rcode) { - case NOERROR: - status = (ancount > 0) ? ARES_SUCCESS : ARES_ENODATA; - break; - case FORMERR: - status = ARES_EFORMERR; - break; - case SERVFAIL: - status = ARES_ESERVFAIL; - break; - case NXDOMAIN: - status = ARES_ENOTFOUND; - break; - case NOTIMP: - status = ARES_ENOTIMP; - break; - case REFUSED: - status = ARES_EREFUSED; - break; - default: - break; - } - qquery->callback(qquery->arg, status, timeouts, abuf, alen); + if (channel == NULL) { + return; } - ares_free(qquery); + + carg = ares__dnsrec_convert_arg(callback, arg); + if (carg == NULL) { + callback(arg, ARES_ENOMEM, 0, NULL, 0); + return; + } + + ares_query_dnsrec(channel, name, (ares_dns_class_t)dnsclass, + (ares_dns_rec_type_t)type, ares__dnsrec_convert_cb, carg, + NULL); } diff --git a/deps/cares/src/lib/ares_search.c b/deps/cares/src/lib/ares_search.c index 429c7e1db0de26..4fd909cd4f8b9f 100644 --- a/deps/cares/src/lib/ares_search.c +++ b/deps/cares/src/lib/ares_search.c @@ -33,209 +33,437 @@ #include "ares.h" #include "ares_private.h" +#include "ares_dns.h" struct search_query { - /* Arguments passed to ares_search */ - ares_channel_t *channel; - char *name; /* copied into an allocated buffer */ - int dnsclass; - int type; - ares_callback callback; - void *arg; - char **domains; /* duplicate for ares_reinit() safety */ - size_t ndomains; - - int status_as_is; /* error status from trying as-is */ - size_t next_domain; /* next search domain to try */ - ares_bool_t trying_as_is; /* current query is for name as-is */ - size_t timeouts; /* number of timeouts we saw for this request */ + /* Arguments passed to ares_search_dnsrec() */ + ares_channel_t *channel; + ares_callback_dnsrec callback; + void *arg; + + /* Duplicate of DNS record passed to ares_search_dnsrec() */ + ares_dns_record_t *dnsrec; + + /* Search order for names */ + char **names; + size_t names_cnt; + + /* State tracking progress through the search query */ + size_t next_name_idx; /* next name index being attempted */ + size_t timeouts; /* number of timeouts we saw for this request */ ares_bool_t 
ever_got_nodata; /* did we ever get ARES_ENODATA along the way? */ }; -static void search_callback(void *arg, int status, int timeouts, - unsigned char *abuf, int alen); +static void squery_free(struct search_query *squery) +{ + if (squery == NULL) { + return; + } + ares__strsplit_free(squery->names, squery->names_cnt); + ares_dns_record_destroy(squery->dnsrec); + ares_free(squery); +} + +/* End a search query by invoking the user callback and freeing the + * search_query structure. + */ static void end_squery(struct search_query *squery, ares_status_t status, - unsigned char *abuf, size_t alen); + const ares_dns_record_t *dnsrec) +{ + squery->callback(squery->arg, status, squery->timeouts, dnsrec); + squery_free(squery); +} + +static void search_callback(void *arg, ares_status_t status, size_t timeouts, + const ares_dns_record_t *dnsrec); -static void ares_search_int(ares_channel_t *channel, const char *name, - int dnsclass, int type, ares_callback callback, - void *arg) +static ares_status_t ares_search_next(ares_channel_t *channel, + struct search_query *squery, + ares_bool_t *skip_cleanup) { - struct search_query *squery; - char *s; - const char *p; - ares_status_t status; - size_t ndots; + ares_status_t status; - /* Per RFC 7686, reject queries for ".onion" domain names with NXDOMAIN. */ - if (ares__is_onion_domain(name)) { - callback(arg, ARES_ENOTFOUND, 0, NULL, 0); - return; + *skip_cleanup = ARES_FALSE; + + /* Misuse check */ + if (squery->next_name_idx >= squery->names_cnt) { + return ARES_EFORMERR; } - /* If name only yields one domain to search, then we don't have - * to keep extra state, so just do an ares_query(). - */ - status = ares__single_domain(channel, name, &s); + status = ares_dns_record_query_set_name( + squery->dnsrec, 0, squery->names[squery->next_name_idx++]); if (status != ARES_SUCCESS) { - callback(arg, (int)status, 0, NULL, 0); + return status; + } + + status = + ares_send_dnsrec(channel, squery->dnsrec, search_callback, squery, NULL); + + if (status != ARES_EFORMERR) { + *skip_cleanup = ARES_TRUE; + } + + return status; +} + +static void search_callback(void *arg, ares_status_t status, size_t timeouts, + const ares_dns_record_t *dnsrec) +{ + struct search_query *squery = (struct search_query *)arg; + ares_channel_t *channel = squery->channel; + ares_dns_rcode_t rcode; + size_t ancount; + ares_status_t mystatus; + ares_bool_t skip_cleanup = ARES_FALSE; + + squery->timeouts += timeouts; + + if (status != ARES_SUCCESS) { + end_squery(squery, status, dnsrec); return; } - if (s) { - ares_query(channel, s, dnsclass, type, callback, arg); - ares_free(s); + + rcode = ares_dns_record_get_rcode(dnsrec); + ancount = ares_dns_record_rr_cnt(dnsrec, ARES_SECTION_ANSWER); + mystatus = ares_dns_query_reply_tostatus(rcode, ancount); + + if (mystatus != ARES_ENODATA && mystatus != ARES_ESERVFAIL && + mystatus != ARES_ENOTFOUND) { + end_squery(squery, mystatus, dnsrec); return; } - /* Allocate a search_query structure to hold the state necessary for - * doing multiple lookups. + /* If we ever get ARES_ENODATA along the way, record that; if the search + * should run to the very end and we got at least one ARES_ENODATA, + * then callers like ares_gethostbyname() may want to try a T_A search + * even if the last domain we queried for T_AAAA resource records + * returned ARES_ENOTFOUND. 
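search_callback() above first maps the reply to a status with ares_dns_query_reply_tostatus() and only advances to the next candidate name on the "soft" failures; everything else ends the search immediately. A small sketch of that decision (the helper name is made up; judging by the switch removed from qcallback() in ares_query.c, the mapping treats NOERROR with answers as ARES_SUCCESS, NOERROR without answers as ARES_ENODATA, NXDOMAIN as ARES_ENOTFOUND, and SERVFAIL as ARES_ESERVFAIL):

#include "ares.h"

static ares_bool_t try_next_candidate(ares_status_t status)
{
  switch (status) {
    case ARES_ENODATA: /* also recorded in squery->ever_got_nodata */
    case ARES_ESERVFAIL:
    case ARES_ENOTFOUND:
      return ARES_TRUE;
    default:
      return ARES_FALSE;
  }
}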
*/ - squery = ares_malloc_zero(sizeof(*squery)); - if (!squery) { - callback(arg, ARES_ENOMEM, 0, NULL, 0); + if (mystatus == ARES_ENODATA) { + squery->ever_got_nodata = ARES_TRUE; + } + + if (squery->next_name_idx < squery->names_cnt) { + mystatus = ares_search_next(channel, squery, &skip_cleanup); + if (mystatus != ARES_SUCCESS && !skip_cleanup) { + end_squery(squery, mystatus, NULL); + } return; } - squery->channel = channel; - squery->name = ares_strdup(name); - if (!squery->name) { - ares_free(squery); - callback(arg, ARES_ENOMEM, 0, NULL, 0); + + + /* We have no more domains to search, return an appropriate response. */ + if (mystatus == ARES_ENOTFOUND && squery->ever_got_nodata) { + end_squery(squery, ARES_ENODATA, NULL); return; } - /* Duplicate domains for safety during ares_reinit() */ - if (channel->ndomains) { - squery->domains = - ares__strsplit_duplicate(channel->domains, channel->ndomains); - if (squery->domains == NULL) { - ares_free(squery->name); - ares_free(squery); - callback(arg, ARES_ENOMEM, 0, NULL, 0); - return; + end_squery(squery, mystatus, NULL); +} + +/* Determine if the domain should be looked up as-is, or if it is eligible + * for search by appending domains */ +static ares_bool_t ares__search_eligible(const ares_channel_t *channel, + const char *name) +{ + size_t len = ares_strlen(name); + + /* Name ends in '.', cannot search */ + if (len && name[len - 1] == '.') { + return ARES_FALSE; + } + + if (channel->flags & ARES_FLAG_NOSEARCH) { + return ARES_FALSE; + } + + return ARES_TRUE; +} + +ares_status_t ares__search_name_list(const ares_channel_t *channel, + const char *name, char ***names, + size_t *names_len) +{ + ares_status_t status; + char **list = NULL; + size_t list_len = 0; + char *alias = NULL; + size_t ndots = 0; + size_t idx = 0; + const char *p; + size_t i; + + /* Perform HOSTALIASES resolution */ + status = ares__lookup_hostaliases(channel, name, &alias); + if (status == ARES_SUCCESS) { + /* If hostalias succeeds, there is no searching, it is used as-is */ + list_len = 1; + list = ares_malloc_zero(sizeof(*list) * list_len); + if (list == NULL) { + status = ARES_ENOMEM; + goto done; } - squery->ndomains = channel->ndomains; + list[0] = alias; + alias = NULL; + goto done; + } else if (status != ARES_ENOTFOUND) { + goto done; } - squery->dnsclass = dnsclass; - squery->type = type; - squery->status_as_is = -1; - squery->callback = callback; - squery->arg = arg; - squery->timeouts = 0; - squery->ever_got_nodata = ARES_FALSE; + /* See if searching is eligible at all, if not, look up as-is only */ + if (!ares__search_eligible(channel, name)) { + list_len = 1; + list = ares_malloc_zero(sizeof(*list) * list_len); + if (list == NULL) { + status = ARES_ENOMEM; + goto done; + } + list[0] = ares_strdup(name); + if (list[0] == NULL) { + status = ARES_ENOMEM; + } else { + status = ARES_SUCCESS; + } + goto done; + } - /* Count the number of dots in name. */ + /* Count the number of dots in name */ ndots = 0; - for (p = name; *p; p++) { + for (p = name; *p != 0; p++) { if (*p == '.') { ndots++; } } - /* If ndots is at least the channel ndots threshold (usually 1), - * then we try the name as-is first. Otherwise, we try the name - * as-is last. - */ - if (ndots >= channel->ndots || squery->ndomains == 0) { - /* Try the name as-is first. */ - squery->next_domain = 0; - squery->trying_as_is = ARES_TRUE; - ares_query(channel, name, dnsclass, type, search_callback, squery); - } else { - /* Try the name as-is last; start with the first search domain. 
*/ - squery->next_domain = 1; - squery->trying_as_is = ARES_FALSE; - status = ares__cat_domain(name, squery->domains[0], &s); - if (status == ARES_SUCCESS) { - ares_query(channel, s, dnsclass, type, search_callback, squery); - ares_free(s); - } else { - /* failed, free the malloc()ed memory */ - ares_free(squery->name); - ares_free(squery); - callback(arg, (int)status, 0, NULL, 0); + /* Allocate an entry for each search domain, plus one for as-is */ + list_len = channel->ndomains + 1; + list = ares_malloc_zero(sizeof(*list) * list_len); + if (list == NULL) { + status = ARES_ENOMEM; + goto done; + } + + /* Set status here, its possible there are no search domains at all, so + * status may be ARES_ENOTFOUND from ares__lookup_hostaliases(). */ + status = ARES_SUCCESS; + + /* Try as-is first */ + if (ndots >= channel->ndots) { + list[idx] = ares_strdup(name); + if (list[idx] == NULL) { + status = ARES_ENOMEM; + goto done; + } + idx++; + } + + /* Append each search suffix to the name */ + for (i = 0; i < channel->ndomains; i++) { + status = ares__cat_domain(name, channel->domains[i], &list[idx]); + if (status != ARES_SUCCESS) { + goto done; } + idx++; } + + /* Try as-is last */ + if (ndots < channel->ndots) { + list[idx] = ares_strdup(name); + if (list[idx] == NULL) { + status = ARES_ENOMEM; + goto done; + } + idx++; + } + + +done: + if (status == ARES_SUCCESS) { + *names = list; + *names_len = list_len; + } else { + ares__strsplit_free(list, list_len); + } + + ares_free(alias); + return status; } -void ares_search(ares_channel_t *channel, const char *name, int dnsclass, - int type, ares_callback callback, void *arg) +static ares_status_t ares_search_int(ares_channel_t *channel, + const ares_dns_record_t *dnsrec, + ares_callback_dnsrec callback, void *arg) { - if (channel == NULL) { - return; + struct search_query *squery = NULL; + const char *name; + ares_status_t status = ARES_SUCCESS; + ares_bool_t skip_cleanup = ARES_FALSE; + + /* Extract the name for the search. Note that searches are only supported for + * DNS records containing a single query. + */ + if (ares_dns_record_query_cnt(dnsrec) != 1) { + status = ARES_EBADQUERY; + goto fail; } - ares__channel_lock(channel); - ares_search_int(channel, name, dnsclass, type, callback, arg); - ares__channel_unlock(channel); + + status = ares_dns_record_query_get(dnsrec, 0, &name, NULL, NULL); + if (status != ARES_SUCCESS) { + goto fail; + } + + /* Per RFC 7686, reject queries for ".onion" domain names with NXDOMAIN. */ + if (ares__is_onion_domain(name)) { + status = ARES_ENOTFOUND; + goto fail; + } + + /* Allocate a search_query structure to hold the state necessary for + * doing multiple lookups. 
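The ordering built above follows the classic resolver ndots rule: the name is tried as-is first when it already contains at least channel->ndots dots, otherwise it is tried last, with each configured search domain appended in between. A standalone illustration of that ordering (a deliberately simplified reimplementation, not the library code; the names and domains are made up):

#include <stdio.h>

static void print_search_order(const char *name, const char **domains,
                               size_t ndomains, size_t ndots)
{
  size_t      dots = 0;
  size_t      i;
  const char *p;

  for (p = name; *p != 0; p++) {
    if (*p == '.') {
      dots++;
    }
  }

  if (dots >= ndots) {
    printf("%s\n", name); /* as-is first */
  }
  for (i = 0; i < ndomains; i++) {
    printf("%s.%s\n", name, domains[i]);
  }
  if (dots < ndots) {
    printf("%s\n", name); /* as-is last */
  }
}

int main(void)
{
  const char *domains[] = { "example.com", "example.net" };

  /* With ndots=1, "www" has no dots, so it is tried last:
   *   www.example.com, www.example.net, www */
  print_search_order("www", domains, 2, 1);
  return 0;
}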
+ */ + squery = ares_malloc_zero(sizeof(*squery)); + if (squery == NULL) { + status = ARES_ENOMEM; + goto fail; + } + + squery->channel = channel; + + /* Duplicate DNS record since, name will need to be rewritten */ + squery->dnsrec = ares_dns_record_duplicate(dnsrec); + if (squery->dnsrec == NULL) { + status = ARES_ENOMEM; + goto fail; + } + + squery->callback = callback; + squery->arg = arg; + squery->timeouts = 0; + squery->ever_got_nodata = ARES_FALSE; + + status = + ares__search_name_list(channel, name, &squery->names, &squery->names_cnt); + if (status != ARES_SUCCESS) { + goto fail; + } + + status = ares_search_next(channel, squery, &skip_cleanup); + if (status != ARES_SUCCESS) { + goto fail; + } + + return status; + +fail: + if (!skip_cleanup) { + squery_free(squery); + callback(arg, status, 0, NULL); + } + return status; } -static void search_callback(void *arg, int status, int timeouts, - unsigned char *abuf, int alen) +/* Callback argument structure passed to ares__dnsrec_convert_cb(). */ +typedef struct { + ares_callback callback; + void *arg; +} dnsrec_convert_arg_t; + +/*! Function to create callback arg for converting from ares_callback_dnsrec + * to ares_calback */ +void *ares__dnsrec_convert_arg(ares_callback callback, void *arg) { - struct search_query *squery = (struct search_query *)arg; - ares_channel_t *channel = squery->channel; - char *s; + dnsrec_convert_arg_t *carg = ares_malloc_zero(sizeof(*carg)); + if (carg == NULL) { + return NULL; + } + carg->callback = callback; + carg->arg = arg; + return carg; +} - squery->timeouts += (size_t)timeouts; +/*! Callback function used to convert from the ares_callback_dnsrec prototype to + * the ares_callback prototype, by writing the result and passing that to + * the inner callback. + */ +void ares__dnsrec_convert_cb(void *arg, ares_status_t status, size_t timeouts, + const ares_dns_record_t *dnsrec) +{ + dnsrec_convert_arg_t *carg = arg; + unsigned char *abuf = NULL; + size_t alen = 0; - /* Stop searching unless we got a non-fatal error. */ - if (status != ARES_ENODATA && status != ARES_ESERVFAIL && - status != ARES_ENOTFOUND) { - end_squery(squery, (ares_status_t)status, abuf, (size_t)alen); - } else { - /* Save the status if we were trying as-is. */ - if (squery->trying_as_is) { - squery->status_as_is = status; + if (dnsrec != NULL) { + ares_status_t mystatus = ares_dns_write(dnsrec, &abuf, &alen); + if (mystatus != ARES_SUCCESS) { + status = mystatus; } + } - /* - * If we ever get ARES_ENODATA along the way, record that; if the search - * should run to the very end and we got at least one ARES_ENODATA, - * then callers like ares_gethostbyname() may want to try a T_A search - * even if the last domain we queried for T_AAAA resource records - * returned ARES_ENOTFOUND. - */ - if (status == ARES_ENODATA) { - squery->ever_got_nodata = ARES_TRUE; - } + carg->callback(carg->arg, (int)status, (int)timeouts, abuf, (int)alen); - if (squery->next_domain < squery->ndomains) { - ares_status_t mystatus; - /* Try the next domain. */ - mystatus = ares__cat_domain(squery->name, - squery->domains[squery->next_domain], &s); - if (mystatus != ARES_SUCCESS) { - end_squery(squery, mystatus, NULL, 0); - } else { - squery->trying_as_is = ARES_FALSE; - squery->next_domain++; - ares_query(channel, s, squery->dnsclass, squery->type, search_callback, - squery); - ares_free(s); - } - } else if (squery->status_as_is == -1) { - /* Try the name as-is at the end. 
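The conversion helpers defined above are what let the old buffer-based entry points ride on top of the record-based API: ares__dnsrec_convert_arg() captures the legacy callback, and ares__dnsrec_convert_cb() serializes the response with ares_dns_write() before invoking it. A sketch of that pattern, shaped like the new ares_query() body (the query name and wrapper name are illustrative):

#include "ares_private.h"

static void legacy_style_a_query(ares_channel_t *channel,
                                 ares_callback legacy_cb, void *legacy_arg)
{
  void *carg = ares__dnsrec_convert_arg(legacy_cb, legacy_arg);

  if (carg == NULL) {
    legacy_cb(legacy_arg, ARES_ENOMEM, 0, NULL, 0);
    return;
  }

  /* The convert callback turns the ares_dns_record_t response back into the
   * (abuf, alen) pair the legacy callback expects, then frees carg. */
  ares_query_dnsrec(channel, "www.example.com", ARES_CLASS_IN,
                    ARES_REC_TYPE_A, ares__dnsrec_convert_cb, carg, NULL);
}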
*/ - squery->trying_as_is = ARES_TRUE; - ares_query(channel, squery->name, squery->dnsclass, squery->type, - search_callback, squery); - } else { - if (squery->status_as_is == ARES_ENOTFOUND && squery->ever_got_nodata) { - end_squery(squery, ARES_ENODATA, NULL, 0); - } else { - end_squery(squery, (ares_status_t)squery->status_as_is, NULL, 0); - } - } + ares_free(abuf); + ares_free(carg); +} + +/* Search for a DNS name with given class and type. Wrapper around + * ares_search_int() where the DNS record to search is first constructed. + */ +void ares_search(ares_channel_t *channel, const char *name, int dnsclass, + int type, ares_callback callback, void *arg) +{ + ares_status_t status; + ares_dns_record_t *dnsrec = NULL; + size_t max_udp_size; + ares_dns_flags_t rd_flag; + void *carg = NULL; + if (channel == NULL || name == NULL) { + return; + } + + /* For now, ares_search_int() uses the ares_callback prototype. We need to + * wrap the callback passed to this function in ares__dnsrec_convert_cb, to + * convert from ares_callback_dnsrec to ares_callback. Allocate the convert + * arg structure here. + */ + carg = ares__dnsrec_convert_arg(callback, arg); + if (carg == NULL) { + callback(arg, ARES_ENOMEM, 0, NULL, 0); + return; + } + + rd_flag = !(channel->flags & ARES_FLAG_NORECURSE) ? ARES_FLAG_RD : 0; + max_udp_size = (channel->flags & ARES_FLAG_EDNS) ? channel->ednspsz : 0; + status = ares_dns_record_create_query( + &dnsrec, name, (ares_dns_class_t)dnsclass, (ares_dns_rec_type_t)type, 0, + rd_flag, max_udp_size); + if (status != ARES_SUCCESS) { + callback(arg, (int)status, 0, NULL, 0); + ares_free(carg); + return; } + + ares__channel_lock(channel); + ares_search_int(channel, dnsrec, ares__dnsrec_convert_cb, carg); + ares__channel_unlock(channel); + + ares_dns_record_destroy(dnsrec); } -static void end_squery(struct search_query *squery, ares_status_t status, - unsigned char *abuf, size_t alen) +/* Search for a DNS record. Wrapper around ares_search_int(). */ +ares_status_t ares_search_dnsrec(ares_channel_t *channel, + const ares_dns_record_t *dnsrec, + ares_callback_dnsrec callback, void *arg) { - squery->callback(squery->arg, (int)status, (int)squery->timeouts, abuf, - (int)alen); - ares__strsplit_free(squery->domains, squery->ndomains); - ares_free(squery->name); - ares_free(squery); + ares_status_t status; + + if (channel == NULL || dnsrec == NULL || callback == NULL) { + return ARES_EFORMERR; + } + + ares__channel_lock(channel); + status = ares_search_int(channel, dnsrec, callback, arg); + ares__channel_unlock(channel); + + return status; } /* Concatenate two domains. */ @@ -260,89 +488,113 @@ ares_status_t ares__cat_domain(const char *name, const char *domain, char **s) return ARES_SUCCESS; } -/* Determine if this name only yields one query. If it does, set *s to - * the string we should query, in an allocated buffer. If not, set *s - * to NULL. 
- */ -ares_status_t ares__single_domain(const ares_channel_t *channel, - const char *name, char **s) +ares_status_t ares__lookup_hostaliases(const ares_channel_t *channel, + const char *name, char **alias) { - size_t len = ares_strlen(name); - const char *hostaliases; - FILE *fp; - char *line = NULL; - ares_status_t status; - size_t linesize; - const char *p; - const char *q; - int error; + ares_status_t status = ARES_SUCCESS; + const char *hostaliases = NULL; + ares__buf_t *buf = NULL; + ares__llist_t *lines = NULL; + ares__llist_node_t *node; + + if (channel == NULL || name == NULL || alias == NULL) { + return ARES_EFORMERR; + } + + *alias = NULL; + + /* Configuration says to not perform alias lookup */ + if (channel->flags & ARES_FLAG_NOALIASES) { + return ARES_ENOTFOUND; + } + + /* If a domain has a '.', its not allowed to perform an alias lookup */ + if (strchr(name, '.') != NULL) { + return ARES_ENOTFOUND; + } - /* If the name contains a trailing dot, then the single query is the name - * sans the trailing dot. + hostaliases = getenv("HOSTALIASES"); + if (hostaliases == NULL) { + status = ARES_ENOTFOUND; + goto done; + } + + buf = ares__buf_create(); + if (buf == NULL) { + status = ARES_ENOMEM; + goto done; + } + + status = ares__buf_load_file(hostaliases, buf); + if (status != ARES_SUCCESS) { + goto done; + } + + /* The HOSTALIASES file is structured as one alias per line. The first + * field in the line is the simple hostname with no periods, followed by + * whitespace, then the full domain name, e.g.: + * + * c-ares www.c-ares.org + * curl www.curl.se */ - if ((len > 0) && (name[len - 1] == '.')) { - *s = ares_strdup(name); - return (*s) ? ARES_SUCCESS : ARES_ENOMEM; - } - - if (!(channel->flags & ARES_FLAG_NOALIASES) && !strchr(name, '.')) { - /* The name might be a host alias. */ - hostaliases = getenv("HOSTALIASES"); - if (hostaliases) { - fp = fopen(hostaliases, "r"); - if (fp) { - while ((status = ares__read_line(fp, &line, &linesize)) == - ARES_SUCCESS) { - if (strncasecmp(line, name, len) != 0 || !ISSPACE(line[len])) { - continue; - } - p = line + len; - while (ISSPACE(*p)) { - p++; - } - if (*p) { - q = p + 1; - while (*q && !ISSPACE(*q)) { - q++; - } - *s = ares_malloc((size_t)(q - p + 1)); - if (*s) { - memcpy(*s, p, (size_t)(q - p)); - (*s)[q - p] = 0; - } - ares_free(line); - fclose(fp); - return (*s) ? ARES_SUCCESS : ARES_ENOMEM; - } - } - ares_free(line); - fclose(fp); - if (status != ARES_SUCCESS && status != ARES_EOF) { - return status; - } - } else { - error = ERRNO; - switch (error) { - case ENOENT: - case ESRCH: - break; - default: - DEBUGF(fprintf(stderr, "fopen() failed with error: %d %s\n", error, - strerror(error))); - DEBUGF(fprintf(stderr, "Error opening file: %s\n", hostaliases)); - *s = NULL; - return ARES_EFILE; - } - } - } + + status = ares__buf_split(buf, (const unsigned char *)"\n", 1, + ARES_BUF_SPLIT_TRIM, 0, &lines); + if (status != ARES_SUCCESS) { + goto done; } - if (channel->flags & ARES_FLAG_NOSEARCH || channel->ndomains == 0) { - /* No domain search to do; just try the name as-is. */ - *s = ares_strdup(name); - return (*s) ? 
ARES_SUCCESS : ARES_ENOMEM; + for (node = ares__llist_node_first(lines); node != NULL; + node = ares__llist_node_next(node)) { + ares__buf_t *line = ares__llist_node_val(node); + char hostname[64] = ""; + char fqdn[256] = ""; + + /* Pull off hostname */ + ares__buf_tag(line); + ares__buf_consume_nonwhitespace(line); + if (ares__buf_tag_fetch_string(line, hostname, sizeof(hostname)) != + ARES_SUCCESS) { + continue; + } + + /* Match hostname */ + if (strcasecmp(hostname, name) != 0) { + continue; + } + + /* consume whitespace */ + ares__buf_consume_whitespace(line, ARES_TRUE); + + /* pull off fqdn */ + ares__buf_tag(line); + ares__buf_consume_nonwhitespace(line); + if (ares__buf_tag_fetch_string(line, fqdn, sizeof(fqdn)) != ARES_SUCCESS || + ares_strlen(fqdn) == 0) { + continue; + } + + /* Validate characterset */ + if (!ares__is_hostname(fqdn)) { + continue; + } + + *alias = ares_strdup(fqdn); + if (*alias == NULL) { + status = ARES_ENOMEM; + goto done; + } + + /* Good! */ + status = ARES_SUCCESS; + goto done; } - *s = NULL; - return ARES_SUCCESS; + status = ARES_ENOTFOUND; + +done: + ares__buf_destroy(buf); + ares__llist_destroy(lines); + + return status; } diff --git a/deps/cares/src/lib/ares_send.c b/deps/cares/src/lib/ares_send.c index 6cefdb6a36a87e..54f2b504d50cac 100644 --- a/deps/cares/src/lib/ares_send.c +++ b/deps/cares/src/lib/ares_send.c @@ -48,52 +48,47 @@ static unsigned short generate_unique_qid(ares_channel_t *channel) return id; } -ares_status_t ares_send_ex(ares_channel_t *channel, const unsigned char *qbuf, - size_t qlen, ares_callback callback, void *arg, - unsigned short *qid) +static ares_status_t ares_send_dnsrec_int(ares_channel_t *channel, + const ares_dns_record_t *dnsrec, + ares_callback_dnsrec callback, + void *arg, unsigned short *qid) { - struct query *query; - size_t packetsz; - struct timeval now = ares__tvnow(); - ares_status_t status; - unsigned short id = generate_unique_qid(channel); - unsigned char *abuf = NULL; - size_t alen = 0; + struct query *query; + size_t packetsz; + struct timeval now = ares__tvnow(); + ares_status_t status; + unsigned short id = generate_unique_qid(channel); + const ares_dns_record_t *dnsrec_resp = NULL; - /* Verify that the query is at least long enough to hold the header. */ - if (qlen < HFIXEDSZ || qlen >= (1 << 16)) { - callback(arg, ARES_EBADQUERY, 0, NULL, 0); - return ARES_EBADQUERY; - } if (ares__slist_len(channel->servers) == 0) { - callback(arg, ARES_ENOSERVER, 0, NULL, 0); + callback(arg, ARES_ENOSERVER, 0, NULL); return ARES_ENOSERVER; } /* Check query cache */ - status = ares_qcache_fetch(channel, &now, qbuf, qlen, &abuf, &alen); + status = ares_qcache_fetch(channel, &now, dnsrec, &dnsrec_resp); if (status != ARES_ENOTFOUND) { /* ARES_SUCCESS means we retrieved the cache, anything else is a critical * failure, all result in termination */ - callback(arg, (int)status, 0, abuf, (int)alen); - ares_free(abuf); + callback(arg, status, 0, dnsrec_resp); return status; } /* Allocate space for query and allocated fields. 
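For reference, only a dotless name is eligible for the HOSTALIASES lookup implemented above, and the file uses the two-column alias/FQDN layout described in the comment. A minimal sketch of exercising it through the long-standing public API; the alias file path and its contents are invented for illustration, and the event loop is omitted:

#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <netdb.h>
#include <ares.h>

/* Completion callback for the alias lookup. */
static void host_cb(void *arg, int status, int timeouts, struct hostent *host)
{
  (void)arg;
  (void)timeouts;
  if (status != ARES_SUCCESS || host == NULL) {
    fprintf(stderr, "lookup failed: %s\n", ares_strerror(status));
    return;
  }
  printf("alias resolved, canonical name: %s\n", host->h_name);
}

int main(void)
{
  ares_channel_t *channel = NULL;

  /* Hypothetical alias file containing a line such as:
   *   c-ares www.c-ares.org */
  setenv("HOSTALIASES", "/tmp/hostaliases", 1);

  ares_library_init(ARES_LIB_INIT_ALL);
  if (ares_init(&channel) != ARES_SUCCESS) {
    return 1;
  }

  /* "c-ares" contains no '.', so the alias file is consulted. */
  ares_gethostbyname(channel, "c-ares", AF_INET, host_cb, NULL);

  /* ares_fds()/ares_process_fd() event loop omitted for brevity. */
  ares_destroy(channel);
  ares_library_cleanup();
  return 0;
}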
*/ query = ares_malloc(sizeof(struct query)); if (!query) { - callback(arg, ARES_ENOMEM, 0, NULL, 0); + callback(arg, ARES_ENOMEM, 0, NULL); return ARES_ENOMEM; } memset(query, 0, sizeof(*query)); query->channel = channel; - query->qbuf = ares_malloc(qlen); - if (!query->qbuf) { + + status = ares_dns_write(dnsrec, &query->qbuf, &query->qlen); + if (status != ARES_SUCCESS) { ares_free(query); - callback(arg, ARES_ENOMEM, 0, NULL, 0); - return ARES_ENOMEM; + callback(arg, status, 0, NULL); + return status; } query->qid = id; @@ -103,8 +98,6 @@ ares_status_t ares_send_ex(ares_channel_t *channel, const unsigned char *qbuf, /* Ignore first 2 bytes, assign our own query id */ query->qbuf[0] = (unsigned char)((id >> 8) & 0xFF); query->qbuf[1] = (unsigned char)(id & 0xFF); - memcpy(query->qbuf + 2, qbuf + 2, qlen - 2); - query->qlen = qlen; /* Fill in query arguments. */ query->callback = callback; @@ -114,7 +107,8 @@ ares_status_t ares_send_ex(ares_channel_t *channel, const unsigned char *qbuf, query->try_count = 0; packetsz = (channel->flags & ARES_FLAG_EDNS) ? channel->ednspsz : PACKETSZ; - query->using_tcp = (channel->flags & ARES_FLAG_USEVC) || qlen > packetsz; + query->using_tcp = + (channel->flags & ARES_FLAG_USEVC) || query->qlen > packetsz; query->error_status = ARES_SUCCESS; query->timeouts = 0; @@ -127,7 +121,7 @@ ares_status_t ares_send_ex(ares_channel_t *channel, const unsigned char *qbuf, query->node_all_queries = ares__llist_insert_last(channel->all_queries, query); if (query->node_all_queries == NULL) { - callback(arg, ARES_ENOMEM, 0, NULL, 0); + callback(arg, ARES_ENOMEM, 0, NULL); ares__free_query(query); return ARES_ENOMEM; } @@ -136,7 +130,7 @@ ares_status_t ares_send_ex(ares_channel_t *channel, const unsigned char *qbuf, * responses quickly. */ if (!ares__htable_szvp_insert(channel->queries_by_qid, query->qid, query)) { - callback(arg, ARES_ENOMEM, 0, NULL, 0); + callback(arg, ARES_ENOMEM, 0, NULL); ares__free_query(query); return ARES_ENOMEM; } @@ -150,18 +144,60 @@ ares_status_t ares_send_ex(ares_channel_t *channel, const unsigned char *qbuf, return status; } +ares_status_t ares_send_dnsrec(ares_channel_t *channel, + const ares_dns_record_t *dnsrec, + ares_callback_dnsrec callback, void *arg, + unsigned short *qid) +{ + ares_status_t status; + + if (channel == NULL) { + return ARES_EFORMERR; + } + + ares__channel_lock(channel); + + status = ares_send_dnsrec_int(channel, dnsrec, callback, arg, qid); + + ares__channel_unlock(channel); + + return status; +} + void ares_send(ares_channel_t *channel, const unsigned char *qbuf, int qlen, ares_callback callback, void *arg) { + ares_dns_record_t *dnsrec = NULL; + ares_status_t status; + void *carg = NULL; + if (channel == NULL) { return; } - ares__channel_lock(channel); + /* Verify that the query is at least long enough to hold the header. 
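The record-based submission path added above (ares_send_dnsrec(), backed by ares_dns_write() for the wire copy) can be driven roughly as follows. This is a sketch, not canonical usage: the callback prototype mirrors how ares_callback_dnsrec is invoked in this file (arg, status, timeouts, response record), and the record helpers (ares_dns_record_create_query(), ares_dns_record_rr_cnt(), ARES_SECTION_ANSWER, ARES_CLASS_IN, ARES_REC_TYPE_A) are assumed from the ares_dns_record API shipped with this c-ares version:

#include <stdio.h>
#include <ares.h>

/* The response arrives as an already-parsed DNS record. */
static void dnsrec_cb(void *arg, ares_status_t status, size_t timeouts,
                      const ares_dns_record_t *dnsrec)
{
  (void)arg;
  (void)timeouts;
  if (status != ARES_SUCCESS || dnsrec == NULL) {
    fprintf(stderr, "query failed: %s\n", ares_strerror((int)status));
    return;
  }
  printf("answer records: %u\n",
         (unsigned int)ares_dns_record_rr_cnt(dnsrec, ARES_SECTION_ANSWER));
}

static ares_status_t send_a_query(ares_channel_t *channel, const char *name)
{
  ares_dns_record_t *dnsrec = NULL;
  ares_status_t      status;

  /* qid 0 and no EDNS size keep the sketch simple; ares_search() above
   * derives the RD flag and UDP size from the channel flags instead. */
  status = ares_dns_record_create_query(&dnsrec, name, ARES_CLASS_IN,
                                        ARES_REC_TYPE_A, 0, ARES_FLAG_RD, 0);
  if (status != ARES_SUCCESS) {
    return status;
  }

  status = ares_send_dnsrec(channel, dnsrec, dnsrec_cb, NULL, NULL);

  /* ares_send_dnsrec() serializes its own copy via ares_dns_write(), so
   * the caller-owned record can be destroyed immediately. */
  ares_dns_record_destroy(dnsrec);
  return status;
}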
*/ + if (qlen < HFIXEDSZ || qlen >= (1 << 16)) { + callback(arg, ARES_EBADQUERY, 0, NULL, 0); + return; + } - ares_send_ex(channel, qbuf, (size_t)qlen, callback, arg, NULL); + status = ares_dns_parse(qbuf, (size_t)qlen, 0, &dnsrec); + if (status != ARES_SUCCESS) { + callback(arg, (int)status, 0, NULL, 0); + return; + } - ares__channel_unlock(channel); + carg = ares__dnsrec_convert_arg(callback, arg); + if (carg == NULL) { + status = ARES_ENOMEM; + ares_dns_record_destroy(dnsrec); + callback(arg, (int)status, 0, NULL, 0); + return; + } + + ares_send_dnsrec(channel, dnsrec, ares__dnsrec_convert_cb, carg, NULL); + + ares_dns_record_destroy(dnsrec); } size_t ares_queue_active_queries(ares_channel_t *channel) diff --git a/deps/cares/src/lib/ares_str.c b/deps/cares/src/lib/ares_str.c index 80660136dac8e1..5f25cfeaff041e 100644 --- a/deps/cares/src/lib/ares_str.c +++ b/deps/cares/src/lib/ares_str.c @@ -110,6 +110,54 @@ ares_bool_t ares_str_isnum(const char *str) return ARES_TRUE; } +void ares__str_rtrim(char *str) +{ + size_t len; + size_t i; + + if (str == NULL) { + return; + } + + len = ares_strlen(str); + for (i = len; i > 0; i--) { + if (!ares__isspace(str[i - 1])) { + break; + } + } + str[i] = 0; +} + +void ares__str_ltrim(char *str) +{ + size_t i; + size_t len; + + if (str == NULL) { + return; + } + + for (i = 0; str[i] != 0 && ares__isspace(str[i]); i++) { + /* Do nothing */ + } + + if (i == 0) { + return; + } + + len = ares_strlen(str); + if (i != len) { + memmove(str, str + i, len - i); + } + str[len - i] = 0; +} + +void ares__str_trim(char *str) +{ + ares__str_ltrim(str); + ares__str_rtrim(str); +} + /* tolower() is locale-specific. Use a lookup table fast conversion that only * operates on ASCII */ static const unsigned char ares__tolower_lookup[] = { @@ -151,3 +199,71 @@ ares_bool_t ares__memeq_ci(const unsigned char *ptr, const unsigned char *val, } return ARES_TRUE; } + +ares_bool_t ares__isspace(int ch) +{ + switch (ch) { + case '\r': + case '\t': + case ' ': + case '\v': + case '\f': + case '\n': + return ARES_TRUE; + default: + break; + } + return ARES_FALSE; +} + +ares_bool_t ares__isprint(int ch) +{ + if (ch >= 0x20 && ch <= 0x7E) { + return ARES_TRUE; + } + return ARES_FALSE; +} + +/* Character set allowed by hostnames. This is to include the normal + * domain name character set plus: + * - underscores which are used in SRV records. + * - Forward slashes such as are used for classless in-addr.arpa + * delegation (CNAMEs) + * - Asterisks may be used for wildcard domains in CNAMEs as seen in the + * real world. + * While RFC 2181 section 11 does state not to do validation, + * that applies to servers, not clients. Vulnerabilities have been + * reported when this validation is not performed. Security is more + * important than edge-case compatibility (which is probably invalid + * anyhow). */ +ares_bool_t ares__is_hostnamech(int ch) +{ + /* [A-Za-z0-9-*._/] + * Don't use isalnum() as it is locale-specific + */ + if (ch >= 'A' && ch <= 'Z') { + return ARES_TRUE; + } + if (ch >= 'a' && ch <= 'z') { + return ARES_TRUE; + } + if (ch >= '0' && ch <= '9') { + return ARES_TRUE; + } + if (ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '*') { + return ARES_TRUE; + } + + return ARES_FALSE; +} + +ares_bool_t ares__is_hostname(const char *str) +{ + size_t i; + for (i = 0; str[i] != 0; i++) { + if (!ares__is_hostnamech(str[i])) { + return ARES_FALSE; + } + } + return ARES_TRUE; +} diff --git a/deps/cares/src/lib/ares_str.h b/deps/cares/src/lib/ares_str.h index 2bf32d0d253da7..8d869073d8153c 100644 --- a/deps/cares/src/lib/ares_str.h +++ b/deps/cares/src/lib/ares_str.h @@ -24,8 +24,8 @@ * * SPDX-License-Identifier: MIT */ -#ifndef HEADER_CARES_STRDUP_H -#define HEADER_CARES_STRDUP_H +#ifndef __ARES_STR_H +#define __ARES_STR_H #include "ares_setup.h" #include "ares.h" @@ -48,8 +48,19 @@ size_t ares_strcpy(char *dest, const char *src, size_t dest_size); ares_bool_t ares_str_isnum(const char *str); +void ares__str_ltrim(char *str); +void ares__str_rtrim(char *str); +void ares__str_trim(char *str); + unsigned char ares__tolower(unsigned char c); ares_bool_t ares__memeq_ci(const unsigned char *ptr, const unsigned char *val, size_t len); -#endif /* HEADER_CARES_STRDUP_H */ +ares_bool_t ares__isspace(int ch); +ares_bool_t ares__isprint(int ch); +ares_bool_t ares__is_hostnamech(int ch); + +ares_bool_t ares__is_hostname(const char *str); + + +#endif /* __ARES_STR_H */ diff --git a/deps/cares/src/lib/ares_strsplit.c b/deps/cares/src/lib/ares_strsplit.c index 5ec615c76482ed..395bf1ebb9a5ec 100644 --- a/deps/cares/src/lib/ares_strsplit.c +++ b/deps/cares/src/lib/ares_strsplit.c @@ -94,7 +94,7 @@ char **ares__strsplit(const char *in, const char *delms, size_t *num_elm) status = ares__buf_split( buf, (const unsigned char *)delms, ares_strlen(delms), - ARES_BUF_SPLIT_NO_DUPLICATES | ARES_BUF_SPLIT_CASE_INSENSITIVE, &llist); + ARES_BUF_SPLIT_NO_DUPLICATES | ARES_BUF_SPLIT_CASE_INSENSITIVE, 0, &llist); if (status != ARES_SUCCESS) { goto done; } diff --git a/deps/cares/src/lib/ares_sysconfig.c b/deps/cares/src/lib/ares_sysconfig.c index 825008b7b8a543..474534512af191 100644 --- a/deps/cares/src/lib/ares_sysconfig.c +++ b/deps/cares/src/lib/ares_sysconfig.c @@ -954,7 +954,7 @@ static ares_status_t ares__init_sysconfig_libresolv(ares_sysconfig_t *sysconfig) } } - if (res.ndots > 0) { + if (res.ndots >= 0) { sysconfig->ndots = (size_t)res.ndots; } if (res.retry > 0) { @@ -1059,6 +1059,10 @@ static ares_status_t ares_sysconfig_apply(ares_channel_t *channel, channel->rotate = sysconfig->rotate; } + if (sysconfig->usevc) { + channel->flags |= ARES_FLAG_USEVC; + } + return ARES_SUCCESS; } diff --git a/deps/cares/src/lib/ares_sysconfig_files.c b/deps/cares/src/lib/ares_sysconfig_files.c index 9802c7e54a5419..557888bc740a39 100644 --- a/deps/cares/src/lib/ares_sysconfig_files.c +++ b/deps/cares/src/lib/ares_sysconfig_files.c @@ -134,7 +134,7 @@ static ares_status_t parse_sort(ares__buf_t *buf, struct apattern *pat) ares__buf_tag(buf); /* Consume ip address */ - if (ares__buf_consume_charset(buf, ip_charset, sizeof(ip_charset)) == 0) { + if (ares__buf_consume_charset(buf, ip_charset, sizeof(ip_charset) - 1) == 0) { return ARES_EBADSTR; } @@ -162,8 +162,8 @@ static ares_status_t parse_sort(ares__buf_t *buf, struct apattern *pat) ares__buf_tag(buf); /* Consume mask */ - if (ares__buf_consume_charset(buf, ipv4_charset, sizeof(ipv4_charset)) == - 0) { + if (ares__buf_consume_charset(buf, ipv4_charset, + sizeof(ipv4_charset) - 1) == 0) { return ARES_EBADSTR; } @@ -241,7 +241,7 @@ ares_status_t ares__parse_sortlist(struct apattern **sortlist, size_t *nsort, /* Split on space or semicolon */ status = ares__buf_split(buf, 
(const unsigned char *)" ;", 2, - ARES_BUF_SPLIT_NONE, &list); + ARES_BUF_SPLIT_NONE, 0, &list); if (status != ARES_SUCCESS) { goto done; } @@ -282,7 +282,8 @@ ares_status_t ares__parse_sortlist(struct apattern **sortlist, size_t *nsort, return status; } -static ares_status_t config_search(ares_sysconfig_t *sysconfig, const char *str) +static ares_status_t config_search(ares_sysconfig_t *sysconfig, const char *str, + size_t max_domains) { if (sysconfig->domains && sysconfig->ndomains > 0) { /* if we already have some domains present, free them first */ @@ -296,410 +297,515 @@ static ares_status_t config_search(ares_sysconfig_t *sysconfig, const char *str) return ARES_ENOMEM; } + /* Truncate if necessary */ + if (max_domains && sysconfig->ndomains > max_domains) { + size_t i; + for (i = max_domains; i < sysconfig->ndomains; i++) { + ares_free(sysconfig->domains[i]); + sysconfig->domains[i] = NULL; + } + sysconfig->ndomains = max_domains; + } + return ARES_SUCCESS; } -static ares_status_t config_domain(ares_sysconfig_t *sysconfig, char *str) +static ares_status_t buf_fetch_string(ares__buf_t *buf, char *str, + size_t str_len) { - char *q; - - /* Set a single search domain. */ - q = str; - while (*q && !ISSPACE(*q)) { - q++; - } - *q = '\0'; + ares_status_t status; + ares__buf_tag(buf); + ares__buf_consume(buf, ares__buf_len(buf)); - return config_search(sysconfig, str); + status = ares__buf_tag_fetch_string(buf, str, str_len); + return status; } -static ares_status_t config_lookup(ares_sysconfig_t *sysconfig, const char *str, - const char *bindch, const char *altbindch, - const char *filech) +static ares_status_t config_lookup(ares_sysconfig_t *sysconfig, + ares__buf_t *buf, const char *separators) { - char lookups[3]; - char *l; - const char *p; - ares_bool_t found; - - if (altbindch == NULL) { - altbindch = bindch; + ares_status_t status; + char lookupstr[32]; + size_t lookupstr_cnt = 0; + ares__llist_t *lookups = NULL; + ares__llist_node_t *node; + size_t separators_len = ares_strlen(separators); + + status = ares__buf_split(buf, (const unsigned char *)separators, + separators_len, ARES_BUF_SPLIT_TRIM, 0, &lookups); + if (status != ARES_SUCCESS) { + goto done; } - /* Set the lookup order. Only the first letter of each work - * is relevant, and it has to be "b" for DNS or "f" for the - * host file. Ignore everything else. 
- */ - l = lookups; - p = str; - found = ARES_FALSE; - while (*p) { - if ((*p == *bindch || *p == *altbindch || *p == *filech) && - l < lookups + 2) { - if (*p == *bindch || *p == *altbindch) { - *l++ = 'b'; - } else { - *l++ = 'f'; - } - found = ARES_TRUE; + memset(lookupstr, 0, sizeof(lookupstr)); + + for (node = ares__llist_node_first(lookups); node != NULL; + node = ares__llist_node_next(node)) { + char value[128]; + char ch; + ares__buf_t *valbuf = ares__llist_node_val(node); + + status = buf_fetch_string(valbuf, value, sizeof(value)); + if (status != ARES_SUCCESS) { + continue; } - while (*p && !ISSPACE(*p) && (*p != ',')) { - p++; + + if (strcasecmp(value, "dns") == 0 || strcasecmp(value, "bind") == 0 || + strcasecmp(value, "resolv") == 0 || strcasecmp(value, "resolve") == 0) { + ch = 'b'; + } else if (strcasecmp(value, "files") == 0 || + strcasecmp(value, "file") == 0 || + strcasecmp(value, "local") == 0) { + ch = 'f'; + } else { + continue; } - while (*p && (ISSPACE(*p) || (*p == ','))) { - p++; + + /* Look for a duplicate and ignore */ + if (memchr(lookupstr, ch, lookupstr_cnt) == NULL) { + lookupstr[lookupstr_cnt++] = ch; } } - if (!found) { - return ARES_ENOTINITIALIZED; + + if (lookupstr_cnt) { + ares_free(sysconfig->lookups); + sysconfig->lookups = ares_strdup(lookupstr); + if (sysconfig->lookups == NULL) { + return ARES_ENOMEM; + } } - *l = '\0'; - ares_free(sysconfig->lookups); - sysconfig->lookups = ares_strdup(lookups); - if (sysconfig->lookups == NULL) { - return ARES_ENOMEM; + status = ARES_SUCCESS; + +done: + if (status != ARES_ENOMEM) { + status = ARES_SUCCESS; } - return ARES_SUCCESS; + ares__llist_destroy(lookups); + return status; } -static const char *try_option(const char *p, const char *q, const char *opt) +static ares_status_t process_option(ares_sysconfig_t *sysconfig, + ares__buf_t *option) { - size_t len = ares_strlen(opt); - return ((size_t)(q - p) >= len && !strncmp(p, opt, len)) ? 
&p[len] : NULL; + ares__llist_t *kv = NULL; + char key[32] = ""; + char val[32] = ""; + unsigned int valint = 0; + ares_status_t status; + + /* Split on : */ + status = ares__buf_split(option, (const unsigned char *)":", 1, + ARES_BUF_SPLIT_TRIM, 2, &kv); + if (status != ARES_SUCCESS) { + goto done; + } + + status = buf_fetch_string(ares__llist_first_val(kv), key, sizeof(key)); + if (status != ARES_SUCCESS) { + goto done; + } + if (ares__llist_len(kv) == 2) { + status = buf_fetch_string(ares__llist_last_val(kv), val, sizeof(val)); + if (status != ARES_SUCCESS) { + goto done; + } + valint = (unsigned int)strtoul(val, NULL, 10); + } + + if (strcmp(key, "ndots") == 0) { + sysconfig->ndots = valint; + } else if (strcmp(key, "retrans") == 0 || strcmp(key, "timeout") == 0) { + if (valint == 0) { + return ARES_EFORMERR; + } + sysconfig->timeout_ms = valint * 1000; + } else if (strcmp(key, "retry") == 0 || strcmp(key, "attempts") == 0) { + if (valint == 0) { + return ARES_EFORMERR; + } + sysconfig->tries = valint; + } else if (strcmp(key, "rotate") == 0) { + sysconfig->rotate = ARES_TRUE; + } else if (strcmp(key, "use-vc") == 0 || strcmp(key, "usevc") == 0) { + sysconfig->usevc = ARES_TRUE; + } + +done: + ares__llist_destroy(kv); + return status; } static ares_status_t set_options(ares_sysconfig_t *sysconfig, const char *str) { - const char *p; - const char *q; - const char *val; + ares__buf_t *buf = NULL; + ares__llist_t *options = NULL; + ares_status_t status; + ares__llist_node_t *node; - if (str == NULL) { - return ARES_SUCCESS; + buf = ares__buf_create_const((const unsigned char *)str, ares_strlen(str)); + if (buf == NULL) { + return ARES_ENOMEM; } - p = str; - while (*p) { - q = p; - while (*q && !ISSPACE(*q)) { - q++; - } - val = try_option(p, q, "ndots:"); - if (val) { - sysconfig->ndots = strtoul(val, NULL, 10); - } + status = ares__buf_split(buf, (const unsigned char *)" \t", 2, + ARES_BUF_SPLIT_TRIM, 0, &options); + if (status != ARES_SUCCESS) { + goto done; + } - // Outdated option. - val = try_option(p, q, "retrans:"); - if (val) { - sysconfig->timeout_ms = strtoul(val, NULL, 10); - } + for (node = ares__llist_node_first(options); node != NULL; + node = ares__llist_node_next(node)) { + ares__buf_t *valbuf = ares__llist_node_val(node); - val = try_option(p, q, "timeout:"); - if (val) { - sysconfig->timeout_ms = strtoul(val, NULL, 10) * 1000; + status = process_option(sysconfig, valbuf); + /* Out of memory is the only fatal condition */ + if (status == ARES_ENOMEM) { + goto done; } + } - // Outdated option. 
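process_option() above reduces each resolver option to a key, an optional ':'-separated numeric value, and a small mapping table. A standalone distillation of that shape in plain C, with no c-ares internals; the option names mirror the ones handled above, and the printed values show the seconds-to-milliseconds conversion for timeout/retrans:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse_option(const char *token, size_t *ndots,
                         size_t *timeout_ms, size_t *tries)
{
  char          key[32];
  unsigned long val   = 0;
  const char   *colon = strchr(token, ':');
  size_t        klen  = colon ? (size_t)(colon - token) : strlen(token);

  if (klen >= sizeof(key)) {
    return; /* oversized keys are ignored */
  }
  memcpy(key, token, klen);
  key[klen] = 0;
  if (colon != NULL) {
    val = strtoul(colon + 1, NULL, 10);
  }

  if (strcmp(key, "ndots") == 0) {
    *ndots = (size_t)val;
  } else if (strcmp(key, "timeout") == 0 || strcmp(key, "retrans") == 0) {
    *timeout_ms = (size_t)val * 1000; /* seconds to milliseconds */
  } else if (strcmp(key, "attempts") == 0 || strcmp(key, "retry") == 0) {
    *tries = (size_t)val;
  }
}

int main(void)
{
  size_t ndots = 1, timeout_ms = 0, tries = 0;
  parse_option("ndots:2", &ndots, &timeout_ms, &tries);
  parse_option("timeout:3", &ndots, &timeout_ms, &tries);
  parse_option("attempts:4", &ndots, &timeout_ms, &tries);
  printf("ndots=%zu timeout_ms=%zu tries=%zu\n", ndots, timeout_ms, tries);
  return 0; /* ndots=2 timeout_ms=3000 tries=4 */
}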
- val = try_option(p, q, "retry:"); - if (val) { - sysconfig->tries = strtoul(val, NULL, 10); - } + status = ARES_SUCCESS; - val = try_option(p, q, "attempts:"); - if (val) { - sysconfig->tries = strtoul(val, NULL, 10); - } +done: + ares__llist_destroy(options); + ares__buf_destroy(buf); + return status; +} - val = try_option(p, q, "rotate"); - if (val) { - sysconfig->rotate = ARES_TRUE; +ares_status_t ares__init_by_environment(ares_sysconfig_t *sysconfig) +{ + const char *localdomain; + const char *res_options; + ares_status_t status; + + localdomain = getenv("LOCALDOMAIN"); + if (localdomain) { + char *temp = ares_strdup(localdomain); + if (temp == NULL) { + return ARES_ENOMEM; } + status = config_search(sysconfig, temp, 1); + ares_free(temp); + if (status != ARES_SUCCESS) { + return status; + } + } - p = q; - while (ISSPACE(*p)) { - p++; + res_options = getenv("RES_OPTIONS"); + if (res_options) { + status = set_options(sysconfig, res_options); + if (status != ARES_SUCCESS) { + return status; } } return ARES_SUCCESS; } -static char *try_config(char *s, const char *opt, char scc) +/* Configuration Files: + * /etc/resolv.conf + * - All Unix-like systems + * - Comments start with ; or # + * - Lines have a keyword followed by a value that is interpreted specific + * to the keyword: + * - Keywords: + * - nameserver - IP address of nameserver with optional port (using a : + * prefix). If using an ipv6 address and specifying a port, the ipv6 + * address must be encapsulated in brackets. For link-local ipv6 + * addresses, the interface can also be specified with a % prefix. e.g.: + * "nameserver [fe80::1]:1234%iface" + * This keyword may be specified multiple times. + * - search - whitespace separated list of domains + * - domain - obsolete, same as search except only a single domain + * - lookup / hostresorder - local, bind, file, files + * - sortlist - whitespace separated ip-address/netmask pairs + * - options - options controlling resolver variables + * - ndots:n - set ndots option + * - timeout:n (retrans:n) - timeout per query attempt in seconds + * - attempts:n (retry:n) - number of times resolver will send query + * - rotate - round-robin selection of name servers + * - use-vc / usevc - force tcp + * /etc/nsswitch.conf + * - Modern Linux, FreeBSD, HP-UX, Solaris + * - Search order set via: + * "hosts: files dns mdns4_minimal mdns4" + * - files is /etc/hosts + * - dns is dns + * - mdns4_minimal does mdns only if ending in .local + * - mdns4 does not limit to domains ending in .local + * /etc/netsvc.conf + * - AIX + * - Search order set via: + * "hosts = local , bind" + * - bind is dns + * - local is /etc/hosts + * /etc/svc.conf + * - Tru64 + * - Same format as /etc/netsvc.conf + * /etc/host.conf + * - Early FreeBSD, Early Linux + * - Not worth supporting, format varied based on system, FreeBSD used + * just a line per search order, Linux used "order " and a comma + * delimited list of "bind" and "hosts" + */ + + +/* This function will only return ARES_SUCCESS or ARES_ENOMEM. Any other + * conditions are ignored. Users may mess up config files, but we want to + * process anything we can. 
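The keyword list documented above maps directly onto an ordinary resolv.conf. Below is a sample file exercising most of the keywords that parse_resolvconf_line() handles, together with a sketch of pointing a channel at it through the existing ARES_OPT_RESOLVCONF option; the path and all values are invented, and error handling is trimmed:

#include <stdio.h>
#include <string.h>
#include <ares.h>

/* A resolv.conf using the keywords described above (values invented). */
static const char sample_resolv_conf[] =
  "# comments start with '#' or ';'\n"
  "nameserver 127.0.0.53\n"
  "nameserver [fe80::1]:53%eth0\n"
  "search example.com example.net\n"
  "sortlist 130.155.160.0/255.255.240.0\n"
  "options ndots:2 timeout:3 attempts:2 rotate use-vc\n";

int main(void)
{
  ares_channel_t     *channel = NULL;
  struct ares_options opts;
  int                 optmask = 0;
  FILE               *fp;

  /* Write the sample somewhere readable, then hand the path to c-ares. */
  fp = fopen("/tmp/resolv.conf.sample", "w");
  if (fp == NULL) {
    return 1;
  }
  fputs(sample_resolv_conf, fp);
  fclose(fp);

  memset(&opts, 0, sizeof(opts));
  opts.resolvconf_path = (char *)"/tmp/resolv.conf.sample";
  optmask             |= ARES_OPT_RESOLVCONF;

  ares_library_init(ARES_LIB_INIT_ALL);
  if (ares_init_options(&channel, &opts, optmask) != ARES_SUCCESS) {
    return 1;
  }
  /* ... issue queries ... */
  ares_destroy(channel);
  ares_library_cleanup();
  return 0;
}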
*/ +static ares_status_t parse_resolvconf_line(ares_sysconfig_t *sysconfig, + ares__buf_t *line) { - size_t len; - char *p; - char *q; + char option[32]; + char value[512]; + ares_status_t status = ARES_SUCCESS; - if (!s || !opt) { - /* no line or no option */ - return NULL; /* LCOV_EXCL_LINE */ + /* Ignore lines beginning with a comment */ + if (ares__buf_begins_with(line, (const unsigned char *)"#", 1) || + ares__buf_begins_with(line, (const unsigned char *)";", 1)) { + return ARES_SUCCESS; } - /* Hash '#' character is always used as primary comment char, additionally - a not-NUL secondary comment char will be considered when specified. */ + ares__buf_tag(line); - /* trim line comment */ - p = s; - if (scc) { - while (*p && (*p != '#') && (*p != scc)) { - p++; - } - } else { - while (*p && (*p != '#')) { - p++; - } + /* Shouldn't be possible, but if it happens, ignore the line. */ + if (ares__buf_consume_nonwhitespace(line) == 0) { + return ARES_SUCCESS; } - *p = '\0'; - /* trim trailing whitespace */ - q = p - 1; - while ((q >= s) && ISSPACE(*q)) { - q--; + status = ares__buf_tag_fetch_string(line, option, sizeof(option)); + if (status != ARES_SUCCESS) { + return ARES_SUCCESS; } - *++q = '\0'; - /* skip leading whitespace */ - p = s; - while (*p && ISSPACE(*p)) { - p++; - } + ares__buf_consume_whitespace(line, ARES_TRUE); - if (!*p) { - /* empty line */ - return NULL; + status = buf_fetch_string(line, value, sizeof(value)); + if (status != ARES_SUCCESS) { + return ARES_SUCCESS; } - if ((len = ares_strlen(opt)) == 0) { - /* empty option */ - return NULL; /* LCOV_EXCL_LINE */ + ares__str_trim(value); + if (*value == 0) { + return ARES_SUCCESS; } - if (strncmp(p, opt, len) != 0) { - /* line and option do not match */ - return NULL; + /* At this point we have a string option and a string value, both trimmed + * of leading and trailing whitespace. Lets try to evaluate them */ + if (strcmp(option, "domain") == 0) { + /* Domain is legacy, don't overwrite an existing config set by search */ + if (sysconfig->domains == NULL) { + status = config_search(sysconfig, value, 1); + } + } else if (strcmp(option, "lookup") == 0 || + strcmp(option, "hostresorder") == 0) { + ares__buf_tag_rollback(line); + status = config_lookup(sysconfig, line, " \t"); + } else if (strcmp(option, "search") == 0) { + status = config_search(sysconfig, value, 0); + } else if (strcmp(option, "nameserver") == 0) { + status = + ares__sconfig_append_fromstr(&sysconfig->sconfig, value, ARES_TRUE); + } else if (strcmp(option, "sortlist") == 0) { + /* Ignore all failures except ENOMEM. If the sysadmin set a bad + * sortlist, just ignore the sortlist, don't cause an inoperable + * channel */ + status = + ares__parse_sortlist(&sysconfig->sortlist, &sysconfig->nsortlist, value); + if (status != ARES_ENOMEM) { + status = ARES_SUCCESS; + } + } else if (strcmp(option, "options") == 0) { + status = set_options(sysconfig, value); } - /* skip over given option name */ - p += len; + return status; +} + +/* This function will only return ARES_SUCCESS or ARES_ENOMEM. Any other + * conditions are ignored. Users may mess up config files, but we want to + * process anything we can. 
*/ +static ares_status_t parse_nsswitch_line(ares_sysconfig_t *sysconfig, + ares__buf_t *line) +{ + char option[32]; + ares__buf_t *buf; + ares_status_t status = ARES_SUCCESS; + ares__llist_t *sects = NULL; - if (!*p) { - /* no option value */ - return NULL; /* LCOV_EXCL_LINE */ + /* Ignore lines beginning with a comment */ + if (ares__buf_begins_with(line, (const unsigned char *)"#", 1)) { + return ARES_SUCCESS; } - if ((opt[len - 1] != ':') && (opt[len - 1] != '=') && !ISSPACE(*p)) { - /* whitespace between option name and value is mandatory - for given option names which do not end with ':' or '=' */ - return NULL; + /* database : values (space delimited) */ + status = ares__buf_split(line, (const unsigned char *)":", 1, + ARES_BUF_SPLIT_TRIM, 2, §s); + + if (status != ARES_SUCCESS || ares__llist_len(sects) != 2) { + goto done; } - /* skip over whitespace */ - while (*p && ISSPACE(*p)) { - p++; + buf = ares__llist_first_val(sects); + status = buf_fetch_string(buf, option, sizeof(option)); + if (status != ARES_SUCCESS) { + goto done; } - if (!*p) { - /* no option value */ - return NULL; + /* Only support "hosts:" */ + if (strcmp(option, "hosts") != 0) { + goto done; } - /* return pointer to option value */ - return p; + /* Values are space separated */ + buf = ares__llist_last_val(sects); + status = config_lookup(sysconfig, buf, " \t"); + +done: + ares__llist_destroy(sects); + if (status != ARES_ENOMEM) { + status = ARES_SUCCESS; + } + return status; } -ares_status_t ares__init_by_environment(ares_sysconfig_t *sysconfig) +/* This function will only return ARES_SUCCESS or ARES_ENOMEM. Any other + * conditions are ignored. Users may mess up config files, but we want to + * process anything we can. */ +static ares_status_t parse_svcconf_line(ares_sysconfig_t *sysconfig, + ares__buf_t *line) { - const char *localdomain; - const char *res_options; - ares_status_t status; + char option[32]; + ares__buf_t *buf; + ares_status_t status = ARES_SUCCESS; + ares__llist_t *sects = NULL; - localdomain = getenv("LOCALDOMAIN"); - if (localdomain) { - char *temp = ares_strdup(localdomain); - if (temp == NULL) { - return ARES_ENOMEM; - } - status = config_domain(sysconfig, temp); - ares_free(temp); - if (status != ARES_SUCCESS) { - return status; - } + /* Ignore lines beginning with a comment */ + if (ares__buf_begins_with(line, (const unsigned char *)"#", 1)) { + return ARES_SUCCESS; } - res_options = getenv("RES_OPTIONS"); - if (res_options) { - status = set_options(sysconfig, res_options); - if (status != ARES_SUCCESS) { - return status; - } + /* database = values (comma delimited)*/ + status = ares__buf_split(line, (const unsigned char *)"=", 1, + ARES_BUF_SPLIT_TRIM, 2, §s); + + if (status != ARES_SUCCESS || ares__llist_len(sects) != 2) { + goto done; } - return ARES_SUCCESS; + buf = ares__llist_first_val(sects); + status = buf_fetch_string(buf, option, sizeof(option)); + if (status != ARES_SUCCESS) { + goto done; + } + + /* Only support "hosts=" */ + if (strcmp(option, "hosts") != 0) { + goto done; + } + + /* Values are comma separated */ + buf = ares__llist_last_val(sects); + status = config_lookup(sysconfig, buf, ","); + +done: + ares__llist_destroy(sects); + if (status != ARES_ENOMEM) { + status = ARES_SUCCESS; + } + return status; } -ares_status_t ares__init_sysconfig_files(const ares_channel_t *channel, - ares_sysconfig_t *sysconfig) +typedef ares_status_t (*line_callback_t)(ares_sysconfig_t *sysconfig, + ares__buf_t *line); + +/* Should only return: + * ARES_ENOTFOUND - file not found + * 
ARES_EFILE - error reading file (perms) + * ARES_ENOMEM - out of memory + * ARES_SUCCESS - file processed, doesn't necessarily mean it was a good + * file, but we're not erroring out if we can't parse + * something (or anything at all) */ +static ares_status_t process_config_lines(const char *filename, + ares_sysconfig_t *sysconfig, + line_callback_t cb) { - char *p; - FILE *fp = NULL; - char *line = NULL; - size_t linesize = 0; - int error; - const char *resolvconf_path; - ares_status_t status = ARES_SUCCESS; + ares_status_t status = ARES_SUCCESS; + ares__llist_node_t *node; + ares__llist_t *lines = NULL; + ares__buf_t *buf = NULL; - /* Support path for resolvconf filename set by ares_init_options */ - if (channel->resolvconf_path) { - resolvconf_path = channel->resolvconf_path; - } else { - resolvconf_path = PATH_RESOLV_CONF; - } - - fp = fopen(resolvconf_path, "r"); - if (fp) { - while ((status = ares__read_line(fp, &line, &linesize)) == ARES_SUCCESS) { - if ((p = try_config(line, "domain", ';'))) { - status = config_domain(sysconfig, p); - } else if ((p = try_config(line, "lookup", ';'))) { - status = config_lookup(sysconfig, p, "bind", NULL, "file"); - } else if ((p = try_config(line, "search", ';'))) { - status = config_search(sysconfig, p); - } else if ((p = try_config(line, "nameserver", ';'))) { - status = - ares__sconfig_append_fromstr(&sysconfig->sconfig, p, ARES_TRUE); - } else if ((p = try_config(line, "sortlist", ';'))) { - /* Ignore all failures except ENOMEM. If the sysadmin set a bad - * sortlist, just ignore the sortlist, don't cause an inoperable - * channel */ - status = - ares__parse_sortlist(&sysconfig->sortlist, &sysconfig->nsortlist, p); - if (status != ARES_ENOMEM) { - status = ARES_SUCCESS; - } - } else if ((p = try_config(line, "options", ';'))) { - status = set_options(sysconfig, p); - } else { - status = ARES_SUCCESS; - } - if (status != ARES_SUCCESS) { - fclose(fp); - goto done; - } - } - fclose(fp); + buf = ares__buf_create(); + if (buf == NULL) { + status = ARES_ENOMEM; + goto done; + } - if (status != ARES_EOF) { - goto done; - } - } else { - error = ERRNO; - switch (error) { - case ENOENT: - case ESRCH: - break; - default: - DEBUGF(fprintf(stderr, "fopen() failed with error: %d %s\n", error, - strerror(error))); - DEBUGF(fprintf(stderr, "Error opening file: %s\n", PATH_RESOLV_CONF)); - status = ARES_EFILE; - goto done; - } + status = ares__buf_load_file(filename, buf); + if (status != ARES_SUCCESS) { + goto done; } + status = ares__buf_split(buf, (const unsigned char *)"\n", 1, + ARES_BUF_SPLIT_TRIM, 0, &lines); + if (status != ARES_SUCCESS) { + goto done; + } - /* Many systems (Solaris, Linux, BSD's) use nsswitch.conf */ - fp = fopen("/etc/nsswitch.conf", "r"); - if (fp) { - while ((status = ares__read_line(fp, &line, &linesize)) == ARES_SUCCESS) { - if ((p = try_config(line, "hosts:", '\0'))) { - (void)config_lookup(sysconfig, p, "dns", "resolve", "files"); - } - } - fclose(fp); - if (status != ARES_EOF) { + for (node = ares__llist_node_first(lines); node != NULL; + node = ares__llist_node_next(node)) { + ares__buf_t *line = ares__llist_node_val(node); + + status = cb(sysconfig, line); + if (status != ARES_SUCCESS) { goto done; } - } else { - error = ERRNO; - switch (error) { - case ENOENT: - case ESRCH: - break; - default: - DEBUGF(fprintf(stderr, "fopen() failed with error: %d %s\n", error, - strerror(error))); - DEBUGF( - fprintf(stderr, "Error opening file: %s\n", "/etc/nsswitch.conf")); - break; - } - /* ignore error, maybe we will get luck in next 
if clause */ } +done: + ares__buf_destroy(buf); + ares__llist_destroy(lines); - /* Linux / GNU libc 2.x and possibly others have host.conf */ - fp = fopen("/etc/host.conf", "r"); - if (fp) { - while ((status = ares__read_line(fp, &line, &linesize)) == ARES_SUCCESS) { - if ((p = try_config(line, "order", '\0'))) { - /* ignore errors */ - (void)config_lookup(sysconfig, p, "bind", NULL, "hosts"); - } - } - fclose(fp); - if (status != ARES_EOF) { - goto done; - } - } else { - error = ERRNO; - switch (error) { - case ENOENT: - case ESRCH: - break; - default: - DEBUGF(fprintf(stderr, "fopen() failed with error: %d %s\n", error, - strerror(error))); - DEBUGF(fprintf(stderr, "Error opening file: %s\n", "/etc/host.conf")); - break; - } + return status; +} + +ares_status_t ares__init_sysconfig_files(const ares_channel_t *channel, + ares_sysconfig_t *sysconfig) +{ + ares_status_t status = ARES_SUCCESS; + + /* Resolv.conf */ + status = process_config_lines((channel->resolvconf_path != NULL) + ? channel->resolvconf_path + : PATH_RESOLV_CONF, + sysconfig, parse_resolvconf_line); + if (status != ARES_SUCCESS && status != ARES_ENOTFOUND) { + goto done; + } - /* ignore error, maybe we will get luck in next if clause */ + /* Nsswitch.conf */ + status = + process_config_lines("/etc/nsswitch.conf", sysconfig, parse_nsswitch_line); + if (status != ARES_SUCCESS && status != ARES_ENOTFOUND) { + goto done; } + /* netsvc.conf */ + status = + process_config_lines("/etc/netsvc.conf", sysconfig, parse_svcconf_line); + if (status != ARES_SUCCESS && status != ARES_ENOTFOUND) { + goto done; + } - /* Tru64 uses /etc/svc.conf */ - fp = fopen("/etc/svc.conf", "r"); - if (fp) { - while ((status = ares__read_line(fp, &line, &linesize)) == ARES_SUCCESS) { - if ((p = try_config(line, "hosts=", '\0'))) { - /* ignore errors */ - (void)config_lookup(sysconfig, p, "bind", NULL, "local"); - } - } - fclose(fp); - if (status != ARES_EOF) { - goto done; - } - } else { - error = ERRNO; - switch (error) { - case ENOENT: - case ESRCH: - break; - default: - DEBUGF(fprintf(stderr, "fopen() failed with error: %d %s\n", error, - strerror(error))); - DEBUGF(fprintf(stderr, "Error opening file: %s\n", "/etc/svc.conf")); - break; - } - /* ignore error */ + /* svc.conf */ + status = process_config_lines("/etc/svc.conf", sysconfig, parse_svcconf_line); + if (status != ARES_SUCCESS && status != ARES_ENOTFOUND) { + goto done; } status = ARES_SUCCESS; done: - ares_free(line); - return status; } diff --git a/deps/cares/src/lib/ares_update_servers.c b/deps/cares/src/lib/ares_update_servers.c index dd24fbfdd4675c..fce791476327c3 100644 --- a/deps/cares/src/lib/ares_update_servers.c +++ b/deps/cares/src/lib/ares_update_servers.c @@ -266,8 +266,8 @@ static ares_status_t parse_nameserver(ares__buf_t *buf, ares_sconfig_t *sconfig) } else { /* IPv6 */ const unsigned char ipv6_charset[] = "ABCDEFabcdef0123456789.:"; - if (ares__buf_consume_charset(buf, ipv6_charset, sizeof(ipv6_charset)) == - 0) { + if (ares__buf_consume_charset(buf, ipv6_charset, + sizeof(ipv6_charset) - 1) == 0) { return ARES_EBADSTR; } } @@ -318,8 +318,8 @@ static ares_status_t parse_nameserver(ares__buf_t *buf, ares_sconfig_t *sconfig) ares__buf_tag(buf); - if (ares__buf_consume_charset(buf, iface_charset, sizeof(iface_charset)) == - 0) { + if (ares__buf_consume_charset(buf, iface_charset, + sizeof(iface_charset) - 1) == 0) { return ARES_EBADSTR; } @@ -463,7 +463,7 @@ ares_status_t ares__sconfig_append_fromstr(ares__llist_t **sconfig, } status = ares__buf_split(buf, (const unsigned char 
*)" ,", 2, - ARES_BUF_SPLIT_NONE, &list); + ARES_BUF_SPLIT_NONE, 0, &list); if (status != ARES_SUCCESS) { goto done; } @@ -1080,7 +1080,7 @@ static ares_status_t set_servers_csv(ares_channel_t *channel, const char *_csv) if (ares_strlen(_csv) == 0) { /* blank all servers */ - return (ares_status_t)ares_set_servers_ports(channel, NULL); + return ares__servers_update(channel, NULL, ARES_TRUE); } status = ares__sconfig_append_fromstr(&slist, _csv, ARES_FALSE); diff --git a/deps/cares/src/lib/setup_once.h b/deps/cares/src/lib/setup_once.h index 8341b348e7a9e0..a6168c9aed5365 100644 --- a/deps/cares/src/lib/setup_once.h +++ b/deps/cares/src/lib/setup_once.h @@ -274,7 +274,7 @@ Error Missing_definition_of_macro_sread #define ISPRINT(x) (isprint((int)((unsigned char)x))) #define ISUPPER(x) (isupper((int)((unsigned char)x))) #define ISLOWER(x) (islower((int)((unsigned char)x))) -#define ISASCII(x) (isascii((int)((unsigned char)x))) +#define ISASCII(x) (((unsigned char)x) <= 127 ? 1 : 0) #define ISBLANK(x) \ (int)((((unsigned char)x) == ' ') || (((unsigned char)x) == '\t')) diff --git a/deps/cares/src/tools/CMakeLists.txt b/deps/cares/src/tools/CMakeLists.txt index 13aefe135e9105..fb795a91741aaf 100644 --- a/deps/cares/src/tools/CMakeLists.txt +++ b/deps/cares/src/tools/CMakeLists.txt @@ -19,7 +19,7 @@ IF (CARES_BUILD_TOOLS) C_STANDARD 90 ) - TARGET_COMPILE_DEFINITIONS (ahost PRIVATE HAVE_CONFIG_H=1) + TARGET_COMPILE_DEFINITIONS (ahost PRIVATE HAVE_CONFIG_H=1 CARES_NO_DEPRECATED) TARGET_LINK_LIBRARIES (ahost PRIVATE ${PROJECT_NAME}) IF (CARES_INSTALL) INSTALL (TARGETS ahost COMPONENT Tools ${TARGETS_INST_DEST}) @@ -40,7 +40,7 @@ IF (CARES_BUILD_TOOLS) C_STANDARD 90 ) - TARGET_COMPILE_DEFINITIONS (adig PRIVATE HAVE_CONFIG_H=1) + TARGET_COMPILE_DEFINITIONS (adig PRIVATE HAVE_CONFIG_H=1 CARES_NO_DEPRECATED) TARGET_LINK_LIBRARIES (adig PRIVATE ${PROJECT_NAME}) IF (CARES_INSTALL) INSTALL (TARGETS adig COMPONENT Tools ${TARGETS_INST_DEST}) diff --git a/deps/cares/src/tools/Makefile.am b/deps/cares/src/tools/Makefile.am index 729658d79a76da..ba7a672f89faf5 100644 --- a/deps/cares/src/tools/Makefile.am +++ b/deps/cares/src/tools/Makefile.am @@ -15,7 +15,8 @@ noinst_PROGRAMS =$(PROGS) AM_CPPFLAGS += -I$(top_builddir)/include \ -I$(top_builddir)/src/lib \ -I$(top_srcdir)/include \ - -I$(top_srcdir)/src/lib + -I$(top_srcdir)/src/lib \ + -DCARES_NO_DEPRECATED include Makefile.inc diff --git a/deps/cares/src/tools/Makefile.in b/deps/cares/src/tools/Makefile.in index f0602fe172fdbb..0b7a310baaab5d 100644 --- a/deps/cares/src/tools/Makefile.in +++ b/deps/cares/src/tools/Makefile.in @@ -228,7 +228,7 @@ AM_CFLAGS = @AM_CFLAGS@ # might possibly already be installed in the system. 
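Several hunks above change charset lengths from sizeof(charset) to sizeof(charset) - 1. These charsets are char arrays initialized from string literals, so sizeof() counts the implicit NUL terminator and would otherwise add '\0' to the set of accepted bytes. A minimal illustration of the off-by-one:

#include <stdio.h>
#include <string.h>

int main(void)
{
  const unsigned char ipv4_charset[] = "0123456789.";

  /* sizeof() includes the string literal's trailing NUL, strlen() does
   * not; passing sizeof() as the charset length would make '\0' an
   * acceptable character for the parser. */
  printf("sizeof      = %zu\n", sizeof(ipv4_charset));                /* 12 */
  printf("strlen      = %zu\n", strlen((const char *)ipv4_charset));  /* 11 */
  printf("charset len = %zu\n", sizeof(ipv4_charset) - 1);            /* 11 */
  return 0;
}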
AM_CPPFLAGS = @AM_CPPFLAGS@ -I$(top_builddir)/include \ -I$(top_builddir)/src/lib -I$(top_srcdir)/include \ - -I$(top_srcdir)/src/lib + -I$(top_srcdir)/src/lib -DCARES_NO_DEPRECATED AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AS = @AS@ @@ -310,6 +310,7 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PKGCONFIG_CFLAGS = @PKGCONFIG_CFLAGS@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ From af3e32073b3af2bbbffba50ca83f8b14982a9019 Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Sun, 14 Apr 2024 00:26:40 +0000 Subject: [PATCH 33/41] deps: update ada to 2.7.8 PR-URL: https://github.com/nodejs/node/pull/52517 Reviewed-By: Yagiz Nizipli Reviewed-By: James M Snell Reviewed-By: Marco Ippolito Reviewed-By: Benjamin Gruenbaum --- deps/ada/ada.cpp | 73 ++++++++++++++++++++++++++++++------------- deps/ada/ada.h | 80 +++++++++++++++++++++++++++++++++--------------- 2 files changed, 108 insertions(+), 45 deletions(-) diff --git a/deps/ada/ada.cpp b/deps/ada/ada.cpp index a7be606bb9e70f..bff36abb835760 100644 --- a/deps/ada/ada.cpp +++ b/deps/ada/ada.cpp @@ -1,4 +1,4 @@ -/* auto-generated on 2024-03-07 13:23:39 -0500. Do not edit! */ +/* auto-generated on 2024-04-11 16:39:11 -0400. Do not edit! */ /* begin file src/ada.cpp */ #include "ada.h" /* begin file src/checkers.cpp */ @@ -11585,7 +11585,7 @@ ada_really_inline bool url::parse_scheme(const std::string_view input) { } } } else { // slow path - std::string _buffer = std::string(input); + std::string _buffer(input); // Next function is only valid if the input is ASCII and returns false // otherwise, but it seems that we always have ascii content so we do not // need to check the return value. @@ -13227,7 +13227,7 @@ template } } } else { // slow path - std::string _buffer = std::string(input); + std::string _buffer(input); // Next function is only valid if the input is ASCII and returns false // otherwise, but it seems that we always have ascii content so we do not // need to check the return value. 
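The ada hunks above are largely mechanical (braces around single-statement ifs, direct construction of std::string and std::string_view), but they also show the shape of ada's C API. A small sketch calling the two validity helpers that appear in this diff; the header name ada_c.h is an assumption:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include "ada_c.h" /* assumed name of ada's C API header */

int main(void)
{
  const char *rel  = "/search?q=c-ares";
  const char *base = "https://example.org";

  /* ada_can_parse_with_base() wraps ada::can_parse() with a base URL,
   * as shown in the implementation above. */
  bool base_ok = ada_can_parse(base, strlen(base));
  bool rel_ok  = ada_can_parse_with_base(rel, strlen(rel),
                                         base, strlen(base));

  printf("base parses: %d, relative against base parses: %d\n",
         base_ok, rel_ok);
  return 0;
}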
@@ -13683,7 +13683,7 @@ bool url_aggregator::set_host_or_hostname(const std::string_view input) { return false; } - std::string previous_host = std::string(get_hostname()); + std::string previous_host(get_hostname()); uint32_t previous_port = components.port; size_t host_end_pos = input.find('#'); @@ -14983,7 +14983,7 @@ bool ada_can_parse(const char* input, size_t length) noexcept { bool ada_can_parse_with_base(const char* input, size_t input_length, const char* base, size_t base_length) noexcept { - auto base_view = std::string_view(base, base_length); + std::string_view base_view(base, base_length); return ada::can_parse(std::string_view(input, input_length), &base_view); } @@ -15388,14 +15388,18 @@ ada_owned_string ada_search_params_to_string(ada_url_search_params result) { size_t ada_search_params_size(ada_url_search_params result) { ada::result& r = *(ada::result*)result; - if (!r) return 0; + if (!r) { + return 0; + } return r->size(); } void ada_search_params_sort(ada_url_search_params result) { ada::result& r = *(ada::result*)result; - if (r) r->sort(); + if (r) { + r->sort(); + } } void ada_search_params_append(ada_url_search_params result, const char* key, @@ -15444,7 +15448,9 @@ bool ada_search_params_has(ada_url_search_params result, const char* key, size_t key_length) { ada::result& r = *(ada::result*)result; - if (!r) return false; + if (!r) { + return false; + } return r->has(std::string_view(key, key_length)); } @@ -15453,7 +15459,9 @@ bool ada_search_params_has_value(ada_url_search_params result, const char* key, size_t value_length) { ada::result& r = *(ada::result*)result; - if (!r) return false; + if (!r) { + return false; + } return r->has(std::string_view(key, key_length), std::string_view(value, value_length)); } @@ -15462,9 +15470,13 @@ ada_string ada_search_params_get(ada_url_search_params result, const char* key, size_t key_length) { ada::result& r = *(ada::result*)result; - if (!r) return ada_string_create(NULL, 0); + if (!r) { + return ada_string_create(NULL, 0); + } auto found = r->get(std::string_view(key, key_length)); - if (!found.has_value()) return ada_string_create(NULL, 0); + if (!found.has_value()) { + return ada_string_create(NULL, 0); + } return ada_string_create(found->data(), found->length()); } @@ -15522,14 +15534,18 @@ void ada_free_strings(ada_strings result) { size_t ada_strings_size(ada_strings result) { ada::result>* r = (ada::result>*)result; - if (!r) return 0; + if (!r) { + return 0; + } return (*r)->size(); } ada_string ada_strings_get(ada_strings result, size_t index) { ada::result>* r = (ada::result>*)result; - if (!r) return ada_string_create(NULL, 0); + if (!r) { + return ada_string_create(NULL, 0); + } std::string_view view = (*r)->at(index); return ada_string_create(view.data(), view.length()); } @@ -15544,9 +15560,13 @@ ada_string ada_search_params_keys_iter_next( ada_url_search_params_keys_iter result) { ada::result* r = (ada::result*)result; - if (!r) return ada_string_create(NULL, 0); + if (!r) { + return ada_string_create(NULL, 0); + } auto next = (*r)->next(); - if (!next.has_value()) return ada_string_create(NULL, 0); + if (!next.has_value()) { + return ada_string_create(NULL, 0); + } return ada_string_create(next->data(), next->length()); } @@ -15554,7 +15574,9 @@ bool ada_search_params_keys_iter_has_next( ada_url_search_params_keys_iter result) { ada::result* r = (ada::result*)result; - if (!r) return false; + if (!r) { + return false; + } return (*r)->has_next(); } @@ -15569,9 +15591,13 @@ ada_string 
ada_search_params_values_iter_next( ada_url_search_params_values_iter result) { ada::result* r = (ada::result*)result; - if (!r) return ada_string_create(NULL, 0); + if (!r) { + return ada_string_create(NULL, 0); + } auto next = (*r)->next(); - if (!next.has_value()) return ada_string_create(NULL, 0); + if (!next.has_value()) { + return ada_string_create(NULL, 0); + } return ada_string_create(next->data(), next->length()); } @@ -15579,7 +15605,9 @@ bool ada_search_params_values_iter_has_next( ada_url_search_params_values_iter result) { ada::result* r = (ada::result*)result; - if (!r) return false; + if (!r) { + return false; + } return (*r)->has_next(); } @@ -15596,8 +15624,9 @@ ada_string_pair ada_search_params_entries_iter_next( (ada::result*)result; if (!r) return {ada_string_create(NULL, 0), ada_string_create(NULL, 0)}; auto next = (*r)->next(); - if (!next.has_value()) + if (!next.has_value()) { return {ada_string_create(NULL, 0), ada_string_create(NULL, 0)}; + } return ada_string_pair{ ada_string_create(next->first.data(), next->first.length()), ada_string_create(next->second.data(), next->second.length())}; @@ -15607,7 +15636,9 @@ bool ada_search_params_entries_iter_has_next( ada_url_search_params_entries_iter result) { ada::result* r = (ada::result*)result; - if (!r) return false; + if (!r) { + return false; + } return (*r)->has_next(); } diff --git a/deps/ada/ada.h b/deps/ada/ada.h index 721ac736c39c57..b9e000b841d1ed 100644 --- a/deps/ada/ada.h +++ b/deps/ada/ada.h @@ -1,4 +1,4 @@ -/* auto-generated on 2024-03-07 13:23:39 -0500. Do not edit! */ +/* auto-generated on 2024-04-11 16:39:11 -0400. Do not edit! */ /* begin file include/ada.h */ /** * @file ada.h @@ -461,9 +461,11 @@ namespace ada { #ifdef ADA_VISUAL_STUDIO #define ADA_ASSUME(COND) __assume(COND) #else -#define ADA_ASSUME(COND) \ - do { \ - if (!(COND)) __builtin_unreachable(); \ +#define ADA_ASSUME(COND) \ + do { \ + if (!(COND)) { \ + __builtin_unreachable(); \ + } \ } while (0) #endif @@ -948,15 +950,15 @@ constexpr uint8_t WWW_FORM_URLENCODED_PERCENT_ENCODE[32] = { // 30 31 32 33 34 35 36 37 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00, // 38 39 3A 3B 3C 3D 3E 3F - 0x00 | 0x00 | 0x00 | 0x00 | 0x10 | 0x00 | 0x40 | 0x80, + 0x00 | 0x00 | 0x04 | 0x08 | 0x10 | 0x20 | 0x40 | 0x80, // 40 41 42 43 44 45 46 47 - 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00, + 0x01 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00, // 48 49 4A 4B 4C 4D 4E 4F 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00, // 50 51 52 53 54 55 56 57 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00, // 58 59 5A 5B 5C 5D 5E 5F - 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00, + 0x00 | 0x00 | 0x00 | 0x08 | 0x00 | 0x20 | 0x40 | 0x00, // 60 61 62 63 64 65 66 67 0x01 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00, // 68 69 6A 6B 6C 6D 6E 6F @@ -964,7 +966,7 @@ constexpr uint8_t WWW_FORM_URLENCODED_PERCENT_ENCODE[32] = { // 70 71 72 73 74 75 76 77 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00, // 78 79 7A 7B 7C 7D 7E 7F - 0x00 | 0x00 | 0x00 | 0x08 | 0x00 | 0x20 | 0x40 | 0x80, + 0x00 | 0x00 | 0x00 | 0x08 | 0x10 | 0x20 | 0x40 | 0x80, // 80 81 82 83 84 85 86 87 0x01 | 0x02 | 0x04 | 0x08 | 0x10 | 0x20 | 0x40 | 0x80, // 88 89 8A 8B 8C 8D 8E 8F @@ -1072,7 +1074,7 @@ ada_really_inline bool begins_with(std::string_view view, } // namespace ada::checkers -#endif // ADA_CHECKERS_H +#endif // ADA_CHECKERS_INL_H /* end file include/ada/checkers-inl.h */ /* begin file include/ada/log.h */ /** @@ -4349,6 +4351,30 @@ constexpr 
std::string_view is_special_list[] = {"http", " ", "https", "ws", constexpr uint16_t special_ports[] = {80, 0, 443, 80, 21, 443, 0, 0}; } // namespace details +/**** + * @private + * In is_special, get_scheme_type, and get_special_port, we + * use a standard hashing technique to find the index of the scheme in + * the is_special_list. The hashing technique is based on the size of + * the scheme and the first character of the scheme. It ensures that we + * do at most one string comparison per call. If the protocol is + * predictible (e.g., it is always "http"), we can get a better average + * performance by using a simpler approach where we loop and compare + * scheme with all possible protocols starting with the most likely + * protocol. Doing multiple comparisons may have a poor worst case + * performance, however. In this instance, we choose a potentially + * slightly lower best-case performance for a better worst-case + * performance. We can revisit this choice at any time. + * + * Reference: + * Schmidt, Douglas C. "Gperf: A perfect hash function generator." + * More C++ gems 17 (2000). + * + * Reference: https://en.wikipedia.org/wiki/Perfect_hash_function + * + * Reference: https://github.com/ada-url/ada/issues/617 + ****/ + ada_really_inline constexpr bool is_special(std::string_view scheme) { if (scheme.empty()) { return false; @@ -5642,7 +5668,7 @@ inline std::ostream &operator<<(std::ostream &out, const ada::url &u) { if (query.has_value()) { out.search_start = uint32_t(running_index); running_index += get_search().size(); - if (get_search().size() == 0) { + if (get_search().empty()) { running_index++; } } @@ -6113,7 +6139,7 @@ inline void url_aggregator::append_base_pathname(const std::string_view input) { ADA_ASSERT_TRUE(!helpers::overlaps(input, buffer)); #if ADA_DEVELOPMENT_CHECKS // computing the expected password. - std::string path_expected = std::string(get_pathname()); + std::string path_expected(get_pathname()); path_expected.append(input); #endif // ADA_DEVELOPMENT_CHECKS uint32_t ending_index = uint32_t(buffer.size()); @@ -6183,7 +6209,7 @@ inline void url_aggregator::append_base_username(const std::string_view input) { ADA_ASSERT_TRUE(!helpers::overlaps(input, buffer)); #if ADA_DEVELOPMENT_CHECKS // computing the expected password. 
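The scheme-lookup comment above describes the strategy: key on the scheme's length and first character so that at most one string comparison is ever needed. A standalone sketch of that idea in plain C; it illustrates the technique only and is not ada's actual table or hash function:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Special schemes from the URL standard: http, https, ws, wss, ftp, file.
 * Length plus first character pins down a single candidate, so one
 * memcmp() settles the question. */
static bool is_special_scheme(const char *s, size_t len)
{
  switch (len) {
    case 2: return memcmp(s, "ws", 2) == 0;
    case 3: return s[0] == 'w' ? memcmp(s, "wss", 3) == 0
                               : memcmp(s, "ftp", 3) == 0;
    case 4: return s[0] == 'h' ? memcmp(s, "http", 4) == 0
                               : memcmp(s, "file", 4) == 0;
    case 5: return memcmp(s, "https", 5) == 0;
    default: return false;
  }
}

int main(void)
{
  const char *tests[] = { "https", "wss", "gopher", "file", "ftps" };
  size_t      i;
  for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
    printf("%-6s -> %d\n", tests[i],
           is_special_scheme(tests[i], strlen(tests[i])));
  }
  return 0;
}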
- std::string username_expected = std::string(get_username()); + std::string username_expected(get_username()); username_expected.append(input); #endif // ADA_DEVELOPMENT_CHECKS add_authority_slashes_if_needed(); @@ -6213,7 +6239,7 @@ inline void url_aggregator::append_base_username(const std::string_view input) { components.hash_start += difference; } #if ADA_DEVELOPMENT_CHECKS - std::string username_after = std::string(get_username()); + std::string username_after(get_username()); ADA_ASSERT_EQUAL( username_expected, username_after, "append_base_username problem after inserting " + std::string(input)); @@ -6339,7 +6365,7 @@ inline void url_aggregator::append_base_password(const std::string_view input) { components.hash_start += difference; } #if ADA_DEVELOPMENT_CHECKS - std::string password_after = std::string(get_password()); + std::string password_after(get_password()); ADA_ASSERT_EQUAL( password_expected, password_after, "append_base_password problem after inserting " + std::string(input)); @@ -6826,7 +6852,7 @@ struct url_search_params { /** * @see https://url.spec.whatwg.org/#urlsearchparams-stringification-behavior */ - inline std::string to_string(); + inline std::string to_string() const; /** * Returns a simple JS-style iterator over all of the keys in this @@ -6943,12 +6969,12 @@ inline void url_search_params::initialize(std::string_view input) { auto equal = current.find('='); if (equal == std::string_view::npos) { - auto name = std::string(current); + std::string name(current); std::replace(name.begin(), name.end(), '+', ' '); params.emplace_back(unicode::percent_decode(name, name.find('%')), ""); } else { - auto name = std::string(current.substr(0, equal)); - auto value = std::string(current.substr(equal + 1)); + std::string name(current.substr(0, equal)); + std::string value(current.substr(equal + 1)); std::replace(name.begin(), name.end(), '+', ' '); std::replace(value.begin(), value.end(), '+', ' '); @@ -7021,7 +7047,7 @@ inline bool url_search_params::has(std::string_view key, return entry != params.end(); } -inline std::string url_search_params::to_string() { +inline std::string url_search_params::to_string() const { auto character_set = ada::character_sets::WWW_FORM_URLENCODED_PERCENT_ENCODE; std::string out{}; for (size_t i = 0; i < params.size(); i++) { @@ -7106,20 +7132,26 @@ inline bool url_search_params_iter::has_next() { template <> inline std::optional url_search_params_keys_iter::next() { - if (!has_next()) return std::nullopt; + if (!has_next()) { + return std::nullopt; + } return params.params[pos++].first; } template <> inline std::optional url_search_params_values_iter::next() { - if (!has_next()) return std::nullopt; + if (!has_next()) { + return std::nullopt; + } return params.params[pos++].second; } template <> inline std::optional url_search_params_entries_iter::next() { - if (!has_next()) return std::nullopt; + if (!has_next()) { + return std::nullopt; + } return params.params[pos++]; } @@ -7137,14 +7169,14 @@ url_search_params_entries_iter::next() { #ifndef ADA_ADA_VERSION_H #define ADA_ADA_VERSION_H -#define ADA_VERSION "2.7.7" +#define ADA_VERSION "2.7.8" namespace ada { enum { ADA_VERSION_MAJOR = 2, ADA_VERSION_MINOR = 7, - ADA_VERSION_REVISION = 7, + ADA_VERSION_REVISION = 8, }; } // namespace ada From 755399db9d9ab247bb8b3e668ae3152be226fa8e Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Tue, 19 Mar 2024 12:14:39 +0200 Subject: [PATCH 34/41] deps: update zlib to 1.3.0.1-motley-24342f6 PR-URL: https://github.com/nodejs/node/pull/52123 
Reviewed-By: Luigi Pinca Reviewed-By: Marco Ippolito Reviewed-By: Michael Dawson --- deps/zlib/BUILD.gn | 5 - deps/zlib/CMakeLists.txt | 76 ++- deps/zlib/DIR_METADATA | 3 + deps/zlib/contrib/minizip/Makefile | 12 +- deps/zlib/contrib/minizip/README.chromium | 15 +- deps/zlib/contrib/minizip/crypt.h | 29 +- deps/zlib/contrib/minizip/ioapi.c | 78 ++- deps/zlib/contrib/minizip/ioapi.h | 46 +- deps/zlib/contrib/minizip/iowin32.c | 66 +-- deps/zlib/contrib/minizip/iowin32.h | 8 +- deps/zlib/contrib/minizip/miniunz.c | 84 +-- deps/zlib/contrib/minizip/minizip.c | 85 ++- deps/zlib/contrib/minizip/mztools.c | 8 +- deps/zlib/contrib/minizip/unzip.c | 509 +++++++----------- deps/zlib/contrib/minizip/unzip.h | 148 ++--- deps/zlib/contrib/minizip/zip.c | 352 +++++------- deps/zlib/contrib/minizip/zip.h | 310 +++++------ .../contrib/tests/fuzzers/deflate_fuzzer.cc | 90 +++- deps/zlib/contrib/tests/utils_unittest.cc | 65 +++ deps/zlib/crc_folding.c | 2 +- deps/zlib/deflate.c | 20 +- deps/zlib/google/zip_internal.cc | 13 +- deps/zlib/google/zip_reader_unittest.cc | 6 +- deps/zlib/patches/0000-build.patch | 44 +- deps/zlib/patches/0001-simd.patch | 2 +- deps/zlib/patches/0004-fix-uwp.patch | 13 +- .../0008-minizip-zip-unzip-tools.patch | 55 +- ...14-minizip-unzip-with-incorrect-size.patch | 34 ++ ...0015-minizip-unzip-enable-decryption.patch | 39 ++ ...nizip-parse-unicode-path-extra-field.patch | 117 ++++ deps/zlib/zutil.h | 23 +- 31 files changed, 1208 insertions(+), 1149 deletions(-) create mode 100644 deps/zlib/patches/0014-minizip-unzip-with-incorrect-size.patch create mode 100644 deps/zlib/patches/0015-minizip-unzip-enable-decryption.patch create mode 100644 deps/zlib/patches/0016-minizip-parse-unicode-path-extra-field.patch diff --git a/deps/zlib/BUILD.gn b/deps/zlib/BUILD.gn index 46627bca7eb158..7fff5762e81b6a 100644 --- a/deps/zlib/BUILD.gn +++ b/deps/zlib/BUILD.gn @@ -274,7 +274,6 @@ source_set("zlib_slide_hash_simd") { config("zlib_warnings") { if (is_clang) { cflags = [ - "-Wno-deprecated-non-prototype", "-Wno-incompatible-pointer-types", "-Wunused-variable", ] @@ -380,7 +379,6 @@ config("minizip_warnings") { cflags = [ # zlib uses `if ((a == b))` for some reason. "-Wno-parentheses-equality", - "-Wno-deprecated-non-prototype", ] } } @@ -452,8 +450,6 @@ if (!is_win || target_os != "winuwp") { if (is_clang) { cflags = [ "-Wno-incompatible-pointer-types-discards-qualifiers", - - "-Wno-deprecated-non-prototype", ] } @@ -476,7 +472,6 @@ if (!is_win || target_os != "winuwp") { if (is_clang) { cflags = [ "-Wno-incompatible-pointer-types-discards-qualifiers", - "-Wno-deprecated-non-prototype", ] } diff --git a/deps/zlib/CMakeLists.txt b/deps/zlib/CMakeLists.txt index 5541985f474c64..8389cdd6c38faa 100644 --- a/deps/zlib/CMakeLists.txt +++ b/deps/zlib/CMakeLists.txt @@ -25,32 +25,55 @@ option(ENABLE_SIMD_OPTIMIZATIONS "Enable all SIMD optimizations" OFF) option(ENABLE_SIMD_AVX512 "Enable SIMD AXV512 optimizations" OFF) option(USE_ZLIB_RABIN_KARP_HASH "Enable bitstream compatibility with canonical zlib" OFF) option(BUILD_UNITTESTS "Enable standalone unit tests build" OFF) +option(BUILD_MINIZIP_BIN "Enable building minzip_bin tool" OFF) if (USE_ZLIB_RABIN_KARP_HASH) add_definitions(-DUSE_ZLIB_RABIN_KARP_ROLLING_HASH) endif() -# TODO(cavalcantii): add support for other OSes (e.g. Android, fuchsia, osx) -# and architectures (e.g. Arm). +# TODO(cavalcantii): add support for other OSes (e.g. Android, Fuchsia, etc) +# and architectures (e.g. RISCV). 
if (ENABLE_SIMD_OPTIMIZATIONS) - add_definitions(-DINFLATE_CHUNK_SIMD_SSE2) - add_definitions(-DADLER32_SIMD_SSSE3) - add_definitions(-DINFLATE_CHUNK_READ_64LE) - add_definitions(-DCRC32_SIMD_SSE42_PCLMUL) - if (ENABLE_SIMD_AVX512) - add_definitions(-DCRC32_SIMD_AVX512_PCLMUL) - add_compile_options(-mvpclmulqdq -msse2 -mavx512f -mpclmul) - else() - add_compile_options(-msse4.2 -mpclmul) - endif() - add_definitions(-DDEFLATE_SLIDE_HASH_SSE2) - # Required by CPU features detection code. - add_definitions(-DX86_NOT_WINDOWS) - # Apparently some environments (e.g. CentOS) require to explicitly link - # with pthread and that is required by the CPU features detection code. - find_package (Threads REQUIRED) - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread") + # Apparently some environments (e.g. CentOS) require to explicitly link + # with pthread and that is required by the CPU features detection code. + find_package (Threads REQUIRED) + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pthread") + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread") + + if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64") + add_definitions(-DINFLATE_CHUNK_SIMD_SSE2) + add_definitions(-DADLER32_SIMD_SSSE3) + add_definitions(-DINFLATE_CHUNK_READ_64LE) + add_definitions(-DCRC32_SIMD_SSE42_PCLMUL) + if (ENABLE_SIMD_AVX512) + add_definitions(-DCRC32_SIMD_AVX512_PCLMUL) + add_compile_options(-mvpclmulqdq -msse2 -mavx512f -mpclmul) + else() + add_compile_options(-msse4.2 -mpclmul) + endif() + add_definitions(-DDEFLATE_SLIDE_HASH_SSE2) + # Required by CPU features detection code. + add_definitions(-DX86_NOT_WINDOWS) + endif() + + if ((CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64") OR + (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")) + add_definitions(-DINFLATE_CHUNK_SIMD_NEON) + add_definitions(-DADLER32_SIMD_NEON) + add_definitions(-DINFLATE_CHUNK_READ_64LE) + add_definitions(-DCRC32_ARMV8_CRC32) + add_definitions(-DDEFLATE_SLIDE_HASH_NEON) + # Required by CPU features detection code. + if (APPLE) + add_definitions(-DARMV8_OS_MACOS) + endif() + + if (UNIX AND NOT APPLE) + add_definitions(-DARMV8_OS_LINUX) + endif() + + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto") + endif() endif() # @@ -300,8 +323,11 @@ endif() #============================================================================ # Minigzip tool #============================================================================ -add_executable(minizip_bin contrib/minizip/minizip.c contrib/minizip/ioapi.c -contrib/minizip/ioapi.h contrib/minizip/unzip.c -contrib/minizip/unzip.h contrib/minizip/zip.c contrib/minizip/zip.h -) -target_link_libraries(minizip_bin zlib) +# TODO(cavalcantii): get it working on Windows. +if (BUILD_MINIZIP_BIN) + add_executable(minizip_bin contrib/minizip/minizip.c contrib/minizip/ioapi.c + contrib/minizip/ioapi.h contrib/minizip/unzip.c + contrib/minizip/unzip.h contrib/minizip/zip.c contrib/minizip/zip.h + ) + target_link_libraries(minizip_bin zlib) +endif() diff --git a/deps/zlib/DIR_METADATA b/deps/zlib/DIR_METADATA index d366dc732137cd..45f7798a6860a5 100644 --- a/deps/zlib/DIR_METADATA +++ b/deps/zlib/DIR_METADATA @@ -1,3 +1,6 @@ monorail: { component: "Internals" } +buganizer_public: { + component_id: 1456292 +} diff --git a/deps/zlib/contrib/minizip/Makefile b/deps/zlib/contrib/minizip/Makefile index 84eaad20d4fb19..aac76e07f6b999 100644 --- a/deps/zlib/contrib/minizip/Makefile +++ b/deps/zlib/contrib/minizip/Makefile @@ -1,5 +1,5 @@ CC=cc -CFLAGS=-O -I../.. +CFLAGS := $(CFLAGS) -O -I../.. 
UNZ_OBJS = miniunz.o unzip.o ioapi.o ../../libz.a ZIP_OBJS = minizip.o zip.o ioapi.o ../../libz.a @@ -16,10 +16,14 @@ minizip: $(ZIP_OBJS) $(CC) $(CFLAGS) -o $@ $(ZIP_OBJS) test: miniunz minizip - ./minizip test readme.txt + @rm -f test.* + @echo hello hello hello > test.txt + ./minizip test test.txt ./miniunz -l test.zip - mv readme.txt readme.old + @mv test.txt test.old ./miniunz test.zip + @cmp test.txt test.old + @rm -f test.* clean: - /bin/rm -f *.o *~ minizip miniunz + /bin/rm -f *.o *~ minizip miniunz test.* diff --git a/deps/zlib/contrib/minizip/README.chromium b/deps/zlib/contrib/minizip/README.chromium index b13de237b5387a..b5895f2a5181b0 100644 --- a/deps/zlib/contrib/minizip/README.chromium +++ b/deps/zlib/contrib/minizip/README.chromium @@ -1,7 +1,7 @@ Name: ZIP file API for reading file entries in a ZIP archive Short Name: minizip URL: https://github.com/madler/zlib/tree/master/contrib/minizip -Version: 1.2.12 +Version: 1.3.0.1 License: Zlib License File: //third_party/zlib/LICENSE Security Critical: yes @@ -13,9 +13,16 @@ Minizip provides API on top of zlib that can enumerate and extract ZIP archive files. See minizip.md for chromium build instructions. Local Modifications: +- Fixed uncompressing files with wrong uncompressed size set + crrev.com/268940 + 0014-minizip-unzip-with-incorrect-size.patch + +- Enable traditional PKWARE decryption in zlib/contrib/minizip + Correct the value of rest_read_compressed when decompressing an encrypted + zip. (crrev.com/580862) + 0015-minizip-unzip-enable-decryption.patch + - Add parsing of the 'Info-ZIP Unicode Path Extra Field' as described in https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT section 4.6.9. (see crrev.com/1002476) - -- Check for overly long filename, comment, or extra field in - zipOpenNewFileInZip4_64 (crbug.com/1470539). + 0016-minizip-parse-unicode-path-extra-field.patch diff --git a/deps/zlib/contrib/minizip/crypt.h b/deps/zlib/contrib/minizip/crypt.h index 1e9e8200b201ff..f4b93b78dc31cc 100644 --- a/deps/zlib/contrib/minizip/crypt.h +++ b/deps/zlib/contrib/minizip/crypt.h @@ -32,12 +32,12 @@ /*********************************************************************** * Return the next byte in the pseudo-random sequence */ -static int decrypt_byte(unsigned long* pkeys, const z_crc_t* pcrc_32_tab) -{ +static int decrypt_byte(unsigned long* pkeys, const z_crc_t* pcrc_32_tab) { unsigned temp; /* POTENTIAL BUG: temp*(temp^1) may overflow in an * unpredictable manner on 16-bit systems; not a problem * with any known compiler so far, though */ + (void)pcrc_32_tab; temp = ((unsigned)(*(pkeys+2)) & 0xffff) | 2; return (int)(((temp * (temp ^ 1)) >> 8) & 0xff); } @@ -45,8 +45,7 @@ static int decrypt_byte(unsigned long* pkeys, const z_crc_t* pcrc_32_tab) /*********************************************************************** * Update the encryption keys with the next byte of plain text */ -static int update_keys(unsigned long* pkeys,const z_crc_t* pcrc_32_tab,int c) -{ +static int update_keys(unsigned long* pkeys, const z_crc_t* pcrc_32_tab, int c) { (*(pkeys+0)) = CRC32((*(pkeys+0)), c); (*(pkeys+1)) += (*(pkeys+0)) & 0xff; (*(pkeys+1)) = (*(pkeys+1)) * 134775813L + 1; @@ -62,8 +61,7 @@ static int update_keys(unsigned long* pkeys,const z_crc_t* pcrc_32_tab,int c) * Initialize the encryption keys and the random header according to * the given password. 
*/ -static void init_keys(const char* passwd,unsigned long* pkeys,const z_crc_t* pcrc_32_tab) -{ +static void init_keys(const char* passwd, unsigned long* pkeys, const z_crc_t* pcrc_32_tab) { *(pkeys+0) = 305419896L; *(pkeys+1) = 591751049L; *(pkeys+2) = 878082192L; @@ -77,24 +75,23 @@ static void init_keys(const char* passwd,unsigned long* pkeys,const z_crc_t* pcr (update_keys(pkeys,pcrc_32_tab,c ^= decrypt_byte(pkeys,pcrc_32_tab))) #define zencode(pkeys,pcrc_32_tab,c,t) \ - (t=decrypt_byte(pkeys,pcrc_32_tab), update_keys(pkeys,pcrc_32_tab,c), t^(c)) + (t=decrypt_byte(pkeys,pcrc_32_tab), update_keys(pkeys,pcrc_32_tab,c), (Byte)t^(c)) #ifdef INCLUDECRYPTINGCODE_IFCRYPTALLOWED #define RAND_HEAD_LEN 12 /* "last resort" source for second part of crypt seed pattern */ # ifndef ZCR_SEED2 -# define ZCR_SEED2 3141592654UL /* use PI as default pattern */ +# define ZCR_SEED2 3141592654UL /* use PI as default pattern */ # endif -static int crypthead(const char* passwd, /* password string */ - unsigned char* buf, /* where to write header */ - int bufSize, - unsigned long* pkeys, - const z_crc_t* pcrc_32_tab, - unsigned long crcForCrypting) -{ - int n; /* index in random header */ +static unsigned crypthead(const char* passwd, /* password string */ + unsigned char* buf, /* where to write header */ + int bufSize, + unsigned long* pkeys, + const z_crc_t* pcrc_32_tab, + unsigned long crcForCrypting) { + unsigned n; /* index in random header */ int t; /* temporary */ int c; /* random byte */ unsigned char header[RAND_HEAD_LEN-2]; /* random header */ diff --git a/deps/zlib/contrib/minizip/ioapi.c b/deps/zlib/contrib/minizip/ioapi.c index 543910b5e118a4..a38881dca90a23 100644 --- a/deps/zlib/contrib/minizip/ioapi.c +++ b/deps/zlib/contrib/minizip/ioapi.c @@ -14,7 +14,7 @@ #define _CRT_SECURE_NO_WARNINGS #endif -#if defined(__APPLE__) || defined(__Fuchsia__) || defined(IOAPI_NO_64) +#if defined(__APPLE__) || defined(__Fuchsia__) || defined(IOAPI_NO_64) || defined(__HAIKU__) || defined(MINIZIP_FOPEN_NO_64) // In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need for specific 64 bit functions #define FOPEN_FUNC(filename, mode) fopen(filename, mode) #define FTELLO_FUNC(stream) ftello(stream) @@ -28,8 +28,7 @@ #include "ioapi.h" -voidpf call_zopen64 (const zlib_filefunc64_32_def* pfilefunc,const void*filename,int mode) -{ +voidpf call_zopen64 (const zlib_filefunc64_32_def* pfilefunc, const void*filename, int mode) { if (pfilefunc->zfile_func64.zopen64_file != NULL) return (*(pfilefunc->zfile_func64.zopen64_file)) (pfilefunc->zfile_func64.opaque,filename,mode); else @@ -38,8 +37,7 @@ voidpf call_zopen64 (const zlib_filefunc64_32_def* pfilefunc,const void*filename } } -long call_zseek64 (const zlib_filefunc64_32_def* pfilefunc,voidpf filestream, ZPOS64_T offset, int origin) -{ +long call_zseek64 (const zlib_filefunc64_32_def* pfilefunc,voidpf filestream, ZPOS64_T offset, int origin) { if (pfilefunc->zfile_func64.zseek64_file != NULL) return (*(pfilefunc->zfile_func64.zseek64_file)) (pfilefunc->zfile_func64.opaque,filestream,offset,origin); else @@ -52,13 +50,12 @@ long call_zseek64 (const zlib_filefunc64_32_def* pfilefunc,voidpf filestream, ZP } } -ZPOS64_T call_ztell64 (const zlib_filefunc64_32_def* pfilefunc,voidpf filestream) -{ +ZPOS64_T call_ztell64 (const zlib_filefunc64_32_def* pfilefunc, voidpf filestream) { if (pfilefunc->zfile_func64.zseek64_file != NULL) return (*(pfilefunc->zfile_func64.ztell64_file)) (pfilefunc->zfile_func64.opaque,filestream); else { - uLong tell_uLong = 
(*(pfilefunc->ztell32_file))(pfilefunc->zfile_func64.opaque,filestream); + uLong tell_uLong = (uLong)(*(pfilefunc->ztell32_file))(pfilefunc->zfile_func64.opaque,filestream); if ((tell_uLong) == MAXU32) return (ZPOS64_T)-1; else @@ -66,11 +63,9 @@ ZPOS64_T call_ztell64 (const zlib_filefunc64_32_def* pfilefunc,voidpf filestream } } -void fill_zlib_filefunc64_32_def_from_filefunc32(zlib_filefunc64_32_def* p_filefunc64_32,const zlib_filefunc_def* p_filefunc32) -{ +void fill_zlib_filefunc64_32_def_from_filefunc32(zlib_filefunc64_32_def* p_filefunc64_32, const zlib_filefunc_def* p_filefunc32) { p_filefunc64_32->zfile_func64.zopen64_file = NULL; p_filefunc64_32->zopen32_file = p_filefunc32->zopen_file; - p_filefunc64_32->zfile_func64.zerror_file = p_filefunc32->zerror_file; p_filefunc64_32->zfile_func64.zread_file = p_filefunc32->zread_file; p_filefunc64_32->zfile_func64.zwrite_file = p_filefunc32->zwrite_file; p_filefunc64_32->zfile_func64.ztell64_file = NULL; @@ -84,18 +79,10 @@ void fill_zlib_filefunc64_32_def_from_filefunc32(zlib_filefunc64_32_def* p_filef -static voidpf ZCALLBACK fopen_file_func OF((voidpf opaque, const char* filename, int mode)); -static uLong ZCALLBACK fread_file_func OF((voidpf opaque, voidpf stream, void* buf, uLong size)); -static uLong ZCALLBACK fwrite_file_func OF((voidpf opaque, voidpf stream, const void* buf,uLong size)); -static ZPOS64_T ZCALLBACK ftell64_file_func OF((voidpf opaque, voidpf stream)); -static long ZCALLBACK fseek64_file_func OF((voidpf opaque, voidpf stream, ZPOS64_T offset, int origin)); -static int ZCALLBACK fclose_file_func OF((voidpf opaque, voidpf stream)); -static int ZCALLBACK ferror_file_func OF((voidpf opaque, voidpf stream)); - -static voidpf ZCALLBACK fopen_file_func (voidpf opaque, const char* filename, int mode) -{ +static voidpf ZCALLBACK fopen_file_func(voidpf opaque, const char* filename, int mode) { FILE* file = NULL; const char* mode_fopen = NULL; + (void)opaque; if ((mode & ZLIB_FILEFUNC_MODE_READWRITEFILTER)==ZLIB_FILEFUNC_MODE_READ) mode_fopen = "rb"; else @@ -110,10 +97,10 @@ static voidpf ZCALLBACK fopen_file_func (voidpf opaque, const char* filename, in return file; } -static voidpf ZCALLBACK fopen64_file_func (voidpf opaque, const void* filename, int mode) -{ +static voidpf ZCALLBACK fopen64_file_func(voidpf opaque, const void* filename, int mode) { FILE* file = NULL; const char* mode_fopen = NULL; + (void)opaque; if ((mode & ZLIB_FILEFUNC_MODE_READWRITEFILTER)==ZLIB_FILEFUNC_MODE_READ) mode_fopen = "rb"; else @@ -129,39 +116,39 @@ static voidpf ZCALLBACK fopen64_file_func (voidpf opaque, const void* filename, } -static uLong ZCALLBACK fread_file_func (voidpf opaque, voidpf stream, void* buf, uLong size) -{ +static uLong ZCALLBACK fread_file_func(voidpf opaque, voidpf stream, void* buf, uLong size) { uLong ret; + (void)opaque; ret = (uLong)fread(buf, 1, (size_t)size, (FILE *)stream); return ret; } -static uLong ZCALLBACK fwrite_file_func (voidpf opaque, voidpf stream, const void* buf, uLong size) -{ +static uLong ZCALLBACK fwrite_file_func(voidpf opaque, voidpf stream, const void* buf, uLong size) { uLong ret; + (void)opaque; ret = (uLong)fwrite(buf, 1, (size_t)size, (FILE *)stream); return ret; } -static long ZCALLBACK ftell_file_func (voidpf opaque, voidpf stream) -{ +static long ZCALLBACK ftell_file_func(voidpf opaque, voidpf stream) { long ret; + (void)opaque; ret = ftell((FILE *)stream); return ret; } -static ZPOS64_T ZCALLBACK ftell64_file_func (voidpf opaque, voidpf stream) -{ +static ZPOS64_T ZCALLBACK 
ftell64_file_func(voidpf opaque, voidpf stream) { ZPOS64_T ret; - ret = FTELLO_FUNC((FILE *)stream); + (void)opaque; + ret = (ZPOS64_T)FTELLO_FUNC((FILE *)stream); return ret; } -static long ZCALLBACK fseek_file_func (voidpf opaque, voidpf stream, uLong offset, int origin) -{ +static long ZCALLBACK fseek_file_func(voidpf opaque, voidpf stream, uLong offset, int origin) { int fseek_origin=0; long ret; + (void)opaque; switch (origin) { case ZLIB_FILEFUNC_SEEK_CUR : @@ -176,15 +163,15 @@ static long ZCALLBACK fseek_file_func (voidpf opaque, voidpf stream, uLong offs default: return -1; } ret = 0; - if (fseek((FILE *)stream, offset, fseek_origin) != 0) + if (fseek((FILE *)stream, (long)offset, fseek_origin) != 0) ret = -1; return ret; } -static long ZCALLBACK fseek64_file_func (voidpf opaque, voidpf stream, ZPOS64_T offset, int origin) -{ +static long ZCALLBACK fseek64_file_func(voidpf opaque, voidpf stream, ZPOS64_T offset, int origin) { int fseek_origin=0; long ret; + (void)opaque; switch (origin) { case ZLIB_FILEFUNC_SEEK_CUR : @@ -200,30 +187,28 @@ static long ZCALLBACK fseek64_file_func (voidpf opaque, voidpf stream, ZPOS64_T } ret = 0; - if(FSEEKO_FUNC((FILE *)stream, offset, fseek_origin) != 0) + if(FSEEKO_FUNC((FILE *)stream, (z_off64_t)offset, fseek_origin) != 0) ret = -1; return ret; } -static int ZCALLBACK fclose_file_func (voidpf opaque, voidpf stream) -{ +static int ZCALLBACK fclose_file_func(voidpf opaque, voidpf stream) { int ret; + (void)opaque; ret = fclose((FILE *)stream); return ret; } -static int ZCALLBACK ferror_file_func (voidpf opaque, voidpf stream) -{ +static int ZCALLBACK ferror_file_func(voidpf opaque, voidpf stream) { int ret; + (void)opaque; ret = ferror((FILE *)stream); return ret; } -void fill_fopen_filefunc (pzlib_filefunc_def) - zlib_filefunc_def* pzlib_filefunc_def; -{ +void fill_fopen_filefunc(zlib_filefunc_def* pzlib_filefunc_def) { pzlib_filefunc_def->zopen_file = fopen_file_func; pzlib_filefunc_def->zread_file = fread_file_func; pzlib_filefunc_def->zwrite_file = fwrite_file_func; @@ -234,8 +219,7 @@ void fill_fopen_filefunc (pzlib_filefunc_def) pzlib_filefunc_def->opaque = NULL; } -void fill_fopen64_filefunc (zlib_filefunc64_def* pzlib_filefunc_def) -{ +void fill_fopen64_filefunc(zlib_filefunc64_def* pzlib_filefunc_def) { pzlib_filefunc_def->zopen64_file = fopen64_file_func; pzlib_filefunc_def->zread_file = fread_file_func; pzlib_filefunc_def->zwrite_file = fwrite_file_func; diff --git a/deps/zlib/contrib/minizip/ioapi.h b/deps/zlib/contrib/minizip/ioapi.h index 8dcbdb06e35ad5..a2d2e6e60d9250 100644 --- a/deps/zlib/contrib/minizip/ioapi.h +++ b/deps/zlib/contrib/minizip/ioapi.h @@ -50,7 +50,7 @@ #define ftello64 ftell #define fseeko64 fseek #else -#ifdef __FreeBSD__ +#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__HAIKU__) || defined(MINIZIP_FOPEN_NO_64) #define fopen64 fopen #define ftello64 ftello #define fseeko64 fseeko @@ -82,7 +82,7 @@ #include "mz64conf.h" #endif -/* a type choosen by DEFINE */ +/* a type chosen by DEFINE */ #ifdef HAVE_64BIT_INT_CUSTOM typedef 64BIT_INT_CUSTOM_TYPE ZPOS64_T; #else @@ -91,8 +91,7 @@ typedef 64BIT_INT_CUSTOM_TYPE ZPOS64_T; typedef uint64_t ZPOS64_T; #else -/* Maximum unsigned 32-bit value used as placeholder for zip64 */ -#define MAXU32 0xffffffff + #if defined(_MSC_VER) || defined(__BORLANDC__) typedef unsigned __int64 ZPOS64_T; @@ -102,7 +101,10 @@ typedef unsigned long long int ZPOS64_T; #endif #endif - +/* Maximum unsigned 32-bit value used as placeholder for zip64 */ +#ifndef 
MAXU32 +#define MAXU32 (0xffffffff) +#endif #ifdef __cplusplus extern "C" { @@ -132,17 +134,17 @@ extern "C" { -typedef voidpf (ZCALLBACK *open_file_func) OF((voidpf opaque, const char* filename, int mode)); -typedef uLong (ZCALLBACK *read_file_func) OF((voidpf opaque, voidpf stream, void* buf, uLong size)); -typedef uLong (ZCALLBACK *write_file_func) OF((voidpf opaque, voidpf stream, const void* buf, uLong size)); -typedef int (ZCALLBACK *close_file_func) OF((voidpf opaque, voidpf stream)); -typedef int (ZCALLBACK *testerror_file_func) OF((voidpf opaque, voidpf stream)); +typedef voidpf (ZCALLBACK *open_file_func) (voidpf opaque, const char* filename, int mode); +typedef uLong (ZCALLBACK *read_file_func) (voidpf opaque, voidpf stream, void* buf, uLong size); +typedef uLong (ZCALLBACK *write_file_func) (voidpf opaque, voidpf stream, const void* buf, uLong size); +typedef int (ZCALLBACK *close_file_func) (voidpf opaque, voidpf stream); +typedef int (ZCALLBACK *testerror_file_func) (voidpf opaque, voidpf stream); -typedef long (ZCALLBACK *tell_file_func) OF((voidpf opaque, voidpf stream)); -typedef long (ZCALLBACK *seek_file_func) OF((voidpf opaque, voidpf stream, uLong offset, int origin)); +typedef long (ZCALLBACK *tell_file_func) (voidpf opaque, voidpf stream); +typedef long (ZCALLBACK *seek_file_func) (voidpf opaque, voidpf stream, uLong offset, int origin); -/* here is the "old" 32 bits structure structure */ +/* here is the "old" 32 bits structure */ typedef struct zlib_filefunc_def_s { open_file_func zopen_file; @@ -155,9 +157,9 @@ typedef struct zlib_filefunc_def_s voidpf opaque; } zlib_filefunc_def; -typedef ZPOS64_T (ZCALLBACK *tell64_file_func) OF((voidpf opaque, voidpf stream)); -typedef long (ZCALLBACK *seek64_file_func) OF((voidpf opaque, voidpf stream, ZPOS64_T offset, int origin)); -typedef voidpf (ZCALLBACK *open64_file_func) OF((voidpf opaque, const void* filename, int mode)); +typedef ZPOS64_T (ZCALLBACK *tell64_file_func) (voidpf opaque, voidpf stream); +typedef long (ZCALLBACK *seek64_file_func) (voidpf opaque, voidpf stream, ZPOS64_T offset, int origin); +typedef voidpf (ZCALLBACK *open64_file_func) (voidpf opaque, const void* filename, int mode); typedef struct zlib_filefunc64_def_s { @@ -171,8 +173,8 @@ typedef struct zlib_filefunc64_def_s voidpf opaque; } zlib_filefunc64_def; -void fill_fopen64_filefunc OF((zlib_filefunc64_def* pzlib_filefunc_def)); -void fill_fopen_filefunc OF((zlib_filefunc_def* pzlib_filefunc_def)); +void fill_fopen64_filefunc(zlib_filefunc64_def* pzlib_filefunc_def); +void fill_fopen_filefunc(zlib_filefunc_def* pzlib_filefunc_def); /* now internal definition, only for zip.c and unzip.h */ typedef struct zlib_filefunc64_32_def_s @@ -191,11 +193,11 @@ typedef struct zlib_filefunc64_32_def_s #define ZCLOSE64(filefunc,filestream) ((*((filefunc).zfile_func64.zclose_file)) ((filefunc).zfile_func64.opaque,filestream)) #define ZERROR64(filefunc,filestream) ((*((filefunc).zfile_func64.zerror_file)) ((filefunc).zfile_func64.opaque,filestream)) -voidpf call_zopen64 OF((const zlib_filefunc64_32_def* pfilefunc,const void*filename,int mode)); -long call_zseek64 OF((const zlib_filefunc64_32_def* pfilefunc,voidpf filestream, ZPOS64_T offset, int origin)); -ZPOS64_T call_ztell64 OF((const zlib_filefunc64_32_def* pfilefunc,voidpf filestream)); +voidpf call_zopen64(const zlib_filefunc64_32_def* pfilefunc,const void*filename,int mode); +long call_zseek64(const zlib_filefunc64_32_def* pfilefunc,voidpf filestream, ZPOS64_T offset, int origin); +ZPOS64_T 
call_ztell64(const zlib_filefunc64_32_def* pfilefunc,voidpf filestream); -void fill_zlib_filefunc64_32_def_from_filefunc32(zlib_filefunc64_32_def* p_filefunc64_32,const zlib_filefunc_def* p_filefunc32); +void fill_zlib_filefunc64_32_def_from_filefunc32(zlib_filefunc64_32_def* p_filefunc64_32,const zlib_filefunc_def* p_filefunc32); #define ZOPEN64(filefunc,filename,mode) (call_zopen64((&(filefunc)),(filename),(mode))) #define ZTELL64(filefunc,filestream) (call_ztell64((&(filefunc)),(filestream))) diff --git a/deps/zlib/contrib/minizip/iowin32.c b/deps/zlib/contrib/minizip/iowin32.c index c6bc314b3c28af..3f6867fd7e40b5 100644 --- a/deps/zlib/contrib/minizip/iowin32.c +++ b/deps/zlib/contrib/minizip/iowin32.c @@ -25,7 +25,6 @@ #define INVALID_SET_FILE_POINTER ((DWORD)-1) #endif - #ifdef _WIN32_WINNT #undef _WIN32_WINNT #define _WIN32_WINNT 0x601 @@ -38,14 +37,6 @@ #endif #endif -voidpf ZCALLBACK win32_open_file_func OF((voidpf opaque, const char* filename, int mode)); -uLong ZCALLBACK win32_read_file_func OF((voidpf opaque, voidpf stream, void* buf, uLong size)); -uLong ZCALLBACK win32_write_file_func OF((voidpf opaque, voidpf stream, const void* buf, uLong size)); -ZPOS64_T ZCALLBACK win32_tell64_file_func OF((voidpf opaque, voidpf stream)); -long ZCALLBACK win32_seek64_file_func OF((voidpf opaque, voidpf stream, ZPOS64_T offset, int origin)); -int ZCALLBACK win32_close_file_func OF((voidpf opaque, voidpf stream)); -int ZCALLBACK win32_error_file_func OF((voidpf opaque, voidpf stream)); - typedef struct { HANDLE hf; @@ -57,8 +48,7 @@ static void win32_translate_open_mode(int mode, DWORD* lpdwDesiredAccess, DWORD* lpdwCreationDisposition, DWORD* lpdwShareMode, - DWORD* lpdwFlagsAndAttributes) -{ + DWORD* lpdwFlagsAndAttributes) { *lpdwDesiredAccess = *lpdwShareMode = *lpdwFlagsAndAttributes = *lpdwCreationDisposition = 0; if ((mode & ZLIB_FILEFUNC_MODE_READWRITEFILTER)==ZLIB_FILEFUNC_MODE_READ) @@ -79,8 +69,7 @@ static void win32_translate_open_mode(int mode, } } -static voidpf win32_build_iowin(HANDLE hFile) -{ +static voidpf win32_build_iowin(HANDLE hFile) { voidpf ret=NULL; if ((hFile != NULL) && (hFile != INVALID_HANDLE_VALUE)) @@ -98,8 +87,7 @@ static voidpf win32_build_iowin(HANDLE hFile) return ret; } -voidpf ZCALLBACK win32_open64_file_func (voidpf opaque,const void* filename,int mode) -{ +voidpf ZCALLBACK win32_open64_file_func(voidpf opaque, const void* filename, int mode) { const char* mode_fopen = NULL; DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; HANDLE hFile = NULL; @@ -127,8 +115,7 @@ voidpf ZCALLBACK win32_open64_file_func (voidpf opaque,const void* filename,int } -voidpf ZCALLBACK win32_open64_file_funcA (voidpf opaque,const void* filename,int mode) -{ +voidpf ZCALLBACK win32_open64_file_funcA(voidpf opaque, const void* filename, int mode) { const char* mode_fopen = NULL; DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; HANDLE hFile = NULL; @@ -151,8 +138,7 @@ voidpf ZCALLBACK win32_open64_file_funcA (voidpf opaque,const void* filename,int } -voidpf ZCALLBACK win32_open64_file_funcW (voidpf opaque,const void* filename,int mode) -{ +voidpf ZCALLBACK win32_open64_file_funcW(voidpf opaque, const void* filename, int mode) { const char* mode_fopen = NULL; DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; HANDLE hFile = NULL; @@ -171,8 +157,7 @@ voidpf ZCALLBACK win32_open64_file_funcW (voidpf opaque,const void* filename,int } -voidpf ZCALLBACK win32_open_file_func (voidpf opaque,const 
char* filename,int mode) -{ +voidpf ZCALLBACK win32_open_file_func(voidpf opaque, const char* filename, int mode) { const char* mode_fopen = NULL; DWORD dwDesiredAccess,dwCreationDisposition,dwShareMode,dwFlagsAndAttributes ; HANDLE hFile = NULL; @@ -200,8 +185,7 @@ voidpf ZCALLBACK win32_open_file_func (voidpf opaque,const char* filename,int mo } -uLong ZCALLBACK win32_read_file_func (voidpf opaque, voidpf stream, void* buf,uLong size) -{ +uLong ZCALLBACK win32_read_file_func(voidpf opaque, voidpf stream, void* buf,uLong size) { uLong ret=0; HANDLE hFile = NULL; if (stream!=NULL) @@ -222,8 +206,7 @@ uLong ZCALLBACK win32_read_file_func (voidpf opaque, voidpf stream, void* buf,uL } -uLong ZCALLBACK win32_write_file_func (voidpf opaque,voidpf stream,const void* buf,uLong size) -{ +uLong ZCALLBACK win32_write_file_func(voidpf opaque, voidpf stream, const void* buf, uLong size) { uLong ret=0; HANDLE hFile = NULL; if (stream!=NULL) @@ -243,8 +226,7 @@ uLong ZCALLBACK win32_write_file_func (voidpf opaque,voidpf stream,const void* b return ret; } -static BOOL MySetFilePointerEx(HANDLE hFile, LARGE_INTEGER pos, LARGE_INTEGER *newPos, DWORD dwMoveMethod) -{ +static BOOL MySetFilePointerEx(HANDLE hFile, LARGE_INTEGER pos, LARGE_INTEGER *newPos, DWORD dwMoveMethod) { #ifdef IOWIN32_USING_WINRT_API return SetFilePointerEx(hFile, pos, newPos, dwMoveMethod); #else @@ -263,8 +245,7 @@ static BOOL MySetFilePointerEx(HANDLE hFile, LARGE_INTEGER pos, LARGE_INTEGER *n #endif } -long ZCALLBACK win32_tell_file_func (voidpf opaque,voidpf stream) -{ +long ZCALLBACK win32_tell_file_func(voidpf opaque, voidpf stream) { long ret=-1; HANDLE hFile = NULL; if (stream!=NULL) @@ -286,8 +267,7 @@ long ZCALLBACK win32_tell_file_func (voidpf opaque,voidpf stream) return ret; } -ZPOS64_T ZCALLBACK win32_tell64_file_func (voidpf opaque, voidpf stream) -{ +ZPOS64_T ZCALLBACK win32_tell64_file_func(voidpf opaque, voidpf stream) { ZPOS64_T ret= (ZPOS64_T)-1; HANDLE hFile = NULL; if (stream!=NULL) @@ -311,8 +291,7 @@ ZPOS64_T ZCALLBACK win32_tell64_file_func (voidpf opaque, voidpf stream) } -long ZCALLBACK win32_seek_file_func (voidpf opaque,voidpf stream,uLong offset,int origin) -{ +long ZCALLBACK win32_seek_file_func(voidpf opaque, voidpf stream, uLong offset, int origin) { DWORD dwMoveMethod=0xFFFFFFFF; HANDLE hFile = NULL; @@ -349,8 +328,7 @@ long ZCALLBACK win32_seek_file_func (voidpf opaque,voidpf stream,uLong offset,in return ret; } -long ZCALLBACK win32_seek64_file_func (voidpf opaque, voidpf stream,ZPOS64_T offset,int origin) -{ +long ZCALLBACK win32_seek64_file_func(voidpf opaque, voidpf stream, ZPOS64_T offset, int origin) { DWORD dwMoveMethod=0xFFFFFFFF; HANDLE hFile = NULL; long ret=-1; @@ -388,8 +366,7 @@ long ZCALLBACK win32_seek64_file_func (voidpf opaque, voidpf stream,ZPOS64_T off return ret; } -int ZCALLBACK win32_close_file_func (voidpf opaque, voidpf stream) -{ +int ZCALLBACK win32_close_file_func(voidpf opaque, voidpf stream) { int ret=-1; if (stream!=NULL) @@ -406,8 +383,7 @@ int ZCALLBACK win32_close_file_func (voidpf opaque, voidpf stream) return ret; } -int ZCALLBACK win32_error_file_func (voidpf opaque,voidpf stream) -{ +int ZCALLBACK win32_error_file_func(voidpf opaque, voidpf stream) { int ret=-1; if (stream!=NULL) { @@ -416,8 +392,7 @@ int ZCALLBACK win32_error_file_func (voidpf opaque,voidpf stream) return ret; } -void fill_win32_filefunc (zlib_filefunc_def* pzlib_filefunc_def) -{ +void fill_win32_filefunc(zlib_filefunc_def* pzlib_filefunc_def) { pzlib_filefunc_def->zopen_file = 
win32_open_file_func; pzlib_filefunc_def->zread_file = win32_read_file_func; pzlib_filefunc_def->zwrite_file = win32_write_file_func; @@ -428,8 +403,7 @@ void fill_win32_filefunc (zlib_filefunc_def* pzlib_filefunc_def) pzlib_filefunc_def->opaque = NULL; } -void fill_win32_filefunc64(zlib_filefunc64_def* pzlib_filefunc_def) -{ +void fill_win32_filefunc64(zlib_filefunc64_def* pzlib_filefunc_def) { pzlib_filefunc_def->zopen64_file = win32_open64_file_func; pzlib_filefunc_def->zread_file = win32_read_file_func; pzlib_filefunc_def->zwrite_file = win32_write_file_func; @@ -441,8 +415,7 @@ void fill_win32_filefunc64(zlib_filefunc64_def* pzlib_filefunc_def) } -void fill_win32_filefunc64A(zlib_filefunc64_def* pzlib_filefunc_def) -{ +void fill_win32_filefunc64A(zlib_filefunc64_def* pzlib_filefunc_def) { pzlib_filefunc_def->zopen64_file = win32_open64_file_funcA; pzlib_filefunc_def->zread_file = win32_read_file_func; pzlib_filefunc_def->zwrite_file = win32_write_file_func; @@ -454,8 +427,7 @@ void fill_win32_filefunc64A(zlib_filefunc64_def* pzlib_filefunc_def) } -void fill_win32_filefunc64W(zlib_filefunc64_def* pzlib_filefunc_def) -{ +void fill_win32_filefunc64W(zlib_filefunc64_def* pzlib_filefunc_def) { pzlib_filefunc_def->zopen64_file = win32_open64_file_funcW; pzlib_filefunc_def->zread_file = win32_read_file_func; pzlib_filefunc_def->zwrite_file = win32_write_file_func; diff --git a/deps/zlib/contrib/minizip/iowin32.h b/deps/zlib/contrib/minizip/iowin32.h index 0ca0969a7d09a4..a23a65d4331d93 100644 --- a/deps/zlib/contrib/minizip/iowin32.h +++ b/deps/zlib/contrib/minizip/iowin32.h @@ -18,10 +18,10 @@ extern "C" { #endif -void fill_win32_filefunc OF((zlib_filefunc_def* pzlib_filefunc_def)); -void fill_win32_filefunc64 OF((zlib_filefunc64_def* pzlib_filefunc_def)); -void fill_win32_filefunc64A OF((zlib_filefunc64_def* pzlib_filefunc_def)); -void fill_win32_filefunc64W OF((zlib_filefunc64_def* pzlib_filefunc_def)); +void fill_win32_filefunc(zlib_filefunc_def* pzlib_filefunc_def); +void fill_win32_filefunc64(zlib_filefunc64_def* pzlib_filefunc_def); +void fill_win32_filefunc64A(zlib_filefunc64_def* pzlib_filefunc_def); +void fill_win32_filefunc64W(zlib_filefunc64_def* pzlib_filefunc_def); #ifdef __cplusplus } diff --git a/deps/zlib/contrib/minizip/miniunz.c b/deps/zlib/contrib/minizip/miniunz.c index 08737f689a96f3..5b4312e5647cd2 100644 --- a/deps/zlib/contrib/minizip/miniunz.c +++ b/deps/zlib/contrib/minizip/miniunz.c @@ -27,7 +27,7 @@ #endif #endif -#if defined(__APPLE__) || defined(__Fuchsia__) || defined(__ANDROID_API__) +#if defined(__APPLE__) || defined(__HAIKU__) || defined(MINIZIP_FOPEN_NO_64) || defined(__Fuchsia__) || defined(__ANDROID_API__) // In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need for specific 64 bit functions #define FOPEN_FUNC(filename, mode) fopen(filename, mode) #define FTELLO_FUNC(stream) ftello(stream) @@ -81,11 +81,7 @@ filename : the filename of the file where date/time must be modified dosdate : the new date at the MSDos format (4 bytes) tmu_date : the SAME new date at the tm_unz format */ -void change_file_date(filename,dosdate,tmu_date) - const char *filename; - uLong dosdate; - tm_unz tmu_date; -{ +static void change_file_date(const char *filename, uLong dosdate, tm_unz tmu_date) { #ifdef _WIN32 HANDLE hFile; FILETIME ftm,ftLocal,ftCreate,ftLastAcc,ftLastWrite; @@ -99,6 +95,7 @@ void change_file_date(filename,dosdate,tmu_date) CloseHandle(hFile); #else #if defined(unix) || defined(__APPLE__) || defined(__Fuchsia__) || 
defined(__ANDROID_API__) + (void)dosdate; struct utimbuf ut; struct tm newdate; newdate.tm_sec = tmu_date.tm_sec; @@ -114,6 +111,10 @@ void change_file_date(filename,dosdate,tmu_date) ut.actime=ut.modtime=mktime(&newdate); utime(filename,&ut); +#else + (void)filename; + (void)dosdate; + (void)tmu_date; #endif #endif } @@ -122,26 +123,24 @@ void change_file_date(filename,dosdate,tmu_date) /* mymkdir and change_file_date are not 100 % portable As I don't know well Unix, I wait feedback for the unix portion */ -int mymkdir(dirname) - const char* dirname; -{ +static int mymkdir(const char* dirname) { int ret=0; #if defined(_WIN32) ret = _mkdir(dirname); #elif defined(unix) || defined(__APPLE__) || defined(__Fuchsia__) || defined(__ANDROID_API__) ret = mkdir (dirname,0775); +#else + (void)dirname; #endif return ret; } -int makedir (newdir) - char *newdir; -{ +static int makedir(const char *newdir) { char *buffer ; char *p; - int len = (int)strlen(newdir); + size_t len = strlen(newdir); - if (len <= 0) + if (len == 0) return 0; buffer = (char*)malloc(len+1); @@ -184,14 +183,12 @@ int makedir (newdir) return 1; } -void do_banner() -{ - printf("MiniUnz 1.01b, demo of zLib + Unz package written by Gilles Vollant\n"); +static void do_banner(void) { + printf("MiniUnz 1.1, demo of zLib + Unz package written by Gilles Vollant\n"); printf("more info at http://www.winimage.com/zLibDll/unzip.html\n\n"); } -void do_help() -{ +static void do_help(void) { printf("Usage : miniunz [-e] [-x] [-v] [-l] [-o] [-p password] file.zip [file_to_extr.] [-d extractdir]\n\n" \ " -e Extract without pathname (junk paths)\n" \ " -x Extract with pathname\n" \ @@ -199,11 +196,10 @@ void do_help() " -l list files\n" \ " -d directory to extract into\n" \ " -o overwrite files without prompting\n" \ - " -p extract crypted file using password\n\n"); + " -p extract encrypted file using password\n\n"); } -void Display64BitsSize(ZPOS64_T n, int size_char) -{ +static void Display64BitsSize(ZPOS64_T n, int size_char) { /* to avoid compatibility problem , we do here the conversion */ char number[21]; int offset=19; @@ -230,9 +226,7 @@ void Display64BitsSize(ZPOS64_T n, int size_char) printf("%s",&number[pos_string]); } -int do_list(uf) - unzFile uf; -{ +static int do_list(unzFile uf) { uLong i; unz_global_info64 gi; int err; @@ -247,7 +241,7 @@ int do_list(uf) char filename_inzip[256]; unz_file_info64 file_info; uLong ratio=0; - const char *string_method; + const char *string_method = ""; char charCrypt=' '; err = unzGetCurrentFileInfo64(uf,&file_info,filename_inzip,sizeof(filename_inzip),NULL,0,NULL,0); if (err!=UNZ_OK) @@ -258,7 +252,7 @@ int do_list(uf) if (file_info.uncompressed_size>0) ratio = (uLong)((file_info.compressed_size*100)/file_info.uncompressed_size); - /* display a '*' if the file is crypted */ + /* display a '*' if the file is encrypted */ if ((file_info.flag & 1) != 0) charCrypt='*'; @@ -308,12 +302,7 @@ int do_list(uf) } -int do_extract_currentfile(uf,popt_extract_without_path,popt_overwrite,password) - unzFile uf; - const int* popt_extract_without_path; - int* popt_overwrite; - const char* password; -{ +static int do_extract_currentfile(unzFile uf, const int* popt_extract_without_path, int* popt_overwrite, const char* password) { char filename_inzip[256]; char* filename_withoutpath; char* p; @@ -323,7 +312,6 @@ int do_extract_currentfile(uf,popt_extract_without_path,popt_overwrite,password) uInt size_buf; unz_file_info64 file_info; - uLong ratio=0; err = 
unzGetCurrentFileInfo64(uf,&file_info,filename_inzip,sizeof(filename_inzip),NULL,0,NULL,0); if (err!=UNZ_OK) @@ -438,7 +426,7 @@ int do_extract_currentfile(uf,popt_extract_without_path,popt_overwrite,password) break; } if (err>0) - if (fwrite(buf,err,1,fout)!=1) + if (fwrite(buf,(unsigned)err,1,fout)!=1) { printf("error in writing extracted file\n"); err=UNZ_ERRNO; @@ -471,16 +459,10 @@ int do_extract_currentfile(uf,popt_extract_without_path,popt_overwrite,password) } -int do_extract(uf,opt_extract_without_path,opt_overwrite,password) - unzFile uf; - int opt_extract_without_path; - int opt_overwrite; - const char* password; -{ +static int do_extract(unzFile uf, int opt_extract_without_path, int opt_overwrite, const char* password) { uLong i; unz_global_info64 gi; int err; - FILE* fout=NULL; err = unzGetGlobalInfo64(uf,&gi); if (err!=UNZ_OK) @@ -507,14 +489,7 @@ int do_extract(uf,opt_extract_without_path,opt_overwrite,password) return 0; } -int do_extract_onefile(uf,filename,opt_extract_without_path,opt_overwrite,password) - unzFile uf; - const char* filename; - int opt_extract_without_path; - int opt_overwrite; - const char* password; -{ - int err = UNZ_OK; +static int do_extract_onefile(unzFile uf, const char* filename, int opt_extract_without_path, int opt_overwrite, const char* password) { if (unzLocateFile(uf,filename,CASESENSITIVITY)!=UNZ_OK) { printf("file %s not found in the zipfile\n",filename); @@ -530,10 +505,7 @@ int do_extract_onefile(uf,filename,opt_extract_without_path,opt_overwrite,passwo } -int main(argc,argv) - int argc; - char *argv[]; -{ +int main(int argc, char *argv[]) { const char *zipfilename=NULL; const char *filename_to_extract=NULL; const char *password=NULL; @@ -564,7 +536,7 @@ int main(argc,argv) while ((*p)!='\0') { - char c=*(p++);; + char c=*(p++); if ((c=='l') || (c=='L')) opt_do_list = 1; if ((c=='v') || (c=='V')) @@ -606,7 +578,7 @@ int main(argc,argv) # endif strncpy(filename_try, zipfilename,MAXFILENAME-1); - /* strncpy doesnt append the trailing NULL, of the string is too long. */ + /* strncpy doesn't append the trailing NULL, of the string is too long. */ filename_try[ MAXFILENAME ] = '\0'; # ifdef USEWIN32IOAPI diff --git a/deps/zlib/contrib/minizip/minizip.c b/deps/zlib/contrib/minizip/minizip.c index b794953c5c2393..9eb3956a55e00f 100644 --- a/deps/zlib/contrib/minizip/minizip.c +++ b/deps/zlib/contrib/minizip/minizip.c @@ -27,7 +27,7 @@ #endif #endif -#if defined(__APPLE__) || defined(__Fuchsia__) || defined(__ANDROID_API__) +#if defined(__APPLE__) || defined(__HAIKU__) || defined(MINIZIP_FOPEN_NO_64) || defined(__Fuchsia__) || defined(__ANDROID_API__) // In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need for specific 64 bit functions #define FOPEN_FUNC(filename, mode) fopen(filename, mode) #define FTELLO_FUNC(stream) ftello(stream) @@ -70,11 +70,9 @@ #define MAXFILENAME (256) #ifdef _WIN32 -uLong filetime(f, tmzip, dt) - char *f; /* name of file to get info on */ - tm_zip *tmzip; /* return value: access, modific. 
and creation times */ - uLong *dt; /* dostime */ -{ +/* f: name of file to get info on, tmzip: return value: access, + modification and creation times, dt: dostime */ +static int filetime(const char *f, tm_zip *tmzip, uLong *dt) { int ret = 0; { FILETIME ftLocal; @@ -94,11 +92,10 @@ uLong filetime(f, tmzip, dt) } #else #if defined(unix) || defined(__APPLE__) || defined(__Fuchsia__) || defined(__ANDROID_API__) -uLong filetime(f, tmzip, dt) - char *f; /* name of file to get info on */ - tm_zip *tmzip; /* return value: access, modific. and creation times */ - uLong *dt; /* dostime */ -{ +/* f: name of file to get info on, tmzip: return value: access, + modification and creation times, dt: dostime */ +static int filetime(const char *f, tm_zip *tmzip, uLong *dt) { + (void)dt; int ret=0; struct stat s; /* results of stat() */ struct tm* filedate; @@ -107,12 +104,12 @@ uLong filetime(f, tmzip, dt) if (strcmp(f,"-")!=0) { char name[MAXFILENAME+1]; - int len = strlen(f); + size_t len = strlen(f); if (len > MAXFILENAME) len = MAXFILENAME; strncpy(name, f,MAXFILENAME-1); - /* strncpy doesnt append the trailing NULL, of the string is too long. */ + /* strncpy doesn't append the trailing NULL, of the string is too long. */ name[ MAXFILENAME ] = '\0'; if (name[len - 1] == '/') @@ -136,11 +133,12 @@ uLong filetime(f, tmzip, dt) return ret; } #else -uLong filetime(f, tmzip, dt) - char *f; /* name of file to get info on */ - tm_zip *tmzip; /* return value: access, modific. and creation times */ - uLong *dt; /* dostime */ -{ +/* f: name of file to get info on, tmzip: return value: access, + modification and creation times, dt: dostime */ +static int filetime(const char *f, tm_zip *tmzip, uLong *dt) { + (void)f; + (void)tmzip; + (void)dt; return 0; } #endif @@ -149,9 +147,7 @@ uLong filetime(f, tmzip, dt) -int check_exist_file(filename) - const char* filename; -{ +static int check_exist_file(const char* filename) { FILE* ftestexist; int ret = 1; ftestexist = FOPEN_FUNC(filename,"rb"); @@ -162,14 +158,12 @@ int check_exist_file(filename) return ret; } -void do_banner() -{ +static void do_banner(void) { printf("MiniZip 1.1, demo of zLib + MiniZip64 package, written by Gilles Vollant\n"); printf("more info on MiniZip at http://www.winimage.com/zLibDll/minizip.html\n\n"); } -void do_help() -{ +static void do_help(void) { printf("Usage : minizip [-o] [-a] [-0 to -9] [-p password] [-j] file.zip [files_to_add]\n\n" \ " -o Overwrite existing file.zip\n" \ " -a Append to existing file.zip\n" \ @@ -181,14 +175,13 @@ void do_help() /* calculate the CRC32 of a file, because to encrypt a file, we need known the CRC32 of the file before */ -int getFileCrc(const char* filenameinzip,void*buf,unsigned long size_buf,unsigned long* result_crc) -{ +static int getFileCrc(const char* filenameinzip, void* buf, unsigned long size_buf, unsigned long* result_crc) { unsigned long calculate_crc=0; int err=ZIP_OK; FILE * fin = FOPEN_FUNC(filenameinzip,"rb"); unsigned long size_read = 0; - unsigned long total_read = 0; + /* unsigned long total_read = 0; */ if (fin==NULL) { err = ZIP_ERRNO; @@ -198,7 +191,7 @@ int getFileCrc(const char* filenameinzip,void*buf,unsigned long size_buf,unsigne do { err = ZIP_OK; - size_read = (int)fread(buf,1,size_buf,fin); + size_read = fread(buf,1,size_buf,fin); if (size_read < size_buf) if (feof(fin)==0) { @@ -207,8 +200,8 @@ int getFileCrc(const char* filenameinzip,void*buf,unsigned long size_buf,unsigne } if (size_read>0) - calculate_crc = crc32(calculate_crc,buf,size_read); - total_read += size_read; + 
calculate_crc = crc32_z(calculate_crc,buf,size_read); + /* total_read += size_read; */ } while ((err == ZIP_OK) && (size_read>0)); @@ -220,18 +213,17 @@ int getFileCrc(const char* filenameinzip,void*buf,unsigned long size_buf,unsigne return err; } -int isLargeFile(const char* filename) -{ +static int isLargeFile(const char* filename) { int largeFile = 0; ZPOS64_T pos = 0; FILE* pFile = FOPEN_FUNC(filename, "rb"); if(pFile != NULL) { - int n = FSEEKO_FUNC(pFile, 0, SEEK_END); - pos = FTELLO_FUNC(pFile); + FSEEKO_FUNC(pFile, 0, SEEK_END); + pos = (ZPOS64_T)FTELLO_FUNC(pFile); - printf("File : %s is %lld bytes\n", filename, pos); + printf("File : %s is %llu bytes\n", filename, pos); if(pos >= 0xffffffff) largeFile = 1; @@ -242,10 +234,7 @@ int isLargeFile(const char* filename) return largeFile; } -int main(argc,argv) - int argc; - char *argv[]; -{ +int main(int argc, char *argv[]) { int i; int opt_overwrite=0; int opt_compress_level=Z_DEFAULT_COMPRESSION; @@ -254,7 +243,7 @@ int main(argc,argv) char filename_try[MAXFILENAME+16]; int zipok; int err=0; - int size_buf=0; + size_t size_buf=0; void* buf=NULL; const char* password=NULL; @@ -275,7 +264,7 @@ int main(argc,argv) while ((*p)!='\0') { - char c=*(p++);; + char c=*(p++); if ((c=='o') || (c=='O')) opt_overwrite = 1; if ((c=='a') || (c=='A')) @@ -321,7 +310,7 @@ int main(argc,argv) zipok = 1 ; strncpy(filename_try, argv[zipfilenamearg],MAXFILENAME-1); - /* strncpy doesnt append the trailing NULL, of the string is too long. */ + /* strncpy doesn't append the trailing NULL, of the string is too long. */ filename_try[ MAXFILENAME ] = '\0'; len=(int)strlen(filename_try); @@ -391,11 +380,11 @@ int main(argc,argv) ((argv[i][1]=='o') || (argv[i][1]=='O') || (argv[i][1]=='a') || (argv[i][1]=='A') || (argv[i][1]=='p') || (argv[i][1]=='P') || - ((argv[i][1]>='0') || (argv[i][1]<='9'))) && + ((argv[i][1]>='0') && (argv[i][1]<='9'))) && (strlen(argv[i]) == 2))) { - FILE * fin; - int size_read; + FILE * fin = NULL; + size_t size_read; const char* filenameinzip = argv[i]; const char *savefilenameinzip; zip_fileinfo zi; @@ -471,7 +460,7 @@ int main(argc,argv) do { err = ZIP_OK; - size_read = (int)fread(buf,1,size_buf,fin); + size_read = fread(buf,1,size_buf,fin); if (size_read < size_buf) if (feof(fin)==0) { @@ -481,7 +470,7 @@ int main(argc,argv) if (size_read>0) { - err = zipWriteInFileInZip (zf,buf,size_read); + err = zipWriteInFileInZip (zf,buf,(unsigned)size_read); if (err<0) { printf("error in writing %s in the zipfile\n", diff --git a/deps/zlib/contrib/minizip/mztools.c b/deps/zlib/contrib/minizip/mztools.c index 96891c2e0b71ef..c8d23756155731 100644 --- a/deps/zlib/contrib/minizip/mztools.c +++ b/deps/zlib/contrib/minizip/mztools.c @@ -27,13 +27,7 @@ WRITE_16((unsigned char*)(buff) + 2, (n) >> 16); \ } while(0) -extern int ZEXPORT unzRepair(file, fileOut, fileOutTmp, nRecovered, bytesRecovered) -const char* file; -const char* fileOut; -const char* fileOutTmp; -uLong* nRecovered; -uLong* bytesRecovered; -{ +extern int ZEXPORT unzRepair(const char* file, const char* fileOut, const char* fileOutTmp, uLong* nRecovered, uLong* bytesRecovered) { int err = Z_OK; FILE* fpZip = fopen(file, "rb"); FILE* fpOut = fopen(fileOut, "wb"); diff --git a/deps/zlib/contrib/minizip/unzip.c b/deps/zlib/contrib/minizip/unzip.c index 4973a4eea97b99..3576a8504dde0c 100644 --- a/deps/zlib/contrib/minizip/unzip.c +++ b/deps/zlib/contrib/minizip/unzip.c @@ -49,12 +49,12 @@ Copyright (C) 2007-2008 Even Rouault - Oct-2009 - Mathias Svensson - Removed cpl_* from symbol names 
(Even Rouault added them but since this is now moved to a new project (minizip64) I renamed them again). + Oct-2009 - Mathias Svensson - Removed cpl_* from symbol names (Even Rouault added them but since this is now moved to a new project (minizip64) I renamed them again). Oct-2009 - Mathias Svensson - Fixed problem if uncompressed size was > 4G and compressed size was <4G should only read the compressed/uncompressed size from the Zip64 format if the size from normal header was 0xFFFFFFFF - Oct-2009 - Mathias Svensson - Applied some bug fixes from paches recived from Gilles Vollant - Oct-2009 - Mathias Svensson - Applied support to unzip files with compression mathod BZIP2 (bzip2 lib is required) + Oct-2009 - Mathias Svensson - Applied some bug fixes from patches received from Gilles Vollant + Oct-2009 - Mathias Svensson - Applied support to unzip files with compression method BZIP2 (bzip2 lib is required) Patch created by Daniel Borca Jan-2010 - back to unzip and minizip 1.0 name scheme, with compatibility layer @@ -73,8 +73,6 @@ #ifdef STDC # include -# include -# include #endif #ifdef NO_ERRNO_H extern int errno; @@ -107,9 +105,6 @@ #ifndef ALLOC # define ALLOC(size) (malloc(size)) #endif -#ifndef TRYFREE -# define TRYFREE(p) {if (p) free(p);} -#endif #define SIZECENTRALDIRITEM (0x2e) #define SIZEZIPLOCALHEADER (0x1e) @@ -149,7 +144,7 @@ typedef struct ZPOS64_T rest_read_compressed; /* number of byte to be decompressed */ ZPOS64_T rest_read_uncompressed;/*number of byte to be obtained after decomp*/ zlib_filefunc64_32_def z_filefunc; - voidpf filestream; /* io structore of the zipfile */ + voidpf filestream; /* io structure of the zipfile */ uLong compression_method; /* compression method (0==store) */ ZPOS64_T byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ int raw; @@ -162,7 +157,7 @@ typedef struct { zlib_filefunc64_32_def z_filefunc; int is64bitOpenFunction; - voidpf filestream; /* io structore of the zipfile */ + voidpf filestream; /* io structure of the zipfile */ unz_global_info64 gi; /* public global information */ ZPOS64_T byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ ZPOS64_T num_file; /* number of the current file in the zipfile*/ @@ -193,20 +188,14 @@ typedef struct #include "crypt.h" #endif + /* =========================================================================== - Read a byte from a gz_stream; update next_in and avail_in. Return EOF - for end of file. - IN assertion: the stream s has been successfully opened for reading. + Reads a long in LSB order from the given gz_stream. Sets */ - -local int unz64local_getByte OF(( - const zlib_filefunc64_32_def* pzlib_filefunc_def, - voidpf filestream, - int *pi)); - -local int unz64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, int *pi) -{ +local int unz64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + int *pi) { unsigned char c; int err = (int)ZREAD64(*pzlib_filefunc_def,filestream,&c,1); if (err==1) @@ -216,6 +205,7 @@ local int unz64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def, v } else { + *pi = 0; if (ZERROR64(*pzlib_filefunc_def,filestream)) return UNZ_ERRNO; else @@ -223,127 +213,70 @@ local int unz64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def, v } } - -/* =========================================================================== - Reads a long in LSB order from the given gz_stream. 
Sets -*/ -local int unz64local_getShort OF(( - const zlib_filefunc64_32_def* pzlib_filefunc_def, - voidpf filestream, - uLong *pX)); - -local int unz64local_getShort (const zlib_filefunc64_32_def* pzlib_filefunc_def, - voidpf filestream, - uLong *pX) -{ - uLong x ; - int i = 0; - int err; - - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x = (uLong)i; - - if (err==UNZ_OK) - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x |= ((uLong)i)<<8; - - if (err==UNZ_OK) - *pX = x; +local int unz64local_getShort(const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + uLong *pX) { + unsigned char c[2]; + int err = (int)ZREAD64(*pzlib_filefunc_def,filestream,c,2); + if (err==2) + { + *pX = c[0] | ((uLong)c[1] << 8); + return UNZ_OK; + } else + { *pX = 0; - return err; + if (ZERROR64(*pzlib_filefunc_def,filestream)) + return UNZ_ERRNO; + else + return UNZ_EOF; + } } -local int unz64local_getLong OF(( - const zlib_filefunc64_32_def* pzlib_filefunc_def, - voidpf filestream, - uLong *pX)); - -local int unz64local_getLong (const zlib_filefunc64_32_def* pzlib_filefunc_def, - voidpf filestream, - uLong *pX) -{ - uLong x ; - int i = 0; - int err; - - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x = (uLong)i; - - if (err==UNZ_OK) - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x |= ((uLong)i)<<8; - - if (err==UNZ_OK) - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x |= ((uLong)i)<<16; - - if (err==UNZ_OK) - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x += ((uLong)i)<<24; - - if (err==UNZ_OK) - *pX = x; +local int unz64local_getLong(const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + uLong *pX) { + unsigned char c[4]; + int err = (int)ZREAD64(*pzlib_filefunc_def,filestream,c,4); + if (err==4) + { + *pX = c[0] | ((uLong)c[1] << 8) | ((uLong)c[2] << 16) | ((uLong)c[3] << 24); + return UNZ_OK; + } else + { *pX = 0; - return err; + if (ZERROR64(*pzlib_filefunc_def,filestream)) + return UNZ_ERRNO; + else + return UNZ_EOF; + } } -local int unz64local_getLong64 OF(( - const zlib_filefunc64_32_def* pzlib_filefunc_def, - voidpf filestream, - ZPOS64_T *pX)); - -local int unz64local_getLong64 (const zlib_filefunc64_32_def* pzlib_filefunc_def, - voidpf filestream, - ZPOS64_T *pX) -{ - ZPOS64_T x ; - int i = 0; - int err; - - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x = (ZPOS64_T)i; - - if (err==UNZ_OK) - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x |= ((ZPOS64_T)i)<<8; - - if (err==UNZ_OK) - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x |= ((ZPOS64_T)i)<<16; - - if (err==UNZ_OK) - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x |= ((ZPOS64_T)i)<<24; - - if (err==UNZ_OK) - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x |= ((ZPOS64_T)i)<<32; - - if (err==UNZ_OK) - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x |= ((ZPOS64_T)i)<<40; - - if (err==UNZ_OK) - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x |= ((ZPOS64_T)i)<<48; - - if (err==UNZ_OK) - err = unz64local_getByte(pzlib_filefunc_def,filestream,&i); - x |= ((ZPOS64_T)i)<<56; - - if (err==UNZ_OK) - *pX = x; +local int unz64local_getLong64(const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + ZPOS64_T *pX) { + unsigned char c[8]; + int err = (int)ZREAD64(*pzlib_filefunc_def,filestream,c,8); + if (err==8) + { + *pX = c[0] | ((ZPOS64_T)c[1] << 8) | ((ZPOS64_T)c[2] << 16) | ((ZPOS64_T)c[3] << 24) + | 
((ZPOS64_T)c[4] << 32) | ((ZPOS64_T)c[5] << 40) | ((ZPOS64_T)c[6] << 48) | ((ZPOS64_T)c[7] << 56); + return UNZ_OK; + } else + { *pX = 0; - return err; + if (ZERROR64(*pzlib_filefunc_def,filestream)) + return UNZ_ERRNO; + else + return UNZ_EOF; + } } /* My own strcmpi / strcasecmp */ -local int strcmpcasenosensitive_internal (const char* fileName1, const char* fileName2) -{ +local int strcmpcasenosensitive_internal(const char* fileName1, const char* fileName2) { for (;;) { char c1=*(fileName1++); @@ -375,19 +308,17 @@ local int strcmpcasenosensitive_internal (const char* fileName1, const char* fil #endif /* - Compare two filename (fileName1,fileName2). - If iCaseSenisivity = 1, comparision is case sensitivity (like strcmp) - If iCaseSenisivity = 2, comparision is not case sensitivity (like strcmpi + Compare two filenames (fileName1,fileName2). + If iCaseSensitivity = 1, comparison is case sensitive (like strcmp) + If iCaseSensitivity = 2, comparison is not case sensitive (like strcmpi or strcasecmp) - If iCaseSenisivity = 0, case sensitivity is defaut of your operating system + If iCaseSensitivity = 0, case sensitivity is default of your operating system (like 1 on Unix, 2 on Windows) */ extern int ZEXPORT unzStringFileNameCompare (const char* fileName1, - const char* fileName2, - int iCaseSensitivity) - -{ + const char* fileName2, + int iCaseSensitivity) { if (iCaseSensitivity==0) iCaseSensitivity=CASESENSITIVITYDEFAULTVALUE; @@ -401,21 +332,24 @@ extern int ZEXPORT unzStringFileNameCompare (const char* fileName1, #define BUFREADCOMMENT (0x400) #endif +#ifndef CENTRALDIRINVALID +#define CENTRALDIRINVALID ((ZPOS64_T)(-1)) +#endif + + /* Locate the Central directory of a zipfile (at the end, just before the global comment) */ -local ZPOS64_T unz64local_SearchCentralDir OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream)); -local ZPOS64_T unz64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) -{ +local ZPOS64_T unz64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) { unsigned char* buf; ZPOS64_T uSizeFile; ZPOS64_T uBackRead; ZPOS64_T uMaxBack=0xffff; /* maximum size of global comment */ - ZPOS64_T uPosFound=0; + ZPOS64_T uPosFound=CENTRALDIRINVALID; if (ZSEEK64(*pzlib_filefunc_def,filestream,0,ZLIB_FILEFUNC_SEEK_END) != 0) - return 0; + return CENTRALDIRINVALID; uSizeFile = ZTELL64(*pzlib_filefunc_def,filestream); @@ -425,7 +359,7 @@ local ZPOS64_T unz64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_f buf = (unsigned char*)ALLOC(BUFREADCOMMENT+4); if (buf==NULL) - return 0; + return CENTRALDIRINVALID; uBackRead = 4; while (uBackReadz_filefunc, s->filestream); - TRYFREE(s); + free(s); return UNZ_OK; } @@ -821,8 +744,7 @@ extern int ZEXPORT unzClose (unzFile file) Write info about the ZipFile in the *pglobal_info structure. No preparation of the structure is needed return UNZ_OK if there is no problem. 
*/ -extern int ZEXPORT unzGetGlobalInfo64 (unzFile file, unz_global_info64* pglobal_info) -{ +extern int ZEXPORT unzGetGlobalInfo64(unzFile file, unz_global_info64* pglobal_info) { unz64_s* s; if (file==NULL) return UNZ_PARAMERROR; @@ -831,8 +753,7 @@ extern int ZEXPORT unzGetGlobalInfo64 (unzFile file, unz_global_info64* pglobal_ return UNZ_OK; } -extern int ZEXPORT unzGetGlobalInfo (unzFile file, unz_global_info* pglobal_info32) -{ +extern int ZEXPORT unzGetGlobalInfo(unzFile file, unz_global_info* pglobal_info32) { unz64_s* s; if (file==NULL) return UNZ_PARAMERROR; @@ -843,46 +764,33 @@ extern int ZEXPORT unzGetGlobalInfo (unzFile file, unz_global_info* pglobal_info return UNZ_OK; } /* - Translate date/time from Dos format to tm_unz (readable more easilty) + Translate date/time from Dos format to tm_unz (readable more easily) */ -local void unz64local_DosDateToTmuDate (ZPOS64_T ulDosDate, tm_unz* ptm) -{ +local void unz64local_DosDateToTmuDate(ZPOS64_T ulDosDate, tm_unz* ptm) { ZPOS64_T uDate; uDate = (ZPOS64_T)(ulDosDate>>16); - ptm->tm_mday = (uInt)(uDate&0x1f) ; - ptm->tm_mon = (uInt)((((uDate)&0x1E0)/0x20)-1) ; - ptm->tm_year = (uInt)(((uDate&0x0FE00)/0x0200)+1980) ; + ptm->tm_mday = (int)(uDate&0x1f) ; + ptm->tm_mon = (int)((((uDate)&0x1E0)/0x20)-1) ; + ptm->tm_year = (int)(((uDate&0x0FE00)/0x0200)+1980) ; - ptm->tm_hour = (uInt) ((ulDosDate &0xF800)/0x800); - ptm->tm_min = (uInt) ((ulDosDate&0x7E0)/0x20) ; - ptm->tm_sec = (uInt) (2*(ulDosDate&0x1f)) ; + ptm->tm_hour = (int) ((ulDosDate &0xF800)/0x800); + ptm->tm_min = (int) ((ulDosDate&0x7E0)/0x20) ; + ptm->tm_sec = (int) (2*(ulDosDate&0x1f)) ; } /* Get Info about the current file in the zipfile, with internal only info */ -local int unz64local_GetCurrentFileInfoInternal OF((unzFile file, - unz_file_info64 *pfile_info, - unz_file_info64_internal - *pfile_info_internal, - char *szFileName, - uLong fileNameBufferSize, - void *extraField, - uLong extraFieldBufferSize, - char *szComment, - uLong commentBufferSize)); - -local int unz64local_GetCurrentFileInfoInternal (unzFile file, - unz_file_info64 *pfile_info, - unz_file_info64_internal - *pfile_info_internal, - char *szFileName, - uLong fileNameBufferSize, - void *extraField, - uLong extraFieldBufferSize, - char *szComment, - uLong commentBufferSize) -{ +local int unz64local_GetCurrentFileInfoInternal(unzFile file, + unz_file_info64 *pfile_info, + unz_file_info64_internal + *pfile_info_internal, + char *szFileName, + uLong fileNameBufferSize, + void *extraField, + uLong extraFieldBufferSize, + char *szComment, + uLong commentBufferSize) { unz64_s* s; unz_file_info64 file_info; unz_file_info64_internal file_info_internal; @@ -989,7 +897,7 @@ local int unz64local_GetCurrentFileInfoInternal (unzFile file, if (lSeek!=0) { - if (ZSEEK64(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) + if (ZSEEK64(s->z_filefunc, s->filestream,(ZPOS64_T)lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) lSeek=0; else err=UNZ_ERRNO; @@ -1014,7 +922,7 @@ local int unz64local_GetCurrentFileInfoInternal (unzFile file, if (lSeek!=0) { - if (ZSEEK64(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) + if (ZSEEK64(s->z_filefunc, s->filestream,(ZPOS64_T)lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) lSeek=0; else err=UNZ_ERRNO; @@ -1023,19 +931,17 @@ local int unz64local_GetCurrentFileInfoInternal (unzFile file, while(acc < file_info.size_file_extra) { uLong headerId; - uLong dataSize; + uLong dataSize; if (unz64local_getShort(&s->z_filefunc, s->filestream,&headerId) != UNZ_OK) err=UNZ_ERRNO; if 
(unz64local_getShort(&s->z_filefunc, s->filestream,&dataSize) != UNZ_OK) err=UNZ_ERRNO; - + /* ZIP64 extra fields */ if (headerId == 0x0001) { - uLong uL; - if(file_info.uncompressed_size == MAXU32) { if (unz64local_getLong64(&s->z_filefunc, s->filestream,&file_info.uncompressed_size) != UNZ_OK) @@ -1055,10 +961,10 @@ local int unz64local_GetCurrentFileInfoInternal (unzFile file, err=UNZ_ERRNO; } - if(file_info.disk_num_start == MAXU32) + if(file_info.disk_num_start == 0xffff) { /* Disk Start Number */ - if (unz64local_getLong(&s->z_filefunc, s->filestream,&uL) != UNZ_OK) + if (unz64local_getLong(&s->z_filefunc, s->filestream,&file_info.disk_num_start) != UNZ_OK) err=UNZ_ERRNO; } @@ -1142,7 +1048,7 @@ local int unz64local_GetCurrentFileInfoInternal (unzFile file, if (lSeek!=0) { - if (ZSEEK64(s->z_filefunc, s->filestream,lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) + if (ZSEEK64(s->z_filefunc, s->filestream,(ZPOS64_T)lSeek,ZLIB_FILEFUNC_SEEK_CUR)==0) lSeek=0; else err=UNZ_ERRNO; @@ -1173,24 +1079,22 @@ local int unz64local_GetCurrentFileInfoInternal (unzFile file, No preparation of the structure is needed return UNZ_OK if there is no problem. */ -extern int ZEXPORT unzGetCurrentFileInfo64 (unzFile file, - unz_file_info64 * pfile_info, - char * szFileName, uLong fileNameBufferSize, - void *extraField, uLong extraFieldBufferSize, - char* szComment, uLong commentBufferSize) -{ +extern int ZEXPORT unzGetCurrentFileInfo64(unzFile file, + unz_file_info64 * pfile_info, + char * szFileName, uLong fileNameBufferSize, + void *extraField, uLong extraFieldBufferSize, + char* szComment, uLong commentBufferSize) { return unz64local_GetCurrentFileInfoInternal(file,pfile_info,NULL, - szFileName,fileNameBufferSize, - extraField,extraFieldBufferSize, - szComment,commentBufferSize); + szFileName,fileNameBufferSize, + extraField,extraFieldBufferSize, + szComment,commentBufferSize); } -extern int ZEXPORT unzGetCurrentFileInfo (unzFile file, - unz_file_info * pfile_info, - char * szFileName, uLong fileNameBufferSize, - void *extraField, uLong extraFieldBufferSize, - char* szComment, uLong commentBufferSize) -{ +extern int ZEXPORT unzGetCurrentFileInfo(unzFile file, + unz_file_info * pfile_info, + char * szFileName, uLong fileNameBufferSize, + void *extraField, uLong extraFieldBufferSize, + char* szComment, uLong commentBufferSize) { int err; unz_file_info64 file_info64; err = unz64local_GetCurrentFileInfoInternal(file,&file_info64,NULL, @@ -1214,7 +1118,7 @@ extern int ZEXPORT unzGetCurrentFileInfo (unzFile file, pfile_info->internal_fa = file_info64.internal_fa; pfile_info->external_fa = file_info64.external_fa; - pfile_info->tmu_date = file_info64.tmu_date, + pfile_info->tmu_date = file_info64.tmu_date; pfile_info->compressed_size = (uLong)file_info64.compressed_size; @@ -1227,8 +1131,7 @@ extern int ZEXPORT unzGetCurrentFileInfo (unzFile file, Set the current file of the zipfile to the first file. return UNZ_OK if there is no problem */ -extern int ZEXPORT unzGoToFirstFile (unzFile file) -{ +extern int ZEXPORT unzGoToFirstFile(unzFile file) { int err=UNZ_OK; unz64_s* s; if (file==NULL) @@ -1248,8 +1151,7 @@ extern int ZEXPORT unzGoToFirstFile (unzFile file) return UNZ_OK if there is no problem return UNZ_END_OF_LIST_OF_FILE if the actual file was the latest. */ -extern int ZEXPORT unzGoToNextFile (unzFile file) -{ +extern int ZEXPORT unzGoToNextFile(unzFile file) { unz64_s* s; int err; @@ -1281,8 +1183,7 @@ extern int ZEXPORT unzGoToNextFile (unzFile file) UNZ_OK if the file is found. It becomes the current file. 
UNZ_END_OF_LIST_OF_FILE if the file is not found */ -extern int ZEXPORT unzLocateFile (unzFile file, const char *szFileName, int iCaseSensitivity) -{ +extern int ZEXPORT unzLocateFile(unzFile file, const char *szFileName, int iCaseSensitivity) { unz64_s* s; int err; @@ -1357,8 +1258,7 @@ typedef struct unz_file_pos_s } unz_file_pos; */ -extern int ZEXPORT unzGetFilePos64(unzFile file, unz64_file_pos* file_pos) -{ +extern int ZEXPORT unzGetFilePos64(unzFile file, unz64_file_pos* file_pos) { unz64_s* s; if (file==NULL || file_pos==NULL) @@ -1373,10 +1273,7 @@ extern int ZEXPORT unzGetFilePos64(unzFile file, unz64_file_pos* file_pos) return UNZ_OK; } -extern int ZEXPORT unzGetFilePos( - unzFile file, - unz_file_pos* file_pos) -{ +extern int ZEXPORT unzGetFilePos(unzFile file, unz_file_pos* file_pos) { unz64_file_pos file_pos64; int err = unzGetFilePos64(file,&file_pos64); if (err==UNZ_OK) @@ -1387,8 +1284,7 @@ extern int ZEXPORT unzGetFilePos( return err; } -extern int ZEXPORT unzGoToFilePos64(unzFile file, const unz64_file_pos* file_pos) -{ +extern int ZEXPORT unzGoToFilePos64(unzFile file, const unz64_file_pos* file_pos) { unz64_s* s; int err; @@ -1409,10 +1305,7 @@ extern int ZEXPORT unzGoToFilePos64(unzFile file, const unz64_file_pos* file_pos return err; } -extern int ZEXPORT unzGoToFilePos( - unzFile file, - unz_file_pos* file_pos) -{ +extern int ZEXPORT unzGoToFilePos(unzFile file, unz_file_pos* file_pos) { unz64_file_pos file_pos64; if (file_pos == NULL) return UNZ_PARAMERROR; @@ -1434,10 +1327,9 @@ extern int ZEXPORT unzGoToFilePos( store in *piSizeVar the size of extra info in local header (filename and size of extra field data) */ -local int unz64local_CheckCurrentFileCoherencyHeader (unz64_s* s, uInt* piSizeVar, - ZPOS64_T * poffset_local_extrafield, - uInt * psize_local_extrafield) -{ +local int unz64local_CheckCurrentFileCoherencyHeader(unz64_s* s, uInt* piSizeVar, + ZPOS64_T * poffset_local_extrafield, + uInt * psize_local_extrafield) { uLong uMagic,uData,uFlags; uLong size_filename; uLong size_extra_field; @@ -1521,9 +1413,8 @@ local int unz64local_CheckCurrentFileCoherencyHeader (unz64_s* s, uInt* piSizeVa Open for reading data the current file in the zipfile. If there is no error and the file is opened, the return value is UNZ_OK. */ -extern int ZEXPORT unzOpenCurrentFile3 (unzFile file, int* method, - int* level, int raw, const char* password) -{ +extern int ZEXPORT unzOpenCurrentFile3(unzFile file, int* method, + int* level, int raw, const char* password) { int err=UNZ_OK; uInt iSizeVar; unz64_s* s; @@ -1561,7 +1452,7 @@ extern int ZEXPORT unzOpenCurrentFile3 (unzFile file, int* method, if (pfile_in_zip_read_info->read_buffer==NULL) { - TRYFREE(pfile_in_zip_read_info); + free(pfile_in_zip_read_info); return UNZ_INTERNALERROR; } @@ -1618,7 +1509,8 @@ extern int ZEXPORT unzOpenCurrentFile3 (unzFile file, int* method, pfile_in_zip_read_info->stream_initialised=Z_BZIP2ED; else { - TRYFREE(pfile_in_zip_read_info); + free(pfile_in_zip_read_info->read_buffer); + free(pfile_in_zip_read_info); return err; } #else @@ -1638,7 +1530,8 @@ extern int ZEXPORT unzOpenCurrentFile3 (unzFile file, int* method, pfile_in_zip_read_info->stream_initialised=Z_DEFLATED; else { - TRYFREE(pfile_in_zip_read_info); + free(pfile_in_zip_read_info->read_buffer); + free(pfile_in_zip_read_info); return err; } /* windowBits is passed < 0 to tell that there is no zlib header. 
@@ -1691,25 +1584,21 @@ extern int ZEXPORT unzOpenCurrentFile3 (unzFile file, int* method, return UNZ_OK; } -extern int ZEXPORT unzOpenCurrentFile (unzFile file) -{ +extern int ZEXPORT unzOpenCurrentFile(unzFile file) { return unzOpenCurrentFile3(file, NULL, NULL, 0, NULL); } -extern int ZEXPORT unzOpenCurrentFilePassword (unzFile file, const char* password) -{ +extern int ZEXPORT unzOpenCurrentFilePassword(unzFile file, const char* password) { return unzOpenCurrentFile3(file, NULL, NULL, 0, password); } -extern int ZEXPORT unzOpenCurrentFile2 (unzFile file, int* method, int* level, int raw) -{ +extern int ZEXPORT unzOpenCurrentFile2(unzFile file, int* method, int* level, int raw) { return unzOpenCurrentFile3(file, method, level, raw, NULL); } /** Addition for GDAL : START */ -extern ZPOS64_T ZEXPORT unzGetCurrentFileZStreamPos64( unzFile file) -{ +extern ZPOS64_T ZEXPORT unzGetCurrentFileZStreamPos64(unzFile file) { unz64_s* s; file_in_zip64_read_info_s* pfile_in_zip_read_info; s=(unz64_s*)file; @@ -1729,13 +1618,12 @@ extern ZPOS64_T ZEXPORT unzGetCurrentFileZStreamPos64( unzFile file) buf contain buffer where data must be copied len the size of buf. - return the number of byte copied if somes bytes are copied + return the number of byte copied if some bytes are copied return 0 if the end of file was reached return <0 with error code if there is an error (UNZ_ERRNO for IO error, or zLib error for uncompress error) */ -extern int ZEXPORT unzReadCurrentFile (unzFile file, voidp buf, unsigned len) -{ +extern int ZEXPORT unzReadCurrentFile(unzFile file, voidp buf, unsigned len) { int err=UNZ_OK; uInt iRead = 0; unz64_s* s; @@ -1815,7 +1703,7 @@ extern int ZEXPORT unzReadCurrentFile (unzFile file, voidp buf, unsigned len) if ((pfile_in_zip_read_info->stream.avail_in == 0) && (pfile_in_zip_read_info->rest_read_compressed == 0)) - return (iRead==0) ? UNZ_EOF : iRead; + return (iRead==0) ? UNZ_EOF : (int)iRead; if (pfile_in_zip_read_info->stream.avail_out < pfile_in_zip_read_info->stream.avail_in) @@ -1905,6 +1793,9 @@ extern int ZEXPORT unzReadCurrentFile (unzFile file, voidp buf, unsigned len) err = Z_DATA_ERROR; uTotalOutAfter = pfile_in_zip_read_info->stream.total_out; + /* Detect overflow, because z_stream.total_out is uLong (32 bits) */ + if (uTotalOutAfter<uTotalOutBefore) + uTotalOutAfter += 1LL << 32; /* Guard against overflow */ uOutThis = uTotalOutAfter-uTotalOutBefore; pfile_in_zip_read_info->total_out_64 = pfile_in_zip_read_info->total_out_64 + uOutThis; @@ -1919,14 +1810,14 @@ extern int ZEXPORT unzReadCurrentFile (unzFile file, voidp buf, unsigned len) iRead += (uInt)(uTotalOutAfter - uTotalOutBefore); if (err==Z_STREAM_END) - return (iRead==0) ?
UNZ_EOF : (int)iRead; if (err!=Z_OK) break; } } if (err==Z_OK) - return iRead; + return (int)iRead; return err; } @@ -1934,8 +1825,7 @@ extern int ZEXPORT unzReadCurrentFile (unzFile file, voidp buf, unsigned len) /* Give the current position in uncompressed data */ -extern z_off_t ZEXPORT unztell (unzFile file) -{ +extern z_off_t ZEXPORT unztell(unzFile file) { unz64_s* s; file_in_zip64_read_info_s* pfile_in_zip_read_info; if (file==NULL) @@ -1949,8 +1839,7 @@ extern z_off_t ZEXPORT unztell (unzFile file) return (z_off_t)pfile_in_zip_read_info->stream.total_out; } -extern ZPOS64_T ZEXPORT unztell64 (unzFile file) -{ +extern ZPOS64_T ZEXPORT unztell64(unzFile file) { unz64_s* s; file_in_zip64_read_info_s* pfile_in_zip_read_info; @@ -1969,8 +1858,7 @@ extern ZPOS64_T ZEXPORT unztell64 (unzFile file) /* return 1 if the end of file was reached, 0 elsewhere */ -extern int ZEXPORT unzeof (unzFile file) -{ +extern int ZEXPORT unzeof(unzFile file) { unz64_s* s; file_in_zip64_read_info_s* pfile_in_zip_read_info; if (file==NULL) @@ -2001,8 +1889,7 @@ more info in the local-header version than in the central-header) the return value is the number of bytes copied in buf, or (if <0) the error code */ -extern int ZEXPORT unzGetLocalExtrafield (unzFile file, voidp buf, unsigned len) -{ +extern int ZEXPORT unzGetLocalExtrafield(unzFile file, voidp buf, unsigned len) { unz64_s* s; file_in_zip64_read_info_s* pfile_in_zip_read_info; uInt read_now; @@ -2049,8 +1936,7 @@ extern int ZEXPORT unzGetLocalExtrafield (unzFile file, voidp buf, unsigned len) Close the file in zip opened with unzOpenCurrentFile Return UNZ_CRCERROR if all the file was read but the CRC is not good */ -extern int ZEXPORT unzCloseCurrentFile (unzFile file) -{ +extern int ZEXPORT unzCloseCurrentFile(unzFile file) { int err=UNZ_OK; unz64_s* s; @@ -2072,7 +1958,7 @@ extern int ZEXPORT unzCloseCurrentFile (unzFile file) } - TRYFREE(pfile_in_zip_read_info->read_buffer); + free(pfile_in_zip_read_info->read_buffer); pfile_in_zip_read_info->read_buffer = NULL; if (pfile_in_zip_read_info->stream_initialised == Z_DEFLATED) inflateEnd(&pfile_in_zip_read_info->stream); @@ -2083,7 +1969,7 @@ extern int ZEXPORT unzCloseCurrentFile (unzFile file) pfile_in_zip_read_info->stream_initialised = 0; - TRYFREE(pfile_in_zip_read_info); + free(pfile_in_zip_read_info); s->pfile_in_zip_read=NULL; @@ -2096,8 +1982,7 @@ extern int ZEXPORT unzCloseCurrentFile (unzFile file) uSizeBuf is the size of the szComment buffer. 
return the number of byte copied or an error code <0 */ -extern int ZEXPORT unzGetGlobalComment (unzFile file, char * szComment, uLong uSizeBuf) -{ +extern int ZEXPORT unzGetGlobalComment(unzFile file, char * szComment, uLong uSizeBuf) { unz64_s* s; uLong uReadThis ; if (file==NULL) @@ -2124,8 +2009,7 @@ extern int ZEXPORT unzGetGlobalComment (unzFile file, char * szComment, uLong uS } /* Additions by RX '2004 */ -extern ZPOS64_T ZEXPORT unzGetOffset64(unzFile file) -{ +extern ZPOS64_T ZEXPORT unzGetOffset64(unzFile file) { unz64_s* s; if (file==NULL) @@ -2139,8 +2023,7 @@ extern ZPOS64_T ZEXPORT unzGetOffset64(unzFile file) return s->pos_in_central_dir; } -extern uLong ZEXPORT unzGetOffset (unzFile file) -{ +extern uLong ZEXPORT unzGetOffset(unzFile file) { ZPOS64_T offset64; if (file==NULL) @@ -2149,8 +2032,7 @@ extern uLong ZEXPORT unzGetOffset (unzFile file) return (uLong)offset64; } -extern int ZEXPORT unzSetOffset64(unzFile file, ZPOS64_T pos) -{ +extern int ZEXPORT unzSetOffset64(unzFile file, ZPOS64_T pos) { unz64_s* s; int err; @@ -2167,7 +2049,6 @@ extern int ZEXPORT unzSetOffset64(unzFile file, ZPOS64_T pos) return err; } -extern int ZEXPORT unzSetOffset (unzFile file, uLong pos) -{ +extern int ZEXPORT unzSetOffset (unzFile file, uLong pos) { return unzSetOffset64(file,pos); } diff --git a/deps/zlib/contrib/minizip/unzip.h b/deps/zlib/contrib/minizip/unzip.h index 2104e39150749b..14105840f6d247 100644 --- a/deps/zlib/contrib/minizip/unzip.h +++ b/deps/zlib/contrib/minizip/unzip.h @@ -83,12 +83,12 @@ typedef voidp unzFile; /* tm_unz contain date/time info */ typedef struct tm_unz_s { - uInt tm_sec; /* seconds after the minute - [0,59] */ - uInt tm_min; /* minutes after the hour - [0,59] */ - uInt tm_hour; /* hours since midnight - [0,23] */ - uInt tm_mday; /* day of the month - [1,31] */ - uInt tm_mon; /* months since January - [0,11] */ - uInt tm_year; /* years - [1980..2044] */ + int tm_sec; /* seconds after the minute - [0,59] */ + int tm_min; /* minutes after the hour - [0,59] */ + int tm_hour; /* hours since midnight - [0,23] */ + int tm_mday; /* day of the month - [1,31] */ + int tm_mon; /* months since January - [0,11] */ + int tm_year; /* years - [1980..2044] */ } tm_unz; /* unz_global_info structure contain global data about the ZIPfile @@ -150,21 +150,21 @@ typedef struct unz_file_info_s tm_unz tmu_date; } unz_file_info; -extern int ZEXPORT unzStringFileNameCompare OF ((const char* fileName1, - const char* fileName2, - int iCaseSensitivity)); +extern int ZEXPORT unzStringFileNameCompare(const char* fileName1, + const char* fileName2, + int iCaseSensitivity); /* - Compare two filename (fileName1,fileName2). - If iCaseSenisivity = 1, comparision is case sensitivity (like strcmp) - If iCaseSenisivity = 2, comparision is not case sensitivity (like strcmpi + Compare two filenames (fileName1,fileName2). + If iCaseSensitivity = 1, comparison is case sensitive (like strcmp) + If iCaseSensitivity = 2, comparison is not case sensitive (like strcmpi or strcasecmp) - If iCaseSenisivity = 0, case sensitivity is defaut of your operating system + If iCaseSensitivity = 0, case sensitivity is default of your operating system (like 1 on Unix, 2 on Windows) */ -extern unzFile ZEXPORT unzOpen OF((const char *path)); -extern unzFile ZEXPORT unzOpen64 OF((const void *path)); +extern unzFile ZEXPORT unzOpen(const char *path); +extern unzFile ZEXPORT unzOpen64(const void *path); /* Open a Zip file. 
path contain the full pathname (by example, on a Windows XP computer "c:\\zlib\\zlib113.zip" or on an Unix computer @@ -181,41 +181,41 @@ extern unzFile ZEXPORT unzOpen64 OF((const void *path)); */ -extern unzFile ZEXPORT unzOpen2 OF((const char *path, - zlib_filefunc_def* pzlib_filefunc_def)); +extern unzFile ZEXPORT unzOpen2(const char *path, + zlib_filefunc_def* pzlib_filefunc_def); /* Open a Zip file, like unzOpen, but provide a set of file low level API for read/write the zip file (see ioapi.h) */ -extern unzFile ZEXPORT unzOpen2_64 OF((const void *path, - zlib_filefunc64_def* pzlib_filefunc_def)); +extern unzFile ZEXPORT unzOpen2_64(const void *path, + zlib_filefunc64_def* pzlib_filefunc_def); /* Open a Zip file, like unz64Open, but provide a set of file low level API for read/write the zip file (see ioapi.h) */ -extern int ZEXPORT unzClose OF((unzFile file)); +extern int ZEXPORT unzClose(unzFile file); /* Close a ZipFile opened with unzOpen. If there is files inside the .Zip opened with unzOpenCurrentFile (see later), these files MUST be closed with unzCloseCurrentFile before call unzClose. return UNZ_OK if there is no problem. */ -extern int ZEXPORT unzGetGlobalInfo OF((unzFile file, - unz_global_info *pglobal_info)); +extern int ZEXPORT unzGetGlobalInfo(unzFile file, + unz_global_info *pglobal_info); -extern int ZEXPORT unzGetGlobalInfo64 OF((unzFile file, - unz_global_info64 *pglobal_info)); +extern int ZEXPORT unzGetGlobalInfo64(unzFile file, + unz_global_info64 *pglobal_info); /* Write info about the ZipFile in the *pglobal_info structure. No preparation of the structure is needed return UNZ_OK if there is no problem. */ -extern int ZEXPORT unzGetGlobalComment OF((unzFile file, - char *szComment, - uLong uSizeBuf)); +extern int ZEXPORT unzGetGlobalComment(unzFile file, + char *szComment, + uLong uSizeBuf); /* Get the global comment string of the ZipFile, in the szComment buffer. uSizeBuf is the size of the szComment buffer. @@ -226,22 +226,22 @@ extern int ZEXPORT unzGetGlobalComment OF((unzFile file, /***************************************************************************/ /* Unzip package allow you browse the directory of the zipfile */ -extern int ZEXPORT unzGoToFirstFile OF((unzFile file)); +extern int ZEXPORT unzGoToFirstFile(unzFile file); /* Set the current file of the zipfile to the first file. return UNZ_OK if there is no problem */ -extern int ZEXPORT unzGoToNextFile OF((unzFile file)); +extern int ZEXPORT unzGoToNextFile(unzFile file); /* Set the current file of the zipfile to the next file. return UNZ_OK if there is no problem return UNZ_END_OF_LIST_OF_FILE if the actual file was the latest. */ -extern int ZEXPORT unzLocateFile OF((unzFile file, - const char *szFileName, - int iCaseSensitivity)); +extern int ZEXPORT unzLocateFile(unzFile file, + const char *szFileName, + int iCaseSensitivity); /* Try locate the file szFileName in the zipfile. 
For the iCaseSensitivity signification, see unzStringFileNameCompare @@ -285,26 +285,26 @@ extern int ZEXPORT unzGoToFilePos64( /* ****************************************** */ -extern int ZEXPORT unzGetCurrentFileInfo64 OF((unzFile file, - unz_file_info64 *pfile_info, - char *szFileName, - uLong fileNameBufferSize, - void *extraField, - uLong extraFieldBufferSize, - char *szComment, - uLong commentBufferSize)); - -extern int ZEXPORT unzGetCurrentFileInfo OF((unzFile file, - unz_file_info *pfile_info, - char *szFileName, - uLong fileNameBufferSize, - void *extraField, - uLong extraFieldBufferSize, - char *szComment, - uLong commentBufferSize)); +extern int ZEXPORT unzGetCurrentFileInfo64(unzFile file, + unz_file_info64 *pfile_info, + char *szFileName, + uLong fileNameBufferSize, + void *extraField, + uLong extraFieldBufferSize, + char *szComment, + uLong commentBufferSize); + +extern int ZEXPORT unzGetCurrentFileInfo(unzFile file, + unz_file_info *pfile_info, + char *szFileName, + uLong fileNameBufferSize, + void *extraField, + uLong extraFieldBufferSize, + char *szComment, + uLong commentBufferSize); /* Get Info about the current file - if pfile_info!=NULL, the *pfile_info structure will contain somes info about + if pfile_info!=NULL, the *pfile_info structure will contain some info about the current file if szFileName!=NULL, the filemane string will be copied in szFileName (fileNameBufferSize is the size of the buffer) @@ -318,7 +318,7 @@ extern int ZEXPORT unzGetCurrentFileInfo OF((unzFile file, /** Addition for GDAL : START */ -extern ZPOS64_T ZEXPORT unzGetCurrentFileZStreamPos64 OF((unzFile file)); +extern ZPOS64_T ZEXPORT unzGetCurrentFileZStreamPos64(unzFile file); /** Addition for GDAL : END */ @@ -328,24 +328,24 @@ extern ZPOS64_T ZEXPORT unzGetCurrentFileZStreamPos64 OF((unzFile file)); from it, and close it (you can close it before reading all the file) */ -extern int ZEXPORT unzOpenCurrentFile OF((unzFile file)); +extern int ZEXPORT unzOpenCurrentFile(unzFile file); /* Open for reading data the current file in the zipfile. If there is no error, the return value is UNZ_OK. */ -extern int ZEXPORT unzOpenCurrentFilePassword OF((unzFile file, - const char* password)); +extern int ZEXPORT unzOpenCurrentFilePassword(unzFile file, + const char* password); /* Open for reading data the current file in the zipfile. password is a crypting password If there is no error, the return value is UNZ_OK. 
*/ -extern int ZEXPORT unzOpenCurrentFile2 OF((unzFile file, - int* method, - int* level, - int raw)); +extern int ZEXPORT unzOpenCurrentFile2(unzFile file, + int* method, + int* level, + int raw); /* Same than unzOpenCurrentFile, but open for read raw the file (not uncompress) if raw==1 @@ -355,11 +355,11 @@ extern int ZEXPORT unzOpenCurrentFile2 OF((unzFile file, but you CANNOT set method parameter as NULL */ -extern int ZEXPORT unzOpenCurrentFile3 OF((unzFile file, - int* method, - int* level, - int raw, - const char* password)); +extern int ZEXPORT unzOpenCurrentFile3(unzFile file, + int* method, + int* level, + int raw, + const char* password); /* Same than unzOpenCurrentFile, but open for read raw the file (not uncompress) if raw==1 @@ -370,41 +370,41 @@ extern int ZEXPORT unzOpenCurrentFile3 OF((unzFile file, */ -extern int ZEXPORT unzCloseCurrentFile OF((unzFile file)); +extern int ZEXPORT unzCloseCurrentFile(unzFile file); /* Close the file in zip opened with unzOpenCurrentFile Return UNZ_CRCERROR if all the file was read but the CRC is not good */ -extern int ZEXPORT unzReadCurrentFile OF((unzFile file, - voidp buf, - unsigned len)); +extern int ZEXPORT unzReadCurrentFile(unzFile file, + voidp buf, + unsigned len); /* Read bytes from the current file (opened by unzOpenCurrentFile) buf contain buffer where data must be copied len the size of buf. - return the number of byte copied if somes bytes are copied + return the number of byte copied if some bytes are copied return 0 if the end of file was reached return <0 with error code if there is an error (UNZ_ERRNO for IO error, or zLib error for uncompress error) */ -extern z_off_t ZEXPORT unztell OF((unzFile file)); +extern z_off_t ZEXPORT unztell(unzFile file); -extern ZPOS64_T ZEXPORT unztell64 OF((unzFile file)); +extern ZPOS64_T ZEXPORT unztell64(unzFile file); /* Give the current position in uncompressed data */ -extern int ZEXPORT unzeof OF((unzFile file)); +extern int ZEXPORT unzeof(unzFile file); /* return 1 if the end of file was reached, 0 elsewhere */ -extern int ZEXPORT unzGetLocalExtrafield OF((unzFile file, - voidp buf, - unsigned len)); +extern int ZEXPORT unzGetLocalExtrafield(unzFile file, + voidp buf, + unsigned len); /* Read extra field from the current file (opened by unzOpenCurrentFile) This is the local-header version of the extra field (sometimes, there is diff --git a/deps/zlib/contrib/minizip/zip.c b/deps/zlib/contrib/minizip/zip.c index f21d4954286af4..e2e9da07c5f307 100644 --- a/deps/zlib/contrib/minizip/zip.c +++ b/deps/zlib/contrib/minizip/zip.c @@ -14,7 +14,7 @@ Oct-2009 - Mathias Svensson - Added Zip64 Support when creating new file archives Oct-2009 - Mathias Svensson - Did some code cleanup and refactoring to get better overview of some functions. Oct-2009 - Mathias Svensson - Added zipRemoveExtraInfoBlock to strip extra field data from its ZIP64 data - It is used when recreting zip archive with RAW when deleting items from a zip. + It is used when recreating zip archive with RAW when deleting items from a zip. ZIP64 data is automatically added to items that needs it, and existing ZIP64 data need to be removed. 
Oct-2009 - Mathias Svensson - Added support for BZIP2 as compression mode (bzip2 lib is required) Jan-2010 - back to unzip and minizip 1.0 name scheme, with compatibility layer @@ -25,14 +25,13 @@ #include #include #include +#include #include #include "zlib.h" #include "zip.h" #ifdef STDC # include -# include -# include #endif #ifdef NO_ERRNO_H extern int errno; @@ -47,7 +46,7 @@ /* compile with -Dlocal if your debugger can't find static symbols */ #ifndef VERSIONMADEBY -# define VERSIONMADEBY (0x0) /* platform depedent */ +# define VERSIONMADEBY (0x0) /* platform dependent */ #endif #ifndef Z_BUFSIZE @@ -61,9 +60,6 @@ #ifndef ALLOC # define ALLOC(size) (malloc(size)) #endif -#ifndef TRYFREE -# define TRYFREE(p) {if (p) free(p);} -#endif /* #define SIZECENTRALDIRITEM (0x2e) @@ -138,37 +134,37 @@ typedef struct uInt pos_in_buffered_data; /* last written byte in buffered_data */ ZPOS64_T pos_local_header; /* offset of the local header of the file - currenty writing */ + currently writing */ char* central_header; /* central header data for the current file */ uLong size_centralExtra; uLong size_centralheader; /* size of the central header for cur file */ uLong size_centralExtraFree; /* Extra bytes allocated to the centralheader but that are not used */ uLong flag; /* flag of the file currently writing */ - int method; /* compression method of file currenty wr.*/ + int method; /* compression method of file currently wr.*/ int raw; /* 1 for directly writing raw data */ Byte buffered_data[Z_BUFSIZE];/* buffer contain compressed data to be writ*/ uLong dosDate; uLong crc32; int encrypt; - int zip64; /* Add ZIP64 extened information in the extra field */ + int zip64; /* Add ZIP64 extended information in the extra field */ ZPOS64_T pos_zip64extrainfo; ZPOS64_T totalCompressedData; ZPOS64_T totalUncompressedData; #ifndef NOCRYPT unsigned long keys[3]; /* keys defining the pseudo-random sequence */ const z_crc_t* pcrc_32_tab; - int crypt_header_size; + unsigned crypt_header_size; #endif } curfile64_info; typedef struct { zlib_filefunc64_32_def z_filefunc; - voidpf filestream; /* io structore of the zipfile */ + voidpf filestream; /* io structure of the zipfile */ linkedlist_data central_dir;/* datablock with central dir in construction*/ int in_opened_file_inzip; /* 1 if a file in the zip is currently writ.*/ - curfile64_info ci; /* info on the file curretly writing */ + curfile64_info ci; /* info on the file currently writing */ ZPOS64_T begin_pos; /* position of the beginning of the zipfile */ ZPOS64_T add_position_when_writing_offset; @@ -186,8 +182,7 @@ typedef struct #include "crypt.h" #endif -local linkedlist_datablock_internal* allocate_new_datablock() -{ +local linkedlist_datablock_internal* allocate_new_datablock(void) { linkedlist_datablock_internal* ldi; ldi = (linkedlist_datablock_internal*) ALLOC(sizeof(linkedlist_datablock_internal)); @@ -200,30 +195,26 @@ local linkedlist_datablock_internal* allocate_new_datablock() return ldi; } -local void free_datablock(linkedlist_datablock_internal* ldi) -{ +local void free_datablock(linkedlist_datablock_internal* ldi) { while (ldi!=NULL) { linkedlist_datablock_internal* ldinext = ldi->next_datablock; - TRYFREE(ldi); + free(ldi); ldi = ldinext; } } -local void init_linkedlist(linkedlist_data* ll) -{ +local void init_linkedlist(linkedlist_data* ll) { ll->first_block = ll->last_block = NULL; } -local void free_linkedlist(linkedlist_data* ll) -{ +local void free_linkedlist(linkedlist_data* ll) { free_datablock(ll->first_block); ll->first_block = 
ll->last_block = NULL; } -local int add_data_in_datablock(linkedlist_data* ll, const void* buf, uLong len) -{ +local int add_data_in_datablock(linkedlist_data* ll, const void* buf, uLong len) { linkedlist_datablock_internal* ldi; const unsigned char* from_copy; @@ -238,7 +229,7 @@ local int add_data_in_datablock(linkedlist_data* ll, const void* buf, uLong len) } ldi = ll->last_block; - from_copy = (unsigned char*)buf; + from_copy = (const unsigned char*)buf; while (len>0) { @@ -283,9 +274,7 @@ local int add_data_in_datablock(linkedlist_data* ll, const void* buf, uLong len) nbByte == 1, 2 ,4 or 8 (byte, short or long, ZPOS64_T) */ -local int zip64local_putValue OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T x, int nbByte)); -local int zip64local_putValue (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T x, int nbByte) -{ +local int zip64local_putValue(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T x, int nbByte) { unsigned char buf[8]; int n; for (n = 0; n < nbByte; n++) @@ -301,15 +290,13 @@ local int zip64local_putValue (const zlib_filefunc64_32_def* pzlib_filefunc_def, } } - if (ZWRITE64(*pzlib_filefunc_def,filestream,buf,nbByte)!=(uLong)nbByte) + if (ZWRITE64(*pzlib_filefunc_def,filestream,buf,(uLong)nbByte)!=(uLong)nbByte) return ZIP_ERRNO; else return ZIP_OK; } -local void zip64local_putValue_inmemory OF((void* dest, ZPOS64_T x, int nbByte)); -local void zip64local_putValue_inmemory (void* dest, ZPOS64_T x, int nbByte) -{ +local void zip64local_putValue_inmemory (void* dest, ZPOS64_T x, int nbByte) { unsigned char* buf=(unsigned char*)dest; int n; for (n = 0; n < nbByte; n++) { @@ -329,25 +316,21 @@ local void zip64local_putValue_inmemory (void* dest, ZPOS64_T x, int nbByte) /****************************************************************************/ -local uLong zip64local_TmzDateToDosDate(const tm_zip* ptm) -{ +local uLong zip64local_TmzDateToDosDate(const tm_zip* ptm) { uLong year = (uLong)ptm->tm_year; if (year>=1980) year-=1980; else if (year>=80) year-=80; return - (uLong) (((ptm->tm_mday) + (32 * (ptm->tm_mon+1)) + (512 * year)) << 16) | - ((ptm->tm_sec/2) + (32* ptm->tm_min) + (2048 * (uLong)ptm->tm_hour)); + (uLong) (((uLong)(ptm->tm_mday) + (32 * (uLong)(ptm->tm_mon+1)) + (512 * year)) << 16) | + (((uLong)ptm->tm_sec/2) + (32 * (uLong)ptm->tm_min) + (2048 * (uLong)ptm->tm_hour)); } /****************************************************************************/ -local int zip64local_getByte OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, int *pi)); - -local int zip64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def,voidpf filestream,int* pi) -{ +local int zip64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, int* pi) { unsigned char c; int err = (int)ZREAD64(*pzlib_filefunc_def,filestream,&c,1); if (err==1) @@ -368,10 +351,7 @@ local int zip64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def,vo /* =========================================================================== Reads a long in LSB order from the given gz_stream. 
Sets */ -local int zip64local_getShort OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong *pX)); - -local int zip64local_getShort (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong* pX) -{ +local int zip64local_getShort(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong* pX) { uLong x ; int i = 0; int err; @@ -390,10 +370,7 @@ local int zip64local_getShort (const zlib_filefunc64_32_def* pzlib_filefunc_def, return err; } -local int zip64local_getLong OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong *pX)); - -local int zip64local_getLong (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong* pX) -{ +local int zip64local_getLong(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, uLong* pX) { uLong x ; int i = 0; int err; @@ -420,11 +397,8 @@ local int zip64local_getLong (const zlib_filefunc64_32_def* pzlib_filefunc_def, return err; } -local int zip64local_getLong64 OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T *pX)); - -local int zip64local_getLong64 (const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T *pX) -{ +local int zip64local_getLong64(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream, ZPOS64_T *pX) { ZPOS64_T x; int i = 0; int err; @@ -475,10 +449,7 @@ local int zip64local_getLong64 (const zlib_filefunc64_32_def* pzlib_filefunc_def Locate the Central directory of a zipfile (at the end, just before the global comment) */ -local ZPOS64_T zip64local_SearchCentralDir OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream)); - -local ZPOS64_T zip64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) -{ +local ZPOS64_T zip64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) { unsigned char* buf; ZPOS64_T uSizeFile; ZPOS64_T uBackRead; @@ -522,14 +493,14 @@ local ZPOS64_T zip64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_f if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && ((*(buf+i+2))==0x05) && ((*(buf+i+3))==0x06)) { - uPosFound = uReadPos+i; + uPosFound = uReadPos+(unsigned)i; break; } - if (uPosFound!=0) - break; + if (uPosFound!=0) + break; } - TRYFREE(buf); + free(buf); return uPosFound; } @@ -537,10 +508,7 @@ local ZPOS64_T zip64local_SearchCentralDir(const zlib_filefunc64_32_def* pzlib_f Locate the End of Zip64 Central directory locator and from there find the CD of a zipfile (at the end, just before the global comment) */ -local ZPOS64_T zip64local_SearchCentralDir64 OF((const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream)); - -local ZPOS64_T zip64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) -{ +local ZPOS64_T zip64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib_filefunc_def, voidpf filestream) { unsigned char* buf; ZPOS64_T uSizeFile; ZPOS64_T uBackRead; @@ -586,7 +554,7 @@ local ZPOS64_T zip64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib // Signature "0x07064b50" Zip64 end of central directory locater if (((*(buf+i))==0x50) && ((*(buf+i+1))==0x4b) && ((*(buf+i+2))==0x06) && ((*(buf+i+3))==0x07)) { - uPosFound = uReadPos+i; + uPosFound = uReadPos+(unsigned)i; break; } } @@ -595,7 +563,7 @@ local ZPOS64_T zip64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib break; } - TRYFREE(buf); + free(buf); if (uPosFound == 0) return 0; @@ -637,8 +605,7 @@ 
local ZPOS64_T zip64local_SearchCentralDir64(const zlib_filefunc64_32_def* pzlib return relativeOffset; } -int LoadCentralDirectoryRecord(zip64_internal* pziinit) -{ +local int LoadCentralDirectoryRecord(zip64_internal* pziinit) { int err=ZIP_OK; ZPOS64_T byte_before_the_zipfile;/* byte before the zipfile, (>0 for sfx)*/ @@ -647,10 +614,10 @@ int LoadCentralDirectoryRecord(zip64_internal* pziinit) ZPOS64_T central_pos; uLong uL; - uLong number_disk; /* number of the current dist, used for - spaning ZIP, unsupported, always 0*/ - uLong number_disk_with_CD; /* number the the disk with central dir, used - for spaning ZIP, unsupported, always 0*/ + uLong number_disk; /* number of the current disk, used for + spanning ZIP, unsupported, always 0*/ + uLong number_disk_with_CD; /* number of the disk with central dir, used + for spanning ZIP, unsupported, always 0*/ ZPOS64_T number_entry; ZPOS64_T number_entry_CD; /* total number of entries in the central dir @@ -830,7 +797,7 @@ int LoadCentralDirectoryRecord(zip64_internal* pziinit) size_central_dir_to_read-=read_this; } - TRYFREE(buf_read); + free(buf_read); } pziinit->begin_pos = byte_before_the_zipfile; pziinit->number_entry = number_entry_CD; @@ -846,8 +813,7 @@ int LoadCentralDirectoryRecord(zip64_internal* pziinit) /************************************************************/ -extern zipFile ZEXPORT zipOpen3 (const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_32_def* pzlib_filefunc64_32_def) -{ +extern zipFile ZEXPORT zipOpen3(const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_32_def* pzlib_filefunc64_32_def) { zip64_internal ziinit; zip64_internal* zi; int err=ZIP_OK; @@ -905,9 +871,9 @@ extern zipFile ZEXPORT zipOpen3 (const void *pathname, int append, zipcharpc* gl if (err != ZIP_OK) { # ifndef NO_ADDFILEINEXISTINGZIP - TRYFREE(ziinit.globalcomment); + free(ziinit.globalcomment); # endif /* !NO_ADDFILEINEXISTINGZIP*/ - TRYFREE(zi); + free(zi); return NULL; } else @@ -917,8 +883,7 @@ extern zipFile ZEXPORT zipOpen3 (const void *pathname, int append, zipcharpc* gl } } -extern zipFile ZEXPORT zipOpen2 (const char *pathname, int append, zipcharpc* globalcomment, zlib_filefunc_def* pzlib_filefunc32_def) -{ +extern zipFile ZEXPORT zipOpen2(const char *pathname, int append, zipcharpc* globalcomment, zlib_filefunc_def* pzlib_filefunc32_def) { if (pzlib_filefunc32_def != NULL) { zlib_filefunc64_32_def zlib_filefunc64_32_def_fill; @@ -929,8 +894,7 @@ extern zipFile ZEXPORT zipOpen2 (const char *pathname, int append, zipcharpc* gl return zipOpen3(pathname, append, globalcomment, NULL); } -extern zipFile ZEXPORT zipOpen2_64 (const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_def* pzlib_filefunc_def) -{ +extern zipFile ZEXPORT zipOpen2_64(const void *pathname, int append, zipcharpc* globalcomment, zlib_filefunc64_def* pzlib_filefunc_def) { if (pzlib_filefunc_def != NULL) { zlib_filefunc64_32_def zlib_filefunc64_32_def_fill; @@ -945,18 +909,15 @@ extern zipFile ZEXPORT zipOpen2_64 (const void *pathname, int append, zipcharpc* -extern zipFile ZEXPORT zipOpen (const char* pathname, int append) -{ +extern zipFile ZEXPORT zipOpen(const char* pathname, int append) { return zipOpen3((const void*)pathname,append,NULL,NULL); } -extern zipFile ZEXPORT zipOpen64 (const void* pathname, int append) -{ +extern zipFile ZEXPORT zipOpen64(const void* pathname, int append) { return zipOpen3(pathname,append,NULL,NULL); } -int Write_LocalFileHeader(zip64_internal* zi, const char* filename, uInt 
size_extrafield_local, const void* extrafield_local) -{ +local int Write_LocalFileHeader(zip64_internal* zi, const char* filename, uInt size_extrafield_local, const void* extrafield_local) { /* write the local header */ int err; uInt size_filename = (uInt)strlen(filename); @@ -1034,8 +995,8 @@ int Write_LocalFileHeader(zip64_internal* zi, const char* filename, uInt size_ex // Remember position of Zip64 extended info for the local file header. (needed when we update size after done with file) zi->ci.pos_zip64extrainfo = ZTELL64(zi->z_filefunc,zi->filestream); - err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (short)HeaderID,2); - err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (short)DataSize,2); + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (ZPOS64_T)HeaderID,2); + err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (ZPOS64_T)DataSize,2); err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (ZPOS64_T)UncompressedSize,8); err = zip64local_putValue(&zi->z_filefunc, zi->filestream, (ZPOS64_T)CompressedSize,8); @@ -1052,14 +1013,13 @@ int Write_LocalFileHeader(zip64_internal* zi, const char* filename, uInt size_ex It is not done here because then we need to realloc a new buffer since parameters are 'const' and I want to minimize unnecessary allocations. */ -extern int ZEXPORT zipOpenNewFileInZip4_64 (zipFile file, const char* filename, const zip_fileinfo* zipfi, - const void* extrafield_local, uInt size_extrafield_local, - const void* extrafield_global, uInt size_extrafield_global, - const char* comment, int method, int level, int raw, - int windowBits,int memLevel, int strategy, - const char* password, uLong crcForCrypting, - uLong versionMadeBy, uLong flagBase, int zip64) -{ +extern int ZEXPORT zipOpenNewFileInZip4_64(zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, + int windowBits,int memLevel, int strategy, + const char* password, uLong crcForCrypting, + uLong versionMadeBy, uLong flagBase, int zip64) { zip64_internal* zi; uInt size_filename; uInt size_comment; @@ -1273,35 +1233,33 @@ extern int ZEXPORT zipOpenNewFileInZip4_64 (zipFile file, const char* filename, return err; } -extern int ZEXPORT zipOpenNewFileInZip4 (zipFile file, const char* filename, const zip_fileinfo* zipfi, - const void* extrafield_local, uInt size_extrafield_local, - const void* extrafield_global, uInt size_extrafield_global, - const char* comment, int method, int level, int raw, - int windowBits,int memLevel, int strategy, - const char* password, uLong crcForCrypting, - uLong versionMadeBy, uLong flagBase) -{ - return zipOpenNewFileInZip4_64 (file, filename, zipfi, - extrafield_local, size_extrafield_local, - extrafield_global, size_extrafield_global, - comment, method, level, raw, - windowBits, memLevel, strategy, - password, crcForCrypting, versionMadeBy, flagBase, 0); +extern int ZEXPORT zipOpenNewFileInZip4(zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, + int windowBits,int memLevel, int strategy, + const char* password, uLong crcForCrypting, + uLong versionMadeBy, uLong flagBase) { + return zipOpenNewFileInZip4_64(file, filename, zipfi, + extrafield_local, size_extrafield_local, + 
extrafield_global, size_extrafield_global, + comment, method, level, raw, + windowBits, memLevel, strategy, + password, crcForCrypting, versionMadeBy, flagBase, 0); } -extern int ZEXPORT zipOpenNewFileInZip3 (zipFile file, const char* filename, const zip_fileinfo* zipfi, - const void* extrafield_local, uInt size_extrafield_local, - const void* extrafield_global, uInt size_extrafield_global, - const char* comment, int method, int level, int raw, - int windowBits,int memLevel, int strategy, - const char* password, uLong crcForCrypting) -{ - return zipOpenNewFileInZip4_64 (file, filename, zipfi, - extrafield_local, size_extrafield_local, - extrafield_global, size_extrafield_global, - comment, method, level, raw, - windowBits, memLevel, strategy, - password, crcForCrypting, VERSIONMADEBY, 0, 0); +extern int ZEXPORT zipOpenNewFileInZip3(zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, + int windowBits,int memLevel, int strategy, + const char* password, uLong crcForCrypting) { + return zipOpenNewFileInZip4_64(file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + windowBits, memLevel, strategy, + password, crcForCrypting, VERSIONMADEBY, 0, 0); } extern int ZEXPORT zipOpenNewFileInZip3_64(zipFile file, const char* filename, const zip_fileinfo* zipfi, @@ -1309,70 +1267,64 @@ extern int ZEXPORT zipOpenNewFileInZip3_64(zipFile file, const char* filename, c const void* extrafield_global, uInt size_extrafield_global, const char* comment, int method, int level, int raw, int windowBits,int memLevel, int strategy, - const char* password, uLong crcForCrypting, int zip64) -{ - return zipOpenNewFileInZip4_64 (file, filename, zipfi, - extrafield_local, size_extrafield_local, - extrafield_global, size_extrafield_global, - comment, method, level, raw, - windowBits, memLevel, strategy, - password, crcForCrypting, VERSIONMADEBY, 0, zip64); + const char* password, uLong crcForCrypting, int zip64) { + return zipOpenNewFileInZip4_64(file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + windowBits, memLevel, strategy, + password, crcForCrypting, VERSIONMADEBY, 0, zip64); } extern int ZEXPORT zipOpenNewFileInZip2(zipFile file, const char* filename, const zip_fileinfo* zipfi, const void* extrafield_local, uInt size_extrafield_local, const void* extrafield_global, uInt size_extrafield_global, - const char* comment, int method, int level, int raw) -{ - return zipOpenNewFileInZip4_64 (file, filename, zipfi, - extrafield_local, size_extrafield_local, - extrafield_global, size_extrafield_global, - comment, method, level, raw, - -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, - NULL, 0, VERSIONMADEBY, 0, 0); + const char* comment, int method, int level, int raw) { + return zipOpenNewFileInZip4_64(file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, + NULL, 0, VERSIONMADEBY, 0, 0); } extern int ZEXPORT zipOpenNewFileInZip2_64(zipFile file, const char* filename, const zip_fileinfo* zipfi, - const void* extrafield_local, uInt size_extrafield_local, - const void* extrafield_global, uInt size_extrafield_global, - const char* 
comment, int method, int level, int raw, int zip64) -{ - return zipOpenNewFileInZip4_64 (file, filename, zipfi, - extrafield_local, size_extrafield_local, - extrafield_global, size_extrafield_global, - comment, method, level, raw, - -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, - NULL, 0, VERSIONMADEBY, 0, zip64); + const void* extrafield_local, uInt size_extrafield_local, + const void* extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int raw, int zip64) { + return zipOpenNewFileInZip4_64(file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, raw, + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, + NULL, 0, VERSIONMADEBY, 0, zip64); } -extern int ZEXPORT zipOpenNewFileInZip64 (zipFile file, const char* filename, const zip_fileinfo* zipfi, - const void* extrafield_local, uInt size_extrafield_local, - const void*extrafield_global, uInt size_extrafield_global, - const char* comment, int method, int level, int zip64) -{ - return zipOpenNewFileInZip4_64 (file, filename, zipfi, - extrafield_local, size_extrafield_local, - extrafield_global, size_extrafield_global, - comment, method, level, 0, - -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, - NULL, 0, VERSIONMADEBY, 0, zip64); +extern int ZEXPORT zipOpenNewFileInZip64(zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void*extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level, int zip64) { + return zipOpenNewFileInZip4_64(file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, 0, + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, + NULL, 0, VERSIONMADEBY, 0, zip64); } -extern int ZEXPORT zipOpenNewFileInZip (zipFile file, const char* filename, const zip_fileinfo* zipfi, - const void* extrafield_local, uInt size_extrafield_local, - const void*extrafield_global, uInt size_extrafield_global, - const char* comment, int method, int level) -{ - return zipOpenNewFileInZip4_64 (file, filename, zipfi, - extrafield_local, size_extrafield_local, - extrafield_global, size_extrafield_global, - comment, method, level, 0, - -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, - NULL, 0, VERSIONMADEBY, 0, 0); +extern int ZEXPORT zipOpenNewFileInZip(zipFile file, const char* filename, const zip_fileinfo* zipfi, + const void* extrafield_local, uInt size_extrafield_local, + const void*extrafield_global, uInt size_extrafield_global, + const char* comment, int method, int level) { + return zipOpenNewFileInZip4_64(file, filename, zipfi, + extrafield_local, size_extrafield_local, + extrafield_global, size_extrafield_global, + comment, method, level, 0, + -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, + NULL, 0, VERSIONMADEBY, 0, 0); } -local int zip64FlushWriteBuffer(zip64_internal* zi) -{ +local int zip64FlushWriteBuffer(zip64_internal* zi) { int err=ZIP_OK; if (zi->ci.encrypt != 0) @@ -1410,8 +1362,7 @@ local int zip64FlushWriteBuffer(zip64_internal* zi) return err; } -extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned int len) -{ +extern int ZEXPORT zipWriteInFileInZip(zipFile file, const void* buf, unsigned int len) { zip64_internal* zi; int err=ZIP_OK; @@ -1461,7 +1412,7 @@ extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned in else #endif { - zi->ci.stream.next_in = (Bytef*)buf; + 
zi->ci.stream.next_in = (Bytef*)(uintptr_t)buf; zi->ci.stream.avail_in = len; while ((err==ZIP_OK) && (zi->ci.stream.avail_in>0)) @@ -1482,11 +1433,6 @@ extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned in { uLong uTotalOutBefore = zi->ci.stream.total_out; err=deflate(&zi->ci.stream, Z_NO_FLUSH); - if(uTotalOutBefore > zi->ci.stream.total_out) - { - int bBreak = 0; - bBreak++; - } zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ; } @@ -1517,17 +1463,15 @@ extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned in return err; } -extern int ZEXPORT zipCloseFileInZipRaw (zipFile file, uLong uncompressed_size, uLong crc32) -{ +extern int ZEXPORT zipCloseFileInZipRaw(zipFile file, uLong uncompressed_size, uLong crc32) { return zipCloseFileInZipRaw64 (file, uncompressed_size, crc32); } -extern int ZEXPORT zipCloseFileInZipRaw64 (zipFile file, ZPOS64_T uncompressed_size, uLong crc32) -{ +extern int ZEXPORT zipCloseFileInZipRaw64(zipFile file, ZPOS64_T uncompressed_size, uLong crc32) { zip64_internal* zi; ZPOS64_T compressed_size; uLong invalidValue = 0xffffffff; - short datasize = 0; + unsigned datasize = 0; int err=ZIP_OK; if (file == NULL) @@ -1758,13 +1702,11 @@ extern int ZEXPORT zipCloseFileInZipRaw64 (zipFile file, ZPOS64_T uncompressed_s return err; } -extern int ZEXPORT zipCloseFileInZip (zipFile file) -{ +extern int ZEXPORT zipCloseFileInZip(zipFile file) { return zipCloseFileInZipRaw (file,0,0); } -int Write_Zip64EndOfCentralDirectoryLocator(zip64_internal* zi, ZPOS64_T zip64eocd_pos_inzip) -{ +local int Write_Zip64EndOfCentralDirectoryLocator(zip64_internal* zi, ZPOS64_T zip64eocd_pos_inzip) { int err = ZIP_OK; ZPOS64_T pos = zip64eocd_pos_inzip - zi->add_position_when_writing_offset; @@ -1785,8 +1727,7 @@ int Write_Zip64EndOfCentralDirectoryLocator(zip64_internal* zi, ZPOS64_T zip64eo return err; } -int Write_Zip64EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) -{ +local int Write_Zip64EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) { int err = ZIP_OK; uLong Zip64DataSize = 44; @@ -1824,8 +1765,8 @@ int Write_Zip64EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centra } return err; } -int Write_EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) -{ + +local int Write_EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, ZPOS64_T centraldir_pos_inzip) { int err = ZIP_OK; /*signature*/ @@ -1872,8 +1813,7 @@ int Write_EndOfCentralDirectoryRecord(zip64_internal* zi, uLong size_centraldir, return err; } -int Write_GlobalComment(zip64_internal* zi, const char* global_comment) -{ +local int Write_GlobalComment(zip64_internal* zi, const char* global_comment) { int err = ZIP_OK; uInt size_global_comment = 0; @@ -1890,8 +1830,7 @@ int Write_GlobalComment(zip64_internal* zi, const char* global_comment) return err; } -extern int ZEXPORT zipClose (zipFile file, const char* global_comment) -{ +extern int ZEXPORT zipClose(zipFile file, const char* global_comment) { zip64_internal* zi; int err = 0; uLong size_centraldir = 0; @@ -1933,7 +1872,7 @@ extern int ZEXPORT zipClose (zipFile file, const char* global_comment) free_linkedlist(&(zi->central_dir)); pos = centraldir_pos_inzip - zi->add_position_when_writing_offset; - if(pos >= 0xffffffff || zi->number_entry > 0xFFFF) + if(pos >= 0xffffffff || zi->number_entry >= 0xFFFF) { ZPOS64_T 
Zip64EOCDpos = ZTELL64(zi->z_filefunc,zi->filestream); Write_Zip64EndOfCentralDirectoryRecord(zi, size_centraldir, centraldir_pos_inzip); @@ -1952,15 +1891,14 @@ extern int ZEXPORT zipClose (zipFile file, const char* global_comment) err = ZIP_ERRNO; #ifndef NO_ADDFILEINEXISTINGZIP - TRYFREE(zi->globalcomment); + free(zi->globalcomment); #endif - TRYFREE(zi); + free(zi); return err; } -extern int ZEXPORT zipRemoveExtraInfoBlock (char* pData, int* dataLen, short sHeader) -{ +extern int ZEXPORT zipRemoveExtraInfoBlock(char* pData, int* dataLen, short sHeader) { char* p = pData; int size = 0; char* pNewHeader; @@ -1970,10 +1908,10 @@ extern int ZEXPORT zipRemoveExtraInfoBlock (char* pData, int* dataLen, short sHe int retVal = ZIP_OK; - if(pData == NULL || *dataLen < 4) + if(pData == NULL || dataLen == NULL || *dataLen < 4) return ZIP_PARAMERROR; - pNewHeader = (char*)ALLOC(*dataLen); + pNewHeader = (char*)ALLOC((unsigned)*dataLen); pTmp = pNewHeader; while(p < (pData + *dataLen)) @@ -2012,7 +1950,7 @@ extern int ZEXPORT zipRemoveExtraInfoBlock (char* pData, int* dataLen, short sHe else retVal = ZIP_ERRNO; - TRYFREE(pNewHeader); + free(pNewHeader); return retVal; } diff --git a/deps/zlib/contrib/minizip/zip.h b/deps/zlib/contrib/minizip/zip.h index 8aaebb623430fc..3e230d3405f603 100644 --- a/deps/zlib/contrib/minizip/zip.h +++ b/deps/zlib/contrib/minizip/zip.h @@ -88,12 +88,12 @@ typedef voidp zipFile; /* tm_zip contain date/time info */ typedef struct tm_zip_s { - uInt tm_sec; /* seconds after the minute - [0,59] */ - uInt tm_min; /* minutes after the hour - [0,59] */ - uInt tm_hour; /* hours since midnight - [0,23] */ - uInt tm_mday; /* day of the month - [1,31] */ - uInt tm_mon; /* months since January - [0,11] */ - uInt tm_year; /* years - [1980..2044] */ + int tm_sec; /* seconds after the minute - [0,59] */ + int tm_min; /* minutes after the hour - [0,59] */ + int tm_hour; /* hours since midnight - [0,23] */ + int tm_mday; /* day of the month - [1,31] */ + int tm_mon; /* months since January - [0,11] */ + int tm_year; /* years - [1980..2044] */ } tm_zip; typedef struct @@ -113,8 +113,8 @@ typedef const char* zipcharpc; #define APPEND_STATUS_CREATEAFTER (1) #define APPEND_STATUS_ADDINZIP (2) -extern zipFile ZEXPORT zipOpen OF((const char *pathname, int append)); -extern zipFile ZEXPORT zipOpen64 OF((const void *pathname, int append)); +extern zipFile ZEXPORT zipOpen(const char *pathname, int append); +extern zipFile ZEXPORT zipOpen64(const void *pathname, int append); /* Create a zipfile. pathname contain on Windows XP a filename like "c:\\zlib\\zlib113.zip" or on @@ -131,50 +131,55 @@ extern zipFile ZEXPORT zipOpen64 OF((const void *pathname, int append)); /* Note : there is no delete function into a zipfile. 
If you want delete file into a zipfile, you must open a zipfile, and create another - Of couse, you can use RAW reading and writing to copy the file you did not want delte + Of course, you can use RAW reading and writing to copy the file you did not want delete */ -extern zipFile ZEXPORT zipOpen2 OF((const char *pathname, - int append, - zipcharpc* globalcomment, - zlib_filefunc_def* pzlib_filefunc_def)); +extern zipFile ZEXPORT zipOpen2(const char *pathname, + int append, + zipcharpc* globalcomment, + zlib_filefunc_def* pzlib_filefunc_def); -extern zipFile ZEXPORT zipOpen2_64 OF((const void *pathname, +extern zipFile ZEXPORT zipOpen2_64(const void *pathname, int append, zipcharpc* globalcomment, - zlib_filefunc64_def* pzlib_filefunc_def)); - -extern int ZEXPORT zipOpenNewFileInZip OF((zipFile file, - const char* filename, - const zip_fileinfo* zipfi, - const void* extrafield_local, - uInt size_extrafield_local, - const void* extrafield_global, - uInt size_extrafield_global, - const char* comment, - int method, - int level)); - -extern int ZEXPORT zipOpenNewFileInZip64 OF((zipFile file, - const char* filename, - const zip_fileinfo* zipfi, - const void* extrafield_local, - uInt size_extrafield_local, - const void* extrafield_global, - uInt size_extrafield_global, - const char* comment, - int method, - int level, - int zip64)); + zlib_filefunc64_def* pzlib_filefunc_def); + +extern zipFile ZEXPORT zipOpen3(const void *pathname, + int append, + zipcharpc* globalcomment, + zlib_filefunc64_32_def* pzlib_filefunc64_32_def); + +extern int ZEXPORT zipOpenNewFileInZip(zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level); + +extern int ZEXPORT zipOpenNewFileInZip64(zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int zip64); /* Open a file in the ZIP for writing. 
filename : the filename in zip (if NULL, '-' without quote will be used *zipfi contain supplemental information if extrafield_local!=NULL and size_extrafield_local>0, extrafield_local - contains the extrafield data the the local header + contains the extrafield data for the local header if extrafield_global!=NULL and size_extrafield_global>0, extrafield_global - contains the extrafield data the the local header + contains the extrafield data for the global header if comment != NULL, comment contain the comment string method contain the compression method (0 for store, Z_DEFLATED for deflate) level contain the level of compression (can be Z_DEFAULT_COMPRESSION) @@ -184,70 +189,69 @@ extern int ZEXPORT zipOpenNewFileInZip64 OF((zipFile file, */ -extern int ZEXPORT zipOpenNewFileInZip2 OF((zipFile file, - const char* filename, - const zip_fileinfo* zipfi, - const void* extrafield_local, - uInt size_extrafield_local, - const void* extrafield_global, - uInt size_extrafield_global, - const char* comment, - int method, - int level, - int raw)); - - -extern int ZEXPORT zipOpenNewFileInZip2_64 OF((zipFile file, - const char* filename, - const zip_fileinfo* zipfi, - const void* extrafield_local, - uInt size_extrafield_local, - const void* extrafield_global, - uInt size_extrafield_global, - const char* comment, - int method, - int level, - int raw, - int zip64)); +extern int ZEXPORT zipOpenNewFileInZip2(zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw); + + +extern int ZEXPORT zipOpenNewFileInZip2_64(zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int zip64); /* Same than zipOpenNewFileInZip, except if raw=1, we write raw file */ -extern int ZEXPORT zipOpenNewFileInZip3 OF((zipFile file, - const char* filename, - const zip_fileinfo* zipfi, - const void* extrafield_local, - uInt size_extrafield_local, - const void* extrafield_global, - uInt size_extrafield_global, - const char* comment, - int method, - int level, - int raw, - int windowBits, - int memLevel, - int strategy, - const char* password, - uLong crcForCrypting)); - -extern int ZEXPORT zipOpenNewFileInZip3_64 OF((zipFile file, - const char* filename, - const zip_fileinfo* zipfi, - const void* extrafield_local, - uInt size_extrafield_local, - const void* extrafield_global, - uInt size_extrafield_global, - const char* comment, - int method, - int level, - int raw, - int windowBits, - int memLevel, - int strategy, - const char* password, - uLong crcForCrypting, - int zip64 - )); +extern int ZEXPORT zipOpenNewFileInZip3(zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int windowBits, + int memLevel, + int strategy, + const char* password, + uLong crcForCrypting); + +extern int ZEXPORT zipOpenNewFileInZip3_64(zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int 
level, + int raw, + int windowBits, + int memLevel, + int strategy, + const char* password, + uLong crcForCrypting, + int zip64); /* Same than zipOpenNewFileInZip2, except @@ -256,47 +260,45 @@ extern int ZEXPORT zipOpenNewFileInZip3_64 OF((zipFile file, crcForCrypting : crc of file to compress (needed for crypting) */ -extern int ZEXPORT zipOpenNewFileInZip4 OF((zipFile file, - const char* filename, - const zip_fileinfo* zipfi, - const void* extrafield_local, - uInt size_extrafield_local, - const void* extrafield_global, - uInt size_extrafield_global, - const char* comment, - int method, - int level, - int raw, - int windowBits, - int memLevel, - int strategy, - const char* password, - uLong crcForCrypting, - uLong versionMadeBy, - uLong flagBase - )); - - -extern int ZEXPORT zipOpenNewFileInZip4_64 OF((zipFile file, - const char* filename, - const zip_fileinfo* zipfi, - const void* extrafield_local, - uInt size_extrafield_local, - const void* extrafield_global, - uInt size_extrafield_global, - const char* comment, - int method, - int level, - int raw, - int windowBits, - int memLevel, - int strategy, - const char* password, - uLong crcForCrypting, - uLong versionMadeBy, - uLong flagBase, - int zip64 - )); +extern int ZEXPORT zipOpenNewFileInZip4(zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int windowBits, + int memLevel, + int strategy, + const char* password, + uLong crcForCrypting, + uLong versionMadeBy, + uLong flagBase); + + +extern int ZEXPORT zipOpenNewFileInZip4_64(zipFile file, + const char* filename, + const zip_fileinfo* zipfi, + const void* extrafield_local, + uInt size_extrafield_local, + const void* extrafield_global, + uInt size_extrafield_global, + const char* comment, + int method, + int level, + int raw, + int windowBits, + int memLevel, + int strategy, + const char* password, + uLong crcForCrypting, + uLong versionMadeBy, + uLong flagBase, + int zip64); /* Same than zipOpenNewFileInZip4, except versionMadeBy : value for Version made by field @@ -304,25 +306,25 @@ extern int ZEXPORT zipOpenNewFileInZip4_64 OF((zipFile file, */ -extern int ZEXPORT zipWriteInFileInZip OF((zipFile file, - const void* buf, - unsigned len)); +extern int ZEXPORT zipWriteInFileInZip(zipFile file, + const void* buf, + unsigned len); /* Write data in the zipfile */ -extern int ZEXPORT zipCloseFileInZip OF((zipFile file)); +extern int ZEXPORT zipCloseFileInZip(zipFile file); /* Close the current file in the zipfile */ -extern int ZEXPORT zipCloseFileInZipRaw OF((zipFile file, - uLong uncompressed_size, - uLong crc32)); +extern int ZEXPORT zipCloseFileInZipRaw(zipFile file, + uLong uncompressed_size, + uLong crc32); -extern int ZEXPORT zipCloseFileInZipRaw64 OF((zipFile file, - ZPOS64_T uncompressed_size, - uLong crc32)); +extern int ZEXPORT zipCloseFileInZipRaw64(zipFile file, + ZPOS64_T uncompressed_size, + uLong crc32); /* Close the current file in the zipfile, for file opened with @@ -330,14 +332,14 @@ extern int ZEXPORT zipCloseFileInZipRaw64 OF((zipFile file, uncompressed_size and crc32 are value for the uncompressed size */ -extern int ZEXPORT zipClose OF((zipFile file, - const char* global_comment)); +extern int ZEXPORT zipClose(zipFile file, + const char* global_comment); /* Close the zipfile */ -extern int ZEXPORT zipRemoveExtraInfoBlock OF((char* pData, int* dataLen, short 
sHeader)); +extern int ZEXPORT zipRemoveExtraInfoBlock(char* pData, int* dataLen, short sHeader); /* zipRemoveExtraInfoBlock - Added by Mathias Svensson diff --git a/deps/zlib/contrib/tests/fuzzers/deflate_fuzzer.cc b/deps/zlib/contrib/tests/fuzzers/deflate_fuzzer.cc index 64892bc5539e3b..f986d78d49442c 100644 --- a/deps/zlib/contrib/tests/fuzzers/deflate_fuzzer.cc +++ b/deps/zlib/contrib/tests/fuzzers/deflate_fuzzer.cc @@ -23,42 +23,96 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { FuzzedDataProvider fdp(data, size); - int level = fdp.PickValueInArray({0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + int level = fdp.PickValueInArray({-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); int windowBits = fdp.PickValueInArray({9, 10, 11, 12, 13, 14, 15}); int memLevel = fdp.PickValueInArray({1, 2, 3, 4, 5, 6, 7, 8, 9}); int strategy = fdp.PickValueInArray( {Z_DEFAULT_STRATEGY, Z_FILTERED, Z_HUFFMAN_ONLY, Z_RLE, Z_FIXED}); - std::vector src = fdp.ConsumeRemainingBytes(); + + if (fdp.ConsumeBool()) { + // Gzip wrapper. + windowBits += 16; + } else if (fdp.ConsumeBool()) { + // Raw deflate. + windowBits *= -1; + } else { + // Default: zlib wrapper. + } + + std::vector src; + std::vector compressed; + static const int kMinChunk = 1; + static const int kMaxChunk = 512 * 1024; z_stream stream; stream.zalloc = Z_NULL; stream.zfree = Z_NULL; - - // Compress the data one byte at a time to exercise the streaming code. int ret = deflateInit2(&stream, level, Z_DEFLATED, windowBits, memLevel, strategy); ASSERT(ret == Z_OK); - size_t deflate_bound = deflateBound(&stream, src.size()); + // Stream with random-sized input and output buffers. + while (fdp.ConsumeBool()) { + if (fdp.ConsumeBool()) { + // Check that copying the stream's state works. Gating this behind + // ConsumeBool() allows to interleave deflateCopy() with deflate() calls + // to better stress the code. + z_stream stream2; + ASSERT(deflateCopy(&stream2, &stream) == Z_OK); + ret = deflateEnd(&stream); + ASSERT(ret == Z_OK || Z_DATA_ERROR); + memset(&stream, 0xff, sizeof(stream)); + + ASSERT(deflateCopy(&stream, &stream2) == Z_OK); + ret = deflateEnd(&stream2); + ASSERT(ret == Z_OK || Z_DATA_ERROR); + } - std::vector compressed(src.size() * 2 + 1000); - stream.next_out = compressed.data(); - stream.avail_out = compressed.size(); - for (uint8_t b : src) { - stream.next_in = &b; - stream.avail_in = 1; + std::vector src_chunk = fdp.ConsumeBytes( + fdp.ConsumeIntegralInRange(kMinChunk, kMaxChunk)); + std::vector out_chunk( + fdp.ConsumeIntegralInRange(kMinChunk, kMaxChunk)); + stream.next_in = src_chunk.data(); + stream.avail_in = src_chunk.size(); + stream.next_out = out_chunk.data(); + stream.avail_out = out_chunk.size(); ret = deflate(&stream, Z_NO_FLUSH); - ASSERT(ret == Z_OK); + ASSERT(ret == Z_OK || ret == Z_BUF_ERROR); + + src.insert(src.end(), src_chunk.begin(), src_chunk.end() - stream.avail_in); + compressed.insert(compressed.end(), out_chunk.begin(), + out_chunk.end() - stream.avail_out); + } + // Finish up. 
+ while (true) { + std::vector out_chunk( + fdp.ConsumeIntegralInRange(kMinChunk, kMaxChunk)); + stream.next_in = Z_NULL; + stream.avail_in = 0; + stream.next_out = out_chunk.data(); + stream.avail_out = out_chunk.size(); + ret = deflate(&stream, Z_FINISH); + compressed.insert(compressed.end(), out_chunk.begin(), + out_chunk.end() - stream.avail_out); + if (ret == Z_STREAM_END) { + break; + } + ASSERT(ret == Z_OK || Z_BUF_ERROR); } - stream.next_in = Z_NULL; - stream.avail_in = 0; - ret = deflate(&stream, Z_FINISH); - ASSERT(ret == Z_STREAM_END); - compressed.resize(compressed.size() - stream.avail_out); deflateEnd(&stream); - // Check that the bound was correct. + // Check deflateBound(). + // Use a newly initialized stream since computing the bound on a "used" stream + // may not yield a correct result (https://github.com/madler/zlib/issues/944). + z_stream bound_stream; + bound_stream.zalloc = Z_NULL; + bound_stream.zfree = Z_NULL; + ret = deflateInit2(&bound_stream, level, Z_DEFLATED, windowBits, memLevel, + strategy); + ASSERT(ret == Z_OK); + size_t deflate_bound = deflateBound(&bound_stream, src.size()); ASSERT(compressed.size() <= deflate_bound); + deflateEnd(&bound_stream); // Verify that the data decompresses correctly. ret = inflateInit2(&stream, windowBits); diff --git a/deps/zlib/contrib/tests/utils_unittest.cc b/deps/zlib/contrib/tests/utils_unittest.cc index 4a8027717920f6..0cc10813775f3e 100644 --- a/deps/zlib/contrib/tests/utils_unittest.cc +++ b/deps/zlib/contrib/tests/utils_unittest.cc @@ -1080,6 +1080,71 @@ TEST(ZlibTest, DeflateCopy) { 0); } +TEST(ZlibTest, GzipStored) { + // Check that deflating uncompressed blocks with a gzip header doesn't write + // out of bounds (crbug.com/325990053). + z_stream stream; + stream.zalloc = Z_NULL; + stream.zfree = Z_NULL; + static const int kGzipWrapper = 16; + int ret = deflateInit2(&stream, Z_NO_COMPRESSION, Z_DEFLATED, + 9 + kGzipWrapper, 9, Z_DEFAULT_STRATEGY); + ASSERT_EQ(ret, Z_OK); + + const std::vector src(512 * 1024); + stream.next_in = (unsigned char*)src.data(); + stream.avail_in = src.size(); + + std::vector out(1000); + stream.next_out = (unsigned char*)out.data(); + stream.avail_out = out.size(); + + ret = deflate(&stream, Z_NO_FLUSH); + ASSERT_EQ(ret, Z_OK); + + deflateEnd(&stream); +} + +TEST(ZlibTest, DeflateBound) { + // Check that the deflateBound() isn't too low when using non-default + // parameters (crbug.com/40270738). 
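Both the fuzzer change and the test that follows lean on the same calling pattern: query deflateBound() on a stream that has just been initialized with the final parameters and has not yet consumed any input, then size the output buffer from that bound. A minimal single-shot sketch of that pattern (the helper name compress_with_bound and its error handling are illustrative only, not part of the patch):

    #include <stdlib.h>
    #include <string.h>
    #include "zlib.h"

    /* Sketch: compress `src` in one shot, sizing the output from a bound taken
     * on a freshly initialized stream (querying deflateBound() on an already
     * used stream can come out too small, see madler/zlib issue 944). */
    static int compress_with_bound(unsigned char *src, uLong src_len, int level,
                                   int windowBits, int memLevel, int strategy) {
        z_stream strm;
        memset(&strm, 0, sizeof(strm));
        if (deflateInit2(&strm, level, Z_DEFLATED, windowBits, memLevel,
                         strategy) != Z_OK)
            return -1;
        uLong bound = deflateBound(&strm, src_len); /* before any deflate() call */
        unsigned char *out = malloc(bound);
        if (out == NULL) {
            deflateEnd(&strm);
            return -1;
        }
        strm.next_in = src;
        strm.avail_in = (uInt)src_len;
        strm.next_out = out;
        strm.avail_out = (uInt)bound;
        int ret = deflate(&strm, Z_FINISH); /* output is guaranteed to fit */
        deflateEnd(&strm);
        free(out);
        return ret == Z_STREAM_END ? 0 : -1;
    }

With the buffer sized this way, a single deflate(..., Z_FINISH) call returns Z_STREAM_END, which is the property both checks above exercise.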
+ const int level = 9; + const int windowBits = 15; + const int memLevel = 1; + const int strategy = Z_FIXED; + const uint8_t src[] = { + 49, 255, 255, 20, 45, 49, 167, 56, 55, 255, 255, 255, 223, 255, 49, + 255, 3, 78, 0, 0, 141, 253, 209, 163, 29, 195, 43, 60, 199, 123, + 112, 35, 134, 13, 148, 102, 212, 4, 184, 103, 7, 102, 225, 102, 156, + 164, 78, 48, 70, 49, 125, 162, 55, 116, 161, 174, 83, 0, 59, 0, + 225, 140, 0, 0, 63, 63, 4, 15, 198, 30, 126, 196, 33, 99, 135, + 41, 192, 82, 28, 105, 216, 170, 221, 14, 61, 1, 0, 0, 22, 195, + 45, 53, 244, 163, 167, 158, 229, 68, 18, 112, 49, 174, 43, 75, 90, + 161, 85, 19, 36, 163, 118, 228, 169, 180, 161, 237, 234, 253, 197, 234, + 66, 106, 12, 42, 124, 96, 160, 144, 183, 194, 157, 167, 202, 217}; + + z_stream stream; + stream.zalloc = Z_NULL; + stream.zfree = Z_NULL; + int ret = + deflateInit2(&stream, level, Z_DEFLATED, windowBits, memLevel, strategy); + ASSERT_EQ(ret, Z_OK); + size_t deflate_bound = deflateBound(&stream, sizeof(src)); + + uint8_t out[sizeof(src) * 10]; + stream.next_in = (uint8_t*)src; + stream.avail_in = sizeof(src); + stream.next_out = out; + stream.avail_out = sizeof(out); + ret = deflate(&stream, Z_FINISH); + ASSERT_EQ(ret, Z_STREAM_END); + + size_t out_size = sizeof(out) - stream.avail_out; + EXPECT_LE(out_size, deflate_bound); + + deflateEnd(&stream); +} + // TODO(gustavoa): make these tests run standalone. #ifndef CMAKE_STANDALONE_UNITTESTS diff --git a/deps/zlib/crc_folding.c b/deps/zlib/crc_folding.c index 1b4f4e1d193871..1d54ee8d48cff6 100644 --- a/deps/zlib/crc_folding.c +++ b/deps/zlib/crc_folding.c @@ -403,7 +403,7 @@ ZLIB_INTERNAL void crc_fold_copy(deflate_state *const s, } #endif - _mm_storeu_si128((__m128i *)dst, xmm_crc_part); + zmemcpy(dst, src, len); /* TODO: Possibly generate more efficient code. */ partial_fold(s, len, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, &xmm_crc_part); done: diff --git a/deps/zlib/deflate.c b/deps/zlib/deflate.c index 4920e7007af887..a67d195c5d46f2 100644 --- a/deps/zlib/deflate.c +++ b/deps/zlib/deflate.c @@ -387,11 +387,12 @@ int ZEXPORT deflateInit_(z_streamp strm, int level, const char *version, /* To do: ignore strm->next_in if we use it as window */ } +#define WINDOW_PADDING 8 + /* ========================================================================= */ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, int windowBits, int memLevel, int strategy, const char *version, int stream_size) { - unsigned window_padding = 8; deflate_state *s; int wrap = 1; static const char my_version[] = ZLIB_VERSION; @@ -477,11 +478,11 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, s->hash_shift = ((s->hash_bits + MIN_MATCH-1) / MIN_MATCH); s->window = (Bytef *) ZALLOC(strm, - s->w_size + window_padding, + s->w_size + WINDOW_PADDING, 2*sizeof(Byte)); /* Avoid use of unitialized values in the window, see crbug.com/1137613 and * crbug.com/1144420 */ - zmemzero(s->window, (s->w_size + window_padding) * (2 * sizeof(Byte))); + zmemzero(s->window, (s->w_size + WINDOW_PADDING) * (2 * sizeof(Byte))); s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); /* Avoid use of uninitialized value, see: * https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11360 @@ -923,6 +924,12 @@ uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen) { wraplen = 6; } + /* With Chromium's hashing, s->hash_bits may not correspond to the + memLevel, making the computations below incorrect. Return the + conservative bound. 
*/ + if (s->chromium_zlib_hash) + return (fixedlen > storelen ? fixedlen : storelen) + wraplen; + /* if not default parameters, return one of the conservative bounds */ if (s->w_bits != 15 || s->hash_bits != 8 + 7) return (s->w_bits <= s->hash_bits && s->level ? fixedlen : storelen) + @@ -1342,7 +1349,9 @@ int ZEXPORT deflateCopy(z_streamp dest, z_streamp source) { zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state)); ds->strm = dest; - ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); + ds->window = (Bytef *) ZALLOC(dest, + ds->w_size + WINDOW_PADDING, + 2*sizeof(Byte)); ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); #ifdef LIT_MEM @@ -1357,7 +1366,8 @@ int ZEXPORT deflateCopy(z_streamp dest, z_streamp source) { return Z_MEM_ERROR; } /* following zmemcpy do not work for 16-bit MSDOS */ - zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); + zmemcpy(ds->window, ss->window, + (ds->w_size + WINDOW_PADDING) * 2 * sizeof(Byte)); zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos)); zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos)); #ifdef LIT_MEM diff --git a/deps/zlib/google/zip_internal.cc b/deps/zlib/google/zip_internal.cc index b9976d63ee301b..9b20b421e24765 100644 --- a/deps/zlib/google/zip_internal.cc +++ b/deps/zlib/google/zip_internal.cc @@ -260,13 +260,12 @@ zip_fileinfo TimeToZipFileInfo(const base::Time& file_time) { // It assumes that dates below 1980 are in the double digit format. // Hence the fail safe option is to leave the date unset. Some programs // might show the unset date as 1980-0-0 which is invalid. - zip_info.tmz_date = { - .tm_sec = static_cast(file_time_parts.second), - .tm_min = static_cast(file_time_parts.minute), - .tm_hour = static_cast(file_time_parts.hour), - .tm_mday = static_cast(file_time_parts.day_of_month), - .tm_mon = static_cast(file_time_parts.month - 1), - .tm_year = static_cast(file_time_parts.year)}; + zip_info.tmz_date.tm_year = file_time_parts.year; + zip_info.tmz_date.tm_mon = file_time_parts.month - 1; + zip_info.tmz_date.tm_mday = file_time_parts.day_of_month; + zip_info.tmz_date.tm_hour = file_time_parts.hour; + zip_info.tmz_date.tm_min = file_time_parts.minute; + zip_info.tmz_date.tm_sec = file_time_parts.second; } return zip_info; diff --git a/deps/zlib/google/zip_reader_unittest.cc b/deps/zlib/google/zip_reader_unittest.cc index 8ef0274e112483..e6f89d7e4faaaa 100644 --- a/deps/zlib/google/zip_reader_unittest.cc +++ b/deps/zlib/google/zip_reader_unittest.cc @@ -234,8 +234,10 @@ TEST_F(ZipReaderTest, Open_ExistentButNonZipFile) { TEST_F(ZipReaderTest, Open_EmptyFile) { ZipReader reader; EXPECT_FALSE(reader.ok()); - EXPECT_FALSE(reader.Open(data_dir_.AppendASCII("empty.zip"))); - EXPECT_FALSE(reader.ok()); + EXPECT_TRUE(reader.Open(data_dir_.AppendASCII("empty.zip"))); + EXPECT_TRUE(reader.ok()); + EXPECT_EQ(0, reader.num_entries()); + EXPECT_EQ(nullptr, reader.Next()); } // Iterate through the contents in the test ZIP archive, and compare that the diff --git a/deps/zlib/patches/0000-build.patch b/deps/zlib/patches/0000-build.patch index 6119f09c05b73c..1861194dd50cd3 100644 --- a/deps/zlib/patches/0000-build.patch +++ b/deps/zlib/patches/0000-build.patch @@ -1,43 +1,41 @@ +diff --git a/contrib/minizip/ioapi.c b/contrib/minizip/ioapi.c +index 782d32469ae5d..a38881dca90a2 100644 +--- a/contrib/minizip/ioapi.c ++++ b/contrib/minizip/ioapi.c +@@ -14,7 +14,7 @@ + #define _CRT_SECURE_NO_WARNINGS + #endif + 
+-#if defined(__APPLE__) || defined(IOAPI_NO_64) || defined(__HAIKU__) || defined(MINIZIP_FOPEN_NO_64) ++#if defined(__APPLE__) || defined(__Fuchsia__) || defined(IOAPI_NO_64) || defined(__HAIKU__) || defined(MINIZIP_FOPEN_NO_64) + // In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need for specific 64 bit functions + #define FOPEN_FUNC(filename, mode) fopen(filename, mode) + #define FTELLO_FUNC(stream) ftello(stream) diff --git a/contrib/minizip/iowin32.c b/contrib/minizip/iowin32.c -index 274f39eb1dd2..246ceb91a139 100644 +index 08536e94b8a28..bbd7773e67146 100644 --- a/contrib/minizip/iowin32.c +++ b/contrib/minizip/iowin32.c -@@ -26,12 +26,19 @@ +@@ -25,7 +25,12 @@ + #define INVALID_SET_FILE_POINTER ((DWORD)-1) #endif - +#ifdef _WIN32_WINNT +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x601 +#endif -+ + +#if _WIN32_WINNT >= _WIN32_WINNT_WIN8 // see Include/shared/winapifamily.h in the Windows Kit #if defined(WINAPI_FAMILY_PARTITION) && (!(defined(IOWIN32_USING_WINRT_API))) - #if WINAPI_FAMILY_ONE_PARTITION(WINAPI_FAMILY, WINAPI_PARTITION_APP) + +@@ -37,6 +42,7 @@ #define IOWIN32_USING_WINRT_API 1 #endif #endif +#endif - voidpf ZCALLBACK win32_open_file_func OF((voidpf opaque, const char* filename, int mode)); - uLong ZCALLBACK win32_read_file_func OF((voidpf opaque, voidpf stream, void* buf, uLong size)); -diff --git a/contrib/minizip/unzip.c b/contrib/minizip/unzip.c -index bcfb9416ec35..199b4723fcfc 100644 ---- a/contrib/minizip/unzip.c -+++ b/contrib/minizip/unzip.c -@@ -1705,11 +1705,6 @@ extern int ZEXPORT unzReadCurrentFile (unzFile file, voidp buf, unsigned len) - - pfile_in_zip_read_info->stream.avail_out = (uInt)len; - -- if ((len>pfile_in_zip_read_info->rest_read_uncompressed) && -- (!(pfile_in_zip_read_info->raw))) -- pfile_in_zip_read_info->stream.avail_out = -- (uInt)pfile_in_zip_read_info->rest_read_uncompressed; -- - if ((len>pfile_in_zip_read_info->rest_read_compressed+ - pfile_in_zip_read_info->stream.avail_in) && - (pfile_in_zip_read_info->raw)) + typedef struct + { diff --git a/gzread.c b/gzread.c index 956b91ea7d9e..832d3ef98c59 100644 --- a/gzread.c diff --git a/deps/zlib/patches/0001-simd.patch b/deps/zlib/patches/0001-simd.patch index 9434ca0cc4a68c..dccf5056064e99 100644 --- a/deps/zlib/patches/0001-simd.patch +++ b/deps/zlib/patches/0001-simd.patch @@ -449,7 +449,7 @@ index 000000000000..48d77744aaf4 + } +#endif + -+ _mm_storeu_si128((__m128i *)dst, xmm_crc_part); ++ zmemcpy(dst, src, len); /* TODO: Possibly generate more efficient code. 
*/ + partial_fold(s, len, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3, + &xmm_crc_part); +done: diff --git a/deps/zlib/patches/0004-fix-uwp.patch b/deps/zlib/patches/0004-fix-uwp.patch index 23145a7ae5357f..edef10a2025678 100644 --- a/deps/zlib/patches/0004-fix-uwp.patch +++ b/deps/zlib/patches/0004-fix-uwp.patch @@ -1,14 +1,19 @@ diff --git a/third_party/zlib/contrib/minizip/iowin32.c b/third_party/zlib/contrib/minizip/iowin32.c -index 246ceb91a139..c6bc314b3c28 100644 +index bbd7773e67146..3f6867fd7e40b 100644 --- a/third_party/zlib/contrib/minizip/iowin32.c +++ b/third_party/zlib/contrib/minizip/iowin32.c -@@ -31,14 +31,12 @@ +@@ -30,19 +30,12 @@ #define _WIN32_WINNT 0x601 #endif -#if _WIN32_WINNT >= _WIN32_WINNT_WIN8 -// see Include/shared/winapifamily.h in the Windows Kit -#if defined(WINAPI_FAMILY_PARTITION) && (!(defined(IOWIN32_USING_WINRT_API))) +- +-#if !defined(WINAPI_FAMILY_ONE_PARTITION) +-#define WINAPI_FAMILY_ONE_PARTITION(PartitionSet, Partition) ((WINAPI_FAMILY & PartitionSet) == Partition) +-#endif +- -#if WINAPI_FAMILY_ONE_PARTITION(WINAPI_FAMILY, WINAPI_PARTITION_APP) +#if !defined(IOWIN32_USING_WINRT_API) +#if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP) @@ -18,5 +23,5 @@ index 246ceb91a139..c6bc314b3c28 100644 #endif -#endif - voidpf ZCALLBACK win32_open_file_func OF((voidpf opaque, const char* filename, int mode)); - uLong ZCALLBACK win32_read_file_func OF((voidpf opaque, voidpf stream, void* buf, uLong size)); + typedef struct + { diff --git a/deps/zlib/patches/0008-minizip-zip-unzip-tools.patch b/deps/zlib/patches/0008-minizip-zip-unzip-tools.patch index 48ceb02d4c3781..273a8c98011bed 100644 --- a/deps/zlib/patches/0008-minizip-zip-unzip-tools.patch +++ b/deps/zlib/patches/0008-minizip-zip-unzip-tools.patch @@ -9,13 +9,13 @@ Subject: [PATCH] Build minizip zip and unzip tools 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/third_party/zlib/contrib/minizip/miniunz.c b/third_party/zlib/contrib/minizip/miniunz.c -index 3d65401be5cd..08737f689a96 100644 +index 8ada038dbd4e7..5b4312e5647cd 100644 --- a/third_party/zlib/contrib/minizip/miniunz.c +++ b/third_party/zlib/contrib/minizip/miniunz.c @@ -12,7 +12,7 @@ Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) */ - + -#if (!defined(_WIN32)) && (!defined(WIN32)) && (!defined(__APPLE__)) +#if (!defined(_WIN32)) && (!defined(WIN32)) && (!defined(__APPLE__)) && (!defined(__ANDROID_API__)) #ifndef __USE_FILE_OFFSET64 @@ -24,32 +24,24 @@ index 3d65401be5cd..08737f689a96 100644 @@ -27,7 +27,7 @@ #endif #endif - --#ifdef __APPLE__ -+#if defined(__APPLE__) || defined(__Fuchsia__) || defined(__ANDROID_API__) + +-#if defined(__APPLE__) || defined(__HAIKU__) || defined(MINIZIP_FOPEN_NO_64) ++#if defined(__APPLE__) || defined(__HAIKU__) || defined(MINIZIP_FOPEN_NO_64) || defined(__Fuchsia__) || defined(__ANDROID_API__) // In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need for specific 64 bit functions #define FOPEN_FUNC(filename, mode) fopen(filename, mode) #define FTELLO_FUNC(stream) ftello(stream) -@@ -45,6 +45,7 @@ - #include - #include - #include -+#include - - #ifdef _WIN32 - # include -@@ -97,7 +98,7 @@ void change_file_date(filename,dosdate,tmu_date) +@@ -94,7 +94,7 @@ static void change_file_date(const char *filename, uLong dosdate, tm_unz tmu_dat SetFileTime(hFile,&ftm,&ftLastAcc,&ftm); CloseHandle(hFile); #else --#ifdef unix || __APPLE__ +-#if defined(unix) || defined(__APPLE__) +#if defined(unix) || defined(__APPLE__) || defined(__Fuchsia__) || 
defined(__ANDROID_API__) + (void)dosdate; struct utimbuf ut; struct tm newdate; - newdate.tm_sec = tmu_date.tm_sec; -@@ -125,11 +126,9 @@ int mymkdir(dirname) - const char* dirname; - { +@@ -125,11 +125,9 @@ static void change_file_date(const char *filename, uLong dosdate, tm_unz tmu_dat + + static int mymkdir(const char* dirname) { int ret=0; -#ifdef _WIN32 +#if defined(_WIN32) @@ -59,16 +51,16 @@ index 3d65401be5cd..08737f689a96 100644 -#elif __APPLE__ +#elif defined(unix) || defined(__APPLE__) || defined(__Fuchsia__) || defined(__ANDROID_API__) ret = mkdir (dirname,0775); - #endif - return ret; + #else + (void)dirname; diff --git a/third_party/zlib/contrib/minizip/minizip.c b/third_party/zlib/contrib/minizip/minizip.c -index 4288962ecef0..b794953c5c23 100644 +index 26ee8d029efe6..9eb3956a55e00 100644 --- a/third_party/zlib/contrib/minizip/minizip.c +++ b/third_party/zlib/contrib/minizip/minizip.c @@ -12,8 +12,7 @@ Copyright (C) 2009-2010 Mathias Svensson ( http://result42.com ) */ - + - -#if (!defined(_WIN32)) && (!defined(WIN32)) && (!defined(__APPLE__)) +#if (!defined(_WIN32)) && (!defined(WIN32)) && (!defined(__APPLE__)) && (!defined(__ANDROID_API__)) @@ -78,21 +70,20 @@ index 4288962ecef0..b794953c5c23 100644 @@ -28,7 +27,7 @@ #endif #endif - --#ifdef __APPLE__ -+#if defined(__APPLE__) || defined(__Fuchsia__) || defined(__ANDROID_API__) + +-#if defined(__APPLE__) || defined(__HAIKU__) || defined(MINIZIP_FOPEN_NO_64) ++#if defined(__APPLE__) || defined(__HAIKU__) || defined(MINIZIP_FOPEN_NO_64) || defined(__Fuchsia__) || defined(__ANDROID_API__) // In darwin and perhaps other BSD variants off_t is a 64 bit value, hence no need for specific 64 bit functions #define FOPEN_FUNC(filename, mode) fopen(filename, mode) #define FTELLO_FUNC(stream) ftello(stream) -@@ -94,7 +93,7 @@ uLong filetime(f, tmzip, dt) +@@ -92,7 +91,7 @@ static int filetime(const char *f, tm_zip *tmzip, uLong *dt) { return ret; } #else --#ifdef unix || __APPLE__ +-#if defined(unix) || defined(__APPLE__) +#if defined(unix) || defined(__APPLE__) || defined(__Fuchsia__) || defined(__ANDROID_API__) - uLong filetime(f, tmzip, dt) - char *f; /* name of file to get info on */ - tm_zip *tmzip; /* return value: access, modific. and creation times */ + /* f: name of file to get info on, tmzip: return value: access, + modification and creation times, dt: dostime */ + static int filetime(const char *f, tm_zip *tmzip, uLong *dt) { -- 2.31.1.818.g46aad6cb9e-goog - diff --git a/deps/zlib/patches/0014-minizip-unzip-with-incorrect-size.patch b/deps/zlib/patches/0014-minizip-unzip-with-incorrect-size.patch new file mode 100644 index 00000000000000..5ede561e3c20dd --- /dev/null +++ b/deps/zlib/patches/0014-minizip-unzip-with-incorrect-size.patch @@ -0,0 +1,34 @@ +commit 764f0715d75c8d49339aa73d0ee2feb75d63473f +Author: joaoe@opera.com +Date: Wed May 7 20:53:02 2014 +0000 + + Fixed uncompressing files with wrong uncompressed size set. + + A zip file carries some metadata for each archived file, including the total + uncompressed size. If that size was incorrect, therefore the compressed file + being different in size when unpacking, the minizip code would fail with a + CRC error. Every other zip utility handles these files, so should the minizip + code for safety sake. 
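In caller terms this means the uncompressed size recorded in the zip metadata is only a hint: a robust extraction loop keeps calling unzReadCurrentFile() into its own fixed-size buffer until it reports end of data, instead of sizing a single read from the value in unz_file_info. A rough sketch of such a loop (drain_current_entry and the 4 KiB buffer are illustrative only, not part of the patch):

    #include "unzip.h"

    /* Sketch: read the currently opened entry to its real end instead of
     * trusting the uncompressed size recorded in the zip metadata. */
    static int drain_current_entry(unzFile uf) {
        char buf[4096];
        int n;
        while ((n = unzReadCurrentFile(uf, buf, sizeof(buf))) > 0) {
            /* hand n bytes of uncompressed data to the consumer here */
        }
        return n; /* 0 at end of entry, negative (e.g. UNZ_ERRNO) on error */
    }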
+ + BUG=359516 + + Review URL: https://codereview.chromium.org/222243003 + + git-svn-id: svn://svn.chromium.org/chrome/trunk/src@268940 0039d316-1c4b-4281-b951-d872f2087c98 + +diff --git a/third_party/zlib/contrib/minizip/unzip.c b/third_party/zlib/contrib/minizip/unzip.c +index ed763f89f1f87..82275d6c1775d 100644 +--- a/third_party/zlib/contrib/minizip/unzip.c ++++ b/third_party/zlib/contrib/minizip/unzip.c +@@ -1572,11 +1572,6 @@ extern int ZEXPORT unzReadCurrentFile(unzFile file, voidp buf, unsigned len) { + + pfile_in_zip_read_info->stream.avail_out = (uInt)len; + +- if ((len>pfile_in_zip_read_info->rest_read_uncompressed) && +- (!(pfile_in_zip_read_info->raw))) +- pfile_in_zip_read_info->stream.avail_out = +- (uInt)pfile_in_zip_read_info->rest_read_uncompressed; +- + if ((len>pfile_in_zip_read_info->rest_read_compressed+ + pfile_in_zip_read_info->stream.avail_in) && + (pfile_in_zip_read_info->raw)) diff --git a/deps/zlib/patches/0015-minizip-unzip-enable-decryption.patch b/deps/zlib/patches/0015-minizip-unzip-enable-decryption.patch new file mode 100644 index 00000000000000..966e83c7dc5d76 --- /dev/null +++ b/deps/zlib/patches/0015-minizip-unzip-enable-decryption.patch @@ -0,0 +1,39 @@ +commit f3ace98803035b8425d127fb3d874dafe0b9475a +Author: Che-yu Wu +Date: Mon Aug 6 14:09:22 2018 +0000 + + Enable traditional PKWARE decryption in zlib/contrib/minizip. + + Remove the #define which enables NOUNCRYPT by default. + Correct the value of rest_read_compressed when decompressing an encrypted zip. + + Bug: crbug.com/869541 + Change-Id: Ia86c1d234a8193f405147d35ad05c29fe86f812d + Reviewed-on: https://chromium-review.googlesource.com/1161109 + Reviewed-by: Chris Blume + Commit-Queue: Che-yu Wu + Cr-Commit-Position: refs/heads/master@{#580862} + +diff --git a/third_party/zlib/contrib/minizip/unzip.c b/third_party/zlib/contrib/minizip/unzip.c +index 82275d6c1775d..c8a01b23efd42 100644 +--- a/third_party/zlib/contrib/minizip/unzip.c ++++ b/third_party/zlib/contrib/minizip/unzip.c +@@ -68,10 +68,6 @@ + #include + #include + +-#ifndef NOUNCRYPT +- #define NOUNCRYPT +-#endif +- + #include "zlib.h" + #include "unzip.h" + +@@ -1502,6 +1498,7 @@ extern int ZEXPORT unzOpenCurrentFile3(unzFile file, int* method, + zdecode(s->keys,s->pcrc_32_tab,source[i]); + + s->pfile_in_zip_read->pos_in_zipfile+=12; ++ s->pfile_in_zip_read->rest_read_compressed-=12; + s->encrypted=1; + } + # endif diff --git a/deps/zlib/patches/0016-minizip-parse-unicode-path-extra-field.patch b/deps/zlib/patches/0016-minizip-parse-unicode-path-extra-field.patch new file mode 100644 index 00000000000000..9a99a8ae027dfc --- /dev/null +++ b/deps/zlib/patches/0016-minizip-parse-unicode-path-extra-field.patch @@ -0,0 +1,117 @@ +commit c8834821f452a3d424edd0ed2a1e9ceeda38d0ea +Author: Alex Danilo +Date: Thu May 12 03:29:52 2022 +0000 + + Extract: Parse Unicode Path Extra field in minizip + + Adds parsing of the Info-ZIP Extra field which overrides the + file name in the File Header only if the CRC in the extra field + is a CRC of the file name in the File Header. + + See https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT + section 4.6.9 for reference. + + Also tidied up some whitespace indent. 
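For reference, the field parsed by the hunk below has the layout fixed by that APPNOTE section, and the UTF-8 name is honoured only when the embedded CRC matches a CRC-32 of the file name already read from the header, so a stale field cannot silently rename an entry. A schematic summary (informational only, not part of the patch):

    Info-ZIP Unicode Path Extra Field (header ID 0x7075), APPNOTE.TXT 4.6.9:

        offset  size  field
        0       2     header ID    = 0x7075
        2       2     TSize        number of bytes that follow
        4       1     version      = 1
        5       4     NameCRC32    CRC-32 of the header file name
        9       n     UnicodeName  UTF-8 file name, n = TSize - 5

    The UnicodeName is used only if
        crc32(0L, (const Bytef *)header_name, (uInt)header_name_len) == NameCRC32;
    otherwise the parser seeks past the remaining bytes and keeps the header name.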
+ + Bug: 953256, 953599 + Tests: Manually tested, auto test in follow on CL + Change-Id: I1283dcb88a203c3bb56c1d9c504035a2e51aecbd + Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3641742 + Reviewed-by: Noel Gordon + Commit-Queue: Alex Danilo + Cr-Commit-Position: refs/heads/main@{#1002476} + +diff --git a/third_party/zlib/contrib/minizip/unzip.c b/third_party/zlib/contrib/minizip/unzip.c +index c8a01b23efd42..42677cff82c96 100644 +--- a/third_party/zlib/contrib/minizip/unzip.c ++++ b/third_party/zlib/contrib/minizip/unzip.c +@@ -193,6 +193,26 @@ typedef struct + Reads a long in LSB order from the given gz_stream. Sets + */ + ++local int unz64local_getByte(const zlib_filefunc64_32_def* pzlib_filefunc_def, ++ voidpf filestream, ++ int *pi) { ++ unsigned char c; ++ int err = (int)ZREAD64(*pzlib_filefunc_def,filestream,&c,1); ++ if (err==1) ++ { ++ *pi = (int)c; ++ return UNZ_OK; ++ } ++ else ++ { ++ *pi = 0; ++ if (ZERROR64(*pzlib_filefunc_def,filestream)) ++ return UNZ_ERRNO; ++ else ++ return UNZ_EOF; ++ } ++} ++ + local int unz64local_getShort(const zlib_filefunc64_32_def* pzlib_filefunc_def, + voidpf filestream, + uLong *pX) { +@@ -948,6 +968,62 @@ local int unz64local_GetCurrentFileInfoInternal(unzFile file, + } + + } ++ else if (headerId == 0x7075) /* Info-ZIP Unicode Path Extra Field */ ++ { ++ int version = 0; ++ ++ if (unz64local_getByte(&s->z_filefunc, s->filestream, &version) != UNZ_OK) ++ { ++ err = UNZ_ERRNO; ++ } ++ if (version != 1) ++ { ++ if (ZSEEK64(s->z_filefunc, s->filestream,dataSize - 1, ZLIB_FILEFUNC_SEEK_CUR) != 0) ++ { ++ err = UNZ_ERRNO; ++ } ++ } ++ else ++ { ++ uLong uCrc, uHeaderCrc, fileNameSize; ++ ++ if (unz64local_getLong(&s->z_filefunc, s->filestream, &uCrc) != UNZ_OK) ++ { ++ err = UNZ_ERRNO; ++ } ++ uHeaderCrc = crc32(0, (const unsigned char *)szFileName, file_info.size_filename); ++ fileNameSize = dataSize - (2 * sizeof (short) + 1); ++ /* Check CRC against file name in the header. 
*/ ++ if (uHeaderCrc != uCrc) ++ { ++ if (ZSEEK64(s->z_filefunc, s->filestream, fileNameSize, ZLIB_FILEFUNC_SEEK_CUR) != 0) ++ { ++ err = UNZ_ERRNO; ++ } ++ } ++ else ++ { ++ uLong uSizeRead; ++ ++ if (fileNameSize < fileNameBufferSize) ++ { ++ *(szFileName + fileNameSize) = '\0'; ++ uSizeRead = fileNameSize; ++ } ++ else ++ { ++ uSizeRead = fileNameBufferSize; ++ } ++ if ((fileNameSize > 0) && (fileNameBufferSize > 0)) ++ { ++ if (ZREAD64(s->z_filefunc, s->filestream, szFileName, uSizeRead) != uSizeRead) ++ { ++ err = UNZ_ERRNO; ++ } ++ } ++ } ++ } ++ } + else + { + if (ZSEEK64(s->z_filefunc, s->filestream,dataSize,ZLIB_FILEFUNC_SEEK_CUR)!=0) diff --git a/deps/zlib/zutil.h b/deps/zlib/zutil.h index 6980a5f4ea3446..2e2f57665bba81 100644 --- a/deps/zlib/zutil.h +++ b/deps/zlib/zutil.h @@ -152,17 +152,8 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ # endif #endif -#if defined(MACOS) || defined(TARGET_OS_MAC) +#if defined(MACOS) # define OS_CODE 7 -# ifndef Z_SOLO -# if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os -# include /* for fdopen */ -# else -# ifndef fdopen -# define fdopen(fd,mode) NULL /* No fdopen() */ -# endif -# endif -# endif #endif #ifdef __acorn @@ -185,18 +176,6 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ # define OS_CODE 19 #endif -#if defined(_BEOS_) || defined(RISCOS) -# define fdopen(fd,mode) NULL /* No fdopen() */ -#endif - -#if (defined(_MSC_VER) && (_MSC_VER > 600)) && !defined __INTERIX -# if defined(_WIN32_WCE) -# define fdopen(fd,mode) NULL /* No fdopen() */ -# else -# define fdopen(fd,type) _fdopen(fd,type) -# endif -#endif - #if defined(__BORLANDC__) && !defined(MSDOS) #pragma warn -8004 #pragma warn -8008 From 1152d7f9197223d6f334af81c3635f61128bf4fb Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Wed, 27 Mar 2024 21:53:28 +0200 Subject: [PATCH 35/41] deps: update zlib to 1.3.0.1-motley-24c07df PR-URL: https://github.com/nodejs/node/pull/52199 Reviewed-By: Marco Ippolito Reviewed-By: Luigi Pinca --- deps/zlib/CMakeLists.txt | 46 +++++++++++------ deps/zlib/adler32.c | 13 +++-- deps/zlib/adler32_simd.c | 104 +++++++++++++++++++++++++++++++++++++++ deps/zlib/cpu_features.c | 32 ++++++++++-- deps/zlib/cpu_features.h | 3 ++ deps/zlib/crc32.c | 6 ++- deps/zlib/deflate.c | 3 +- 7 files changed, 182 insertions(+), 25 deletions(-) diff --git a/deps/zlib/CMakeLists.txt b/deps/zlib/CMakeLists.txt index 8389cdd6c38faa..c3f424770d92ce 100644 --- a/deps/zlib/CMakeLists.txt +++ b/deps/zlib/CMakeLists.txt @@ -74,6 +74,16 @@ if (ENABLE_SIMD_OPTIMIZATIONS) SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto") endif() + + if (CMAKE_SYSTEM_PROCESSOR STREQUAL "riscv64") + add_definitions(-DRISCV_RVV) + add_definitions(-DDEFLATE_SLIDE_HASH_RVV) + add_definitions(-DADLER32_SIMD_RVV) + #TODO(cavalcantii): add remaining flags as we port optimizations to RVV. + # Required by CPU features detection code. 
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --target=riscv64-unknown-linux-gnu -march=rv64gcv") + endif() + endif() # @@ -180,20 +190,28 @@ set(ZLIB_SRCS # Update list of source files if optimizations were enabled #============================================================================ if (ENABLE_SIMD_OPTIMIZATIONS) - list(REMOVE_ITEM ZLIB_SRCS inflate.c) - - list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.h) - list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/chunkcopy.h) - list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inffast_chunk.h) - list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.h) - list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/crc32_simd.h) - - list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.c) - list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inffast_chunk.c) - list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inflate.c) - list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.c) - list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/crc32_simd.c) - list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/crc_folding.c) + if (CMAKE_SYSTEM_PROCESSOR STREQUAL "riscv64") + message("RISCVV: Add optimizations.") + list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.h) + list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.h) + list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.c) + list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.c) + else() + list(REMOVE_ITEM ZLIB_SRCS inflate.c) + + list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.h) + list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/chunkcopy.h) + list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inffast_chunk.h) + list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.h) + list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/crc32_simd.h) + + list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.c) + list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inffast_chunk.c) + list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inflate.c) + list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.c) + list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/crc32_simd.c) + list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/crc_folding.c) + endif() endif() # parse the full version number from zlib.h and include in ZLIB_FULL_VERSION diff --git a/deps/zlib/adler32.c b/deps/zlib/adler32.c index 99a294496f7eb5..de78b4e56b038e 100644 --- a/deps/zlib/adler32.c +++ b/deps/zlib/adler32.c @@ -58,7 +58,7 @@ #endif #include "cpu_features.h" -#if defined(ADLER32_SIMD_SSSE3) || defined(ADLER32_SIMD_NEON) +#if defined(ADLER32_SIMD_SSSE3) || defined(ADLER32_SIMD_NEON) || defined(ADLER32_SIMD_RVV) #include "adler32_simd.h" #endif @@ -66,12 +66,16 @@ uLong ZEXPORT adler32_z(uLong adler, const Bytef *buf, z_size_t len) { unsigned long sum2; unsigned n; - + /* TODO(cavalcantii): verify if this lengths are optimal for current CPUs. 
*/ +#if defined(ADLER32_SIMD_SSSE3) || defined(ADLER32_SIMD_NEON) \ + || defined(ADLER32_SIMD_RVV) #if defined(ADLER32_SIMD_SSSE3) if (buf != Z_NULL && len >= 64 && x86_cpu_enable_ssse3) - return adler32_simd_(adler, buf, len); #elif defined(ADLER32_SIMD_NEON) if (buf != Z_NULL && len >= 64) +#elif defined(ADLER32_SIMD_RVV) + if (buf != Z_NULL && len >= 32 && riscv_cpu_enable_rvv) +#endif return adler32_simd_(adler, buf, len); #endif @@ -90,7 +94,8 @@ uLong ZEXPORT adler32_z(uLong adler, const Bytef *buf, z_size_t len) { return adler | (sum2 << 16); } -#if defined(ADLER32_SIMD_SSSE3) || defined(ADLER32_SIMD_NEON) +#if defined(ADLER32_SIMD_SSSE3) || defined(ADLER32_SIMD_NEON) \ + || defined(RISCV_RVV) /* * Use SIMD to compute the adler32. Since this function can be * freely used, check CPU features here. zlib convention is to diff --git a/deps/zlib/adler32_simd.c b/deps/zlib/adler32_simd.c index 58966eecf0b800..9970ea9ca71857 100644 --- a/deps/zlib/adler32_simd.c +++ b/deps/zlib/adler32_simd.c @@ -41,6 +41,9 @@ * [2] zlib adler32_z() uses this fact to implement NMAX-block-based updates * of the adler s1 s2 of uint32_t type (see adler32.c). */ +/* Copyright (C) 2023 SiFive, Inc. All rights reserved. + * For conditions of distribution and use, see copyright notice in zlib.h + */ #include "adler32_simd.h" @@ -363,4 +366,105 @@ uint32_t ZLIB_INTERNAL adler32_simd_( /* NEON */ return s1 | (s2 << 16); } +#elif defined(ADLER32_SIMD_RVV) +#include +/* adler32_rvv.c - RVV version of Adler-32 + * RVV 1.0 code contributed by Alex Chiang + * on https://github.com/zlib-ng/zlib-ng/pull/1532 + * Port from Simon Hosie's fork: + * https://github.com/cloudflare/zlib/commit/40688b53c61cb9bfc36471acd2dc0800b7ebcab1 + */ + +uint32_t ZLIB_INTERNAL adler32_simd_( /* RVV */ + uint32_t adler, + const unsigned char *buf, + unsigned long len) +{ + /* split Adler-32 into component sums */ + uint32_t sum2 = (adler >> 16) & 0xffff; + adler &= 0xffff; + + size_t left = len; + size_t vl = __riscv_vsetvlmax_e8m1(); + vl = vl > 256 ? 256 : vl; + vuint32m4_t v_buf32_accu = __riscv_vmv_v_x_u32m4(0, vl); + vuint32m4_t v_adler32_prev_accu = __riscv_vmv_v_x_u32m4(0, vl); + vuint16m2_t v_buf16_accu; + + /* + * We accumulate 8-bit data, and to prevent overflow, we have to use a 32-bit accumulator. + * However, adding 8-bit data into a 32-bit accumulator isn't efficient. We use 16-bit & 32-bit + * accumulators to boost performance. + * + * The block_size is the largest multiple of vl that <= 256, because overflow would occur when + * vl > 256 (255 * 256 <= UINT16_MAX). + * + * We accumulate 8-bit data into a 16-bit accumulator and then + * move the data into the 32-bit accumulator at the last iteration. 
+ */ + size_t block_size = (256 / vl) * vl; + size_t nmax_limit = (NMAX / block_size); + size_t cnt = 0; + while (left >= block_size) { + v_buf16_accu = __riscv_vmv_v_x_u16m2(0, vl); + size_t subprob = block_size; + while (subprob > 0) { + vuint8m1_t v_buf8 = __riscv_vle8_v_u8m1(buf, vl); + v_adler32_prev_accu = __riscv_vwaddu_wv_u32m4(v_adler32_prev_accu, v_buf16_accu, vl); + v_buf16_accu = __riscv_vwaddu_wv_u16m2(v_buf16_accu, v_buf8, vl); + buf += vl; + subprob -= vl; + } + v_adler32_prev_accu = __riscv_vmacc_vx_u32m4(v_adler32_prev_accu, block_size / vl, v_buf32_accu, vl); + v_buf32_accu = __riscv_vwaddu_wv_u32m4(v_buf32_accu, v_buf16_accu, vl); + left -= block_size; + /* do modulo once each block of NMAX size */ + if (++cnt >= nmax_limit) { + v_adler32_prev_accu = __riscv_vremu_vx_u32m4(v_adler32_prev_accu, BASE, vl); + cnt = 0; + } + } + /* the left len <= 256 now, we can use 16-bit accum safely */ + v_buf16_accu = __riscv_vmv_v_x_u16m2(0, vl); + size_t res = left; + while (left >= vl) { + vuint8m1_t v_buf8 = __riscv_vle8_v_u8m1(buf, vl); + v_adler32_prev_accu = __riscv_vwaddu_wv_u32m4(v_adler32_prev_accu, v_buf16_accu, vl); + v_buf16_accu = __riscv_vwaddu_wv_u16m2(v_buf16_accu, v_buf8, vl); + buf += vl; + left -= vl; + } + v_adler32_prev_accu = __riscv_vmacc_vx_u32m4(v_adler32_prev_accu, res / vl, v_buf32_accu, vl); + v_adler32_prev_accu = __riscv_vremu_vx_u32m4(v_adler32_prev_accu, BASE, vl); + v_buf32_accu = __riscv_vwaddu_wv_u32m4(v_buf32_accu, v_buf16_accu, vl); + + vuint32m4_t v_seq = __riscv_vid_v_u32m4(vl); + vuint32m4_t v_rev_seq = __riscv_vrsub_vx_u32m4(v_seq, vl, vl); + vuint32m4_t v_sum32_accu = __riscv_vmul_vv_u32m4(v_buf32_accu, v_rev_seq, vl); + + v_sum32_accu = __riscv_vadd_vv_u32m4(v_sum32_accu, __riscv_vmul_vx_u32m4(v_adler32_prev_accu, vl, vl), vl); + + vuint32m1_t v_sum2_sum = __riscv_vmv_s_x_u32m1(0, vl); + v_sum2_sum = __riscv_vredsum_vs_u32m4_u32m1(v_sum32_accu, v_sum2_sum, vl); + uint32_t sum2_sum = __riscv_vmv_x_s_u32m1_u32(v_sum2_sum); + + sum2 += (sum2_sum + adler * (len - left)); + + vuint32m1_t v_adler_sum = __riscv_vmv_s_x_u32m1(0, vl); + v_adler_sum = __riscv_vredsum_vs_u32m4_u32m1(v_buf32_accu, v_adler_sum, vl); + uint32_t adler_sum = __riscv_vmv_x_s_u32m1_u32(v_adler_sum); + + adler += adler_sum; + + while (left--) { + adler += *buf++; + sum2 += adler; + } + + sum2 %= BASE; + adler %= BASE; + + return adler | (sum2 << 16); +} + #endif /* ADLER32_SIMD_SSSE3 */ diff --git a/deps/zlib/cpu_features.c b/deps/zlib/cpu_features.c index 64e0428cd2fc2d..34ae7b913af9a2 100644 --- a/deps/zlib/cpu_features.c +++ b/deps/zlib/cpu_features.c @@ -33,9 +33,13 @@ int ZLIB_INTERNAL x86_cpu_enable_ssse3 = 0; int ZLIB_INTERNAL x86_cpu_enable_simd = 0; int ZLIB_INTERNAL x86_cpu_enable_avx512 = 0; +int ZLIB_INTERNAL riscv_cpu_enable_rvv = 0; +int ZLIB_INTERNAL riscv_cpu_enable_vclmul = 0; + #ifndef CPU_NO_SIMD -#if defined(ARMV8_OS_ANDROID) || defined(ARMV8_OS_LINUX) || defined(ARMV8_OS_FUCHSIA) || defined(ARMV8_OS_IOS) +#if defined(ARMV8_OS_ANDROID) || defined(ARMV8_OS_LINUX) || \ + defined(ARMV8_OS_FUCHSIA) || defined(ARMV8_OS_IOS) #include #endif @@ -62,7 +66,10 @@ int ZLIB_INTERNAL x86_cpu_enable_avx512 = 0; static void _cpu_check_features(void); #endif -#if defined(ARMV8_OS_ANDROID) || defined(ARMV8_OS_LINUX) || defined(ARMV8_OS_MACOS) || defined(ARMV8_OS_FUCHSIA) || defined(X86_NOT_WINDOWS) || defined(ARMV8_OS_IOS) +#if defined(ARMV8_OS_ANDROID) || defined(ARMV8_OS_LINUX) || \ + defined(ARMV8_OS_MACOS) || defined(ARMV8_OS_FUCHSIA) || \ + defined(X86_NOT_WINDOWS) || 
defined(ARMV8_OS_IOS) || \ + defined(RISCV_RVV) #if !defined(ARMV8_OS_MACOS) // _cpu_check_features() doesn't need to do anything on mac/arm since all // features are known at build time, so don't call it. @@ -184,6 +191,23 @@ static void _cpu_check_features(void) x86_cpu_enable_avx512 = _xgetbv(0) & 0x00000040; #endif } +#endif // x86 & NO_SIMD + +#elif defined(RISCV_RVV) +#include + +#ifndef ZLIB_HWCAP_RVV +#define ZLIB_HWCAP_RVV (1 << ('v' - 'a')) #endif -#endif -#endif + +/* TODO(cavalcantii) + * - add support for Android@RISCV i.e. __riscv_hwprobe(). + * - detect vclmul (crypto extensions). + */ +static void _cpu_check_features(void) +{ + unsigned long features = getauxval(AT_HWCAP); + riscv_cpu_enable_rvv = !!(features & ZLIB_HWCAP_RVV); +} +#endif // ARM | x86 | RISCV +#endif // NO SIMD CPU diff --git a/deps/zlib/cpu_features.h b/deps/zlib/cpu_features.h index aed3e834c5ac89..6092c7e852bda2 100644 --- a/deps/zlib/cpu_features.h +++ b/deps/zlib/cpu_features.h @@ -16,4 +16,7 @@ extern int x86_cpu_enable_ssse3; extern int x86_cpu_enable_simd; extern int x86_cpu_enable_avx512; +extern int riscv_cpu_enable_rvv; +extern int riscv_cpu_enable_vclmul; + void cpu_check_features(void); diff --git a/deps/zlib/crc32.c b/deps/zlib/crc32.c index cf8579f30aa707..32686f92488c51 100644 --- a/deps/zlib/crc32.c +++ b/deps/zlib/crc32.c @@ -706,7 +706,8 @@ unsigned long ZEXPORT crc32_z(unsigned long crc, const unsigned char FAR *buf, * place to cache CPU features if needed for those later, more * interesting crc32() calls. */ -#if defined(CRC32_SIMD_SSE42_PCLMUL) || defined(CRC32_ARMV8_CRC32) +#if defined(CRC32_SIMD_SSE42_PCLMUL) || defined(CRC32_ARMV8_CRC32) \ + || defined(RISCV_RVV) /* * Since this routine can be freely used, check CPU features here. */ @@ -1085,7 +1086,8 @@ unsigned long ZEXPORT crc32(unsigned long crc, const unsigned char FAR *buf, /* Some bots compile with optimizations disabled, others will emulate * ARM on x86 and other weird combinations. */ -#if defined(CRC32_SIMD_SSE42_PCLMUL) || defined(CRC32_ARMV8_CRC32) +#if defined(CRC32_SIMD_SSE42_PCLMUL) || defined(CRC32_ARMV8_CRC32) \ + || defined(RISCV_RVV) /* We got to verify CPU features, so exploit the common usage pattern * of calling this function with Z_NULL for an initial valid crc value. * This allows to cache the result of the feature check and avoid extraneous diff --git a/deps/zlib/deflate.c b/deps/zlib/deflate.c index a67d195c5d46f2..b9a312030464c7 100644 --- a/deps/zlib/deflate.c +++ b/deps/zlib/deflate.c @@ -401,7 +401,8 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, // for all wrapper formats (e.g. RAW, ZLIB, GZIP). // Feature detection is not triggered while using RAW mode (i.e. we never // call crc32() with a NULL buffer). 
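The caller-side convention these comments refer to is the usual zlib idiom of seeding a checksum with a NULL buffer; in this fork that first call is also where the cached CPU-feature probe runs, which is why deflateInit2_() invokes cpu_check_features() directly for raw streams that never touch crc32(). A minimal sketch of the idiom (checksum() is an illustrative helper, not part of the patch):

    #include <stddef.h>
    #include "zlib.h"

    /* Seed with Z_NULL (returns the initial CRC value and, in this fork,
     * caches the result of the CPU feature probe), then checksum the data. */
    static unsigned long checksum(const unsigned char *buf, size_t len) {
        unsigned long crc = crc32(0L, Z_NULL, 0);
        return crc32(crc, buf, (uInt)len);
    }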
-#if defined(CRC32_ARMV8_CRC32) || defined(CRC32_SIMD_SSE42_PCLMUL) +#if defined(CRC32_ARMV8_CRC32) || defined(CRC32_SIMD_SSE42_PCLMUL) \ + || defined(RISCV_RVV) cpu_check_features(); #endif From 0c260e10e7a2264462626ef1beb9a5a46e43876c Mon Sep 17 00:00:00 2001 From: "Node.js GitHub Bot" Date: Wed, 17 Apr 2024 23:20:38 +0300 Subject: [PATCH 36/41] deps: update zlib to 1.3.0.1-motley-7d77fb7 PR-URL: https://github.com/nodejs/node/pull/52516 Reviewed-By: Marco Ippolito Reviewed-By: Mohammed Keyvanzadeh Reviewed-By: Luigi Pinca --- deps/zlib/BUILD.gn | 30 + deps/zlib/CMakeLists.txt | 37 +- deps/zlib/adler32_simd.c | 166 +++--- deps/zlib/contrib/optimizations/chunkcopy.h | 75 +++ deps/zlib/contrib/tests/utils_unittest.cc | 24 +- deps/zlib/examples/zpipe.c | 209 +++++++ deps/zlib/google/compression_utils.cc | 1 - deps/zlib/google/zip_reader_unittest.cc | 8 +- deps/zlib/test/minigzip.c | 579 ++++++++++++++++++++ 9 files changed, 1031 insertions(+), 98 deletions(-) create mode 100644 deps/zlib/examples/zpipe.c create mode 100644 deps/zlib/test/minigzip.c diff --git a/deps/zlib/BUILD.gn b/deps/zlib/BUILD.gn index 7fff5762e81b6a..f97ab45de2741f 100644 --- a/deps/zlib/BUILD.gn +++ b/deps/zlib/BUILD.gn @@ -441,6 +441,36 @@ executable("zlib_bench") { configs += [ "//build/config/compiler:no_chromium_code" ] } +executable("minigzip") { + include_dirs = [ "." ] + + sources = [ "test/minigzip.c" ] + if (!is_debug) { + configs -= [ "//build/config/compiler:default_optimization" ] + configs += [ "//build/config/compiler:optimize_speed" ] + } + + deps = [ ":zlib" ] + + configs -= [ "//build/config/compiler:chromium_code" ] + configs += [ "//build/config/compiler:no_chromium_code" ] +} + +executable("zpipe") { + include_dirs = [ "." ] + + sources = [ "examples/zpipe.c" ] + if (!is_debug) { + configs -= [ "//build/config/compiler:default_optimization" ] + configs += [ "//build/config/compiler:optimize_speed" ] + } + + deps = [ ":zlib" ] + + configs -= [ "//build/config/compiler:chromium_code" ] + configs += [ "//build/config/compiler:no_chromium_code" ] +} + if (!is_win || target_os != "winuwp") { executable("minizip_bin") { include_dirs = [ "." ] diff --git a/deps/zlib/CMakeLists.txt b/deps/zlib/CMakeLists.txt index c3f424770d92ce..66f7d04966afa5 100644 --- a/deps/zlib/CMakeLists.txt +++ b/deps/zlib/CMakeLists.txt @@ -26,6 +26,8 @@ option(ENABLE_SIMD_AVX512 "Enable SIMD AXV512 optimizations" OFF) option(USE_ZLIB_RABIN_KARP_HASH "Enable bitstream compatibility with canonical zlib" OFF) option(BUILD_UNITTESTS "Enable standalone unit tests build" OFF) option(BUILD_MINIZIP_BIN "Enable building minzip_bin tool" OFF) +option(BUILD_ZPIPE "Enable building zpipe tool" OFF) +option(BUILD_MINIGZIP "Enable building minigzip tool" OFF) if (USE_ZLIB_RABIN_KARP_HASH) add_definitions(-DUSE_ZLIB_RABIN_KARP_ROLLING_HASH) @@ -79,9 +81,16 @@ if (ENABLE_SIMD_OPTIMIZATIONS) add_definitions(-DRISCV_RVV) add_definitions(-DDEFLATE_SLIDE_HASH_RVV) add_definitions(-DADLER32_SIMD_RVV) - #TODO(cavalcantii): add remaining flags as we port optimizations to RVV. - # Required by CPU features detection code. - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --target=riscv64-unknown-linux-gnu -march=rv64gcv") + + # TODO(cavalcantii): add remaining flags as we port optimizations to RVV. + # chunk_copy is required for READ64 and unconditional decode of literals. + add_definitions(-DINFLATE_CHUNK_GENERIC) + add_definitions(-DINFLATE_CHUNK_READ_64LE) + + # Tested with clang-17, unaligned loads are required by read64 & chunk_copy. 
+ # TODO(cavalcantii): replace internal clang flags for -munaligned-access + # when we have a newer compiler available. + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --target=riscv64-unknown-linux-gnu -march=rv64gcv -Xclang -target-feature -Xclang +unaligned-scalar-mem") endif() endif() @@ -192,9 +201,14 @@ set(ZLIB_SRCS if (ENABLE_SIMD_OPTIMIZATIONS) if (CMAKE_SYSTEM_PROCESSOR STREQUAL "riscv64") message("RISCVV: Add optimizations.") + list(REMOVE_ITEM ZLIB_SRCS inflate.c) list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.h) + list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/chunkcopy.h) list(APPEND ZLIB_PRIVATE_HDRS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.h) + list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/adler32_simd.c) + list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inffast_chunk.c) + list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/optimizations/inflate.c) list(APPEND ZLIB_SRCS ${CMAKE_CURRENT_SOURCE_DIR}/cpu_features.c) else() list(REMOVE_ITEM ZLIB_SRCS inflate.c) @@ -339,7 +353,7 @@ if (BUILD_UNITTESTS) endif() #============================================================================ -# Minigzip tool +# Minizip tool #============================================================================ # TODO(cavalcantii): get it working on Windows. if (BUILD_MINIZIP_BIN) @@ -349,3 +363,18 @@ if (BUILD_MINIZIP_BIN) ) target_link_libraries(minizip_bin zlib) endif() + +#============================================================================ +# zpipe tool +#============================================================================ +if (BUILD_ZPIPE) + add_executable(zpipe examples/zpipe.c) + target_link_libraries(zpipe zlib) +endif() +#============================================================================ +# MiniGzip tool +#============================================================================ +if (BUILD_MINIGZIP) + add_executable(minigzip_bin test/minigzip.c) + target_link_libraries(minigzip_bin zlib) +endif() diff --git a/deps/zlib/adler32_simd.c b/deps/zlib/adler32_simd.c index 9970ea9ca71857..b3e1f0a3dfda01 100644 --- a/deps/zlib/adler32_simd.c +++ b/deps/zlib/adler32_simd.c @@ -41,9 +41,6 @@ * [2] zlib adler32_z() uses this fact to implement NMAX-block-based updates * of the adler s1 s2 of uint32_t type (see adler32.c). */ -/* Copyright (C) 2023 SiFive, Inc. All rights reserved. - * For conditions of distribution and use, see copyright notice in zlib.h - */ #include "adler32_simd.h" @@ -368,11 +365,10 @@ uint32_t ZLIB_INTERNAL adler32_simd_( /* NEON */ #elif defined(ADLER32_SIMD_RVV) #include -/* adler32_rvv.c - RVV version of Adler-32 - * RVV 1.0 code contributed by Alex Chiang - * on https://github.com/zlib-ng/zlib-ng/pull/1532 - * Port from Simon Hosie's fork: - * https://github.com/cloudflare/zlib/commit/40688b53c61cb9bfc36471acd2dc0800b7ebcab1 + +/* + * Patch by Simon Hosie, from: + * https://github.com/cloudflare/zlib/pull/55 */ uint32_t ZLIB_INTERNAL adler32_simd_( /* RVV */ @@ -380,91 +376,81 @@ uint32_t ZLIB_INTERNAL adler32_simd_( /* RVV */ const unsigned char *buf, unsigned long len) { - /* split Adler-32 into component sums */ - uint32_t sum2 = (adler >> 16) & 0xffff; - adler &= 0xffff; - - size_t left = len; - size_t vl = __riscv_vsetvlmax_e8m1(); - vl = vl > 256 ? 
256 : vl; - vuint32m4_t v_buf32_accu = __riscv_vmv_v_x_u32m4(0, vl); - vuint32m4_t v_adler32_prev_accu = __riscv_vmv_v_x_u32m4(0, vl); - vuint16m2_t v_buf16_accu; - - /* - * We accumulate 8-bit data, and to prevent overflow, we have to use a 32-bit accumulator. - * However, adding 8-bit data into a 32-bit accumulator isn't efficient. We use 16-bit & 32-bit - * accumulators to boost performance. - * - * The block_size is the largest multiple of vl that <= 256, because overflow would occur when - * vl > 256 (255 * 256 <= UINT16_MAX). - * - * We accumulate 8-bit data into a 16-bit accumulator and then - * move the data into the 32-bit accumulator at the last iteration. + size_t vl = __riscv_vsetvlmax_e8m2(); + const vuint16m4_t zero16 = __riscv_vmv_v_x_u16m4(0, vl); + vuint16m4_t a_sum = zero16; + vuint32m8_t b_sum = __riscv_vmv_v_x_u32m8(0, vl); + + /* Deal with the part which is not a multiple of vl first; because it's + * easier to zero-stuff the beginning of the checksum than it is to tweak the + * multipliers and sums for odd lengths afterwards. + */ + size_t head = len & (vl - 1); + if (head > 0) { + vuint8m2_t zero8 = __riscv_vmv_v_x_u8m2(0, vl); + vuint8m2_t in = __riscv_vle8_v_u8m2(buf, vl); + in = __riscv_vslideup(zero8, in, vl - head, vl); + vuint16m4_t in16 = __riscv_vwcvtu_x(in, vl); + a_sum = in16; + buf += head; + } + + /* We have a 32-bit accumulator, and in each iteration we add 22-times a + * 16-bit value, plus another 16-bit value. We periodically subtract up to + * 65535 times BASE to avoid overflow. b_overflow estimates how often we + * need to do this subtraction. + */ + const int b_overflow = BASE / 23; + int fixup = b_overflow; + ssize_t iters = (len - head) / vl; + while (iters > 0) { + const vuint16m4_t a_overflow = __riscv_vrsub(a_sum, BASE, vl); + int batch = iters < 22 ? iters : 22; + iters -= batch; + b_sum = __riscv_vwmaccu(b_sum, batch, a_sum, vl); + vuint16m4_t a_batch = zero16, b_batch = zero16; + + /* Do a short batch, where neither a_sum nor b_sum can overflow a 16-bit + * register. Then add them back into the main accumulators. 
*/ - size_t block_size = (256 / vl) * vl; - size_t nmax_limit = (NMAX / block_size); - size_t cnt = 0; - while (left >= block_size) { - v_buf16_accu = __riscv_vmv_v_x_u16m2(0, vl); - size_t subprob = block_size; - while (subprob > 0) { - vuint8m1_t v_buf8 = __riscv_vle8_v_u8m1(buf, vl); - v_adler32_prev_accu = __riscv_vwaddu_wv_u32m4(v_adler32_prev_accu, v_buf16_accu, vl); - v_buf16_accu = __riscv_vwaddu_wv_u16m2(v_buf16_accu, v_buf8, vl); - buf += vl; - subprob -= vl; - } - v_adler32_prev_accu = __riscv_vmacc_vx_u32m4(v_adler32_prev_accu, block_size / vl, v_buf32_accu, vl); - v_buf32_accu = __riscv_vwaddu_wv_u32m4(v_buf32_accu, v_buf16_accu, vl); - left -= block_size; - /* do modulo once each block of NMAX size */ - if (++cnt >= nmax_limit) { - v_adler32_prev_accu = __riscv_vremu_vx_u32m4(v_adler32_prev_accu, BASE, vl); - cnt = 0; - } + while (batch-- > 0) { + vuint8m2_t in8 = __riscv_vle8_v_u8m2(buf, vl); + buf += vl; + b_batch = __riscv_vadd(b_batch, a_batch, vl); + a_batch = __riscv_vwaddu_wv(a_batch, in8, vl); } - /* the left len <= 256 now, we can use 16-bit accum safely */ - v_buf16_accu = __riscv_vmv_v_x_u16m2(0, vl); - size_t res = left; - while (left >= vl) { - vuint8m1_t v_buf8 = __riscv_vle8_v_u8m1(buf, vl); - v_adler32_prev_accu = __riscv_vwaddu_wv_u32m4(v_adler32_prev_accu, v_buf16_accu, vl); - v_buf16_accu = __riscv_vwaddu_wv_u16m2(v_buf16_accu, v_buf8, vl); - buf += vl; - left -= vl; + vbool4_t ov = __riscv_vmsgeu(a_batch, a_overflow, vl); + a_sum = __riscv_vadd(a_sum, a_batch, vl); + a_sum = __riscv_vadd_mu(ov, a_sum, a_sum, 65536 - BASE, vl); + b_sum = __riscv_vwaddu_wv(b_sum, b_batch, vl); + if (--fixup <= 0) { + b_sum = __riscv_vnmsac(b_sum, BASE, __riscv_vsrl(b_sum, 16, vl), vl); + fixup = b_overflow; } - v_adler32_prev_accu = __riscv_vmacc_vx_u32m4(v_adler32_prev_accu, res / vl, v_buf32_accu, vl); - v_adler32_prev_accu = __riscv_vremu_vx_u32m4(v_adler32_prev_accu, BASE, vl); - v_buf32_accu = __riscv_vwaddu_wv_u32m4(v_buf32_accu, v_buf16_accu, vl); - - vuint32m4_t v_seq = __riscv_vid_v_u32m4(vl); - vuint32m4_t v_rev_seq = __riscv_vrsub_vx_u32m4(v_seq, vl, vl); - vuint32m4_t v_sum32_accu = __riscv_vmul_vv_u32m4(v_buf32_accu, v_rev_seq, vl); - - v_sum32_accu = __riscv_vadd_vv_u32m4(v_sum32_accu, __riscv_vmul_vx_u32m4(v_adler32_prev_accu, vl, vl), vl); - - vuint32m1_t v_sum2_sum = __riscv_vmv_s_x_u32m1(0, vl); - v_sum2_sum = __riscv_vredsum_vs_u32m4_u32m1(v_sum32_accu, v_sum2_sum, vl); - uint32_t sum2_sum = __riscv_vmv_x_s_u32m1_u32(v_sum2_sum); - - sum2 += (sum2_sum + adler * (len - left)); - - vuint32m1_t v_adler_sum = __riscv_vmv_s_x_u32m1(0, vl); - v_adler_sum = __riscv_vredsum_vs_u32m4_u32m1(v_buf32_accu, v_adler_sum, vl); - uint32_t adler_sum = __riscv_vmv_x_s_u32m1_u32(v_adler_sum); - - adler += adler_sum; - - while (left--) { - adler += *buf++; - sum2 += adler; - } - - sum2 %= BASE; - adler %= BASE; - - return adler | (sum2 << 16); + } + /* Adjust per-lane sums to have appropriate offsets from the end of the + * buffer. + */ + const vuint16m4_t off = __riscv_vrsub(__riscv_vid_v_u16m4(vl), vl, vl); + vuint16m4_t bsum16 = __riscv_vncvt_x(__riscv_vremu(b_sum, BASE, vl), vl); + b_sum = __riscv_vadd(__riscv_vwmulu(a_sum, off, vl), + __riscv_vwmulu(bsum16, vl, vl), vl); + bsum16 = __riscv_vncvt_x(__riscv_vremu(b_sum, BASE, vl), vl); + + /* And finally, do a horizontal sum across the registers for the final + * result. 
+ */ + uint32_t a = adler & 0xffff; + uint32_t b = ((adler >> 16) + a * (len % BASE)) % BASE; + vuint32m1_t sca = __riscv_vmv_v_x_u32m1(a, 1); + vuint32m1_t scb = __riscv_vmv_v_x_u32m1(b, 1); + sca = __riscv_vwredsumu(a_sum, sca, vl); + scb = __riscv_vwredsumu(bsum16, scb, vl); + a = __riscv_vmv_x(sca); + b = __riscv_vmv_x(scb); + a %= BASE; + b %= BASE; + return (b << 16) | a; } #endif /* ADLER32_SIMD_SSSE3 */ diff --git a/deps/zlib/contrib/optimizations/chunkcopy.h b/deps/zlib/contrib/optimizations/chunkcopy.h index f40546d54dbe77..97efff3a42496d 100644 --- a/deps/zlib/contrib/optimizations/chunkcopy.h +++ b/deps/zlib/contrib/optimizations/chunkcopy.h @@ -21,8 +21,10 @@ #if defined(__clang__) || defined(__GNUC__) || defined(__llvm__) #define Z_BUILTIN_MEMCPY __builtin_memcpy +#define Z_BUILTIN_MEMSET __builtin_memset #else #define Z_BUILTIN_MEMCPY zmemcpy +#define Z_BUILTIN_MEMSET zmemset #endif #if defined(INFLATE_CHUNK_SIMD_NEON) @@ -31,6 +33,8 @@ typedef uint8x16_t z_vec128i_t; #elif defined(INFLATE_CHUNK_SIMD_SSE2) #include typedef __m128i z_vec128i_t; +#elif defined(INFLATE_CHUNK_GENERIC) +typedef struct { uint8_t x[16]; } z_vec128i_t; #else #error chunkcopy.h inflate chunk SIMD is not defined for your build target #endif @@ -265,6 +269,77 @@ static inline z_vec128i_t v_load8_dup(const void* src) { static inline void v_store_128(void* out, const z_vec128i_t vec) { _mm_storeu_si128((__m128i*)out, vec); } +#elif defined(INFLATE_CHUNK_GENERIC) +/* + * Default implementations for chunk-copy functions rely on memcpy() being + * inlined by the compiler for best performance. This is most likely to work + * as expected when the length argument is constant (as is the case here) and + * the target supports unaligned loads and stores. Since that's not always a + * safe assumption, this may need extra compiler arguments such as + * `-mno-strict-align` or `-munaligned-access`, or the availability of + * extensions like SIMD. + */ + +/* + * v_load64_dup(): load *src as an unaligned 64-bit int and duplicate it in + * every 64-bit component of the 128-bit result (64-bit int splat). + */ +static inline z_vec128i_t v_load64_dup(const void* src) { + int64_t in; + Z_BUILTIN_MEMCPY(&in, src, sizeof(in)); + z_vec128i_t out; + for (int i = 0; i < sizeof(out); i += sizeof(in)) { + Z_BUILTIN_MEMCPY((uint8_t*)&out + i, &in, sizeof(in)); + } + return out; +} + +/* + * v_load32_dup(): load *src as an unaligned 32-bit int and duplicate it in + * every 32-bit component of the 128-bit result (32-bit int splat). + */ +static inline z_vec128i_t v_load32_dup(const void* src) { + int32_t in; + Z_BUILTIN_MEMCPY(&in, src, sizeof(in)); + z_vec128i_t out; + for (int i = 0; i < sizeof(out); i += sizeof(in)) { + Z_BUILTIN_MEMCPY((uint8_t*)&out + i, &in, sizeof(in)); + } + return out; +} + +/* + * v_load16_dup(): load *src as an unaligned 16-bit int and duplicate it in + * every 16-bit component of the 128-bit result (16-bit int splat). + */ +static inline z_vec128i_t v_load16_dup(const void* src) { + int16_t in; + Z_BUILTIN_MEMCPY(&in, src, sizeof(in)); + z_vec128i_t out; + for (int i = 0; i < sizeof(out); i += sizeof(in)) { + Z_BUILTIN_MEMCPY((uint8_t*)&out + i, &in, sizeof(in)); + } + return out; +} + +/* + * v_load8_dup(): load the 8-bit int *src and duplicate it in every 8-bit + * component of the 128-bit result (8-bit int splat). 
+ */ +static inline z_vec128i_t v_load8_dup(const void* src) { + int8_t in = *(const uint8_t*)src; + z_vec128i_t out; + Z_BUILTIN_MEMSET(&out, in, sizeof(out)); + return out; +} + +/* + * v_store_128(): store the 128-bit vec in a memory destination (that might + * not be 16-byte aligned) void* out. + */ +static inline void v_store_128(void* out, const z_vec128i_t vec) { + Z_BUILTIN_MEMCPY(out, &vec, sizeof(vec)); +} #endif /* diff --git a/deps/zlib/contrib/tests/utils_unittest.cc b/deps/zlib/contrib/tests/utils_unittest.cc index 0cc10813775f3e..f487a06996c98b 100644 --- a/deps/zlib/contrib/tests/utils_unittest.cc +++ b/deps/zlib/contrib/tests/utils_unittest.cc @@ -20,7 +20,8 @@ #include "zlib.h" -void TestPayloads(size_t input_size, zlib_internal::WrapperType type) { +void TestPayloads(size_t input_size, zlib_internal::WrapperType type, + const int compression_level = Z_DEFAULT_COMPRESSION) { std::vector input; input.reserve(input_size); for (size_t i = 1; i <= input_size; ++i) @@ -36,7 +37,7 @@ void TestPayloads(size_t input_size, zlib_internal::WrapperType type) { unsigned long compressed_size = static_cast(compressed.size()); int result = zlib_internal::CompressHelper( type, compressed.data(), &compressed_size, input.data(), input.size(), - Z_DEFAULT_COMPRESSION, nullptr, nullptr); + compression_level, nullptr, nullptr); ASSERT_EQ(result, Z_OK); unsigned long decompressed_size = @@ -67,6 +68,25 @@ TEST(ZlibTest, RawWrapper) { TestPayloads(i, zlib_internal::WrapperType::ZRAW); } +TEST(ZlibTest, LargePayloads) { + static const size_t lengths[] = { 6000, 8000, 10'000, 15'000, 20'000, 30'000, + 50'000, 100'000, 150'000, 2'500'000, + 5'000'000, 10'000'000, 20'000'000 }; + + for (size_t length: lengths) { + TestPayloads(length, zlib_internal::WrapperType::ZLIB); + TestPayloads(length, zlib_internal::WrapperType::GZIP); + } +} + +TEST(ZlibTest, CompressionLevels) { + static const int levels[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + for (int level: levels) { + TestPayloads(5'000'000, zlib_internal::WrapperType::ZLIB, level); + TestPayloads(5'000'000, zlib_internal::WrapperType::GZIP, level); + } +} + TEST(ZlibTest, InflateCover) { cover_support(); cover_wrap(); diff --git a/deps/zlib/examples/zpipe.c b/deps/zlib/examples/zpipe.c new file mode 100644 index 00000000000000..51dec4745772e5 --- /dev/null +++ b/deps/zlib/examples/zpipe.c @@ -0,0 +1,209 @@ +/* zpipe.c: example of proper use of zlib's inflate() and deflate() + Not copyrighted -- provided to the public domain + Version 1.4 11 December 2005 Mark Adler */ + +/* Version history: + 1.0 30 Oct 2004 First version + 1.1 8 Nov 2004 Add void casting for unused return values + Use switch statement for inflate() return values + 1.2 9 Nov 2004 Add assertions to document zlib guarantees + 1.3 6 Apr 2005 Remove incorrect assertion in inf() + 1.4 11 Dec 2005 Add hack to avoid MSDOS end-of-line conversions + Avoid some compiler warnings for input and output buffers + */ + +#if defined(_WIN32) && !defined(_CRT_NONSTDC_NO_DEPRECATE) +# define _CRT_NONSTDC_NO_DEPRECATE +#endif + +#include +#include +#include +#include "zlib.h" + +#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(__CYGWIN__) +# include +# include +# define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY) +#else +# define SET_BINARY_MODE(file) +#endif + +#define CHUNK 16384 + +/* Compress from file source to file dest until EOF on source. 
+ def() returns Z_OK on success, Z_MEM_ERROR if memory could not be + allocated for processing, Z_STREAM_ERROR if an invalid compression + level is supplied, Z_VERSION_ERROR if the version of zlib.h and the + version of the library linked do not match, or Z_ERRNO if there is + an error reading or writing the files. */ +int def(FILE *source, FILE *dest, int level) +{ + int ret, flush; + unsigned have; + z_stream strm; + unsigned char in[CHUNK]; + unsigned char out[CHUNK]; + + /* allocate deflate state */ + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + ret = deflateInit(&strm, level); + if (ret != Z_OK) + return ret; + + /* compress until end of file */ + do { + strm.avail_in = fread(in, 1, CHUNK, source); + if (ferror(source)) { + (void)deflateEnd(&strm); + return Z_ERRNO; + } + flush = feof(source) ? Z_FINISH : Z_NO_FLUSH; + strm.next_in = in; + + /* run deflate() on input until output buffer not full, finish + compression if all of source has been read in */ + do { + strm.avail_out = CHUNK; + strm.next_out = out; + ret = deflate(&strm, flush); /* no bad return value */ + assert(ret != Z_STREAM_ERROR); /* state not clobbered */ + have = CHUNK - strm.avail_out; + if (fwrite(out, 1, have, dest) != have || ferror(dest)) { + (void)deflateEnd(&strm); + return Z_ERRNO; + } + } while (strm.avail_out == 0); + assert(strm.avail_in == 0); /* all input will be used */ + + /* done when last data in file processed */ + } while (flush != Z_FINISH); + assert(ret == Z_STREAM_END); /* stream will be complete */ + + /* clean up and return */ + (void)deflateEnd(&strm); + return Z_OK; +} + +/* Decompress from file source to file dest until stream ends or EOF. + inf() returns Z_OK on success, Z_MEM_ERROR if memory could not be + allocated for processing, Z_DATA_ERROR if the deflate data is + invalid or incomplete, Z_VERSION_ERROR if the version of zlib.h and + the version of the library linked do not match, or Z_ERRNO if there + is an error reading or writing the files. */ +int inf(FILE *source, FILE *dest) +{ + int ret; + unsigned have; + z_stream strm; + unsigned char in[CHUNK]; + unsigned char out[CHUNK]; + + /* allocate inflate state */ + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + strm.avail_in = 0; + strm.next_in = Z_NULL; + ret = inflateInit(&strm); + if (ret != Z_OK) + return ret; + + /* decompress until deflate stream ends or end of file */ + do { + strm.avail_in = fread(in, 1, CHUNK, source); + if (ferror(source)) { + (void)inflateEnd(&strm); + return Z_ERRNO; + } + if (strm.avail_in == 0) + break; + strm.next_in = in; + + /* run inflate() on input until output buffer not full */ + do { + strm.avail_out = CHUNK; + strm.next_out = out; + ret = inflate(&strm, Z_NO_FLUSH); + assert(ret != Z_STREAM_ERROR); /* state not clobbered */ + switch (ret) { + case Z_NEED_DICT: + ret = Z_DATA_ERROR; /* and fall through */ + case Z_DATA_ERROR: + case Z_MEM_ERROR: + (void)inflateEnd(&strm); + return ret; + } + have = CHUNK - strm.avail_out; + if (fwrite(out, 1, have, dest) != have || ferror(dest)) { + (void)inflateEnd(&strm); + return Z_ERRNO; + } + } while (strm.avail_out == 0); + + /* done when inflate() says it's done */ + } while (ret != Z_STREAM_END); + + /* clean up and return */ + (void)inflateEnd(&strm); + return ret == Z_STREAM_END ? 
Z_OK : Z_DATA_ERROR; +} + +/* report a zlib or i/o error */ +void zerr(int ret) +{ + fputs("zpipe: ", stderr); + switch (ret) { + case Z_ERRNO: + if (ferror(stdin)) + fputs("error reading stdin\n", stderr); + if (ferror(stdout)) + fputs("error writing stdout\n", stderr); + break; + case Z_STREAM_ERROR: + fputs("invalid compression level\n", stderr); + break; + case Z_DATA_ERROR: + fputs("invalid or incomplete deflate data\n", stderr); + break; + case Z_MEM_ERROR: + fputs("out of memory\n", stderr); + break; + case Z_VERSION_ERROR: + fputs("zlib version mismatch!\n", stderr); + } +} + +/* compress or decompress from stdin to stdout */ +int main(int argc, char **argv) +{ + int ret; + + /* avoid end-of-line conversions */ + SET_BINARY_MODE(stdin); + SET_BINARY_MODE(stdout); + + /* do compression if no arguments */ + if (argc == 1) { + ret = def(stdin, stdout, Z_DEFAULT_COMPRESSION); + if (ret != Z_OK) + zerr(ret); + return ret; + } + + /* do decompression if -d specified */ + else if (argc == 2 && strcmp(argv[1], "-d") == 0) { + ret = inf(stdin, stdout); + if (ret != Z_OK) + zerr(ret); + return ret; + } + + /* otherwise, report usage */ + else { + fputs("zpipe usage: zpipe [-d] < source > dest\n", stderr); + return 1; + } +} diff --git a/deps/zlib/google/compression_utils.cc b/deps/zlib/google/compression_utils.cc index c2b17e4ced6f22..0ba31101489fde 100644 --- a/deps/zlib/google/compression_utils.cc +++ b/deps/zlib/google/compression_utils.cc @@ -6,7 +6,6 @@ #include "base/check_op.h" #include "base/process/memory.h" -#include "base/sys_byteorder.h" #include "third_party/zlib/google/compression_utils_portable.h" diff --git a/deps/zlib/google/zip_reader_unittest.cc b/deps/zlib/google/zip_reader_unittest.cc index e6f89d7e4faaaa..9eb7d7d2b10e05 100644 --- a/deps/zlib/google/zip_reader_unittest.cc +++ b/deps/zlib/google/zip_reader_unittest.cc @@ -72,7 +72,7 @@ class FileWrapper { // A mock that provides methods that can be used as callbacks in asynchronous // unzip functions. Tracks the number of calls and number of bytes reported. // Assumes that progress callbacks will be executed in-order. -class MockUnzipListener : public base::SupportsWeakPtr { +class MockUnzipListener final { public: MockUnzipListener() : success_calls_(0), @@ -98,12 +98,18 @@ class MockUnzipListener : public base::SupportsWeakPtr { int progress_calls() { return progress_calls_; } int current_progress() { return current_progress_; } + base::WeakPtr AsWeakPtr() { + return weak_ptr_factory_.GetWeakPtr(); + } + private: int success_calls_; int failure_calls_; int progress_calls_; int64_t current_progress_; + + base::WeakPtrFactory weak_ptr_factory_{this}; }; class MockWriterDelegate : public zip::WriterDelegate { diff --git a/deps/zlib/test/minigzip.c b/deps/zlib/test/minigzip.c new file mode 100644 index 00000000000000..c72356dbccf255 --- /dev/null +++ b/deps/zlib/test/minigzip.c @@ -0,0 +1,579 @@ +/* minigzip.c -- simulate gzip using the zlib compression library + * Copyright (C) 1995-2006, 2010, 2011, 2016 Jean-loup Gailly + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * minigzip is a minimal implementation of the gzip utility. This is + * only an example of using zlib and isn't meant to replace the + * full-featured gzip. No attempt is made to deal with file systems + * limiting names to 14 or 8+3 characters, etc... Error checking is + * very limited. So use minigzip only for testing; use gzip for the + * real thing. 
On MSDOS, use only on file names without extension + * or in pipe mode. + */ + +/* @(#) $Id$ */ + +#include "zlib.h" +#include + +#ifdef STDC +# include +# include +#endif + +#ifdef USE_MMAP +# include +# include +# include +#endif + +#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(__CYGWIN__) +# include +# include +# ifdef UNDER_CE +# include +# endif +# define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY) +#else +# define SET_BINARY_MODE(file) +#endif + +#if defined(_MSC_VER) && _MSC_VER < 1900 +# define snprintf _snprintf +#endif + +#ifdef VMS +# define unlink delete +# define GZ_SUFFIX "-gz" +#endif +#ifdef RISCOS +# define unlink remove +# define GZ_SUFFIX "-gz" +# define fileno(file) file->__file +#endif +#if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os +# include /* for fileno */ +#endif + +#if !defined(Z_HAVE_UNISTD_H) && !defined(_LARGEFILE64_SOURCE) +#ifndef WIN32 /* unlink already in stdio.h for WIN32 */ + extern int unlink(const char *); +#endif +#endif + +#if defined(UNDER_CE) +# include +# define perror(s) pwinerror(s) + +/* Map the Windows error number in ERROR to a locale-dependent error + message string and return a pointer to it. Typically, the values + for ERROR come from GetLastError. + + The string pointed to shall not be modified by the application, + but may be overwritten by a subsequent call to strwinerror + + The strwinerror function does not change the current setting + of GetLastError. */ + +static char *strwinerror (error) + DWORD error; +{ + static char buf[1024]; + + wchar_t *msgbuf; + DWORD lasterr = GetLastError(); + DWORD chars = FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM + | FORMAT_MESSAGE_ALLOCATE_BUFFER, + NULL, + error, + 0, /* Default language */ + (LPVOID)&msgbuf, + 0, + NULL); + if (chars != 0) { + /* If there is an \r\n appended, zap it. */ + if (chars >= 2 + && msgbuf[chars - 2] == '\r' && msgbuf[chars - 1] == '\n') { + chars -= 2; + msgbuf[chars] = 0; + } + + if (chars > sizeof (buf) - 1) { + chars = sizeof (buf) - 1; + msgbuf[chars] = 0; + } + + wcstombs(buf, msgbuf, chars + 1); + LocalFree(msgbuf); + } + else { + sprintf(buf, "unknown win32 error (%ld)", error); + } + + SetLastError(lasterr); + return buf; +} + +static void pwinerror (s) + const char *s; +{ + if (s && *s) + fprintf(stderr, "%s: %s\n", s, strwinerror(GetLastError ())); + else + fprintf(stderr, "%s\n", strwinerror(GetLastError ())); +} + +#endif /* UNDER_CE */ + +#ifndef GZ_SUFFIX +# define GZ_SUFFIX ".gz" +#endif +#define SUFFIX_LEN (sizeof(GZ_SUFFIX)-1) + +#define BUFLEN 16384 +#define MAX_NAME_LEN 1024 + +#ifdef MAXSEG_64K +# define local static + /* Needed for systems with limitation on stack size. 
*/ +#else +# define local +#endif + +#ifdef Z_SOLO +/* for Z_SOLO, create simplified gz* functions using deflate and inflate */ + +#if defined(Z_HAVE_UNISTD_H) || defined(Z_LARGE) +# include /* for unlink() */ +#endif + +static void *myalloc(void *q, unsigned n, unsigned m) { + (void)q; + return calloc(n, m); +} + +static void myfree(void *q, void *p) { + (void)q; + free(p); +} + +typedef struct gzFile_s { + FILE *file; + int write; + int err; + char *msg; + z_stream strm; +} *gzFile; + +static gzFile gz_open(const char *path, int fd, const char *mode) { + gzFile gz; + int ret; + + gz = malloc(sizeof(struct gzFile_s)); + if (gz == NULL) + return NULL; + gz->write = strchr(mode, 'w') != NULL; + gz->strm.zalloc = myalloc; + gz->strm.zfree = myfree; + gz->strm.opaque = Z_NULL; + if (gz->write) + ret = deflateInit2(&(gz->strm), -1, 8, 15 + 16, 8, 0); + else { + gz->strm.next_in = 0; + gz->strm.avail_in = Z_NULL; + ret = inflateInit2(&(gz->strm), 15 + 16); + } + if (ret != Z_OK) { + free(gz); + return NULL; + } + gz->file = path == NULL ? fdopen(fd, gz->write ? "wb" : "rb") : + fopen(path, gz->write ? "wb" : "rb"); + if (gz->file == NULL) { + gz->write ? deflateEnd(&(gz->strm)) : inflateEnd(&(gz->strm)); + free(gz); + return NULL; + } + gz->err = 0; + gz->msg = ""; + return gz; +} + +static gzFile gzopen(const char *path, const char *mode) { + return gz_open(path, -1, mode); +} + +static gzFile gzdopen(int fd, const char *mode) { + return gz_open(NULL, fd, mode); +} + +static int gzwrite(gzFile gz, const void *buf, unsigned len) { + z_stream *strm; + unsigned char out[BUFLEN]; + + if (gz == NULL || !gz->write) + return 0; + strm = &(gz->strm); + strm->next_in = (void *)buf; + strm->avail_in = len; + do { + strm->next_out = out; + strm->avail_out = BUFLEN; + (void)deflate(strm, Z_NO_FLUSH); + fwrite(out, 1, BUFLEN - strm->avail_out, gz->file); + } while (strm->avail_out == 0); + return len; +} + +static int gzread(gzFile gz, void *buf, unsigned len) { + int ret; + unsigned got; + unsigned char in[1]; + z_stream *strm; + + if (gz == NULL || gz->write) + return 0; + if (gz->err) + return 0; + strm = &(gz->strm); + strm->next_out = (void *)buf; + strm->avail_out = len; + do { + got = fread(in, 1, 1, gz->file); + if (got == 0) + break; + strm->next_in = in; + strm->avail_in = 1; + ret = inflate(strm, Z_NO_FLUSH); + if (ret == Z_DATA_ERROR) { + gz->err = Z_DATA_ERROR; + gz->msg = strm->msg; + return 0; + } + if (ret == Z_STREAM_END) + inflateReset(strm); + } while (strm->avail_out); + return len - strm->avail_out; +} + +static int gzclose(gzFile gz) { + z_stream *strm; + unsigned char out[BUFLEN]; + + if (gz == NULL) + return Z_STREAM_ERROR; + strm = &(gz->strm); + if (gz->write) { + strm->next_in = Z_NULL; + strm->avail_in = 0; + do { + strm->next_out = out; + strm->avail_out = BUFLEN; + (void)deflate(strm, Z_FINISH); + fwrite(out, 1, BUFLEN - strm->avail_out, gz->file); + } while (strm->avail_out == 0); + deflateEnd(strm); + } + else + inflateEnd(strm); + fclose(gz->file); + free(gz); + return Z_OK; +} + +static const char *gzerror(gzFile gz, int *err) { + *err = gz->err; + return gz->msg; +} + +#endif + +static char *prog; + +/* =========================================================================== + * Display error message and exit + */ +static void error(const char *msg) { + fprintf(stderr, "%s: %s\n", prog, msg); + exit(1); +} + +#ifdef USE_MMAP /* MMAP version, Miguel Albrecht */ + +/* Try compressing the input file at once using mmap. Return Z_OK if + * if success, Z_ERRNO otherwise. 
+ */ +static int gz_compress_mmap(FILE *in, gzFile out) { + int len; + int err; + int ifd = fileno(in); + caddr_t buf; /* mmap'ed buffer for the entire input file */ + off_t buf_len; /* length of the input file */ + struct stat sb; + + /* Determine the size of the file, needed for mmap: */ + if (fstat(ifd, &sb) < 0) return Z_ERRNO; + buf_len = sb.st_size; + if (buf_len <= 0) return Z_ERRNO; + + /* Now do the actual mmap: */ + buf = mmap((caddr_t) 0, buf_len, PROT_READ, MAP_SHARED, ifd, (off_t)0); + if (buf == (caddr_t)(-1)) return Z_ERRNO; + + /* Compress the whole file at once: */ + len = gzwrite(out, (char *)buf, (unsigned)buf_len); + + if (len != (int)buf_len) error(gzerror(out, &err)); + + munmap(buf, buf_len); + fclose(in); + if (gzclose(out) != Z_OK) error("failed gzclose"); + return Z_OK; +} +#endif /* USE_MMAP */ + +/* =========================================================================== + * Compress input to output then close both files. + */ + +static void gz_compress(FILE *in, gzFile out) { + local char buf[BUFLEN]; + int len; + int err; + +#ifdef USE_MMAP + /* Try first compressing with mmap. If mmap fails (minigzip used in a + * pipe), use the normal fread loop. + */ + if (gz_compress_mmap(in, out) == Z_OK) return; +#endif + for (;;) { + len = (int)fread(buf, 1, sizeof(buf), in); + if (ferror(in)) { + perror("fread"); + exit(1); + } + if (len == 0) break; + + if (gzwrite(out, buf, (unsigned)len) != len) error(gzerror(out, &err)); + } + fclose(in); + if (gzclose(out) != Z_OK) error("failed gzclose"); +} + +/* =========================================================================== + * Uncompress input to output then close both files. + */ +static void gz_uncompress(gzFile in, FILE *out) { + local char buf[BUFLEN]; + int len; + int err; + + for (;;) { + len = gzread(in, buf, sizeof(buf)); + if (len < 0) error (gzerror(in, &err)); + if (len == 0) break; + + if ((int)fwrite(buf, 1, (unsigned)len, out) != len) { + error("failed fwrite"); + } + } + if (fclose(out)) error("failed fclose"); + + if (gzclose(in) != Z_OK) error("failed gzclose"); +} + + +/* =========================================================================== + * Compress the given file: create a corresponding .gz file and remove the + * original. + */ +static void file_compress(char *file, char *mode) { + local char outfile[MAX_NAME_LEN]; + FILE *in; + gzFile out; + + if (strlen(file) + strlen(GZ_SUFFIX) >= sizeof(outfile)) { + fprintf(stderr, "%s: filename too long\n", prog); + exit(1); + } + +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(outfile, sizeof(outfile), "%s%s", file, GZ_SUFFIX); +#else + strcpy(outfile, file); + strcat(outfile, GZ_SUFFIX); +#endif + + in = fopen(file, "rb"); + if (in == NULL) { + perror(file); + exit(1); + } + out = gzopen(outfile, mode); + if (out == NULL) { + fprintf(stderr, "%s: can't gzopen %s\n", prog, outfile); + exit(1); + } + gz_compress(in, out); + + unlink(file); +} + + +/* =========================================================================== + * Uncompress the given file and remove the original. 
+ */ +static void file_uncompress(char *file) { + local char buf[MAX_NAME_LEN]; + char *infile, *outfile; + FILE *out; + gzFile in; + z_size_t len = strlen(file); + + if (len + strlen(GZ_SUFFIX) >= sizeof(buf)) { + fprintf(stderr, "%s: filename too long\n", prog); + exit(1); + } + +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(buf, sizeof(buf), "%s", file); +#else + strcpy(buf, file); +#endif + + if (len > SUFFIX_LEN && strcmp(file+len-SUFFIX_LEN, GZ_SUFFIX) == 0) { + infile = file; + outfile = buf; + outfile[len-3] = '\0'; + } else { + outfile = file; + infile = buf; +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(buf + len, sizeof(buf) - len, "%s", GZ_SUFFIX); +#else + strcat(infile, GZ_SUFFIX); +#endif + } + in = gzopen(infile, "rb"); + if (in == NULL) { + fprintf(stderr, "%s: can't gzopen %s\n", prog, infile); + exit(1); + } + out = fopen(outfile, "wb"); + if (out == NULL) { + perror(file); + exit(1); + } + + gz_uncompress(in, out); + + unlink(infile); +} + + +/* =========================================================================== + * Usage: minigzip [-c] [-d] [-f] [-h] [-r] [-1 to -9] [files...] + * -c : write to standard output + * -d : decompress + * -f : compress with Z_FILTERED + * -h : compress with Z_HUFFMAN_ONLY + * -r : compress with Z_RLE + * -1 to -9 : compression level + */ + +int main(int argc, char *argv[]) { + int copyout = 0; + int uncompr = 0; + gzFile file; + char *bname, outmode[20]; + +#if !defined(NO_snprintf) && !defined(NO_vsnprintf) + snprintf(outmode, sizeof(outmode), "%s", "wb6 "); +#else + strcpy(outmode, "wb6 "); +#endif + + prog = argv[0]; + bname = strrchr(argv[0], '/'); + if (bname) + bname++; + else + bname = argv[0]; + argc--, argv++; + + if (!strcmp(bname, "gunzip")) + uncompr = 1; + else if (!strcmp(bname, "zcat")) + copyout = uncompr = 1; + + while (argc > 0) { + if (strcmp(*argv, "-c") == 0) + copyout = 1; + else if (strcmp(*argv, "-d") == 0) + uncompr = 1; + else if (strcmp(*argv, "-f") == 0) + outmode[3] = 'f'; + else if (strcmp(*argv, "-h") == 0) + outmode[3] = 'h'; + else if (strcmp(*argv, "-r") == 0) + outmode[3] = 'R'; + else if ((*argv)[0] == '-' && (*argv)[1] >= '1' && (*argv)[1] <= '9' && + (*argv)[2] == 0) + outmode[2] = (*argv)[1]; + else + break; + argc--, argv++; + } + if (outmode[3] == ' ') + outmode[3] = 0; + if (argc == 0) { + SET_BINARY_MODE(stdin); + SET_BINARY_MODE(stdout); + if (uncompr) { + file = gzdopen(fileno(stdin), "rb"); + if (file == NULL) error("can't gzdopen stdin"); + gz_uncompress(file, stdout); + } else { + file = gzdopen(fileno(stdout), outmode); + if (file == NULL) error("can't gzdopen stdout"); + gz_compress(stdin, file); + } + } else { + if (copyout) { + SET_BINARY_MODE(stdout); + } + do { + if (uncompr) { + if (copyout) { + file = gzopen(*argv, "rb"); + if (file == NULL) + fprintf(stderr, "%s: can't gzopen %s\n", prog, *argv); + else + gz_uncompress(file, stdout); + } else { + file_uncompress(*argv); + } + } else { + if (copyout) { + FILE * in = fopen(*argv, "rb"); + + if (in == NULL) { + perror(*argv); + } else { + file = gzdopen(fileno(stdout), outmode); + if (file == NULL) error("can't gzdopen stdout"); + + gz_compress(in, file); + } + + } else { + file_compress(*argv, outmode); + } + } + } while (argv++, --argc); + } + return 0; +} From 1147fee7d95ce18f483186fff0c4b668ce843363 Mon Sep 17 00:00:00 2001 From: Jamie King Date: Sun, 17 Mar 2024 05:10:27 -0700 Subject: [PATCH 37/41] doc: remove ableist language from crypto MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit PR-URL: https://github.com/nodejs/node/pull/52063 Reviewed-By: Richard Lau Reviewed-By: Marco Ippolito Reviewed-By: Akhil Marsonya Reviewed-By: Tobias Nießen Reviewed-By: Luigi Pinca --- doc/api/crypto.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/api/crypto.md b/doc/api/crypto.md index c22b6c3bedc429..4db3787c477f93 100644 --- a/doc/api/crypto.md +++ b/doc/api/crypto.md @@ -5868,7 +5868,7 @@ See the [list of SSL OP Flags][] for details. SSL_OP_CISCO_ANYCONNECT - Instructs OpenSSL to use Cisco's "speshul" version of DTLS_BAD_VER. + Instructs OpenSSL to use Cisco's version identifier of DTLS_BAD_VER. SSL_OP_COOKIE_EXCHANGE From 351ef189cab72b2bc1d16d3a03124399ac9886e7 Mon Sep 17 00:00:00 2001 From: Luke Albao Date: Wed, 25 Oct 2023 16:37:54 -0700 Subject: [PATCH 38/41] test: v8: Add test-linux-perf-logger test suite MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cherry-picked from 9c714d8232 PR-URL: https://github.com/nodejs/node/pull/50352 Backport-PR-URL: https://github.com/nodejs/node/pull/52925 Reviewed-By: Michael Dawson Reviewed-By: Richard Lau Reviewed-By: Vinícius Lourenço Claro Cardoso --- test/fixtures/linux-perf-logger.js | 17 +++ test/v8-updates/test-linux-perf-logger.js | 152 ++++++++++++++++++++++ 2 files changed, 169 insertions(+) create mode 100644 test/fixtures/linux-perf-logger.js create mode 100644 test/v8-updates/test-linux-perf-logger.js diff --git a/test/fixtures/linux-perf-logger.js b/test/fixtures/linux-perf-logger.js new file mode 100644 index 00000000000000..d39f9e0cc45b05 --- /dev/null +++ b/test/fixtures/linux-perf-logger.js @@ -0,0 +1,17 @@ +'use strict'; + +process.stdout.write(`${process.pid}`); + +const testRegex = /test-regex/gi; + +function functionOne() { + for (let i = 0; i < 100; i++) { + const match = testRegex.exec(Math.random().toString()); + } +} + +function functionTwo() { + functionOne(); +} + +functionTwo(); diff --git a/test/v8-updates/test-linux-perf-logger.js b/test/v8-updates/test-linux-perf-logger.js new file mode 100644 index 00000000000000..2cd7ee3a85e0eb --- /dev/null +++ b/test/v8-updates/test-linux-perf-logger.js @@ -0,0 +1,152 @@ +'use strict'; + +// --- About this test suite +// +// JIT support for perf(1) was added in 2009 (see https://lkml.org/lkml/2009/6/8/499). +// It works by looking for a perf map file in /tmp/perf-.map, where is the +// PID of the target process. +// +// The structure of this file is stable. Perf expects each line to specify a symbol +// in the form: +// +// +// +// where is the hex representation of the instruction pointer for the beginning +// of the function, is the byte length of the function, and is the +// readable JIT name used for reporting. +// +// This file asserts that a node script run with the appropriate flags will produce +// a compliant perf map. +// +// NOTE: This test runs only on linux, as that is the only platform supported by perf, and +// accordingly the only platform where `perf-basic-prof*` v8 flags are available. +// +// MAINTAINERS' NOTE: As of early 2024, the most common failure mode for this test suite +// is for v8 options to change from version to version. If this suite fails, look there first. +// We use options to forcibly require certain test cases to JIT code, and the nodeFlags to do +// so can change. 
+ +const common = require('../common'); +if (!common.isLinux) { + common.skip('--perf-basic-prof* is statically defined as linux-only'); +} + +const assert = require('assert'); +const { spawnSync } = require('child_process'); +const { readFileSync } = require('fs'); + +const fixtures = require('../common/fixtures'); +const tmpdir = require('../common/tmpdir'); +tmpdir.refresh(); + +const testCases = [ + { + title: '--perf-basic-prof interpreted', + nodeFlags: ['--perf-basic-prof', '--no-turbo-inlining', '--no-opt'], + matches: [ + '~functionOne .+/linux-perf-logger.js', + '~functionTwo .+/linux-perf-logger.js', + 'test-regex', + ], + noMatches: ['\\*functionOne', '\\*functionTwo'], + }, + { + title: '--perf-basic-prof compiled', + nodeFlags: ['--perf-basic-prof', '--no-turbo-inlining', '--always-opt'], + matches: [ + 'test-regex', + '~functionOne .+/linux-perf-logger.js', + '~functionTwo .+/linux-perf-logger.js', + '\\*functionOne .+/linux-perf-logger.js', + '\\*functionTwo .+/linux-perf-logger.js', + ], + noMatches: [], + }, + { + title: '--perf-basic-prof-only-functions interpreted', + nodeFlags: ['--perf-basic-prof-only-functions', '--no-turbo-inlining', '--no-opt'], + matches: ['~functionOne .+/linux-perf-logger.js', '~functionTwo .+/linux-perf-logger.js'], + noMatches: ['\\*functionOne', '\\*functionTwo', 'test-regex'], + }, + { + title: '--perf-basic-prof-only-functions compiled', + nodeFlags: ['--perf-basic-prof-only-functions', '--no-turbo-inlining', '--always-opt'], + matches: [ + '~functionOne .+/linux-perf-logger.js', + '~functionTwo .+/linux-perf-logger.js', + '\\*functionOne .+/linux-perf-logger.js', + '\\*functionTwo .+/linux-perf-logger.js', + ], + noMatches: ['test-regex'], + }, +]; + +function runTest(test) { + const report = { + title: test.title, + perfMap: '[uninitialized]', + errors: [], + }; + + const args = test.nodeFlags.concat(fixtures.path('linux-perf-logger.js')); + const run = spawnSync(process.execPath, args, { cwd: tmpdir.path, encoding: 'utf8' }); + if (run.error) { + report.errors.push(run.error.stack); + return report; + } + if (run.status !== 0) { + report.errors.push(`running script:\n${run.stderr}`); + return report; + } + + try { + report.perfMap = readFileSync(`/tmp/perf-${run.pid}.map`, 'utf8'); + } catch (err) { + report.errors.push(`reading perf map: ${err.stack}`); + return report; + } + + const hexRegex = '[a-fA-F0-9]+'; + for (const testRegex of test.matches) { + const lineRegex = new RegExp(`${hexRegex} ${hexRegex}.*:${testRegex}`); + if (!lineRegex.test(report.perfMap)) { + report.errors.push(`Expected to match ${lineRegex}`); + } + } + + for (const regex of test.noMatches) { + const noMatch = new RegExp(regex); + if (noMatch.test(report.perfMap)) { + report.errors.push(`Expected not to match ${noMatch}`); + } + } + + return report; +} + +function serializeError(report, index) { + return `[ERROR ${index + 1}] ${report.title} +Errors: +${report.errors.map((err, i) => `${i + 1}. 
${err}`).join('\n')} +Perf map content: +${report.perfMap} + +`; +} + +function runSuite() { + const failures = []; + + for (const tc of testCases) { + const report = runTest(tc); + if (report.errors.length > 0) { + failures.push(report); + } + } + + const errorsToReport = failures.map(serializeError).join('\n--------\n'); + + assert.strictEqual(failures.length, 0, `${failures.length} tests failed\n\n${errorsToReport}`); +} + +runSuite(); From e5fc8ec9fce2ac50b6c2024128afeabc4c91caa4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl=20Zasso?= Date: Sat, 7 Oct 2023 14:35:40 +0200 Subject: [PATCH 39/41] test: skip v8-updates/test-linux-perf Refs: https://github.com/nodejs/node/issues/50079 PR-URL: https://github.com/nodejs/node/pull/49639 Reviewed-By: Jiawen Geng Reviewed-By: Rafael Gonzaga Reviewed-By: Antoine du Hamel --- test/v8-updates/v8-updates.status | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/v8-updates/v8-updates.status b/test/v8-updates/v8-updates.status index 46149f4751ffb5..07ff708ab023dc 100644 --- a/test/v8-updates/v8-updates.status +++ b/test/v8-updates/v8-updates.status @@ -5,6 +5,8 @@ prefix v8-updates # sample-test : PASS,FLAKY [true] # This section applies to all platforms +# https://github.com/nodejs/node/issues/50079 +test-linux-perf: SKIP [$system==win32] From d9d9e62474b41f5a937d1900d68ac4739810bee9 Mon Sep 17 00:00:00 2001 From: Chengzhong Wu Date: Mon, 8 Jan 2024 22:39:30 +0800 Subject: [PATCH 40/41] src: avoid draining platform tasks at FreeEnvironment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit At the point of `FreeEnvironment` and onwards, no JavaScript execution associated with the Environment should be triggered. Avoid draining platform tasks that can trigger JavaScript execution in `FreeEnvironment`. The holder of `node::Environment` should immediately call `node::MultiIsolatePlatform::UnregisterIsolate` and `v8::Isolate::Dispose` to cancel pending foreground tasks and join concurrent tasks after the environment was freed. `NodePlatform` can properly handle the case in `RunForegroundTask` when an Isolate out-lives its associated `node::Environment`. PR-URL: https://github.com/nodejs/node/pull/51290 Fixes: https://github.com/nodejs/node/issues/47748 Fixes: https://github.com/nodejs/node/issues/49344 Reviewed-By: Vinícius Lourenço Claro Cardoso Reviewed-By: Matteo Collina --- src/api/environment.cc | 7 ------ src/node_main_instance.cc | 20 ++++++++++++++-- src/node_platform.cc | 5 ++++ .../test-finalization-registry-shutdown.js | 23 +++++++++++++++++++ 4 files changed, 46 insertions(+), 9 deletions(-) create mode 100644 test/parallel/test-finalization-registry-shutdown.js diff --git a/src/api/environment.cc b/src/api/environment.cc index 51cd46d0fb1c02..de58a26fde5bae 100644 --- a/src/api/environment.cc +++ b/src/api/environment.cc @@ -451,13 +451,6 @@ void FreeEnvironment(Environment* env) { RunAtExit(env); } - // This call needs to be made while the `Environment` is still alive - // because we assume that it is available for async tracking in the - // NodePlatform implementation. 
- MultiIsolatePlatform* platform = env->isolate_data()->platform(); - if (platform != nullptr) - platform->DrainTasks(isolate); - delete env; } diff --git a/src/node_main_instance.cc b/src/node_main_instance.cc index 1d23631780cf90..0bc6180697e28c 100644 --- a/src/node_main_instance.cc +++ b/src/node_main_instance.cc @@ -103,8 +103,24 @@ NodeMainInstance::~NodeMainInstance() { if (isolate_params_ == nullptr) { return; } - // This should only be done on a main instance that owns its isolate. - platform_->UnregisterIsolate(isolate_); + + { +#ifdef DEBUG + // node::Environment has been disposed and no JavaScript Execution is + // allowed at this point. + // Create a scope to check that no JavaScript is executed in debug build + // and proactively crash the process in the case JavaScript is being + // executed. + // Isolate::Dispose() must be invoked outside of this scope to avoid + // use-after-free. + Isolate::DisallowJavascriptExecutionScope disallow_js( + isolate_, Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE); +#endif + // This should only be done on a main instance that owns its isolate. + // IsolateData must be freed before UnregisterIsolate() is called. + isolate_data_.reset(); + platform_->UnregisterIsolate(isolate_); + } isolate_->Dispose(); } diff --git a/src/node_platform.cc b/src/node_platform.cc index 7dd0526e6ece5f..c16766f76a9187 100644 --- a/src/node_platform.cc +++ b/src/node_platform.cc @@ -424,6 +424,11 @@ void PerIsolatePlatformData::RunForegroundTask(std::unique_ptr task) { InternalCallbackScope::kNoFlags); task->Run(); } else { + // When the Environment was freed, the tasks of the Isolate should also be + // canceled by `NodePlatform::UnregisterIsolate`. However, if the embedder + // request to run the foreground task after the Environment was freed, run + // the task without InternalCallbackScope. + // The task is moved out of InternalCallbackScope if env is not available. // This is a required else block, and should not be removed. // See comment: https://github.com/nodejs/node/pull/34688#pullrequestreview-463867489 diff --git a/test/parallel/test-finalization-registry-shutdown.js b/test/parallel/test-finalization-registry-shutdown.js new file mode 100644 index 00000000000000..f896aa2f285c75 --- /dev/null +++ b/test/parallel/test-finalization-registry-shutdown.js @@ -0,0 +1,23 @@ +// Flags: --expose-gc +'use strict'; +const common = require('../common'); + +// This test verifies that when a V8 FinalizationRegistryCleanupTask is queue +// at the last moment when JavaScript can be executed, the callback of a +// FinalizationRegistry will not be invoked and the process should exit +// normally. + +const reg = new FinalizationRegistry( + common.mustNotCall('This FinalizationRegistry callback should never be called')); + +function register() { + // Create a temporary object in a new function scope to allow it to be GC-ed. + reg.register({}); +} + +process.on('exit', () => { + // This is the final chance to execute JavaScript. + register(); + // Queue a FinalizationRegistryCleanupTask by a testing gc request. + global.gc(); +}); From 64903b1ca04dbe182b0abc36deb7437a49bf1881 Mon Sep 17 00:00:00 2001 From: Richard Lau Date: Thu, 16 May 2024 15:42:22 +0000 Subject: [PATCH 41/41] 2024-05-21, Version 18.20.3 'Hydrogen' (LTS) Notable changes: This release fixes a regression introduced in Node.js 18.19.0 where `http.server.close()` was incorrectly closing idle connections. A fix has also been included for compiling Node.js from source with newer versions of Clang. 
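For illustration only (this snippet is not part of the patch): a minimal sketch of the restored `http.server.close()` behaviour described above, assuming a client that holds an idle keep-alive connection via an explicit `http.Agent`. Closing the server stops new connections from being accepted, while idle keep-alive sockets are only torn down by the separate, explicit `server.closeIdleConnections()` call; the delay used below is arbitrary.

```js
'use strict';
const http = require('node:http');

const server = http.createServer((req, res) => {
  res.end('ok');
});

server.listen(0, () => {
  const agent = new http.Agent({ keepAlive: true });
  const { port } = server.address();

  http.get({ port, agent }, (res) => {
    res.resume();
    res.on('end', () => {
      // Stop accepting new connections. With the 18.19.0 regression fixed,
      // this no longer force-closes the idle keep-alive socket.
      server.close(() => console.log('server closed'));

      // Shutting down idle sockets remains an explicit step; the 100 ms
      // delay only keeps the socket visibly idle for a moment first.
      setTimeout(() => {
        server.closeIdleConnections();
        agent.destroy();
      }, 100);
    });
  });
});
```
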
The list of keys used to sign releases has been synchronized with the current list from the `main` branch. Updated dependencies: - acorn updated to 8.11.3. - acorn-walk updated to 8.3.2. - ada updated to 2.7.8. - c-ares updated to 1.28.1. - corepack updated to 0.28.0. - nghttp2 updated to 1.61.0. - ngtcp2 updated to 1.3.0. - npm updated to 10.7.0. Includes a fix from npm@10.5.1 to limit the number of open connections. - simdutf updated to 5.2.4. - zlib updated to 1.3.0.1-motley-7d77fb7. PR-URL: https://github.com/nodejs/node/pull/53028 --- CHANGELOG.md | 3 +- doc/changelogs/CHANGELOG_V18.md | 69 +++++++++++++++++++++++++++++++++ src/node_version.h | 2 +- 3 files changed, 72 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c0f7e8a0221d6..3a8f3f7717c277 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,7 +32,8 @@ release. -18.20.2
    +18.20.3
    +18.20.2
    18.20.1
    18.20.0
    18.19.1
    diff --git a/doc/changelogs/CHANGELOG_V18.md b/doc/changelogs/CHANGELOG_V18.md index 678527839e56ac..57dd96a123883d 100644 --- a/doc/changelogs/CHANGELOG_V18.md +++ b/doc/changelogs/CHANGELOG_V18.md @@ -9,6 +9,7 @@ +18.20.3
    18.20.2
    18.20.1
    18.20.0
    @@ -67,6 +68,74 @@ * [io.js](CHANGELOG_IOJS.md) * [Archive](CHANGELOG_ARCHIVE.md) + + +## 2024-05-21, Version 18.20.3 'Hydrogen' (LTS), @richardlau + +### Notable Changes + +This release fixes a regression introduced in Node.js 18.19.0 where `http.server.close()` was incorrectly closing idle connections. + +A fix has also been included for compiling Node.js from source with newer versions of Clang. + +The list of keys used to sign releases has been synchronized with the current list from the `main` branch. + +#### Updated dependencies + +* acorn updated to 8.11.3. +* acorn-walk updated to 8.3.2. +* ada updated to 2.7.8. +* c-ares updated to 1.28.1. +* corepack updated to 0.28.0. +* nghttp2 updated to 1.61.0. +* ngtcp2 updated to 1.3.0. +* npm updated to 10.7.0. Includes a fix from npm\@10.5.1 to limit the number of open connections [npm/cli#7324](https://github.com/npm/cli/pull/7324). +* simdutf updated to 5.2.4. +* zlib updated to 1.3.0.1-motley-7d77fb7. + +### Commits + +* \[[`0c260e10e7`](https://github.com/nodejs/node/commit/0c260e10e7)] - **deps**: update zlib to 1.3.0.1-motley-7d77fb7 (Node.js GitHub Bot) [#52516](https://github.com/nodejs/node/pull/52516) +* \[[`1152d7f919`](https://github.com/nodejs/node/commit/1152d7f919)] - **deps**: update zlib to 1.3.0.1-motley-24c07df (Node.js GitHub Bot) [#52199](https://github.com/nodejs/node/pull/52199) +* \[[`755399db9d`](https://github.com/nodejs/node/commit/755399db9d)] - **deps**: update zlib to 1.3.0.1-motley-24342f6 (Node.js GitHub Bot) [#52123](https://github.com/nodejs/node/pull/52123) +* \[[`af3e32073b`](https://github.com/nodejs/node/commit/af3e32073b)] - **deps**: update ada to 2.7.8 (Node.js GitHub Bot) [#52517](https://github.com/nodejs/node/pull/52517) +* \[[`e4ea2db58b`](https://github.com/nodejs/node/commit/e4ea2db58b)] - **deps**: update c-ares to 1.28.1 (Node.js GitHub Bot) [#52285](https://github.com/nodejs/node/pull/52285) +* \[[`14e857bea2`](https://github.com/nodejs/node/commit/14e857bea2)] - **deps**: update corepack to 0.28.0 (Node.js GitHub Bot) [#52616](https://github.com/nodejs/node/pull/52616) +* \[[`7f5dd44ca6`](https://github.com/nodejs/node/commit/7f5dd44ca6)] - **deps**: upgrade npm to 10.7.0 (npm team) [#52767](https://github.com/nodejs/node/pull/52767) +* \[[`78f84ebb09`](https://github.com/nodejs/node/commit/78f84ebb09)] - **deps**: update ngtcp2 to 1.3.0 (Node.js GitHub Bot) [#51796](https://github.com/nodejs/node/pull/51796) +* \[[`1f489a3753`](https://github.com/nodejs/node/commit/1f489a3753)] - **deps**: update ngtcp2 to 1.2.0 (Node.js GitHub Bot) [#51584](https://github.com/nodejs/node/pull/51584) +* \[[`3034968225`](https://github.com/nodejs/node/commit/3034968225)] - **deps**: update ngtcp2 to 1.1.0 (Node.js GitHub Bot) [#51319](https://github.com/nodejs/node/pull/51319) +* \[[`1aa9da467f`](https://github.com/nodejs/node/commit/1aa9da467f)] - **deps**: add nghttp3/\*\*/.deps to .gitignore (Luigi Pinca) [#51400](https://github.com/nodejs/node/pull/51400) +* \[[`28c0c78c9a`](https://github.com/nodejs/node/commit/28c0c78c9a)] - **deps**: update ngtcp2 and nghttp3 (James M Snell) [#51291](https://github.com/nodejs/node/pull/51291) +* \[[`8fd5a35364`](https://github.com/nodejs/node/commit/8fd5a35364)] - **deps**: upgrade npm to 10.5.2 (npm team) [#52458](https://github.com/nodejs/node/pull/52458) +* \[[`2c53ff31c9`](https://github.com/nodejs/node/commit/2c53ff31c9)] - **deps**: update acorn-walk to 8.3.2 (Node.js GitHub Bot) [#51457](https://github.com/nodejs/node/pull/51457) +* 
\[[`12f28f33c2`](https://github.com/nodejs/node/commit/12f28f33c2)] - **deps**: update acorn to 8.11.3 (Node.js GitHub Bot) [#51317](https://github.com/nodejs/node/pull/51317) +* \[[`dddb7eb3e0`](https://github.com/nodejs/node/commit/dddb7eb3e0)] - **deps**: update acorn-walk to 8.3.1 (Node.js GitHub Bot) [#50457](https://github.com/nodejs/node/pull/50457) +* \[[`c86550e607`](https://github.com/nodejs/node/commit/c86550e607)] - **deps**: update acorn-walk to 8.3.0 (Node.js GitHub Bot) [#50457](https://github.com/nodejs/node/pull/50457) +* \[[`9500817f66`](https://github.com/nodejs/node/commit/9500817f66)] - **deps**: update acorn to 8.11.2 (Node.js GitHub Bot) [#50460](https://github.com/nodejs/node/pull/50460) +* \[[`7a8c7b6275`](https://github.com/nodejs/node/commit/7a8c7b6275)] - **deps**: update ada to 2.7.7 (Node.js GitHub Bot) [#52028](https://github.com/nodejs/node/pull/52028) +* \[[`b199889943`](https://github.com/nodejs/node/commit/b199889943)] - **deps**: update corepack to 0.26.0 (Node.js GitHub Bot) [#52027](https://github.com/nodejs/node/pull/52027) +* \[[`052b0ba0c6`](https://github.com/nodejs/node/commit/052b0ba0c6)] - **deps**: upgrade npm to 10.5.1 (npm team) [#52351](https://github.com/nodejs/node/pull/52351) +* \[[`209823d3af`](https://github.com/nodejs/node/commit/209823d3af)] - **deps**: update simdutf to 5.2.4 (Node.js GitHub Bot) [#52473](https://github.com/nodejs/node/pull/52473) +* \[[`5114cbe18a`](https://github.com/nodejs/node/commit/5114cbe18a)] - **deps**: update simdutf to 5.2.3 (Yagiz Nizipli) [#52381](https://github.com/nodejs/node/pull/52381) +* \[[`be30309ea0`](https://github.com/nodejs/node/commit/be30309ea0)] - **deps**: update simdutf to 5.0.0 (Daniel Lemire) [#52138](https://github.com/nodejs/node/pull/52138) +* \[[`b56f66e250`](https://github.com/nodejs/node/commit/b56f66e250)] - **deps**: update simdutf to 4.0.9 (Node.js GitHub Bot) [#51655](https://github.com/nodejs/node/pull/51655) +* \[[`a9f3b9d9d1`](https://github.com/nodejs/node/commit/a9f3b9d9d1)] - **deps**: update nghttp2 to 1.61.0 (Node.js GitHub Bot) [#52395](https://github.com/nodejs/node/pull/52395) +* \[[`1b6fa70620`](https://github.com/nodejs/node/commit/1b6fa70620)] - **deps**: update nghttp2 to 1.60.0 (Node.js GitHub Bot) [#51948](https://github.com/nodejs/node/pull/51948) +* \[[`3c9dbbf4d4`](https://github.com/nodejs/node/commit/3c9dbbf4d4)] - **deps**: update nghttp2 to 1.59.0 (Node.js GitHub Bot) [#51581](https://github.com/nodejs/node/pull/51581) +* \[[`e28316da54`](https://github.com/nodejs/node/commit/e28316da54)] - **deps**: update nghttp2 to 1.58.0 (Node.js GitHub Bot) [#50441](https://github.com/nodejs/node/pull/50441) +* \[[`678641f470`](https://github.com/nodejs/node/commit/678641f470)] - **deps**: V8: cherry-pick d15d49b09dc7 (Bo Anderson) [#52337](https://github.com/nodejs/node/pull/52337) +* \[[`1147fee7d9`](https://github.com/nodejs/node/commit/1147fee7d9)] - **doc**: remove ableist language from crypto (Jamie King) [#52063](https://github.com/nodejs/node/pull/52063) +* \[[`5e93eae972`](https://github.com/nodejs/node/commit/5e93eae972)] - **doc**: add release key for marco-ippolito (marco-ippolito) [#52257](https://github.com/nodejs/node/pull/52257) +* \[[`6689a98488`](https://github.com/nodejs/node/commit/6689a98488)] - **http**: remove closeIdleConnections function while calling server close (Kumar Rishav) [#52336](https://github.com/nodejs/node/pull/52336) +* \[[`71616e8a8a`](https://github.com/nodejs/node/commit/71616e8a8a)] - **node-api**: make tsfn accept 
napi\_finalize once more (Gabriel Schulhof) [#51801](https://github.com/nodejs/node/pull/51801) +* \[[`d9d9e62474`](https://github.com/nodejs/node/commit/d9d9e62474)] - **src**: avoid draining platform tasks at FreeEnvironment (Chengzhong Wu) [#51290](https://github.com/nodejs/node/pull/51290) +* \[[`e5fc8ec9fc`](https://github.com/nodejs/node/commit/e5fc8ec9fc)] - **test**: skip v8-updates/test-linux-perf (Michaël Zasso) [#49639](https://github.com/nodejs/node/pull/49639) +* \[[`351ef189ca`](https://github.com/nodejs/node/commit/351ef189ca)] - **test**: v8: Add test-linux-perf-logger test suite (Luke Albao) [#50352](https://github.com/nodejs/node/pull/50352) +* \[[`5cec2efc31`](https://github.com/nodejs/node/commit/5cec2efc31)] - **test**: reduce the number of requests and parsers (Luigi Pinca) [#50240](https://github.com/nodejs/node/pull/50240) +* \[[`5186e453d9`](https://github.com/nodejs/node/commit/5186e453d9)] - **test**: deflake test-http-regr-gh-2928 (Luigi Pinca) [#49574](https://github.com/nodejs/node/pull/49574) +* \[[`c60cd67e1c`](https://github.com/nodejs/node/commit/c60cd67e1c)] - **test**: skip test for dynamically linked OpenSSL (Richard Lau) [#52542](https://github.com/nodejs/node/pull/52542) + ## 2024-04-10, Version 18.20.2 'Hydrogen' (LTS), @RafaelGSS diff --git a/src/node_version.h b/src/node_version.h index a13bb708945c2d..959b34831a3e2e 100644 --- a/src/node_version.h +++ b/src/node_version.h @@ -29,7 +29,7 @@ #define NODE_VERSION_IS_LTS 1 #define NODE_VERSION_LTS_CODENAME "Hydrogen" -#define NODE_VERSION_IS_RELEASE 0 +#define NODE_VERSION_IS_RELEASE 1 #ifndef NODE_STRINGIFY #define NODE_STRINGIFY(n) NODE_STRINGIFY_HELPER(n)