From 7409fec12097b4780af0e1a6b8014b19ab7d58d3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C3=ABl=20Zasso?=
Date: Sat, 29 Jan 2022 08:33:07 +0100
Subject: [PATCH 01/19] deps: update V8 to 9.8.177.9

---
 deps/v8/.vpython3 | 46 +
 deps/v8/AUTHORS | 1 +
 deps/v8/BUILD.bazel | 106 +-
 deps/v8/BUILD.gn | 48 +-
 deps/v8/DEPS | 76 +-
 deps/v8/OWNERS | 11 +-
 deps/v8/WATCHLISTS | 8 +-
 deps/v8/bazel/BUILD.icu | 68 +-
 deps/v8/bazel/BUILD.zlib | 8 +-
 deps/v8/bazel/config/BUILD.bazel | 65 +
 deps/v8/bazel/defs.bzl | 99 +-
 deps/v8/bazel/generate-inspector-files.cmd | 24 +
 deps/v8/include/cppgc/garbage-collected.h | 40 +-
 deps/v8/include/cppgc/heap-state.h | 12 +
 deps/v8/include/cppgc/internal/logging.h | 6 +-
 .../include/cppgc/internal/persistent-node.h | 51 +-
 deps/v8/include/libplatform/v8-tracing.h | 1 -
 deps/v8/include/v8-callbacks.h | 18 +-
 deps/v8/include/v8-cppgc.h | 19 +-
 deps/v8/include/v8-data.h | 15 +
 deps/v8/include/v8-embedder-heap.h | 1 +
 deps/v8/include/v8-embedder-state-scope.h | 48 +
 deps/v8/include/v8-fast-api-calls.h | 67 +-
 deps/v8/include/v8-initialization.h | 4 +-
 deps/v8/include/v8-inspector.h | 4 -
 deps/v8/include/v8-internal.h | 29 +-
 deps/v8/include/v8-isolate.h | 23 +-
 deps/v8/include/v8-locker.h | 2 +-
 deps/v8/include/v8-message.h | 68 +-
 deps/v8/include/v8-object.h | 2 +-
 deps/v8/include/v8-platform.h | 227 ++
 deps/v8/include/v8-primitive.h | 14 +-
 deps/v8/include/v8-profiler.h | 12 +
 deps/v8/include/v8-script.h | 11 +-
 deps/v8/include/v8-statistics.h | 2 +
 deps/v8/include/v8-unwinder.h | 17 +-
 deps/v8/include/v8-version.h | 6 +-
 deps/v8/include/v8-wasm.h | 6 +
 deps/v8/include/v8config.h | 32 +
 deps/v8/infra/mb/mb_config.pyl | 69 +-
 deps/v8/infra/testing/builders.pyl | 50 +
 deps/v8/samples/cppgc/hello-world.cc | 29 +-
 deps/v8/samples/hello-world.cc | 2 +-
 deps/v8/samples/shell.cc | 2 +-
 deps/v8/src/api/api-inl.h | 1 +
 deps/v8/src/api/api-natives.cc | 13 +-
 deps/v8/src/api/api.cc | 282 +-
 deps/v8/src/api/api.h | 3 +
 deps/v8/src/asmjs/asm-js.cc | 2 +-
 deps/v8/src/asmjs/asm-parser.cc | 2 +-
 deps/v8/src/ast/ast-value-factory.cc | 17 +-
 deps/v8/src/ast/ast-value-factory.h | 7 +-
 deps/v8/src/ast/ast.h | 26 +-
 deps/v8/src/ast/prettyprinter.cc | 4 +-
 deps/v8/src/ast/scopes.cc | 113 +-
 deps/v8/src/ast/scopes.h | 27 +-
 deps/v8/src/base/atomic-utils.h | 25 +
 deps/v8/src/base/atomicops.h | 58 +
 deps/v8/src/base/bounded-page-allocator.cc | 5 +-
 .../base/emulated-virtual-address-subspace.cc | 138 +
 .../base/emulated-virtual-address-subspace.h | 113 +
 deps/v8/src/base/platform/platform-fuchsia.cc | 280 +-
 deps/v8/src/base/platform/platform-posix.cc | 96 +-
 deps/v8/src/base/platform/platform-win32.cc | 160 +-
 deps/v8/src/base/platform/platform.h | 98 +
 deps/v8/src/base/platform/yield-processor.h | 55 +
 deps/v8/src/base/region-allocator.cc | 30 +
 deps/v8/src/base/region-allocator.h | 34 +-
 .../sanitizer/lsan-virtual-address-space.cc | 61 +
 .../sanitizer/lsan-virtual-address-space.h | 63 +
 deps/v8/src/base/small-vector.h | 72 +-
 .../virtual-address-space-page-allocator.cc | 69 +
 .../virtual-address-space-page-allocator.h | 72 +
 deps/v8/src/base/virtual-address-space.cc | 262 ++
 deps/v8/src/base/virtual-address-space.h | 136 +
 deps/v8/src/base/win32-headers.h | 4 +-
 .../src/baseline/baseline-batch-compiler.cc | 20 +-
 deps/v8/src/baseline/baseline-compiler.cc | 46 +-
 .../loong64/baseline-assembler-loong64-inl.h | 21 +-
 .../mips/baseline-assembler-mips-inl.h | 24 +-
 .../mips64/baseline-assembler-mips64-inl.h | 24 +-
 .../riscv64/baseline-assembler-riscv64-inl.h | 1 +
 .../s390/baseline-assembler-s390-inl.h | 224 +-
 deps/v8/src/bigint/CPPLINT.cfg | 1 +
 deps/v8/src/bigint/bigint-internal.cc | 4 +-
 deps/v8/src/bigint/bigint.h | 19 +
 deps/v8/src/bigint/bitwise.cc | 106 +-
 deps/v8/src/bigint/digit-arithmetic.h | 4 +-
 deps/v8/src/bigint/div-barrett.cc | 28 +-
 deps/v8/src/bigint/div-burnikel.cc | 14 +-
 deps/v8/src/bigint/div-helpers.cc | 6 +-
 deps/v8/src/bigint/div-schoolbook.cc | 7 +-
 deps/v8/src/bigint/fromstring.cc | 10 +-
 deps/v8/src/bigint/mul-fft.cc | 40 +-
 deps/v8/src/bigint/mul-karatsuba.cc | 8 +-
 deps/v8/src/bigint/mul-schoolbook.cc | 4 +-
 deps/v8/src/bigint/tostring.cc | 18 +-
 deps/v8/src/bigint/vector-arithmetic.cc | 2 +-
 deps/v8/src/builtins/arm/builtins-arm.cc | 8 +-
 deps/v8/src/builtins/arm64/builtins-arm64.cc | 29 +-
 deps/v8/src/builtins/array-from.tq | 8 +-
 deps/v8/src/builtins/array-join.tq | 4 +-
 deps/v8/src/builtins/base.tq | 9 +-
 .../builtins/builtins-async-function-gen.cc | 1 +
 deps/v8/src/builtins/builtins-async-gen.cc | 5 +-
 .../builtins/builtins-async-generator-gen.cc | 2 -
 .../src/builtins/builtins-collections-gen.cc | 5 +-
 .../src/builtins/builtins-constructor-gen.cc | 8 +-
 deps/v8/src/builtins/builtins-dataview.cc | 70 +-
 deps/v8/src/builtins/builtins-date.cc | 12 +-
 deps/v8/src/builtins/builtins-definitions.h | 12 +
 deps/v8/src/builtins/builtins-function.cc | 6 +-
 deps/v8/src/builtins/builtins-internal-gen.cc | 19 +-
 deps/v8/src/builtins/builtins-intl.cc | 3 +-
 deps/v8/src/builtins/builtins-iterator-gen.cc | 5 +-
 deps/v8/src/builtins/builtins-lazy-gen.cc | 38 +-
 deps/v8/src/builtins/builtins-lazy-gen.h | 2 +-
 .../builtins/builtins-microtask-queue-gen.cc | 23 +-
 deps/v8/src/builtins/builtins-number-gen.cc | 29 +
 deps/v8/src/builtins/builtins-object-gen.cc | 6 +-
 deps/v8/src/builtins/builtins-regexp-gen.cc | 12 +-
 deps/v8/src/builtins/builtins-regexp-gen.h | 2 +-
 deps/v8/src/builtins/builtins-string.tq | 25 +
 deps/v8/src/builtins/builtins-temporal.cc | 2 +-
 deps/v8/src/builtins/builtins-trace.cc | 4 +-
 .../src/builtins/builtins-typed-array-gen.cc | 19 +-
 .../src/builtins/builtins-typed-array-gen.h | 3 +-
 deps/v8/src/builtins/builtins-utils.h | 11 +-
 deps/v8/src/builtins/builtins-wasm-gen.cc | 4 +-
 deps/v8/src/builtins/builtins-wasm-gen.h | 2 +-
 deps/v8/src/builtins/builtins.cc | 48 +-
 deps/v8/src/builtins/builtins.h | 18 +
 deps/v8/src/builtins/collections.tq | 7 +-
 deps/v8/src/builtins/convert.tq | 6 +
 deps/v8/src/builtins/data-view.tq | 102 +-
 deps/v8/src/builtins/finalization-registry.tq | 4 +-
 deps/v8/src/builtins/ia32/builtins-ia32.cc | 8 +-
 deps/v8/src/builtins/iterator.tq | 2 +-
 .../src/builtins/loong64/builtins-loong64.cc | 11 +-
 deps/v8/src/builtins/mips/builtins-mips.cc | 11 +-
 .../v8/src/builtins/mips64/builtins-mips64.cc | 11 +-
 deps/v8/src/builtins/object-fromentries.tq | 4 +-
 deps/v8/src/builtins/ppc/builtins-ppc.cc | 161 +-
 .../builtins/promise-abstract-operations.tq | 2 +-
 deps/v8/src/builtins/promise-all.tq | 12 +-
 deps/v8/src/builtins/promise-any.tq | 10 +-
 deps/v8/src/builtins/promise-constructor.tq | 2 +-
 deps/v8/src/builtins/promise-jobs.tq | 2 +-
 deps/v8/src/builtins/promise-misc.tq | 4 +-
 deps/v8/src/builtins/promise-race.tq | 12 +-
 deps/v8/src/builtins/promise-reaction-job.tq | 4 +-
 deps/v8/src/builtins/promise-resolve.tq | 2 +-
 .../src/builtins/riscv64/builtins-riscv64.cc | 11 +-
 deps/v8/src/builtins/s390/builtins-s390.cc | 680 ++++-
 .../src/builtins/setup-builtins-internal.cc | 7 +-
 deps/v8/src/builtins/torque-internal.tq | 31 +-
 .../builtins/typed-array-createtypedarray.tq | 7 +-
 deps/v8/src/builtins/typed-array.tq | 11 +-
 deps/v8/src/builtins/wasm.tq | 59 +-
 deps/v8/src/builtins/x64/builtins-x64.cc | 102 +-
 deps/v8/src/codegen/OWNERS | 2 +
 deps/v8/src/codegen/arm/assembler-arm-inl.h | 2 +-
 deps/v8/src/codegen/arm/assembler-arm.cc | 10 +-
 deps/v8/src/codegen/arm/assembler-arm.h | 12 +-
 .../v8/src/codegen/arm/macro-assembler-arm.cc | 2 +-
 deps/v8/src/codegen/arm/macro-assembler-arm.h | 2 +-
 .../src/codegen/arm64/assembler-arm64-inl.h | 4 +-
 deps/v8/src/codegen/arm64/assembler-arm64.cc | 8 +-
 deps/v8/src/codegen/arm64/assembler-arm64.h | 12 +-
 .../codegen/arm64/macro-assembler-arm64.cc | 2 +-
 .../src/codegen/arm64/macro-assembler-arm64.h | 2 +-
 deps/v8/src/codegen/arm64/register-arm64.h | 2 -
 deps/v8/src/codegen/assembler.h | 7 +-
 deps/v8/src/codegen/code-reference.cc | 24 +-
 deps/v8/src/codegen/code-reference.h | 21 +-
 deps/v8/src/codegen/code-stub-assembler.cc | 226 +-
 deps/v8/src/codegen/code-stub-assembler.h | 51 +-
 deps/v8/src/codegen/compilation-cache.cc | 14 +-
 deps/v8/src/codegen/compiler.cc | 813 +++---
 deps/v8/src/codegen/compiler.h | 89 +-
 deps/v8/src/codegen/constant-pool.h | 8 +-
 deps/v8/src/codegen/cpu-features.h | 2 +-
 .../src/codegen/external-reference-table.cc | 8 +-
 deps/v8/src/codegen/external-reference.cc | 49 +-
 deps/v8/src/codegen/external-reference.h | 34 +-
 deps/v8/src/codegen/ia32/assembler-ia32-inl.h | 6 +-
 deps/v8/src/codegen/ia32/assembler-ia32.cc | 55 +-
 deps/v8/src/codegen/ia32/assembler-ia32.h | 199 +-
 deps/v8/src/codegen/ia32/fma-instr.h | 58 +
 .../src/codegen/ia32/macro-assembler-ia32.cc | 4 +-
 .../src/codegen/ia32/macro-assembler-ia32.h | 2 +-
 .../src/codegen/loong64/assembler-loong64.cc | 6 +-
 .../src/codegen/loong64/assembler-loong64.h | 11 +-
 .../loong64/macro-assembler-loong64.cc | 25 +-
 .../codegen/loong64/macro-assembler-loong64.h | 8 +-
 deps/v8/src/codegen/machine-type.h | 4 +-
 deps/v8/src/codegen/mips/assembler-mips.cc | 12 +-
 deps/v8/src/codegen/mips/assembler-mips.h | 15 +-
 .../src/codegen/mips/macro-assembler-mips.cc | 25 +-
 .../src/codegen/mips/macro-assembler-mips.h | 6 +-
 .../v8/src/codegen/mips64/assembler-mips64.cc | 8 +-
 deps/v8/src/codegen/mips64/assembler-mips64.h | 15 +-
 .../codegen/mips64/macro-assembler-mips64.cc | 26 +-
 .../codegen/mips64/macro-assembler-mips64.h | 6 +-
 .../src/codegen/pending-optimization-table.cc | 7 +-
 deps/v8/src/codegen/ppc/assembler-ppc-inl.h | 2 +-
 deps/v8/src/codegen/ppc/assembler-ppc.cc | 17 +-
 deps/v8/src/codegen/ppc/assembler-ppc.h | 16 +-
 .../ppc/interface-descriptors-ppc-inl.h | 14 +-
 .../v8/src/codegen/ppc/macro-assembler-ppc.cc | 51 +-
 deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 6 +-
 deps/v8/src/codegen/reloc-info.cc | 6 +-
 deps/v8/src/codegen/reloc-info.h | 6 +-
 .../src/codegen/riscv64/assembler-riscv64.cc | 285 +-
 .../src/codegen/riscv64/assembler-riscv64.h | 121 +-
 .../src/codegen/riscv64/constants-riscv64.h | 97 +-
 .../riscv64/macro-assembler-riscv64.cc | 101 +-
 .../codegen/riscv64/macro-assembler-riscv64.h | 13 +-
 .../v8/src/codegen/riscv64/register-riscv64.h | 14 +-
 deps/v8/src/codegen/s390/assembler-s390-inl.h | 2 +-
 deps/v8/src/codegen/s390/assembler-s390.cc | 6 +-
 deps/v8/src/codegen/s390/assembler-s390.h | 10 +-
 .../s390/interface-descriptors-s390-inl.h | 20 +-
 .../src/codegen/s390/macro-assembler-s390.cc | 43 +-
 .../src/codegen/s390/macro-assembler-s390.h | 4 +-
 deps/v8/src/codegen/safepoint-table.cc | 340 ++-
 deps/v8/src/codegen/safepoint-table.h | 244 +-
 deps/v8/src/codegen/script-details.h | 2 +-
 .../macro-assembler-shared-ia32-x64.cc | 95 +
 .../macro-assembler-shared-ia32-x64.h | 11 +-
 deps/v8/src/codegen/turbo-assembler.cc | 12 +
 deps/v8/src/codegen/turbo-assembler.h | 1 +
 .../codegen/unoptimized-compilation-info.cc | 5 +-
 .../codegen/unoptimized-compilation-info.h | 8 +
 deps/v8/src/codegen/x64/assembler-x64-inl.h | 4 +-
 deps/v8/src/codegen/x64/assembler-x64.cc | 109 +-
 deps/v8/src/codegen/x64/assembler-x64.h | 47 +-
 deps/v8/src/codegen/x64/fma-instr.h | 42 +-
 .../v8/src/codegen/x64/macro-assembler-x64.cc | 111 +-
 deps/v8/src/codegen/x64/macro-assembler-x64.h | 13 +-
 deps/v8/src/common/globals.h | 88 +-
 .../common/high-allocation-throughput-scope.h | 37 +
 deps/v8/src/common/message-template.h | 13 +-
 deps/v8/src/common/ptr-compr-inl.h | 2 +-
 .../lazy-compile-dispatcher.cc | 522 ++--
 .../lazy-compile-dispatcher.h | 134 +-
 deps/v8/src/compiler/access-builder.cc | 10 +
 deps/v8/src/compiler/access-info.cc | 29 +-
 deps/v8/src/compiler/access-info.h | 5 +-
 .../arm64/instruction-selector-arm64.cc | 76 +-
 .../v8/src/compiler/backend/code-generator.cc | 5 +-
 .../backend/ia32/code-generator-ia32.cc | 60 +-
 .../backend/ia32/instruction-codes-ia32.h | 21 +-
 .../ia32/instruction-scheduler-ia32.cc | 21 +-
 .../backend/ia32/instruction-selector-ia32.cc | 143 +-
 .../backend/instruction-selector-impl.h | 4 +
 .../compiler/backend/instruction-selector.cc | 11 +-
 deps/v8/src/compiler/backend/instruction.h | 4 +-
 .../loong64/instruction-selector-loong64.cc | 5 +-
 .../mips64/instruction-selector-mips64.cc | 6 +-
 .../backend/ppc/code-generator-ppc.cc | 75 +-
 .../backend/riscv64/code-generator-riscv64.cc | 1050 ++++++-
 .../riscv64/instruction-codes-riscv64.h | 26 +-
 .../riscv64/instruction-scheduler-riscv64.cc | 26 +-
 .../riscv64/instruction-selector-riscv64.cc | 227 +-
 .../backend/s390/code-generator-s390.cc | 375 ++-
 .../backend/x64/instruction-selector-x64.cc | 19 +-
 .../src/compiler/compilation-dependencies.cc | 510 +++-
 .../src/compiler/compilation-dependencies.h | 17 +-
 deps/v8/src/compiler/csa-load-elimination.cc | 3 +
 deps/v8/src/compiler/diamond.h | 4 +-
 .../src/compiler/effect-control-linearizer.cc | 11 +-
 deps/v8/src/compiler/fast-api-calls.cc | 1 +
 deps/v8/src/compiler/functional-list.h | 12 +-
 deps/v8/src/compiler/globals.h | 4 +-
 deps/v8/src/compiler/graph-assembler.cc | 9 -
 deps/v8/src/compiler/graph-assembler.h | 1 -
 deps/v8/src/compiler/heap-refs.cc | 40 +-
 deps/v8/src/compiler/heap-refs.h | 1 +
 deps/v8/src/compiler/int64-lowering.cc | 10 +-
 deps/v8/src/compiler/int64-lowering.h | 2 +-
 deps/v8/src/compiler/js-call-reducer.cc | 134 +-
 deps/v8/src/compiler/js-call-reducer.h | 1 +
 deps/v8/src/compiler/js-create-lowering.cc | 2 +-
 deps/v8/src/compiler/js-heap-broker.cc | 18 +-
 deps/v8/src/compiler/js-heap-broker.h | 14 +
 deps/v8/src/compiler/js-inlining-heuristic.cc | 20 +-
 deps/v8/src/compiler/js-inlining.cc | 18 +-
 .../js-native-context-specialization.cc | 23 +-
 deps/v8/src/compiler/js-type-hint-lowering.cc | 5 +-
 deps/v8/src/compiler/js-typed-lowering.cc | 7 +-
 deps/v8/src/compiler/linkage.cc | 23 +
 deps/v8/src/compiler/linkage.h | 18 +-
 deps/v8/src/compiler/load-elimination.cc | 49 +-
 deps/v8/src/compiler/loop-analysis.cc | 34 +-
 .../v8/src/compiler/machine-graph-verifier.cc | 28 +-
 .../src/compiler/machine-operator-reducer.cc | 2 +-
 deps/v8/src/compiler/machine-operator.cc | 2 +-
 deps/v8/src/compiler/memory-lowering.cc | 12 +-
 deps/v8/src/compiler/node-properties.cc | 16 +-
 deps/v8/src/compiler/persistent-map.h | 9 +-
 deps/v8/src/compiler/pipeline.cc | 3 +
 deps/v8/src/compiler/simplified-lowering.cc | 1 +
 deps/v8/src/compiler/typer.cc | 15 +-
 deps/v8/src/compiler/types.cc | 31 +-
 deps/v8/src/compiler/types.h | 100 +-
 .../src/compiler/value-numbering-reducer.cc | 14 +-
 deps/v8/src/compiler/wasm-compiler.cc | 543 ++--
 deps/v8/src/compiler/wasm-compiler.h | 52 +-
 deps/v8/src/compiler/wasm-escape-analysis.cc | 35 +-
 deps/v8/src/compiler/wasm-inlining.cc | 43 +-
 deps/v8/src/compiler/wasm-inlining.h | 4 +-
 deps/v8/src/d8/d8-test.cc | 187 +-
 deps/v8/src/d8/d8.cc | 404 +--
 deps/v8/src/d8/d8.h | 46 +-
 deps/v8/src/date/dateparser-inl.h | 12 +-
 deps/v8/src/debug/debug-coverage.cc | 2 +
 deps/v8/src/debug/debug-interface.cc | 71 +-
 deps/v8/src/debug/debug-interface.h | 31 +-
 deps/v8/src/debug/debug-property-iterator.cc | 15 +
 deps/v8/src/debug/debug-scopes.cc | 7 +-
 deps/v8/src/debug/debug-scopes.h | 2 +
 .../src/debug/debug-stack-trace-iterator.cc | 3 +-
 deps/v8/src/debug/debug-wasm-objects.cc | 15 +-
 deps/v8/src/debug/debug.cc | 118 +-
 deps/v8/src/debug/debug.h | 12 +-
 deps/v8/src/debug/liveedit.cc | 20 +-
 .../src/debug/wasm/gdb-server/gdb-server.cc | 3 +-
 .../v8/src/debug/wasm/gdb-server/gdb-server.h | 7 +-
 deps/v8/src/deoptimizer/deoptimizer.cc | 5 +-
 deps/v8/src/deoptimizer/translated-state.cc | 44 +-
 deps/v8/src/deoptimizer/translated-state.h | 18 +-
 .../src/diagnostics/basic-block-profiler.cc | 4 +-
 deps/v8/src/diagnostics/disassembler.cc | 8 +-
 deps/v8/src/diagnostics/ia32/disasm-ia32.cc | 92 +-
 deps/v8/src/diagnostics/objects-debug.cc | 44 +-
 deps/v8/src/diagnostics/objects-printer.cc | 41 +-
 deps/v8/src/diagnostics/perf-jit.cc | 2 -
 deps/v8/src/diagnostics/ppc/disasm-ppc.cc | 4 +
 .../src/diagnostics/riscv64/disasm-riscv64.cc | 132 +-
 .../src/diagnostics/unwinding-info-win64.cc | 30 -
 deps/v8/src/diagnostics/x64/disasm-x64.cc | 123 +-
 deps/v8/src/execution/DEPS | 5 +
 .../v8/src/execution/arm64/simulator-arm64.cc | 148 +-
 deps/v8/src/execution/arm64/simulator-arm64.h | 3 +
 deps/v8/src/execution/embedder-state.cc | 45 +
 deps/v8/src/execution/embedder-state.h | 39 +
 deps/v8/src/execution/encoded-c-signature.cc | 41 +
 deps/v8/src/execution/encoded-c-signature.h | 60 +
 deps/v8/src/execution/execution.cc | 25 +-
 deps/v8/src/execution/execution.h | 2 +-
 deps/v8/src/execution/frame-constants.h | 16 +
 deps/v8/src/execution/frames-inl.h | 4 +
 deps/v8/src/execution/frames.cc | 146 +-
 deps/v8/src/execution/frames.h | 27 +-
 deps/v8/src/execution/isolate-data.h | 39 +
 deps/v8/src/execution/isolate-inl.h | 1 +
 deps/v8/src/execution/isolate-utils-inl.h | 22 +
 deps/v8/src/execution/isolate.cc | 446 ++-
 deps/v8/src/execution/isolate.h | 111 +-
 deps/v8/src/execution/local-isolate.cc | 18 +-
 deps/v8/src/execution/local-isolate.h | 20 +
 deps/v8/src/execution/messages.cc | 46 +-
 deps/v8/src/execution/ppc/simulator-ppc.cc | 61 +-
 .../execution/riscv64/simulator-riscv64.cc | 756 ++++-
 .../src/execution/riscv64/simulator-riscv64.h | 24 +-
 deps/v8/src/execution/runtime-profiler.cc | 19 +-
 deps/v8/src/execution/s390/simulator-s390.cc | 62 +-
 deps/v8/src/execution/simulator-base.cc | 25 +
 deps/v8/src/execution/simulator-base.h | 53 +
 deps/v8/src/execution/simulator.h | 10 +-
 deps/v8/src/execution/thread-local-top.cc | 1 +
 deps/v8/src/execution/thread-local-top.h | 6 +-
 deps/v8/src/flags/flag-definitions.h | 129 +-
 deps/v8/src/handles/handles-inl.h | 4 +-
 deps/v8/src/handles/handles.h | 8 +-
 deps/v8/src/heap/allocation-observer.cc | 4 +-
 deps/v8/src/heap/base/worklist.h | 38 +-
 deps/v8/src/heap/code-range.cc | 10 +-
 deps/v8/src/heap/code-range.h | 2 +-
 deps/v8/src/heap/code-stats.cc | 13 +-
 deps/v8/src/heap/collection-barrier.cc | 23 +-
 deps/v8/src/heap/collection-barrier.h | 23 +-
 deps/v8/src/heap/concurrent-allocator.cc | 38 +-
 deps/v8/src/heap/concurrent-allocator.h | 6 +
 deps/v8/src/heap/concurrent-marking.cc | 78 +-
 deps/v8/src/heap/concurrent-marking.h | 1 -
 deps/v8/src/heap/cppgc-js/cpp-heap.cc | 100 +-
 deps/v8/src/heap/cppgc-js/cpp-heap.h | 37 +-
 deps/v8/src/heap/cppgc-js/cpp-snapshot.cc | 6 +-
 deps/v8/src/heap/cppgc/caged-heap.h | 9 +-
 deps/v8/src/heap/cppgc/compactor.cc | 10 +-
 deps/v8/src/heap/cppgc/compactor.h | 1 +
 deps/v8/src/heap/cppgc/heap-base.cc | 9 +-
 deps/v8/src/heap/cppgc/heap-base.h | 16 +-
 deps/v8/src/heap/cppgc/heap-object-header.h | 35 +-
 deps/v8/src/heap/cppgc/heap-page.cc | 11 +-
 deps/v8/src/heap/cppgc/heap-space.h | 3 +-
 deps/v8/src/heap/cppgc/heap-state.cc | 7 +
 deps/v8/src/heap/cppgc/heap.cc | 31 +-
 deps/v8/src/heap/cppgc/heap.h | 3 -
 .../cppgc/incremental-marking-schedule.cc | 1 -
 deps/v8/src/heap/cppgc/marker.cc | 37 +-
 deps/v8/src/heap/cppgc/marker.h | 5 +-
 deps/v8/src/heap/cppgc/marking-state.h | 18 +-
 deps/v8/src/heap/cppgc/marking-verifier.cc | 10 +-
 deps/v8/src/heap/cppgc/marking-visitor.cc | 3 +-
 deps/v8/src/heap/cppgc/object-poisoner.h | 3 +-
 deps/v8/src/heap/cppgc/object-size-trait.cc | 6 +-
 deps/v8/src/heap/cppgc/object-view.h | 21 +-
 deps/v8/src/heap/cppgc/persistent-node.cc | 12 +-
 .../v8/src/heap/cppgc/prefinalizer-handler.cc | 4 +-
 deps/v8/src/heap/cppgc/sweeper.cc | 90 +-
 deps/v8/src/heap/cppgc/visitor.cc | 2 +-
 deps/v8/src/heap/embedder-tracing.cc | 72 +-
 deps/v8/src/heap/embedder-tracing.h | 33 +-
 deps/v8/src/heap/factory-base.cc | 128 +-
 deps/v8/src/heap/factory-base.h | 33 +-
 deps/v8/src/heap/factory.cc | 168 +-
 deps/v8/src/heap/factory.h | 53 +-
 deps/v8/src/heap/gc-tracer.cc | 50 +-
 deps/v8/src/heap/gc-tracer.h | 16 +-
 deps/v8/src/heap/heap-controller.cc | 4 -
 deps/v8/src/heap/heap-inl.h | 29 +-
 deps/v8/src/heap/heap-layout-tracer.cc | 73 +
 deps/v8/src/heap/heap-layout-tracer.h | 33 +
 deps/v8/src/heap/heap-write-barrier-inl.h | 10 +-
 deps/v8/src/heap/heap-write-barrier.cc | 13 +
 deps/v8/src/heap/heap-write-barrier.h | 3 +
 deps/v8/src/heap/heap.cc | 353 ++-
 deps/v8/src/heap/heap.h | 96 +-
 deps/v8/src/heap/incremental-marking-inl.h | 8 +
 deps/v8/src/heap/incremental-marking.cc | 33 +-
 deps/v8/src/heap/incremental-marking.h | 20 +-
 deps/v8/src/heap/invalidated-slots-inl.h | 4 +-
 deps/v8/src/heap/large-spaces.cc | 16 +-
 deps/v8/src/heap/linear-allocation-area.h | 8 +
 deps/v8/src/heap/local-allocator-inl.h | 2 +-
 deps/v8/src/heap/local-factory.cc | 3 +-
 deps/v8/src/heap/local-factory.h | 20 +-
 deps/v8/src/heap/local-heap-inl.h | 20 +-
 deps/v8/src/heap/local-heap.cc | 152 +-
 deps/v8/src/heap/local-heap.h | 16 +-
 deps/v8/src/heap/mark-compact-inl.h | 4 +-
 deps/v8/src/heap/mark-compact.cc | 736 +++--
 deps/v8/src/heap/mark-compact.h | 107 +-
 deps/v8/src/heap/marking-barrier-inl.h | 2 +
 deps/v8/src/heap/marking-barrier.cc | 14 +-
 deps/v8/src/heap/marking-barrier.h | 1 +
 deps/v8/src/heap/marking-visitor-inl.h | 26 +-
 deps/v8/src/heap/marking-visitor.h | 43 +-
 deps/v8/src/heap/memory-allocator.cc | 14 +-
 deps/v8/src/heap/memory-allocator.h | 10 +-
 deps/v8/src/heap/memory-chunk.cc | 9 +-
 deps/v8/src/heap/memory-chunk.h | 5 +
 deps/v8/src/heap/memory-measurement-inl.h | 1 +
 deps/v8/src/heap/memory-measurement.cc | 9 +-
 deps/v8/src/heap/new-spaces-inl.h | 12 +-
 deps/v8/src/heap/new-spaces.cc | 84 +-
 deps/v8/src/heap/new-spaces.h | 23 +-
 deps/v8/src/heap/object-stats.cc | 27 +-
 deps/v8/src/heap/objects-visiting.h | 5 +-
 deps/v8/src/heap/paged-spaces-inl.h | 21 +-
 deps/v8/src/heap/paged-spaces.cc | 106 +-
 deps/v8/src/heap/paged-spaces.h | 30 +-
 deps/v8/src/heap/parked-scope.h | 2 +
 deps/v8/src/heap/read-only-spaces.cc | 11 +-
 deps/v8/src/heap/remembered-set.h | 6 +-
 deps/v8/src/heap/safepoint.cc | 267 +-
 deps/v8/src/heap/safepoint.h | 109 +-
 deps/v8/src/heap/scavenger-inl.h | 102 +-
 deps/v8/src/heap/scavenger.cc | 62 +-
 deps/v8/src/heap/scavenger.h | 73 +-
 deps/v8/src/heap/setup-heap-internal.cc | 14 +-
 deps/v8/src/heap/slot-set.h | 2 +-
 deps/v8/src/heap/spaces.cc | 43 +-
 deps/v8/src/heap/spaces.h | 24 +-
 deps/v8/src/heap/sweeper.cc | 2 +-
 deps/v8/src/heap/weak-object-worklists.cc | 21 +-
 deps/v8/src/heap/weak-object-worklists.h | 22 +-
 deps/v8/src/heap/worklist.h | 453 ---
 deps/v8/src/ic/accessor-assembler.cc | 51 +-
 deps/v8/src/ic/accessor-assembler.h | 2 +-
 deps/v8/src/ic/binary-op-assembler.cc | 35 +
 deps/v8/src/ic/binary-op-assembler.h | 39 +-
 deps/v8/src/ic/ic-inl.h | 6 +-
 deps/v8/src/ic/ic.cc | 24 +-
 deps/v8/src/ic/ic.h | 2 +-
 deps/v8/src/ic/keyed-store-generic.cc | 2 +-
 deps/v8/src/init/bootstrapper.cc | 106 +-
 deps/v8/src/init/heap-symbols.h | 12 +-
 deps/v8/src/init/v8.cc | 19 +-
 deps/v8/src/init/v8.h | 6 +-
 deps/v8/src/inspector/DEPS | 1 +
 deps/v8/src/inspector/string-util.h | 4 +
 .../src/inspector/v8-debugger-agent-impl.cc | 59 +-
 .../v8/src/inspector/v8-debugger-agent-impl.h | 4 +-
 deps/v8/src/inspector/v8-debugger.cc | 160 +-
 deps/v8/src/inspector/v8-debugger.h | 37 +-
 deps/v8/src/inspector/v8-stack-trace-impl.cc | 24 +-
 deps/v8/src/inspector/v8-stack-trace-impl.h | 5 +-
 deps/v8/src/inspector/value-mirror.cc | 88 -
 .../src/interpreter/bytecode-array-builder.h | 11 +
 .../src/interpreter/bytecode-array-writer.cc | 2 +
 deps/v8/src/interpreter/bytecode-generator.cc | 132 +-
 deps/v8/src/interpreter/bytecode-generator.h | 24 +-
 .../src/interpreter/interpreter-assembler.cc | 49 +
 .../src/interpreter/interpreter-assembler.h | 35 +
 .../src/interpreter/interpreter-generator.cc | 86 +-
 deps/v8/src/interpreter/interpreter.cc | 18 +-
 deps/v8/src/interpreter/interpreter.h | 2 +-
 deps/v8/src/json/json-parser.cc | 7 +-
 deps/v8/src/json/json-parser.h | 3 +
 deps/v8/src/json/json-stringifier.cc | 272 +-
 deps/v8/src/libsampler/sampler.cc | 4 +-
 deps/v8/src/logging/counters-definitions.h | 25 +-
 deps/v8/src/logging/counters.cc | 130 +-
 deps/v8/src/logging/counters.h | 252 +-
 deps/v8/src/logging/log.cc | 14 +-
 .../v8/src/logging/runtime-call-stats-scope.h | 9 +
 deps/v8/src/logging/runtime-call-stats.h | 11 +-
 deps/v8/src/numbers/conversions.cc | 2 +-
 deps/v8/src/numbers/conversions.h | 2 +-
 deps/v8/src/numbers/hash-seed-inl.h | 7 +-
 deps/v8/src/objects/all-objects-inl.h | 2 +-
 deps/v8/src/objects/allocation-site-inl.h | 9 +-
 deps/v8/src/objects/allocation-site.h | 2 +
 deps/v8/src/objects/api-callbacks.h | 10 +
 deps/v8/src/objects/arguments.h | 4 +
 deps/v8/src/objects/backing-store.cc | 5 +-
 deps/v8/src/objects/backing-store.h | 5 +
 deps/v8/src/objects/bigint.cc | 95 +-
 deps/v8/src/objects/code-inl.h | 142 +-
 deps/v8/src/objects/code.cc | 236 +-
 deps/v8/src/objects/code.h | 192 +-
 deps/v8/src/objects/contexts.cc | 11 +-
 deps/v8/src/objects/contexts.h | 3 +-
 deps/v8/src/objects/contexts.tq | 4 +-
 deps/v8/src/objects/debug-objects.h | 7 +
 deps/v8/src/objects/descriptor-array.h | 3 +
 deps/v8/src/objects/descriptor-array.tq | 6 +-
 deps/v8/src/objects/elements.cc | 54 +-
 deps/v8/src/objects/feedback-vector.cc | 92 +-
 deps/v8/src/objects/feedback-vector.h | 12 +-
 deps/v8/src/objects/feedback-vector.tq | 2 +-
 deps/v8/src/objects/field-index.h | 2 +
 deps/v8/src/objects/fixed-array-inl.h | 6 +-
 deps/v8/src/objects/fixed-array.h | 14 +-
 deps/v8/src/objects/foreign.tq | 2 +-
 deps/v8/src/objects/free-space.h | 2 +
 deps/v8/src/objects/function-kind.h | 5 +-
 deps/v8/src/objects/heap-number.h | 2 +
 deps/v8/src/objects/heap-object.h | 14 +-
 deps/v8/src/objects/instance-type-inl.h | 50 +-
 deps/v8/src/objects/instance-type.h | 39 +-
 deps/v8/src/objects/intl-objects.cc | 107 +-
 deps/v8/src/objects/intl-objects.h | 7 +-
 deps/v8/src/objects/intl-objects.tq | 125 +
 deps/v8/src/objects/js-array-buffer-inl.h | 101 +-
 deps/v8/src/objects/js-array-buffer.cc | 45 +-
 deps/v8/src/objects/js-array-buffer.h | 48 +-
 deps/v8/src/objects/js-array-buffer.tq | 45 +-
 deps/v8/src/objects/js-collator.cc | 6 +-
 deps/v8/src/objects/js-date-time-format.cc | 34 +-
 deps/v8/src/objects/js-display-names.cc | 53 +-
 deps/v8/src/objects/js-function-inl.h | 12 +-
 deps/v8/src/objects/js-function.cc | 6 +-
 deps/v8/src/objects/js-function.h | 8 +
 deps/v8/src/objects/js-generator.h | 3 +
 deps/v8/src/objects/js-locale.cc | 47 +-
 deps/v8/src/objects/js-objects-inl.h | 12 +-
 deps/v8/src/objects/js-objects.cc | 45 +-
 deps/v8/src/objects/js-objects.h | 4 +-
 deps/v8/src/objects/keys.cc | 6 +-
 deps/v8/src/objects/literal-objects.cc | 22 +-
 deps/v8/src/objects/literal-objects.h | 5 +
 deps/v8/src/objects/lookup-inl.h | 8 +-
 deps/v8/src/objects/lookup.cc | 62 +-
 deps/v8/src/objects/map-inl.h | 10 +-
 deps/v8/src/objects/map-updater.cc | 93 +-
 deps/v8/src/objects/map-updater.h | 7 +-
 deps/v8/src/objects/map.cc | 77 +-
 deps/v8/src/objects/map.h | 9 +-
 deps/v8/src/objects/map.tq | 4 +-
 deps/v8/src/objects/microtask.h | 6 +
 deps/v8/src/objects/module.cc | 40 +-
 deps/v8/src/objects/module.h | 9 +-
 deps/v8/src/objects/object-list-macros.h | 13 +-
 .../objects/objects-body-descriptors-inl.h | 276 +-
 .../v8/src/objects/objects-body-descriptors.h | 6 +-
 deps/v8/src/objects/objects-definitions.h | 16 +-
 deps/v8/src/objects/objects-inl.h | 113 +-
 deps/v8/src/objects/objects.cc | 116 +-
 deps/v8/src/objects/objects.h | 14 +-
 deps/v8/src/objects/oddball.tq | 8 +-
 deps/v8/src/objects/promise.h | 14 +
 deps/v8/src/objects/property-cell-inl.h | 4 +-
 .../src/objects/property-descriptor-object.h | 2 +
 deps/v8/src/objects/property-descriptor.cc | 8 +-
 deps/v8/src/objects/property-details.h | 111 +-
 deps/v8/src/objects/property.cc | 20 +-
 deps/v8/src/objects/prototype-info.tq | 4 +-
 deps/v8/src/objects/scope-info.cc | 8 +-
 deps/v8/src/objects/scope-info.h | 2 +-
 deps/v8/src/objects/script.h | 3 +
 .../v8/src/objects/shared-function-info-inl.h | 63 +-
 deps/v8/src/objects/shared-function-info.cc | 107 +-
 deps/v8/src/objects/shared-function-info.h | 59 +-
 deps/v8/src/objects/shared-function-info.tq | 44 +-
 deps/v8/src/objects/source-text-module.cc | 95 +-
 deps/v8/src/objects/source-text-module.h | 9 +-
 deps/v8/src/objects/stack-frame-info.cc | 55 +-
 deps/v8/src/objects/stack-frame-info.h | 10 +-
 deps/v8/src/objects/string-inl.h | 230 +-
 deps/v8/src/objects/string-table.cc | 28 +-
 deps/v8/src/objects/string.cc | 340 ++-
 deps/v8/src/objects/string.h | 104 +-
 deps/v8/src/objects/string.tq | 3 +-
 deps/v8/src/objects/struct.h | 8 +
 deps/v8/src/objects/synthetic-module.cc | 28 +-
 deps/v8/src/objects/tagged-field.h | 2 +-
 deps/v8/src/objects/template-objects.h | 6 +
 deps/v8/src/objects/templates.cc | 4 +-
 deps/v8/src/objects/templates.h | 10 +
 deps/v8/src/objects/transitions-inl.h | 2 +-
 deps/v8/src/objects/transitions.cc | 8 +-
 deps/v8/src/objects/turbofan-types.h | 9 +-
 deps/v8/src/objects/turbofan-types.tq | 82 +-
 deps/v8/src/objects/value-serializer.cc | 51 +-
 deps/v8/src/objects/visitors.h | 9 +-
 deps/v8/src/parsing/expression-scope.h | 8 +-
 deps/v8/src/parsing/parse-info.cc | 108 +-
 deps/v8/src/parsing/parse-info.h | 164 +-
 deps/v8/src/parsing/parser-base.h | 49 +-
 deps/v8/src/parsing/parser.cc | 384 ++-
 deps/v8/src/parsing/parser.h | 30 +-
 deps/v8/src/parsing/parsing.cc | 14 +-
 deps/v8/src/parsing/preparse-data-impl.h | 4 +-
 deps/v8/src/parsing/preparse-data.cc | 18 +-
 deps/v8/src/parsing/preparse-data.h | 2 +
 .../src/parsing/scanner-character-streams.cc | 145 +-
 deps/v8/src/parsing/scanner.cc | 2 +-
 deps/v8/src/profiler/cpu-profiler.cc | 52 +-
 deps/v8/src/profiler/cpu-profiler.h | 22 +-
 .../src/profiler/heap-snapshot-generator.cc | 83 +-
 .../v8/src/profiler/heap-snapshot-generator.h | 4 +-
 deps/v8/src/profiler/profile-generator.cc | 25 +-
 deps/v8/src/profiler/profile-generator.h | 17 +-
 deps/v8/src/profiler/profiler-listener.cc | 33 +-
 .../v8/src/profiler/sampling-heap-profiler.cc | 10 +-
 deps/v8/src/profiler/symbolizer.cc | 4 +-
 deps/v8/src/profiler/tick-sample.cc | 21 +-
 deps/v8/src/profiler/tick-sample.h | 9 +-
 .../experimental/experimental-bytecode.cc | 12 +-
 .../experimental/experimental-bytecode.h | 4 +-
 .../experimental/experimental-compiler.cc | 2 +-
 .../experimental/experimental-interpreter.cc | 17 +-
 .../loong64/regexp-macro-assembler-loong64.cc | 174 +-
 .../loong64/regexp-macro-assembler-loong64.h | 14 +-
 .../mips/regexp-macro-assembler-mips.cc | 175 +-
 .../regexp/mips/regexp-macro-assembler-mips.h | 14 +-
 .../mips64/regexp-macro-assembler-mips64.cc | 175 +-
 .../mips64/regexp-macro-assembler-mips64.h | 14 +-
 deps/v8/src/regexp/regexp-ast.cc | 16 +-
 deps/v8/src/regexp/regexp-ast.h | 10 +-
 deps/v8/src/regexp/regexp-compiler-tonode.cc | 56 +-
 deps/v8/src/regexp/regexp-compiler.cc | 10 +-
 deps/v8/src/regexp/regexp-dotprinter.cc | 4 +-
 deps/v8/src/regexp/regexp-interpreter.cc | 3 +
 deps/v8/src/regexp/regexp-parser.cc | 230 +-
 deps/v8/src/roots/roots.h | 14 +-
 deps/v8/src/runtime/runtime-classes.cc | 8 +-
 deps/v8/src/runtime/runtime-compiler.cc | 16 +-
 deps/v8/src/runtime/runtime-debug.cc | 10 +-
 deps/v8/src/runtime/runtime-internal.cc | 21 +-
 deps/v8/src/runtime/runtime-literals.cc | 12 +-
 deps/v8/src/runtime/runtime-object.cc | 36 +-
 deps/v8/src/runtime/runtime-scopes.cc | 7 +-
 deps/v8/src/runtime/runtime-strings.cc | 14 +-
 deps/v8/src/runtime/runtime-symbol.cc | 2 +-
 deps/v8/src/runtime/runtime-test.cc | 99 +-
 deps/v8/src/runtime/runtime-trace.cc | 33 +-
 deps/v8/src/runtime/runtime-wasm.cc | 80 +-
 deps/v8/src/runtime/runtime.cc | 2 +
 deps/v8/src/runtime/runtime.h | 8 +-
 deps/v8/src/security/caged-pointer-inl.h | 23 +-
 deps/v8/src/security/caged-pointer.h | 10 +-
 deps/v8/src/security/vm-cage.cc | 271 +-
 deps/v8/src/security/vm-cage.h | 64 +-
 deps/v8/src/snapshot/code-serializer.cc | 43 +-
 deps/v8/src/snapshot/context-serializer.cc | 6 +-
 deps/v8/src/snapshot/deserializer.cc | 34 +-
 deps/v8/src/snapshot/deserializer.h | 2 -
 .../v8/src/snapshot/embedded/embedded-data.cc | 38 +-
 deps/v8/src/snapshot/embedded/embedded-data.h | 33 +-
 deps/v8/src/snapshot/mksnapshot.cc | 2 +-
 deps/v8/src/snapshot/read-only-serializer.cc | 2 +-
 deps/v8/src/snapshot/roots-serializer.cc | 4 +-
 .../v8/src/snapshot/serializer-deserializer.h | 4 +-
 deps/v8/src/snapshot/serializer.cc | 21 +-
 deps/v8/src/snapshot/serializer.h | 17 +-
 deps/v8/src/snapshot/snapshot-source-sink.h | 4 -
 deps/v8/src/snapshot/snapshot.cc | 43 +-
 deps/v8/src/snapshot/snapshot.h | 3 +
 deps/v8/src/strings/string-builder-inl.h | 56 +-
 deps/v8/src/strings/string-builder.cc | 4 +
 deps/v8/src/strings/string-stream.cc | 2 +-
 deps/v8/src/temporal/OWNERS | 2 +
 deps/v8/src/temporal/temporal-parser.cc | 1220 ++++++++
 deps/v8/src/temporal/temporal-parser.h | 147 +
 deps/v8/src/torque/ast.h | 2 +-
 deps/v8/src/torque/constants.h | 21 +-
 deps/v8/src/torque/earley-parser.cc | 6 +-
 deps/v8/src/torque/earley-parser.h | 7 +-
 deps/v8/src/torque/implementation-visitor.cc | 45 +-
 deps/v8/src/torque/instance-type-generator.cc | 31 +-
 deps/v8/src/torque/source-positions.h | 3 +-
 deps/v8/src/torque/torque-parser.cc | 128 +-
 deps/v8/src/torque/type-visitor.cc | 26 +-
 deps/v8/src/torque/types.cc | 14 +-
 deps/v8/src/torque/types.h | 20 +-
 deps/v8/src/trap-handler/trap-handler.h | 2 +-
 deps/v8/src/utils/allocation.cc | 21 +-
 deps/v8/src/utils/allocation.h | 4 +
 deps/v8/src/utils/identity-map.h | 1 +
 .../wasm/baseline/arm/liftoff-assembler-arm.h | 4 +-
 .../baseline/ia32/liftoff-assembler-ia32.h | 2 +-
 deps/v8/src/wasm/baseline/liftoff-assembler.h | 2 +-
 deps/v8/src/wasm/baseline/liftoff-compiler.cc | 198 +-
 .../baseline/mips/liftoff-assembler-mips.h | 2 +-
 .../wasm/baseline/ppc/liftoff-assembler-ppc.h | 247 +-
 .../riscv64/liftoff-assembler-riscv64.h | 866 +++++-
 .../baseline/s390/liftoff-assembler-s390.h | 60 +
 .../wasm/baseline/x64/liftoff-assembler-x64.h | 4 +-
 deps/v8/src/wasm/c-api.cc | 26 +-
 deps/v8/src/wasm/code-space-access.cc | 5 +-
 deps/v8/src/wasm/compilation-environment.h | 21 +-
 deps/v8/src/wasm/function-body-decoder-impl.h | 110 +-
 deps/v8/src/wasm/graph-builder-interface.cc | 69 +-
 deps/v8/src/wasm/init-expr-interface.cc | 4 +-
 deps/v8/src/wasm/init-expr-interface.h | 5 +-
 deps/v8/src/wasm/jump-table-assembler.cc | 22 +-
 deps/v8/src/wasm/memory-protection-key.cc | 8 +-
 deps/v8/src/wasm/memory-protection-key.h | 5 +-
 deps/v8/src/wasm/module-compiler.cc | 201 +-
 deps/v8/src/wasm/module-compiler.h | 8 +-
 deps/v8/src/wasm/module-instantiate.cc | 114 +-
 deps/v8/src/wasm/stacks.h | 63 +-
 deps/v8/src/wasm/streaming-decoder.cc | 23 +-
 deps/v8/src/wasm/wasm-code-manager.cc | 70 +-
 deps/v8/src/wasm/wasm-code-manager.h | 21 +-
 deps/v8/src/wasm/wasm-constants.h | 4 +
 deps/v8/src/wasm/wasm-engine.cc | 37 +-
 deps/v8/src/wasm/wasm-external-refs.cc | 15 +-
 deps/v8/src/wasm/wasm-init-expr.cc | 2 +-
 deps/v8/src/wasm/wasm-init-expr.h | 87 +-
 deps/v8/src/wasm/wasm-js.cc | 130 +-
 deps/v8/src/wasm/wasm-limits.h | 2 +-
 deps/v8/src/wasm/wasm-module-builder.cc | 14 +-
 deps/v8/src/wasm/wasm-module-builder.h | 2 +-
 deps/v8/src/wasm/wasm-module.h | 8 +-
 deps/v8/src/wasm/wasm-objects-inl.h | 81 +-
 deps/v8/src/wasm/wasm-objects.cc | 445 ++-
 deps/v8/src/wasm/wasm-objects.h | 132 +-
 deps/v8/src/wasm/wasm-objects.tq | 45 +-
 deps/v8/src/wasm/wasm-serialization.cc | 4 +
 deps/v8/src/web-snapshot/web-snapshot.cc | 759 +++--
 deps/v8/src/web-snapshot/web-snapshot.h | 48 +-
 deps/v8/test/benchmarks/cpp/cppgc/BUILD.gn | 30 +-
 .../benchmarks/cpp/cppgc/allocation_perf.cc | 2 +-
 .../benchmarks/cpp/cppgc/benchmark_main.cc | 21 +
 .../benchmarks/cpp/cppgc/benchmark_utils.cc | 31 +
 .../cpp/cppgc/{utils.h => benchmark_utils.h} | 29 +-
 .../benchmarks/cpp/cppgc/binary-trees_perf.cc | 105 +
 .../test/benchmarks/cpp/cppgc/trace_perf.cc | 2 +-
 deps/v8/test/cctest/BUILD.gn | 1 +
 deps/v8/test/cctest/cctest.cc | 13 +-
 deps/v8/test/cctest/cctest.status | 14 +-
 deps/v8/test/cctest/compiler/c-signature.h | 17 +
 .../test-concurrent-shared-function-info.cc | 14 +-
 .../test-run-bytecode-graph-builder.cc | 3 +-
 .../test-run-calls-to-external-references.cc | 871 +++---
 .../test/cctest/compiler/test-run-machops.cc | 319 ++-
 deps/v8/test/cctest/heap/heap-utils.cc | 2 +-
 deps/v8/test/cctest/heap/test-alloc.cc | 1 +
 .../cctest/heap/test-array-buffer-tracker.cc | 4 +-
 deps/v8/test/cctest/heap/test-compaction.cc | 10 +-
 .../cctest/heap/test-concurrent-allocation.cc | 84 +-
 .../cctest/heap/test-concurrent-marking.cc | 1 -
 .../test/cctest/heap/test-embedder-tracing.cc | 15 -
 .../heap/test-external-string-tracker.cc | 2 +-
 deps/v8/test/cctest/heap/test-heap.cc | 121 +-
 deps/v8/test/cctest/heap/test-lab.cc | 13 +-
 deps/v8/test/cctest/heap/test-mark-compact.cc | 35 +-
 .../test/cctest/heap/test-page-promotion.cc | 66 -
 deps/v8/test/cctest/heap/test-shared-heap.cc | 83 +-
 deps/v8/test/cctest/heap/test-spaces.cc | 44 +-
 .../test/cctest/heap/test-weak-references.cc | 3 +-
 .../bytecode_expectations/AsyncModules.golden | 1 -
 .../ClassDeclarations.golden | 12 +-
 .../bytecode_expectations/NewAndSpread.golden | 12 +-
 .../PrivateAccessorAccess.golden | 58 +-
 .../PrivateAccessorDeclaration.golden | 36 +-
 .../PrivateClassFields.golden | 20 +-
 .../PrivateMethodAccess.golden | 28 +-
 .../PrivateMethodDeclaration.golden | 20 +-
 .../PublicClassFields.golden | 20 +-
 .../StaticClassFields.golden | 20 +-
 .../StaticPrivateMethodAccess.golden | 171 +-
 .../StaticPrivateMethodDeclaration.golden | 20 +-
 .../generate-bytecode-expectations.cc | 15 +-
 .../interpreter/test-bytecode-generator.cc | 3 -
 .../cctest/interpreter/test-interpreter.cc | 27 +-
 deps/v8/test/cctest/parsing/test-preparser.cc | 16 +-
 .../cctest/parsing/test-scanner-streams.cc | 61 +-
 deps/v8/test/cctest/test-api-icu.cc | 2 +-
 deps/v8/test/cctest/test-api-typed-array.cc | 18 +
 deps/v8/test/cctest/test-api-wasm.cc | 19 +-
 deps/v8/test/cctest/test-api.cc | 405 ++-
 deps/v8/test/cctest/test-assembler-arm64.cc | 2 +-
 deps/v8/test/cctest/test-assembler-ia32.cc | 2 +-
 deps/v8/test/cctest/test-assembler-riscv64.cc | 107 +-
 deps/v8/test/cctest/test-assembler-s390.cc | 6 +-
 deps/v8/test/cctest/test-assembler-x64.cc | 50 +-
 deps/v8/test/cctest/test-atomicops.cc | 22 +
 .../test/cctest/test-code-stub-assembler.cc | 6 +-
 .../cctest/test-concurrent-feedback-vector.cc | 30 +-
 .../test-concurrent-transition-array.cc | 16 +-
 deps/v8/test/cctest/test-conversions.cc | 81 +-
 deps/v8/test/cctest/test-cpu-profiler.cc | 264 +-
 deps/v8/test/cctest/test-debug.cc | 115 +-
 deps/v8/test/cctest/test-decls.cc | 10 -
 deps/v8/test/cctest/test-descriptor-array.cc | 2 +-
 deps/v8/test/cctest/test-disasm-ia32.cc | 63 +-
 deps/v8/test/cctest/test-disasm-riscv64.cc | 8 +
 deps/v8/test/cctest/test-disasm-x64.cc | 9 +
 deps/v8/test/cctest/test-feedback-vector.cc | 68 +-
 .../test/cctest/test-field-type-tracking.cc | 73 +-
 deps/v8/test/cctest/test-heap-profiler.cc | 18 +-
 deps/v8/test/cctest/test-intl.cc | 3 +-
 deps/v8/test/cctest/test-js-weak-refs.cc | 2 +
 deps/v8/test/cctest/test-log-stack-tracer.cc | 2 +-
 .../test/cctest/test-macro-assembler-x64.cc | 5 +-
 deps/v8/test/cctest/test-modules.cc | 1163 ++++----
 deps/v8/test/cctest/test-object.cc | 53 +-
 deps/v8/test/cctest/test-orderedhashtable.cc | 16 +-
 deps/v8/test/cctest/test-parsing.cc | 245 +-
 deps/v8/test/cctest/test-profile-generator.cc | 39 +-
 deps/v8/test/cctest/test-regexp.cc | 11 -
 deps/v8/test/cctest/test-sampler-api.cc | 30 +-
 deps/v8/test/cctest/test-serialize.cc | 587 +++-
 deps/v8/test/cctest/test-shared-strings.cc | 203 +-
 deps/v8/test/cctest/test-strings.cc | 4 +-
 .../test-swiss-name-dictionary-shared-tests.h | 4 +-
 deps/v8/test/cctest/test-temporal-parser.cc | 2504 +++++++++++++++++
 deps/v8/test/cctest/test-transitions.cc | 29 +-
 deps/v8/test/cctest/test-types.cc | 4 +-
 deps/v8/test/cctest/test-utils.cc | 3 -
 deps/v8/test/cctest/test-weakmaps.cc | 19 +-
 deps/v8/test/cctest/test-weaksets.cc | 12 +-
 deps/v8/test/cctest/test-web-snapshots.cc | 90 +
 .../cctest/wasm/test-compilation-cache.cc | 78 +
 deps/v8/test/cctest/wasm/test-gc.cc | 34 +-
 .../cctest/wasm/test-jump-table-assembler.cc | 40 +-
 .../cctest/wasm/test-run-wasm-atomics64.cc | 24 +-
 .../cctest/wasm/test-run-wasm-relaxed-simd.cc | 60 +-
 .../cctest/wasm/test-run-wasm-wrappers.cc | 10 +-
 deps/v8/test/cctest/wasm/test-run-wasm.cc | 16 +-
 .../cctest/wasm/test-streaming-compilation.cc | 14 +-
 .../test/cctest/wasm/test-wasm-breakpoints.cc | 6 +-
 .../cctest/wasm/test-wasm-serialization.cc | 30 +
 deps/v8/test/cctest/wasm/test-wasm-stack.cc | 16 +-
 .../cctest/wasm/test-wasm-trap-position.cc | 26 +-
 deps/v8/test/cctest/wasm/wasm-run-utils.cc | 12 +-
 deps/v8/test/common/flag-utils.h | 6 +-
 deps/v8/test/common/wasm/wasm-interpreter.cc | 44 +-
 .../debug/debug-break-class-fields.js | 30 +-
 deps/v8/test/debugger/debugger.status | 10 +-
 .../test/debugger/regress/regress-1145119.js | 31 +
 .../debugger/regress/regress-crbug-1259878.js | 10 +
 deps/v8/test/fuzzer/fuzzer-support.cc | 2 +-
 deps/v8/test/fuzzer/parser.cc | 5 +-
 deps/v8/test/fuzzer/wasm-compile.cc | 87 +-
 deps/v8/test/fuzzer/wasm-fuzzer-common.cc | 315 ++-
 .../debugger/class-fields-scopes-expected.txt | 24 +-
 .../class-private-methods-unused-expected.txt | 6 +-
 .../debugger/debugger-statement-expected.txt | 4 +
 .../inspector/debugger/debugger-statement.js | 26 +
 ...ible-breakpoints-class-fields-expected.txt | 60 +-
 .../debugger/other-pause-reasons-expected.txt | 23 +
 .../inspector/debugger/other-pause-reasons.js | 126 +
 ...et-instrumentation-breakpoint-expected.txt | 7 -
 .../set-instrumentation-breakpoint.js | 40 +-
 deps/v8/test/inspector/inspector.status | 27 +-
 .../regress-crbug-1274529-expected.txt | 9 +
 .../regress/regress-crbug-1274529.js | 25 +
 .../date-format/timezone-name-extended.js | 2 -
 deps/v8/test/intl/displaynames/calendar-v2.js | 2 -
 .../intl/displaynames/constructor-order-v2.js | 2 -
 .../test/intl/displaynames/constructor-v2.js | 2 -
 .../intl/displaynames/date-time-field-v2.js | 2 -
 .../intl/displaynames/resolved-options-v2.js | 2 -
 .../locale/locale-info-check-return-types.js | 19 +-
 deps/v8/test/intl/regress-10529.js | 6 +-
 deps/v8/test/intl/string-localecompare.js | 85 +-
 .../test/js-perf-test/BigInt/bigint-util.js | 5 +-
 deps/v8/test/js-perf-test/BigInt/shift.js | 266 ++
 deps/v8/test/js-perf-test/ClassFields.json | 8 +
 .../test/js-perf-test/ClassFields/classes.js | 27 +
 .../ClassFields/evaluate-class.js | 8 +
 .../ClassFields/initialize-instance.js | 8 +
 deps/v8/test/js-perf-test/JSTests1.json | 24 +
 deps/v8/test/js-perf-test/JSTests3.json | 10 +-
 deps/v8/test/js-perf-test/JSTests5.json | 3 +-
 ...cessors-private-undefined-getter-count.out | 4 +-
 ...essors-private-undefined-getter-nested.out | 4 +-
 ...ass-accessors-private-undefined-getter.out | 4 +-
 ...sors-private-undefined-setter-compound.out | 4 +-
 ...cessors-private-undefined-setter-count.out | 4 +-
 ...essors-private-undefined-setter-nested.out | 4 +-
 ...ass-accessors-private-undefined-setter.out | 4 +-
 ...-methods-private-brand-check-anonymous.out | 4 +-
 .../class-methods-private-brand-check.out | 4 +-
 .../class-methods-private-throw-write.out | 4 +-
 ...ivate-brand-compound-assign-getter-only.js | 10 +
 ...vate-brand-compound-assign-getter-only.out | 6 +
 ...ss-private-brand-compound-assign-method.js | 10 +
 ...s-private-brand-compound-assign-method.out | 6 +
 ...ivate-brand-compound-assign-setter-only.js | 10 +
 ...vate-brand-compound-assign-setter-only.out | 6 +
 .../class-private-brand-count-getter-only.js | 10 +
 .../class-private-brand-count-getter-only.out | 6 +
 .../fail/class-private-brand-count-method.js | 10 +
 .../fail/class-private-brand-count-method.out | 6 +
 .../class-private-brand-count-setter-only.js | 10 +
 .../class-private-brand-count-setter-only.out | 6 +
 .../class-private-brand-write-getter-only.js | 10 +
 .../class-private-brand-write-getter-only.out | 6 +
 .../fail/class-private-brand-write-method.js | 10 +
 .../fail/class-private-brand-write-method.out | 6 +
 .../class-private-brand-write-setter-only.js | 10 +
 .../class-private-brand-write-setter-only.out | 6 +
 ...vate-static-compound-assign-getter-only.js | 10 +
 ...ate-static-compound-assign-getter-only.out | 6 +
 ...s-private-static-compound-assign-method.js | 10 +
 ...-private-static-compound-assign-method.out | 6 +
 ...vate-static-compound-assign-setter-only.js | 10 +
 ...ate-static-compound-assign-setter-only.out | 6 +
 .../class-private-static-count-getter-only.js | 10 +
 ...class-private-static-count-getter-only.out | 6 +
 .../fail/class-private-static-count-method.js | 10 +
 .../class-private-static-count-method.out | 6 +
 .../class-private-static-count-setter-only.js | 10 +
 ...class-private-static-count-setter-only.out | 6 +
 .../class-private-static-write-getter-only.js | 10 +
 ...class-private-static-write-getter-only.out | 6 +
 .../fail/class-private-static-write-method.js | 10 +
 .../class-private-static-write-method.out | 6 +
 .../class-private-static-write-setter-only.js | 10 +
 ...class-private-static-write-setter-only.out | 6 +
 .../modules-import-top-level-await-fail-1.mjs | 9 -
 .../modules-import-top-level-await-fail-1.out | 3 -
 .../modules-import-top-level-await-fail-2.out | 3 -
 .../throw-during-IteratorCloseOnException.js | 10 +
 .../throw-during-IteratorCloseOnException.out | 6 +
 .../mjsunit/fail/regress/regress-1267172.js | 6 +
 .../mjsunit/fail/regress/regress-1267172.out | 4 +
 .../regress/fail/regress-crbug-1265570.js | 14 +
 .../regress/fail/regress-crbug-1265570.out | 12 +
 .../mjsunit/code-coverage-class-fields.js | 14 +-
 .../mjsunit/compiler/fast-api-sequences.js | 82 +-
 .../mjsunit/compiler/inline-private-method.js | 22 +
 deps/v8/test/mjsunit/d8/d8-worker-shutdown.js | 6 +-
 .../dataview-growablesharedarraybuffer.js | 113 +
 .../dataview-resizablearraybuffer-detach.js | 193 ++
 .../mjsunit/dataview-resizablearraybuffer.js | 434 +++
 deps/v8/test/mjsunit/dump-counters-quit.js | 18 +
 deps/v8/test/mjsunit/dump-counters.js | 15 +
 deps/v8/test/mjsunit/escape.js | 50 +-
 .../import-from-compilation-errored.js | 2 +-
 .../harmony/import-from-evaluation-errored.js | 2 +-
 .../harmony/import-from-fetch-errored.js | 2 +-
 .../import-from-instantiation-errored.js | 2 +-
 .../test/mjsunit/harmony/modules-import-1.mjs | 2 +-
 .../mjsunit/harmony/modules-import-10.mjs | 2 +-
 .../mjsunit/harmony/modules-import-11.mjs | 2 +-
 .../mjsunit/harmony/modules-import-12.mjs | 2 +-
 .../mjsunit/harmony/modules-import-13.mjs | 2 +-
 .../mjsunit/harmony/modules-import-14.mjs | 2 +-
 .../modules-import-15-top-level-await.mjs | 1 -
 .../mjsunit/harmony/modules-import-15.mjs | 2 +-
 .../mjsunit/harmony/modules-import-16.mjs | 2 +-
 .../test/mjsunit/harmony/modules-import-2.mjs | 2 +-
 .../test/mjsunit/harmony/modules-import-3.mjs | 2 +-
 .../test/mjsunit/harmony/modules-import-4.mjs | 1 -
 .../test/mjsunit/harmony/modules-import-5.mjs | 2 +-
 .../test/mjsunit/harmony/modules-import-6.mjs | 2 +-
 .../test/mjsunit/harmony/modules-import-7.mjs | 2 +-
 .../test/mjsunit/harmony/modules-import-8.mjs | 2 +-
 .../test/mjsunit/harmony/modules-import-9.mjs | 2 +-
 .../modules-import-top-level-await-1.mjs | 2 +-
 .../modules-import-top-level-await-2.mjs | 2 +-
 .../modules-import-top-level-await-3.mjs | 2 +-
 .../modules-import-top-level-await-4.mjs | 2 +-
 .../modules-import-top-level-await-5.mjs | 2 +-
 .../modules-import-top-level-await-6.mjs | 2 +-
 .../modules-import-top-level-await-7.mjs | 2 +-
 .../modules-import-top-level-await-8.mjs | 2 +-
 .../modules-import-top-level-await-cycle.mjs | 2 +-
 ...les-import-top-level-await-exception-1.mjs | 2 +-
 ...les-import-top-level-await-exception-2.mjs | 2 +-
 ...les-import-top-level-await-exception-3.mjs | 2 +-
 .../test/mjsunit/harmony/modules-skip-5.mjs | 2 +-
 .../test/mjsunit/harmony/modules-skip-6.mjs | 2 +-
 .../test/mjsunit/harmony/modules-skip-7.mjs | 2 +-
 .../test/mjsunit/harmony/private-accessors.js | 105 +
 .../harmony/private-fields-special-object.js | 2 +-
 .../mjsunit/harmony/regexp-named-captures.js | 24 +-
 ...nregistry-independent-lifetime-multiple.js | 32 +
 deps/v8/test/mjsunit/mjsunit.status | 26 +-
 .../v8/test/mjsunit/parallel-compile-tasks.js | 2 +-
 .../test/mjsunit/regress/regress-1230597.js | 35 +
 .../regress/regress-12359.js} | 13 +-
 .../test/mjsunit/regress/regress-1273677.js | 20 +
 deps/v8/test/mjsunit/regress/regress-4578.js | 95 +
 .../v8/test/mjsunit/regress/regress-744292.js | 2 +-
 .../v8/test/mjsunit/regress/regress-746909.js | 1 -
 .../v8/test/mjsunit/regress/regress-797581.js | 2 +-
 .../mjsunit/regress/regress-crbug-1265043.js | 19 +
 .../mjsunit/regress/regress-crbug-1272026.js | 12 +
 .../test/mjsunit/regress/regress-v8-10384.js | 12 +
 .../test/mjsunit/regress/regress-v8-11360.js | 12 +-
 .../mjsunit/regress/wasm/regress-1084151.js | 2 -
 .../mjsunit/regress/wasm/regress-11024.js | 23 +-
 .../mjsunit/regress/wasm/regress-11206.js | 3 +-
 .../mjsunit/regress/wasm/regress-1179065.js | 4 +-
 .../mjsunit/regress/wasm/regress-1271456.js | 31 +
 .../mjsunit/regress/wasm/regress-1273705.js | 7 +
 .../mjsunit/regress/wasm/regress-875556.js | 3 +-
 ...learraybuffer-growablesharedarraybuffer.js | 8 -
 .../typedarray-growablesharedarraybuffer.js | 4 -
 deps/v8/test/mjsunit/typedarray-helpers.js | 56 +
 deps/v8/test/mjsunit/wasm/embenchen/box2d.js | 2 +-
 .../mjsunit/wasm/embenchen/corrections.js | 2 +-
 .../mjsunit/wasm/embenchen/lua_binarytrees.js | 2 +-
 deps/v8/test/mjsunit/wasm/externref-table.js | 68 +-
 ...oad-elimination.js => gc-optimizations.js} | 87 +-
 .../test/mjsunit/wasm/grow-memory-in-call.js | 2 +-
 deps/v8/test/mjsunit/wasm/inlining.js | 63 +
 deps/v8/test/mjsunit/wasm/loop-unrolling.js | 54 +-
 deps/v8/test/mjsunit/wasm/module-memory.js | 2 +-
 deps/v8/test/mjsunit/wasm/multi-value-simd.js | 2 +-
 deps/v8/test/mjsunit/wasm/multi-value.js | 2 -
 deps/v8/test/mjsunit/wasm/reference-tables.js | 2 +
 .../mjsunit/wasm/serialize-lazy-module.js | 12 +-
 .../test/mjsunit/wasm/speculative-inlining.js | 140 +-
 deps/v8/test/mjsunit/wasm/stack-switching.js | 25 +-
 ...est-serialization-with-lazy-compilation.js | 8 +-
 .../mjsunit/wasm/type-reflection-with-mv.js | 2 +-
 .../test/mjsunit/wasm/wasm-dynamic-tiering.js | 17 +-
 .../test/mjsunit/web-snapshot/web-snapshot.js | 85 +-
 deps/v8/test/mkgrokdump/mkgrokdump.cc | 5 +-
 deps/v8/test/test262/test262.status | 213 +-
 deps/v8/test/test262/testcfg.py | 3 -
 deps/v8/test/torque/test-torque.tq | 6 +-
 deps/v8/test/unittests/BUILD.gn | 11 +-
 .../compiler-dispatcher-unittest.cc | 346 ++-
 .../compiler/linkage-tail-call-unittest.cc | 3 +
 .../unittests/heap/base/worklist-unittest.cc | 60 +-
 .../traced-reference-unittest.cc | 14 +-
 .../unified-heap-snapshot-unittest.cc | 2 +-
 .../{ => cppgc-js}/unified-heap-unittest.cc | 27 +-
 .../heap/{ => cppgc-js}/unified-heap-utils.cc | 2 +-
 .../heap/{ => cppgc-js}/unified-heap-utils.h | 6 +-
 .../heap/cppgc/compactor-unittest.cc | 1 +
 .../heap/cppgc/garbage-collected-unittest.cc | 12 +
 .../unittests/heap/cppgc/heap-unittest.cc | 35 +
 .../unittests/heap/cppgc/marker-unittest.cc | 2 +-
 .../heap/cppgc/page-memory-unittest.cc | 2 +-
 .../heap/cppgc/persistent-family-unittest.cc | 38 +
 deps/v8/test/unittests/heap/cppgc/tests.h | 10 +-
 .../heap/cppgc/workloads-unittest.cc | 2 +-
 .../heap/embedder-tracing-unittest.cc | 4 +-
 .../unittests/heap/local-factory-unittest.cc | 26 +-
 .../test/unittests/heap/unmapper-unittest.cc | 6 +-
 .../test/unittests/heap/worklist-unittest.cc | 346 ---
 .../objects/value-serializer-unittest.cc | 18 +-
 deps/v8/test/unittests/run-all-unittests.cc | 4 +-
 .../security/virtual-memory-cage-unittest.cc | 52 +-
 .../tasks/background-compile-task-unittest.cc | 53 +-
 deps/v8/test/unittests/test-helpers.cc | 17 +-
 deps/v8/test/unittests/test-helpers.h | 9 +-
 .../test/unittests/torque/torque-unittest.cc | 2 +-
 .../wasm/memory-protection-unittest.cc | 2 +-
 .../wasm/trap-handler-x64-unittest.cc | 14 +-
 deps/v8/test/wasm-js/tests.tar.gz.sha1 | 2 +-
 deps/v8/test/wasm-js/wasm-js.status | 7 +
 .../v8/test/wasm-spec-tests/tests.tar.gz.sha1 | 2 +-
 .../wasm-spec-tests/wasm-spec-tests.status | 9 +
 deps/v8/test/webkit/webkit.status | 3 +-
 .../crdtp/json_platform_v8.cc | 3 +-
 deps/v8/third_party/zlib/google/zip.cc | 4 +-
 deps/v8/third_party/zlib/google/zip.h | 7 +
 deps/v8/third_party/zlib/google/zip_reader.cc | 1 -
 deps/v8/third_party/zlib/google/zip_reader.h | 1 -
 .../zlib/google/zip_reader_unittest.cc | 9 +-
 .../third_party/zlib/google/zip_unittest.cc | 1 -
 deps/v8/third_party/zlib/google/zip_writer.cc | 19 +-
 deps/v8/third_party/zlib/google/zip_writer.h | 8 +
 deps/v8/tools/blink_tests/TestExpectations | 0
 .../clusterfuzz/testdata/failure_output.txt | 2 +-
 .../testdata/failure_output_arch.txt | 2 +-
 .../testdata/failure_output_second.txt | 2 +-
 .../testdata/smoke_test_output.txt | 2 +-
 deps/v8/tools/clusterfuzz/v8_foozzie.py | 12 +-
 deps/v8/tools/eval_gc_time.sh | 1 -
 deps/v8/tools/gen-postmortem-metadata.py | 7 +-
 .../tools/process-wasm-compilation-times.py | 4 +-
 deps/v8/tools/profile.mjs | 4 +-
 deps/v8/tools/system-analyzer/helper.mjs | 6 +-
 .../tools/system-analyzer/view/list-panel.mjs | 4 +-
 deps/v8/tools/testrunner/base_runner.py | 6 -
 .../v8/tools/testrunner/local/junit_output.py | 49 -
 deps/v8/tools/testrunner/local/variants.py | 8 +-
 deps/v8/tools/testrunner/num_fuzzer.py | 5 +-
 deps/v8/tools/testrunner/testproc/fuzzer.py | 21 +-
 deps/v8/tools/testrunner/testproc/progress.py | 40 -
 deps/v8/tools/try_perf.py | 5 -
 deps/v8/tools/v8heapconst.py | 708 ++---
 .../tools/v8windbg/src/v8windbg-extension.cc | 6 +-
 deps/v8/tools/whitespace.txt | 4 +-
 1131 files changed, 37260 insertions(+), 16586 deletions(-)
 create mode 100644 deps/v8/.vpython3
 create mode 100644 deps/v8/bazel/generate-inspector-files.cmd
 create mode 100644 deps/v8/include/v8-embedder-state-scope.h
 create mode 100644 deps/v8/src/base/emulated-virtual-address-subspace.cc
 create mode 100644 deps/v8/src/base/emulated-virtual-address-subspace.h
 create mode 100644 deps/v8/src/base/platform/yield-processor.h
 create mode 100644 deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc
 create mode 100644 deps/v8/src/base/sanitizer/lsan-virtual-address-space.h
 create mode 100644 deps/v8/src/base/virtual-address-space-page-allocator.cc
 create mode 100644 deps/v8/src/base/virtual-address-space-page-allocator.h
 create mode 100644 deps/v8/src/base/virtual-address-space.cc
 create mode 100644 deps/v8/src/base/virtual-address-space.h
 create mode 100644 deps/v8/src/bigint/CPPLINT.cfg
 create mode 100644 deps/v8/src/codegen/ia32/fma-instr.h
 create mode 100644 deps/v8/src/common/high-allocation-throughput-scope.h
 create mode 100644 deps/v8/src/execution/DEPS
 create mode 100644 deps/v8/src/execution/embedder-state.cc
 create mode 100644 deps/v8/src/execution/embedder-state.h
 create mode 100644 deps/v8/src/execution/encoded-c-signature.cc
 create mode 100644 deps/v8/src/execution/encoded-c-signature.h
 create mode 100644 deps/v8/src/heap/heap-layout-tracer.cc
 create mode 100644 deps/v8/src/heap/heap-layout-tracer.h
 delete mode 100644 deps/v8/src/heap/worklist.h
 create mode 100644 deps/v8/src/temporal/OWNERS
 create mode 100644 deps/v8/src/temporal/temporal-parser.cc
 create mode 100644 deps/v8/src/temporal/temporal-parser.h
 create mode 100644 deps/v8/test/benchmarks/cpp/cppgc/benchmark_main.cc
 create mode 100644 deps/v8/test/benchmarks/cpp/cppgc/benchmark_utils.cc
 rename deps/v8/test/benchmarks/cpp/cppgc/{utils.h => benchmark_utils.h} (53%)
 create mode 100644 deps/v8/test/benchmarks/cpp/cppgc/binary-trees_perf.cc
 create mode 100644 deps/v8/test/cctest/test-temporal-parser.cc
 create mode 100644 deps/v8/test/debugger/regress/regress-1145119.js
 create mode 100644 deps/v8/test/debugger/regress/regress-crbug-1259878.js
 create mode 100644 deps/v8/test/inspector/debugger/debugger-statement-expected.txt
 create mode 100644 deps/v8/test/inspector/debugger/debugger-statement.js
 create mode 100644 deps/v8/test/inspector/debugger/other-pause-reasons-expected.txt
 create mode 100644 deps/v8/test/inspector/debugger/other-pause-reasons.js
 create mode 100644 deps/v8/test/inspector/regress/regress-crbug-1274529-expected.txt
 create mode 100644 deps/v8/test/inspector/regress/regress-crbug-1274529.js
 create mode 100644 deps/v8/test/js-perf-test/BigInt/shift.js
 create mode 100644 deps/v8/test/message/fail/class-private-brand-compound-assign-getter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-brand-compound-assign-getter-only.out
 create mode 100644 deps/v8/test/message/fail/class-private-brand-compound-assign-method.js
 create mode 100644 deps/v8/test/message/fail/class-private-brand-compound-assign-method.out
 create mode 100644 deps/v8/test/message/fail/class-private-brand-compound-assign-setter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-brand-compound-assign-setter-only.out
 create mode 100644 deps/v8/test/message/fail/class-private-brand-count-getter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-brand-count-getter-only.out
 create mode 100644 deps/v8/test/message/fail/class-private-brand-count-method.js
 create mode 100644 deps/v8/test/message/fail/class-private-brand-count-method.out
 create mode 100644 deps/v8/test/message/fail/class-private-brand-count-setter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-brand-count-setter-only.out
 create mode 100644 deps/v8/test/message/fail/class-private-brand-write-getter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-brand-write-getter-only.out
 create mode 100644 deps/v8/test/message/fail/class-private-brand-write-method.js
 create mode 100644 deps/v8/test/message/fail/class-private-brand-write-method.out
 create mode 100644 deps/v8/test/message/fail/class-private-brand-write-setter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-brand-write-setter-only.out
 create mode 100644 deps/v8/test/message/fail/class-private-static-compound-assign-getter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-static-compound-assign-getter-only.out
 create mode 100644 deps/v8/test/message/fail/class-private-static-compound-assign-method.js
 create mode 100644 deps/v8/test/message/fail/class-private-static-compound-assign-method.out
 create mode 100644 deps/v8/test/message/fail/class-private-static-compound-assign-setter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-static-compound-assign-setter-only.out
 create mode 100644 deps/v8/test/message/fail/class-private-static-count-getter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-static-count-getter-only.out
 create mode 100644 deps/v8/test/message/fail/class-private-static-count-method.js
 create mode 100644 deps/v8/test/message/fail/class-private-static-count-method.out
 create mode 100644 deps/v8/test/message/fail/class-private-static-count-setter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-static-count-setter-only.out
 create mode 100644 deps/v8/test/message/fail/class-private-static-write-getter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-static-write-getter-only.out
 create mode 100644 deps/v8/test/message/fail/class-private-static-write-method.js
 create mode 100644 deps/v8/test/message/fail/class-private-static-write-method.out
 create mode 100644 deps/v8/test/message/fail/class-private-static-write-setter-only.js
 create mode 100644 deps/v8/test/message/fail/class-private-static-write-setter-only.out
 delete mode 100644 deps/v8/test/message/fail/modules-import-top-level-await-fail-1.mjs
 delete mode 100644 deps/v8/test/message/fail/modules-import-top-level-await-fail-1.out
 delete mode 100644 deps/v8/test/message/fail/modules-import-top-level-await-fail-2.out
 create mode 100644 deps/v8/test/message/fail/throw-during-IteratorCloseOnException.js
 create mode 100644 deps/v8/test/message/fail/throw-during-IteratorCloseOnException.out
 create mode 100644 deps/v8/test/message/mjsunit/fail/regress/regress-1267172.js
 create mode 100644 deps/v8/test/message/mjsunit/fail/regress/regress-1267172.out
 create mode 100644 deps/v8/test/message/regress/fail/regress-crbug-1265570.js
 create mode 100644 deps/v8/test/message/regress/fail/regress-crbug-1265570.out
 create mode 100644 deps/v8/test/mjsunit/compiler/inline-private-method.js
 create mode 100644 deps/v8/test/mjsunit/dataview-growablesharedarraybuffer.js
 create mode 100644 deps/v8/test/mjsunit/dataview-resizablearraybuffer-detach.js
 create mode 100644 deps/v8/test/mjsunit/dataview-resizablearraybuffer.js
 create mode 100644 deps/v8/test/mjsunit/dump-counters-quit.js
 create mode 100644 deps/v8/test/mjsunit/dump-counters.js
 create mode 100644 deps/v8/test/mjsunit/harmony/weakrefs/finalizationregistry-independent-lifetime-multiple.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-1230597.js
 rename deps/v8/test/{message/fail/modules-import-top-level-await-fail-2.mjs => mjsunit/regress/regress-12359.js} (52%)
 create mode 100644 deps/v8/test/mjsunit/regress/regress-1273677.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-4578.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1265043.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-1272026.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-v8-10384.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1271456.js
 create mode 100644 deps/v8/test/mjsunit/regress/wasm/regress-1273705.js
 rename deps/v8/test/mjsunit/wasm/{load-elimination.js => gc-optimizations.js} (79%)
 rename deps/v8/test/unittests/heap/{ => cppgc-js}/traced-reference-unittest.cc (96%)
 rename deps/v8/test/unittests/heap/{ => cppgc-js}/unified-heap-snapshot-unittest.cc (99%)
 rename deps/v8/test/unittests/heap/{ => cppgc-js}/unified-heap-unittest.cc (93%)
 rename deps/v8/test/unittests/heap/{ => cppgc-js}/unified-heap-utils.cc (98%)
 rename deps/v8/test/unittests/heap/{ => cppgc-js}/unified-heap-utils.h (93%)
 delete mode 100644 deps/v8/test/unittests/heap/worklist-unittest.cc
 delete mode 100644 deps/v8/tools/blink_tests/TestExpectations
 delete mode 100644 deps/v8/tools/testrunner/local/junit_output.py

diff --git a/deps/v8/.vpython3 b/deps/v8/.vpython3
new file mode 100644
index 00000000000000..95e52ee59ebea6
--- /dev/null
+++ b/deps/v8/.vpython3
@@ -0,0 +1,46 @@
+# This is a vpython "spec" file.
+#
+# It describes patterns for python wheel dependencies of the python scripts in
+# the chromium repo, particularly for dependencies that have compiled components
+# (since pure-python dependencies can be easily vendored into third_party).
+#
+# When vpython is invoked, it finds this file and builds a python VirtualEnv,
+# containing all of the dependencies described in this file, fetching them from
+# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
+# this never requires the end-user machine to have a working python extension
+# compilation environment. All of these packages are built using:
+# https://chromium.googlesource.com/infra/infra/+/main/infra/tools/dockerbuild/
+#
+# All python scripts in the repo share this same spec, to avoid dependency
+# fragmentation.
+#
+# If you have depot_tools installed in your $PATH, you can invoke python scripts
+# in this repo by running them as you normally would run them, except
+# substituting `vpython` instead of `python` on the command line, e.g.:
+#   vpython path/to/script.py some --arguments
+#
+# Read more about `vpython` and how to modify this file here:
+#   https://chromium.googlesource.com/infra/infra/+/main/doc/users/vpython.md
+
+python_version: "3.8"
+
+# The default set of platforms vpython checks does not yet include mac-arm64.
+# Setting `verify_pep425_tag` to the list of platforms we explicitly must support
+# allows us to ensure that vpython specs stay mac-arm64-friendly
+verify_pep425_tag: [
+    {python: "cp38", abi: "cp38", platform: "manylinux1_x86_64"},
+    {python: "cp38", abi: "cp38", platform: "linux_arm64"},
+
+    {python: "cp38", abi: "cp38", platform: "macosx_10_10_intel"},
+    {python: "cp38", abi: "cp38", platform: "macosx_11_0_arm64"},
+
+    {python: "cp38", abi: "cp38", platform: "win32"},
+    {python: "cp38", abi: "cp38", platform: "win_amd64"}
+]
+
+# TODO(https://crbug.com/898348): Add in necessary wheels as Python3 versions
+# become available.
+wheel: < + name: "infra/python/wheels/six-py2_py3" + version: "version:1.15.0" +> diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 46dd9fb1aaf72f..b89eacba9f18e5 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -149,6 +149,7 @@ Loo Rong Jie Lu Yahan Luis Reis Luke Zarko +Ma Aiguo Maciej Małecki Marcel Laverdet Marcin Cieślak diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel index ff69465f0ebf3b..1bad423e03d7a6 100644 --- a/deps/v8/BUILD.bazel +++ b/deps/v8/BUILD.bazel @@ -44,7 +44,6 @@ config_setting( # v8_enable_trace_baseline_exec # v8_enable_trace_feedback_updates # v8_enable_atomic_object_field_writes -# v8_enable_atomic_marking_state # v8_enable_concurrent_marking # v8_enable_ignition_dispatch_counting # v8_enable_builtins_profiling @@ -322,6 +321,14 @@ v8_config( "V8_HAVE_TARGET_OS", "V8_TARGET_OS_MACOSX", ], + "@config//:is_windows": [ + "V8_HAVE_TARGET_OS", + "V8_TARGET_OS_WIN", + "UNICODE", + "_UNICODE", + "_CRT_RAND_S", + "_WIN32_WINNT=0x0602", + ], }) + select({ ":is_v8_enable_pointer_compression": [ "V8_COMPRESS_POINTERS", @@ -351,6 +358,19 @@ v8_config( # File group rules # ================================================= +filegroup( + name = "public_header_files", + srcs = glob(["include/**/*.h"]), +) + +filegroup( + name = "public_wasm_c_api_header_files", + srcs = [ + "third_party/wasm-api/wasm.h", + "third_party/wasm-api/wasm.hh", + ], +) + filegroup( name = "v8_config_headers_files", srcs = [ @@ -425,6 +445,7 @@ filegroup( "include/v8-date.h", "include/v8-debug.h", "include/v8-embedder-heap.h", + "include/v8-embedder-state-scope.h", "include/v8-exception.h", "include/v8-extension.h", "include/v8-external.h", @@ -520,6 +541,8 @@ filegroup( "src/base/debug/stack_trace.h", "src/base/division-by-constant.cc", "src/base/division-by-constant.h", + "src/base/emulated-virtual-address-subspace.cc", + "src/base/emulated-virtual-address-subspace.h", "src/base/enum-set.h", "src/base/export-template.h", "src/base/file-utils.cc", @@ -591,9 +614,14 @@ filegroup( "src/base/utils/random-number-generator.cc", "src/base/utils/random-number-generator.h", "src/base/vector.h", + "src/base/virtual-address-space-page-allocator.cc", + "src/base/virtual-address-space-page-allocator.h", + "src/base/virtual-address-space.cc", + "src/base/virtual-address-space.h", "src/base/v8-fallthrough.h", "src/base/vlq-base64.cc", "src/base/vlq-base64.h", + "src/base/platform/yield-processor.h", ] + select({ "@config//:is_posix": [ "src/base/platform/platform-posix.cc", @@ -615,6 +643,11 @@ filegroup( "src/base/debug/stack_trace_posix.cc", "src/base/platform/platform-macos.cc", ], + "@config//:is_windows": [ + "src/base/win32-headers.h", + "src/base/debug/stack_trace_win.cc", + "src/base/platform/platform-win32.cc", + ], }), visibility = ["//visibility:public"], ) @@ -1110,6 +1143,7 @@ filegroup( "src/common/assert-scope.cc", "src/common/assert-scope.h", "src/common/checks.h", + "src/common/high-allocation-throughput-scope.h", "src/common/message-template.h", "src/common/ptr-compr-inl.h", "src/common/ptr-compr.h", @@ -1180,6 +1214,10 @@ filegroup( "src/execution/arguments-inl.h", "src/execution/arguments.cc", "src/execution/arguments.h", + "src/execution/encoded-c-signature.cc", + "src/execution/encoded-c-signature.h", + "src/execution/embedder-state.h", + "src/execution/embedder-state.cc", "src/execution/execution.cc", "src/execution/execution.h", "src/execution/frame-constants.h", @@ -1307,6 +1345,8 @@ filegroup( "src/heap/heap-controller.cc", "src/heap/heap-controller.h", "src/heap/heap-inl.h", 
+ "src/heap/heap-layout-tracer.cc", + "src/heap/heap-layout-tracer.h", "src/heap/heap-write-barrier-inl.h", "src/heap/heap-write-barrier.cc", "src/heap/heap-write-barrier.h", @@ -1400,7 +1440,6 @@ filegroup( "src/heap/sweeper.h", "src/heap/weak-object-worklists.cc", "src/heap/weak-object-worklists.h", - "src/heap/worklist.h", "src/ic/call-optimization.cc", "src/ic/call-optimization.h", "src/ic/handler-configuration-inl.h", @@ -1890,6 +1929,8 @@ filegroup( "src/base/sanitizer/asan.h", "src/base/sanitizer/lsan-page-allocator.cc", "src/base/sanitizer/lsan-page-allocator.h", + "src/base/sanitizer/lsan-virtual-address-space.cc", + "src/base/sanitizer/lsan-virtual-address-space.h", "src/base/sanitizer/msan.h", "src/base/sanitizer/tsan.h", "src/snapshot/code-serializer.cc", @@ -1961,6 +2002,8 @@ filegroup( "src/tasks/operations-barrier.h", "src/tasks/task-utils.cc", "src/tasks/task-utils.h", + "src/temporal/temporal-parser.cc", + "src/temporal/temporal-parser.h", "src/torque/runtime-macro-shims.h", "src/third_party/siphash/halfsiphash.cc", "src/third_party/siphash/halfsiphash.h", @@ -2163,7 +2206,7 @@ filegroup( ], }) + select({ # Only for x64 builds and for arm64 with x64 host simulator. - "@config//:is_x64": [ + "@config//:is_posix_x64": [ "src/trap-handler/handler-inside-posix.cc", "src/trap-handler/handler-outside-posix.cc", ], @@ -2174,6 +2217,22 @@ filegroup( "src/trap-handler/handler-outside-simulator.cc", ], "//conditions:default": [], + }) + select({ + "@config//:is_windows": [ + "src/trap-handler/handler-inside-win.cc", + "src/trap-handler/handler-outside-win.cc", + "src/trap-handler/handler-inside-win.h", + # Needed on windows to work around https://github.com/bazelbuild/bazel/issues/6337 + "third_party/zlib/zlib.h", + "third_party/zlib/zconf.h", + ], + "//conditions:default": [], + }) + select({ + "@config//:is_windows_64bit": [ + "src/diagnostics/unwinding-info-win64.cc", + "src/diagnostics/unwinding-info-win64.h", + ], + "//conditions:default": [], }) + select({ ":is_v8_enable_webassembly": [ "src/asmjs/asm-js.cc", @@ -2717,6 +2776,7 @@ filegroup( "src/heap/cppgc/marking-visitor.h", "src/heap/cppgc/marking-worklists.cc", "src/heap/cppgc/marking-worklists.h", + "src/heap/cppgc/memory.cc", "src/heap/cppgc/memory.h", "src/heap/cppgc/metric-recorder.h", "src/heap/cppgc/name-trait.cc", @@ -2770,10 +2830,13 @@ filegroup( # Note these cannot be v8_target_is_* selects because these contain # inline assembly that runs inside the executable. Since these are # linked directly into mksnapshot, they must use the actual target cpu. 
- "@config//:is_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"], - "@config//:is_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"], - "@config//:is_arm": ["src/heap/base/asm/arm/push_registers_asm.cc"], - "@config//:is_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"], + "@config//:is_inline_asm_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"], + "@config//:is_inline_asm_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"], + "@config//:is_inline_asm_arm": ["src/heap/base/asm/arm/push_registers_asm.cc"], + "@config//:is_inline_asm_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"], + "@config//:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/push_registers_masm.S"], + "@config//:is_msvc_asm_x64": ["src/heap/base/asm/x64/push_registers_masm.S"], + "@config//:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/push_registers_masm.S"], }), ) @@ -2968,6 +3031,11 @@ filegroup( ], ) +filegroup( + name = "kythe_torque_headers", + srcs = glob(["src/torque/*.h"]), +) + # ================================================= # Generated files # ================================================= @@ -3033,6 +3101,7 @@ genrule( "src/inspector/protocol/Schema.h", ], cmd = "bazel/generate-inspector-files.sh $(@D)", + cmd_bat = "bazel\\generate-inspector-files.cmd $(@D)", local = 1, message = "Generating inspector files", ) @@ -3071,6 +3140,7 @@ genrule( srcs = [], outs = ["builtins-generated/bytecodes-builtins-list.h"], cmd = "$(location :bytecode_builtins_list_generator) $@", + cmd_bat = "$(location :bytecode_builtins_list_generator) $@", tools = [":bytecode_builtins_list_generator"], ) @@ -3079,6 +3149,7 @@ genrule( srcs = [], outs = ["src/regexp/special-case.cc"], cmd = "$(location :regexp_special_case_generator) $@", + cmd_bat = "$(location :regexp_special_case_generator) $@", tools = [":regexp_special_case_generator"], ) @@ -3137,17 +3208,18 @@ cc_library( ], include_prefix = "third_party/v8", includes = ["."], - visibility = ["//visibility:public"], ) cc_library( - name = "torque_base", + name = "kythe_torque_base", srcs = [ ":torque_base_files", ], - copts = ["-fexceptions"], + copts = select({ + "@config//:is_posix": [ "-fexceptions" ], + "//conditions:default": [], + }), features = ["-use_header_modules"], - visibility = ["//visibility:public"], deps = [ ":torque_base_headers", ":v8_libbase", @@ -3250,7 +3322,10 @@ v8_binary( "src/torque/torque.cc", ":torque_base_files", ], - copts = ["-fexceptions"], + copts = select({ + "@config//:is_posix": [ "-fexceptions" ], + "//conditions:default": [], + }), features = ["-use_header_modules"], linkopts = select({ "@config//:is_android": ["-llog"], @@ -3267,7 +3342,7 @@ v8_binary( "@config//:is_android": ["-llog"], "//conditions:default": [], }), - noicu_deps = [":noicu/v8_libshared"], + noicu_deps = [":v8_libshared_noicu"], ) v8_binary( @@ -3283,6 +3358,11 @@ v8_binary_non_pointer_compression( binary = "torque", ) +alias( + name = "v8ci", + actual = "icu/v8", +) + # ================================================= # Tests # ================================================= diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index bca5b5356b20bc..19731feebe82b4 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -146,11 +146,8 @@ declare_args() { # into relaxed atomic operations. v8_enable_atomic_object_field_writes = "" - # Sets -dV8_ATOMIC_MARKING_STATE - v8_enable_atomic_marking_state = "" - - # Controls the default values of v8_enable_atomic_object_field_writes and - # v8_enable_concurrent_marking_state. 
See the default setting code below. + # Controls the default value of v8_enable_concurrent_marking_state. See the + # default setting code below. v8_enable_concurrent_marking = true # Sets -dV8_IGNITION_DISPATCH_COUNTING. @@ -275,8 +272,10 @@ declare_args() { # Generate comments describing the Torque intermediate representation. v8_annotate_torque_ir = false - # Disable all snapshot compression. - v8_enable_snapshot_compression = true + # Enable snapshot compression (enabled by default on Android, ChromeOS, and Fuchsia). + v8_enable_snapshot_compression = + target_os == "android" || target_os == "chromeos" || + target_os == "fuchsia" # Enable control-flow integrity features, such as pointer authentication for # ARM64. @@ -353,11 +352,7 @@ declare_args() { # parameter count of function with JS linkage. # TODO(v8:11112): Remove once all architectures support the flag and it is # enabled unconditionally. - v8_include_receiver_in_argc = - v8_current_cpu == "x86" || v8_current_cpu == "x64" || - v8_current_cpu == "arm" || v8_current_cpu == "arm64" || - v8_current_cpu == "mips64el" || v8_current_cpu == "mipsel" || - v8_current_cpu == "loong64" || v8_current_cpu == "riscv64" + v8_include_receiver_in_argc = true } # Derived defaults. @@ -427,9 +422,6 @@ if (v8_enable_single_generation == "") { if (v8_enable_atomic_object_field_writes == "") { v8_enable_atomic_object_field_writes = v8_enable_concurrent_marking } -if (v8_enable_atomic_marking_state == "") { - v8_enable_atomic_marking_state = v8_enable_concurrent_marking -} if (v8_enable_third_party_heap) { v8_disable_write_barriers = true v8_enable_single_generation = true @@ -443,8 +435,6 @@ if (v8_enable_single_generation) { } assert(!v8_enable_concurrent_marking || v8_enable_atomic_object_field_writes, "Concurrent marking requires atomic object field writes.") -assert(!v8_enable_concurrent_marking || v8_enable_atomic_marking_state, - "Concurrent marking requires atomic marking state.") if (v8_enable_trace_unoptimized == "") { v8_enable_trace_unoptimized = v8_enable_trace_ignition || v8_enable_trace_baseline_exec @@ -901,9 +891,6 @@ config("features") { if (v8_enable_atomic_object_field_writes) { defines += [ "V8_ATOMIC_OBJECT_FIELD_WRITES" ] } - if (v8_enable_atomic_marking_state) { - defines += [ "V8_ATOMIC_MARKING_STATE" ] - } if (v8_enable_ignition_dispatch_counting) { defines += [ "V8_IGNITION_DISPATCH_COUNTING" ] } @@ -2198,7 +2185,6 @@ action("v8_dump_build_config") { "is_ubsan_vptr=$is_ubsan_vptr", "target_cpu=\"$target_cpu\"", "v8_current_cpu=\"$v8_current_cpu\"", - "v8_enable_atomic_marking_state=$v8_enable_atomic_marking_state", "v8_enable_atomic_object_field_writes=" + "$v8_enable_atomic_object_field_writes", "v8_enable_concurrent_marking=$v8_enable_concurrent_marking", @@ -2496,6 +2482,7 @@ v8_header_set("v8_headers") { "include/v8-date.h", "include/v8-debug.h", "include/v8-embedder-heap.h", + "include/v8-embedder-state-scope.h", "include/v8-exception.h", "include/v8-extension.h", "include/v8-external.h", @@ -2708,6 +2695,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/unoptimized-compilation-info.h", "src/common/assert-scope.h", "src/common/checks.h", + "src/common/high-allocation-throughput-scope.h", "src/common/message-template.h", "src/common/ptr-compr-inl.h", "src/common/ptr-compr.h", @@ -2872,6 +2860,8 @@ v8_header_set("v8_internal_headers") { "src/diagnostics/unwinder.h", "src/execution/arguments-inl.h", "src/execution/arguments.h", + "src/execution/embedder-state.h", + "src/execution/encoded-c-signature.h",
"src/execution/execution.h", "src/execution/frame-constants.h", "src/execution/frames-inl.h", @@ -2946,6 +2936,7 @@ v8_header_set("v8_internal_headers") { "src/heap/gc-tracer.h", "src/heap/heap-controller.h", "src/heap/heap-inl.h", + "src/heap/heap-layout-tracer.h", "src/heap/heap-write-barrier-inl.h", "src/heap/heap-write-barrier.h", "src/heap/heap.h", @@ -3006,7 +2997,6 @@ v8_header_set("v8_internal_headers") { "src/heap/stress-scavenge-observer.h", "src/heap/sweeper.h", "src/heap/weak-object-worklists.h", - "src/heap/worklist.h", "src/ic/call-optimization.h", "src/ic/handler-configuration-inl.h", "src/ic/handler-configuration.h", @@ -3369,6 +3359,7 @@ v8_header_set("v8_internal_headers") { "src/tasks/cancelable-task.h", "src/tasks/operations-barrier.h", "src/tasks/task-utils.h", + "src/temporal/temporal-parser.h", "src/third_party/siphash/halfsiphash.h", "src/third_party/utf8-decoder/utf8-decoder.h", "src/torque/runtime-macro-shims.h", @@ -4086,6 +4077,8 @@ v8_source_set("v8_base_without_compiler") { "src/diagnostics/perf-jit.cc", "src/diagnostics/unwinder.cc", "src/execution/arguments.cc", + "src/execution/embedder-state.cc", + "src/execution/encoded-c-signature.cc", "src/execution/execution.cc", "src/execution/frames.cc", "src/execution/futex-emulation.cc", @@ -4135,6 +4128,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/gc-idle-time-handler.cc", "src/heap/gc-tracer.cc", "src/heap/heap-controller.cc", + "src/heap/heap-layout-tracer.cc", "src/heap/heap-write-barrier.cc", "src/heap/heap.cc", "src/heap/incremental-marking-job.cc", @@ -4378,6 +4372,7 @@ v8_source_set("v8_base_without_compiler") { "src/tasks/cancelable-task.cc", "src/tasks/operations-barrier.cc", "src/tasks/task-utils.cc", + "src/temporal/temporal-parser.cc", "src/third_party/siphash/halfsiphash.cc", "src/tracing/trace-event.cc", "src/tracing/traced-value.cc", @@ -4978,6 +4973,8 @@ v8_component("v8_libbase") { "src/base/debug/stack_trace.h", "src/base/division-by-constant.cc", "src/base/division-by-constant.h", + "src/base/emulated-virtual-address-subspace.cc", + "src/base/emulated-virtual-address-subspace.h", "src/base/enum-set.h", "src/base/export-template.h", "src/base/file-utils.cc", @@ -5031,6 +5028,7 @@ v8_component("v8_libbase") { "src/base/platform/time.cc", "src/base/platform/time.h", "src/base/platform/wrappers.h", + "src/base/platform/yield-processor.h", "src/base/region-allocator.cc", "src/base/region-allocator.h", "src/base/ring-buffer.h", @@ -5040,6 +5038,8 @@ v8_component("v8_libbase") { "src/base/sanitizer/asan.h", "src/base/sanitizer/lsan-page-allocator.cc", "src/base/sanitizer/lsan-page-allocator.h", + "src/base/sanitizer/lsan-virtual-address-space.cc", + "src/base/sanitizer/lsan-virtual-address-space.h", "src/base/sanitizer/lsan.h", "src/base/sanitizer/msan.h", "src/base/sanitizer/tsan.h", @@ -5056,6 +5056,10 @@ v8_component("v8_libbase") { "src/base/utils/random-number-generator.h", "src/base/v8-fallthrough.h", "src/base/vector.h", + "src/base/virtual-address-space-page-allocator.cc", + "src/base/virtual-address-space-page-allocator.h", + "src/base/virtual-address-space.cc", + "src/base/virtual-address-space.h", "src/base/vlq-base64.cc", "src/base/vlq-base64.h", "src/base/vlq.h", diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 8d1be4a65833ae..102f46264b8e95 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -40,10 +40,10 @@ vars = { 'reclient_version': 're_client_version:0.40.0.40ff5a5', # GN CIPD package version. 
- 'gn_version': 'git_revision:8926696a4186279489cc2b8d768533e61bba73d7', + 'gn_version': 'git_revision:fc295f3ac7ca4fe7acc6cb5fb052d22909ef3a8f', # luci-go CIPD package version. - 'luci_go': 'git_revision:68355732afb00a422ae0c70eed95c6a45f9868b1', + 'luci_go': 'git_revision:31175eb1a2712bb75d06a9bad5d4dd3f2a09cd1f', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -76,18 +76,18 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_tools-lint_version # and whatever else without interference from each other. - 'android_sdk_cmdline-tools_version': 'AuYa11pULKT8AI14_owabJrkZoRGuovL-nvwmiONlYEC', + 'android_sdk_cmdline-tools_version': 'Ez2NWws2SJYCF6qw2O-mSCqK6424l3ZdSTpppLyVR_cC', } deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '7f36dbc19d31e2aad895c60261ca8f726442bfbb', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'cf325916d58a194a935c26a56fcf6b525d1e2bf4', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '9cfc74504f0c5093fe6799e70f15bded2423b5b4', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '80e4f838faaf50e18629ae630df1d421f255a62a', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '075dd7e22837a69189003e4fa84499acf63188cf', 'buildtools/clang_format/script': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99876cacf78329e5f99c244dbe42ccd1654517a0', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'e435ad79c17b1888b34df88d6a30a094936e3836', 'buildtools/linux64': { 'packages': [ { @@ -111,9 +111,9 @@ deps = { 'buildtools/third_party/libc++/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '79a2e924d96e2fc1e4b937c42efd08898fa472d7', 'buildtools/third_party/libc++abi/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '4c6e0991b109638204f08c93600b008c21f01da5', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '89f2e82120461d34098edd216e57aa743f441107', 'buildtools/third_party/libunwind/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '99015718c37b30d44c3bcbcc92a03fb85fb85a99', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'c8c0ec928e46328fa284e7290c4ef052c7d285d4', 'buildtools/win': { 'packages': [ { @@ -139,14 +139,14 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ba82d46238bd16c3e31b93d21d2846c81a9ccf7a', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '1f16a6ad0edd10e774e336d8b331471b0c3bb360', 'test/test262/harness': Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b', 'third_party/aemu-linux-x64': { 'packages': [ { 'package': 'fuchsia/third_party/aemu/linux-amd64', - 'version': 'hys6gk1KOHMz9nURGWen255HiLIaVd3e4eZfa-w6l7oC' + 'version': 'vRCm89BzABss-_H8vC-tLjcSf6uusZA9IBSSYtdw4_kC' }, ], 'condition': 'host_os == "linux" and checkout_fuchsia', @@ -167,7 +167,7 @@ deps = { 'condition': 'checkout_android', }, 
'third_party/android_platform': { - 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '72e09e98a62744cd10b762bd438c702ed8b131fb', + 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'abc362f16dfc1a6cc082298ed54504bef11eb9e7', 'condition': 'checkout_android', }, 'third_party/android_sdk/public': { @@ -209,7 +209,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '75c4ea8c6eef1d5941ec3d5cfee174e8d0f73566', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '49839733a7f26070e8d666d91fae177711154e1d', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -217,20 +217,20 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '57c928cd959aa46e9dbd6b0bc754888075b4a4c3', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '0a233e176044b6d9b9ff9fb30b589bfb18f9ca04', 'third_party/fuchsia-sdk': { - 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '18896843130c33372c455c153ad07d2217bd2085', + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '5e0b0d0b67e889360eaa456cc17ce47d89a92167', 'condition': 'checkout_fuchsia', }, 'third_party/google_benchmark/src': { - 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '4f31803ebbf283e24602260e39e63a296d44b0f8', + 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + 'ab867074da2423c2d9cf225233191a01f043485d', }, 'third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '16f637fbf4ffc3f7a01fa4eceb7906634565242f', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '4c5650f68866e3c2e60361d5c4c95c6f335fb64b', 'third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'eedbaf76e49d28465d9119b10c30b82906e606ff', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'edf883ad2db9c723b058a6a17a146d68d6343143', 'third_party/instrumented_libraries': - Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '3c149f5611237dc59a7ec229e8ea009d8be8f51d', + Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'e09c4b66b6e87116eb190651421f1a6e2f3b9c52', 'third_party/ittapi': { # Force checkout ittapi libraries to pass v8 header includes check on # bots that has check_v8_header_includes enabled. 
@@ -274,9 +274,9 @@ deps = { 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '6da1d53b97c89b07e47714d88cab61f1ce003c68', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'efd9399ae01364926be2a38946127fdf463480db', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '21baac0e13389b03d6f805701c75544ed0b1ebb0', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '336fcfd099995c128bc93e97b8263cc6fc891cc8', 'tools/clang/dsymutil': { 'packages': [ { @@ -293,10 +293,6 @@ deps = { 'package': 'infra/tools/luci/isolate/${{platform}}', 'version': Var('luci_go'), }, - { - 'package': 'infra/tools/luci/isolated/${{platform}}', - 'version': Var('luci_go'), - }, { 'package': 'infra/tools/luci/swarming/${{platform}}', 'version': Var('luci_go'), @@ -339,7 +335,7 @@ hooks = [ 'name': 'disable_depot_tools_selfupdate', 'pattern': '.', 'action': [ - 'python', + 'python3', 'third_party/depot_tools/update_depot_tools_toggle.py', '--disable', ], @@ -351,7 +347,7 @@ hooks = [ 'name': 'landmines', 'pattern': '.', 'action': [ - 'python', + 'python3', 'build/landmines.py', '--landmine-scripts', 'tools/get_landmines.py', @@ -453,28 +449,28 @@ hooks = [ 'name': 'sysroot_arm', 'pattern': '.', 'condition': '(checkout_linux and checkout_arm)', - 'action': ['python', 'build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'build/linux/sysroot_scripts/install-sysroot.py', '--arch=arm'], }, { 'name': 'sysroot_arm64', 'pattern': '.', 'condition': '(checkout_linux and checkout_arm64)', - 'action': ['python', 'build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'build/linux/sysroot_scripts/install-sysroot.py', '--arch=arm64'], }, { 'name': 'sysroot_x86', 'pattern': '.', 'condition': '(checkout_linux and (checkout_x86 or checkout_x64))', - 'action': ['python', 'build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'build/linux/sysroot_scripts/install-sysroot.py', '--arch=x86'], }, { 'name': 'sysroot_x64', 'pattern': '.', 'condition': 'checkout_linux and checkout_x64', - 'action': ['python', 'build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python3', 'build/linux/sysroot_scripts/install-sysroot.py', '--arch=x64'], }, { @@ -516,14 +512,14 @@ hooks = [ 'name': 'win_toolchain', 'pattern': '.', 'condition': 'checkout_win', - 'action': ['python', 'build/vs_toolchain.py', 'update', '--force'], + 'action': ['python3', 'build/vs_toolchain.py', 'update', '--force'], }, { # Update the Mac toolchain if necessary. 'name': 'mac_toolchain', 'pattern': '.', 'condition': 'checkout_mac', - 'action': ['python', 'build/mac_toolchain.py'], + 'action': ['python3', 'build/mac_toolchain.py'], }, { # Note: On Win, this should run after win_toolchain, as it may use it. @@ -544,7 +540,7 @@ hooks = [ # Update LASTCHANGE. 
'name': 'lastchange', 'pattern': '.', - 'action': ['python', 'build/util/lastchange.py', + 'action': ['python3', 'build/util/lastchange.py', '-o', 'build/util/LASTCHANGE'], }, { @@ -552,7 +548,7 @@ hooks = [ 'pattern': '.', 'condition': 'checkout_fuchsia', 'action': [ - 'python', + 'python3', 'build/fuchsia/update_sdk.py', ], }, @@ -561,7 +557,7 @@ hooks = [ 'pattern': '.', 'condition': 'checkout_fuchsia', 'action': [ - 'python', + 'python3', 'build/fuchsia/update_images.py', '--boot-images={checkout_fuchsia_boot_images}', ], @@ -584,12 +580,20 @@ hooks = [ '-vpython-tool', 'install', ], }, + { + 'name': 'vpython3_common', + 'pattern': '.', + 'action': [ 'vpython3', + '-vpython-spec', '.vpython3', + '-vpython-tool', 'install', + ], + }, { 'name': 'check_v8_header_includes', 'pattern': '.', 'condition': 'check_v8_header_includes', 'action': [ - 'python', + 'python3', 'tools/generate-header-include-checks.py', ], }, diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS index 7174da6f15a297..42d7d9981da461 100644 --- a/deps/v8/OWNERS +++ b/deps/v8/OWNERS @@ -2,17 +2,8 @@ # Disagreement among owners should be escalated to eng reviewers. file:ENG_REVIEW_OWNERS -per-file .clang-format=file:INFRA_OWNERS -per-file .clang-tidy=file:INFRA_OWNERS -per-file .editorconfig=file:INFRA_OWNERS -per-file .flake8=file:INFRA_OWNERS -per-file .git-blame-ignore-revs=file:INFRA_OWNERS -per-file .gitattributes=file:INFRA_OWNERS -per-file .gitignore=file:INFRA_OWNERS -per-file .gn=file:INFRA_OWNERS +per-file .*=file:INFRA_OWNERS per-file .mailmap=file:COMMON_OWNERS -per-file .vpython=file:INFRA_OWNERS -per-file .ycm_extra_conf.py=file:INFRA_OWNERS per-file .bazelrc=file:COMMON_OWNERS per-file BUILD.bazel=file:COMMON_OWNERS per-file BUILD.gn=file:COMMON_OWNERS diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS index ad065a9842849a..b1dc86db9d9467 100644 --- a/deps/v8/WATCHLISTS +++ b/deps/v8/WATCHLISTS @@ -60,7 +60,7 @@ 'feature_shipping_status': { 'filepath': 'src/flags/flag-definitions.h', }, - 'gc_changes': { + 'heap_changes': { 'filepath': 'src/heap/', }, 'arm': { @@ -108,7 +108,8 @@ 'filepath': 'src/heap/cppgc/' \ '|src/heap/cppgc-js/' \ '|include/cppgc/' \ - '|test/unittests/heap/', + '|test/unittests/heap/cppgc/' \ + '|test/unittests/heap/cppgc-js/', }, }, @@ -132,8 +133,9 @@ 'feature_shipping_status': [ 'hablich@chromium.org', ], - 'gc_changes': [ + 'heap_changes': [ 'hpayer@chromium.org', + 'mlippautz+watch@chromium.org', ], 'arm': [ 'v8-mips-ports@googlegroups.com', diff --git a/deps/v8/bazel/BUILD.icu b/deps/v8/bazel/BUILD.icu index ea3860ac901b0a..662e11ec03a34a 100644 --- a/deps/v8/bazel/BUILD.icu +++ b/deps/v8/bazel/BUILD.icu @@ -26,14 +26,29 @@ cc_library( "U_ENABLE_RESOURCE_TRACING=0", "UNISTR_FROM_STRING_EXPLICIT=", "UNISTR_FROM_CHAR_EXPLICIT=", - ], - copts = [ - "-Wno-unused-function", - "-Wno-parentheses", - "-Wno-unused-function", - "-Wno-unused-variable", - "-Wno-deprecated-declarations", - ], + ] + select({ + "@platforms//os:windows": [ + "U_STATIC_IMPLEMENTATION", + "UNICODE", + "_UNICODE", + ], + "//conditions:default": [], + }), + copts = select({ + "@platforms//os:windows": [ + "/wd4005", # Macro redefinition. + "/wd4068", # Unknown pragmas. + "/wd4267", # Conversion from size_t on 64-bits. + "/utf-8", # ICU source files are in UTF-8. 
+ ], + "//conditions:default": [ + "-Wno-unused-function", + "-Wno-parentheses", + "-Wno-unused-function", + "-Wno-unused-variable", + "-Wno-deprecated-declarations", + ], + }), includes = [ "source/common", "source/i18n", @@ -49,9 +64,25 @@ cc_library( "source/i18n/**/*.h", "source/i18n/**/*.cpp" ]), + copts = select({ + "@platforms//os:windows": [ + "/wd4005", # Macro redefinition. + "/wd4068", # Unknown pragmas. + "/wd4267", # Conversion from size_t on 64-bits. + "/utf-8", # ICU source files are in UTF-8. + ], + "//conditions:default": [], + }), defines = [ "U_I18N_IMPLEMENTATION", - ], + ] + select({ + "@platforms//os:windows": [ + "U_STATIC_IMPLEMENTATION", + "UNICODE", + "_UNICODE", + ], + "//conditions:default": [], + }), deps = [ ":icuuc" ], alwayslink = 1, ) @@ -65,6 +96,25 @@ cc_library( srcs = [ "source/stubdata/stubdata.cpp", ], + copts = select({ + "@platforms//os:windows": [ + "/wd4005", # Macro redefinition. + "/wd4068", # Unknown pragmas. + "/wd4267", # Conversion from size_t on 64-bits. + "/utf-8", # ICU source files are in UTF-8. + ], + "//conditions:default": [], + }), + defines = [ + "U_I18N_IMPLEMENTATION", + ] + select({ + "@platforms//os:windows": [ + "U_STATIC_IMPLEMENTATION", + "UNICODE", + "_UNICODE", + ], + "//conditions:default": [], + }), include_prefix = "third_party/icu", deps = [ ":icuuc", diff --git a/deps/v8/bazel/BUILD.zlib b/deps/v8/bazel/BUILD.zlib index 0c97b325a34c51..140f761fbb81e3 100644 --- a/deps/v8/bazel/BUILD.zlib +++ b/deps/v8/bazel/BUILD.zlib @@ -45,8 +45,12 @@ cc_library( include_prefix = "third_party/zlib", defines = [ "CHROMIUM_ZLIB_NO_CHROMECONF", - "HAVE_HIDDEN", "CPU_NO_SIMD", - ], + ] + select({ + "@platforms//os:windows": [], + "//conditions:default": [ + "HAVE_HIDDEN", + ], + }), visibility = ["//visibility:public"], ) diff --git a/deps/v8/bazel/config/BUILD.bazel b/deps/v8/bazel/config/BUILD.bazel index 78dcdb14d5ec45..78a1b5debdac77 100644 --- a/deps/v8/bazel/config/BUILD.bazel +++ b/deps/v8/bazel/config/BUILD.bazel @@ -99,6 +99,28 @@ config_setting( constraint_values = ["@platforms//os:macos"], ) +config_setting( + name = "is_windows", + constraint_values = ["@platforms//os:windows"], +) + +selects.config_setting_group( + name = "is_64bit", + match_any = [ + ":v8_target_arm64", + ":is_x64", + ":is_arm64", + ], +) + +selects.config_setting_group( + name = "is_windows_64bit", + match_all = [ + ":is_64bit", + ":is_windows", + ], +) + selects.config_setting_group( name = "is_posix", match_any = [ @@ -107,3 +129,46 @@ selects.config_setting_group( ":is_macos", ], ) + +selects.config_setting_group( + name = "is_posix_x64", + match_all = [ + ":is_posix", + ":is_x64", + ] +) + +selects.config_setting_group( + name = "is_inline_asm_x64", + match_all = [":is_posix", ":is_x64"], +) + +selects.config_setting_group( + name = "is_inline_asm_ia32", + match_all = [":is_posix", ":is_ia32"], +) + +selects.config_setting_group( + name = "is_inline_asm_arm64", + match_all = [":is_posix", ":is_arm64"], +) + +selects.config_setting_group( + name = "is_inline_asm_arm", + match_all = [":is_posix", ":is_arm"], +) + +selects.config_setting_group( + name = "is_msvc_asm_x64", + match_all = [":is_windows", ":is_x64"], +) + +selects.config_setting_group( + name = "is_msvc_asm_ia32", + match_all = [":is_windows", ":is_ia32"], +) + +selects.config_setting_group( + name = "is_msvc_asm_arm64", + match_all = [":is_windows", ":is_arm64"], +) diff --git a/deps/v8/bazel/defs.bzl b/deps/v8/bazel/defs.bzl index 130e7be9eddd65..fc428ba16cd083 100644 --- 
a/deps/v8/bazel/defs.bzl +++ b/deps/v8/bazel/defs.bzl @@ -88,25 +88,40 @@ v8_config = rule( def _default_args(): return struct( deps = [":define_flags"], - copts = [ - "-fPIC", - "-Werror", - "-Wextra", - "-Wno-bitwise-instead-of-logical", - "-Wno-builtin-assume-aligned-alignment", - "-Wno-unused-parameter", - "-Wno-implicit-int-float-conversion", - "-Wno-deprecated-copy", - "-Wno-non-virtual-dtor", - "-std=c++17", - "-isystem .", - ], + defines = select({ + "@config//:is_windows": [ + "UNICODE", + "_UNICODE", + "_CRT_RAND_S", + "_WIN32_WINNT=0x0602", # Override bazel default to Windows 8 + ], + "//conditions:default": [], + }), + copts = select({ + "@config//:is_posix": [ + "-fPIC", + "-Werror", + "-Wextra", + "-Wno-bitwise-instead-of-logical", + "-Wno-builtin-assume-aligned-alignment", + "-Wno-unused-parameter", + "-Wno-implicit-int-float-conversion", + "-Wno-deprecated-copy", + "-Wno-non-virtual-dtor", + "-std=c++17", + "-isystem .", + ], + "//conditions:default": [], + }), includes = ["include"], - linkopts = [ - "-pthread", - ] + select({ - "@config//:is_macos": [], - "//conditions:default": ["-Wl,--no-as-needed -ldl"], + linkopts = select({ + "@config//:is_windows": [ + "Winmm.lib", + "DbgHelp.lib", + "Advapi32.lib", + ], + "@config//:is_macos": ["-pthread"], + "//conditions:default": ["-Wl,--no-as-needed -ldl -pthread"], }) + select({ ":should_add_rdynamic": ["-rdynamic"], "//conditions:default": [], @@ -184,25 +199,41 @@ def v8_library( default = _default_args() if _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, icu_srcs, icu_deps): native.cc_library( - name = "noicu/" + name, + name = name + "_noicu", srcs = srcs + noicu_srcs, deps = deps + noicu_deps + default.deps, includes = includes + default.includes, copts = copts + default.copts, linkopts = linkopts + default.linkopts, alwayslink = 1, + linkstatic = 1, **kwargs ) + # Alias target used because of cc_library bug in bazel on windows + # https://github.com/bazelbuild/bazel/issues/14237 + # TODO(victorgomes): Remove alias once bug is fixed + native.alias( + name = "noicu/" + name, + actual = name + "_noicu", + ) native.cc_library( - name = "icu/" + name, + name = name + "_icu", srcs = srcs + icu_srcs, deps = deps + icu_deps + default.deps, includes = includes + default.includes, copts = copts + default.copts + ENABLE_I18N_SUPPORT_DEFINES, linkopts = linkopts + default.linkopts, alwayslink = 1, + linkstatic = 1, **kwargs ) + # Alias target used because of cc_library bug in bazel on windows + # https://github.com/bazelbuild/bazel/issues/14237 + # TODO(victorgomes): Remove alias once bug is fixed + native.alias( + name = "icu/" + name, + actual = name + "_icu", + ) else: native.cc_library( name = name, @@ -212,6 +243,7 @@ def v8_library( copts = copts + default.copts, linkopts = linkopts + default.linkopts, alwayslink = 1, + linkstatic = 1, **kwargs ) @@ -297,6 +329,29 @@ def v8_torque(name, noicu_srcs, icu_srcs, args, extras): }), ) +def _v8_target_cpu_transition_impl(settings, attr): + mapping = { + "haswell": "x64", + "k8": "x64", + "x86_64": "x64", + "darwin_x86_64": "x64", + "x86": "ia32", + "ppc": "ppc64", + "arm64-v8a": "arm64", + "arm": "arm64", + "armeabi-v7a": "arm32", + } + v8_target_cpu = mapping[settings["//command_line_option:cpu"]] + return {"@config//:v8_target_cpu": v8_target_cpu} + +# Set the v8_target_cpu to be the correct architecture given the cpu specified +# on the command line. 
+v8_target_cpu_transition = transition( + implementation = _v8_target_cpu_transition_impl, + inputs = ["//command_line_option:cpu"], + outputs = ["@config//:v8_target_cpu"], +) + def _mksnapshot(ctx): outs = [ ctx.actions.declare_file(ctx.attr.prefix + "/snapshot.cc"), @@ -327,8 +382,12 @@ _v8_mksnapshot = rule( executable = True, cfg = "exec", ), + "_allowlist_function_transition": attr.label( + default = "@bazel_tools//tools/allowlists/function_transition_allowlist", + ), "prefix": attr.string(mandatory = True), }, + cfg = v8_target_cpu_transition, ) def v8_mksnapshot(name, args): diff --git a/deps/v8/bazel/generate-inspector-files.cmd b/deps/v8/bazel/generate-inspector-files.cmd new file mode 100644 index 00000000000000..202dd81d7cf48f --- /dev/null +++ b/deps/v8/bazel/generate-inspector-files.cmd @@ -0,0 +1,24 @@ +REM Copyright 2021 the V8 project authors. All rights reserved. +REM Use of this source code is governed by a BSD-style license that can be +REM found in the LICENSE file. + +set BAZEL_OUT=%1 + +REM Bazel nukes all env vars, and we need the following for gn to work +set DEPOT_TOOLS_WIN_TOOLCHAIN=0 +set ProgramFiles(x86)=C:\Program Files (x86) +set windir=C:\Windows + +REM Create a default GN output folder +cmd.exe /S /E:ON /V:ON /D /c gn gen out/inspector + +REM Generate inspector files +cmd.exe /S /E:ON /V:ON /D /c autoninja -C out/inspector gen/src/inspector/protocol/Forward.h + +REM Create directories in bazel output folder +MKDIR -p %BAZEL_OUT%\include\inspector +MKDIR -p %BAZEL_OUT%\src\inspector\protocol + +REM Copy generated files to bazel output folder +COPY out\inspector\gen\include\inspector\* %BAZEL_OUT%\include\inspector\ +COPY out\inspector\gen\src\inspector\protocol\* %BAZEL_OUT%\src\inspector\protocol\ \ No newline at end of file diff --git a/deps/v8/include/cppgc/garbage-collected.h b/deps/v8/include/cppgc/garbage-collected.h index a3839e1baa59bf..75d127ee9c6414 100644 --- a/deps/v8/include/cppgc/garbage-collected.h +++ b/deps/v8/include/cppgc/garbage-collected.h @@ -5,8 +5,6 @@ #ifndef INCLUDE_CPPGC_GARBAGE_COLLECTED_H_ #define INCLUDE_CPPGC_GARBAGE_COLLECTED_H_ -#include - #include "cppgc/internal/api-constants.h" #include "cppgc/platform.h" #include "cppgc/trace-trait.h" @@ -16,28 +14,6 @@ namespace cppgc { class Visitor; -namespace internal { - -class GarbageCollectedBase { - public: - // Must use MakeGarbageCollected. - void* operator new(size_t) = delete; - void* operator new[](size_t) = delete; - // The garbage collector is taking care of reclaiming the object. Also, - // virtual destructor requires an unambiguous, accessible 'operator delete'. - void operator delete(void*) { -#ifdef V8_ENABLE_CHECKS - internal::Abort(); -#endif // V8_ENABLE_CHECKS - } - void operator delete[](void*) = delete; - - protected: - GarbageCollectedBase() = default; -}; - -} // namespace internal - /** * Base class for managed objects. Only descendent types of `GarbageCollected` * can be constructed using `MakeGarbageCollected()`. Must be inherited from as @@ -74,11 +50,23 @@ class GarbageCollectedBase { * \endcode */ template -class GarbageCollected : public internal::GarbageCollectedBase { +class GarbageCollected { public: using IsGarbageCollectedTypeMarker = void; using ParentMostGarbageCollectedType = T; + // Must use MakeGarbageCollected. + void* operator new(size_t) = delete; + void* operator new[](size_t) = delete; + // The garbage collector is taking care of reclaiming the object. Also, + // virtual destructor requires an unambiguous, accessible 'operator delete'. 
+ void operator delete(void*) { +#ifdef V8_ENABLE_CHECKS + internal::Abort(); +#endif // V8_ENABLE_CHECKS + } + void operator delete[](void*) = delete; + protected: GarbageCollected() = default; }; @@ -101,7 +89,7 @@ class GarbageCollected : public internal::GarbageCollectedBase { * }; * \endcode */ -class GarbageCollectedMixin : public internal::GarbageCollectedBase { +class GarbageCollectedMixin { public: using IsGarbageCollectedMixinTypeMarker = void; diff --git a/deps/v8/include/cppgc/heap-state.h b/deps/v8/include/cppgc/heap-state.h index 3fd6b54a8a2123..28212589f8d714 100644 --- a/deps/v8/include/cppgc/heap-state.h +++ b/deps/v8/include/cppgc/heap-state.h @@ -38,6 +38,18 @@ class V8_EXPORT HeapState final { */ static bool IsSweeping(const HeapHandle& heap_handle); + /* + * Returns whether the garbage collector is currently sweeping on the thread + * owning this heap. This API allows the caller to determine whether it has + * been called from a destructor of a managed object. This API is experimental + * and may be removed in future. + * + * \param heap_handle The corresponding heap. + * \returns true if the garbage collector is currently sweeping on this + * thread, and false otherwise. + */ + static bool IsSweepingOnOwningThread(const HeapHandle& heap_handle); + /** * Returns whether the garbage collector is in the atomic pause, i.e., the * mutator is stopped from running. This API is experimental and is expected diff --git a/deps/v8/include/cppgc/internal/logging.h b/deps/v8/include/cppgc/internal/logging.h index 79beaef7d4f80d..3a279fe0bef839 100644 --- a/deps/v8/include/cppgc/internal/logging.h +++ b/deps/v8/include/cppgc/internal/logging.h @@ -20,18 +20,18 @@ FatalImpl(const char*, const SourceLocation& = SourceLocation::Current()); template struct EatParams {}; -#if DEBUG +#if defined(DEBUG) #define CPPGC_DCHECK_MSG(condition, message) \ do { \ if (V8_UNLIKELY(!(condition))) { \ ::cppgc::internal::DCheckImpl(message); \ } \ } while (false) -#else +#else // !defined(DEBUG) #define CPPGC_DCHECK_MSG(condition, message) \ (static_cast(::cppgc::internal::EatParams(condition), message)>{})) -#endif +#endif // !defined(DEBUG) #define CPPGC_DCHECK(condition) CPPGC_DCHECK_MSG(condition, #condition) diff --git a/deps/v8/include/cppgc/internal/persistent-node.h b/deps/v8/include/cppgc/internal/persistent-node.h index 68a8096cb66c6e..22b4cf093c110f 100644 --- a/deps/v8/include/cppgc/internal/persistent-node.h +++ b/deps/v8/include/cppgc/internal/persistent-node.h @@ -80,23 +80,31 @@ class V8_EXPORT PersistentRegionBase { using PersistentNodeSlots = std::array; public: - explicit PersistentRegionBase(const FatalOutOfMemoryHandler& oom_handler); // Clears Persistent fields to avoid stale pointers after heap teardown. 
~PersistentRegionBase(); PersistentRegionBase(const PersistentRegionBase&) = delete; PersistentRegionBase& operator=(const PersistentRegionBase&) = delete; - PersistentNode* AllocateNode(void* owner, TraceCallback trace) { - if (!free_list_head_) { - EnsureNodeSlots(); - CPPGC_DCHECK(free_list_head_); + void Trace(Visitor*); + + size_t NodesInUse() const; + + void ClearAllUsedNodes(); + + protected: + explicit PersistentRegionBase(const FatalOutOfMemoryHandler& oom_handler); + + PersistentNode* TryAllocateNodeFromFreeList(void* owner, + TraceCallback trace) { + PersistentNode* node = nullptr; + if (V8_LIKELY(free_list_head_)) { + node = free_list_head_; + free_list_head_ = free_list_head_->FreeListNext(); + CPPGC_DCHECK(!node->IsUsed()); + node->InitializeAsUsedNode(owner, trace); + nodes_in_use_++; } - PersistentNode* node = free_list_head_; - free_list_head_ = free_list_head_->FreeListNext(); - CPPGC_DCHECK(!node->IsUsed()); - node->InitializeAsUsedNode(owner, trace); - nodes_in_use_++; return node; } @@ -109,18 +117,15 @@ class V8_EXPORT PersistentRegionBase { nodes_in_use_--; } - void Trace(Visitor*); - - size_t NodesInUse() const; - - void ClearAllUsedNodes(); + PersistentNode* RefillFreeListAndAllocateNode(void* owner, + TraceCallback trace); private: - void EnsureNodeSlots(); - template void ClearAllUsedNodes(); + void RefillFreeList(); + std::vector> nodes_; PersistentNode* free_list_head_ = nullptr; size_t nodes_in_use_ = 0; @@ -142,7 +147,12 @@ class V8_EXPORT PersistentRegion final : public PersistentRegionBase { V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) { CPPGC_DCHECK(IsCreationThread()); - return PersistentRegionBase::AllocateNode(owner, trace); + auto* node = TryAllocateNodeFromFreeList(owner, trace); + if (V8_LIKELY(node)) return node; + + // Slow path allocation allows for checking thread correspondence. + CPPGC_CHECK(IsCreationThread()); + return RefillFreeListAndAllocateNode(owner, trace); } V8_INLINE void FreeNode(PersistentNode* node) { @@ -181,7 +191,10 @@ class V8_EXPORT CrossThreadPersistentRegion final V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) { PersistentRegionLock::AssertLocked(); - return PersistentRegionBase::AllocateNode(owner, trace); + auto* node = TryAllocateNodeFromFreeList(owner, trace); + if (V8_LIKELY(node)) return node; + + return RefillFreeListAndAllocateNode(owner, trace); } V8_INLINE void FreeNode(PersistentNode* node) { diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h index c7a5c4f9f5fb5a..12489327c54210 100644 --- a/deps/v8/include/libplatform/v8-tracing.h +++ b/deps/v8/include/libplatform/v8-tracing.h @@ -37,7 +37,6 @@ const int kTraceMaxNumArgs = 2; class V8_PLATFORM_EXPORT TraceObject { public: union ArgValue { - V8_DEPRECATED("use as_uint ? 
true : false") bool as_bool; uint64_t as_uint; int64_t as_int; double as_double; diff --git a/deps/v8/include/v8-callbacks.h b/deps/v8/include/v8-callbacks.h index 870df6a8211139..b70d59dbeca2a6 100644 --- a/deps/v8/include/v8-callbacks.h +++ b/deps/v8/include/v8-callbacks.h @@ -148,11 +148,13 @@ using JitCodeEventHandler = void (*)(const JitCodeEvent* event); */ enum GCType { kGCTypeScavenge = 1 << 0, - kGCTypeMarkSweepCompact = 1 << 1, - kGCTypeIncrementalMarking = 1 << 2, - kGCTypeProcessWeakCallbacks = 1 << 3, - kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact | - kGCTypeIncrementalMarking | kGCTypeProcessWeakCallbacks + kGCTypeMinorMarkCompact = 1 << 1, + kGCTypeMarkSweepCompact = 1 << 2, + kGCTypeIncrementalMarking = 1 << 3, + kGCTypeProcessWeakCallbacks = 1 << 4, + kGCTypeAll = kGCTypeScavenge | kGCTypeMinorMarkCompact | + kGCTypeMarkSweepCompact | kGCTypeIncrementalMarking | + kGCTypeProcessWeakCallbacks }; /** @@ -316,7 +318,7 @@ using SharedArrayBufferConstructorEnabledCallback = bool (*)(Local context); /** - * HostImportModuleDynamicallyWithImportAssertionsCallback is called when we + * HostImportModuleDynamicallyCallback is called when we * require the embedder to load a module. This is used as part of the dynamic * import syntax. * @@ -346,6 +348,10 @@ using HostImportModuleDynamicallyWithImportAssertionsCallback = Local referrer, Local specifier, Local import_assertions); +using HostImportModuleDynamicallyCallback = MaybeLocal (*)( + Local context, Local host_defined_options, + Local resource_name, Local specifier, + Local import_assertions); /** * HostInitializeImportMetaObjectCallback is called the first time import.meta diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h index 64b42c2b48b3af..8ec826a59552b4 100644 --- a/deps/v8/include/v8-cppgc.h +++ b/deps/v8/include/v8-cppgc.h @@ -195,7 +195,7 @@ class V8_EXPORT JSHeapConsistency final { * \returns whether a write barrier is needed and which barrier to invoke. */ template - V8_DEPRECATE_SOON("Write barriers automatically emitted by TracedReference.") + V8_DEPRECATED("Write barriers automatically emitted by TracedReference.") static V8_INLINE WriteBarrierType GetWriteBarrierType(const TracedReferenceBase& ref, WriteBarrierParams& params, @@ -235,9 +235,13 @@ class V8_EXPORT JSHeapConsistency final { * \returns whether a write barrier is needed and which barrier to invoke. */ template - static V8_INLINE WriteBarrierType GetWriteBarrierType( - v8::Local& wrapper, int wrapper_index, const void* wrappable, - WriteBarrierParams& params, HeapHandleCallback callback) { + V8_DEPRECATE_SOON( + "Write barriers automatically emitted when using " + "`SetAlignedPointerInInternalFields()`.") + static V8_INLINE WriteBarrierType + GetWriteBarrierType(v8::Local& wrapper, int wrapper_index, + const void* wrappable, WriteBarrierParams& params, + HeapHandleCallback callback) { #if V8_ENABLE_CHECKS CheckWrapper(wrapper, wrapper_index, wrappable); #endif // V8_ENABLE_CHECKS @@ -253,7 +257,7 @@ class V8_EXPORT JSHeapConsistency final { * \param params The parameters retrieved from `GetWriteBarrierType()`. * \param ref The reference being written to. 
*/ - V8_DEPRECATE_SOON("Write barriers automatically emitted by TracedReference.") + V8_DEPRECATED("Write barriers automatically emitted by TracedReference.") static V8_INLINE void DijkstraMarkingBarrier(const WriteBarrierParams& params, cppgc::HeapHandle& heap_handle, const TracedReferenceBase& ref) { @@ -270,6 +274,9 @@ class V8_EXPORT JSHeapConsistency final { * \param object The pointer to the object. May be an interior pointer to * an interface of the actual object. */ + V8_DEPRECATE_SOON( + "Write barriers automatically emitted when using " + "`SetAlignedPointerInInternalFields()`.") static V8_INLINE void DijkstraMarkingBarrier(const WriteBarrierParams& params, cppgc::HeapHandle& heap_handle, const void* object) { @@ -283,7 +290,7 @@ class V8_EXPORT JSHeapConsistency final { * \param params The parameters retrieved from `GetWriteBarrierType()`. * \param ref The reference being written to. */ - V8_DEPRECATE_SOON("Write barriers automatically emitted by TracedReference.") + V8_DEPRECATED("Write barriers automatically emitted by TracedReference.") static V8_INLINE void GenerationalBarrier(const WriteBarrierParams& params, const TracedReferenceBase& ref) {} diff --git a/deps/v8/include/v8-data.h b/deps/v8/include/v8-data.h index dbd36c9a035290..cc51fefe105bbe 100644 --- a/deps/v8/include/v8-data.h +++ b/deps/v8/include/v8-data.h @@ -27,6 +27,11 @@ class V8_EXPORT Data { */ bool IsModule() const; + /** + * Returns true if this data is a |v8::FixedArray|. + */ + bool IsFixedArray() const; + /** * Returns true if this data is a |v8::Private|. */ @@ -58,6 +63,16 @@ class V8_EXPORT FixedArray : public Data { public: int Length() const; Local<Data> Get(Local<Context> context, int i) const; + + V8_INLINE static FixedArray* Cast(Data* data) { +#ifdef V8_ENABLE_CHECKS + CheckCast(data); +#endif + return reinterpret_cast<FixedArray*>(data); + } + + private: + static void CheckCast(Data* obj); }; } // namespace v8 diff --git a/deps/v8/include/v8-embedder-heap.h b/deps/v8/include/v8-embedder-heap.h index c3e5ddc16c7d5c..81390f1a7c5c3c 100644 --- a/deps/v8/include/v8-embedder-heap.h +++ b/deps/v8/include/v8-embedder-heap.h @@ -210,6 +210,7 @@ class V8_EXPORT EmbedderHeapTracer { * * Should only be used in testing code. */ + V8_DEPRECATE_SOON("Use Isolate::RequestGarbageCollectionForTesting instead") void GarbageCollectionForTesting(EmbedderStackState stack_state); /* diff --git a/deps/v8/include/v8-embedder-state-scope.h b/deps/v8/include/v8-embedder-state-scope.h new file mode 100644 index 00000000000000..6ae9b3b4779bf7 --- /dev/null +++ b/deps/v8/include/v8-embedder-state-scope.h @@ -0,0 +1,48 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef INCLUDE_V8_EMBEDDER_STATE_SCOPE_H_ +#define INCLUDE_V8_EMBEDDER_STATE_SCOPE_H_ + +#include <memory> + +#include "v8-context.h" // NOLINT(build/include_directory) +#include "v8-internal.h" // NOLINT(build/include_directory) +#include "v8-local-handle.h" // NOLINT(build/include_directory) + +namespace v8 { + +namespace internal { +class EmbedderState; +} // namespace internal + +// A StateTag represents a possible state of the embedder. +enum class EmbedderStateTag : uint8_t { + EMPTY = 0, + // embedder can define any state in between + OTHER = UINT8_MAX, +}; + +// A stack-allocated class that manages an embedder state on the isolate. +// After an EmbedderState scope has been created, a new embedder state will be +// pushed on the isolate stack.
+class V8_EXPORT EmbedderStateScope { + public: + EmbedderStateScope(Isolate* isolate, Local context, + EmbedderStateTag tag); + + private: + // Declaring operator new and delete as deleted is not spec compliant. + // Therefore declare them private instead to disable dynamic alloc + void* operator new(size_t size); + void* operator new[](size_t size); + void operator delete(void*, size_t); + void operator delete[](void*, size_t); + + std::unique_ptr embedder_state_; +}; + +} // namespace v8 + +#endif // INCLUDE_V8_EMBEDDER_STATE_SCOPE_H_ diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h index a6c1b27353168b..a2c867217dbb1a 100644 --- a/deps/v8/include/v8-fast-api-calls.h +++ b/deps/v8/include/v8-fast-api-calls.h @@ -249,6 +249,15 @@ class CTypeInfo { kV8Value, kApiObject, // This will be deprecated once all users have // migrated from v8::ApiObject to v8::Local. + kAny, // This is added to enable untyped representation of fast + // call arguments for test purposes. It can represent any of + // the other types stored in the same memory as a union (see + // the AnyCType struct declared below). This allows for + // uniform passing of arguments w.r.t. their location + // (in a register or on the stack), independent of their + // actual type. It's currently used by the arm64 simulator + // and can be added to the other simulators as well when fast + // calls having both GP and FP params need to be supported. }; // kCallbackOptionsType is not part of the Type enum @@ -404,6 +413,37 @@ class V8_EXPORT CFunctionInfo { const CTypeInfo* arg_info_; }; +struct FastApiCallbackOptions; + +// Provided for testing. +struct AnyCType { + AnyCType() : int64_value(0) {} + + union { + bool bool_value; + int32_t int32_value; + uint32_t uint32_value; + int64_t int64_value; + uint64_t uint64_value; + float float_value; + double double_value; + Local object_value; + Local sequence_value; + const FastApiTypedArray* int32_ta_value; + const FastApiTypedArray* uint32_ta_value; + const FastApiTypedArray* int64_ta_value; + const FastApiTypedArray* uint64_ta_value; + const FastApiTypedArray* float_ta_value; + const FastApiTypedArray* double_ta_value; + FastApiCallbackOptions* options_value; + }; +}; + +static_assert( + sizeof(AnyCType) == 8, + "The AnyCType struct should have size == 64 bits, as this is assumed " + "by EffectControlLinearizer."); + class V8_EXPORT CFunction { public: constexpr CFunction() : address_(nullptr), type_info_(nullptr) {} @@ -460,6 +500,19 @@ class V8_EXPORT CFunction { return ArgUnwrap::Make(func); } + // Provided for testing purposes. 
+  template <typename R, typename... Args, typename R_Patch,
+            typename... Args_Patch>
+  static CFunction Make(R (*func)(Args...),
+                        R_Patch (*patching_func)(Args_Patch...)) {
+    CFunction c_func = ArgUnwrap<R (*)(Args...)>::Make(func);
+    static_assert(
+        sizeof...(Args_Patch) == sizeof...(Args),
+        "The patching function must have the same number of arguments.");
+    c_func.address_ = reinterpret_cast<void*>(patching_func);
+    return c_func;
+  }
+
   CFunction(const void* address, const CFunctionInfo* type_info);
 
  private:
@@ -479,7 +532,7 @@ class V8_EXPORT CFunction {
   };
 };
 
-struct ApiObject {
+struct V8_DEPRECATED("Use v8::Local<v8::Value> instead.") ApiObject {
   uintptr_t address;
 };
 
@@ -555,7 +608,8 @@ class CFunctionInfoImpl : public CFunctionInfo {
                       kReturnType == CTypeInfo::Type::kInt32 ||
                       kReturnType == CTypeInfo::Type::kUint32 ||
                       kReturnType == CTypeInfo::Type::kFloat32 ||
-                      kReturnType == CTypeInfo::Type::kFloat64,
+                      kReturnType == CTypeInfo::Type::kFloat64 ||
+                      kReturnType == CTypeInfo::Type::kAny,
                   "64-bit int and api object values are not currently "
                   "supported return types.");
   }
@@ -606,7 +660,8 @@ struct CTypeInfoTraits {};
   V(void, kVoid)                      \
   V(v8::Local<v8::Value>, kV8Value)   \
   V(v8::Local<v8::Object>, kV8Value)  \
-  V(ApiObject, kApiObject)
+  V(ApiObject, kApiObject)            \
+  V(AnyCType, kAny)
 
 // ApiObject was a temporary solution to wrap the pointer to the v8::Value.
 // Please use v8::Local<v8::Value> in new code for the arguments and
@@ -832,14 +887,14 @@ static constexpr CTypeInfo kTypeInfoFloat64 =
  * returns true on success. `type_info` will be used for conversions.
  */
 template <const CTypeInfo* type_info, typename T>
-V8_DEPRECATE_SOON(
+V8_DEPRECATED(
     "Use TryToCopyAndConvertArrayToCppBuffer<CTypeInfo::Identifier, T>()")
 bool V8_EXPORT V8_WARN_UNUSED_RESULT TryCopyAndConvertArrayToCppBuffer(
     Local<Array> src, T* dst, uint32_t max_length);
 
 template <>
-V8_DEPRECATE_SOON(
+V8_DEPRECATED(
     "Use TryToCopyAndConvertArrayToCppBuffer<CTypeInfo::Identifier, T>()")
 inline bool V8_WARN_UNUSED_RESULT
 TryCopyAndConvertArrayToCppBuffer<&kTypeInfoInt32, int32_t>(
@@ -848,7 +903,7 @@ inline bool V8_WARN_UNUSED_RESULT
 }
 
 template <>
-V8_DEPRECATE_SOON(
+V8_DEPRECATED(
     "Use TryToCopyAndConvertArrayToCppBuffer<CTypeInfo::Identifier, T>()")
 inline bool V8_WARN_UNUSED_RESULT
 TryCopyAndConvertArrayToCppBuffer<&kTypeInfoFloat64, double>(
diff --git a/deps/v8/include/v8-initialization.h b/deps/v8/include/v8-initialization.h
index 822d150371c698..7a2ae9316a8b0a 100644
--- a/deps/v8/include/v8-initialization.h
+++ b/deps/v8/include/v8-initialization.h
@@ -180,7 +180,9 @@ class V8_EXPORT V8 {
    * Clears all references to the v8::Platform. This should be invoked after
    * V8 was disposed.
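   *
   * A minimal teardown sequence under the new name (a sketch; it mirrors the
   * updated samples further down in this patch):
   *
   *   isolate->Dispose();
   *   v8::V8::Dispose();
   *   v8::V8::DisposePlatform();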
*/ - static void ShutdownPlatform(); + static void DisposePlatform(); + V8_DEPRECATE_SOON("Use DisposePlatform()") + static void ShutdownPlatform() { DisposePlatform(); } #ifdef V8_VIRTUAL_MEMORY_CAGE // diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h index 8ba21ffd84a560..2a258d505ac3bc 100644 --- a/deps/v8/include/v8-inspector.h +++ b/deps/v8/include/v8-inspector.h @@ -114,14 +114,10 @@ class V8_EXPORT V8StackTrace { virtual int topLineNumber() const = 0; virtual int topColumnNumber() const = 0; virtual int topScriptId() const = 0; - V8_DEPRECATED("Use V8::StackTrace::topScriptId() instead.") - int topScriptIdAsInteger() const { return topScriptId(); } virtual StringView topFunctionName() const = 0; virtual ~V8StackTrace() = default; virtual std::unique_ptr - buildInspectorObject() const = 0; - virtual std::unique_ptr buildInspectorObject(int maxAsyncDepth) const = 0; virtual std::unique_ptr toString() const = 0; diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index f0531bcff6ebde..f49b54557c628a 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -218,7 +218,7 @@ class Internals { static const int kEmbedderDataSlotRawPayloadOffset = kApiTaggedSize; #endif static const int kNativeContextEmbedderDataOffset = 6 * kApiTaggedSize; - static const int kFullStringRepresentationMask = 0x0f; + static const int kStringRepresentationAndEncodingMask = 0x0f; static const int kStringEncodingMask = 0x8; static const int kExternalTwoByteRepresentationTag = 0x02; static const int kExternalOneByteRepresentationTag = 0x0a; @@ -268,9 +268,9 @@ class Internals { static const int kNodeStateIsWeakValue = 2; static const int kNodeStateIsPendingValue = 3; - static const int kFirstNonstringType = 0x40; - static const int kOddballType = 0x43; - static const int kForeignType = 0x46; + static const int kFirstNonstringType = 0x80; + static const int kOddballType = 0x83; + static const int kForeignType = 0xcc; static const int kJSSpecialApiObjectType = 0x410; static const int kJSObjectType = 0x421; static const int kFirstJSApiObjectType = 0x422; @@ -337,7 +337,7 @@ class Internals { } V8_INLINE static bool IsExternalTwoByteString(int instance_type) { - int representation = (instance_type & kFullStringRepresentationMask); + int representation = (instance_type & kStringRepresentationAndEncodingMask); return representation == kExternalTwoByteRepresentationTag; } @@ -494,6 +494,11 @@ constexpr bool VirtualMemoryCageIsEnabled() { #endif } +// CagedPointers are guaranteed to point into the virtual memory cage. This is +// achieved for example by storing them as offset from the cage base rather +// than as raw pointers. +using CagedPointer_t = Address; + #ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE #define GB (1ULL << 30) @@ -511,17 +516,11 @@ constexpr size_t kVirtualMemoryCageSize = 1ULL << kVirtualMemoryCageSizeLog2; constexpr size_t kVirtualMemoryCageAlignment = Internals::kPtrComprCageBaseAlignment; -#ifdef V8_CAGED_POINTERS -// CagedPointers are guaranteed to point into the virtual memory cage. This is -// achieved by storing them as offset from the cage base rather than as raw -// pointers. -using CagedPointer_t = Address; - -// For efficiency, the offset is stored shifted to the left, so that -// it is guaranteed that the offset is smaller than the cage size after -// shifting it to the right again. This constant specifies the shift amount. 
+// Caged pointers are stored inside the heap as offset from the cage base
+// shifted to the left. This way, it is guaranteed that the offset is smaller
+// than the cage size after shifting it to the right again. This constant
+// specifies the shift amount.
 constexpr uint64_t kCagedPointerShift = 64 - kVirtualMemoryCageSizeLog2;
-#endif
 
 // Size of the guard regions surrounding the virtual memory cage. This assumes a
 // worst-case scenario of a 32-bit unsigned index being used to access an array
 // of 64-bit values.
diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h
index 32b53f1b423557..2fc7daf40ba946 100644
--- a/deps/v8/include/v8-isolate.h
+++ b/deps/v8/include/v8-isolate.h
@@ -628,8 +628,11 @@ class V8_EXPORT Isolate {
    * This specifies the callback called by the upcoming dynamic
    * import() language feature to load modules.
    */
+  V8_DEPRECATE_SOON("Use HostImportModuleDynamicallyCallback")
   void SetHostImportModuleDynamicallyCallback(
       HostImportModuleDynamicallyWithImportAssertionsCallback callback);
+  void SetHostImportModuleDynamicallyCallback(
+      HostImportModuleDynamicallyCallback callback);
 
   /**
    * This specifies the callback called by the upcoming import.meta
@@ -911,11 +914,13 @@ class V8_EXPORT Isolate {
 
   /**
    * Sets the embedder heap tracer for the isolate.
+   * SetEmbedderHeapTracer cannot be used simultaneously with AttachCppHeap.
    */
   void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
 
   /*
-   * Gets the currently active heap tracer for the isolate.
+   * Gets the currently active heap tracer for the isolate that was set with
+   * SetEmbedderHeapTracer.
    */
   EmbedderHeapTracer* GetEmbedderHeapTracer();
 
@@ -935,6 +940,7 @@ class V8_EXPORT Isolate {
    * Attaches a managed C++ heap as an extension to the JavaScript heap. The
    * embedder maintains ownership of the CppHeap. At most one C++ heap can be
    * attached to V8.
+   * AttachCppHeap cannot be used simultaneously with SetEmbedderHeapTracer.
    *
    * This is an experimental feature and may still change significantly.
    */
@@ -1131,6 +1137,21 @@ class V8_EXPORT Isolate {
    */
   void RequestGarbageCollectionForTesting(GarbageCollectionType type);
 
+  /**
+   * Request garbage collection with a specific embedder stack state in this
+   * Isolate. It is only valid to call this function if --expose_gc was
+   * specified.
+   *
+   * This should only be used for testing purposes and not to enforce a
+   * garbage collection schedule. It has a strong negative impact on garbage
+   * collection performance. Use IdleNotificationDeadline() or
+   * LowMemoryNotification() instead to influence the garbage collection
+   * schedule.
+   */
+  void RequestGarbageCollectionForTesting(
+      GarbageCollectionType type,
+      EmbedderHeapTracer::EmbedderStackState stack_state);
+
   /**
    * Set the callback to invoke for logging events.
    */
diff --git a/deps/v8/include/v8-locker.h b/deps/v8/include/v8-locker.h
index 360022b7d9932c..88ce4beb6219fb 100644
--- a/deps/v8/include/v8-locker.h
+++ b/deps/v8/include/v8-locker.h
@@ -128,7 +128,7 @@ class V8_EXPORT Locker {
    * results if anybody uses v8::Locker in the current process.
    */
   static bool WasEverUsed();
-  V8_DEPRECATE_SOON("Use WasEverUsed instead")
+  V8_DEPRECATED("Use WasEverUsed instead")
   static bool IsActive();
 
   // Disallow copying and assigning.
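The RequestGarbageCollectionForTesting() overload added above supersedes
EmbedderHeapTracer::GarbageCollectionForTesting(). A minimal sketch of a test
using it, assuming the process was started with --expose_gc, could look like:

    // Force a full GC while asserting that the C++ stack holds no heap
    // pointers, so conservative stack scanning can be skipped.
    isolate->RequestGarbageCollectionForTesting(
        v8::Isolate::kFullGarbageCollection,
        v8::EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers);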
diff --git a/deps/v8/include/v8-message.h b/deps/v8/include/v8-message.h index 8f09619cba5be3..a17eb7026dd4f5 100644 --- a/deps/v8/include/v8-message.h +++ b/deps/v8/include/v8-message.h @@ -11,6 +11,7 @@ #include "v8-local-handle.h" // NOLINT(build/include_directory) #include "v8-maybe.h" // NOLINT(build/include_directory) +#include "v8-primitive.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) namespace v8 { @@ -60,9 +61,7 @@ class ScriptOriginOptions { */ class V8_EXPORT ScriptOrigin { public: - #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */ V8_DEPRECATED("Use constructor with primitive C++ types") - #endif ScriptOrigin( Local resource_name, Local resource_line_offset, Local resource_column_offset, @@ -72,26 +71,26 @@ class V8_EXPORT ScriptOrigin { Local resource_is_opaque = Local(), Local is_wasm = Local(), Local is_module = Local(), - Local host_defined_options = Local()); - #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */ + Local host_defined_options = Local()); V8_DEPRECATED("Use constructor that takes an isolate") - #endif - explicit ScriptOrigin( - Local resource_name, int resource_line_offset = 0, - int resource_column_offset = 0, - bool resource_is_shared_cross_origin = false, int script_id = -1, - Local source_map_url = Local(), - bool resource_is_opaque = false, bool is_wasm = false, - bool is_module = false, - Local host_defined_options = Local()); - V8_INLINE ScriptOrigin( - Isolate* isolate, Local resource_name, - int resource_line_offset = 0, int resource_column_offset = 0, - bool resource_is_shared_cross_origin = false, int script_id = -1, - Local source_map_url = Local(), - bool resource_is_opaque = false, bool is_wasm = false, - bool is_module = false, - Local host_defined_options = Local()) + explicit ScriptOrigin(Local resource_name, + int resource_line_offset = 0, + int resource_column_offset = 0, + bool resource_is_shared_cross_origin = false, + int script_id = -1, + Local source_map_url = Local(), + bool resource_is_opaque = false, bool is_wasm = false, + bool is_module = false, + Local host_defined_options = Local()); + V8_INLINE ScriptOrigin(Isolate* isolate, Local resource_name, + int resource_line_offset = 0, + int resource_column_offset = 0, + bool resource_is_shared_cross_origin = false, + int script_id = -1, + Local source_map_url = Local(), + bool resource_is_opaque = false, bool is_wasm = false, + bool is_module = false, + Local host_defined_options = Local()) : isolate_(isolate), resource_name_(resource_name), resource_line_offset_(resource_line_offset), @@ -100,7 +99,9 @@ class V8_EXPORT ScriptOrigin { is_module), script_id_(script_id), source_map_url_(source_map_url), - host_defined_options_(host_defined_options) {} + host_defined_options_(host_defined_options) { + VerifyHostDefinedOptions(); + } V8_INLINE Local ResourceName() const; V8_DEPRECATED("Use getter with primitive C++ types.") @@ -113,10 +114,13 @@ class V8_EXPORT ScriptOrigin { V8_INLINE int ColumnOffset() const; V8_INLINE int ScriptId() const; V8_INLINE Local SourceMapUrl() const; - V8_INLINE Local HostDefinedOptions() const; + V8_DEPRECATE_SOON("Use GetHostDefinedOptions") + Local HostDefinedOptions() const; + V8_INLINE Local GetHostDefinedOptions() const; V8_INLINE ScriptOriginOptions Options() const { return options_; } private: + void VerifyHostDefinedOptions() const; Isolate* isolate_; Local resource_name_; int resource_line_offset_; @@ -124,7 +128,7 @@ class V8_EXPORT ScriptOrigin { ScriptOriginOptions 
options_; int script_id_; Local source_map_url_; - Local host_defined_options_; + Local host_defined_options_; }; /** @@ -212,7 +216,7 @@ class V8_EXPORT Message { bool IsSharedCrossOrigin() const; bool IsOpaque() const; - V8_DEPRECATE_SOON("Use the version that takes a std::ostream&.") + V8_DEPRECATED("Use the version that takes a std::ostream&.") static void PrintCurrentStackTrace(Isolate* isolate, FILE* out); static void PrintCurrentStackTrace(Isolate* isolate, std::ostream& out); @@ -224,7 +228,19 @@ class V8_EXPORT Message { Local ScriptOrigin::ResourceName() const { return resource_name_; } -Local ScriptOrigin::HostDefinedOptions() const { +Local ScriptOrigin::ResourceLineOffset() const { + return v8::Integer::New(isolate_, resource_line_offset_); +} + +Local ScriptOrigin::ResourceColumnOffset() const { + return v8::Integer::New(isolate_, resource_column_offset_); +} + +Local ScriptOrigin::ScriptID() const { + return v8::Integer::New(isolate_, script_id_); +} + +Local ScriptOrigin::GetHostDefinedOptions() const { return host_defined_options_; } diff --git a/deps/v8/include/v8-object.h b/deps/v8/include/v8-object.h index 6716162df10d75..e047c413ac2f70 100644 --- a/deps/v8/include/v8-object.h +++ b/deps/v8/include/v8-object.h @@ -604,7 +604,7 @@ class V8_EXPORT Object : public Value { Local GetCreationContextChecked(); /** Same as above, but works for Persistents */ - V8_DEPRECATE_SOON( + V8_DEPRECATED( "Use MaybeLocal GetCreationContext(const " "PersistentBase& object)") static Local CreationContext(const PersistentBase& object); diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h index 234582f0f6aff2..9e226331f830ed 100644 --- a/deps/v8/include/v8-platform.h +++ b/deps/v8/include/v8-platform.h @@ -510,6 +510,213 @@ class PageAllocator { virtual bool CanAllocateSharedPages() { return false; } }; +/** + * Page permissions. + */ +enum class PagePermissions { + kNoAccess, + kRead, + kReadWrite, + kReadWriteExecute, + kReadExecute, +}; + +/** + * Class to manage a virtual memory address space. + * + * This class represents a contiguous region of virtual address space in which + * sub-spaces and (private or shared) memory pages can be allocated, freed, and + * modified. This interface is meant to eventually replace the PageAllocator + * interface, and can be used as an alternative in the meantime. + */ +class VirtualAddressSpace { + public: + using Address = uintptr_t; + + VirtualAddressSpace(size_t page_size, size_t allocation_granularity, + Address base, size_t size) + : page_size_(page_size), + allocation_granularity_(allocation_granularity), + base_(base), + size_(size) {} + + virtual ~VirtualAddressSpace() = default; + + /** + * The page size used inside this space. Guaranteed to be a power of two. + * Used as granularity for all page-related operations except for allocation, + * which use the allocation_granularity(), see below. + * + * \returns the page size in bytes. + */ + size_t page_size() const { return page_size_; } + + /** + * The granularity of page allocations and, by extension, of subspace + * allocations. This is guaranteed to be a power of two and a multiple of the + * page_size(). In practice, this is equal to the page size on most OSes, but + * on Windows it is usually 64KB, while the page size is 4KB. + * + * \returns the allocation granularity in bytes. + */ + size_t allocation_granularity() const { return allocation_granularity_; } + + /** + * The base address of the address space managed by this instance. 
+   *
+   * \returns the base address of this address space.
+   */
+  Address base() const { return base_; }
+
+  /**
+   * The size of the address space managed by this instance.
+   *
+   * \returns the size of this address space in bytes.
+   */
+  size_t size() const { return size_; }
+
+  /**
+   * Sets the random seed so that RandomPageAddress() will generate
+   * repeatable sequences of random addresses.
+   *
+   * \param seed The seed for the PRNG.
+   */
+  virtual void SetRandomSeed(int64_t seed) = 0;
+
+  /**
+   * Returns a random address inside this address space, suitable for page
+   * allocation hints.
+   *
+   * \returns a random address aligned to allocation_granularity().
+   */
+  virtual Address RandomPageAddress() = 0;
+
+  /**
+   * Allocates private memory pages with the given alignment and permissions.
+   *
+   * \param hint If nonzero, the allocation is attempted to be placed at the
+   * given address first. If that fails, the allocation is attempted to be
+   * placed elsewhere, possibly nearby, but that is not guaranteed. Specifying
+   * zero for the hint always causes this function to choose a random address.
+   *
+   * \param size The size of the allocation in bytes. Must be a multiple of the
+   * allocation_granularity().
+   *
+   * \param alignment The alignment of the allocation in bytes. Must be a
+   * multiple of the allocation_granularity() and should be a power of two.
+   *
+   * \param permissions The page permissions of the newly allocated pages.
+   *
+   * \returns the start address of the allocated pages on success, zero on
+   * failure.
+   */
+  static constexpr Address kNoHint = 0;
+  virtual V8_WARN_UNUSED_RESULT Address
+  AllocatePages(Address hint, size_t size, size_t alignment,
+                PagePermissions permissions) = 0;
+
+  /**
+   * Frees previously allocated pages.
+   *
+   * \param address The start address of the pages to free. This address must
+   * have been obtained from a call to AllocatePages.
+   *
+   * \param size The size in bytes of the region to free. This must match the
+   * size passed to AllocatePages when the pages were allocated.
+   *
+   * \returns true on success, false otherwise.
+   */
+  virtual V8_WARN_UNUSED_RESULT bool FreePages(Address address,
+                                               size_t size) = 0;
+
+  /**
+   * Sets permissions of all allocated pages in the given range.
+   *
+   * \param address The start address of the range. Must be aligned to
+   * page_size().
+   *
+   * \param size The size in bytes of the range. Must be a multiple
+   * of page_size().
+   *
+   * \param permissions The new permissions for the range.
+   *
+   * \returns true on success, false otherwise.
+   */
+  virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions(
+      Address address, size_t size, PagePermissions permissions) = 0;
+
+  /**
+   * Whether this instance can allocate subspaces or not.
+   *
+   * \returns true if subspaces can be allocated, false if not.
+   */
+  virtual bool CanAllocateSubspaces() = 0;
+
+  /*
+   * Allocates a subspace.
+   *
+   * The address space of a subspace stays reserved in the parent space for the
+   * lifetime of the subspace. As such, it is guaranteed that page allocations
+   * on the parent space cannot end up inside a subspace.
+   *
+   * \param hint Hints where the subspace should be allocated. See
+   * AllocatePages() for more details.
+   *
+   * \param size The size in bytes of the subspace. Must be a multiple of the
+   * allocation_granularity().
+   *
+   * \param alignment The alignment of the subspace in bytes. Must be a multiple
+   * of the allocation_granularity() and should be a power of two.
+   *
+   * \param max_permissions The maximum permissions that pages allocated in the
+   * subspace can obtain.
+   *
+   * \returns a new subspace or nullptr on failure.
+   */
+  virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
+      Address hint, size_t size, size_t alignment,
+      PagePermissions max_permissions) = 0;
+
+  //
+  // TODO(v8) maybe refactor the methods below before stabilizing the API. For
+  // example by combining them into some form of page operation method that
+  // takes a command enum as parameter.
+  //
+
+  /**
+   * Frees memory in the given [address, address + size) range. address and
+   * size should be aligned to the page_size(). The next write to this memory
+   * area brings the memory transparently back. This should be treated as a
+   * hint to the OS that the pages are no longer needed. It does not guarantee
+   * that the pages will be discarded immediately or at all.
+   *
+   * \returns true on success, false otherwise. Since this method is only a
+   * hint, a successful invocation does not imply that pages have been removed.
+   */
+  virtual V8_WARN_UNUSED_RESULT bool DiscardSystemPages(Address address,
+                                                        size_t size) {
+    return true;
+  }
+
+  /**
+   * Decommits any wired memory pages in the given range, allowing the OS to
+   * reclaim them, and marks the region as inaccessible (kNoAccess). The
+   * address range stays reserved and can be accessed again later by changing
+   * its permissions. However, in that case the memory content is guaranteed
+   * to be zero-initialized again. The memory must have been previously
+   * allocated by a call to AllocatePages.
+   *
+   * \returns true on success, false otherwise.
+   */
+  virtual V8_WARN_UNUSED_RESULT bool DecommitPages(Address address,
+                                                   size_t size) = 0;
+
+ private:
+  const size_t page_size_;
+  const size_t allocation_granularity_;
+  const Address base_;
+  const size_t size_;
+};
+
 /**
  * V8 Allocator used for allocating zone backings.
  */
@@ -522,6 +729,16 @@ class ZoneBackingAllocator {
   virtual FreeFn GetFreeFn() const { return ::free; }
 };
 
+/**
+ * Observer used by V8 to notify the embedder about entering/leaving sections
+ * with high throughput of malloc/free operations.
+ */
+class HighAllocationThroughputObserver {
+ public:
+  virtual void EnterSection() {}
+  virtual void LeaveSection() {}
+};
+
 /**
  * V8 Platform abstraction layer.
  *
@@ -713,6 +930,16 @@ class Platform {
    */
   virtual void DumpWithoutCrashing() {}
 
+  /**
+   * Allows the embedder to observe sections with high throughput allocation
+   * operations.
+   */
+  virtual HighAllocationThroughputObserver*
+  GetHighAllocationThroughputObserver() {
+    static HighAllocationThroughputObserver default_observer;
+    return &default_observer;
+  }
+
  protected:
   /**
    * Default implementation of current wall-clock time in milliseconds
diff --git a/deps/v8/include/v8-primitive.h b/deps/v8/include/v8-primitive.h
index 8a95c151bd1f62..11c01876c723cb 100644
--- a/deps/v8/include/v8-primitive.h
+++ b/deps/v8/include/v8-primitive.h
@@ -54,12 +54,22 @@ class V8_EXPORT Boolean : public Primitive {
  * This is passed back to the embedder as part of
  * HostImportModuleDynamicallyCallback for module loading.
*/ -class V8_EXPORT PrimitiveArray { +class V8_EXPORT PrimitiveArray : public Data { public: static Local New(Isolate* isolate, int length); int Length() const; void Set(Isolate* isolate, int index, Local item); Local Get(Isolate* isolate, int index); + + V8_INLINE static PrimitiveArray* Cast(Data* data) { +#ifdef V8_ENABLE_CHECKS + CheckCast(data); +#endif + return reinterpret_cast(data); + } + + private: + static void CheckCast(Data* obj); }; /** @@ -796,7 +806,7 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase( using A = internal::Address; using I = internal::Internals; A obj = *reinterpret_cast(this); - int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask; + int type = I::GetInstanceType(obj) & I::kStringRepresentationAndEncodingMask; *encoding_out = static_cast(type & I::kStringEncodingMask); ExternalStringResourceBase* resource; if (type == I::kExternalOneByteRepresentationTag || diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index ccf15bab2a0cdd..c9a2704f7bde72 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -20,9 +20,11 @@ */ namespace v8 { +enum class EmbedderStateTag : uint8_t; class HeapGraphNode; struct HeapStatsUpdate; class Object; +enum StateTag : int; using NativeObject = void*; using SnapshotObjectId = uint32_t; @@ -210,6 +212,16 @@ class V8_EXPORT CpuProfile { */ int64_t GetStartTime() const; + /** + * Returns state of the vm when sample was captured. + */ + StateTag GetSampleState(int index) const; + + /** + * Returns state of the embedder when sample was captured. + */ + EmbedderStateTag GetSampleEmbedderState(int index) const; + /** * Returns time when the profile recording was stopped (in microseconds) * since some unspecified starting point. diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h index 356b99358be339..d4b11c147d9b58 100644 --- a/deps/v8/include/v8-script.h +++ b/deps/v8/include/v8-script.h @@ -47,7 +47,9 @@ class V8_EXPORT ScriptOrModule { * The options that were passed by the embedder as HostDefinedOptions to * the ScriptOrigin. */ + V8_DEPRECATE_SOON("Use HostDefinedOptions") Local GetHostDefinedOptions(); + Local HostDefinedOptions(); }; /** @@ -209,7 +211,7 @@ class V8_EXPORT Module : public Data { */ int GetIdentityHash() const; - using ResolveCallback = + using ResolveCallback V8_DEPRECATED("Use ResolveModuleCallback") = MaybeLocal (*)(Local context, Local specifier, Local referrer); using ResolveModuleCallback = MaybeLocal (*)( @@ -340,6 +342,8 @@ class V8_EXPORT Script { * UnboundScript::BindToCurrentContext()). */ V8_WARN_UNUSED_RESULT MaybeLocal Run(Local context); + V8_WARN_UNUSED_RESULT MaybeLocal Run(Local context, + Local host_defined_options); /** * Returns the corresponding context-unbound script. @@ -430,7 +434,7 @@ class V8_EXPORT ScriptCompiler { int resource_column_offset; ScriptOriginOptions resource_options; Local source_map_url; - Local host_defined_options; + Local host_defined_options; // Cached data from previous compilation (if a kConsume*Cache flag is // set), or hold newly generated cache data (kProduce*Cache flags) are @@ -688,6 +692,7 @@ class V8_EXPORT ScriptCompiler { * It is possible to specify multiple context extensions (obj in the above * example). 
*/ + V8_DEPRECATE_SOON("Use CompileFunction") static V8_WARN_UNUSED_RESULT MaybeLocal CompileFunctionInContext( Local context, Source* source, size_t arguments_count, Local arguments[], size_t context_extension_count, @@ -747,7 +752,7 @@ ScriptCompiler::Source::Source(Local string, const ScriptOrigin& origin, resource_column_offset(origin.ColumnOffset()), resource_options(origin.Options()), source_map_url(origin.SourceMapUrl()), - host_defined_options(origin.HostDefinedOptions()), + host_defined_options(origin.GetHostDefinedOptions()), cached_data(data), consume_cache_task(consume_cache_task) {} diff --git a/deps/v8/include/v8-statistics.h b/deps/v8/include/v8-statistics.h index 7f69e5d65efb0c..ca20bc9f6c4161 100644 --- a/deps/v8/include/v8-statistics.h +++ b/deps/v8/include/v8-statistics.h @@ -201,11 +201,13 @@ class V8_EXPORT HeapCodeStatistics { size_t code_and_metadata_size() { return code_and_metadata_size_; } size_t bytecode_and_metadata_size() { return bytecode_and_metadata_size_; } size_t external_script_source_size() { return external_script_source_size_; } + size_t cpu_profiler_metadata_size() { return cpu_profiler_metadata_size_; } private: size_t code_and_metadata_size_; size_t bytecode_and_metadata_size_; size_t external_script_source_size_; + size_t cpu_profiler_metadata_size_; friend class Isolate; }; diff --git a/deps/v8/include/v8-unwinder.h b/deps/v8/include/v8-unwinder.h index 22a5cd713d4f91..8dca52f41c817a 100644 --- a/deps/v8/include/v8-unwinder.h +++ b/deps/v8/include/v8-unwinder.h @@ -7,7 +7,8 @@ #include -#include "v8config.h" // NOLINT(build/include_directory) +#include "v8-embedder-state-scope.h" // NOLINT(build/include_directory) +#include "v8config.h" // NOLINT(build/include_directory) namespace v8 { // Holds the callee saved registers needed for the stack unwinder. It is the @@ -32,7 +33,7 @@ struct V8_EXPORT RegisterState { }; // A StateTag represents a possible state of the VM. -enum StateTag { +enum StateTag : int { JS, GC, PARSER, @@ -46,11 +47,13 @@ enum StateTag { // The output structure filled up by GetStackSample API function. struct SampleInfo { - size_t frames_count; // Number of frames collected. - StateTag vm_state; // Current VM state. - void* external_callback_entry; // External callback address if VM is - // executing an external callback. - void* context; // Incumbent native context address. + size_t frames_count; // Number of frames collected. + void* external_callback_entry; // External callback address if VM is + // executing an external callback. + void* context; // Incumbent native context address. + void* embedder_context; // Native context address for embedder state + StateTag vm_state; // Current VM state. + EmbedderStateTag embedder_state; // Current Embedder state }; struct MemoryRange { diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 24da2489f7f3f3..1b2795a877f826 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 9 -#define V8_MINOR_VERSION 7 -#define V8_BUILD_NUMBER 106 -#define V8_PATCH_LEVEL 18 +#define V8_MINOR_VERSION 8 +#define V8_BUILD_NUMBER 177 +#define V8_PATCH_LEVEL 9 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) 
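Stepping back to the VirtualAddressSpace interface added to v8-platform.h
above, the following sketch shows the intended allocate/protect/free flow. The
concrete space object would come from the embedder or from V8's own
implementations, and the names used here are illustrative only:

    // Reserve one allocation granule of read/write memory, then release it.
    void UseSpace(v8::VirtualAddressSpace* space) {
      size_t size = space->allocation_granularity();
      v8::VirtualAddressSpace::Address addr = space->AllocatePages(
          v8::VirtualAddressSpace::kNoHint, size, size,
          v8::PagePermissions::kReadWrite);
      if (addr == 0) return;  // Allocation failed.
      // ... use [addr, addr + size) ...
      if (!space->SetPagePermissions(addr, size,
                                     v8::PagePermissions::kNoAccess)) {
        // Permission change failed; the region is still owned and is freed
        // below regardless.
      }
      if (!space->FreePages(addr, size)) {
        // Expected to succeed for an exactly matching allocation.
      }
    }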
diff --git a/deps/v8/include/v8-wasm.h b/deps/v8/include/v8-wasm.h index 612ed2fae40c89..59b2a69b1244ca 100644 --- a/deps/v8/include/v8-wasm.h +++ b/deps/v8/include/v8-wasm.h @@ -103,6 +103,12 @@ class V8_EXPORT WasmModuleObject : public Object { */ CompiledWasmModule GetCompiledModule(); + /** + * Compile a Wasm module from the provided uncompiled bytes. + */ + static MaybeLocal Compile( + Isolate* isolate, MemorySpan wire_bytes); + V8_INLINE static WasmModuleObject* Cast(Value* value) { #ifdef V8_ENABLE_CHECKS CheckCast(value); diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index ecb992822cff3e..1242d4289ceb93 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -293,6 +293,8 @@ path. Add it with -I to the command line // V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result)) // supported // V8_HAS_CPP_ATTRIBUTE_NODISCARD - [[nodiscard]] supported +// V8_HAS_CPP_ATTRIBUTE_NO_UNIQUE_ADDRESS +// - [[no_unique_address]] supported // V8_HAS_BUILTIN_BSWAP16 - __builtin_bswap16() supported // V8_HAS_BUILTIN_BSWAP32 - __builtin_bswap32() supported // V8_HAS_BUILTIN_BSWAP64 - __builtin_bswap64() supported @@ -337,6 +339,8 @@ path. Add it with -I to the command line (__has_attribute(warn_unused_result)) # define V8_HAS_CPP_ATTRIBUTE_NODISCARD (V8_HAS_CPP_ATTRIBUTE(nodiscard)) +# define V8_HAS_CPP_ATTRIBUTE_NO_UNIQUE_ADDRESS \ + (V8_HAS_CPP_ATTRIBUTE(no_unique_address)) # define V8_HAS_BUILTIN_ASSUME_ALIGNED (__has_builtin(__builtin_assume_aligned)) # define V8_HAS_BUILTIN_BSWAP16 (__has_builtin(__builtin_bswap16)) @@ -507,6 +511,27 @@ path. Add it with -I to the command line #define V8_NODISCARD /* NOT SUPPORTED */ #endif +// The no_unique_address attribute allows tail padding in a non-static data +// member to overlap other members of the enclosing class (and in the special +// case when the type is empty, permits it to fully overlap other members). The +// field is laid out as if a base class were encountered at the corresponding +// point within the class (except that it does not share a vptr with the +// enclosing object). +// +// Apply to a data member like: +// +// class Foo { +// V8_NO_UNIQUE_ADDRESS Bar bar_; +// }; +// +// [[no_unique_address]] comes in C++20 but supported in clang with +// -std >= c++11. +#if V8_HAS_CPP_ATTRIBUTE_NO_UNIQUE_ADDRESS +#define V8_NO_UNIQUE_ADDRESS [[no_unique_address]] +#else +#define V8_NO_UNIQUE_ADDRESS /* NOT SUPPORTED */ +#endif + // Helper macro to define no_sanitize attributes only with clang. #if defined(__clang__) && defined(__has_attribute) #if __has_attribute(no_sanitize) @@ -566,6 +591,13 @@ V8 shared library set USING_V8_SHARED. #define V8_CAGED_POINTERS #endif +// From C++17 onwards, static constexpr member variables are defined to be +// "inline", and adding a separate definition for them can trigger deprecation +// warnings. For C++14 and below, however, these definitions are required. +#if __cplusplus < 201703L && (!defined(_MSVC_LANG) || _MSVC_LANG < 201703L) +#define V8_STATIC_CONSTEXPR_VARIABLES_NEED_DEFINITIONS +#endif + // clang-format on #undef V8_HAS_CPP_ATTRIBUTE diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index 049a2e2786aba3..82964dd7d46598 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -58,9 +58,9 @@ # Linux. 
'V8 Linux - builder': 'release_x86_gcmole', 'V8 Linux - debug builder': 'debug_x86', - 'V8 Linux - shared': 'release_x86_shared_verify_heap', - 'V8 Linux - noi18n - debug': 'debug_x86_no_i18n', - 'V8 Linux - verify csa': 'release_x86_verify_csa', + 'V8 Linux - shared - builder': 'release_x86_shared_verify_heap', + 'V8 Linux - noi18n - debug builder': 'debug_x86_no_i18n', + 'V8 Linux - verify csa - builder': 'release_x86_verify_csa', # Linux64. 'V8 Linux64 - builder': 'release_x64', 'V8 Linux64 - builder (goma cache silo)': 'release_x64', @@ -71,10 +71,10 @@ 'V8 Linux64 - external code space - debug - builder': 'debug_x64_external_code_space', 'V8 Linux64 - custom snapshot - debug builder': 'debug_x64_custom', 'V8 Linux64 - heap sandbox - debug - builder': 'debug_x64_heap_sandbox', - 'V8 Linux64 - internal snapshot': 'release_x64_internal', - 'V8 Linux64 - debug - header includes': 'debug_x64_header_includes', - 'V8 Linux64 - shared': 'release_x64_shared_verify_heap', - 'V8 Linux64 - verify csa': 'release_x64_verify_csa', + 'V8 Linux64 - internal snapshot - builder': 'release_x64_internal', + 'V8 Linux64 - debug - header includes - builder': 'debug_x64_header_includes', + 'V8 Linux64 - shared - builder': 'release_x64_shared_verify_heap', + 'V8 Linux64 - verify csa - builder': 'release_x64_verify_csa', 'V8 Linux64 - no wasm - builder': 'release_x64_webassembly_disabled', # Windows. 'V8 Win32 - builder': 'release_x86_minimal_symbols', @@ -83,52 +83,52 @@ 'V8 Win32 - builder (reclient compare)': 'release_x86_minimal_symbols_reclient', 'V8 Win32 - debug builder': 'debug_x86_minimal_symbols', # TODO(machenbach): Remove after switching to x64 on infra side. - 'V8 Win64 ASAN': 'release_x64_asan_no_lsan', - 'V8 Win64': 'release_x64_minimal_symbols', + 'V8 Win64 ASAN - builder': 'release_x64_asan_no_lsan', + 'V8 Win64 - builder': 'release_x64_minimal_symbols', 'V8 Win64 - dev image': 'release_x64_minimal_symbols', - 'V8 Win64 - debug': 'debug_x64_minimal_symbols', - 'V8 Win64 - msvc': 'release_x64_msvc', + 'V8 Win64 - debug builder': 'debug_x64_minimal_symbols', + 'V8 Win64 - msvc - builder': 'release_x64_msvc', # Mac. 'V8 Mac64 - builder': 'release_x64', 'V8 Mac64 - debug builder': 'debug_x64', 'V8 Official Mac ARM64': 'release_arm64', 'V8 Official Mac ARM64 Debug': 'debug_arm64', - 'V8 Mac64 ASAN': 'release_x64_asan_no_lsan', + 'V8 Mac64 ASAN - builder': 'release_x64_asan_no_lsan', 'V8 Mac - arm64 - release builder': 'release_arm64', 'V8 Mac - arm64 - debug builder': 'debug_arm64', 'V8 Mac - arm64 - sim - debug builder': 'debug_simulate_arm64', 'V8 Mac - arm64 - sim - release builder': 'release_simulate_arm64', # Sanitizers. - 'V8 Linux64 ASAN': 'release_x64_asan', + 'V8 Linux64 ASAN - builder': 'release_x64_asan', 'V8 Linux64 TSAN - builder': 'release_x64_tsan', 'V8 Linux64 TSAN - no-concurrent-marking - builder': 'release_x64_tsan_no_cm', - 'V8 Linux - arm64 - sim - CFI': 'release_simulate_arm64_cfi', - 'V8 Linux - arm64 - sim - MSAN': 'release_simulate_arm64_msan', + 'V8 Linux - arm64 - sim - CFI - builder': 'release_simulate_arm64_cfi', + 'V8 Linux - arm64 - sim - MSAN - builder': 'release_simulate_arm64_msan', # Misc. - 'V8 Linux gcc': 'release_x86_gcc', + 'V8 Linux gcc - builder': 'release_x86_gcc', # FYI. 
- 'V8 iOS - sim': 'release_x64_ios_simulator', + 'V8 iOS - sim - builder': 'release_x64_ios_simulator', 'V8 Linux64 - arm64 - sim - heap sandbox - debug - builder': 'debug_x64_heap_sandbox_arm64_sim', 'V8 Linux64 - cppgc-non-default - debug - builder': 'debug_x64_non_default_cppgc', 'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto', - 'V8 Linux64 - disable runtime call stats': 'release_x64_disable_runtime_call_stats', + 'V8 Linux64 - disable runtime call stats - builder': 'release_x64_disable_runtime_call_stats', 'V8 Linux64 - debug - single generation - builder': 'debug_x64_single_generation', - 'V8 Linux64 - pointer compression': 'release_x64_pointer_compression', + 'V8 Linux64 - pointer compression - builder': 'release_x64_pointer_compression', 'V8 Linux64 - pointer compression without dchecks': 'release_x64_pointer_compression_without_dchecks', 'V8 Linux64 - arm64 - sim - pointer compression - builder': 'release_simulate_arm64_pointer_compression', - 'V8 Linux64 gcc - debug': 'debug_x64_gcc', + 'V8 Linux64 gcc - debug builder': 'debug_x64_gcc', 'V8 Fuchsia - builder': 'release_x64_fuchsia', 'V8 Fuchsia - debug builder': 'debug_x64_fuchsia', - 'V8 Linux64 - cfi': 'release_x64_cfi', - 'V8 Linux64 UBSan': 'release_x64_ubsan', + 'V8 Linux64 - cfi - builder': 'release_x64_cfi', + 'V8 Linux64 UBSan - builder': 'release_x64_ubsan', 'V8 Linux - vtunejit': 'debug_x86_vtunejit', 'V8 Linux64 - gcov coverage': 'release_x64_gcc_coverage', - 'V8 Linux64 - Fuzzilli': 'release_x64_fuzzilli', - 'V8 Linux - predictable': 'release_x86_predictable', - 'V8 Linux - full debug': 'full_debug_x86', - 'V8 Mac64 - full debug': 'full_debug_x64', + 'V8 Linux64 - Fuzzilli - builder': 'release_x64_fuzzilli', + 'V8 Linux - predictable - builder': 'release_x86_predictable', + 'V8 Linux - full debug builder': 'full_debug_x86', + 'V8 Mac64 - full debug builder': 'full_debug_x64', 'V8 Random Deopt Fuzzer - debug': 'debug_x64', }, 'client.v8.clusterfuzz': { @@ -180,23 +180,23 @@ 'V8 Arm - builder': 'release_arm', 'V8 Arm - debug builder': 'debug_arm', 'V8 Android Arm - builder': 'release_android_arm', - 'V8 Linux - arm - sim': 'release_simulate_arm', - 'V8 Linux - arm - sim - debug': 'debug_simulate_arm', + 'V8 Linux - arm - sim - builder': 'release_simulate_arm', + 'V8 Linux - arm - sim - debug builder': 'debug_simulate_arm', 'V8 Linux - arm - sim - lite - builder': 'release_simulate_arm_lite', 'V8 Linux - arm - sim - lite - debug builder': 'debug_simulate_arm_lite', # Arm64. 'V8 Android Arm64 - builder': 'release_android_arm64', 'V8 Android Arm64 - debug builder': 'debug_android_arm64', 'V8 Arm64 - builder': 'release_arm64_hard_float', - 'V8 Linux - arm64 - sim': 'release_simulate_arm64', - 'V8 Linux - arm64 - sim - debug': 'debug_simulate_arm64', - 'V8 Linux - arm64 - sim - gc stress': 'debug_simulate_arm64', + 'V8 Linux - arm64 - sim - builder': 'release_simulate_arm64', + 'V8 Linux - arm64 - sim - debug builder': 'debug_simulate_arm64', + 'V8 Linux - arm64 - sim - gc stress - builder': 'debug_simulate_arm64', # Mips. 'V8 Linux - mipsel - sim - builder': 'release_simulate_mipsel', 'V8 Linux - mips64el - sim - builder': 'release_simulate_mips64el', # IBM. 
- 'V8 Linux - ppc64 - sim': 'release_simulate_ppc64', - 'V8 Linux - s390x - sim': 'release_simulate_s390x', + 'V8 Linux - ppc64 - sim - builder': 'release_simulate_ppc64', + 'V8 Linux - s390x - sim - builder': 'release_simulate_s390x', # RISC-V 'V8 Linux - riscv64 - sim - builder': 'release_simulate_riscv64', # Loongson @@ -226,7 +226,7 @@ 'v8_linux64_cppgc_non_default_dbg_ng': 'debug_x64_non_default_cppgc', 'v8_linux64_dbg_ng': 'debug_x64_trybot', 'v8_linux64_dict_tracking_dbg_ng': 'debug_x64_dict_tracking_trybot', - 'v8_linux64_disable_runtime_call_stats_rel': 'release_x64_disable_runtime_call_stats', + 'v8_linux64_disable_runtime_call_stats_rel_ng': 'release_x64_disable_runtime_call_stats', 'v8_linux64_external_code_space_dbg_ng': 'debug_x64_external_code_space', 'v8_linux64_gc_stress_custom_snapshot_dbg_ng': 'debug_x64_trybot_custom', 'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc', @@ -280,6 +280,7 @@ 'v8_mac64_dbg': 'debug_x64', 'v8_mac64_dbg_ng': 'debug_x64', 'v8_mac64_compile_full_dbg_ng': 'full_debug_x64', + 'v8_mac64_asan_compile_rel_ng': 'release_x64_asan_no_lsan', 'v8_mac64_asan_rel_ng': 'release_x64_asan_no_lsan', 'v8_linux_arm_rel_ng': 'release_simulate_arm_trybot', 'v8_linux_arm_lite_compile_dbg': 'debug_simulate_arm_lite', diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index 56e238f4d0b533..db7566addd132b 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -373,6 +373,14 @@ {'name': 'v8testing', 'shards': 3}, ], }, + 'v8_linux64_disable_runtime_call_stats_rel_ng_triggered': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'v8testing'}, + ], + }, 'v8_linux64_external_code_space_dbg_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', @@ -1240,6 +1248,14 @@ {'name': 'v8testing', 'shards': 3}, ], }, + 'V8 Linux64 - disable runtime call stats': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'v8testing'}, + ], + }, 'V8 Linux64 - debug - fyi': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', @@ -2020,6 +2036,11 @@ 'suffix': 'deopt', 'test_args': ['--total-timeout-sec=2100', '--stress-deopt=1'] }, + { + 'name': 'numfuzz', + 'suffix': 'interrupt', + 'test_args': ['--total-timeout-sec=2100', '--stress-interrupt-budget=1'] + }, ], }, 'V8 NumFuzz - TSAN': { @@ -2043,6 +2064,11 @@ 'suffix': 'delay', 'test_args': ['--total-timeout-sec=2100', '--stress-delay-tasks=1'] }, + { + 'name': 'numfuzz', + 'suffix': 'interrupt', + 'test_args': ['--total-timeout-sec=2100', '--stress-interrupt-budget=1'] + }, { 'name': 'numfuzz', 'suffix': 'threads', @@ -2066,6 +2092,7 @@ '--stress-scavenge=4', '--stress-thread-pool-size=2', '--stress-stack-size=1', + '--stress-interrupt-budget=1', ], 'shards': 4 }, @@ -2098,6 +2125,11 @@ 'suffix': 'delay', 'test_args': ['--total-timeout-sec=2100', '--stress-delay-tasks=1'] }, + { + 'name': 'numfuzz', + 'suffix': 'interrupt', + 'test_args': ['--total-timeout-sec=2100', '--stress-interrupt-budget=1'] + }, { 'name': 'numfuzz', 'suffix': 'threads', @@ -2121,6 +2153,7 @@ '--stress-scavenge=4', '--stress-thread-pool-size=2', '--stress-stack-size=1', + '--stress-interrupt-budget=1', ], 'shards': 3 }, @@ -2147,6 +2180,11 @@ 'suffix': 'deopt', 'test_args': ['--total-timeout-sec=900', '--stress-deopt=1'] }, + { + 'name': 'numfuzz', + 'suffix': 'interrupt', + 'test_args': ['--total-timeout-sec=900', '--stress-interrupt-budget=1'] + }, ], }, 'v8_numfuzz_tsan_ng_triggered': { @@ -2164,6 +2202,11 @@ 'suffix': 'delay', 
'test_args': ['--total-timeout-sec=900', '--stress-delay-tasks=1'] }, + { + 'name': 'numfuzz', + 'suffix': 'interrupt', + 'test_args': ['--total-timeout-sec=900', '--stress-interrupt-budget=1'] + }, { 'name': 'numfuzz', 'suffix': 'threads', @@ -2187,6 +2230,7 @@ '--stress-scavenge=4', '--stress-thread-pool-size=2', '--stress-stack-size=1', + '--stress-interrupt-budget=1', ], }, { @@ -2211,6 +2255,11 @@ 'suffix': 'delay', 'test_args': ['--total-timeout-sec=900', '--stress-delay-tasks=1'] }, + { + 'name': 'numfuzz', + 'suffix': 'interrupt', + 'test_args': ['--total-timeout-sec=900', '--stress-interrupt-budget=1'] + }, { 'name': 'numfuzz', 'suffix': 'threads', @@ -2234,6 +2283,7 @@ '--stress-scavenge=4', '--stress-thread-pool-size=2', '--stress-stack-size=1', + '--stress-interrupt-budget=1', ], }, { diff --git a/deps/v8/samples/cppgc/hello-world.cc b/deps/v8/samples/cppgc/hello-world.cc index d76c16a553619f..86b0afe92f63df 100644 --- a/deps/v8/samples/cppgc/hello-world.cc +++ b/deps/v8/samples/cppgc/hello-world.cc @@ -24,13 +24,13 @@ class Rope final : public cppgc::GarbageCollected { public: explicit Rope(std::string part, Rope* next = nullptr) - : part_(part), next_(next) {} + : part_(std::move(part)), next_(next) {} void Trace(cppgc::Visitor* visitor) const { visitor->Trace(next_); } private: - std::string part_; - cppgc::Member next_; + const std::string part_; + const cppgc::Member next_; friend std::ostream& operator<<(std::ostream& os, const Rope& rope) { os << rope.part_; @@ -48,16 +48,19 @@ int main(int argc, char* argv[]) { // Initialize the process. This must happen before any cppgc::Heap::Create() // calls. cppgc::DefaultPlatform::InitializeProcess(cppgc_platform.get()); - // Create a managed heap. - std::unique_ptr heap = cppgc::Heap::Create(cppgc_platform); - // Allocate a string rope on the managed heap. - auto* greeting = cppgc::MakeGarbageCollected( - heap->GetAllocationHandle(), "Hello ", - cppgc::MakeGarbageCollected(heap->GetAllocationHandle(), "World!")); - // Manually trigger garbage collection. The object greeting is held alive - // through conservative stack scanning. - heap->ForceGarbageCollectionSlow("CppGC example", "Testing"); - std::cout << *greeting << std::endl; + { + // Create a managed heap. + std::unique_ptr heap = cppgc::Heap::Create(cppgc_platform); + // Allocate a string rope on the managed heap. + Rope* greeting = cppgc::MakeGarbageCollected( + heap->GetAllocationHandle(), "Hello ", + cppgc::MakeGarbageCollected(heap->GetAllocationHandle(), + "World!")); + // Manually trigger garbage collection. The object greeting is held alive + // through conservative stack scanning. + heap->ForceGarbageCollectionSlow("CppGC example", "Testing"); + std::cout << *greeting << std::endl; + } // Gracefully shutdown the process. cppgc::ShutdownProcess(); return 0; diff --git a/deps/v8/samples/hello-world.cc b/deps/v8/samples/hello-world.cc index 92436e01773214..557ba63e0fd85e 100644 --- a/deps/v8/samples/hello-world.cc +++ b/deps/v8/samples/hello-world.cc @@ -98,7 +98,7 @@ int main(int argc, char* argv[]) { // Dispose the isolate and tear down V8. 
isolate->Dispose(); v8::V8::Dispose(); - v8::V8::ShutdownPlatform(); + v8::V8::DisposePlatform(); delete create_params.array_buffer_allocator; return 0; } diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc index ab8abeb71e36f0..9a2c8c3f544bf1 100644 --- a/deps/v8/samples/shell.cc +++ b/deps/v8/samples/shell.cc @@ -95,7 +95,7 @@ int main(int argc, char* argv[]) { } isolate->Dispose(); v8::V8::Dispose(); - v8::V8::ShutdownPlatform(); + v8::V8::DisposePlatform(); delete create_params.array_buffer_allocator; return result; } diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h index 20a3d910cea486..17f8bd94bc2035 100644 --- a/deps/v8/src/api/api-inl.h +++ b/deps/v8/src/api/api-inl.h @@ -318,6 +318,7 @@ inline bool V8_EXPORT TryToCopyAndConvertArrayToCppBuffer(Local src, namespace internal { Handle HandleScopeImplementer::LastEnteredContext() { + DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); for (size_t i = 0; i < entered_contexts_.size(); ++i) { diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc index c64107f3b8cd5e..75109e35b7ece4 100644 --- a/deps/v8/src/api/api-natives.cc +++ b/deps/v8/src/api/api-natives.cc @@ -244,7 +244,7 @@ MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, PropertyAttributes attributes = details.attributes(); PropertyKind kind = details.kind(); - if (kind == kData) { + if (kind == PropertyKind::kData) { auto prop_data = handle(properties->get(i++), isolate); RETURN_ON_EXCEPTION( isolate, @@ -263,7 +263,7 @@ MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, // context. PropertyDetails details(Smi::cast(properties->get(i++))); PropertyAttributes attributes = details.attributes(); - DCHECK_EQ(kData, details.kind()); + DCHECK_EQ(PropertyKind::kData, details.kind()); v8::Intrinsic intrinsic = static_cast(Smi::ToInt(properties->get(i++))); @@ -625,7 +625,8 @@ MaybeHandle ApiNatives::InstantiateRemoteObject( void ApiNatives::AddDataProperty(Isolate* isolate, Handle info, Handle name, Handle value, PropertyAttributes attributes) { - PropertyDetails details(kData, attributes, PropertyConstness::kMutable); + PropertyDetails details(PropertyKind::kData, attributes, + PropertyConstness::kMutable); auto details_handle = handle(details.AsSmi(), isolate); Handle data[] = {name, details_handle, value}; AddPropertyToPropertyList(isolate, info, arraysize(data), data); @@ -636,7 +637,8 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle info, PropertyAttributes attributes) { auto value = handle(Smi::FromInt(intrinsic), isolate); auto intrinsic_marker = isolate->factory()->true_value(); - PropertyDetails details(kData, attributes, PropertyConstness::kMutable); + PropertyDetails details(PropertyKind::kData, attributes, + PropertyConstness::kMutable); auto details_handle = handle(details.AsSmi(), isolate); Handle data[] = {name, intrinsic_marker, details_handle, value}; AddPropertyToPropertyList(isolate, info, arraysize(data), data); @@ -650,7 +652,8 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate, PropertyAttributes attributes) { if (!getter.is_null()) getter->set_published(true); if (!setter.is_null()) setter->set_published(true); - PropertyDetails details(kAccessor, attributes, PropertyConstness::kMutable); + PropertyDetails details(PropertyKind::kAccessor, attributes, + PropertyConstness::kMutable); auto details_handle = handle(details.AsSmi(), isolate); Handle data[] = {name, details_handle, getter, 
setter}; AddPropertyToPropertyList(isolate, info, arraysize(data), data); diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index 3cc4f2b61e0692..7e9c504f8e7e2c 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -15,6 +15,7 @@ #include "include/v8-callbacks.h" #include "include/v8-cppgc.h" #include "include/v8-date.h" +#include "include/v8-embedder-state-scope.h" #include "include/v8-extension.h" #include "include/v8-fast-api-calls.h" #include "include/v8-function.h" @@ -43,9 +44,13 @@ #include "src/common/globals.h" #include "src/compiler-dispatcher/lazy-compile-dispatcher.h" #include "src/date/date.h" +#if V8_ENABLE_WEBASSEMBLY +#include "src/debug/debug-wasm-objects.h" +#endif // V8_ENABLE_WEBASSEMBLY #include "src/debug/liveedit.h" #include "src/deoptimizer/deoptimizer.h" #include "src/diagnostics/gdb-jit.h" +#include "src/execution/embedder-state.h" #include "src/execution/execution.h" #include "src/execution/frames-inl.h" #include "src/execution/isolate-inl.h" @@ -59,6 +64,8 @@ #include "src/handles/persistent-handles.h" #include "src/heap/embedder-tracing.h" #include "src/heap/heap-inl.h" +#include "src/heap/heap-write-barrier.h" +#include "src/heap/safepoint.h" #include "src/init/bootstrapper.h" #include "src/init/icu_util.h" #include "src/init/startup-data-util.h" @@ -170,8 +177,8 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate, i::Handle script) { i::Handle scriptName(script->GetNameOrSourceURL(), isolate); i::Handle source_map_url(script->source_mapping_url(), isolate); - i::Handle host_defined_options(script->host_defined_options(), - isolate); + i::Handle host_defined_options(script->host_defined_options(), + isolate); ScriptOriginOptions options(script->origin_options()); bool is_wasm = false; #if V8_ENABLE_WEBASSEMBLY @@ -182,7 +189,7 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate, script->line_offset(), script->column_offset(), options.IsSharedCrossOrigin(), script->id(), Utils::ToLocal(source_map_url), options.IsOpaque(), is_wasm, - options.IsModule(), Utils::PrimitiveArrayToLocal(host_defined_options)); + options.IsModule(), Utils::ToLocal(host_defined_options)); return origin; } @@ -191,7 +198,7 @@ ScriptOrigin::ScriptOrigin( Local column_offset, Local is_shared_cross_origin, Local script_id, Local source_map_url, Local is_opaque, Local is_wasm, Local is_module, - Local host_defined_options) + Local host_defined_options) : ScriptOrigin( Isolate::GetCurrent(), resource_name, line_offset.IsEmpty() ? 
0 : static_cast(line_offset->Value()), @@ -207,7 +214,7 @@ ScriptOrigin::ScriptOrigin(Local resource_name, int line_offset, int column_offset, bool is_shared_cross_origin, int script_id, Local source_map_url, bool is_opaque, bool is_wasm, bool is_module, - Local host_defined_options) + Local host_defined_options) : isolate_(Isolate::GetCurrent()), resource_name_(resource_name), resource_line_offset_(line_offset), @@ -217,16 +224,15 @@ ScriptOrigin::ScriptOrigin(Local resource_name, int line_offset, source_map_url_(source_map_url), host_defined_options_(host_defined_options) {} -Local ScriptOrigin::ResourceLineOffset() const { - return v8::Integer::New(isolate_, resource_line_offset_); -} - -Local ScriptOrigin::ResourceColumnOffset() const { - return v8::Integer::New(isolate_, resource_column_offset_); -} - -Local ScriptOrigin::ScriptID() const { - return v8::Integer::New(isolate_, script_id_); +Local ScriptOrigin::HostDefinedOptions() const { + // TODO(cbruni, chromium:1244145): remove once migrated to the context. + Utils::ApiCheck(!host_defined_options_->IsFixedArray(), + "ScriptOrigin::HostDefinedOptions", + "HostDefinedOptions is not a PrimitiveArray, please use " + "ScriptOrigin::GetHostDefinedOptions()"); + i::Handle options = + Utils::OpenHandle(*host_defined_options_.As()); + return Utils::PrimitiveArrayToLocal(options); } // --- E x c e p t i o n B e h a v i o r --- @@ -665,6 +671,7 @@ StartupData SnapshotCreator::CreateBlob( i::Snapshot::ClearReconstructableDataForSerialization( isolate, function_code_handling == FunctionCodeHandling::kClear); + i::GlobalSafepointScope global_safepoint(isolate); i::DisallowGarbageCollection no_gc_from_here_on; // Create a vector with all contexts and clear associated Persistent fields. @@ -702,7 +709,7 @@ StartupData SnapshotCreator::CreateBlob( data->created_ = true; return i::Snapshot::Create(isolate, &contexts, embedder_fields_serializers, - no_gc_from_here_on); + global_safepoint, no_gc_from_here_on); } bool StartupData::CanBeRehashed() const { @@ -1033,6 +1040,9 @@ void SealHandleScope::operator delete(void*, size_t) { base::OS::Abort(); } void SealHandleScope::operator delete[](void*, size_t) { base::OS::Abort(); } bool Data::IsModule() const { return Utils::OpenHandle(this)->IsModule(); } +bool Data::IsFixedArray() const { + return Utils::OpenHandle(this)->IsFixedArray(); +} bool Data::IsValue() const { i::DisallowGarbageCollection no_gc; @@ -1365,6 +1375,14 @@ Local FunctionTemplate::New( // Changes to the environment cannot be captured in the snapshot. Expect no // function templates when the isolate is created for serialization. LOG_API(i_isolate, FunctionTemplate, New); + + if (!Utils::ApiCheck( + !c_function || behavior == ConstructorBehavior::kThrow, + "FunctionTemplate::New", + "Fast API calls are not supported for constructor functions.")) { + return Local(); + } + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); return FunctionTemplateNew( i_isolate, callback, data, signature, length, behavior, false, @@ -1380,17 +1398,17 @@ Local FunctionTemplate::NewWithCFunctionOverloads( v8::Local signature, int length, ConstructorBehavior behavior, SideEffectType side_effect_type, const MemorySpan& c_function_overloads) { - // TODO(mslekova): Once runtime overload resolution between sequences is - // supported, check that if (c_function_overloads.size() == 2), then - // c_function_overloads.data()[0]. - // CanResolveOverload(c_function_overloads.data()[1]). 
We won't support - // the case where the size is greater than 2 for runtime resolution, until - // we've added support for ArrayBuffers and ArrayBufferViews. OTOH the - // overloads list might contain more than 2 functions with different arity, - // the resolution between which is available at compile time. - i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, FunctionTemplate, New); + + if (!Utils::ApiCheck( + c_function_overloads.size() == 0 || + behavior == ConstructorBehavior::kThrow, + "FunctionTemplate::NewWithCFunctionOverloads", + "Fast API calls are not supported for constructor functions.")) { + return Local(); + } + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); return FunctionTemplateNew(i_isolate, callback, data, signature, length, behavior, false, Local(), @@ -1488,23 +1506,27 @@ i::Handle MakeAccessorInfo( if (redirected != i::kNullAddress) { SET_FIELD_WRAPPED(isolate, obj, set_js_getter, redirected); } - if (data.IsEmpty()) { - data = v8::Undefined(reinterpret_cast(isolate)); - } - obj->set_data(*Utils::OpenHandle(*data)); - obj->set_is_special_data_property(is_special_data_property); - obj->set_replace_on_access(replace_on_access); + i::Handle accessor_name = Utils::OpenHandle(*name); if (!accessor_name->IsUniqueName()) { accessor_name = isolate->factory()->InternalizeString( i::Handle::cast(accessor_name)); } - obj->set_name(*accessor_name); - if (settings & ALL_CAN_READ) obj->set_all_can_read(true); - if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true); - obj->set_initial_property_attributes(i::NONE); + i::DisallowGarbageCollection no_gc; + i::AccessorInfo raw_obj = *obj; + if (data.IsEmpty()) { + raw_obj.set_data(i::ReadOnlyRoots(isolate).undefined_value()); + } else { + raw_obj.set_data(*Utils::OpenHandle(*data)); + } + raw_obj.set_name(*accessor_name); + raw_obj.set_is_special_data_property(is_special_data_property); + raw_obj.set_replace_on_access(replace_on_access); + if (settings & ALL_CAN_READ) raw_obj.set_all_can_read(true); + if (settings & ALL_CAN_WRITE) raw_obj.set_all_can_write(true); + raw_obj.set_initial_property_attributes(i::NONE); if (!signature.IsEmpty()) { - obj->set_expected_receiver_type(*Utils::OpenHandle(*signature)); + raw_obj.set_expected_receiver_type(*Utils::OpenHandle(*signature)); } return obj; } @@ -1637,10 +1659,14 @@ static void TemplateSetAccessor( i::Handle accessor_info = MakeAccessorInfo(isolate, name, getter, setter, data, settings, signature, is_special_data_property, replace_on_access); - accessor_info->set_initial_property_attributes( - static_cast(attribute)); - accessor_info->set_getter_side_effect_type(getter_side_effect_type); - accessor_info->set_setter_side_effect_type(setter_side_effect_type); + { + i::DisallowGarbageCollection no_gc; + i::AccessorInfo raw = *accessor_info; + raw.set_initial_property_attributes( + static_cast(attribute)); + raw.set_getter_side_effect_type(getter_side_effect_type); + raw.set_setter_side_effect_type(setter_side_effect_type); + } i::ApiNatives::AddNativeDataProperty(isolate, info, accessor_info); } @@ -2051,6 +2077,11 @@ Local UnboundScript::GetSourceMappingURL() { } MaybeLocal Script::Run(Local context) { + return Run(context, Local()); +} + +MaybeLocal Script::Run(Local context, + Local host_defined_options) { auto v8_isolate = context->GetIsolate(); auto isolate = reinterpret_cast(v8_isolate); TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute"); @@ -2096,12 +2127,12 @@ MaybeLocal Script::Run(Local context) { } i::Handle receiver = isolate->global_proxy(); - 
-  i::Handle<i::FixedArray> host_defined_options(
+  // TODO(cbruni, chromium:1244145): Remove once migrated to the context.
+  i::Handle<i::FixedArray> options(
       i::Script::cast(fun->shared().script()).host_defined_options(), isolate);
   Local<Value> result;
   has_pending_exception = !ToLocal<Value>(
-      i::Execution::CallScript(isolate, fun, receiver, host_defined_options),
-      &result);
+      i::Execution::CallScript(isolate, fun, receiver, options), &result);

   if (i::FLAG_script_delay_fraction > 0.0) {
     delta = v8::base::TimeDelta::FromMillisecondsD(
@@ -2125,11 +2156,15 @@ Local<Value> ScriptOrModule::GetResourceName() {
 }

 Local<PrimitiveArray> ScriptOrModule::GetHostDefinedOptions() {
+  return HostDefinedOptions().As<PrimitiveArray>();
+}
+
+Local<Data> ScriptOrModule::HostDefinedOptions() {
   i::Handle<i::Script> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = i::GetIsolateFromWritableObject(*obj);
   ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
-  i::Handle<i::FixedArray> val(obj->host_defined_options(), isolate);
-  return ToApiHandle<PrimitiveArray>(val);
+  i::Handle<i::Object> val(obj->host_defined_options(), isolate);
+  return ToApiHandle<Data>(val);
 }

 Local<UnboundScript> Script::GetUnboundScript() {
@@ -2190,6 +2225,14 @@ Local<Primitive> PrimitiveArray::Get(Isolate* v8_isolate, int index) {
   return ToApiHandle<Primitive>(i_item);
 }

+void v8::PrimitiveArray::CheckCast(v8::Data* that) {
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  Utils::ApiCheck(
+      obj->IsFixedArray(), "v8::PrimitiveArray::Cast",
+      "Value is not a PrimitiveArray. This is a temporary issue, v8::Data and "
+      "v8::PrimitiveArray will not be compatible in the future.");
+}
+
 int FixedArray::Length() const {
   i::Handle<i::FixedArray> self = Utils::OpenHandle(this);
   return self->length();
 }
@@ -2469,13 +2512,10 @@ Maybe<bool> Module::SetSyntheticModuleExport(Isolate* isolate,

 namespace {

-i::ScriptDetails GetScriptDetails(i::Isolate* isolate,
-                                  Local<Value> resource_name,
-                                  int resource_line_offset,
-                                  int resource_column_offset,
-                                  Local<Value> source_map_url,
-                                  Local<PrimitiveArray> host_defined_options,
-                                  ScriptOriginOptions origin_options) {
+i::ScriptDetails GetScriptDetails(
+    i::Isolate* isolate, Local<Value> resource_name, int resource_line_offset,
+    int resource_column_offset, Local<Value> source_map_url,
+    Local<Data> host_defined_options, ScriptOriginOptions origin_options) {
   i::ScriptDetails script_details(Utils::OpenHandle(*(resource_name), true),
                                   origin_options);
   script_details.line_offset = resource_line_offset;
@@ -2767,7 +2807,7 @@ i::MaybeHandle<i::SharedFunctionInfo> CompileStreamedSource(
   i::ScriptDetails script_details =
       GetScriptDetails(isolate, origin.ResourceName(), origin.LineOffset(),
                        origin.ColumnOffset(), origin.SourceMapUrl(),
-                       origin.HostDefinedOptions(), origin.Options());
+                       origin.GetHostDefinedOptions(), origin.Options());
   i::ScriptStreamingData* data = v8_source->impl();
   return i::Compiler::GetSharedFunctionInfoForStreamedScript(
       isolate, str, script_details, data);
 }
@@ -3021,6 +3061,20 @@ ScriptOrigin Message::GetScriptOrigin() const {
   return GetScriptOriginForScript(isolate, script);
 }

+void ScriptOrigin::VerifyHostDefinedOptions() const {
+  // TODO(cbruni, chromium:1244145): Remove checks once we allow arbitrary
+  // host-defined options.
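// Illustrative sketch (editorial, not part of the upstream change; the
// identifiers are assumed for the example): a host-defined-options value
// that passes the checks below is a PrimitiveArray holding only primitive
// values, e.g.
//
//   Local<PrimitiveArray> opts = PrimitiveArray::New(isolate, 1);
//   opts->Set(isolate, 0, String::NewFromUtf8Literal(isolate, "id"));
//   ScriptOrigin origin(isolate, resource_name, 0, 0, false, -1,
//                       Local<Value>(), false, false, false, opts);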
+  if (host_defined_options_.IsEmpty()) return;
+  Utils::ApiCheck(host_defined_options_->IsFixedArray(), "ScriptOrigin()",
+                  "Host-defined options must be a PrimitiveArray");
+  i::Handle<i::FixedArray> options =
+      Utils::OpenHandle(*host_defined_options_.As<FixedArray>());
+  for (int i = 0; i < options->length(); i++) {
+    Utils::ApiCheck(options->get(i).IsPrimitive(), "ScriptOrigin()",
+                    "PrimitiveArray can only contain primitive values");
+  }
+}
+
 v8::Local<Value> Message::GetScriptResourceName() const {
   ASSERT_NO_SCRIPT_NO_EXCEPTION(Utils::OpenHandle(this)->GetIsolate());
   return GetScriptOrigin().ResourceName();
 }
@@ -3243,6 +3297,15 @@ Local<String> StackFrame::GetScriptSourceMappingURL() const {

 Local<String> StackFrame::GetFunctionName() const {
   auto self = Utils::OpenHandle(this);
+#if V8_ENABLE_WEBASSEMBLY
+  if (self->IsWasm()) {
+    auto isolate = self->GetIsolate();
+    auto instance = handle(self->GetWasmInstance(), isolate);
+    auto func_index = self->GetWasmFunctionIndex();
+    return Utils::ToLocal(
+        i::GetWasmFunctionDebugName(isolate, instance, func_index));
+  }
+#endif  // V8_ENABLE_WEBASSEMBLY
   auto name = i::StackFrameInfo::GetFunctionName(self);
   if (!name->IsString()) return {};
   return Local<String>::Cast(Utils::ToLocal(name));
@@ -3890,6 +3953,12 @@ void v8::Private::CheckCast(v8::Data* that) {
                   "v8::Private::Cast", "Value is not a Private");
 }

+void v8::FixedArray::CheckCast(v8::Data* that) {
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  Utils::ApiCheck(obj->IsFixedArray(), "v8::FixedArray::Cast",
+                  "Value is not a FixedArray");
+}
+
 void v8::ModuleRequest::CheckCast(v8::Data* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   Utils::ApiCheck(obj->IsModuleRequest(), "v8::ModuleRequest::Cast",
@@ -5813,7 +5882,8 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow(
   }

   internal::Address string = str.ptr();
-  int type = I::GetInstanceType(string) & I::kFullStringRepresentationMask;
+  int type =
+      I::GetInstanceType(string) & I::kStringRepresentationAndEncodingMask;
   *encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
   if (i::StringShape(str).IsExternalOneByte() ||
       i::StringShape(str).IsExternalTwoByte()) {
@@ -5966,6 +6036,7 @@ void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
                        .store_aligned_pointer(obj->GetIsolate(), value),
                    location, "Unaligned pointer");
   DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
+  internal::WriteBarrier::MarkingFromInternalFields(i::JSObject::cast(*obj));
 }

 void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
@@ -5987,6 +6058,7 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
                      location, "Unaligned pointer");
     DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
   }
+  internal::WriteBarrier::MarkingFromInternalFields(js_obj);
 }

 static void* ExternalValue(i::Object obj) {
@@ -6010,7 +6082,7 @@ bool v8::V8::InitializeVirtualMemoryCage() {
 }
 #endif

-void v8::V8::ShutdownPlatform() { i::V8::ShutdownPlatform(); }
+void v8::V8::DisposePlatform() { i::V8::DisposePlatform(); }

 bool v8::V8::Initialize(const int build_config) {
   const bool kEmbedderPointerCompression =
@@ -6105,7 +6177,7 @@ void v8::V8::SetReturnAddressLocationResolver(
 }

 bool v8::V8::Dispose() {
-  i::V8::TearDown();
+  i::V8::Dispose();
   return true;
 }
@@ -6144,7 +6216,8 @@ HeapObjectStatistics::HeapObjectStatistics()

 HeapCodeStatistics::HeapCodeStatistics()
     : code_and_metadata_size_(0),
       bytecode_and_metadata_size_(0),
-      external_script_source_size_(0) {}
+      external_script_source_size_(0),
+      cpu_profiler_metadata_size_(0) {}

 bool
v8::V8::InitializeICU(const char* icu_data_file) { return i::InitializeICU(icu_data_file); @@ -7804,6 +7877,37 @@ MaybeLocal WasmModuleObject::FromCompiledModule( #endif // V8_ENABLE_WEBASSEMBLY } +MaybeLocal WasmModuleObject::Compile( + Isolate* isolate, MemorySpan wire_bytes) { +#if V8_ENABLE_WEBASSEMBLY + const uint8_t* start = wire_bytes.data(); + size_t length = wire_bytes.size(); + i::Isolate* i_isolate = reinterpret_cast(isolate); + if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) { + return MaybeLocal(); + } + i::MaybeHandle maybe_compiled; + { + i::wasm::ErrorThrower thrower(i_isolate, "WasmModuleObject::Compile()"); + auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate); + maybe_compiled = i::wasm::GetWasmEngine()->SyncCompile( + i_isolate, enabled_features, &thrower, + i::wasm::ModuleWireBytes(start, start + length)); + } + CHECK_EQ(maybe_compiled.is_null(), i_isolate->has_pending_exception()); + if (maybe_compiled.is_null()) { + i_isolate->OptionalRescheduleException(false); + return MaybeLocal(); + } + return Local::Cast( + Utils::ToLocal(maybe_compiled.ToHandleChecked())); +#else + Utils::ApiCheck(false, "WasmModuleObject::Compile", + "WebAssembly support is not enabled."); + UNREACHABLE(); +#endif // V8_ENABLE_WEBASSEMBLY +} + WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming( Isolate* isolate) { USE(isolate_); @@ -7945,21 +8049,20 @@ Local v8::ArrayBufferView::Buffer() { size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) { i::Handle self = Utils::OpenHandle(this); - size_t byte_offset = self->byte_offset(); size_t bytes_to_copy = std::min(byte_length, self->byte_length()); if (bytes_to_copy) { i::DisallowGarbageCollection no_gc; i::Isolate* isolate = self->GetIsolate(); - i::Handle buffer(i::JSArrayBuffer::cast(self->buffer()), - isolate); - const char* source = reinterpret_cast(buffer->backing_store()); - if (source == nullptr) { - DCHECK(self->IsJSTypedArray()); - i::Handle typed_array(i::JSTypedArray::cast(*self), - isolate); - source = reinterpret_cast(typed_array->DataPtr()); + const char* source; + if (self->IsJSTypedArray()) { + i::Handle array(i::JSTypedArray::cast(*self), isolate); + source = reinterpret_cast(array->DataPtr()); + } else { + DCHECK(self->IsJSDataView()); + i::Handle data_view(i::JSDataView::cast(*self), isolate); + source = reinterpret_cast(data_view->data_pointer()); } - memcpy(dest, source + byte_offset, bytes_to_copy); + memcpy(dest, source, bytes_to_copy); } return bytes_to_copy; } @@ -8397,6 +8500,7 @@ void Isolate::RemoveGCEpilogueCallback(GCCallback callback) { void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) { i::Isolate* isolate = reinterpret_cast(this); + CHECK_NULL(isolate->heap()->cpp_heap()); isolate->heap()->SetEmbedderHeapTracer(tracer); } @@ -8412,6 +8516,7 @@ void Isolate::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) { void Isolate::AttachCppHeap(CppHeap* cpp_heap) { i::Isolate* isolate = reinterpret_cast(this); + CHECK_NULL(GetEmbedderHeapTracer()); isolate->heap()->AttachCppHeap(cpp_heap); } @@ -8477,6 +8582,17 @@ void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) { } } +void Isolate::RequestGarbageCollectionForTesting( + GarbageCollectionType type, + EmbedderHeapTracer::EmbedderStackState stack_state) { + if (type == kFullGarbageCollection) { + reinterpret_cast(this) + ->heap() + ->SetEmbedderStackStateForNextFinalization(stack_state); + } + RequestGarbageCollectionForTesting(type); +} + Isolate* 
Isolate::GetCurrent() { i::Isolate* isolate = i::Isolate::Current(); return reinterpret_cast(isolate); @@ -8505,6 +8621,7 @@ Isolate::CreateParams::~CreateParams() = default; void Isolate::Initialize(Isolate* isolate, const v8::Isolate::CreateParams& params) { i::Isolate* i_isolate = reinterpret_cast(isolate); + TRACE_EVENT_CALL_STATS_SCOPED(i_isolate, "v8", "V8.IsolateInitialize"); if (auto allocator = params.array_buffer_allocator_shared) { CHECK(params.array_buffer_allocator == nullptr || params.array_buffer_allocator == allocator.get()); @@ -8646,6 +8763,12 @@ void Isolate::SetHostImportModuleDynamicallyCallback( isolate->SetHostImportModuleDynamicallyCallback(callback); } +void Isolate::SetHostImportModuleDynamicallyCallback( + HostImportModuleDynamicallyCallback callback) { + i::Isolate* isolate = reinterpret_cast(this); + isolate->SetHostImportModuleDynamicallyCallback(callback); +} + void Isolate::SetHostInitializeImportMetaObjectCallback( HostInitializeImportMetaObjectCallback callback) { i::Isolate* isolate = reinterpret_cast(this); @@ -8897,6 +9020,10 @@ bool Isolate::GetHeapCodeAndMetadataStatistics( isolate->bytecode_and_metadata_size(); code_statistics->external_script_source_size_ = isolate->external_script_source_size(); + code_statistics->cpu_profiler_metadata_size_ = + i::CpuProfiler::GetAllProfilersMemorySize( + reinterpret_cast(isolate)); + return true; } @@ -9427,7 +9554,6 @@ void v8::Isolate::LocaleConfigurationChangeNotification() { #ifdef V8_INTL_SUPPORT i_isolate->ResetDefaultLocale(); - i_isolate->clear_cached_icu_objects(); #endif // V8_INTL_SUPPORT } @@ -9752,6 +9878,16 @@ int64_t CpuProfile::GetSampleTimestamp(int index) const { return profile->sample(index).timestamp.since_origin().InMicroseconds(); } +StateTag CpuProfile::GetSampleState(int index) const { + const i::CpuProfile* profile = reinterpret_cast(this); + return profile->sample(index).state_tag; +} + +EmbedderStateTag CpuProfile::GetSampleEmbedderState(int index) const { + const i::CpuProfile* profile = reinterpret_cast(this); + return profile->sample(index).embedder_state_tag; +} + int64_t CpuProfile::GetStartTime() const { const i::CpuProfile* profile = reinterpret_cast(this); return profile->start_time().since_origin().InMicroseconds(); @@ -10220,6 +10356,11 @@ void EmbedderHeapTracer::ResetHandleInNonTracingGC( UNREACHABLE(); } +EmbedderStateScope::EmbedderStateScope(Isolate* isolate, + Local context, + EmbedderStateTag tag) + : embedder_state_(new internal::EmbedderState(isolate, context, tag)) {} + void TracedReferenceBase::CheckValue() const { #ifdef V8_HOST_ARCH_64_BIT if (!val_) return; @@ -10388,6 +10529,11 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) { v->VisitRootPointers(Root::kHandleScope, nullptr, start, start + static_cast(context_lists[i]->size())); } + // The shape of |entered_contexts_| and |is_microtask_context_| stacks must + // be in sync. 
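// Illustrative sketch (editorial, not part of the upstream change): the
// pairing these checks rely on, as enforced by the mutators patched below
// in api.h:
//
//   EnterContext(ctx);           // pushes ctx and pushes 0
//   EnterMicrotaskContext(ctx);  // pushes ctx and pushes 1
//   LeaveContext();              // pops one entry from each stack
//
// Hence the two stacks always have equal size and, after the
// shrink_to_fit() below, the capacity DCHECK holds as well.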
+ is_microtask_context_.shrink_to_fit(); + DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); + DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); } void HandleScopeImplementer::Iterate(RootVisitor* v) { diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h index 48f549bbb0d513..320346b22fae05 100644 --- a/deps/v8/src/api/api.h +++ b/deps/v8/src/api/api.h @@ -468,6 +468,7 @@ bool HandleScopeImplementer::HasSavedContexts() { } void HandleScopeImplementer::EnterContext(Context context) { + DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); entered_contexts_.push_back(context); is_microtask_context_.push_back(0); @@ -475,6 +476,7 @@ void HandleScopeImplementer::EnterContext(Context context) { void HandleScopeImplementer::LeaveContext() { DCHECK(!entered_contexts_.empty()); + DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); entered_contexts_.pop_back(); is_microtask_context_.pop_back(); @@ -485,6 +487,7 @@ bool HandleScopeImplementer::LastEnteredContextWas(Context context) { } void HandleScopeImplementer::EnterMicrotaskContext(Context context) { + DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); entered_contexts_.push_back(context); is_microtask_context_.push_back(1); diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc index 28b44bf088b1e7..8791e4eae2010e 100644 --- a/deps/v8/src/asmjs/asm-js.cc +++ b/deps/v8/src/asmjs/asm-js.cc @@ -77,7 +77,7 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle stdlib, return false; \ } \ DCHECK_EQ(shared.GetCode(), \ - isolate->builtins()->code(Builtin::kMath##FName)); \ + isolate->builtins()->codet(Builtin::kMath##FName)); \ } STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC) #undef STDLIB_MATH_FUNC diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc index c782bbaae7237c..3ff2a44201321d 100644 --- a/deps/v8/src/asmjs/asm-parser.cc +++ b/deps/v8/src/asmjs/asm-parser.cc @@ -239,7 +239,7 @@ void AsmJsParser::DeclareGlobal(VarInfo* info, bool mutable_variable, WasmInitExpr init) { info->kind = VarKind::kGlobal; info->type = type; - info->index = module_builder_->AddGlobal(vtype, true, std::move(init)); + info->index = module_builder_->AddGlobal(vtype, true, init); info->mutable_variable = mutable_variable; } diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc index 307e1d06b70935..4dab59fdae26e2 100644 --- a/deps/v8/src/ast/ast-value-factory.cc +++ b/deps/v8/src/ast/ast-value-factory.cc @@ -338,10 +338,11 @@ const AstRawString* AstValueFactory::GetTwoByteStringInternal( base::Vector::cast(literal)); } -const AstRawString* AstValueFactory::GetString(Handle literal) { +const AstRawString* AstValueFactory::GetString( + String literal, const SharedStringAccessGuardIfNeeded& access_guard) { const AstRawString* result = nullptr; DisallowGarbageCollection no_gc; - String::FlatContent content = literal->GetFlatContent(no_gc); + String::FlatContent content = literal.GetFlatContent(no_gc, access_guard); if (content.IsOneByte()) { result = GetOneByteStringInternal(content.ToOneByteVector()); } else { @@ -351,15 +352,6 @@ const AstRawString* AstValueFactory::GetString(Handle literal) { return result; } -const AstRawString* AstValueFactory::CloneFromOtherFactory( - const 
AstRawString* raw_string) { - const AstRawString* result = - GetString(raw_string->raw_hash_field(), raw_string->is_one_byte(), - base::Vector(raw_string->raw_data(), - raw_string->byte_length())); - return result; -} - AstConsString* AstValueFactory::NewConsString() { return zone()->New(); } @@ -375,8 +367,6 @@ AstConsString* AstValueFactory::NewConsString(const AstRawString* str1, template void AstValueFactory::Internalize(IsolateT* isolate) { - if (!zone_) return; - // Strings need to be internalized before values, because values refer to // strings. for (AstRawString* current = strings_; current != nullptr;) { @@ -386,7 +376,6 @@ void AstValueFactory::Internalize(IsolateT* isolate) { } ResetStrings(); - zone_ = nullptr; } template EXPORT_TEMPLATE_DEFINE( V8_EXPORT_PRIVATE) void AstValueFactory::Internalize(Isolate* isolate); diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h index 67c761a8f8f13e..d036d99604eab3 100644 --- a/deps/v8/src/ast/ast-value-factory.h +++ b/deps/v8/src/ast/ast-value-factory.h @@ -340,11 +340,8 @@ class AstValueFactory { const AstRawString* GetTwoByteString(base::Vector literal) { return GetTwoByteStringInternal(literal); } - const AstRawString* GetString(Handle literal); - - // Clones an AstRawString from another ast value factory, adding it to this - // factory and returning the clone. - const AstRawString* CloneFromOtherFactory(const AstRawString* raw_string); + const AstRawString* GetString(String literal, + const SharedStringAccessGuardIfNeeded&); V8_EXPORT_PRIVATE AstConsString* NewConsString(); V8_EXPORT_PRIVATE AstConsString* NewConsString(const AstRawString* str); diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index 0b2320860e56a5..f7b3f247f72896 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -2192,6 +2192,13 @@ class FunctionLiteral final : public Expression { return HasDuplicateParameters::decode(bit_field_); } + bool should_parallel_compile() const { + return ShouldParallelCompileField::decode(bit_field_); + } + void set_should_parallel_compile() { + bit_field_ = ShouldParallelCompileField::update(bit_field_, true); + } + // This is used as a heuristic on when to eagerly compile a function // literal. 
We consider the following constructs as hints that the // function will be called immediately: @@ -2205,16 +2212,6 @@ class FunctionLiteral final : public Expression { } FunctionKind kind() const; - bool dont_optimize() { - return dont_optimize_reason() != BailoutReason::kNoReason; - } - BailoutReason dont_optimize_reason() { - return DontOptimizeReasonField::decode(bit_field_); - } - void set_dont_optimize_reason(BailoutReason reason) { - bit_field_ = DontOptimizeReasonField::update(bit_field_, reason); - } - bool IsAnonymousFunctionDefinition() const { return is_anonymous_expression(); } @@ -2290,9 +2287,9 @@ class FunctionLiteral final : public Expression { Pretenure::encode(false) | HasDuplicateParameters::encode(has_duplicate_parameters == kHasDuplicateParameters) | - DontOptimizeReasonField::encode(BailoutReason::kNoReason) | RequiresInstanceMembersInitializer::encode(false) | - HasBracesField::encode(has_braces); + HasBracesField::encode(has_braces) | + ShouldParallelCompileField::encode(false); if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile(); } @@ -2300,15 +2297,14 @@ class FunctionLiteral final : public Expression { Expression::NextBitField; using Pretenure = FunctionSyntaxKindBits::Next; using HasDuplicateParameters = Pretenure::Next; - using DontOptimizeReasonField = - HasDuplicateParameters::Next; using RequiresInstanceMembersInitializer = - DontOptimizeReasonField::Next; + HasDuplicateParameters::Next; using ClassScopeHasPrivateBrandField = RequiresInstanceMembersInitializer::Next; using HasStaticPrivateMethodsOrAccessorsField = ClassScopeHasPrivateBrandField::Next; using HasBracesField = HasStaticPrivateMethodsOrAccessorsField::Next; + using ShouldParallelCompileField = HasBracesField::Next; // expected_property_count_ is the sum of instance fields and properties. // It can vary depending on whether a function is lazily or eagerly parsed. diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc index 44f4ea155f8a3d..c8be8bf47a6b0b 100644 --- a/deps/v8/src/ast/prettyprinter.cc +++ b/deps/v8/src/ast/prettyprinter.cc @@ -35,7 +35,7 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js, is_user_js_ = is_user_js; error_in_spread_args_ = error_in_spread_args; spread_arg_ = nullptr; - function_kind_ = kNormalFunction; + function_kind_ = FunctionKind::kNormalFunction; InitializeAstVisitor(isolate); } @@ -823,7 +823,7 @@ const char* AstPrinter::PrintProgram(FunctionLiteral* program) { Init(); { IndentedScope indent(this, "FUNC", program->position()); PrintIndented("KIND"); - Print(" %d\n", program->kind()); + Print(" %d\n", static_cast(program->kind())); PrintIndented("LITERAL ID"); Print(" %d\n", program->function_literal_id()); PrintIndented("SUSPEND COUNT"); diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index c179776571c43b..6758079823be64 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -143,8 +143,9 @@ DeclarationScope::DeclarationScope(Zone* zone, AstValueFactory* ast_value_factory, REPLMode repl_mode) : Scope(zone), - function_kind_(repl_mode == REPLMode::kYes ? kAsyncFunction - : kNormalFunction), + function_kind_(repl_mode == REPLMode::kYes + ? 
FunctionKind::kAsyncFunction + : FunctionKind::kNormalFunction), params_(4, zone) { DCHECK_EQ(scope_type_, SCRIPT_SCOPE); SetDefaults(); @@ -165,14 +166,15 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope, ModuleScope::ModuleScope(DeclarationScope* script_scope, AstValueFactory* avfactory) - : DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE, kModule), + : DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE, + FunctionKind::kModule), module_descriptor_(avfactory->zone()->New( avfactory->zone())) { set_language_mode(LanguageMode::kStrict); DeclareThis(avfactory); } -ModuleScope::ModuleScope(Isolate* isolate, Handle scope_info, +ModuleScope::ModuleScope(Handle scope_info, AstValueFactory* avfactory) : DeclarationScope(avfactory->zone(), MODULE_SCOPE, avfactory, scope_info), module_descriptor_(nullptr) { @@ -186,7 +188,8 @@ ClassScope::ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous) set_language_mode(LanguageMode::kStrict); } -ClassScope::ClassScope(Isolate* isolate, Zone* zone, +template +ClassScope::ClassScope(IsolateT* isolate, Zone* zone, AstValueFactory* ast_value_factory, Handle scope_info) : Scope(zone, CLASS_SCOPE, ast_value_factory, scope_info), @@ -212,12 +215,20 @@ ClassScope::ClassScope(Isolate* isolate, Zone* zone, DCHECK_EQ(scope_info->ContextLocalMaybeAssignedFlag(index), MaybeAssignedFlag::kMaybeAssigned); Variable* var = DeclareClassVariable( - ast_value_factory, ast_value_factory->GetString(handle(name, isolate)), + ast_value_factory, + ast_value_factory->GetString(name, + SharedStringAccessGuardIfNeeded(isolate)), kNoSourcePosition); var->AllocateTo(VariableLocation::CONTEXT, Context::MIN_CONTEXT_SLOTS + index); } } +template ClassScope::ClassScope(Isolate* isolate, Zone* zone, + AstValueFactory* ast_value_factory, + Handle scope_info); +template ClassScope::ClassScope(LocalIsolate* isolate, Zone* zone, + AstValueFactory* ast_value_factory, + Handle scope_info); Scope::Scope(Zone* zone, ScopeType scope_type, AstValueFactory* ast_value_factory, Handle scope_info) @@ -394,7 +405,8 @@ bool Scope::ContainsAsmModule() const { } #endif // V8_ENABLE_WEBASSEMBLY -Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone, +template +Scope* Scope::DeserializeScopeChain(IsolateT* isolate, Zone* zone, ScopeInfo scope_info, DeclarationScope* script_scope, AstValueFactory* ast_value_factory, @@ -450,7 +462,7 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone, handle(scope_info, isolate)); } } else if (scope_info.scope_type() == MODULE_SCOPE) { - outer_scope = zone->New(isolate, handle(scope_info, isolate), + outer_scope = zone->New(handle(scope_info, isolate), ast_value_factory); } else { DCHECK_EQ(scope_info.scope_type(), CATCH_SCOPE); @@ -460,9 +472,11 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone, String name = scope_info.ContextLocalName(0); MaybeAssignedFlag maybe_assigned = scope_info.ContextLocalMaybeAssignedFlag(0); - outer_scope = zone->New( - zone, ast_value_factory->GetString(handle(name, isolate)), - maybe_assigned, handle(scope_info, isolate)); + outer_scope = + zone->New(zone, + ast_value_factory->GetString( + name, SharedStringAccessGuardIfNeeded(isolate)), + maybe_assigned, handle(scope_info, isolate)); } if (deserialization_mode == DeserializationMode::kScopesOnly) { outer_scope->scope_info_ = Handle::null(); @@ -496,6 +510,26 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone, return innermost_scope; } +template 
EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) + Scope* Scope::DeserializeScopeChain( + Isolate* isolate, Zone* zone, ScopeInfo scope_info, + DeclarationScope* script_scope, AstValueFactory* ast_value_factory, + DeserializationMode deserialization_mode); +template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) + Scope* Scope::DeserializeScopeChain( + LocalIsolate* isolate, Zone* zone, ScopeInfo scope_info, + DeclarationScope* script_scope, AstValueFactory* ast_value_factory, + DeserializationMode deserialization_mode); + +#ifdef DEBUG +bool Scope::IsReparsedMemberInitializerScope() const { + return is_declaration_scope() && + IsClassMembersInitializerFunction( + AsDeclarationScope()->function_kind()) && + outer_scope()->AsClassScope()->is_reparsed_class_scope(); +} +#endif + DeclarationScope* Scope::AsDeclarationScope() { DCHECK(is_declaration_scope()); return static_cast(this); @@ -639,8 +673,10 @@ bool DeclarationScope::Analyze(ParseInfo* info) { // We are compiling one of four cases: // 1) top-level code, // 2) a function/eval/module on the top-level - // 3) a function/eval in a scope that was already resolved. + // 4) a class member initializer function scope + // 3) 4 function/eval in a scope that was already resolved. DCHECK(scope->is_script_scope() || scope->outer_scope()->is_script_scope() || + scope->IsReparsedMemberInitializerScope() || scope->outer_scope()->already_resolved_); // The outer scope is never lazy. @@ -1819,7 +1855,7 @@ void Scope::Print(int n) { // Print header. FunctionKind function_kind = is_function_scope() ? AsDeclarationScope()->function_kind() - : kNormalFunction; + : FunctionKind::kNormalFunction; Indent(n0, Header(scope_type_, function_kind, is_declaration_scope())); if (scope_name_ != nullptr && !scope_name_->IsEmpty()) { PrintF(" "); @@ -1868,6 +1904,8 @@ void Scope::Print(int n) { if (scope->needs_private_name_context_chain_recalc()) { Indent(n1, "// needs #-name context chain recalc\n"); } + Indent(n1, "// "); + PrintF("%s\n", FunctionKind2String(scope->function_kind())); } if (num_stack_slots_ > 0) { Indent(n1, "// "); @@ -2657,6 +2695,55 @@ bool IsComplementaryAccessorPair(VariableMode a, VariableMode b) { } } +void ClassScope::ReplaceReparsedClassScope(Isolate* isolate, + AstValueFactory* ast_value_factory, + ClassScope* old_scope) { + DCHECK_EQ(outer_scope_, old_scope->outer_scope()); + Scope* outer = outer_scope_; + + outer->RemoveInnerScope(old_scope); + // The outer scope should only have this deserialized inner scope, + // otherwise we have to update the sibling scopes. + DCHECK_EQ(outer->inner_scope_, this); + DCHECK_NULL(sibling_); + + DCHECK_NULL(old_scope->inner_scope_); + + Handle scope_info = old_scope->scope_info_; + DCHECK(!scope_info.is_null()); + DCHECK(!scope_info->IsEmpty()); + + // Restore variable allocation results for context-allocated variables in + // the class scope from ScopeInfo, so that we don't need to run + // resolution and allocation on these variables again when generating + // code for the initializer function. 
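// Illustrative sketch (editorial, not part of the upstream change): for a
// class such as
//
//   class C { #x = 1; }
//
// the original compilation recorded #x's context slot in ScopeInfo. The
// loop below re-applies that recorded slot index to the corresponding
// variable of the reparsed scope, so variable resolution and allocation do
// not have to run again for it.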
+ int context_local_count = scope_info->ContextLocalCount(); + int context_header_length = scope_info->ContextHeaderLength(); + DisallowGarbageCollection no_gc; + for (int i = 0; i < context_local_count; ++i) { + int slot_index = context_header_length + i; + DCHECK_LT(slot_index, scope_info->ContextLength()); + + String name = scope_info->ContextLocalName(i); + const AstRawString* string = ast_value_factory->GetString( + name, SharedStringAccessGuardIfNeeded(isolate)); + Variable* var = nullptr; + + var = string->IsPrivateName() ? LookupLocalPrivateName(string) + : LookupLocal(string); + DCHECK_NOT_NULL(var); + var->AllocateTo(VariableLocation::CONTEXT, slot_index); + } + + scope_info_ = scope_info; + + // Set this bit so that DelcarationScope::Analyze recognizes + // the reparsed instance member initializer scope. +#ifdef DEBUG + is_reparsed_class_scope_ = true; +#endif +} + Variable* ClassScope::DeclarePrivateName(const AstRawString* name, VariableMode mode, IsStaticFlag is_static_flag, diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h index 2aa0c2376795ab..c04d99b4b0eab3 100644 --- a/deps/v8/src/ast/scopes.h +++ b/deps/v8/src/ast/scopes.h @@ -163,7 +163,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { enum class DeserializationMode { kIncludingVariables, kScopesOnly }; - static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone, + template + EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) + static Scope* DeserializeScopeChain(IsolateT* isolate, Zone* zone, ScopeInfo scope_info, DeclarationScope* script_scope, AstValueFactory* ast_value_factory, @@ -422,6 +424,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { return num_heap_slots() > 0; } +#ifdef DEBUG + bool IsReparsedMemberInitializerScope() const; +#endif // Use Scope::ForEach for depth first traversal of scopes. // Before: // void Scope::VisitRecursively() { @@ -850,7 +855,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { class V8_EXPORT_PRIVATE DeclarationScope : public Scope { public: DeclarationScope(Zone* zone, Scope* outer_scope, ScopeType scope_type, - FunctionKind function_kind = kNormalFunction); + FunctionKind function_kind = FunctionKind::kNormalFunction); DeclarationScope(Zone* zone, ScopeType scope_type, AstValueFactory* ast_value_factory, Handle scope_info); @@ -987,7 +992,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { void set_is_async_module() { DCHECK(IsModule(function_kind_)); - function_kind_ = kAsyncModule; + function_kind_ = FunctionKind::kAsyncModule; } void DeclareThis(AstValueFactory* ast_value_factory); @@ -1363,8 +1368,7 @@ class ModuleScope final : public DeclarationScope { ModuleScope(DeclarationScope* script_scope, AstValueFactory* avfactory); // Deserialization. Does not restore the module descriptor. - ModuleScope(Isolate* isolate, Handle scope_info, - AstValueFactory* avfactory); + ModuleScope(Handle scope_info, AstValueFactory* avfactory); // Returns nullptr in a deserialized scope. SourceTextModuleDescriptor* module() const { return module_descriptor_; } @@ -1381,7 +1385,8 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { public: ClassScope(Zone* zone, Scope* outer_scope, bool is_anonymous); // Deserialization. 
- ClassScope(Isolate* isolate, Zone* zone, AstValueFactory* ast_value_factory, + template + ClassScope(IsolateT* isolate, Zone* zone, AstValueFactory* ast_value_factory, Handle scope_info); struct HeritageParsingScope { @@ -1472,6 +1477,13 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { should_save_class_variable_index_ = true; } + void ReplaceReparsedClassScope(Isolate* isolate, + AstValueFactory* ast_value_factory, + ClassScope* old_scope); +#ifdef DEBUG + bool is_reparsed_class_scope() const { return is_reparsed_class_scope_; } +#endif + private: friend class Scope; friend class PrivateNameScopeIterator; @@ -1517,6 +1529,9 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { // This is only maintained during reparsing, restored from the // preparsed data. bool should_save_class_variable_index_ = false; +#ifdef DEBUG + bool is_reparsed_class_scope_ = false; +#endif }; // Iterate over the private name scope chain. The iteration proceeds from the diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h index ed034bfe06eca7..84015af36228f6 100644 --- a/deps/v8/src/base/atomic-utils.h +++ b/deps/v8/src/base/atomic-utils.h @@ -178,6 +178,27 @@ using AsAtomic8 = AsAtomicImpl; using AsAtomic32 = AsAtomicImpl; using AsAtomicWord = AsAtomicImpl; +template +struct AtomicTypeFromByteWidth {}; +template <> +struct AtomicTypeFromByteWidth<1> { + using type = base::Atomic8; +}; +template <> +struct AtomicTypeFromByteWidth<2> { + using type = base::Atomic16; +}; +template <> +struct AtomicTypeFromByteWidth<4> { + using type = base::Atomic32; +}; +#if V8_HOST_ARCH_64_BIT +template <> +struct AtomicTypeFromByteWidth<8> { + using type = base::Atomic64; +}; +#endif + // This is similar to AsAtomicWord but it explicitly deletes functionality // provided atomic access to bit representation of stored values. template @@ -211,11 +232,15 @@ inline void CheckedDecrement( template V8_INLINE std::atomic* AsAtomicPtr(T* t) { + STATIC_ASSERT(sizeof(T) == sizeof(std::atomic)); + STATIC_ASSERT(alignof(T) >= alignof(std::atomic)); return reinterpret_cast*>(t); } template V8_INLINE const std::atomic* AsAtomicPtr(const T* t) { + STATIC_ASSERT(sizeof(T) == sizeof(std::atomic)); + STATIC_ASSERT(alignof(T) >= alignof(std::atomic)); return reinterpret_cast*>(t); } diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h index 20efe3479cc995..56fd5f3094a886 100644 --- a/deps/v8/src/base/atomicops.h +++ b/deps/v8/src/base/atomicops.h @@ -378,6 +378,64 @@ inline void Relaxed_Memmove(volatile Atomic8* dst, volatile const Atomic8* src, } } +namespace helper { +inline int MemcmpNotEqualFundamental(Atomic8 u1, Atomic8 u2) { + DCHECK_NE(u1, u2); + return u1 < u2 ? -1 : 1; +} +inline int MemcmpNotEqualFundamental(AtomicWord u1, AtomicWord u2) { + DCHECK_NE(u1, u2); +#if defined(V8_TARGET_BIG_ENDIAN) + return u1 < u2 ? -1 : 1; +#else + for (size_t i = 0; i < sizeof(AtomicWord); ++i) { + uint8_t byte1 = u1 & 0xFF; + uint8_t byte2 = u2 & 0xFF; + if (byte1 != byte2) return byte1 < byte2 ? 
-1 : 1;
+    u1 >>= 8;
+    u2 >>= 8;
+  }
+  UNREACHABLE();
+#endif
+}
+}  // namespace helper
+
+inline int Relaxed_Memcmp(volatile const Atomic8* s1,
+                          volatile const Atomic8* s2, size_t len) {
+  constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
+  while (len > 0 &&
+         !(IsAligned(reinterpret_cast<uintptr_t>(s1), kAtomicWordSize) &&
+           IsAligned(reinterpret_cast<uintptr_t>(s2), kAtomicWordSize))) {
+    Atomic8 u1 = Relaxed_Load(s1++);
+    Atomic8 u2 = Relaxed_Load(s2++);
+    if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
+    --len;
+  }
+
+  if (IsAligned(reinterpret_cast<uintptr_t>(s1), kAtomicWordSize) &&
+      IsAligned(reinterpret_cast<uintptr_t>(s2), kAtomicWordSize)) {
+    while (len >= kAtomicWordSize) {
+      AtomicWord u1 =
+          Relaxed_Load(reinterpret_cast<volatile const AtomicWord*>(s1));
+      AtomicWord u2 =
+          Relaxed_Load(reinterpret_cast<volatile const AtomicWord*>(s2));
+      if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
+      s1 += kAtomicWordSize;
+      s2 += kAtomicWordSize;
+      len -= kAtomicWordSize;
+    }
+  }
+
+  while (len > 0) {
+    Atomic8 u1 = Relaxed_Load(s1++);
+    Atomic8 u2 = Relaxed_Load(s2++);
+    if (u1 != u2) return helper::MemcmpNotEqualFundamental(u1, u2);
+    --len;
+  }
+
+  return 0;
+}
+
 }  // namespace base
 }  // namespace v8
diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc
index d33857845a5c34..a51206aec698e8 100644
--- a/deps/v8/src/base/bounded-page-allocator.cc
+++ b/deps/v8/src/base/bounded-page-allocator.cc
@@ -142,6 +142,9 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
   DCHECK_LT(new_size, size);
   DCHECK(IsAligned(size - new_size, commit_page_size_));

+  // This must be held until the page permissions are updated.
+  MutexGuard guard(&mutex_);
+
   // Check if we freed any allocatable pages by this release.
   size_t allocated_size = RoundUp(size, allocate_page_size_);
   size_t new_allocated_size = RoundUp(new_size, allocate_page_size_);
@@ -150,13 +153,11 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
   {
     // There must be an allocated region at given |address| of a size not
     // smaller than |size|.
-    MutexGuard guard(&mutex_);
     DCHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
   }
 #endif

   if (new_allocated_size < allocated_size) {
-    MutexGuard guard(&mutex_);
     region_allocator_.TrimRegion(address, new_allocated_size);
   }

diff --git a/deps/v8/src/base/emulated-virtual-address-subspace.cc b/deps/v8/src/base/emulated-virtual-address-subspace.cc
new file mode 100644
index 00000000000000..fbfb1255693ac8
--- /dev/null
+++ b/deps/v8/src/base/emulated-virtual-address-subspace.cc
@@ -0,0 +1,138 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/emulated-virtual-address-subspace.h"
+
+#include "src/base/bits.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/wrappers.h"
+
+namespace v8 {
+namespace base {
+
+EmulatedVirtualAddressSubspace::EmulatedVirtualAddressSubspace(
+    VirtualAddressSpace* parent_space, Address base, size_t mapped_size,
+    size_t total_size)
+    : VirtualAddressSpace(parent_space->page_size(),
+                          parent_space->allocation_granularity(), base,
+                          total_size),
+      mapped_size_(mapped_size),
+      parent_space_(parent_space),
+      region_allocator_(base, mapped_size, parent_space_->page_size()) {
+  // For simplicity, we currently require both the mapped and total size to be
+  // a power of two.
This simplifies some things later on, for example, random + // addresses can be generated with a simply bitmask, and will then be inside + // the unmapped space with a probability >= 50% (mapped size == unmapped + // size) or never (mapped size == total size). + DCHECK(base::bits::IsPowerOfTwo(mapped_size)); + DCHECK(base::bits::IsPowerOfTwo(total_size)); +} + +EmulatedVirtualAddressSubspace::~EmulatedVirtualAddressSubspace() { + CHECK(parent_space_->FreePages(base(), mapped_size_)); +} + +void EmulatedVirtualAddressSubspace::SetRandomSeed(int64_t seed) { + MutexGuard guard(&mutex_); + rng_.SetSeed(seed); +} + +Address EmulatedVirtualAddressSubspace::RandomPageAddress() { + MutexGuard guard(&mutex_); + Address addr = base() + (rng_.NextInt64() % size()); + return RoundDown(addr, allocation_granularity()); +} + +Address EmulatedVirtualAddressSubspace::AllocatePages( + Address hint, size_t size, size_t alignment, PagePermissions permissions) { + if (hint == kNoHint || MappedRegionContains(hint, size)) { + MutexGuard guard(&mutex_); + + // Attempt to find a region in the mapped region. + Address address = region_allocator_.AllocateRegion(hint, size, alignment); + if (address != RegionAllocator::kAllocationFailure) { + // Success. Only need to adjust the page permissions. + if (parent_space_->SetPagePermissions(address, size, permissions)) { + return address; + } + // Probably ran out of memory, but still try to allocate in the unmapped + // space. + CHECK_EQ(size, region_allocator_.FreeRegion(address)); + } + } + + // No luck or hint is outside of the mapped region. Try to allocate pages in + // the unmapped space using page allocation hints instead. + + // Somewhat arbitrary size limitation to ensure that the loop below for + // finding a fitting base address hint terminates quickly. + if (size >= (unmapped_size() / 2)) return kNullAddress; + + static constexpr int kMaxAttempts = 10; + for (int i = 0; i < kMaxAttempts; i++) { + // If the hint wouldn't result in the entire allocation being inside the + // managed region, simply retry. There is at least a 50% chance of + // getting a usable address due to the size restriction above. + while (!UnmappedRegionContains(hint, size)) { + hint = RandomPageAddress(); + } + + Address region = + parent_space_->AllocatePages(hint, size, alignment, permissions); + if (region && UnmappedRegionContains(region, size)) { + return region; + } else if (region) { + CHECK(parent_space_->FreePages(region, size)); + } + + // Retry at a different address. + hint = RandomPageAddress(); + } + + return kNullAddress; +} + +bool EmulatedVirtualAddressSubspace::FreePages(Address address, size_t size) { + if (MappedRegionContains(address, size)) { + MutexGuard guard(&mutex_); + if (region_allocator_.FreeRegion(address) != size) return false; + CHECK(parent_space_->DecommitPages(address, size)); + return true; + } + if (!UnmappedRegionContains(address, size)) return false; + return parent_space_->FreePages(address, size); +} + +bool EmulatedVirtualAddressSubspace::SetPagePermissions( + Address address, size_t size, PagePermissions permissions) { + DCHECK(Contains(address, size)); + return parent_space_->SetPagePermissions(address, size, permissions); +} + +bool EmulatedVirtualAddressSubspace::CanAllocateSubspaces() { + // This is not supported, mostly because it's not (yet) needed in practice. 
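// Illustrative sketch (editorial, not part of the upstream change): callers
// are expected to gate subspace creation on this predicate, e.g.
//
//   if (space->CanAllocateSubspaces()) {
//     auto subspace = space->AllocateSubspace(hint, size, alignment, perms);
//   }
//
// which is why AllocateSubspace() below can simply be UNREACHABLE().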
+ return false; +} + +std::unique_ptr +EmulatedVirtualAddressSubspace::AllocateSubspace( + Address hint, size_t size, size_t alignment, + PagePermissions max_permissions) { + UNREACHABLE(); +} + +bool EmulatedVirtualAddressSubspace::DiscardSystemPages(Address address, + size_t size) { + DCHECK(Contains(address, size)); + return parent_space_->DiscardSystemPages(address, size); +} + +bool EmulatedVirtualAddressSubspace::DecommitPages(Address address, + size_t size) { + DCHECK(Contains(address, size)); + return parent_space_->DecommitPages(address, size); +} + +} // namespace base +} // namespace v8 diff --git a/deps/v8/src/base/emulated-virtual-address-subspace.h b/deps/v8/src/base/emulated-virtual-address-subspace.h new file mode 100644 index 00000000000000..480c3e1ae0f00b --- /dev/null +++ b/deps/v8/src/base/emulated-virtual-address-subspace.h @@ -0,0 +1,113 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_ +#define V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_ + +#include "include/v8-platform.h" +#include "src/base/base-export.h" +#include "src/base/compiler-specific.h" +#include "src/base/platform/mutex.h" +#include "src/base/region-allocator.h" +#include "src/base/virtual-address-space.h" + +namespace v8 { +namespace base { + +/** + * Emulates a virtual address subspace. + * + * This class is (optionally) backed by a page allocation and emulates a virtual + * address space that is potentially larger than that mapping. It generally + * first attempts to satisfy page allocation requests from its backing mapping, + * but will also attempt to obtain new page mappings inside the unmapped space + * through page allocation hints if necessary. + * + * Caveat: an emulated subspace violates the invariant that page allocations in + * an address space will never end up inside a child space and so does not + * provide the same security gurarantees. + */ +class V8_BASE_EXPORT EmulatedVirtualAddressSubspace final + : public NON_EXPORTED_BASE(::v8::VirtualAddressSpace) { + public: + // Construct an emulated virtual address subspace of the specified total size, + // potentially backed by a page allocation from the parent space. The newly + // created instance takes ownership of the page allocation (if any) and frees + // it during destruction. 
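// Illustrative usage sketch (editorial, not part of the upstream change;
// the parent-space setup is assumed): both sizes must be powers of two,
// e.g. a 4 GiB emulated space backed by a 1 GiB mapping:
//
//   v8::VirtualAddressSpace* parent = ...;  // e.g. the platform's root space
//   const size_t mapped = size_t{1} << 30;  // 1 GiB, power of two
//   const size_t total = size_t{4} << 30;   // 4 GiB, power of two
//   Address base = parent->AllocatePages(
//       kNoHint, mapped, parent->allocation_granularity(),
//       PagePermissions::kNoAccess);
//   EmulatedVirtualAddressSubspace space(parent, base, mapped, total);
//
// The subspace then owns the mapping and frees it in its destructor.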
+ EmulatedVirtualAddressSubspace(v8::VirtualAddressSpace* parent_space, + Address base, size_t mapped_size, + size_t total_size); + + ~EmulatedVirtualAddressSubspace() override; + + void SetRandomSeed(int64_t seed) override; + + Address RandomPageAddress() override; + + Address AllocatePages(Address hint, size_t size, size_t alignment, + PagePermissions permissions) override; + + bool FreePages(Address address, size_t size) override; + + bool SetPagePermissions(Address address, size_t size, + PagePermissions permissions) override; + + bool CanAllocateSubspaces() override; + + std::unique_ptr AllocateSubspace( + Address hint, size_t size, size_t alignment, + PagePermissions max_permissions) override; + + bool DiscardSystemPages(Address address, size_t size) override; + + bool DecommitPages(Address address, size_t size) override; + + private: + size_t mapped_size() const { return mapped_size_; } + size_t unmapped_size() const { return size() - mapped_size_; } + + Address mapped_base() const { return base(); } + Address unmapped_base() const { return base() + mapped_size_; } + + bool Contains(Address outer_start, size_t outer_size, Address inner_start, + size_t inner_size) const { + return (inner_start >= outer_start) && + ((inner_start + inner_size) <= (outer_start + outer_size)); + } + + bool Contains(Address addr, size_t length) const { + return Contains(base(), size(), addr, length); + } + + bool MappedRegionContains(Address addr, size_t length) const { + return Contains(mapped_base(), mapped_size(), addr, length); + } + + bool UnmappedRegionContains(Address addr, size_t length) const { + return Contains(unmapped_base(), unmapped_size(), addr, length); + } + + // Size of the mapped region located at the beginning of this address space. + const size_t mapped_size_; + + // Pointer to the parent space from which the backing pages were allocated. + // Must be kept alive by the owner of this instance. + v8::VirtualAddressSpace* parent_space_; + + // Mutex guarding the non-threadsafe RegionAllocator and + // RandomNumberGenerator. + Mutex mutex_; + + // RegionAllocator to manage the page allocation and divide it into further + // regions as necessary. + RegionAllocator region_allocator_; + + // Random number generator for generating random addresses. + RandomNumberGenerator rng_; +}; + +} // namespace base +} // namespace v8 + +#endif // V8_BASE_EMULATED_VIRTUAL_ADDRESS_SUBSPACE_H_ diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc index a0fd83e93974fc..f090ea5b6ac094 100644 --- a/deps/v8/src/base/platform/platform-fuchsia.cc +++ b/deps/v8/src/base/platform/platform-fuchsia.cc @@ -7,6 +7,7 @@ #include #include +#include "src/base/bits.h" #include "src/base/macros.h" #include "src/base/platform/platform-posix-time.h" #include "src/base/platform/platform-posix.h" @@ -34,24 +35,37 @@ zx_vm_option_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) { UNREACHABLE(); } -} // namespace - -TimezoneCache* OS::CreateTimezoneCache() { - return new PosixDefaultTimezoneCache(); +// Determine ZX_VM_ALIGN_X constant corresponding to the specified alignment. +// Returns 0 if there is none. +zx_vm_option_t GetAlignmentOptionFromAlignment(size_t alignment) { + // The alignment must be one of the ZX_VM_ALIGN_X constants. + // See zircon/system/public/zircon/types.h. 
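// Worked example (editorial, not part of the upstream change): a 64 KiB
// alignment is size_t{1} << 16, so the loop below stops at shift == 16 and
// the function returns (16 << ZX_VM_ALIGN_BASE), i.e. Zircon's 64 KiB
// alignment option. Any alignment that is not a power of two within the
// [1 KiB, 4 GiB] range falls through and yields 0.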
+ static_assert( + ZX_VM_ALIGN_1KB == (10 << ZX_VM_ALIGN_BASE), + "Fuchsia's ZX_VM_ALIGN_1KB constant doesn't match expected value"); + static_assert( + ZX_VM_ALIGN_4GB == (32 << ZX_VM_ALIGN_BASE), + "Fuchsia's ZX_VM_ALIGN_4GB constant doesn't match expected value"); + zx_vm_option_t alignment_log2 = 0; + for (int shift = 10; shift <= 32; shift++) { + if (alignment == (size_t{1} << shift)) { + alignment_log2 = shift; + break; + } + } + return alignment_log2 << ZX_VM_ALIGN_BASE; } -// static -void* OS::Allocate(void* address, size_t size, size_t alignment, - OS::MemoryPermission access) { - size_t page_size = OS::AllocatePageSize(); +void* AllocateInternal(const zx::vmar& vmar, size_t page_size, + size_t vmar_offset, bool vmar_offset_is_hint, + size_t size, size_t alignment, + OS::MemoryPermission access) { DCHECK_EQ(0, size % page_size); DCHECK_EQ(0, alignment % page_size); - address = AlignedAddress(address, alignment); - // Add the maximum misalignment so we are guaranteed an aligned base address. - size_t request_size = size + (alignment - page_size); + DCHECK_EQ(0, vmar_offset % page_size); zx::vmo vmo; - if (zx::vmo::create(request_size, 0, &vmo) != ZX_OK) { + if (zx::vmo::create(size, 0, &vmo) != ZX_OK) { return nullptr; } static const char kVirtualMemoryName[] = "v8-virtualmem"; @@ -68,85 +82,130 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, zx_vm_option_t options = GetProtectionFromMemoryPermission(access); - uint64_t vmar_offset = 0; - if (address) { - vmar_offset = reinterpret_cast(address); + zx_vm_option_t alignment_option = GetAlignmentOptionFromAlignment(alignment); + CHECK_NE(0, alignment_option); // Invalid alignment specified + options |= alignment_option; + + if (vmar_offset != 0) { options |= ZX_VM_SPECIFIC; } - zx_vaddr_t reservation; - zx_status_t status = zx::vmar::root_self()->map(options, vmar_offset, vmo, 0, - request_size, &reservation); - if (status != ZX_OK && address != nullptr) { - // Retry without the hint, if we supplied one. + zx_vaddr_t address; + zx_status_t status = vmar.map(options, vmar_offset, vmo, 0, size, &address); + + if (status != ZX_OK && vmar_offset != 0 && vmar_offset_is_hint) { + // If a vmar_offset was specified and the allocation failed (for example, + // because the offset overlapped another mapping), then we should retry + // again without a vmar_offset if that offset was just meant to be a hint. options &= ~(ZX_VM_SPECIFIC); - status = zx::vmar::root_self()->map(options, 0, vmo, 0, request_size, - &reservation); + status = vmar.map(options, 0, vmo, 0, size, &address); } + if (status != ZX_OK) { return nullptr; } - uint8_t* base = reinterpret_cast(reservation); - uint8_t* aligned_base = reinterpret_cast( - RoundUp(reinterpret_cast(base), alignment)); - - // Unmap extra memory reserved before and after the desired block. 
- if (aligned_base != base) { - DCHECK_LT(base, aligned_base); - size_t prefix_size = static_cast(aligned_base - base); - zx::vmar::root_self()->unmap(reinterpret_cast(base), - prefix_size); - request_size -= prefix_size; - } + return reinterpret_cast(address); +} + +bool FreeInternal(const zx::vmar& vmar, size_t page_size, void* address, + const size_t size) { + DCHECK_EQ(0, reinterpret_cast(address) % page_size); + DCHECK_EQ(0, size % page_size); + return vmar.unmap(reinterpret_cast(address), size) == ZX_OK; +} + +bool SetPermissionsInternal(const zx::vmar& vmar, size_t page_size, + void* address, size_t size, + OS::MemoryPermission access) { + DCHECK_EQ(0, reinterpret_cast(address) % page_size); + DCHECK_EQ(0, size % page_size); + uint32_t prot = GetProtectionFromMemoryPermission(access); + return vmar.protect(prot, reinterpret_cast(address), size) == + ZX_OK; +} + +bool DiscardSystemPagesInternal(const zx::vmar& vmar, size_t page_size, + void* address, size_t size) { + DCHECK_EQ(0, reinterpret_cast(address) % page_size); + DCHECK_EQ(0, size % page_size); + uint64_t address_int = reinterpret_cast(address); + return vmar.op_range(ZX_VMO_OP_DECOMMIT, address_int, size, nullptr, 0) == + ZX_OK; +} + +zx_status_t CreateAddressSpaceReservationInternal( + const zx::vmar& vmar, size_t page_size, size_t vmar_offset, + bool vmar_offset_is_hint, size_t size, size_t alignment, + OS::MemoryPermission max_permission, zx::vmar* child, + zx_vaddr_t* child_addr) { + DCHECK_EQ(0, size % page_size); + DCHECK_EQ(0, alignment % page_size); + DCHECK_EQ(0, vmar_offset % page_size); + + // TODO(v8) determine these based on max_permission. + zx_vm_option_t options = ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | + ZX_VM_CAN_MAP_EXECUTE | ZX_VM_CAN_MAP_SPECIFIC; - size_t aligned_size = RoundUp(size, page_size); + zx_vm_option_t alignment_option = GetAlignmentOptionFromAlignment(alignment); + CHECK_NE(0, alignment_option); // Invalid alignment specified + options |= alignment_option; - if (aligned_size != request_size) { - DCHECK_LT(aligned_size, request_size); - size_t suffix_size = request_size - aligned_size; - zx::vmar::root_self()->unmap( - reinterpret_cast(aligned_base + aligned_size), suffix_size); - request_size -= suffix_size; + if (vmar_offset != 0) { + options |= ZX_VM_SPECIFIC; } - DCHECK(aligned_size == request_size); - return static_cast(aligned_base); + zx_status_t status = + vmar.allocate(options, vmar_offset, size, child, child_addr); + if (status != ZX_OK && vmar_offset != 0 && vmar_offset_is_hint) { + // If a vmar_offset was specified and the allocation failed (for example, + // because the offset overlapped another mapping), then we should retry + // again without a vmar_offset if that offset was just meant to be a hint. + options &= ~(ZX_VM_SPECIFIC); + status = vmar.allocate(options, 0, size, child, child_addr); + } + + return status; +} + +} // namespace + +TimezoneCache* OS::CreateTimezoneCache() { + return new PosixDefaultTimezoneCache(); } // static -bool OS::Free(void* address, const size_t size) { - DCHECK_EQ(0, reinterpret_cast(address) % AllocatePageSize()); - DCHECK_EQ(0, size % AllocatePageSize()); - return zx::vmar::root_self()->unmap(reinterpret_cast(address), - size) == ZX_OK; +void* OS::Allocate(void* address, size_t size, size_t alignment, + MemoryPermission access) { + constexpr bool vmar_offset_is_hint = true; + DCHECK_EQ(0, reinterpret_cast
(address) % alignment); + return AllocateInternal(*zx::vmar::root_self(), AllocatePageSize(), + reinterpret_cast(address), + vmar_offset_is_hint, size, alignment, access); } // static -bool OS::Release(void* address, size_t size) { - DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); - DCHECK_EQ(0, size % CommitPageSize()); - return zx::vmar::root_self()->unmap(reinterpret_cast(address), - size) == ZX_OK; +bool OS::Free(void* address, const size_t size) { + return FreeInternal(*zx::vmar::root_self(), AllocatePageSize(), address, + size); } +// static +bool OS::Release(void* address, size_t size) { return Free(address, size); } + // static bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { - DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); - DCHECK_EQ(0, size % CommitPageSize()); - uint32_t prot = GetProtectionFromMemoryPermission(access); - return zx::vmar::root_self()->protect( - prot, reinterpret_cast(address), size) == ZX_OK; + return SetPermissionsInternal(*zx::vmar::root_self(), CommitPageSize(), + address, size, access); } // static bool OS::DiscardSystemPages(void* address, size_t size) { - uint64_t address_int = reinterpret_cast(address); - zx_status_t status = zx::vmar::root_self()->op_range( - ZX_VMO_OP_DECOMMIT, address_int, size, nullptr, 0); - return status == ZX_OK; + return DiscardSystemPagesInternal(*zx::vmar::root_self(), CommitPageSize(), + address, size); } +// static bool OS::DecommitPages(void* address, size_t size) { // We rely on DiscardSystemPages decommitting the pages immediately (via // ZX_VMO_OP_DECOMMIT) so that they are guaranteed to be zero-initialized @@ -155,6 +214,34 @@ bool OS::DecommitPages(void* address, size_t size) { DiscardSystemPages(address, size); } +// static +bool OS::CanReserveAddressSpace() { return true; } + +// static +Optional OS::CreateAddressSpaceReservation( + void* hint, size_t size, size_t alignment, + MemoryPermission max_permission) { + DCHECK_EQ(0, reinterpret_cast
(hint) % alignment); + zx::vmar child; + zx_vaddr_t child_addr; + uint64_t vmar_offset = reinterpret_cast(hint); + constexpr bool vmar_offset_is_hint = true; + zx_status_t status = CreateAddressSpaceReservationInternal( + *zx::vmar::root_self(), AllocatePageSize(), vmar_offset, + vmar_offset_is_hint, size, alignment, max_permission, &child, + &child_addr); + if (status != ZX_OK) return {}; + return AddressSpaceReservation(reinterpret_cast(child_addr), size, + child.release()); +} + +// static +bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) { + // Destroy the vmar and release the handle. + zx::vmar vmar(reservation.vmar_); + return vmar.destroy() == ZX_OK; +} + // static bool OS::HasLazyCommits() { return true; } @@ -194,5 +281,74 @@ std::vector OS::GetFreeMemoryRangesWithin( return {}; } +Optional AddressSpaceReservation::CreateSubReservation( + void* address, size_t size, OS::MemoryPermission max_permission) { + DCHECK(Contains(address, size)); + + zx::vmar child; + zx_vaddr_t child_addr; + size_t vmar_offset = 0; + if (address != 0) { + vmar_offset = + reinterpret_cast(address) - reinterpret_cast(base()); + } + constexpr bool vmar_offset_is_hint = false; + zx_status_t status = CreateAddressSpaceReservationInternal( + *zx::unowned_vmar(vmar_), OS::AllocatePageSize(), vmar_offset, + vmar_offset_is_hint, size, OS::AllocatePageSize(), max_permission, &child, + &child_addr); + if (status != ZX_OK) return {}; + DCHECK_EQ(reinterpret_cast(child_addr), address); + return AddressSpaceReservation(reinterpret_cast(child_addr), size, + child.release()); +} + +bool AddressSpaceReservation::FreeSubReservation( + AddressSpaceReservation reservation) { + return OS::FreeAddressSpaceReservation(reservation); +} + +bool AddressSpaceReservation::Allocate(void* address, size_t size, + OS::MemoryPermission access) { + DCHECK(Contains(address, size)); + size_t vmar_offset = 0; + if (address != 0) { + vmar_offset = + reinterpret_cast(address) - reinterpret_cast(base()); + } + constexpr bool vmar_offset_is_hint = false; + void* allocation = AllocateInternal( + *zx::unowned_vmar(vmar_), OS::AllocatePageSize(), vmar_offset, + vmar_offset_is_hint, size, OS::AllocatePageSize(), access); + DCHECK(!allocation || allocation == address); + return allocation != nullptr; +} + +bool AddressSpaceReservation::Free(void* address, size_t size) { + DCHECK(Contains(address, size)); + return FreeInternal(*zx::unowned_vmar(vmar_), OS::AllocatePageSize(), address, + size); +} + +bool AddressSpaceReservation::SetPermissions(void* address, size_t size, + OS::MemoryPermission access) { + DCHECK(Contains(address, size)); + return SetPermissionsInternal(*zx::unowned_vmar(vmar_), OS::CommitPageSize(), + address, size, access); +} + +bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) { + DCHECK(Contains(address, size)); + return DiscardSystemPagesInternal(*zx::unowned_vmar(vmar_), + OS::CommitPageSize(), address, size); +} + +bool AddressSpaceReservation::DecommitPages(void* address, size_t size) { + DCHECK(Contains(address, size)); + // See comment in OS::DecommitPages. 
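// Editorial illustration (not part of the upstream change): the caller-
// visible contract of the two-step sequence below is that once
// DecommitPages() succeeds, re-enabling access yields zero-filled pages:
//
//   CHECK(reservation.DecommitPages(p, len));
//   CHECK(reservation.SetPermissions(p, len,
//                                    OS::MemoryPermission::kReadWrite));
//   // Reads from [p, p + len) now return 0.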
+ return SetPermissions(address, size, OS::MemoryPermission::kNoAccess) && + DiscardSystemPages(address, size); +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index f05f22c9136eb4..155af37155a4d7 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -153,11 +153,15 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access, flags |= MAP_LAZY; #endif // V8_OS_QNX } -#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT +#if V8_OS_MACOSX + // MAP_JIT is required to obtain writable and executable pages when the + // hardened runtime/memory protection is enabled, which is optional (via code + // signing) on Intel-based Macs but mandatory on Apple silicon ones. See also + // https://developer.apple.com/documentation/apple-silicon/porting-just-in-time-compilers-to-apple-silicon. if (access == OS::MemoryPermission::kNoAccessWillJitLater) { flags |= MAP_JIT; } -#endif +#endif // V8_OS_MACOSX return flags; } @@ -467,6 +471,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { return ret == 0; } +// static bool OS::DiscardSystemPages(void* address, size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); DCHECK_EQ(0, size % CommitPageSize()); @@ -495,6 +500,7 @@ bool OS::DiscardSystemPages(void* address, size_t size) { return ret == 0; } +// static bool OS::DecommitPages(void* address, size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); DCHECK_EQ(0, size % CommitPageSize()); @@ -509,6 +515,36 @@ bool OS::DecommitPages(void* address, size_t size) { return ptr == address; } +// static +bool OS::CanReserveAddressSpace() { return true; } + +// static +Optional OS::CreateAddressSpaceReservation( + void* hint, size_t size, size_t alignment, + MemoryPermission max_permission) { + // On POSIX, address space reservations are backed by private memory mappings. + MemoryPermission permission = MemoryPermission::kNoAccess; + if (max_permission == MemoryPermission::kReadWriteExecute) { + permission = MemoryPermission::kNoAccessWillJitLater; + } + + void* reservation = Allocate(hint, size, alignment, permission); + if (!reservation && permission == MemoryPermission::kNoAccessWillJitLater) { + // Retry without MAP_JIT, for example in case we are running on an old OS X. + permission = MemoryPermission::kNoAccess; + reservation = Allocate(hint, size, alignment, permission); + } + + if (!reservation) return {}; + + return AddressSpaceReservation(reservation, size); +} + +// static +bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) { + return Free(reservation.base(), reservation.size()); +} + // static bool OS::HasLazyCommits() { #if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX @@ -823,6 +859,57 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) { strncpy(dest, src, n); } +// ---------------------------------------------------------------------------- +// POSIX Address space reservation support. 
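// A reservation on POSIX is nothing more than the kNoAccess (PROT_NONE)
// mapping created by OS::CreateAddressSpaceReservation above, so the methods
// below translate directly into mprotect/madvise-style operations on the
// already mmap'ed region.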
+// + +#if !V8_OS_CYGWIN && !V8_OS_FUCHSIA + +Optional AddressSpaceReservation::CreateSubReservation( + void* address, size_t size, OS::MemoryPermission max_permission) { + DCHECK(Contains(address, size)); + DCHECK_EQ(0, size % OS::AllocatePageSize()); + DCHECK_EQ(0, reinterpret_cast(address) % OS::AllocatePageSize()); + + return AddressSpaceReservation(address, size); +} + +bool AddressSpaceReservation::FreeSubReservation( + AddressSpaceReservation reservation) { + // Nothing to do. + // Pages allocated inside the reservation must've already been freed. + return true; +} + +bool AddressSpaceReservation::Allocate(void* address, size_t size, + OS::MemoryPermission access) { + // The region is already mmap'ed, so it just has to be made accessible now. + DCHECK(Contains(address, size)); + return OS::SetPermissions(address, size, access); +} + +bool AddressSpaceReservation::Free(void* address, size_t size) { + DCHECK(Contains(address, size)); + return OS::DecommitPages(address, size); +} + +bool AddressSpaceReservation::SetPermissions(void* address, size_t size, + OS::MemoryPermission access) { + DCHECK(Contains(address, size)); + return OS::SetPermissions(address, size, access); +} + +bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) { + DCHECK(Contains(address, size)); + return OS::DiscardSystemPages(address, size); +} + +bool AddressSpaceReservation::DecommitPages(void* address, size_t size) { + DCHECK(Contains(address, size)); + return OS::DecommitPages(address, size); +} + +#endif // !V8_OS_CYGWIN && !V8_OS_FUCHSIA // ---------------------------------------------------------------------------- // POSIX thread support. @@ -840,9 +927,8 @@ Thread::Thread(const Options& options) : data_(new PlatformData), stack_size_(options.stack_size()), start_semaphore_(nullptr) { - if (stack_size_ > 0 && static_cast(stack_size_) < PTHREAD_STACK_MIN) { - stack_size_ = PTHREAD_STACK_MIN; - } + const int min_stack_size = static_cast(PTHREAD_STACK_MIN); + if (stack_size_ > 0) stack_size_ = std::max(stack_size_, min_stack_size); set_name(options.name()); } diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index 919c3ef4df8956..d00c4f5ebb977a 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -722,6 +722,20 @@ void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) { g_hard_abort = hard_abort; } +typedef PVOID (*VirtualAlloc2_t)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, + MEM_EXTENDED_PARAMETER*, ULONG); +VirtualAlloc2_t VirtualAlloc2; + +void OS::EnsureWin32MemoryAPILoaded() { + static bool loaded = false; + if (!loaded) { + VirtualAlloc2 = (VirtualAlloc2_t)GetProcAddress( + GetModuleHandle(L"kernelbase.dll"), "VirtualAlloc2"); + + loaded = true; + } +} + // static size_t OS::AllocatePageSize() { static size_t allocate_alignment = 0; @@ -801,6 +815,14 @@ DWORD GetProtectionFromMemoryPermission(OS::MemoryPermission access) { UNREACHABLE(); } +void* VirtualAllocWrapper(void* hint, size_t size, DWORD flags, DWORD protect) { + if (VirtualAlloc2) { + return VirtualAlloc2(nullptr, hint, size, flags, protect, NULL, 0); + } else { + return VirtualAlloc(hint, size, flags, protect); + } +} + uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect, void* hint) { LPVOID base = nullptr; @@ -816,32 +838,18 @@ uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect, if (use_aslr && protect != PAGE_READWRITE) { // For executable or reserved pages 
try to randomize the allocation address. - base = VirtualAlloc(hint, size, flags, protect); + base = VirtualAllocWrapper(hint, size, flags, protect); } // On failure, let the OS find an address to use. if (base == nullptr) { - base = VirtualAlloc(nullptr, size, flags, protect); + base = VirtualAllocWrapper(nullptr, size, flags, protect); } return reinterpret_cast(base); } -} // namespace - -// static -void* OS::Allocate(void* hint, size_t size, size_t alignment, - MemoryPermission access) { - size_t page_size = AllocatePageSize(); - DCHECK_EQ(0, size % page_size); - DCHECK_EQ(0, alignment % page_size); - DCHECK_LE(page_size, alignment); - hint = AlignedAddress(hint, alignment); - - DWORD flags = (access == OS::MemoryPermission::kNoAccess) - ? MEM_RESERVE - : MEM_RESERVE | MEM_COMMIT; - DWORD protect = GetProtectionFromMemoryPermission(access); - +void* AllocateInternal(void* hint, size_t size, size_t alignment, + size_t page_size, DWORD flags, DWORD protect) { // First, try an exact size aligned allocation. uint8_t* base = RandomizedVirtualAlloc(size, flags, protect, hint); if (base == nullptr) return nullptr; // Can't allocate, we're OOM. @@ -852,7 +860,7 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, if (base == aligned_base) return reinterpret_cast(base); // Otherwise, free it and try a larger allocation. - CHECK(Free(base, size)); + CHECK(VirtualFree(base, 0, MEM_RELEASE)); // Clear the hint. It's unlikely we can allocate at this address. hint = nullptr; @@ -868,11 +876,11 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, // Try to trim the allocation by freeing the padded allocation and then // calling VirtualAlloc at the aligned base. - CHECK(Free(base, padded_size)); + CHECK(VirtualFree(base, 0, MEM_RELEASE)); aligned_base = reinterpret_cast( RoundUp(reinterpret_cast(base), alignment)); base = reinterpret_cast( - VirtualAlloc(aligned_base, size, flags, protect)); + VirtualAllocWrapper(aligned_base, size, flags, protect)); // We might not get the reduced allocation due to a race. In that case, // base will be nullptr. if (base != nullptr) break; @@ -881,6 +889,25 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, return reinterpret_cast(base); } +} // namespace + +// static +void* OS::Allocate(void* hint, size_t size, size_t alignment, + MemoryPermission access) { + size_t page_size = AllocatePageSize(); + DCHECK_EQ(0, size % page_size); + DCHECK_EQ(0, alignment % page_size); + DCHECK_LE(page_size, alignment); + hint = AlignedAddress(hint, alignment); + + DWORD flags = (access == OS::MemoryPermission::kNoAccess) + ? MEM_RESERVE + : MEM_RESERVE | MEM_COMMIT; + DWORD protect = GetProtectionFromMemoryPermission(access); + + return AllocateInternal(hint, size, alignment, page_size, flags, protect); +} + // static bool OS::Free(void* address, const size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % AllocatePageSize()); @@ -904,7 +931,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { return VirtualFree(address, size, MEM_DECOMMIT) != 0; } DWORD protect = GetProtectionFromMemoryPermission(access); - return VirtualAlloc(address, size, MEM_COMMIT, protect) != nullptr; + return VirtualAllocWrapper(address, size, MEM_COMMIT, protect) != nullptr; } // static @@ -929,7 +956,7 @@ bool OS::DiscardSystemPages(void* address, size_t size) { } // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on // failure. 
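  // (MEM_RESET leaves the pages committed but tells the kernel their contents
  // are no longer needed, which is the closest substitute for
  // DiscardVirtualMemory.)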
- void* ptr = VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE); + void* ptr = VirtualAllocWrapper(address, size, MEM_RESET, PAGE_READWRITE); CHECK(ptr); return ptr; } @@ -949,6 +976,35 @@ bool OS::DecommitPages(void* address, size_t size) { return VirtualFree(address, size, MEM_DECOMMIT) != 0; } +// static +bool OS::CanReserveAddressSpace() { return VirtualAlloc2 != nullptr; } + +// static +Optional OS::CreateAddressSpaceReservation( + void* hint, size_t size, size_t alignment, + MemoryPermission max_permission) { + CHECK(CanReserveAddressSpace()); + + size_t page_size = AllocatePageSize(); + DCHECK_EQ(0, size % page_size); + DCHECK_EQ(0, alignment % page_size); + DCHECK_LE(page_size, alignment); + hint = AlignedAddress(hint, alignment); + + // On Windows, address space reservations are backed by placeholder mappings. + void* reservation = + AllocateInternal(hint, size, alignment, page_size, + MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS); + if (!reservation) return {}; + + return AddressSpaceReservation(reservation, size); +} + +// static +bool OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) { + return OS::Free(reservation.base(), reservation.size()); +} + // static bool OS::HasLazyCommits() { // TODO(alph): implement for the platform. @@ -1068,6 +1124,64 @@ Win32MemoryMappedFile::~Win32MemoryMappedFile() { CloseHandle(file_); } +Optional AddressSpaceReservation::CreateSubReservation( + void* address, size_t size, OS::MemoryPermission max_permission) { + // Nothing to do, the sub reservation must already have been split by now. + DCHECK(Contains(address, size)); + DCHECK_EQ(0, size % OS::AllocatePageSize()); + DCHECK_EQ(0, reinterpret_cast(address) % OS::AllocatePageSize()); + + return AddressSpaceReservation(address, size); +} + +bool AddressSpaceReservation::FreeSubReservation( + AddressSpaceReservation reservation) { + // Nothing to do. + // Pages allocated inside the reservation must've already been freed. + return true; +} + +bool AddressSpaceReservation::SplitPlaceholder(void* address, size_t size) { + DCHECK(Contains(address, size)); + return VirtualFree(address, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER); +} + +bool AddressSpaceReservation::MergePlaceholders(void* address, size_t size) { + DCHECK(Contains(address, size)); + return VirtualFree(address, size, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS); +} + +bool AddressSpaceReservation::Allocate(void* address, size_t size, + OS::MemoryPermission access) { + DCHECK(Contains(address, size)); + CHECK(VirtualAlloc2); + DWORD flags = (access == OS::MemoryPermission::kNoAccess) + ? 
MEM_RESERVE | MEM_REPLACE_PLACEHOLDER + : MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER; + DWORD protect = GetProtectionFromMemoryPermission(access); + return VirtualAlloc2(nullptr, address, size, flags, protect, NULL, 0); +} + +bool AddressSpaceReservation::Free(void* address, size_t size) { + DCHECK(Contains(address, size)); + return VirtualFree(address, size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER); +} + +bool AddressSpaceReservation::SetPermissions(void* address, size_t size, + OS::MemoryPermission access) { + DCHECK(Contains(address, size)); + return OS::SetPermissions(address, size, access); +} + +bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) { + DCHECK(Contains(address, size)); + return OS::DiscardSystemPages(address, size); +} + +bool AddressSpaceReservation::DecommitPages(void* address, size_t size) { + DCHECK(Contains(address, size)); + return OS::DecommitPages(address, size); +} // The following code loads functions defined in DbhHelp.h and TlHelp32.h // dynamically. This is to avoid being depending on dbghelp.dll and diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h index bc1edc9c03e1ec..53a7267889cd3d 100644 --- a/deps/v8/src/base/platform/platform.h +++ b/deps/v8/src/base/platform/platform.h @@ -29,6 +29,7 @@ #include "src/base/base-export.h" #include "src/base/build_config.h" #include "src/base/compiler-specific.h" +#include "src/base/optional.h" #include "src/base/platform/mutex.h" #include "src/base/platform/semaphore.h" @@ -36,6 +37,10 @@ #include "src/base/qnx-math.h" #endif +#if V8_OS_FUCHSIA +#include +#endif // V8_OS_FUCHSIA + #ifdef V8_USE_ADDRESS_SANITIZER #include #endif // V8_USE_ADDRESS_SANITIZER @@ -115,8 +120,11 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) { #endif // V8_NO_FAST_TLS +class AddressSpaceReservation; class PageAllocator; class TimezoneCache; +class VirtualAddressSpace; +class VirtualAddressSubspace; // ---------------------------------------------------------------------------- // OS @@ -132,6 +140,17 @@ class V8_BASE_EXPORT OS { // - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof. static void Initialize(bool hard_abort, const char* const gc_fake_mmap); +#if V8_OS_WIN + // On Windows, ensure the newer memory API is loaded if available. This + // includes function like VirtualAlloc2 and MapViewOfFile3. + // TODO(chromium:1218005) this should probably happen as part of Initialize, + // but that is currently invoked too late, after the virtual memory cage + // is initialized. However, eventually the virtual memory cage initialization + // will happen as part of V8::Initialize, at which point this function can + // probably be merged into OS::Initialize. + static void EnsureWin32MemoryAPILoaded(); +#endif + // Returns the accumulated user time for thread. This routine // can be used for profiling. The implementation should // strive for high-precision timer resolution, preferable @@ -291,9 +310,12 @@ class V8_BASE_EXPORT OS { private: // These classes use the private memory management API below. 
+ friend class AddressSpaceReservation; friend class MemoryMappedFile; friend class PosixMemoryMappedFile; friend class v8::base::PageAllocator; + friend class v8::base::VirtualAddressSpace; + friend class v8::base::VirtualAddressSubspace; static size_t AllocatePageSize(); @@ -326,6 +348,15 @@ class V8_BASE_EXPORT OS { V8_WARN_UNUSED_RESULT static bool DecommitPages(void* address, size_t size); + V8_WARN_UNUSED_RESULT static bool CanReserveAddressSpace(); + + V8_WARN_UNUSED_RESULT static Optional + CreateAddressSpaceReservation(void* hint, size_t size, size_t alignment, + MemoryPermission max_permission); + + V8_WARN_UNUSED_RESULT static bool FreeAddressSpaceReservation( + AddressSpaceReservation reservation); + static const int msPerSecond = 1000; #if V8_OS_POSIX @@ -347,6 +378,73 @@ inline void EnsureConsoleOutput() { #endif // (defined(_WIN32) || defined(_WIN64)) } +// ---------------------------------------------------------------------------- +// AddressSpaceReservation +// +// This class provides the same memory management functions as OS but operates +// inside a previously reserved contiguous region of virtual address space. +class V8_BASE_EXPORT AddressSpaceReservation { + public: + using Address = uintptr_t; + + void* base() const { return base_; } + size_t size() const { return size_; } + + bool Contains(void* region_addr, size_t region_size) const { + Address base = reinterpret_cast
<Address>(base_);
+    Address region_base = reinterpret_cast<Address>
(region_addr); + return (region_base >= base) && + ((region_base + region_size) <= (base + size_)); + } + + V8_WARN_UNUSED_RESULT bool Allocate(void* address, size_t size, + OS::MemoryPermission access); + + V8_WARN_UNUSED_RESULT bool Free(void* address, size_t size); + + V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size, + OS::MemoryPermission access); + + V8_WARN_UNUSED_RESULT bool DiscardSystemPages(void* address, size_t size); + + V8_WARN_UNUSED_RESULT bool DecommitPages(void* address, size_t size); + + V8_WARN_UNUSED_RESULT Optional CreateSubReservation( + void* address, size_t size, OS::MemoryPermission max_permission); + + V8_WARN_UNUSED_RESULT static bool FreeSubReservation( + AddressSpaceReservation reservation); + +#if V8_OS_WIN + // On Windows, the placeholder mappings backing address space reservations + // need to be split and merged as page allocations can only replace an entire + // placeholder mapping, not parts of it. This must be done by the users of + // this API as it requires a RegionAllocator (or equivalent) to keep track of + // sub-regions and decide when to split and when to coalesce multiple free + // regions into a single one. + V8_WARN_UNUSED_RESULT bool SplitPlaceholder(void* address, size_t size); + V8_WARN_UNUSED_RESULT bool MergePlaceholders(void* address, size_t size); +#endif // V8_OS_WIN + + private: + friend class OS; + +#if V8_OS_FUCHSIA + AddressSpaceReservation(void* base, size_t size, zx_handle_t vmar) + : base_(base), size_(size), vmar_(vmar) {} +#else + AddressSpaceReservation(void* base, size_t size) : base_(base), size_(size) {} +#endif // V8_OS_FUCHSIA + + void* base_ = nullptr; + size_t size_ = 0; + +#if V8_OS_FUCHSIA + // On Fuchsia, address space reservations are backed by VMARs. + zx_handle_t vmar_ = ZX_HANDLE_INVALID; +#endif // V8_OS_FUCHSIA +}; + // ---------------------------------------------------------------------------- // Thread // diff --git a/deps/v8/src/base/platform/yield-processor.h b/deps/v8/src/base/platform/yield-processor.h new file mode 100644 index 00000000000000..a2f4b2d4136099 --- /dev/null +++ b/deps/v8/src/base/platform/yield-processor.h @@ -0,0 +1,55 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_PLATFORM_YIELD_PROCESSOR_H_ +#define V8_BASE_PLATFORM_YIELD_PROCESSOR_H_ + +// The YIELD_PROCESSOR macro wraps an architecture specific-instruction that +// informs the processor we're in a busy wait, so it can handle the branch more +// intelligently and e.g. reduce power to our core or give more resources to the +// other hyper-thread on this core. See the following for context: +// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops + +#if defined(V8_CC_MSVC) +// MSVC does not support inline assembly via __asm__ and provides compiler +// intrinsics instead. Check if there is a usable intrinsic. +// +// intrin.h is an expensive header, so only include it if we're on a host +// architecture that has a usable intrinsic. 
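// A minimal consumer of this macro (a sketch, not part of this header) is a
// bounded spin-wait such as:
//
//   while (!ready.load(std::memory_order_acquire)) {
//     YIELD_PROCESSOR;  // hint: busy-waiting, reduce pipeline/power impact
//   }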
+#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64) +#include +#define YIELD_PROCESSOR _mm_pause() +#elif defined(V8_HOST_ARCH_ARM64) || \ + (defined(V8_HOST_ARCH_ARM) && __ARM_ARCH >= 6) +#include +#define YIELD_PROCESSOR __yield() +#endif // V8_HOST_ARCH + +#else // !V8_CC_MSVC + +#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64) +#define YIELD_PROCESSOR __asm__ __volatile__("pause") +#elif defined(V8_HOST_ARCH_ARM64) || \ + (defined(V8_HOST_ARCH_ARM) && __ARM_ARCH >= 6) +#define YIELD_PROCESSOR __asm__ __volatile__("yield") +#elif defined(V8_HOST_ARCH_MIPS) +// The MIPS32 docs state that the PAUSE instruction is a no-op on older +// architectures (first added in MIPS32r2). To avoid assembler errors when +// targeting pre-r2, we must encode the instruction manually. +#define YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140") +#elif defined(V8_HOST_ARCH_MIPS64EL) && __mips_isa_rev >= 2 +// Don't bother doing using .word here since r2 is the lowest supported mips64 +// that Chromium supports. +#define YIELD_PROCESSOR __asm__ __volatile__("pause") +#elif defined(V8_HOST_ARCH_PPC64) +#define YIELD_PROCESSOR __asm__ __volatile__("or 31,31,31") +#endif // V8_HOST_ARCH + +#endif // V8_CC_MSVC + +#ifndef YIELD_PROCESSOR +#define YIELD_PROCESSOR ((void)0) +#endif + +#endif // V8_BASE_PLATFORM_YIELD_PROCESSOR_H_ diff --git a/deps/v8/src/base/region-allocator.cc b/deps/v8/src/base/region-allocator.cc index 53932d2864f133..d4d443cacf9e82 100644 --- a/deps/v8/src/base/region-allocator.cc +++ b/deps/v8/src/base/region-allocator.cc @@ -41,6 +41,8 @@ RegionAllocator::RegionAllocator(Address memory_region_begin, } RegionAllocator::~RegionAllocator() { + // TODO(chromium:1218005) either (D)CHECK that all allocated regions have + // been freed again (and thus merged into a single region) or do that now. for (Region* region : all_regions_) { delete region; } @@ -87,6 +89,8 @@ RegionAllocator::Region* RegionAllocator::Split(Region* region, DCHECK_NE(new_size, 0); DCHECK_GT(region->size(), new_size); + if (on_split_) on_split_(region->begin(), new_size); + // Create new region and put it to the lists after the |region|. DCHECK(!region->is_excluded()); RegionState state = region->state(); @@ -112,6 +116,9 @@ void RegionAllocator::Merge(AllRegionsSet::iterator prev_iter, Region* prev = *prev_iter; Region* next = *next_iter; DCHECK_EQ(prev->end(), next->begin()); + + if (on_merge_) on_merge_(prev->begin(), prev->size() + next->size()); + prev->set_size(prev->size() + next->size()); all_regions_.erase(next_iter); // prev_iter stays valid. @@ -229,6 +236,29 @@ RegionAllocator::Address RegionAllocator::AllocateAlignedRegion( return region->begin(); } +RegionAllocator::Address RegionAllocator::AllocateRegion(Address hint, + size_t size, + size_t alignment) { + DCHECK(IsAligned(alignment, page_size())); + DCHECK(IsAligned(hint, alignment)); + + if (hint && contains(hint, size)) { + if (AllocateRegionAt(hint, size)) { + return hint; + } + } + + Address address; + if (alignment <= page_size()) { + // TODO(chromium:1218005): Consider using randomized version here. 
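  // The hint was absent or already taken: fall back to a regular first-fit
  // search, using the aligned variant only when the caller needs more than
  // page alignment.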
+ address = AllocateRegion(size); + } else { + address = AllocateAlignedRegion(size, alignment); + } + + return address; +} + size_t RegionAllocator::TrimRegion(Address address, size_t new_size) { DCHECK(IsAligned(new_size, page_size_)); diff --git a/deps/v8/src/base/region-allocator.h b/deps/v8/src/base/region-allocator.h index f80524870f4c40..13df2aa7efabf6 100644 --- a/deps/v8/src/base/region-allocator.h +++ b/deps/v8/src/base/region-allocator.h @@ -27,6 +27,8 @@ class V8_BASE_EXPORT RegionAllocator final { public: using Address = uintptr_t; + using SplitMergeCallback = std::function; + static constexpr Address kAllocationFailure = static_cast
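<Address>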
(-1); enum class RegionState { @@ -43,6 +45,27 @@ class V8_BASE_EXPORT RegionAllocator final { RegionAllocator& operator=(const RegionAllocator&) = delete; ~RegionAllocator(); + // Split and merge callbacks. + // + // These callbacks can be installed to perform additional logic when regions + // are split or merged. For example, when managing Windows placeholder + // regions, a region must be split into sub-regions (using + // VirtualFree(MEM_PRESERVE_PLACEHOLDER)) before a part of it can be replaced + // with an actual memory mapping. Similarly, multiple sub-regions must be + // merged (using VirtualFree(MEM_COALESCE_PLACEHOLDERS)) when coalescing them + // into a larger, free region again. + // + // The on_split callback is called to signal that an existing region is split + // so that [start, start+size) becomes a new region. + void set_on_split_callback(SplitMergeCallback callback) { + on_split_ = callback; + } + // The on_merge callback is called to signal that all regions in the range + // [start, start+size) are merged into a single one. + void set_on_merge_callback(SplitMergeCallback callback) { + on_merge_ = callback; + } + // Allocates region of |size| (must be |page_size|-aligned). Returns // the address of the region on success or kAllocationFailure. Address AllocateRegion(size_t size); @@ -66,6 +89,11 @@ class V8_BASE_EXPORT RegionAllocator final { // success or kAllocationFailure. Address AllocateAlignedRegion(size_t size, size_t alignment); + // Attempts to allocate a region of the given size and alignment at the + // specified address but fall back to allocating the region elsewhere if + // necessary. + Address AllocateRegion(Address hint, size_t size, size_t alignment); + // Frees region at given |address|, returns the size of the region. // There must be a used region starting at given address otherwise nothing // will be freed and 0 will be returned. @@ -114,9 +142,9 @@ class V8_BASE_EXPORT RegionAllocator final { bool is_free() const { return state_ == RegionState::kFree; } bool is_allocated() const { return state_ == RegionState::kAllocated; } bool is_excluded() const { return state_ == RegionState::kExcluded; } - void set_state(RegionState state) { state_ = state; } RegionState state() { return state_; } + void set_state(RegionState state) { state_ = state; } void Print(std::ostream& os) const; @@ -158,6 +186,10 @@ class V8_BASE_EXPORT RegionAllocator final { // Free regions ordered by sizes and addresses. std::set free_regions_; + // Callbacks called when regions are split or merged. + SplitMergeCallback on_split_; + SplitMergeCallback on_merge_; + // Returns region containing given address or nullptr. AllRegionsSet::iterator FindRegion(Address address); diff --git a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc new file mode 100644 index 00000000000000..1877c44b7be866 --- /dev/null +++ b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.cc @@ -0,0 +1,61 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/base/sanitizer/lsan-virtual-address-space.h" + +#include "include/v8-platform.h" +#include "src/base/logging.h" + +#if defined(LEAK_SANITIZER) +#include +#endif + +namespace v8 { +namespace base { + +LsanVirtualAddressSpace::LsanVirtualAddressSpace( + std::unique_ptr vas) + : VirtualAddressSpace(vas->page_size(), vas->allocation_granularity(), + vas->base(), vas->size()), + vas_(std::move(vas)) { + DCHECK_NOT_NULL(vas_); +} + +Address LsanVirtualAddressSpace::AllocatePages(Address hint, size_t size, + size_t alignment, + PagePermissions permissions) { + Address result = vas_->AllocatePages(hint, size, alignment, permissions); +#if defined(LEAK_SANITIZER) + if (result != 0) { + __lsan_register_root_region(reinterpret_cast(result), size); + } +#endif // defined(LEAK_SANITIZER) + return result; +} + +bool LsanVirtualAddressSpace::FreePages(Address address, size_t size) { + bool result = vas_->FreePages(address, size); +#if defined(LEAK_SANITIZER) + if (result) { + __lsan_unregister_root_region(reinterpret_cast(address), size); + } +#endif // defined(LEAK_SANITIZER) + return result; +} + +std::unique_ptr LsanVirtualAddressSpace::AllocateSubspace( + Address hint, size_t size, size_t alignment, + PagePermissions max_permissions) { + auto subspace = + vas_->AllocateSubspace(hint, size, alignment, max_permissions); +#if defined(LEAK_SANITIZER) + if (subspace) { + subspace = std::make_unique(std::move(subspace)); + } +#endif // defined(LEAK_SANITIZER) + return subspace; +} + +} // namespace base +} // namespace v8 diff --git a/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h new file mode 100644 index 00000000000000..cc165617101292 --- /dev/null +++ b/deps/v8/src/base/sanitizer/lsan-virtual-address-space.h @@ -0,0 +1,63 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_SANITIZER_LSAN_VIRTUAL_ADDRESS_SPACE_H_ +#define V8_BASE_SANITIZER_LSAN_VIRTUAL_ADDRESS_SPACE_H_ + +#include "include/v8-platform.h" +#include "src/base/base-export.h" +#include "src/base/compiler-specific.h" + +namespace v8 { +namespace base { + +using Address = uintptr_t; + +// This is a v8::VirtualAddressSpace implementation that decorates provided page +// allocator object with leak sanitizer notifications when LEAK_SANITIZER is +// defined. 
+class V8_BASE_EXPORT LsanVirtualAddressSpace final + : public v8::VirtualAddressSpace { + public: + explicit LsanVirtualAddressSpace( + std::unique_ptr vas); + ~LsanVirtualAddressSpace() override = default; + + void SetRandomSeed(int64_t seed) override { + return vas_->SetRandomSeed(seed); + } + + Address RandomPageAddress() override { return vas_->RandomPageAddress(); } + + Address AllocatePages(Address hint, size_t size, size_t alignment, + PagePermissions permissions) override; + + bool FreePages(Address address, size_t size) override; + + bool SetPagePermissions(Address address, size_t size, + PagePermissions permissions) override { + return vas_->SetPagePermissions(address, size, permissions); + } + + bool CanAllocateSubspaces() override { return vas_->CanAllocateSubspaces(); } + + std::unique_ptr AllocateSubspace( + Address hint, size_t size, size_t alignment, + PagePermissions max_permissions) override; + + bool DiscardSystemPages(Address address, size_t size) override { + return vas_->DiscardSystemPages(address, size); + } + + bool DecommitPages(Address address, size_t size) override { + return vas_->DecommitPages(address, size); + } + + private: + std::unique_ptr vas_; +}; + +} // namespace base +} // namespace v8 +#endif // V8_BASE_SANITIZER_LSAN_VIRTUAL_ADDRESS_SPACE_H_ diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h index 9b866dde6b83ae..30850013dc2f08 100644 --- a/deps/v8/src/base/small-vector.h +++ b/deps/v8/src/base/small-vector.h @@ -11,14 +11,13 @@ #include "src/base/bits.h" #include "src/base/macros.h" -#include "src/base/platform/wrappers.h" namespace v8 { namespace base { // Minimal SmallVector implementation. Uses inline storage first, switches to -// malloc when it overflows. -template +// dynamic storage when it overflows. +template > class SmallVector { // Currently only support trivially copyable and trivially destructible data // types, as it uses memcpy to copy elements and never calls destructors. @@ -28,17 +27,31 @@ class SmallVector { public: static constexpr size_t kInlineSize = kSize; - SmallVector() = default; - explicit SmallVector(size_t size) { resize_no_init(size); } - SmallVector(const SmallVector& other) V8_NOEXCEPT { *this = other; } - SmallVector(SmallVector&& other) V8_NOEXCEPT { *this = std::move(other); } - SmallVector(std::initializer_list init) { + explicit SmallVector(const Allocator& allocator = Allocator()) + : allocator_(allocator) {} + explicit SmallVector(size_t size, const Allocator& allocator = Allocator()) + : allocator_(allocator) { + resize_no_init(size); + } + SmallVector(const SmallVector& other, + const Allocator& allocator = Allocator()) V8_NOEXCEPT + : allocator_(allocator) { + *this = other; + } + SmallVector(SmallVector&& other, + const Allocator& allocator = Allocator()) V8_NOEXCEPT + : allocator_(allocator) { + *this = std::move(other); + } + SmallVector(std::initializer_list init, + const Allocator& allocator = Allocator()) + : allocator_(allocator) { resize_no_init(init.size()); memcpy(begin_, init.begin(), sizeof(T) * init.size()); } ~SmallVector() { - if (is_big()) base::Free(begin_); + if (is_big()) FreeDynamicStorage(); } SmallVector& operator=(const SmallVector& other) V8_NOEXCEPT { @@ -46,8 +59,8 @@ class SmallVector { size_t other_size = other.size(); if (capacity() < other_size) { // Create large-enough heap-allocated storage. 
- if (is_big()) base::Free(begin_); - begin_ = reinterpret_cast(base::Malloc(sizeof(T) * other_size)); + if (is_big()) FreeDynamicStorage(); + begin_ = AllocateDynamicStorage(other_size); end_of_storage_ = begin_ + other_size; } memcpy(begin_, other.begin_, sizeof(T) * other_size); @@ -58,11 +71,11 @@ class SmallVector { SmallVector& operator=(SmallVector&& other) V8_NOEXCEPT { if (this == &other) return *this; if (other.is_big()) { - if (is_big()) base::Free(begin_); + if (is_big()) FreeDynamicStorage(); begin_ = other.begin_; end_ = other.end_; end_of_storage_ = other.end_of_storage_; - other.reset(); + other.reset_to_inline_storage(); } else { DCHECK_GE(capacity(), other.size()); // Sanity check. size_t other_size = other.size(); @@ -126,17 +139,12 @@ class SmallVector { end_ = begin_ + new_size; } - // Clear without freeing any storage. + // Clear without reverting back to inline storage. void clear() { end_ = begin_; } - // Clear and go back to inline storage. - void reset() { - begin_ = inline_storage_begin(); - end_ = begin_; - end_of_storage_ = begin_ + kInlineSize; - } - private: + V8_NO_UNIQUE_ADDRESS Allocator allocator_; + T* begin_ = inline_storage_begin(); T* end_ = begin_; T* end_of_storage_ = begin_ + kInlineSize; @@ -152,8 +160,7 @@ class SmallVector { size_t in_use = end_ - begin_; size_t new_capacity = base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity())); - T* new_storage = - reinterpret_cast(base::Malloc(sizeof(T) * new_capacity)); + T* new_storage = AllocateDynamicStorage(new_capacity); if (new_storage == nullptr) { // Should be: V8::FatalProcessOutOfMemory, but we don't include V8 from // base. The message is intentionally the same as FatalProcessOutOfMemory @@ -162,13 +169,30 @@ class SmallVector { FATAL("Fatal process out of memory: base::SmallVector::Grow"); } memcpy(new_storage, begin_, sizeof(T) * in_use); - if (is_big()) base::Free(begin_); + if (is_big()) FreeDynamicStorage(); begin_ = new_storage; end_ = new_storage + in_use; end_of_storage_ = new_storage + new_capacity; return end_; } + T* AllocateDynamicStorage(size_t number_of_elements) { + return allocator_.allocate(number_of_elements); + } + + void FreeDynamicStorage() { + DCHECK(is_big()); + allocator_.deallocate(begin_, end_of_storage_ - begin_); + } + + // Clear and go back to inline storage. Dynamic storage is *not* freed. For + // internal use only. + void reset_to_inline_storage() { + begin_ = inline_storage_begin(); + end_ = begin_; + end_of_storage_ = begin_ + kInlineSize; + } + bool is_big() const { return begin_ != inline_storage_begin(); } T* inline_storage_begin() { return reinterpret_cast(&inline_storage_); } diff --git a/deps/v8/src/base/virtual-address-space-page-allocator.cc b/deps/v8/src/base/virtual-address-space-page-allocator.cc new file mode 100644 index 00000000000000..297b9adbf95105 --- /dev/null +++ b/deps/v8/src/base/virtual-address-space-page-allocator.cc @@ -0,0 +1,69 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/base/virtual-address-space-page-allocator.h" + +namespace v8 { +namespace base { + +VirtualAddressSpacePageAllocator::VirtualAddressSpacePageAllocator( + v8::VirtualAddressSpace* vas) + : vas_(vas) {} + +void* VirtualAddressSpacePageAllocator::AllocatePages( + void* hint, size_t size, size_t alignment, + PageAllocator::Permission access) { + return reinterpret_cast( + vas_->AllocatePages(reinterpret_cast
<Address>(hint), size, alignment,
+                          static_cast<PagePermissions>(access)));
+}
+
+bool VirtualAddressSpacePageAllocator::FreePages(void* ptr, size_t size) {
+  MutexGuard guard(&mutex_);
+  Address address = reinterpret_cast<Address>
(ptr);
+  // Was this allocation resized previously? If so, use the original size.
+  auto result = resized_allocations_.find(address);
+  if (result != resized_allocations_.end()) {
+    size = result->second;
+    resized_allocations_.erase(result);
+  }
+  return vas_->FreePages(address, size);
+}
+
+bool VirtualAddressSpacePageAllocator::ReleasePages(void* ptr, size_t size,
+                                                    size_t new_size) {
+  // The VirtualAddressSpace class doesn't support this method because it
+  // can't be implemented properly on top of Windows placeholder mappings,
+  // which cannot be partially freed or resized while allocated. Instead, we
+  // emulate it by decommitting the released pages, which achieves what
+  // ReleasePages would normally do. However, the original size must still be
+  // passed to FreePages eventually, so we keep track of it here.
+  DCHECK_LE(new_size, size);
+
+  MutexGuard guard(&mutex_);
+  // Will fail if the allocation was resized previously, which is desired.
+  Address address = reinterpret_cast<Address>
(ptr);
+  resized_allocations_.insert({address, size});
+  return vas_->DecommitPages(address + new_size, size - new_size);
+}
+
+bool VirtualAddressSpacePageAllocator::SetPermissions(
+    void* address, size_t size, PageAllocator::Permission access) {
+  return vas_->SetPagePermissions(reinterpret_cast<Address>
(address), size,
+                                  static_cast<PagePermissions>(access));
+}
+
+bool VirtualAddressSpacePageAllocator::DiscardSystemPages(void* address,
+                                                          size_t size) {
+  return vas_->DiscardSystemPages(reinterpret_cast<Address>
(address), size);
+}
+
+bool VirtualAddressSpacePageAllocator::DecommitPages(void* address,
+                                                     size_t size) {
+  return vas_->DecommitPages(reinterpret_cast<Address>
(address), size); +} + +} // namespace base +} // namespace v8 diff --git a/deps/v8/src/base/virtual-address-space-page-allocator.h b/deps/v8/src/base/virtual-address-space-page-allocator.h new file mode 100644 index 00000000000000..46368783cc5620 --- /dev/null +++ b/deps/v8/src/base/virtual-address-space-page-allocator.h @@ -0,0 +1,72 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_ +#define V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_ + +#include + +#include "include/v8-platform.h" +#include "src/base/base-export.h" +#include "src/base/platform/platform.h" + +namespace v8 { +namespace base { + +// This class bridges a VirtualAddressSpace, the future memory management API, +// to a PageAllocator, the current API. +class V8_BASE_EXPORT VirtualAddressSpacePageAllocator + : public v8::PageAllocator { + public: + using Address = uintptr_t; + + explicit VirtualAddressSpacePageAllocator(v8::VirtualAddressSpace* vas); + + VirtualAddressSpacePageAllocator(const VirtualAddressSpacePageAllocator&) = + delete; + VirtualAddressSpacePageAllocator& operator=( + const VirtualAddressSpacePageAllocator&) = delete; + ~VirtualAddressSpacePageAllocator() override = default; + + size_t AllocatePageSize() override { return vas_->allocation_granularity(); } + + size_t CommitPageSize() override { return vas_->page_size(); } + + void SetRandomMmapSeed(int64_t seed) override { vas_->SetRandomSeed(seed); } + + void* GetRandomMmapAddr() override { + return reinterpret_cast(vas_->RandomPageAddress()); + } + + void* AllocatePages(void* hint, size_t size, size_t alignment, + Permission access) override; + + bool FreePages(void* address, size_t size) override; + + bool ReleasePages(void* address, size_t size, size_t new_size) override; + + bool SetPermissions(void* address, size_t size, Permission access) override; + + bool DiscardSystemPages(void* address, size_t size) override; + + bool DecommitPages(void* address, size_t size) override; + + private: + // Client of this class must keep the VirtualAddressSpace alive during the + // lifetime of this instance. + v8::VirtualAddressSpace* vas_; + + // As the VirtualAddressSpace class doesn't support ReleasePages, this map is + // required to keep track of the original size of resized page allocations. + // See the ReleasePages implementation. + std::unordered_map resized_allocations_; + + // Mutex guarding the above map. + Mutex mutex_; +}; + +} // namespace base +} // namespace v8 + +#endif // V8_BASE_VIRTUAL_ADDRESS_SPACE_PAGE_ALLOCATOR_H_ diff --git a/deps/v8/src/base/virtual-address-space.cc b/deps/v8/src/base/virtual-address-space.cc new file mode 100644 index 00000000000000..9907facb57e4c4 --- /dev/null +++ b/deps/v8/src/base/virtual-address-space.cc @@ -0,0 +1,262 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/base/virtual-address-space.h" + +#include "include/v8-platform.h" +#include "src/base/bits.h" +#include "src/base/platform/platform.h" +#include "src/base/platform/wrappers.h" + +namespace v8 { +namespace base { + +#define STATIC_ASSERT_ENUM(a, b) \ + static_assert(static_cast(a) == static_cast(b), \ + "mismatching enum: " #a) + +STATIC_ASSERT_ENUM(PagePermissions::kNoAccess, OS::MemoryPermission::kNoAccess); +STATIC_ASSERT_ENUM(PagePermissions::kReadWrite, + OS::MemoryPermission::kReadWrite); +STATIC_ASSERT_ENUM(PagePermissions::kReadWriteExecute, + OS::MemoryPermission::kReadWriteExecute); +STATIC_ASSERT_ENUM(PagePermissions::kReadExecute, + OS::MemoryPermission::kReadExecute); + +#undef STATIC_ASSERT_ENUM + +VirtualAddressSpace::VirtualAddressSpace() + : VirtualAddressSpaceBase(OS::CommitPageSize(), OS::AllocatePageSize(), + kNullAddress, + std::numeric_limits::max()) { +#if V8_OS_WIN + // On Windows, this additional step is required to lookup the VirtualAlloc2 + // and friends functions. + OS::EnsureWin32MemoryAPILoaded(); +#endif // V8_OS_WIN + DCHECK(bits::IsPowerOfTwo(page_size())); + DCHECK(bits::IsPowerOfTwo(allocation_granularity())); + DCHECK_GE(allocation_granularity(), page_size()); + DCHECK(IsAligned(allocation_granularity(), page_size())); +} + +void VirtualAddressSpace::SetRandomSeed(int64_t seed) { + OS::SetRandomMmapSeed(seed); +} + +Address VirtualAddressSpace::RandomPageAddress() { + return reinterpret_cast
<Address>(OS::GetRandomMmapAddr());
+}
+
+Address VirtualAddressSpace::AllocatePages(Address hint, size_t size,
+                                           size_t alignment,
+                                           PagePermissions permissions) {
+  DCHECK(IsAligned(alignment, allocation_granularity()));
+  DCHECK(IsAligned(hint, alignment));
+  DCHECK(IsAligned(size, allocation_granularity()));
+
+  return reinterpret_cast<Address>
(
+      OS::Allocate(reinterpret_cast<void*>(hint), size, alignment,
+                   static_cast<OS::MemoryPermission>(permissions)));
+}
+
+bool VirtualAddressSpace::FreePages(Address address, size_t size) {
+  DCHECK(IsAligned(address, allocation_granularity()));
+  DCHECK(IsAligned(size, allocation_granularity()));
+
+  return OS::Free(reinterpret_cast<void*>(address), size);
+}
+
+bool VirtualAddressSpace::SetPagePermissions(Address address, size_t size,
+                                             PagePermissions permissions) {
+  DCHECK(IsAligned(address, page_size()));
+  DCHECK(IsAligned(size, page_size()));
+
+  return OS::SetPermissions(reinterpret_cast<void*>(address), size,
+                            static_cast<OS::MemoryPermission>(permissions));
+}
+
+bool VirtualAddressSpace::CanAllocateSubspaces() {
+  return OS::CanReserveAddressSpace();
+}
+
+std::unique_ptr<v8::VirtualAddressSpace> VirtualAddressSpace::AllocateSubspace(
+    Address hint, size_t size, size_t alignment,
+    PagePermissions max_permissions) {
+  DCHECK(IsAligned(alignment, allocation_granularity()));
+  DCHECK(IsAligned(hint, alignment));
+  DCHECK(IsAligned(size, allocation_granularity()));
+
+  base::Optional<AddressSpaceReservation> reservation =
+      OS::CreateAddressSpaceReservation(
+          reinterpret_cast<void*>(hint), size, alignment,
+          static_cast<OS::MemoryPermission>(max_permissions));
+  if (!reservation.has_value())
+    return std::unique_ptr<v8::VirtualAddressSpace>();
+  return std::unique_ptr<v8::VirtualAddressSpace>(
+      new VirtualAddressSubspace(*reservation, this));
+}
+
+bool VirtualAddressSpace::DiscardSystemPages(Address address, size_t size) {
+  DCHECK(IsAligned(address, page_size()));
+  DCHECK(IsAligned(size, page_size()));
+
+  return OS::DiscardSystemPages(reinterpret_cast<void*>(address), size);
+}
+
+bool VirtualAddressSpace::DecommitPages(Address address, size_t size) {
+  DCHECK(IsAligned(address, page_size()));
+  DCHECK(IsAligned(size, page_size()));
+
+  return OS::DecommitPages(reinterpret_cast<void*>(address), size);
+}
+
+bool VirtualAddressSpace::FreeSubspace(VirtualAddressSubspace* subspace) {
+  return OS::FreeAddressSpaceReservation(subspace->reservation_);
+}
+
+VirtualAddressSubspace::VirtualAddressSubspace(
+    AddressSpaceReservation reservation, VirtualAddressSpaceBase* parent_space)
+    : VirtualAddressSpaceBase(
+          parent_space->page_size(), parent_space->allocation_granularity(),
+          reinterpret_cast
<Address>(reservation.base()), reservation.size()),
+      reservation_(reservation),
+      region_allocator_(reinterpret_cast<Address>
(reservation.base()), + reservation.size(), + parent_space->allocation_granularity()), + parent_space_(parent_space) { +#if V8_OS_WIN + // On Windows, the address space reservation needs to be split and merged at + // the OS level as well. + region_allocator_.set_on_split_callback([this](Address start, size_t size) { + DCHECK(IsAligned(start, allocation_granularity())); + CHECK(reservation_.SplitPlaceholder(reinterpret_cast(start), size)); + }); + region_allocator_.set_on_merge_callback([this](Address start, size_t size) { + DCHECK(IsAligned(start, allocation_granularity())); + CHECK(reservation_.MergePlaceholders(reinterpret_cast(start), size)); + }); +#endif // V8_OS_WIN +} + +VirtualAddressSubspace::~VirtualAddressSubspace() { + CHECK(parent_space_->FreeSubspace(this)); +} + +void VirtualAddressSubspace::SetRandomSeed(int64_t seed) { + MutexGuard guard(&mutex_); + rng_.SetSeed(seed); +} + +Address VirtualAddressSubspace::RandomPageAddress() { + MutexGuard guard(&mutex_); + // Note: the random numbers generated here aren't uniformly distributed if the + // size isn't a power of two. + Address addr = base() + (rng_.NextInt64() % size()); + return RoundDown(addr, allocation_granularity()); +} + +Address VirtualAddressSubspace::AllocatePages(Address hint, size_t size, + size_t alignment, + PagePermissions permissions) { + DCHECK(IsAligned(alignment, allocation_granularity())); + DCHECK(IsAligned(hint, alignment)); + DCHECK(IsAligned(size, allocation_granularity())); + + MutexGuard guard(&mutex_); + + Address address = region_allocator_.AllocateRegion(hint, size, alignment); + if (address == RegionAllocator::kAllocationFailure) return kNullAddress; + + if (!reservation_.Allocate(reinterpret_cast(address), size, + static_cast(permissions))) { + // This most likely means that we ran out of memory. + CHECK_EQ(size, region_allocator_.FreeRegion(address)); + return kNullAddress; + } + + return address; +} + +bool VirtualAddressSubspace::FreePages(Address address, size_t size) { + DCHECK(IsAligned(address, allocation_granularity())); + DCHECK(IsAligned(size, allocation_granularity())); + + MutexGuard guard(&mutex_); + if (region_allocator_.CheckRegion(address) != size) return false; + + // The order here is important: on Windows, the allocation first has to be + // freed to a placeholder before the placeholder can be merged (during the + // merge_callback) with any surrounding placeholder mappings. 
+ CHECK(reservation_.Free(reinterpret_cast(address), size)); + CHECK_EQ(size, region_allocator_.FreeRegion(address)); + return true; +} + +bool VirtualAddressSubspace::SetPagePermissions(Address address, size_t size, + PagePermissions permissions) { + DCHECK(IsAligned(address, page_size())); + DCHECK(IsAligned(size, page_size())); + + return reservation_.SetPermissions( + reinterpret_cast(address), size, + static_cast(permissions)); +} + +std::unique_ptr +VirtualAddressSubspace::AllocateSubspace(Address hint, size_t size, + size_t alignment, + PagePermissions max_permissions) { + DCHECK(IsAligned(alignment, allocation_granularity())); + DCHECK(IsAligned(hint, alignment)); + DCHECK(IsAligned(size, allocation_granularity())); + + MutexGuard guard(&mutex_); + + Address address = region_allocator_.AllocateRegion(hint, size, alignment); + if (address == RegionAllocator::kAllocationFailure) { + return std::unique_ptr(); + } + + base::Optional reservation = + reservation_.CreateSubReservation( + reinterpret_cast(address), size, + static_cast(max_permissions)); + if (!reservation.has_value()) { + CHECK_EQ(size, region_allocator_.FreeRegion(address)); + return nullptr; + } + return std::unique_ptr( + new VirtualAddressSubspace(*reservation, this)); +} + +bool VirtualAddressSubspace::DiscardSystemPages(Address address, size_t size) { + DCHECK(IsAligned(address, page_size())); + DCHECK(IsAligned(size, page_size())); + + return reservation_.DiscardSystemPages(reinterpret_cast(address), + size); +} + +bool VirtualAddressSubspace::DecommitPages(Address address, size_t size) { + DCHECK(IsAligned(address, page_size())); + DCHECK(IsAligned(size, page_size())); + + return reservation_.DecommitPages(reinterpret_cast(address), size); +} + +bool VirtualAddressSubspace::FreeSubspace(VirtualAddressSubspace* subspace) { + MutexGuard guard(&mutex_); + + AddressSpaceReservation reservation = subspace->reservation_; + Address base = reinterpret_cast
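<Address>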
(reservation.base()); + if (region_allocator_.FreeRegion(base) != reservation.size()) { + return false; + } + + return reservation_.FreeSubReservation(reservation); +} + +} // namespace base +} // namespace v8 diff --git a/deps/v8/src/base/virtual-address-space.h b/deps/v8/src/base/virtual-address-space.h new file mode 100644 index 00000000000000..5cfe462079987f --- /dev/null +++ b/deps/v8/src/base/virtual-address-space.h @@ -0,0 +1,136 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASE_VIRTUAL_ADDRESS_SPACE_H_ +#define V8_BASE_VIRTUAL_ADDRESS_SPACE_H_ + +#include "include/v8-platform.h" +#include "src/base/base-export.h" +#include "src/base/compiler-specific.h" +#include "src/base/platform/platform.h" +#include "src/base/region-allocator.h" + +namespace v8 { +namespace base { + +using Address = uintptr_t; +constexpr Address kNullAddress = 0; + +class VirtualAddressSubspace; + +/* + * Common parent class to implement deletion of subspaces. + */ +class VirtualAddressSpaceBase + : public NON_EXPORTED_BASE(::v8::VirtualAddressSpace) { + public: + using VirtualAddressSpace::VirtualAddressSpace; + + private: + friend VirtualAddressSubspace; + // Called by a subspace during destruction. Responsible for freeing the + // address space reservation and any other data associated with the subspace + // in the parent space. + virtual bool FreeSubspace(VirtualAddressSubspace* subspace) = 0; +}; + +/* + * The virtual address space of the current process. Conceptionally, there + * should only be one such "root" instance. However, in practice there is no + * issue with having multiple instances as the actual resources are managed by + * the OS kernel. + */ +class V8_BASE_EXPORT VirtualAddressSpace : public VirtualAddressSpaceBase { + public: + VirtualAddressSpace(); + ~VirtualAddressSpace() override = default; + + void SetRandomSeed(int64_t seed) override; + + Address RandomPageAddress() override; + + Address AllocatePages(Address hint, size_t size, size_t alignment, + PagePermissions access) override; + + bool FreePages(Address address, size_t size) override; + + bool SetPagePermissions(Address address, size_t size, + PagePermissions access) override; + + bool CanAllocateSubspaces() override; + + std::unique_ptr AllocateSubspace( + Address hint, size_t size, size_t alignment, + PagePermissions max_permissions) override; + + bool DiscardSystemPages(Address address, size_t size) override; + + bool DecommitPages(Address address, size_t size) override; + + private: + bool FreeSubspace(VirtualAddressSubspace* subspace) override; +}; + +/* + * A subspace of a parent virtual address space. This represents a reserved + * contiguous region of virtual address space in the current process. 
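 *
 * A typical flow (sketch, not part of this patch): carve a subspace out of
 * the root space, then place pages inside it:
 *
 *   // size is assumed to be a multiple of root.allocation_granularity().
 *   v8::base::VirtualAddressSpace root;
 *   auto subspace = root.AllocateSubspace(
 *       0, size, root.allocation_granularity(), PagePermissions::kReadWrite);
 *   if (subspace) {
 *     subspace->AllocatePages(0, size, root.allocation_granularity(),
 *                             PagePermissions::kReadWrite);
 *   }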
+ */ +class V8_BASE_EXPORT VirtualAddressSubspace : public VirtualAddressSpaceBase { + public: + ~VirtualAddressSubspace() override; + + void SetRandomSeed(int64_t seed) override; + + Address RandomPageAddress() override; + + Address AllocatePages(Address hint, size_t size, size_t alignment, + PagePermissions permissions) override; + + bool FreePages(Address address, size_t size) override; + + bool SetPagePermissions(Address address, size_t size, + PagePermissions permissions) override; + + bool CanAllocateSubspaces() override { return true; } + + std::unique_ptr AllocateSubspace( + Address hint, size_t size, size_t alignment, + PagePermissions max_permissions) override; + + bool DiscardSystemPages(Address address, size_t size) override; + + bool DecommitPages(Address address, size_t size) override; + + private: + // The VirtualAddressSpace class creates instances of this class when + // allocating sub spaces. + friend class v8::base::VirtualAddressSpace; + + bool FreeSubspace(VirtualAddressSubspace* subspace) override; + + VirtualAddressSubspace(AddressSpaceReservation reservation, + VirtualAddressSpaceBase* parent_space); + + // The address space reservation backing this subspace. + AddressSpaceReservation reservation_; + + // Mutex guarding the non-threadsafe RegionAllocator and + // RandomNumberGenerator. + Mutex mutex_; + + // RegionAllocator to manage the virtual address reservation and divide it + // into further regions as necessary. + RegionAllocator region_allocator_; + + // Random number generator for generating random addresses. + RandomNumberGenerator rng_; + + // Pointer to the parent space. Must be kept alive by the owner of this + // instance during its lifetime. + VirtualAddressSpaceBase* parent_space_; +}; + +} // namespace base +} // namespace v8 +#endif // V8_BASE_VIRTUAL_ADDRESS_SPACE_H_ diff --git a/deps/v8/src/base/win32-headers.h b/deps/v8/src/base/win32-headers.h index 95aedd8c95ef54..08eb44dc581472 100644 --- a/deps/v8/src/base/win32-headers.h +++ b/deps/v8/src/base/win32-headers.h @@ -33,10 +33,8 @@ #ifndef NOMCX #define NOMCX #endif -// Require Windows Vista or higher (this is required for the -// QueryThreadCycleTime function to be present). #ifndef _WIN32_WINNT -#define _WIN32_WINNT 0x0600 +#error This should be set in build config files. See build\config\win\BUILD.gn #endif #include // For raise(). diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc index a34764744bc16e..fe0e9d84cc007f 100644 --- a/deps/v8/src/baseline/baseline-batch-compiler.cc +++ b/deps/v8/src/baseline/baseline-batch-compiler.cc @@ -9,6 +9,8 @@ #include "src/flags/flags.h" #if ENABLE_SPARKPLUG +#include + #include "src/baseline/baseline-compiler.h" #include "src/codegen/compiler.h" #include "src/execution/isolate.h" @@ -56,7 +58,13 @@ class BaselineCompilerTask { if (FLAG_print_code) { code->Print(); } - shared_function_info_->set_baseline_code(*code, kReleaseStore); + // Don't install the code if the bytecode has been flushed or has + // already some baseline code installed. 
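    // (With concurrent Sparkplug the main thread may have flushed the
    // bytecode, or installed baseline code itself, while this task was
    // compiling in the background.)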
+ if (!shared_function_info_->is_compiled() || + shared_function_info_->HasBaselineCode()) { + return; + } + shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore); if (V8_LIKELY(FLAG_use_osr)) { // Arm back edges for OSR shared_function_info_->GetBytecodeArray(isolate) @@ -162,8 +170,12 @@ class ConcurrentBaselineCompiler { void Run(JobDelegate* delegate) override { while (!incoming_queue_->IsEmpty() && !delegate->ShouldYield()) { + // Since we're going to compile an entire batch, this guarantees that + // we only switch back the memory chunks to RX at the end. + CodePageCollectionMemoryModificationScope batch_alloc(isolate_->heap()); std::unique_ptr job; - incoming_queue_->Dequeue(&job); + if (!incoming_queue_->Dequeue(&job)) break; + DCHECK_NOT_NULL(job); job->Compile(); outgoing_queue_->Enqueue(std::move(job)); } @@ -171,6 +183,10 @@ class ConcurrentBaselineCompiler { } size_t GetMaxConcurrency(size_t worker_count) const override { + size_t max_threads = FLAG_concurrent_sparkplug_max_threads; + if (max_threads > 0) { + return std::min(max_threads, incoming_queue_->size()); + } return incoming_queue_->size(); } diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc index 071e46268efb8c..3ef0c68727b258 100644 --- a/deps/v8/src/baseline/baseline-compiler.cc +++ b/deps/v8/src/baseline/baseline-compiler.cc @@ -1043,62 +1043,62 @@ void BaselineCompiler::VisitShiftRightLogical() { } void BaselineCompiler::VisitAddSmi() { - CallBuiltin(kInterpreterAccumulatorRegister, - IntAsSmi(0), Index(1)); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitSubSmi() { - CallBuiltin(kInterpreterAccumulatorRegister, - IntAsSmi(0), Index(1)); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitMulSmi() { - CallBuiltin(kInterpreterAccumulatorRegister, - IntAsSmi(0), Index(1)); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitDivSmi() { - CallBuiltin(kInterpreterAccumulatorRegister, - IntAsSmi(0), Index(1)); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitModSmi() { - CallBuiltin(kInterpreterAccumulatorRegister, - IntAsSmi(0), Index(1)); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitExpSmi() { - CallBuiltin(kInterpreterAccumulatorRegister, - IntAsSmi(0), Index(1)); + CallBuiltin( + kInterpreterAccumulatorRegister, IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitBitwiseOrSmi() { - CallBuiltin(kInterpreterAccumulatorRegister, - IntAsSmi(0), Index(1)); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitBitwiseXorSmi() { - CallBuiltin(kInterpreterAccumulatorRegister, - IntAsSmi(0), Index(1)); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitBitwiseAndSmi() { - CallBuiltin(kInterpreterAccumulatorRegister, - IntAsSmi(0), Index(1)); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitShiftLeftSmi() { - CallBuiltin(kInterpreterAccumulatorRegister, - IntAsSmi(0), Index(1)); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitShiftRightSmi() { - CallBuiltin(kInterpreterAccumulatorRegister, - IntAsSmi(0), Index(1)); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } 
void BaselineCompiler::VisitShiftRightLogicalSmi() { - CallBuiltin( + CallBuiltin( kInterpreterAccumulatorRegister, IntAsSmi(0), Index(1)); } diff --git a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h index 33f792fce83e35..185bb349c286f9 100644 --- a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h +++ b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h @@ -414,26 +414,17 @@ void BaselineAssembler::Switch(Register reg, int case_value_base, Label** labels, int num_labels) { ASM_CODE_COMMENT(masm_); Label fallthrough; - if (case_value_base > 0) { + if (case_value_base != 0) { __ Sub_d(reg, reg, Operand(case_value_base)); } - ScratchRegisterScope scope(this); - Register scratch = scope.AcquireScratch(); __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual), reg, Operand(num_labels)); - int entry_size_log2 = 2; - __ pcaddi(scratch, 3); - __ Alsl_d(scratch, reg, scratch, entry_size_log2); - __ Jump(scratch); - { - TurboAssembler::BlockTrampolinePoolScope(masm()); - __ BlockTrampolinePoolFor(num_labels * kInstrSize); - for (int i = 0; i < num_labels; ++i) { - __ Branch(labels[i]); - } - __ bind(&fallthrough); - } + + __ GenerateSwitchTable(reg, num_labels, + [labels](size_t i) { return labels[i]; }); + + __ bind(&fallthrough); } #undef __ diff --git a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h index 996b4ba831f93a..9cc0e749bd31c1 100644 --- a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h +++ b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h @@ -426,29 +426,17 @@ void BaselineAssembler::Switch(Register reg, int case_value_base, Label** labels, int num_labels) { ASM_CODE_COMMENT(masm_); Label fallthrough; - if (case_value_base > 0) { + if (case_value_base != 0) { __ Subu(reg, reg, Operand(case_value_base)); } - ScratchRegisterScope scope(this); - Register temp = scope.AcquireScratch(); __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual), reg, Operand(num_labels)); - __ push(ra); - int entry_size_log2 = 3; - __ nal(); - __ addiu(reg, reg, 3); - __ Lsa(temp, ra, reg, entry_size_log2); - __ pop(ra); - __ Jump(temp); - { - TurboAssembler::BlockTrampolinePoolScope(masm()); - __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2); - for (int i = 0; i < num_labels; ++i) { - __ Branch(labels[i]); - } - __ bind(&fallthrough); - } + + __ GenerateSwitchTable(reg, num_labels, + [labels](size_t i) { return labels[i]; }); + + __ bind(&fallthrough); } #undef __ diff --git a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h index 18e0c3445dd312..3f4dd6d4559b93 100644 --- a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h +++ b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h @@ -424,29 +424,17 @@ void BaselineAssembler::Switch(Register reg, int case_value_base, Label** labels, int num_labels) { ASM_CODE_COMMENT(masm_); Label fallthrough; - if (case_value_base > 0) { + if (case_value_base != 0) { __ Dsubu(reg, reg, Operand(case_value_base)); } - ScratchRegisterScope scope(this); - Register temp = scope.AcquireScratch(); __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual), reg, Operand(num_labels)); - __ push(ra); - int entry_size_log2 = 3; - __ nal(); - __ daddiu(reg, reg, 3); - __ Dlsa(temp, ra, reg, entry_size_log2); - __ pop(ra); - __ Jump(temp); - { - 
TurboAssembler::BlockTrampolinePoolScope(masm()); - __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2); - for (int i = 0; i < num_labels; ++i) { - __ Branch(labels[i]); - } - __ bind(&fallthrough); - } + + __ GenerateSwitchTable(reg, num_labels, + [labels](size_t i) { return labels[i]; }); + + __ bind(&fallthrough); } #undef __ diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h index 85ada600f18d28..96420093d16790 100644 --- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h +++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h @@ -437,6 +437,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base, CHECK(is_int32(imm64 + 0x800)); int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12); int32_t Lo12 = (int32_t)imm64 << 20 >> 20; + __ BlockTrampolinePoolFor(2); __ auipc(t6, Hi20); // Read PC + Hi20 into t6 __ addi(t6, t6, Lo12); // jump PC + Hi20 + Lo12 diff --git a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h index c73f080ecb8db0..ce7afbf4ea3315 100644 --- a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h +++ b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h @@ -13,78 +13,148 @@ namespace v8 { namespace internal { namespace baseline { +namespace detail { + +static constexpr Register kScratchRegisters[] = {r8, r9, ip, r1}; +static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters); + +#ifdef DEBUG +inline bool Clobbers(Register target, MemOperand op) { + return op.rb() == target || op.rx() == target; +} +#endif +} // namespace detail + class BaselineAssembler::ScratchRegisterScope { public: explicit ScratchRegisterScope(BaselineAssembler* assembler) : assembler_(assembler), prev_scope_(assembler->scratch_register_scope_), - wrapped_scope_(assembler->masm()) { - if (!assembler_->scratch_register_scope_) { - // If we haven't opened a scratch scope yet, for the first one add a - // couple of extra registers. - DCHECK(wrapped_scope_.CanAcquire()); - wrapped_scope_.Include(r8, r9); - wrapped_scope_.Include(kInterpreterBytecodeOffsetRegister); - } + registers_used_(prev_scope_ == nullptr ? 0 + : prev_scope_->registers_used_) { assembler_->scratch_register_scope_ = this; } ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; } - Register AcquireScratch() { return wrapped_scope_.Acquire(); } + Register AcquireScratch() { + DCHECK_LT(registers_used_, detail::kNumScratchRegisters); + return detail::kScratchRegisters[registers_used_++]; + } private: BaselineAssembler* assembler_; ScratchRegisterScope* prev_scope_; - UseScratchRegisterScope wrapped_scope_; + int registers_used_; }; // TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler. 
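The rewritten s390 ScratchRegisterScope above drops the wrapped UseScratchRegisterScope in favor of a fixed register pool plus a per-scope high-water mark: a nested scope starts allocating where its parent stopped, so scopes hand out disjoint registers with no cleanup work on destruction. A compilable sketch of that discipline (register names are placeholders):

#include <cassert>

enum Register { r8, r9, ip, r1 };
static constexpr Register kScratchRegisters[] = {r8, r9, ip, r1};
static constexpr int kNumScratchRegisters = 4;

class ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(ScratchRegisterScope* prev)
      : registers_used_(prev == nullptr ? 0 : prev->registers_used_) {}
  Register AcquireScratch() {
    // Hand out the next unused register from the fixed pool; nested
    // scopes resume at the parent's mark, so allocations never collide.
    assert(registers_used_ < kNumScratchRegisters);
    return kScratchRegisters[registers_used_++];
  }
 private:
  int registers_used_;
};

The MIPS and LoongArch Switch hunks above follow the same simplification theme: the hand-rolled pc-relative jump tables are replaced by a shared GenerateSwitchTable helper that asks a callback for the i-th label.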
enum class Condition : uint32_t { - kEqual = static_cast(eq), - kNotEqual = static_cast(ne), + kEqual, + kNotEqual, - kLessThan = static_cast(lt), - kGreaterThan = static_cast(gt), - kLessThanEqual = static_cast(le), - kGreaterThanEqual = static_cast(ge), + kLessThan, + kGreaterThan, + kLessThanEqual, + kGreaterThanEqual, - kUnsignedLessThan = static_cast(lo), - kUnsignedGreaterThan = static_cast(hi), - kUnsignedLessThanEqual = static_cast(ls), - kUnsignedGreaterThanEqual = static_cast(hs), + kUnsignedLessThan, + kUnsignedGreaterThan, + kUnsignedLessThanEqual, + kUnsignedGreaterThanEqual, - kOverflow = static_cast(vs), - kNoOverflow = static_cast(vc), + kOverflow, + kNoOverflow, - kZero = static_cast(eq), - kNotZero = static_cast(ne), + kZero, + kNotZero }; inline internal::Condition AsMasmCondition(Condition cond) { - UNIMPLEMENTED(); - return static_cast(cond); + STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition)); + switch (cond) { + case Condition::kEqual: + return eq; + case Condition::kNotEqual: + return ne; + + case Condition::kLessThan: + return lt; + case Condition::kGreaterThan: + return gt; + case Condition::kLessThanEqual: + return le; + case Condition::kGreaterThanEqual: + return ge; + + case Condition::kUnsignedLessThan: + return lt; + case Condition::kUnsignedGreaterThan: + return gt; + case Condition::kUnsignedLessThanEqual: + return le; + case Condition::kUnsignedGreaterThanEqual: + return ge; + + case Condition::kOverflow: + return overflow; + case Condition::kNoOverflow: + return nooverflow; + + case Condition::kZero: + return eq; + case Condition::kNotZero: + return ne; + default: + UNREACHABLE(); + } } -namespace detail { +inline bool IsSignedCondition(Condition cond) { + switch (cond) { + case Condition::kEqual: + case Condition::kNotEqual: + case Condition::kLessThan: + case Condition::kGreaterThan: + case Condition::kLessThanEqual: + case Condition::kGreaterThanEqual: + case Condition::kOverflow: + case Condition::kNoOverflow: + case Condition::kZero: + case Condition::kNotZero: + return true; + + case Condition::kUnsignedLessThan: + case Condition::kUnsignedGreaterThan: + case Condition::kUnsignedLessThanEqual: + case Condition::kUnsignedGreaterThanEqual: + return false; + + default: + UNREACHABLE(); + } +} -#ifdef DEBUG -inline bool Clobbers(Register target, MemOperand op) { - UNIMPLEMENTED(); - return false; +#define __ assm->masm()-> +// s390x helper +void JumpIfHelper(BaselineAssembler* assm, Condition cc, Register lhs, + Register rhs, Label* target) { + if (IsSignedCondition(cc)) { + __ CmpS64(lhs, rhs); + } else { + __ CmpU64(lhs, rhs); + } + __ b(AsMasmCondition(cc), target); } -#endif -} // namespace detail +#undef __ #define __ masm_-> MemOperand BaselineAssembler::RegisterFrameOperand( interpreter::Register interpreter_register) { - UNIMPLEMENTED(); return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); } MemOperand BaselineAssembler::FeedbackVectorOperand() { - UNIMPLEMENTED(); return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); } @@ -93,83 +163,129 @@ void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); } void BaselineAssembler::JumpTarget() { // NOP on arm. 
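One detail of the condition mapping above is easy to misread: AsMasmCondition returns the same branch codes (lt, gt, le, ge) for the unsigned conditions as for the signed ones. That is intentional on s390, where signedness is selected by the compare instruction rather than the branch, which is exactly what IsSignedCondition plus JumpIfHelper encode (CmpS64 for signed, CmpU64 for unsigned, then one shared branch). A toy model of the split:

#include <cstdint>

enum class Condition { kLessThan, kUnsignedLessThan };

bool IsSignedCondition(Condition cond) {
  return cond == Condition::kLessThan;
}

// Stand-in for CmpS64/CmpU64 followed by b(lt, ...): the branch condition
// is identical; only the compare reinterprets the operand bits.
bool LessThanTaken(Condition cond, uint64_t lhs, uint64_t rhs) {
  if (IsSignedCondition(cond)) {
    return static_cast<int64_t>(lhs) < static_cast<int64_t>(rhs);
  }
  return lhs < rhs;
}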
- UNIMPLEMENTED(); } void BaselineAssembler::Jump(Label* target, Label::Distance distance) { - UNIMPLEMENTED(); + __ b(target); } void BaselineAssembler::JumpIfRoot(Register value, RootIndex index, Label* target, Label::Distance) { - UNIMPLEMENTED(); + __ JumpIfRoot(value, index, target); } void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index, Label* target, Label::Distance) { - UNIMPLEMENTED(); + __ JumpIfNotRoot(value, index, target); } void BaselineAssembler::JumpIfSmi(Register value, Label* target, Label::Distance) { - UNIMPLEMENTED(); + __ JumpIfSmi(value, target); } void BaselineAssembler::JumpIfNotSmi(Register value, Label* target, Label::Distance) { - UNIMPLEMENTED(); + __ JumpIfNotSmi(value, target); +} + +void BaselineAssembler::CallBuiltin(Builtin builtin) { + if (masm()->options().short_builtin_calls) { + // Generate pc-relative call. + __ CallBuiltin(builtin); + } else { + ScratchRegisterScope temps(this); + Register temp = temps.AcquireScratch(); + __ LoadEntryFromBuiltin(builtin, temp); + __ Call(temp); + } } -void BaselineAssembler::CallBuiltin(Builtin builtin) { UNIMPLEMENTED(); } - void BaselineAssembler::TailCallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(masm_, __ CommentForOffHeapTrampoline("tail call", builtin)); - UNIMPLEMENTED(); + if (masm()->options().short_builtin_calls) { + // Generate pc-relative call. + __ TailCallBuiltin(builtin); + } else { + ScratchRegisterScope temps(this); + Register temp = temps.AcquireScratch(); + __ LoadEntryFromBuiltin(builtin, temp); + __ Jump(temp); + } } void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc, Label* target, Label::Distance) { - UNIMPLEMENTED(); + __ AndP(r0, value, Operand(mask)); + __ b(AsMasmCondition(cc), target); } void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs, Label* target, Label::Distance) { - UNIMPLEMENTED(); + if (IsSignedCondition(cc)) { + __ CmpS64(lhs, rhs); + } else { + __ CmpU64(lhs, rhs); + } + __ b(AsMasmCondition(cc), target); } + void BaselineAssembler::JumpIfObjectType(Condition cc, Register object, InstanceType instance_type, Register map, Label* target, Label::Distance) { - UNIMPLEMENTED(); + ScratchRegisterScope temps(this); + Register type = temps.AcquireScratch(); + __ LoadMap(map, object); + __ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset)); + JumpIf(cc, type, Operand(instance_type), target); } void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map, InstanceType instance_type, Label* target, Label::Distance) { - UNIMPLEMENTED(); + ScratchRegisterScope temps(this); + Register type = temps.AcquireScratch(); + if (FLAG_debug_code) { + __ AssertNotSmi(map); + __ CompareObjectType(map, type, type, MAP_TYPE); + __ Assert(eq, AbortReason::kUnexpectedValue); + } + __ LoadU16(type, FieldMemOperand(map, Map::kInstanceTypeOffset)); + JumpIf(cc, type, Operand(instance_type), target); } void BaselineAssembler::JumpIfPointer(Condition cc, Register value, MemOperand operand, Label* target, Label::Distance) { - UNIMPLEMENTED(); + ScratchRegisterScope temps(this); + Register tmp = temps.AcquireScratch(); + __ LoadU64(tmp, operand); + JumpIfHelper(this, cc, value, tmp, target); } + void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi, Label* target, Label::Distance) { - UNIMPLEMENTED(); + __ AssertSmi(value); + __ LoadSmiLiteral(r0, smi); + JumpIfHelper(this, cc, value, r0, target); } void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs, Label* target, 
Label::Distance) { - UNIMPLEMENTED(); + __ AssertSmi(lhs); + __ AssertSmi(rhs); + JumpIfHelper(this, cc, lhs, rhs, target); } void BaselineAssembler::JumpIfTagged(Condition cc, Register value, MemOperand operand, Label* target, Label::Distance) { - UNIMPLEMENTED(); + __ LoadU64(r0, operand); + JumpIfHelper(this, cc, value, r0, target); } void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand, Register value, Label* target, Label::Distance) { - UNIMPLEMENTED(); + __ LoadU64(r0, operand); + JumpIfHelper(this, cc, r0, value, target); } void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte, Label* target, Label::Distance) { - UNIMPLEMENTED(); + JumpIf(cc, value, Operand(byte), target); } void BaselineAssembler::Move(interpreter::Register output, Register source) { diff --git a/deps/v8/src/bigint/CPPLINT.cfg b/deps/v8/src/bigint/CPPLINT.cfg new file mode 100644 index 00000000000000..663e4f7d126e41 --- /dev/null +++ b/deps/v8/src/bigint/CPPLINT.cfg @@ -0,0 +1 @@ +filter=-readability/check \ No newline at end of file diff --git a/deps/v8/src/bigint/bigint-internal.cc b/deps/v8/src/bigint/bigint-internal.cc index 2d74f3572cc6fd..35a9e5b3f2377f 100644 --- a/deps/v8/src/bigint/bigint-internal.cc +++ b/deps/v8/src/bigint/bigint-internal.cc @@ -52,7 +52,7 @@ void ProcessorImpl::Multiply(RWDigits Z, Digits X, Digits Y) { void ProcessorImpl::Divide(RWDigits Q, Digits A, Digits B) { A.Normalize(); B.Normalize(); - DCHECK(B.len() > 0); // NOLINT(readability/check) + DCHECK(B.len() > 0); int cmp = Compare(A, B); if (cmp < 0) return Q.Clear(); if (cmp == 0) { @@ -82,7 +82,7 @@ void ProcessorImpl::Divide(RWDigits Q, Digits A, Digits B) { void ProcessorImpl::Modulo(RWDigits R, Digits A, Digits B) { A.Normalize(); B.Normalize(); - DCHECK(B.len() > 0); // NOLINT(readability/check) + DCHECK(B.len() > 0); int cmp = Compare(A, B); if (cmp < 0) { for (int i = 0; i < B.len(); i++) R[i] = B[i]; diff --git a/deps/v8/src/bigint/bigint.h b/deps/v8/src/bigint/bigint.h index 28df2936ac2407..300229c97df9b4 100644 --- a/deps/v8/src/bigint/bigint.h +++ b/deps/v8/src/bigint/bigint.h @@ -253,6 +253,14 @@ void BitwiseOr_PosNeg(RWDigits Z, Digits X, Digits Y); void BitwiseXor_PosPos(RWDigits Z, Digits X, Digits Y); void BitwiseXor_NegNeg(RWDigits Z, Digits X, Digits Y); void BitwiseXor_PosNeg(RWDigits Z, Digits X, Digits Y); +void LeftShift(RWDigits Z, Digits X, digit_t shift); +// RightShiftState is provided by RightShift_ResultLength and used by the actual +// RightShift to avoid some recomputation. +struct RightShiftState { + bool must_round_down = false; +}; +void RightShift(RWDigits Z, Digits X, digit_t shift, + const RightShiftState& state); // Z := (least significant n bits of X, interpreted as a signed n-bit integer). // Returns true if the result is negative; Z will hold the absolute value. @@ -352,6 +360,17 @@ inline int BitwiseXor_PosNeg_ResultLength(int x_length, int y_length) { // Result length growth example: 3 ^ -1 == -4 (2-bit inputs, 3-bit result). 
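The RightShiftState declared in the bigint.h hunk above is a small handshake: RightShift_ResultLength already has to inspect the shifted-out bits to size the result, so it records whether the round-toward-negative-infinity correction applies, and RightShift consumes the flag instead of recomputing it. A single-digit toy of the two phases (the real code operates on digit arrays; assumes 0 < shift < 64):

#include <cstdint>

struct RightShiftState {
  bool must_round_down = false;
};

// Analysis pass: note whether a negative value loses any set bits.
void AnalyzeRightShift(uint64_t magnitude, unsigned shift, bool negative,
                       RightShiftState* state) {
  uint64_t dropped = magnitude & ((uint64_t{1} << shift) - 1);
  state->must_round_down = negative && dropped != 0;
}

// Shift pass: reuse the recorded answer.
uint64_t DoRightShift(uint64_t magnitude, unsigned shift,
                      const RightShiftState& state) {
  uint64_t z = magnitude >> shift;
  if (state.must_round_down) z += 1;  // so -5n >> 1n == -3n, not -2n
  return z;
}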
return std::max(x_length, y_length) + 1; } +inline int LeftShift_ResultLength(int x_length, + digit_t x_most_significant_digit, + digit_t shift) { + int digit_shift = static_cast(shift / kDigitBits); + int bits_shift = static_cast(shift % kDigitBits); + bool grow = bits_shift != 0 && + (x_most_significant_digit >> (kDigitBits - bits_shift)) != 0; + return x_length + digit_shift + grow; +} +int RightShift_ResultLength(Digits X, bool x_sign, digit_t shift, + RightShiftState* state); // Returns -1 if this "asIntN" operation would be a no-op. int AsIntNResultLength(Digits X, bool x_negative, int n); diff --git a/deps/v8/src/bigint/bitwise.cc b/deps/v8/src/bigint/bitwise.cc index 087847c1181a63..c4cec22b539671 100644 --- a/deps/v8/src/bigint/bitwise.cc +++ b/deps/v8/src/bigint/bitwise.cc @@ -33,8 +33,8 @@ void BitwiseAnd_NegNeg(RWDigits Z, Digits X, Digits Y) { // (At least) one of the next two loops will perform zero iterations: for (; i < X.len(); i++) Z[i] = digit_sub(X[i], x_borrow, &x_borrow); for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], y_borrow, &y_borrow); - DCHECK(x_borrow == 0); // NOLINT(readability/check) - DCHECK(y_borrow == 0); // NOLINT(readability/check) + DCHECK(x_borrow == 0); + DCHECK(y_borrow == 0); for (; i < Z.len(); i++) Z[i] = 0; Add(Z, 1); } @@ -83,7 +83,7 @@ void BitwiseOr_PosNeg(RWDigits Z, Digits X, Digits Y) { int i = 0; for (; i < pairs; i++) Z[i] = digit_sub(Y[i], borrow, &borrow) & ~X[i]; for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], borrow, &borrow); - DCHECK(borrow == 0); // NOLINT(readability/check) + DCHECK(borrow == 0); for (; i < Z.len(); i++) Z[i] = 0; Add(Z, 1); } @@ -114,8 +114,8 @@ void BitwiseXor_NegNeg(RWDigits Z, Digits X, Digits Y) { // (At least) one of the next two loops will perform zero iterations: for (; i < X.len(); i++) Z[i] = digit_sub(X[i], x_borrow, &x_borrow); for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], y_borrow, &y_borrow); - DCHECK(x_borrow == 0); // NOLINT(readability/check) - DCHECK(y_borrow == 0); // NOLINT(readability/check) + DCHECK(x_borrow == 0); + DCHECK(y_borrow == 0); for (; i < Z.len(); i++) Z[i] = 0; } @@ -128,11 +128,96 @@ void BitwiseXor_PosNeg(RWDigits Z, Digits X, Digits Y) { // (At least) one of the next two loops will perform zero iterations: for (; i < X.len(); i++) Z[i] = X[i]; for (; i < Y.len(); i++) Z[i] = digit_sub(Y[i], borrow, &borrow); - DCHECK(borrow == 0); // NOLINT(readability/check) + DCHECK(borrow == 0); for (; i < Z.len(); i++) Z[i] = 0; Add(Z, 1); } +void LeftShift(RWDigits Z, Digits X, digit_t shift) { + int digit_shift = static_cast(shift / kDigitBits); + int bits_shift = static_cast(shift % kDigitBits); + + int i = 0; + for (; i < digit_shift; ++i) Z[i] = 0; + if (bits_shift == 0) { + for (; i < X.len() + digit_shift; ++i) Z[i] = X[i - digit_shift]; + for (; i < Z.len(); ++i) Z[i] = 0; + } else { + digit_t carry = 0; + for (; i < X.len() + digit_shift; ++i) { + digit_t d = X[i - digit_shift]; + Z[i] = (d << bits_shift) | carry; + carry = d >> (kDigitBits - bits_shift); + } + if (carry != 0) Z[i++] = carry; + for (; i < Z.len(); ++i) Z[i] = 0; + } +} + +int RightShift_ResultLength(Digits X, bool x_sign, digit_t shift, + RightShiftState* state) { + int digit_shift = static_cast(shift / kDigitBits); + int bits_shift = static_cast(shift % kDigitBits); + int result_length = X.len() - digit_shift; + if (result_length <= 0) return 0; + + // For negative numbers, round down if any bit was shifted out (so that e.g. + // -5n >> 1n == -3n and not -2n). 
Check now whether this will happen and + // whether it can cause overflow into a new digit. + bool must_round_down = false; + if (x_sign) { + const digit_t mask = (static_cast(1) << bits_shift) - 1; + if ((X[digit_shift] & mask) != 0) { + must_round_down = true; + } else { + for (int i = 0; i < digit_shift; i++) { + if (X[i] != 0) { + must_round_down = true; + break; + } + } + } + } + // If bits_shift is non-zero, it frees up bits, preventing overflow. + if (must_round_down && bits_shift == 0) { + // Overflow cannot happen if the most significant digit has unset bits. + const bool rounding_can_overflow = digit_ismax(X.msd()); + if (rounding_can_overflow) ++result_length; + } + + if (state) { + DCHECK(!must_round_down || x_sign); + state->must_round_down = must_round_down; + } + return result_length; +} + +void RightShift(RWDigits Z, Digits X, digit_t shift, + const RightShiftState& state) { + int digit_shift = static_cast(shift / kDigitBits); + int bits_shift = static_cast(shift % kDigitBits); + + int i = 0; + if (bits_shift == 0) { + for (; i < X.len() - digit_shift; ++i) Z[i] = X[i + digit_shift]; + } else { + digit_t carry = X[digit_shift] >> bits_shift; + for (; i < X.len() - digit_shift - 1; ++i) { + digit_t d = X[i + digit_shift + 1]; + Z[i] = (d << (kDigitBits - bits_shift)) | carry; + carry = d >> bits_shift; + } + Z[i++] = carry; + } + for (; i < Z.len(); ++i) Z[i] = 0; + + if (state.must_round_down) { + // Rounding down (a negative value) means adding one to + // its absolute value. This cannot overflow. + Add(Z, 1); + } +} + namespace { // Z := (least significant n bits of X). @@ -175,7 +260,7 @@ void TruncateAndSubFromPowerOfTwo(RWDigits Z, Digits X, int n) { msd = (msd << drop) >> drop; digit_t minuend_msd = static_cast(1) << bits; digit_t result_msd = digit_sub2(minuend_msd, msd, borrow, &borrow); - DCHECK(borrow == 0); // result < 2^n. NOLINT(readability/check) + DCHECK(borrow == 0); // result < 2^n. // If all subtracted bits were zero, we have to get rid of the // materialized minuend_msd again. Z[last] = result_msd & (minuend_msd - 1); @@ -203,9 +288,8 @@ int AsIntNResultLength(Digits X, bool x_negative, int n) { } bool AsIntN(RWDigits Z, Digits X, bool x_negative, int n) { - DCHECK(X.len() > 0); // NOLINT(readability/check) - DCHECK(n > 0); // NOLINT(readability/check) - // NOLINTNEXTLINE(readability/check) + DCHECK(X.len() > 0); + DCHECK(n > 0); DCHECK(AsIntNResultLength(X, x_negative, n) > 0); int needed_digits = DIV_CEIL(n, kDigitBits); digit_t top_digit = X[needed_digits - 1]; @@ -250,7 +334,7 @@ int AsUintN_Pos_ResultLength(Digits X, int n) { } void AsUintN_Pos(RWDigits Z, Digits X, int n) { - DCHECK(AsUintN_Pos_ResultLength(X, n) > 0); // NOLINT(readability/check) + DCHECK(AsUintN_Pos_ResultLength(X, n) > 0); TruncateToNBits(Z, X, n); } diff --git a/deps/v8/src/bigint/digit-arithmetic.h b/deps/v8/src/bigint/digit-arithmetic.h index 96ac949eb754d7..d9113efc91e63d 100644 --- a/deps/v8/src/bigint/digit-arithmetic.h +++ b/deps/v8/src/bigint/digit-arithmetic.h @@ -17,6 +17,8 @@ static constexpr int kHalfDigitBits = kDigitBits / 2; static constexpr digit_t kHalfDigitBase = digit_t{1} << kHalfDigitBits; static constexpr digit_t kHalfDigitMask = kHalfDigitBase - 1; +constexpr bool digit_ismax(digit_t x) { return static_cast(~x) == 0; } + // {carry} will be set to 0 or 1. 
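digit_ismax above exists for one caller: the overflow test in RightShift_ResultLength. The round-down correction adds 1 to the magnitude, and that addition can spill into a fresh digit only when the shift freed no bits inside the digit (bits_shift == 0) and every retained bit of the most significant digit is already set. Condensed:

#include <cstdint>

constexpr bool digit_ismax(uint64_t x) { return ~x == 0; }

int GrowForRounding(int result_length, uint64_t most_significant_digit,
                    int bits_shift, bool must_round_down) {
  // Adding the rounding 1 carries out of the top digit only if that digit
  // is all ones and the shift created no headroom within it.
  if (must_round_down && bits_shift == 0 &&
      digit_ismax(most_significant_digit)) {
    return result_length + 1;
  }
  return result_length;
}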
inline digit_t digit_add2(digit_t a, digit_t b, digit_t* carry) { #if HAVE_TWODIGIT_T @@ -118,7 +120,7 @@ static inline digit_t digit_div(digit_t high, digit_t low, digit_t divisor, digit_t* remainder) { #if defined(DCHECK) DCHECK(high < divisor); - DCHECK(divisor != 0); // NOLINT(readability/check) + DCHECK(divisor != 0); #endif #if __x86_64__ && (__GNUC__ || __clang__) digit_t quotient; diff --git a/deps/v8/src/bigint/div-barrett.cc b/deps/v8/src/bigint/div-barrett.cc index 39f09d0ac15804..306dec8b25fc19 100644 --- a/deps/v8/src/bigint/div-barrett.cc +++ b/deps/v8/src/bigint/div-barrett.cc @@ -41,7 +41,7 @@ void DcheckIntegerPartRange(Digits X, digit_t min, digit_t max) { // See comments at {Invert} and {InvertNewton} below for details. void ProcessorImpl::InvertBasecase(RWDigits Z, Digits V, RWDigits scratch) { DCHECK(Z.len() > V.len()); - DCHECK(V.len() > 0); // NOLINT(readability/check) + DCHECK(V.len() > 0); DCHECK(scratch.len() >= 2 * V.len()); int n = V.len(); RWDigits X(scratch, 0, 2 * n); @@ -49,7 +49,7 @@ void ProcessorImpl::InvertBasecase(RWDigits Z, Digits V, RWDigits scratch) { int i = 0; for (; i < n; i++) X[i] = 0; for (; i < 2 * n; i++) X[i] = digit_sub2(0, V[i - n], borrow, &borrow); - DCHECK(borrow == 1); // NOLINT(readability/check) + DCHECK(borrow == 1); RWDigits R(nullptr, 0); // We don't need the remainder. if (n < kBurnikelThreshold) { DivideSchoolbook(Z, R, X, V); @@ -76,7 +76,7 @@ void ProcessorImpl::InvertNewton(RWDigits Z, Digits V, RWDigits scratch) { const int kUOffset = vn + kInvertNewtonExtraSpace; // The base case won't work otherwise. - DCHECK(V.len() >= 3); // NOLINT(readability/check) + DCHECK(V.len() >= 3); constexpr int kBasecasePrecision = kNewtonInversionThreshold - 1; // V must have more digits than the basecase. @@ -147,17 +147,17 @@ void ProcessorImpl::InvertNewton(RWDigits Z, Digits V, RWDigits scratch) { if (U.len() <= vn) { // Normal subtraction. // This is not the last iteration. - DCHECK(iteration > 0); // NOLINT(readability/check) + DCHECK(iteration > 0); Z.set_len(U.len()); digit_t borrow = SubtractAndReturnBorrow(Z, W, U); - DCHECK(borrow == 0); // NOLINT(readability/check) + DCHECK(borrow == 0); USE(borrow); DcheckIntegerPartRange(Z, 1, 2); } else { // Truncate some least significant digits so that we get vn // fraction digits, and compute the integer digit separately. // This is the last iteration. - DCHECK(iteration == 0); // NOLINT(readability/check) + DCHECK(iteration == 0); Z.set_len(vn); Digits W_part(W, W.len() - vn - 1, vn); Digits U_part(U, U.len() - vn - 1, vn); @@ -186,7 +186,7 @@ void ProcessorImpl::InvertNewton(RWDigits Z, Digits V, RWDigits scratch) { // Needs InvertScratchSpace(V.len) digits of scratch space. void ProcessorImpl::Invert(RWDigits Z, Digits V, RWDigits scratch) { DCHECK(Z.len() > V.len()); - DCHECK(V.len() >= 1); // NOLINT(readability/check) + DCHECK(V.len() >= 1); DCHECK(IsBitNormalized(V)); DCHECK(scratch.len() >= InvertScratchSpace(V.len())); @@ -218,7 +218,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B, DCHECK(R.len() >= B.len()); DCHECK(A.len() > B.len()); // Careful: This is *not* '>=' ! 
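The digit_div preconditions asserted above (divisor != 0 and high < divisor) are what guarantee that the quotient of the two-digit dividend fits in a single digit. A portable sketch using the compiler's 128-bit integer (the real helper prefers an inline divq path on x86-64; assumes GCC/Clang __int128 support):

#include <cassert>
#include <cstdint>

// Divides the double-digit value [high, low] by divisor.
uint64_t digit_div(uint64_t high, uint64_t low, uint64_t divisor,
                   uint64_t* remainder) {
  assert(divisor != 0);
  assert(high < divisor);  // ensures the quotient fits in 64 bits
  unsigned __int128 dividend =
      (static_cast<unsigned __int128>(high) << 64) | low;
  *remainder = static_cast<uint64_t>(dividend % divisor);
  return static_cast<uint64_t>(dividend / divisor);
}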
DCHECK(A.len() <= 2 * B.len()); - DCHECK(B.len() > 0); // NOLINT(readability/check) + DCHECK(B.len() > 0); DCHECK(IsBitNormalized(B)); DCHECK(I.len() == A.len() - B.len()); DCHECK(scratch.len() >= DivideBarrettScratchSpace(A.len())); @@ -257,7 +257,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B, do { r_high += AddAndReturnCarry(R, R, B); q_sub++; - DCHECK(q_sub <= 5); // NOLINT(readability/check) + DCHECK(q_sub <= 5); } while (r_high != 0); Subtract(Q, q_sub); } else { @@ -266,7 +266,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B, // (5c): R >= B, so R -= B r_high -= SubtractAndReturnBorrow(R, R, B); q_add++; - DCHECK(q_add <= 5); // NOLINT(readability/check) + DCHECK(q_add <= 5); } Add(Q, q_add); } @@ -281,7 +281,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B) { DCHECK(Q.len() > A.len() - B.len()); DCHECK(R.len() >= B.len()); DCHECK(A.len() > B.len()); // Careful: This is *not* '>=' ! - DCHECK(B.len() > 0); // NOLINT(readability/check) + DCHECK(B.len() > 0); // Normalize B, and shift A by the same amount. ShiftedDigits b_normalized(B); @@ -312,7 +312,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B) { int n = B.len(); // Chunk length. // (5): {t} is the number of B-sized chunks of A. int t = DIV_CEIL(A.len(), n); - DCHECK(t >= 3); // NOLINT(readability/check) + DCHECK(t >= 3); // (6)/(7): Z is used for the current 2-chunk block to be divided by B, // initialized to the two topmost chunks of A. int z_len = n * 2; @@ -334,7 +334,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B) { for (int j = to_copy; j < target.len(); j++) target[j] = 0; #if DEBUG for (int j = to_copy; j < Qi.len(); j++) { - DCHECK(Qi[j] == 0); // NOLINT(readability/check) + DCHECK(Qi[j] == 0); } #endif } @@ -346,7 +346,7 @@ void ProcessorImpl::DivideBarrett(RWDigits Q, RWDigits R, Digits A, Digits B) { PutAt(Z, A + n * i, n); // (8a): Compute Qi, Ri such that Zi = B*Qi + Ri. DivideBarrett(Qi, Ri, Z, B, I, scratch); - DCHECK(Qi[qi_len - 1] == 0); // NOLINT(readability/check) + DCHECK(Qi[qi_len - 1] == 0); if (should_terminate()) return; // (9): Return Q = [Q_(t-2), ..., Q_0]... PutAt(Q + n * i, Qi, n); diff --git a/deps/v8/src/bigint/div-burnikel.cc b/deps/v8/src/bigint/div-burnikel.cc index 0caedb1cc171b7..264bc784a8101b 100644 --- a/deps/v8/src/bigint/div-burnikel.cc +++ b/deps/v8/src/bigint/div-burnikel.cc @@ -70,7 +70,7 @@ class BZ { void BZ::DivideBasecase(RWDigits Q, RWDigits R, Digits A, Digits B) { A.Normalize(); B.Normalize(); - DCHECK(B.len() > 0); // NOLINT(readability/check) + DCHECK(B.len() > 0); int cmp = Compare(A, B); if (cmp <= 0) { Q.Clear(); @@ -94,11 +94,11 @@ void BZ::DivideBasecase(RWDigits Q, RWDigits R, Digits A, Digits B) { // Returns Q(uotient) and R(emainder) for A/B, with B having two thirds // the size of A = [A1, A2, A3]. 
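The q_sub <= 5 and q_add <= 5 DCHECKs above capture the central Barrett invariant: the precomputed inverse makes the initial quotient guess wrong by at most a small constant, so fixing it up is a bounded loop of additions or subtractions rather than another division. A toy correction step on plain integers (the bound only holds for estimates as good as Barrett's):

#include <cassert>
#include <cstdint>

// Given an estimate *q for a/b and its signed remainder *r = a - (*q)*b,
// nudge *q until 0 <= *r < b; with a Barrett-quality estimate this runs
// only a handful of iterations, mirroring the DCHECKs above.
void CorrectQuotient(int64_t b, int64_t* q, int64_t* r) {
  assert(b > 0);
  int steps = 0;
  while (*r < 0) { *r += b; --*q; ++steps; }
  while (*r >= b) { *r -= b; ++*q; ++steps; }
  assert(steps <= 5);
}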
void BZ::D3n2n(RWDigits Q, RWDigits R, Digits A1A2, Digits A3, Digits B) { - DCHECK((B.len() & 1) == 0); // NOLINT(readability/check) + DCHECK((B.len() & 1) == 0); int n = B.len() / 2; DCHECK(A1A2.len() == 2 * n); // Actual condition is stricter than length: A < B * 2^(kDigitBits * n) - DCHECK(Compare(A1A2, B) < 0); // NOLINT(readability/check) + DCHECK(Compare(A1A2, B) < 0); DCHECK(A3.len() == n); DCHECK(Q.len() == n); DCHECK(R.len() == 2 * n); @@ -126,7 +126,7 @@ void BZ::D3n2n(RWDigits Q, RWDigits R, Digits A1A2, Digits A3, Digits B) { RWDigits temp = R1; Subtract(temp, A1, B1); temp.Normalize(); - DCHECK(temp.len() <= 1); // NOLINT(readability/check) + DCHECK(temp.len() <= 1); if (temp.len() > 0) r1_high = temp[0]; // Step 2: compute A2 + B1. Digits A2(A1A2, 0, n); @@ -149,7 +149,7 @@ void BZ::D3n2n(RWDigits Q, RWDigits R, Digits A1A2, Digits A3, Digits B) { // 5. Compute Rhat = R1*2^(kDigitBits * n) + A3 - D = [R1, A3] - D. digit_t borrow = SubtractAndReturnBorrow(R, R, D); DCHECK(borrow == r1_high); - DCHECK(Compare(R, B) < 0); // NOLINT(readability/check) + DCHECK(Compare(R, B) < 0); (void)borrow; // 7. Return R = Rhat, Q = Qhat. } @@ -160,7 +160,7 @@ void BZ::D2n1n(RWDigits Q, RWDigits R, Digits A, Digits B) { int n = B.len(); DCHECK(A.len() <= 2 * n); // A < B * 2^(kDigitsBits * n) - DCHECK(Compare(Digits(A, n, n), B) < 0); // NOLINT(readability/check) + DCHECK(Compare(Digits(A, n, n), B) < 0); DCHECK(Q.len() == n); DCHECK(R.len() == n); // 1. If n is odd or smaller than some convenient constant, compute Q and R @@ -264,7 +264,7 @@ void ProcessorImpl::DivideBurnikelZiegler(RWDigits Q, RWDigits R, Digits A, // 9. Return Q = [Q_(t-2), ..., Q_0] and R = R_0 * 2^(-sigma). #if DEBUG for (int i = 0; i < digit_shift; i++) { - DCHECK(Ri[i] == 0); // NOLINT(readability/check) + DCHECK(Ri[i] == 0); } #endif if (R.len() != 0) { diff --git a/deps/v8/src/bigint/div-helpers.cc b/deps/v8/src/bigint/div-helpers.cc index 0dfca0b02c6577..39beb48675eb47 100644 --- a/deps/v8/src/bigint/div-helpers.cc +++ b/deps/v8/src/bigint/div-helpers.cc @@ -23,7 +23,7 @@ void Copy(RWDigits Z, Digits X) { // Z := X << shift // Z and X may alias for an in-place shift. void LeftShift(RWDigits Z, Digits X, int shift) { - DCHECK(shift >= 0); // NOLINT(readability/check) + DCHECK(shift >= 0); DCHECK(shift < kDigitBits); DCHECK(Z.len() >= X.len()); if (shift == 0) return Copy(Z, X); @@ -37,7 +37,7 @@ void LeftShift(RWDigits Z, Digits X, int shift) { if (i < Z.len()) { Z[i++] = carry; } else { - DCHECK(carry == 0); // NOLINT(readability/check) + DCHECK(carry == 0); } for (; i < Z.len(); i++) Z[i] = 0; } @@ -45,7 +45,7 @@ void LeftShift(RWDigits Z, Digits X, int shift) { // Z := X >> shift // Z and X may alias for an in-place shift. void RightShift(RWDigits Z, Digits X, int shift) { - DCHECK(shift >= 0); // NOLINT(readability/check) + DCHECK(shift >= 0); DCHECK(shift < kDigitBits); X.Normalize(); DCHECK(Z.len() >= X.len()); diff --git a/deps/v8/src/bigint/div-schoolbook.cc b/deps/v8/src/bigint/div-schoolbook.cc index a6295c573c3edd..d8245a77ad5fe8 100644 --- a/deps/v8/src/bigint/div-schoolbook.cc +++ b/deps/v8/src/bigint/div-schoolbook.cc @@ -28,8 +28,8 @@ namespace bigint { // Q may be the same as A for an in-place division. 
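The div-helpers LeftShift above is the normalization shift applied before schoolbook and Burnikel-Ziegler division: the amount is strictly less than one digit and Z may alias X. A vector sketch of the same carry chain under those preconditions:

#include <cassert>
#include <cstdint>
#include <vector>

void LeftShiftInPlace(std::vector<uint64_t>* z, int shift) {
  assert(shift > 0 && shift < 64);
  uint64_t carry = 0;
  for (uint64_t& d : *z) {
    uint64_t spilled = d >> (64 - shift);  // bits moving into the next digit
    d = (d << shift) | carry;
    carry = spilled;
  }
  // Mirrors the DCHECK(carry == 0) case above: the caller guarantees
  // headroom, e.g. because the buffer was sized for the shifted result.
  assert(carry == 0);
}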
void ProcessorImpl::DivideSingle(RWDigits Q, digit_t* remainder, Digits A, digit_t b) { - DCHECK(b != 0); // NOLINT(readability/check) - DCHECK(A.len() > 0); // NOLINT(readability/check) + DCHECK(b != 0); + DCHECK(A.len() > 0); *remainder = 0; int length = A.len(); if (Q.len() != 0) { @@ -93,7 +93,6 @@ bool QLengthOK(Digits Q, Digits A, Digits B) { // See Knuth, Volume 2, section 4.3.1, Algorithm D. void ProcessorImpl::DivideSchoolbook(RWDigits Q, RWDigits R, Digits A, Digits B) { - // NOLINTNEXTLINE(readability/check) DCHECK(B.len() >= 2); // Use DivideSingle otherwise. DCHECK(A.len() >= B.len()); // No-op otherwise. DCHECK(Q.len() == 0 || QLengthOK(Q, A, B)); @@ -173,7 +172,7 @@ void ProcessorImpl::DivideSchoolbook(RWDigits Q, RWDigits R, Digits A, if (Q.len() != 0) { if (j >= Q.len()) { - DCHECK(qhat == 0); // NOLINT(readability/check) + DCHECK(qhat == 0); } else { Q[j] = qhat; } diff --git a/deps/v8/src/bigint/fromstring.cc b/deps/v8/src/bigint/fromstring.cc index a4b34a1a025d88..456a6d2919cea6 100644 --- a/deps/v8/src/bigint/fromstring.cc +++ b/deps/v8/src/bigint/fromstring.cc @@ -13,7 +13,7 @@ namespace bigint { void ProcessorImpl::FromStringClassic(RWDigits Z, FromStringAccumulator* accumulator) { // We always have at least one part to process. - DCHECK(accumulator->stack_parts_used_ > 0); // NOLINT(readability/check) + DCHECK(accumulator->stack_parts_used_ > 0); Z[0] = accumulator->stack_parts_[0]; RWDigits already_set(Z, 0, 1); for (int i = 1; i < Z.len(); i++) Z[i] = 0; @@ -89,7 +89,7 @@ void ProcessorImpl::FromStringClassic(RWDigits Z, void ProcessorImpl::FromStringLarge(RWDigits Z, FromStringAccumulator* accumulator) { int num_parts = static_cast(accumulator->heap_parts_.size()); - DCHECK(num_parts >= 2); // NOLINT(readability/check) + DCHECK(num_parts >= 2); DCHECK(Z.len() >= num_parts); RWDigits parts(accumulator->heap_parts_.data(), num_parts); Storage multipliers_storage(num_parts); @@ -160,7 +160,7 @@ void ProcessorImpl::FromStringLarge(RWDigits Z, Multiply(p_out, p_in, m_in2); if (should_terminate()) return; digit_t overflow = AddAndReturnOverflow(p_out, p_in2); - DCHECK(overflow == 0); // NOLINT(readability/check) + DCHECK(overflow == 0); USE(overflow); // m[j] = m[i] * m[i+1] if (i > 0) { @@ -240,7 +240,7 @@ void ProcessorImpl::FromStringLarge(RWDigits Z, void ProcessorImpl::FromStringBasePowerOfTwo( RWDigits Z, FromStringAccumulator* accumulator) { const int num_parts = accumulator->ResultLength(); - DCHECK(num_parts >= 1); // NOLINT(readability/check) + DCHECK(num_parts >= 1); DCHECK(Z.len() >= num_parts); Digits parts(accumulator->heap_parts_.size() > 0 ? accumulator->heap_parts_.data() @@ -259,7 +259,7 @@ void ProcessorImpl::FromStringBasePowerOfTwo( // If the last part is fully populated, then all parts must be, and we can // simply copy them (in reversed order). if (unused_last_part_bits == 0) { - DCHECK(kDigitBits % char_bits == 0); // NOLINT(readability/check) + DCHECK(kDigitBits % char_bits == 0); while (part_index >= 0) { Z[z_index++] = parts[part_index--]; } diff --git a/deps/v8/src/bigint/mul-fft.cc b/deps/v8/src/bigint/mul-fft.cc index 9c297c00dfcdff..3c255f48ad3737 100644 --- a/deps/v8/src/bigint/mul-fft.cc +++ b/deps/v8/src/bigint/mul-fft.cc @@ -183,7 +183,7 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K, // The modulo-reduction amounts to a subtraction, which we combine // with the shift as follows: // input = [ iK ][iK-1] .... .... [ i1 ][ i0 ] - // result = [iX-1] .... 
[ i0 ] <<<<<<<<<<< shift by {power_of_two} + // result = [iX-1] .... [ i0 ] <---------- shift by {power_of_two} // - [ iK ] .... [ iX ] // where "X" is the index "K - digit_shift". int digit_shift = power_of_two / kDigitBits; @@ -207,7 +207,7 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K, } // Any remaining work can hard-code the knowledge that input[i] == 0. for (; i < K - digit_shift; i++) { - DCHECK(input[i] == 0); // NOLINT(readability/check) + DCHECK(input[i] == 0); result[i + digit_shift] = 0; } // Second phase: subtract input digits [iX] to [iK] from (virtually) zero- @@ -219,7 +219,7 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K, } // Any remaining work can hard-code the knowledge that input[i] == 0. for (; i < K; i++) { - DCHECK(input[i] == 0); // NOLINT(readability/check) + DCHECK(input[i] == 0); result[i - K + digit_shift] = digit_sub(0, borrow, &borrow); } // Last step: subtract [iK] from [i0] and store at result index digit_shift. @@ -238,7 +238,7 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K, } // Any remaining work can hard-code the knowledge that input[i] == 0. for (; i < K - digit_shift; i++) { - DCHECK(input[i] == 0); // NOLINT(readability/check) + DCHECK(input[i] == 0); result[i + digit_shift] = carry; carry = 0; } @@ -252,13 +252,13 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K, } // Any remaining work can hard-code the knowledge that input[i] == 0. if (i < K) { - DCHECK(input[i] == 0); // NOLINT(readability/check) + DCHECK(input[i] == 0); result[i - K + digit_shift] = digit_sub2(0, carry, borrow, &borrow); carry = 0; i++; } for (; i < K; i++) { - DCHECK(input[i] == 0); // NOLINT(readability/check) + DCHECK(input[i] == 0); result[i - K + digit_shift] = digit_sub(0, borrow, &borrow); } // Last step: compute result[digit_shift]. @@ -266,7 +266,7 @@ void ShiftModFn(digit_t* result, const digit_t* input, int power_of_two, int K, result[digit_shift] = digit_sub2( result[digit_shift], (d << bits_shift) | carry, borrow, &borrow); // No carry left. - DCHECK((d >> (kDigitBits - bits_shift)) == 0); // NOLINT(readability/check) + DCHECK((d >> (kDigitBits - bits_shift)) == 0); } result[K] = 0; for (int i = digit_shift + 1; i <= K && borrow > 0; i++) { @@ -324,8 +324,8 @@ void ComputeParameters(int N, int m, Parameters* params) { K_tz = CountTrailingZeros(K); } - DCHECK(K % kDigitBits == 0); // NOLINT(readability/check) - DCHECK(s % kDigitBits == 0); // NOLINT(readability/check) + DCHECK(K % kDigitBits == 0); + DCHECK(s % kDigitBits == 0); params->K = K / kDigitBits; params->s = s / kDigitBits; params->n = n; @@ -347,8 +347,8 @@ void ComputeParameters_Inner(int N, Parameters* params) { K = RoundUp(K, n); // ...and a multiple of n and kDigitBits. K = RoundUp(K, kDigitBits); params->r = K >> m; // Which multiple? - DCHECK(K % kDigitBits == 0); // NOLINT(readability/check) - DCHECK(s % kDigitBits == 0); // NOLINT(readability/check) + DCHECK(K % kDigitBits == 0); + DCHECK(s % kDigitBits == 0); params->K = K / kDigitBits; params->s = s / kDigitBits; params->n = n; @@ -502,7 +502,7 @@ void FFTContainer::Start_Default(Digits X, int chunk_size, int theta, // corner case where X[n_ * chunk_size] == 1. Detect that case, and handle // the extra bit as part of the last chunk; we always have the space. 
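ShiftModFn above is why the FFT multiplication is cheap: multiplying by a power of two modulo F = 2^K + 1 is just a shift in which the bits wrapping past position K come back subtracted, because 2^K is congruent to -1 mod F. A toy single-word version (assumes x < 2^K, 0 < s < K, and K small enough that x << s fits in 64 bits):

#include <cstdint>

uint64_t ShiftModFn(uint64_t x, unsigned s, unsigned K) {
  const uint64_t F = (uint64_t{1} << K) + 1;
  uint64_t low = (x << s) & ((uint64_t{1} << K) - 1);  // kept bits
  uint64_t high = x >> (K - s);                        // bits wrapped past 2^K
  return (low + F - high) % F;  // wrapped bits enter with a minus sign
}

For example, with K = 4 (F = 17), x = 9, s = 2: 9 * 4 = 36 and 36 mod 17 = 2, which the rotate-and-subtract form reproduces as (4 + 17 - 2) mod 17.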
if (i == n_ - 1 && len == chunk_size + 1) { - DCHECK(X[n_ * chunk_size] <= 1); // NOLINT(readability/check) + DCHECK(X[n_ * chunk_size] <= 1); DCHECK(length_ >= chunk_size + 1); chunk_size++; } @@ -517,7 +517,7 @@ void FFTContainer::Start_Default(Digits X, int chunk_size, int theta, pointer += chunk_size; len -= chunk_size; } - DCHECK(len == 0); // NOLINT(readability/check) + DCHECK(len == 0); for (; i < n_; i++) { memset(part_[i], 0, part_length_in_bytes); } @@ -531,7 +531,7 @@ void FFTContainer::Start(Digits X, int chunk_size, int theta, int omega) { if (len > n_ * chunk_size / 2) { return Start_Default(X, chunk_size, theta, omega); } - DCHECK(theta == 0); // NOLINT(readability/check) + DCHECK(theta == 0); const digit_t* pointer = X.digits(); const size_t part_length_in_bytes = length_ * sizeof(digit_t); int nhalf = n_ / 2; @@ -562,7 +562,7 @@ void FFTContainer::Start(Digits X, int chunk_size, int theta, int omega) { // need as input for the "DIT" aka "decimation in time" backwards transform. void FFTContainer::FFT_ReturnShuffledThreadsafe(int start, int len, int omega, digit_t* temp) { - DCHECK((len & 1) == 0); // {len} must be even. NOLINT(readability/check) + DCHECK((len & 1) == 0); // {len} must be even. int half = len / 2; SumDiff(part_[start], part_[start + half], part_[start], part_[start + half], length_); @@ -592,7 +592,7 @@ void FFTContainer::BackwardFFT(int start, int len, int omega) { void FFTContainer::BackwardFFT_Threadsafe(int start, int len, int omega, digit_t* temp) { - DCHECK((len & 1) == 0); // {len} must be even. NOLINT(readability/check) + DCHECK((len & 1) == 0); // {len} must be even. int half = len / 2; // Don't recurse for half == 2, as PointwiseMultiply already performed // the first level of the backwards FFT. @@ -626,7 +626,7 @@ void FFTContainer::NormalizeAndRecombine(int omega, int m, RWDigits Z, Z[zi] = digit_add3(Z[zi], temp_[j], carry, &carry); } for (; j < length_; j++) { - DCHECK(temp_[j] == 0); // NOLINT(readability/check) + DCHECK(temp_[j] == 0); } if (carry != 0) { DCHECK(zi < Z.len()); @@ -654,7 +654,7 @@ void FFTContainer::CounterWeightAndRecombine(int theta, int m, RWDigits Z, for (int k = 0; k < n_; k++, z_index += s) { int shift = -theta * k - m; if (shift < 0) shift += 2 * n_ * theta; - DCHECK(shift >= 0); // NOLINT(readability/check) + DCHECK(shift >= 0); digit_t* input = part_[k]; ShiftModFn(temp_, input, shift, K_); int remaining_z = Z.len() - z_index; @@ -679,7 +679,7 @@ void FFTContainer::CounterWeightAndRecombine(int theta, int m, RWDigits Z, digit_t d = digit_sub2(1, temp_[i], borrow_Fn, &borrow_Fn); Z[z_index + i] = digit_sub2(Z[z_index + i], d, borrow_z, &borrow_z); } - DCHECK(borrow_Fn == 0); // NOLINT(readability/check) + DCHECK(borrow_Fn == 0); for (; borrow_z > 0 && i < remaining_z; i++) { Z[z_index + i] = digit_sub(Z[z_index + i], borrow_z, &borrow_z); } @@ -690,7 +690,7 @@ void FFTContainer::CounterWeightAndRecombine(int theta, int m, RWDigits Z, Z[z_index + i] = digit_add3(Z[z_index + i], temp_[i], carry, &carry); } for (; i < length_; i++) { - DCHECK(temp_[i] == 0); // NOLINT(readability/check) + DCHECK(temp_[i] == 0); } for (; carry > 0 && i < remaining_z; i++) { Z[z_index + i] = digit_add2(Z[z_index + i], carry, &carry); diff --git a/deps/v8/src/bigint/mul-karatsuba.cc b/deps/v8/src/bigint/mul-karatsuba.cc index d4b5a58383a489..3fdda20aeb2173 100644 --- a/deps/v8/src/bigint/mul-karatsuba.cc +++ b/deps/v8/src/bigint/mul-karatsuba.cc @@ -82,7 +82,7 @@ void KaratsubaSubtractionHelper(RWDigits result, Digits X, Digits Y, for (; i 
< X.len(); i++) { result[i] = digit_sub(X[i], borrow, &borrow); } - DCHECK(borrow == 0); // NOLINT(readability/check) + DCHECK(borrow == 0); for (; i < result.len(); i++) result[i] = 0; } @@ -160,7 +160,7 @@ void ProcessorImpl::KaratsubaMain(RWDigits Z, Digits X, Digits Y, } } DCHECK(scratch.len() >= 4 * n); - DCHECK((n & 1) == 0); // NOLINT(readability/check) + DCHECK((n & 1) == 0); int n2 = n >> 1; Digits X0(X, 0, n2); Digits X1(X, n2, n2); @@ -178,7 +178,7 @@ void ProcessorImpl::KaratsubaMain(RWDigits Z, Digits X, Digits Y, int end = std::min(Z2.len(), P2.len()); for (int i = 0; i < end; i++) Z2[i] = P2[i]; for (int i = end; i < n; i++) { - DCHECK(P2[i] == 0); // NOLINT(readability/check) + DCHECK(P2[i] == 0); } // The intermediate result can be one digit too large; the subtraction // below will fix this. @@ -197,7 +197,7 @@ void ProcessorImpl::KaratsubaMain(RWDigits Z, Digits X, Digits Y, overflow -= SubAndReturnBorrow(Z + n2, P1); } // The intermediate result may have been bigger, but the final result fits. - DCHECK(overflow == 0); // NOLINT(readability/check) + DCHECK(overflow == 0); USE(overflow); } diff --git a/deps/v8/src/bigint/mul-schoolbook.cc b/deps/v8/src/bigint/mul-schoolbook.cc index 9222e1e675f132..27a3a243117e0c 100644 --- a/deps/v8/src/bigint/mul-schoolbook.cc +++ b/deps/v8/src/bigint/mul-schoolbook.cc @@ -11,7 +11,7 @@ namespace bigint { // Z := X * y, where y is a single digit. void ProcessorImpl::MultiplySingle(RWDigits Z, Digits X, digit_t y) { - DCHECK(y != 0); // NOLINT(readability/check) + DCHECK(y != 0); digit_t carry = 0; digit_t high = 0; for (int i = 0; i < X.len(); i++) { @@ -87,7 +87,7 @@ void ProcessorImpl::MultiplySchoolbook(RWDigits Z, Digits X, Digits Y) { } // Write the last digit, and zero out any extra space in Z. Z[i++] = digit_add2(next, carry, &carry); - DCHECK(carry == 0); // NOLINT(readability/check) + DCHECK(carry == 0); for (; i < Z.len(); i++) Z[i] = 0; } diff --git a/deps/v8/src/bigint/tostring.cc b/deps/v8/src/bigint/tostring.cc index 51fb75957aaef7..0447ce0c22651c 100644 --- a/deps/v8/src/bigint/tostring.cc +++ b/deps/v8/src/bigint/tostring.cc @@ -56,7 +56,7 @@ constexpr digit_t digit_pow_rec(digit_t base, digit_t exponent) { template char* BasecaseFixedLast(digit_t chunk, char* out) { while (chunk != 0) { - DCHECK(*(out - 1) == kStringZapValue); // NOLINT(readability/check) + DCHECK(*(out - 1) == kStringZapValue); if (radix <= 10) { *(--out) = '0' + (chunk % radix); } else { @@ -94,7 +94,7 @@ char* DivideByMagic(RWDigits rest, Digits input, char* output) { } // {remainder} is now the current chunk to be written out. for (int i = 0; i < chunk_chars; i++) { - DCHECK(*(output - 1) == kStringZapValue); // NOLINT(readability/check) + DCHECK(*(output - 1) == kStringZapValue); if (radix <= 10) { *(--output) = '0' + (remainder % radix); } else { @@ -102,7 +102,7 @@ char* DivideByMagic(RWDigits rest, Digits input, char* output) { } remainder /= radix; } - DCHECK(remainder == 0); // NOLINT(readability/check) + DCHECK(remainder == 0); return output; } @@ -182,7 +182,7 @@ class ToStringFormatter { char* BasecaseLast(digit_t digit, char* out) { if (radix_ == 10) return BasecaseFixedLast<10>(digit, out); do { - DCHECK(*(out - 1) == kStringZapValue); // NOLINT(readability/check) + DCHECK(*(out - 1) == kStringZapValue); *(--out) = kConversionChars[digit % radix_]; digit /= radix_; } while (digit > 0); @@ -193,11 +193,11 @@ class ToStringFormatter { // same number of characters (as many '0' as necessary). 
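The Basecase helpers above all share one shape: characters are emitted right to left into a pre-sized buffer (zapped in debug builds so the DCHECKs can catch overruns), peeling one radix digit per step. A standalone version of the BasecaseLast loop:

#include <cstdint>

static const char kConversionChars[] = "0123456789abcdefghijklmnopqrstuvwxyz";

// Writes {chunk} in {radix} ending just before {out}; returns the new
// start of the string. The caller must have reserved enough characters.
char* WriteChunkLast(uint64_t chunk, unsigned radix, char* out) {
  do {
    *(--out) = kConversionChars[chunk % radix];
    chunk /= radix;
  } while (chunk != 0);
  return out;
}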
char* BasecaseMiddle(digit_t digit, char* out) { for (int i = 0; i < chunk_chars_; i++) { - DCHECK(*(out - 1) == kStringZapValue); // NOLINT(readability/check) + DCHECK(*(out - 1) == kStringZapValue); *(--out) = kConversionChars[digit % radix_]; digit /= radix_; } - DCHECK(digit == 0); // NOLINT(readability/check) + DCHECK(digit == 0); return out; } @@ -221,7 +221,7 @@ void ToStringFormatter::Start() { chunk_chars_ = kDigitBits * kBitsPerCharTableMultiplier / max_bits_per_char_; chunk_divisor_ = digit_pow(radix_, chunk_chars_); // By construction of chunk_chars_, there can't have been overflow. - DCHECK(chunk_divisor_ != 0); // NOLINT(readability/check) + DCHECK(chunk_divisor_ != 0); } int ToStringFormatter::Finish() { @@ -411,7 +411,7 @@ void RecursionLevel::ComputeInverse(ProcessorImpl* processor, } Digits RecursionLevel::GetInverse(int dividend_length) { - DCHECK(inverse_.len() != 0); // NOLINT(readability/check) + DCHECK(inverse_.len() != 0); int inverse_len = dividend_length - divisor_.len(); DCHECK(inverse_len <= inverse_.len()); return inverse_ + (inverse_.len() - inverse_len); @@ -484,7 +484,7 @@ char* ToStringFormatter::ProcessLevel(RecursionLevel* level, Digits chunk, chunk = original_chunk; out = ProcessLevel(level->next_, chunk, out, is_last_on_level); } else { - DCHECK(comparison == 0); // NOLINT(readability/check) + DCHECK(comparison == 0); // If the chunk is equal to the divisor, we know that the right half // is all '0', and the left half is '...0001'. // Handling this case specially is an optimization; we could also diff --git a/deps/v8/src/bigint/vector-arithmetic.cc b/deps/v8/src/bigint/vector-arithmetic.cc index 9bbea3873ea79c..0cd65589c8a8d5 100644 --- a/deps/v8/src/bigint/vector-arithmetic.cc +++ b/deps/v8/src/bigint/vector-arithmetic.cc @@ -68,7 +68,7 @@ void Subtract(RWDigits Z, Digits X, Digits Y) { for (; i < X.len(); i++) { Z[i] = digit_sub(X[i], borrow, &borrow); } - DCHECK(borrow == 0); // NOLINT(readability/check) + DCHECK(borrow == 0); for (; i < Z.len(); i++) Z[i] = 0; } diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index a3a2209f9fefdd..00f1009610cfb6 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -209,8 +209,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset)); __ DecodeField(r4); - __ JumpIfIsInRange(r4, kDefaultDerivedConstructor, kDerivedConstructor, - ¬_create_implicit_receiver); + __ JumpIfIsInRange( + r4, static_cast(FunctionKind::kDefaultDerivedConstructor), + static_cast(FunctionKind::kDerivedConstructor), + ¬_create_implicit_receiver); // If not derived class constructor: Allocate the new receiver object. 
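The JumpIfIsInRange call sites above gain explicit casts because the FunctionKind constants are now scoped enumerators rather than plain enum values. The underlying check remains the classic single-compare unsigned range test; a sketch with hypothetical enumerator values:

#include <cstdint>

enum class FunctionKind : uint8_t {
  kDefaultDerivedConstructor = 8,  // values are illustrative only
  kDerivedConstructor = 10,
};

// One unsigned comparison: value - lo wraps around for value < lo, so
// everything outside [lo, hi] ends up greater than hi - lo.
bool IsInRange(uint8_t value, FunctionKind lo_kind, FunctionKind hi_kind) {
  const uint8_t lo = static_cast<uint8_t>(lo_kind);
  const uint8_t hi = static_cast<uint8_t>(hi_kind);
  return static_cast<uint8_t>(value - lo) <= static_cast<uint8_t>(hi - lo);
}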
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r4, @@ -892,7 +894,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, Runtime::FunctionId function_id) { ASM_CODE_COMMENT(masm); Label no_match; - __ cmp_raw_immediate(actual_marker, expected_marker); + __ cmp_raw_immediate(actual_marker, static_cast(expected_marker)); __ b(ne, &no_match); GenerateTailCallToReturnedCode(masm, function_id); __ bind(&no_match); diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 0cb79c1f04d5a0..b75ffcc0656146 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -75,7 +75,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, } static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); - __ JumpCodeObject(x2); + __ JumpCodeTObject(x2); } namespace { @@ -253,8 +253,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset)); __ DecodeField(w4); - __ JumpIfIsInRange(w4, kDefaultDerivedConstructor, kDerivedConstructor, - ¬_create_implicit_receiver); + __ JumpIfIsInRange( + w4, static_cast(FunctionKind::kDefaultDerivedConstructor), + static_cast(FunctionKind::kDerivedConstructor), + ¬_create_implicit_receiver); // If not derived class constructor: Allocate the new receiver object. __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, x4, @@ -1083,7 +1085,8 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, Runtime::FunctionId function_id) { ASM_CODE_COMMENT(masm); Label no_match; - __ CompareAndBranch(actual_marker, Operand(expected_marker), ne, &no_match); + __ CompareAndBranch(actual_marker, Operand(static_cast(expected_marker)), + ne, &no_match); GenerateTailCallToReturnedCode(masm, function_id); __ bind(&no_match); } @@ -1891,10 +1894,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ Ldr(kJavaScriptCallCodeStartRegister, MemOperand(kInterpreterDispatchTableRegister, x1)); - UseScratchRegisterScope temps(masm); - temps.Exclude(x17); - __ Mov(x17, kJavaScriptCallCodeStartRegister); - __ Jump(x17); + { + UseScratchRegisterScope temps(masm); + temps.Exclude(x17); + __ Mov(x17, kJavaScriptCallCodeStartRegister); + __ Jump(x17); + } __ Bind(&return_from_bytecode_dispatch); @@ -1932,8 +1937,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ Bind(&trampoline_loaded); - __ Add(x17, x1, Operand(interpreter_entry_return_pc_offset.value())); - __ Br(x17); + { + UseScratchRegisterScope temps(masm); + temps.Exclude(x17); + __ Add(x17, x1, Operand(interpreter_entry_return_pc_offset.value())); + __ Br(x17); + } } void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/array-from.tq b/deps/v8/src/builtins/array-from.tq index 5fcdefccc37c30..f1783b58f691e6 100644 --- a/deps/v8/src/builtins/array-from.tq +++ b/deps/v8/src/builtins/array-from.tq @@ -110,9 +110,9 @@ ArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(...arguments): try { mappedValue = Call(context, UnsafeCast(mapfn), thisArg, nextValue, k); - } catch (e) { + } catch (e, message) { iterator::IteratorCloseOnException(iteratorRecord); - ReThrow(context, e); + ReThrowWithMessage(context, e, message); } } else { mappedValue = nextValue; @@ -123,9 +123,9 @@ ArrayFrom(js-implicit 
context: NativeContext, receiver: JSAny)(...arguments): // return ? IteratorClose(iteratorRecord, defineStatus). try { FastCreateDataProperty(a, k, mappedValue); - } catch (e) deferred { + } catch (e, message) deferred { iterator::IteratorCloseOnException(iteratorRecord); - ReThrow(context, e); + ReThrowWithMessage(context, e, message); } // x. Set k to k + 1. k += 1; diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq index 12988af2a20033..a4bf6f002d20bb 100644 --- a/deps/v8/src/builtins/array-join.tq +++ b/deps/v8/src/builtins/array-join.tq @@ -537,9 +537,9 @@ transitioning macro CycleProtectedArrayJoin( ArrayJoin(useToLocaleString, o, sep, len, locales, options); JoinStackPopInline(o); return result; - } catch (e) deferred { + } catch (e, message) deferred { JoinStackPopInline(o); - ReThrow(context, e); + ReThrowWithMessage(context, e, message); } } else { return kEmptyString; diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 3726207e1d6268..69e9faef533496 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -231,8 +231,6 @@ extern class ObjectBoilerplateDescription extends FixedArray; extern class ClosureFeedbackCellArray extends FixedArray; extern class ScriptContextTable extends FixedArray; -type LayoutDescriptor extends ByteArray - generates 'TNode'; extern class TransitionArray extends WeakFixedArray; extern operator '.length_intptr' macro LoadAndUntagWeakFixedArrayLength( @@ -777,7 +775,9 @@ macro Equal(implicit context: Context)(left: JSAny, right: JSAny): Boolean { extern macro StrictEqual(JSAny, JSAny): Boolean; extern macro SmiLexicographicCompare(Smi, Smi): Smi; -extern runtime ReThrow(Context, JSAny): never; + +extern runtime ReThrowWithMessage( + Context, JSAny, TheHole | JSMessageObject): never; extern runtime Throw(implicit context: Context)(JSAny): never; extern runtime ThrowInvalidStringLength(Context): never; @@ -952,7 +952,6 @@ extern operator '+' macro ConstexprInt32Add( extern operator '*' macro ConstexprInt31Mul( constexpr int31, constexpr int31): constexpr int31; extern operator '-' macro Int32Sub(int16, int16): int32; -extern operator '-' macro Int32Sub(uint16, uint16): int32; extern operator '-' macro Int32Sub(int32, int32): int32; extern operator '-' macro Uint32Sub(uint32, uint32): uint32; extern operator '*' macro Int32Mul(int32, int32): int32; @@ -1871,6 +1870,8 @@ extern macro FeedbackIteratorHandlerOffset(): intptr; extern operator '[]' macro LoadWeakFixedArrayElement( WeakFixedArray, intptr): MaybeObject; +extern operator '[]' macro LoadUint8Ptr(RawPtr, intptr): uint8; + const kNoHashSentinel: constexpr int32 generates 'PropertyArray::kNoHashSentinel'; extern macro LoadNameHash(Name): uint32; diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc index 039f4ade69f26b..1373e663970d29 100644 --- a/deps/v8/src/builtins/builtins-async-function-gen.cc +++ b/deps/v8/src/builtins/builtins-async-function-gen.cc @@ -265,6 +265,7 @@ void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait( Label after_debug_hook(this), call_debug_hook(this, Label::kDeferred); GotoIf(HasAsyncEventDelegate(), &call_debug_hook); + GotoIf(IsDebugActive(), &call_debug_hook); Goto(&after_debug_hook); BIND(&after_debug_hook); diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc index 0adb95ad433367..08cea2e74e5bf1 100644 --- a/deps/v8/src/builtins/builtins-async-gen.cc +++ 
b/deps/v8/src/builtins/builtins-async-gen.cc @@ -283,9 +283,8 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure( // which almost doubles the size of `await` builtins (unnecessarily). TNode builtin_id = LoadObjectField( shared_info, SharedFunctionInfo::kFunctionDataOffset); - TNode code = LoadBuiltin(builtin_id); - StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, - ToCodeT(code)); + TNode code = LoadBuiltin(builtin_id); + StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code); } TNode AsyncBuiltinsAssembler::CreateUnwrapClosure( diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc index 87c1d443a6adca..384fba337527fe 100644 --- a/deps/v8/src/builtins/builtins-async-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc @@ -381,7 +381,6 @@ TF_BUILTIN(AsyncGeneratorAwaitCaught, AsyncGeneratorBuiltinsAssembler) { } TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) { - using Descriptor = AsyncGeneratorResumeNextDescriptor; const auto generator = Parameter(Descriptor::kGenerator); const auto context = Parameter(Descriptor::kContext); @@ -542,7 +541,6 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) { } TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) { - using Descriptor = AsyncGeneratorRejectDescriptor; const auto generator = Parameter(Descriptor::kGenerator); const auto value = Parameter(Descriptor::kValue); diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index f4885efed8d534..818b8373de196f 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -333,8 +333,11 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable( } BIND(&if_exception); { + TNode message = GetPendingMessage(); + SetPendingMessage(TheHoleConstant()); IteratorCloseOnException(context, iterator); - CallRuntime(Runtime::kReThrow, context, var_exception.value()); + CallRuntime(Runtime::kReThrowWithMessage, context, var_exception.value(), + message); Unreachable(); } BIND(&exit); diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index 28af8bfabc3245..9fff2f49119b7d 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -254,11 +254,9 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset, shared_function_info); StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context); - Handle lazy_builtin_handle = BUILTIN_CODE(isolate(), CompileLazy); - // TODO(v8:11880): support embedding of CodeDataContainers. 
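Several hunks above (ToCodeT, JumpCodeTObject, BUILTIN_CODET, the changed LoadBuiltin result type) belong to one migration: call sites move to a CodeT type whose meaning depends on whether V8 is built with an external code space. Roughly, and simplified from the V8 headers:

// When V8_EXTERNAL_CODE_SPACE is configured, JSFunction and friends hold a
// CodeDataContainer instead of a Code object, and CodeT papers over the
// difference so most call sites compile either way. (Sketch only; the real
// types live in V8's heap object hierarchy.)
class Code {};
class CodeDataContainer {};

#ifdef V8_EXTERNAL_CODE_SPACE
using CodeT = CodeDataContainer;
#else
using CodeT = Code;
#endif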
- TNode lazy_builtin = HeapConstant(lazy_builtin_handle); - StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, - ToCodeT(lazy_builtin)); + TNode lazy_builtin = + HeapConstant(BUILTIN_CODET(isolate(), CompileLazy)); + StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin); Return(result); } diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc index 7bd277beafd3ab..bab7ba4eeb23d6 100644 --- a/deps/v8/src/builtins/builtins-dataview.cc +++ b/deps/v8/src/builtins/builtins-dataview.cc @@ -21,6 +21,7 @@ namespace internal { BUILTIN(DataViewConstructor) { const char* const kMethodName = "DataView constructor"; HandleScope scope(isolate); + // 1. If NewTarget is undefined, throw a TypeError exception. if (args.new_target()->IsUndefined(isolate)) { // [[Call]] THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewTypeError(MessageTemplate::kConstructorNotFunction, @@ -55,8 +56,8 @@ BUILTIN(DataViewConstructor) { kMethodName))); } - // 5. Let bufferByteLength be buffer.[[ArrayBufferByteLength]]. - size_t const buffer_byte_length = array_buffer->byte_length(); + // 5. Let bufferByteLength be ArrayBufferByteLength(buffer, SeqCst). + size_t buffer_byte_length = array_buffer->GetByteLength(); // 6. If offset > bufferByteLength, throw a RangeError exception. if (view_byte_offset > buffer_byte_length) { @@ -64,15 +65,22 @@ BUILTIN(DataViewConstructor) { isolate, NewRangeError(MessageTemplate::kInvalidOffset, byte_offset)); } + // 7. Let bufferIsResizable be IsResizableArrayBuffer(buffer). + // 8. Let byteLengthChecked be empty. + // 9. If bufferIsResizable is true and byteLength is undefined, then + // a. Let viewByteLength be auto. + // 10. Else if byteLength is undefined, then + // a. Let viewByteLength be bufferByteLength - offset. size_t view_byte_length; + bool length_tracking = false; if (byte_length->IsUndefined(isolate)) { - // 7. If byteLength is undefined, then - // a. Let viewByteLength be bufferByteLength - offset. view_byte_length = buffer_byte_length - view_byte_offset; + length_tracking = array_buffer->is_resizable(); } else { - // 8. Else, - // a. Let viewByteLength be ? ToIndex(byteLength). - // b. If offset+viewByteLength > bufferByteLength, throw a + // 11. Else, + // a. Set byteLengthChecked be ? ToIndex(byteLength). + // b. Let viewByteLength be byteLengthChecked. + // c. If offset + viewByteLength > bufferByteLength, throw a // RangeError exception. ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, byte_length, @@ -85,9 +93,9 @@ BUILTIN(DataViewConstructor) { view_byte_length = byte_length->Number(); } - // 9. Let O be ? OrdinaryCreateFromConstructor(NewTarget, - // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]], - // [[ByteLength]], [[ByteOffset]]»). + // 12. Let O be ? OrdinaryCreateFromConstructor(NewTarget, + // "%DataViewPrototype%", «[[DataView]], [[ViewedArrayBuffer]], + // [[ByteLength]], [[ByteOffset]]»). 
Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, result, @@ -97,26 +105,30 @@ BUILTIN(DataViewConstructor) { // TODO(v8:10391, saelo): Handle external pointers in EmbedderDataSlot data_view->SetEmbedderField(i, Smi::zero()); } + data_view->set_bit_field(0); + data_view->set_is_backed_by_rab(array_buffer->is_resizable() && + !array_buffer->is_shared()); + data_view->set_is_length_tracking(length_tracking); - // We have to set the internal slots before the detached check on step 10 or + // We have to set the internal slots before the checks on steps 13 - 17 or // the TorqueGeneratedClassVerifier ended up complaining that the slot is // empty or invalid on heap teardown. - // The result object is not observable from JavaScript when step 10 early - // aborts so it is fine to set internal slots here. + // The result object is not observable from JavaScript when steps 13 - 17 + // early abort so it is fine to set internal slots here. - // 11. Set O.[[ViewedArrayBuffer]] to buffer. + // 18. Set O.[[ViewedArrayBuffer]] to buffer. data_view->set_buffer(*array_buffer); - // 12. Set O.[[ByteLength]] to viewByteLength. - data_view->set_byte_length(view_byte_length); + // 19. Set O.[[ByteLength]] to viewByteLength. + data_view->set_byte_length(length_tracking ? 0 : view_byte_length); - // 13. Set O.[[ByteOffset]] to offset. + // 20. Set O.[[ByteOffset]] to offset. data_view->set_byte_offset(view_byte_offset); data_view->set_data_pointer( isolate, static_cast(array_buffer->backing_store()) + view_byte_offset); - // 10. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. + // 13. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. if (array_buffer->was_detached()) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewTypeError(MessageTemplate::kDetachedOperation, @@ -124,7 +136,27 @@ BUILTIN(DataViewConstructor) { kMethodName))); } - // 14. Return O. + // 14. Let getBufferByteLength be + // MakeIdempotentArrayBufferByteLengthGetter(SeqCst). + // 15. Set bufferByteLength be getBufferByteLength(buffer). + buffer_byte_length = array_buffer->GetByteLength(); + + // 16. If offset > bufferByteLength, throw a RangeError exception. + if (view_byte_offset > buffer_byte_length) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewRangeError(MessageTemplate::kInvalidOffset, byte_offset)); + } + + // 17. If byteLengthChecked is not empty, then + // a. If offset + viewByteLength > bufferByteLength, throw a RangeError + // exception. + if (!length_tracking && + view_byte_offset + view_byte_length > buffer_byte_length) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength)); + } + + // 21. Return O. 
return *result; } diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc index cb264279d506f2..c1264891f67c5e 100644 --- a/deps/v8/src/builtins/builtins-date.cc +++ b/deps/v8/src/builtins/builtins-date.cc @@ -445,12 +445,12 @@ BUILTIN(DatePrototypeSetMinutes) { // ES6 section 20.3.4.25 Date.prototype.setMonth ( month, date ) BUILTIN(DatePrototypeSetMonth) { HandleScope scope(isolate); - CHECK_RECEIVER(JSDate, date, "Date.prototype.setMonth"); + CHECK_RECEIVER(JSDate, this_date, "Date.prototype.setMonth"); int const argc = args.length() - 1; Handle month = args.atOrUndefined(isolate, 1); ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(isolate, month)); - double time_val = date->value().Number(); + double time_val = this_date->value().Number(); if (!std::isnan(time_val)) { int64_t const time_ms = static_cast(time_val); int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms); @@ -468,7 +468,7 @@ BUILTIN(DatePrototypeSetMonth) { } time_val = MakeDate(MakeDay(year, m, dt), time_within_day); } - return SetLocalDateValue(isolate, date, time_val); + return SetLocalDateValue(isolate, this_date, time_val); } // ES6 section 20.3.4.26 Date.prototype.setSeconds ( sec, ms ) @@ -662,12 +662,12 @@ BUILTIN(DatePrototypeSetUTCMinutes) { // ES6 section 20.3.4.31 Date.prototype.setUTCMonth ( month, date ) BUILTIN(DatePrototypeSetUTCMonth) { HandleScope scope(isolate); - CHECK_RECEIVER(JSDate, date, "Date.prototype.setUTCMonth"); + CHECK_RECEIVER(JSDate, this_date, "Date.prototype.setUTCMonth"); int const argc = args.length() - 1; Handle month = args.atOrUndefined(isolate, 1); ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(isolate, month)); - double time_val = date->value().Number(); + double time_val = this_date->value().Number(); if (!std::isnan(time_val)) { int64_t const time_ms = static_cast(time_val); int days = isolate->date_cache()->DaysFromTime(time_ms); @@ -684,7 +684,7 @@ BUILTIN(DatePrototypeSetUTCMonth) { } time_val = MakeDate(MakeDay(year, m, dt), time_within_day); } - return *JSDate::SetValue(date, DateCache::TimeClip(time_val)); + return *JSDate::SetValue(this_date, DateCache::TimeClip(time_val)); } // ES6 section 20.3.4.34 Date.prototype.setUTCSeconds ( sec, ms ) diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index db4fc381890ce5..538a3970d4b52c 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -704,17 +704,29 @@ namespace internal { \ /* Binary ops with feedback collection */ \ TFC(Add_Baseline, BinaryOp_Baseline) \ + TFC(AddSmi_Baseline, BinaryOp_Baseline) \ TFC(Subtract_Baseline, BinaryOp_Baseline) \ + TFC(SubtractSmi_Baseline, BinaryOp_Baseline) \ TFC(Multiply_Baseline, BinaryOp_Baseline) \ + TFC(MultiplySmi_Baseline, BinaryOp_Baseline) \ TFC(Divide_Baseline, BinaryOp_Baseline) \ + TFC(DivideSmi_Baseline, BinaryOp_Baseline) \ TFC(Modulus_Baseline, BinaryOp_Baseline) \ + TFC(ModulusSmi_Baseline, BinaryOp_Baseline) \ TFC(Exponentiate_Baseline, BinaryOp_Baseline) \ + TFC(ExponentiateSmi_Baseline, BinaryOp_Baseline) \ TFC(BitwiseAnd_Baseline, BinaryOp_Baseline) \ + TFC(BitwiseAndSmi_Baseline, BinaryOp_Baseline) \ TFC(BitwiseOr_Baseline, BinaryOp_Baseline) \ + TFC(BitwiseOrSmi_Baseline, BinaryOp_Baseline) \ TFC(BitwiseXor_Baseline, BinaryOp_Baseline) \ + TFC(BitwiseXorSmi_Baseline, BinaryOp_Baseline) \ TFC(ShiftLeft_Baseline, BinaryOp_Baseline) \ + TFC(ShiftLeftSmi_Baseline, BinaryOp_Baseline) \ 
TFC(ShiftRight_Baseline, BinaryOp_Baseline) \ + TFC(ShiftRightSmi_Baseline, BinaryOp_Baseline) \ TFC(ShiftRightLogical_Baseline, BinaryOp_Baseline) \ + TFC(ShiftRightLogicalSmi_Baseline, BinaryOp_Baseline) \ \ TFC(Add_WithFeedback, BinaryOp_WithFeedback) \ TFC(Subtract_WithFeedback, BinaryOp_WithFeedback) \ diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc index b12f1ec6ea5fa9..23350c3860a5fd 100644 --- a/deps/v8/src/builtins/builtins-function.cc +++ b/deps/v8/src/builtins/builtins-function.cc @@ -47,7 +47,7 @@ MaybeHandle CreateDynamicFunction(Isolate* isolate, IncrementalStringBuilder builder(isolate); builder.AppendCharacter('('); builder.AppendCString(token); - builder.AppendCString(" anonymous("); + builder.AppendCStringLiteral(" anonymous("); if (argc > 1) { for (int i = 1; i < argc; ++i) { if (i > 1) builder.AppendCharacter(','); @@ -60,14 +60,14 @@ MaybeHandle CreateDynamicFunction(Isolate* isolate, } builder.AppendCharacter('\n'); parameters_end_pos = builder.Length(); - builder.AppendCString(") {\n"); + builder.AppendCStringLiteral(") {\n"); if (argc > 0) { Handle body; ASSIGN_RETURN_ON_EXCEPTION( isolate, body, Object::ToString(isolate, args.at(argc)), Object); builder.AppendString(body); } - builder.AppendCString("\n})"); + builder.AppendCStringLiteral("\n})"); ASSIGN_RETURN_ON_EXCEPTION(isolate, source, builder.Finish(), Object); } diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index dc5a49640e8a48..4777983a4e605d 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -110,8 +110,9 @@ TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) { BIND(&tailcall_to_shared); // Tail call into code object on the SharedFunctionInfo. - TNode code = GetSharedFunctionInfoCode(shared); - TailCallJSCode(code, context, function, new_target, arg_count); + TNode code = GetSharedFunctionInfoCode(shared); + // TODO(v8:11880): call CodeT directly. + TailCallJSCode(FromCodeT(code), context, function, new_target, arg_count); } class WriteBarrierCodeStubAssembler : public CodeStubAssembler { @@ -247,7 +248,8 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { void GenerationalWriteBarrier(SaveFPRegsMode fp_mode) { Label incremental_wb(this), test_old_to_young_flags(this), - store_buffer_exit(this), store_buffer_incremental_wb(this), next(this); + remembered_set_only(this), remembered_set_and_incremental_wb(this), + next(this); // When incremental marking is not on, we skip cross generation pointer // checking here, because there are checks for @@ -257,7 +259,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { // stub, which serves as the cross generation checking. 
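The label renames in this write-barrier hunk (store_buffer_* to remembered_set_*) describe what the paths actually do. Reduced to ordinary C++, the dispatch performed by GenerationalWriteBarrier looks roughly like this (a simplified sketch; the real code branches on page flags under CSA labels rather than taking booleans):

    // Simplified sketch of the barrier dispatch; names mirror the new labels.
    enum class BarrierPath {
      kRememberedSetOnly,              // not marking: just record the old-to-new slot
      kIncrementalWb,                  // young object: incremental-marking work only
      kRememberedSetAndIncrementalWb,  // old object while marking: do both
    };

    BarrierPath ClassifyGenerationalBarrier(bool is_marking, bool object_is_young) {
      if (!is_marking) return BarrierPath::kRememberedSetOnly;
      return object_is_young ? BarrierPath::kIncrementalWb
                             : BarrierPath::kRememberedSetAndIncrementalWb;
    }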
auto slot = UncheckedParameter(WriteBarrierDescriptor::kSlotAddress); - Branch(IsMarking(), &test_old_to_young_flags, &store_buffer_exit); + Branch(IsMarking(), &test_old_to_young_flags, &remembered_set_only); BIND(&test_old_to_young_flags); { @@ -274,10 +276,11 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { UncheckedParameter(WriteBarrierDescriptor::kObject)); TNode object_is_young = IsPageFlagSet(object, MemoryChunk::kIsInYoungGenerationMask); - Branch(object_is_young, &incremental_wb, &store_buffer_incremental_wb); + Branch(object_is_young, &incremental_wb, + &remembered_set_and_incremental_wb); } - BIND(&store_buffer_exit); + BIND(&remembered_set_only); { TNode object = BitcastTaggedToWord( UncheckedParameter(WriteBarrierDescriptor::kObject)); @@ -285,7 +288,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler { Goto(&next); } - BIND(&store_buffer_incremental_wb); + BIND(&remembered_set_and_incremental_wb); { TNode object = BitcastTaggedToWord( UncheckedParameter(WriteBarrierDescriptor::kObject)); @@ -1325,7 +1328,7 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) { // On failure, tail call back to regular JavaScript by re-calling the given // function which has been reset to the compile lazy builtin. - // TODO(v8:11880): call CodeT instead. + // TODO(v8:11880): call CodeT directly. TNode code = FromCodeT(LoadJSFunctionCode(function)); TailCallJSCode(code, context, function, new_target, arg_count); } diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc index 6fd36dd8e06ef3..1d72a3ae32e450 100644 --- a/deps/v8/src/builtins/builtins-intl.cc +++ b/deps/v8/src/builtins/builtins-intl.cc @@ -238,7 +238,8 @@ Handle CreateBoundFunction(Isolate* isolate, Handle info = isolate->factory()->NewSharedFunctionInfoForBuiltin( - isolate->factory()->empty_string(), builtin, kNormalFunction); + isolate->factory()->empty_string(), builtin, + FunctionKind::kNormalFunction); info->set_internal_formal_parameter_count(JSParameterCount(len)); info->set_length(len); diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc index 11c11b00b03915..6656a37a1ced60 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.cc +++ b/deps/v8/src/builtins/builtins-iterator-gen.cc @@ -268,8 +268,11 @@ TNode IteratorBuiltinsAssembler::StringListFromIterable( // 2. Return ? IteratorClose(iteratorRecord, error). BIND(&if_exception); + TNode message = GetPendingMessage(); + SetPendingMessage(TheHoleConstant()); IteratorCloseOnException(context, iterator_record); - CallRuntime(Runtime::kReThrow, context, var_exception.value()); + CallRuntime(Runtime::kReThrowWithMessage, context, var_exception.value(), + message); Unreachable(); } } diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc index 2ef9aa073429ee..5e888ba563306d 100644 --- a/deps/v8/src/builtins/builtins-lazy-gen.cc +++ b/deps/v8/src/builtins/builtins-lazy-gen.cc @@ -15,18 +15,18 @@ namespace v8 { namespace internal { void LazyBuiltinsAssembler::GenerateTailCallToJSCode( - TNode code, TNode function) { + TNode code, TNode function) { auto argc = UncheckedParameter(Descriptor::kActualArgumentsCount); auto context = Parameter(Descriptor::kContext); auto new_target = Parameter(Descriptor::kNewTarget); - - TailCallJSCode(code, context, function, new_target, argc); + // TODO(v8:11880): call CodeT directly. 
+ TailCallJSCode(FromCodeT(code), context, function, new_target, argc); } void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode( Runtime::FunctionId function_id, TNode function) { auto context = Parameter(Descriptor::kContext); - TNode code = CAST(CallRuntime(function_id, context, function)); + TNode code = CAST(CallRuntime(function_id, context, function)); GenerateTailCallToJSCode(code, function); } @@ -34,7 +34,9 @@ void LazyBuiltinsAssembler::TailCallRuntimeIfMarkerEquals( TNode marker, OptimizationMarker expected_marker, Runtime::FunctionId function_id, TNode function) { Label no_match(this); - GotoIfNot(Word32Equal(marker, Uint32Constant(expected_marker)), &no_match); + GotoIfNot(Word32Equal(marker, + Uint32Constant(static_cast(expected_marker))), + &no_match); GenerateTailCallToReturnedCode(function_id, function); BIND(&no_match); } @@ -78,14 +80,13 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot( feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset); // Optimized code slot is a weak reference to CodeT object. - TNode code_t = CAST(GetHeapObjectAssumeWeak( + TNode optimized_code = CAST(GetHeapObjectAssumeWeak( maybe_optimized_code_entry, &heal_optimized_code_slot)); - TNode optimized_code = FromCodeT(code_t); // Check if the optimized code is marked for deopt. If it is, call the // runtime to clear it. TNode code_data_container = - CodeDataContainerFromCodeT(code_t); + CodeDataContainerFromCodeT(optimized_code); TNode code_kind_specific_flags = LoadObjectField( code_data_container, CodeDataContainer::kKindSpecificFlagsOffset); GotoIf(IsSetWord32( @@ -94,10 +95,8 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot( // Optimized code is good, get it into the closure and link the closure into // the optimized functions list, then tail call the optimized code. - StoreObjectField(function, JSFunction::kCodeOffset, - ToCodeT(optimized_code, code_data_container)); + StoreObjectField(function, JSFunction::kCodeOffset, optimized_code); Comment("MaybeTailCallOptimizedCodeSlot:: GenerateTailCallToJSCode"); - // TODO(v8:11880): call CodeT directly. GenerateTailCallToJSCode(optimized_code, function); // Optimized code slot contains deoptimized code or code is cleared and @@ -122,7 +121,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode function) { TNode shared = CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset)); TVARIABLE(Uint16T, sfi_data_type); - TNode sfi_code = + TNode sfi_code = GetSharedFunctionInfoCode(shared, &sfi_data_type, &compile_function); TNode feedback_cell_value = LoadFeedbackCellValue(function); @@ -146,14 +145,14 @@ void LazyBuiltinsAssembler::CompileLazy(TNode function) { // optimized Code object (we'd have tail-called it above). A usual case would // be the InterpreterEntryTrampoline to start executing existing bytecode. BIND(&maybe_use_sfi_code); - CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODE( + CSA_DCHECK(this, TaggedNotEqual(sfi_code, HeapConstant(BUILTIN_CODET( isolate(), CompileLazy)))); - StoreObjectField(function, JSFunction::kCodeOffset, ToCodeT(sfi_code)); + StoreObjectField(function, JSFunction::kCodeOffset, sfi_code); Label tailcall_code(this); Label baseline(this); - TVARIABLE(Code, code); + TVARIABLE(CodeT, code); // Check if we have baseline code. GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODET_TYPE), &baseline); @@ -163,7 +162,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode function) { BIND(&baseline); // Ensure we have a feedback vector. 
- code = Select( + code = Select( IsFeedbackVector(feedback_cell_value), [=]() { return sfi_code; }, [=]() { return CAST(CallRuntime(Runtime::kInstallBaselineCode, @@ -188,12 +187,9 @@ TF_BUILTIN(CompileLazy, LazyBuiltinsAssembler) { TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) { auto function = Parameter(Descriptor::kTarget); - Handle compile_lazy = BUILTIN_CODE(isolate(), CompileLazy); - TNode code = HeapConstant(compile_lazy); + TNode code = HeapConstant(BUILTIN_CODET(isolate(), CompileLazy)); // Set the code slot inside the JSFunction to CompileLazy. - // TODO(v8:11880): support embedding of CodeDataContainer constants. - StoreObjectField(function, JSFunction::kCodeOffset, ToCodeT(code)); - // TODO(v8:11880): call CodeT directly. + StoreObjectField(function, JSFunction::kCodeOffset, code); GenerateTailCallToJSCode(code, function); } diff --git a/deps/v8/src/builtins/builtins-lazy-gen.h b/deps/v8/src/builtins/builtins-lazy-gen.h index b51dcb58d45395..623811663e0008 100644 --- a/deps/v8/src/builtins/builtins-lazy-gen.h +++ b/deps/v8/src/builtins/builtins-lazy-gen.h @@ -17,7 +17,7 @@ class LazyBuiltinsAssembler : public CodeStubAssembler { explicit LazyBuiltinsAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - void GenerateTailCallToJSCode(TNode code, TNode function); + void GenerateTailCallToJSCode(TNode code, TNode function); void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id, TNode function); diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc index ab7dcf832ff10a..9edc8ce00c524a 100644 --- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc +++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc @@ -413,14 +413,23 @@ void MicrotaskQueueBuiltinsAssembler::EnterMicrotaskContext( TNode flag_data_offset = IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset + FlagStack::kDataOffset); + TNode flag_capacity_offset = + IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset + + FlagStack::kCapacityOffset); + TNode flag_size_offset = + IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset + + FlagStack::kSizeOffset); + // Ensure both stacks are in sync. 
+ USE(flag_capacity_offset); + CSA_DCHECK(this, + WordEqual(capacity, Load(hsi, flag_capacity_offset))); + CSA_DCHECK(this, WordEqual(size, Load(hsi, flag_size_offset))); + TNode flag_data = Load(hsi, flag_data_offset); StoreNoWriteBarrier(MachineRepresentation::kWord8, flag_data, size, BoolConstant(true)); - StoreNoWriteBarrier( - MachineType::PointerRepresentation(), hsi, - IntPtrConstant(HandleScopeImplementer::kIsMicrotaskContextOffset + - FlagStack::kSizeOffset), - new_size); + StoreNoWriteBarrier(MachineType::PointerRepresentation(), hsi, + flag_size_offset, new_size); Goto(&done); } @@ -449,13 +458,11 @@ void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext( IntPtrConstant(HandleScopeImplementer::kEnteredContextsOffset + ContextStack::kSizeOffset); -#ifdef ENABLE_VERIFY_CSA - { + if (DEBUG_BOOL) { TNode size = Load(hsi, size_offset); CSA_CHECK(this, IntPtrLessThan(IntPtrConstant(0), size)); CSA_CHECK(this, IntPtrLessThanOrEqual(saved_entered_context_count, size)); } -#endif StoreNoWriteBarrier(MachineType::PointerRepresentation(), hsi, size_offset, saved_entered_context_count); diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc index 390552836de6a8..ef89a1baddf583 100644 --- a/deps/v8/src/builtins/builtins-number-gen.cc +++ b/deps/v8/src/builtins/builtins-number-gen.cc @@ -74,6 +74,35 @@ DEF_BINOP(ShiftRight_Baseline, Generate_ShiftRightWithFeedback) DEF_BINOP(ShiftRightLogical_Baseline, Generate_ShiftRightLogicalWithFeedback) #undef DEF_BINOP +#define DEF_BINOP_RHS_SMI(Name, Generator) \ + TF_BUILTIN(Name, CodeStubAssembler) { \ + auto lhs = Parameter(Descriptor::kLeft); \ + auto rhs = Parameter(Descriptor::kRight); \ + auto slot = UncheckedParameter(Descriptor::kSlot); \ + \ + BinaryOpAssembler binop_asm(state()); \ + TNode result = binop_asm.Generator( \ + [&]() { return LoadContextFromBaseline(); }, lhs, rhs, slot, \ + [&]() { return LoadFeedbackVectorFromBaseline(); }, \ + UpdateFeedbackMode::kGuaranteedFeedback, true); \ + \ + Return(result); \ + } +DEF_BINOP_RHS_SMI(AddSmi_Baseline, Generate_AddWithFeedback) +DEF_BINOP_RHS_SMI(SubtractSmi_Baseline, Generate_SubtractWithFeedback) +DEF_BINOP_RHS_SMI(MultiplySmi_Baseline, Generate_MultiplyWithFeedback) +DEF_BINOP_RHS_SMI(DivideSmi_Baseline, Generate_DivideWithFeedback) +DEF_BINOP_RHS_SMI(ModulusSmi_Baseline, Generate_ModulusWithFeedback) +DEF_BINOP_RHS_SMI(ExponentiateSmi_Baseline, Generate_ExponentiateWithFeedback) +DEF_BINOP_RHS_SMI(BitwiseOrSmi_Baseline, Generate_BitwiseOrWithFeedback) +DEF_BINOP_RHS_SMI(BitwiseXorSmi_Baseline, Generate_BitwiseXorWithFeedback) +DEF_BINOP_RHS_SMI(BitwiseAndSmi_Baseline, Generate_BitwiseAndWithFeedback) +DEF_BINOP_RHS_SMI(ShiftLeftSmi_Baseline, Generate_ShiftLeftWithFeedback) +DEF_BINOP_RHS_SMI(ShiftRightSmi_Baseline, Generate_ShiftRightWithFeedback) +DEF_BINOP_RHS_SMI(ShiftRightLogicalSmi_Baseline, + Generate_ShiftRightLogicalWithFeedback) +#undef DEF_BINOP_RHS_SMI + #define DEF_UNOP(Name, Generator) \ TF_BUILTIN(Name, CodeStubAssembler) { \ auto value = Parameter(Descriptor::kValue); \ diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc index 3e56df803ae030..e8a880545347ea 100644 --- a/deps/v8/src/builtins/builtins-object-gen.cc +++ b/deps/v8/src/builtins/builtins-object-gen.cc @@ -152,12 +152,14 @@ TNode ObjectEntriesValuesBuiltinsAssembler::IsPropertyEnumerable( TNode ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindAccessor( TNode kind) { - return Word32Equal(kind, 
Int32Constant(PropertyKind::kAccessor));
+  return Word32Equal(kind,
+                     Int32Constant(static_cast<int32_t>(PropertyKind::kAccessor)));
 }
 
 TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData(
     TNode<Uint32T> kind) {
-  return Word32Equal(kind, Int32Constant(PropertyKind::kData));
+  return Word32Equal(kind,
+                     Int32Constant(static_cast<int32_t>(PropertyKind::kData)));
 }
 
 void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 0a75e1bebd4457..38dc47a122f5d3 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -47,7 +47,13 @@ TNode<IntPtrT> RegExpBuiltinsAssembler::IntPtrZero() {
 
 // If code is a builtin, return the address to the (possibly embedded) builtin
 // code entry, otherwise return the entry of the code object itself.
-TNode<RawPtrT> RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode<Code> code) {
+TNode<RawPtrT> RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode<CodeT> code) {
+  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+    // When external code space is enabled we can load the entry point directly
+    // from the CodeT object.
+    return GetCodeEntry(code);
+  }
+
   TVARIABLE(RawPtrT, var_result);
 
   Label if_code_is_off_heap(this), out(this);
@@ -553,8 +559,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
 #endif
 
   GotoIf(TaggedIsSmi(var_code.value()), &runtime);
-  // TODO(v8:11880): avoid roundtrips between cdc and code.
-  TNode<Code> code = FromCodeT(CAST(var_code.value()));
+  TNode<CodeT> code = CAST(var_code.value());
 
   Label if_success(this), if_exception(this, Label::kDeferred);
   {
@@ -618,7 +623,6 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
     MachineType arg8_type = type_tagged;
     TNode<JSRegExp> arg8 = regexp;
 
-    // TODO(v8:11880): avoid roundtrips between cdc and code.
     TNode<RawPtrT> code_entry = LoadCodeObjectEntry(code);
 
     // AIX uses function descriptors on CFunction calls. code_entry in this case
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index e55af65f81b29b..ef606463143a6d 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -21,7 +21,7 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
   TNode<Smi> SmiZero();
   TNode<IntPtrT> IntPtrZero();
 
-  TNode<RawPtrT> LoadCodeObjectEntry(TNode<Code> code);
+  TNode<RawPtrT> LoadCodeObjectEntry(TNode<CodeT> code);
 
   // Allocate either a JSRegExpResult or a JSRegExpResultWithIndices (depending
   // on has_indices) with the given length (the number of captures, including
diff --git a/deps/v8/src/builtins/builtins-string.tq b/deps/v8/src/builtins/builtins-string.tq
index ab2cf2696d91d3..769b3223bcb15b 100644
--- a/deps/v8/src/builtins/builtins-string.tq
+++ b/deps/v8/src/builtins/builtins-string.tq
@@ -253,3 +253,28 @@ builtin StringCharAt(implicit context: Context)(
     return StringFromSingleCharCode(code);
   }
 }
+
+// Check two slices for equal content.
+// Checking from both ends simultaneously allows us to detect differences
+// quickly even when the slices share a prefix or a suffix.
+macro EqualContent<T1: type, T2: type>(
+    a: ConstSlice<T1>, b: ConstSlice<T2>): bool {
+  const length = a.length;
+  if (length != b.length) return false;
+  if (a.GCUnsafeStartPointer() == b.GCUnsafeStartPointer()) return true;
+  // This creates references to the first and last characters of the slices,
+  // which can be out-of-bounds if the slices are empty. But in this case,
+  // the references will never be accessed.
+  let aFirst = a.UncheckedAtIndex(0);
+  let bFirst = b.UncheckedAtIndex(0);
+  let aLast = a.UncheckedAtIndex(length - 1);
+  let bLast = b.UncheckedAtIndex(length - 1);
+  while (aFirst.offset <= aLast.offset) {
+    if (*aFirst != *bFirst || *aLast != *bLast) return false;
+    aFirst = unsafe::AddOffset(aFirst, 1);
+    aLast = unsafe::AddOffset(aLast, -1);
+    bFirst = unsafe::AddOffset(bFirst, 1);
+    bLast = unsafe::AddOffset(bLast, -1);
+  }
+  return true;
+}
diff --git a/deps/v8/src/builtins/builtins-temporal.cc b/deps/v8/src/builtins/builtins-temporal.cc
index bbffa68a1df031..d25c769ea47ab1 100644
--- a/deps/v8/src/builtins/builtins-temporal.cc
+++ b/deps/v8/src/builtins/builtins-temporal.cc
@@ -11,7 +11,7 @@ namespace v8 {
 namespace internal {
 
 #define TO_BE_IMPLEMENTED(id)   \
-  BUILTIN(id) {                 \
+  BUILTIN_NO_RCS(id) {          \
     HandleScope scope(isolate); \
     UNIMPLEMENTED();            \
   }
diff --git a/deps/v8/src/builtins/builtins-trace.cc b/deps/v8/src/builtins/builtins-trace.cc
index 0fd7d57e36aafd..5403110e879e2c 100644
--- a/deps/v8/src/builtins/builtins-trace.cc
+++ b/deps/v8/src/builtins/builtins-trace.cc
@@ -61,7 +61,7 @@ class MaybeUtf8 {
  private:
   void AllocateSufficientSpace(int len) {
     if (len + 1 > MAX_STACK_LENGTH) {
-      allocated_.reset(new uint8_t[len + 1]);
+      allocated_ = std::make_unique<uint8_t[]>(len + 1);
       buf_ = allocated_.get();
     }
   }
@@ -72,7 +72,7 @@ class MaybeUtf8 {
   // the MAX_STACK_LENGTH should be more than enough.
   uint8_t* buf_;
   uint8_t data_[MAX_STACK_LENGTH];
-  std::unique_ptr<uint8_t> allocated_;
+  std::unique_ptr<uint8_t[]> allocated_;
 };
 
 #if !defined(V8_USE_PERFETTO)
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 60f26c63dc1adf..00b040f03f556d 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -33,7 +33,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
 // elements.
 // TODO(bmeurer,v8:4153): Rename this and maybe fix up the implementation a bit.
 TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
-    TNode<Context> context, TNode<UintPtrT> byte_length) {
+    TNode<Context> context) {
   TNode<NativeContext> native_context = LoadNativeContext(context);
   TNode<Map> map =
       CAST(LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX));
@@ -49,7 +49,7 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
   // Setup the ArrayBuffer.
   //  - Set BitField to 0.
   //  - Set IsExternal and IsDetachable bits of BitFieldSlot.
-  //  - Set the byte_length field to byte_length.
+  //  - Set the byte_length field to zero.
   //  - Set backing_store to null/Smi(0).
   //  - Set extension to null.
   //  - Set all embedder fields to Smi(0).
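The EqualContent macro added in the builtins-string.tq hunk above walks inward from both ends of the two slices. The same idea in self-contained C++, for illustration (a sketch over raw arrays; the Torque version iterates unsafe slice references instead):

    #include <cstddef>

    // Two-ended equality scan, mirroring EqualContent above.
    template <typename T>
    bool EqualContentSketch(const T* a, const T* b, std::size_t length) {
      if (length == 0) return true;
      std::size_t lo = 0;
      std::size_t hi = length - 1;
      // Scanning from both ends finds a difference near either end quickly,
      // e.g. for strings that share a long common prefix or suffix.
      while (lo <= hi) {
        if (a[lo] != b[lo] || a[hi] != b[hi]) return false;
        ++lo;
        if (hi == 0) break;  // guard against unsigned wrap-around
        --hi;
      }
      return true;
    }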
@@ -64,9 +64,9 @@ TNode TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( Int32Constant(bitfield_value)); StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset, - byte_length); - StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset, - PointerConstant(nullptr)); + UintPtrConstant(0)); + StoreCagedPointerToObject(buffer, JSArrayBuffer::kBackingStoreOffset, + EmptyBackingStoreBufferConstant()); StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset, IntPtrConstant(0)); for (int offset = JSArrayBuffer::kHeaderSize; @@ -127,7 +127,8 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) { LoadJSArrayBufferViewBuffer(receiver_array); Label variable_length(this), normal(this); - Branch(IsVariableLengthTypedArray(receiver_array), &variable_length, &normal); + Branch(IsVariableLengthJSArrayBufferView(receiver_array), &variable_length, + &normal); BIND(&variable_length); { Return(ChangeUintPtrToTagged(LoadVariableLengthJSTypedArrayByteLength( @@ -155,8 +156,8 @@ TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) { // Default to zero if the {receiver}s buffer was detached / out of bounds. Label detached_or_oob(this), not_detached_nor_oob(this); - IsJSTypedArrayDetachedOrOutOfBounds(CAST(receiver), &detached_or_oob, - ¬_detached_nor_oob); + IsJSArrayBufferViewDetachedOrOutOfBounds(CAST(receiver), &detached_or_oob, + ¬_detached_nor_oob); BIND(&detached_or_oob); Return(ChangeUintPtrToTagged(UintPtrConstant(0))); @@ -436,10 +437,10 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr( TNode ptr_compr_cage_base = IntPtrSub(full_base, Signed(ChangeUint32ToWord(compressed_base))); // Add JSTypedArray::ExternalPointerCompensationForOnHeapArray() to offset. + // See JSTypedArray::AddExternalPointerCompensationForDeserialization(). DCHECK_EQ( isolate()->cage_base(), JSTypedArray::ExternalPointerCompensationForOnHeapArray(isolate())); - // See JSTypedArray::SetOnHeapDataPtr() for details. offset = Unsigned(IntPtrAdd(offset, ptr_compr_cage_base)); } diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h index 2807745ecb9f33..2df46e499b012b 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.h +++ b/deps/v8/src/builtins/builtins-typed-array-gen.h @@ -21,8 +21,7 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler { TNode map, TNode length, TNode byte_offset); - TNode AllocateEmptyOnHeapBuffer(TNode context, - TNode byte_length); + TNode AllocateEmptyOnHeapBuffer(TNode context); TNode LoadMapForType(TNode array); TNode IsMockArrayBufferAllocatorFlag(); diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h index e219aec65d9f5e..0fdca8a0895899 100644 --- a/deps/v8/src/builtins/builtins-utils.h +++ b/deps/v8/src/builtins/builtins-utils.h @@ -79,8 +79,7 @@ class BuiltinArguments : public JavaScriptArguments { // through the BuiltinArguments object args. // TODO(cbruni): add global flag to check whether any tracing events have been // enabled. 
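The TypedArrayPrototypeByteLength change above routes views over resizable buffers through a recompute path instead of the cached field. As a rough host-side model (a simplification that ignores element-size rounding and fixed-length views over resizable buffers):

    #include <cstddef>

    // Rough model of byteLength for JSArrayBufferViews (sketch only).
    std::size_t ViewByteLength(bool variable_length, std::size_t cached_byte_length,
                               std::size_t buffer_byte_length, std::size_t byte_offset) {
      // Ordinary views serve the cached field (the fast path in the builtin).
      if (!variable_length) return cached_byte_length;
      // Length-tracking views recompute from the current buffer size; a view
      // whose offset no longer fits reports zero (detached/out-of-bounds path).
      return byte_offset <= buffer_byte_length ? buffer_byte_length - byte_offset
                                               : 0;
    }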
-#ifdef V8_RUNTIME_CALL_STATS -#define BUILTIN(name) \ +#define BUILTIN_RCS(name) \ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ BuiltinArguments args, Isolate* isolate); \ \ @@ -106,8 +105,7 @@ class BuiltinArguments : public JavaScriptArguments { V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ BuiltinArguments args, Isolate* isolate) -#else // V8_RUNTIME_CALL_STATS -#define BUILTIN(name) \ +#define BUILTIN_NO_RCS(name) \ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ BuiltinArguments args, Isolate* isolate); \ \ @@ -120,6 +118,11 @@ class BuiltinArguments : public JavaScriptArguments { \ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \ BuiltinArguments args, Isolate* isolate) + +#ifdef V8_RUNTIME_CALL_STATS +#define BUILTIN(name) BUILTIN_RCS(name) +#else // V8_RUNTIME_CALL_STATS +#define BUILTIN(name) BUILTIN_NO_RCS(name) #endif // V8_RUNTIME_CALL_STATS // ---------------------------------------------------------------------------- diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc index eb9311d0c6236c..66746c3b945b24 100644 --- a/deps/v8/src/builtins/builtins-wasm-gen.cc +++ b/deps/v8/src/builtins/builtins-wasm-gen.cc @@ -30,10 +30,10 @@ TNode WasmBuiltinsAssembler::LoadTablesFromInstance( WasmInstanceObject::kTablesOffset); } -TNode WasmBuiltinsAssembler::LoadExternalFunctionsFromInstance( +TNode WasmBuiltinsAssembler::LoadInternalFunctionsFromInstance( TNode instance) { return LoadObjectField( - instance, WasmInstanceObject::kWasmExternalFunctionsOffset); + instance, WasmInstanceObject::kWasmInternalFunctionsOffset); } TNode WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstance( diff --git a/deps/v8/src/builtins/builtins-wasm-gen.h b/deps/v8/src/builtins/builtins-wasm-gen.h index ccf5bae7a150fe..1804957ef164cb 100644 --- a/deps/v8/src/builtins/builtins-wasm-gen.h +++ b/deps/v8/src/builtins/builtins-wasm-gen.h @@ -22,7 +22,7 @@ class WasmBuiltinsAssembler : public CodeStubAssembler { TNode LoadTablesFromInstance(TNode instance); - TNode LoadExternalFunctionsFromInstance( + TNode LoadInternalFunctionsFromInstance( TNode instance); TNode LoadManagedObjectMapsFromInstance( diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc index af1e7490b0eb48..561bca430768ba 100644 --- a/deps/v8/src/builtins/builtins.cc +++ b/deps/v8/src/builtins/builtins.cc @@ -107,7 +107,7 @@ void Builtins::TearDown() { initialized_ = false; } const char* Builtins::Lookup(Address pc) { // Off-heap pc's can be looked up through binary search. - Builtin builtin = InstructionStream::TryLookupCode(isolate_, pc); + Builtin builtin = OffHeapInstructionStream::TryLookupCode(isolate_, pc); if (Builtins::IsBuiltinId(builtin)) return name(builtin); // May be called during initialization (disassembler). @@ -194,6 +194,39 @@ Handle Builtins::code_handle(Builtin builtin) { return Handle(location); } +FullObjectSlot Builtins::builtin_code_data_container_slot(Builtin builtin) { + CHECK(V8_EXTERNAL_CODE_SPACE_BOOL); + Address* location = + &isolate_->builtin_code_data_container_table()[Builtins::ToInt(builtin)]; + return FullObjectSlot(location); +} + +void Builtins::set_codet(Builtin builtin, CodeT code) { + CHECK(V8_EXTERNAL_CODE_SPACE_BOOL); + // TODO(v8:11880): add DCHECK_EQ(builtin, code.builtin_id()); once CodeT + // has respective field. + DCHECK(Internals::HasHeapObjectTag(code.ptr())); + // The given builtin may be uninitialized thus we cannot check its type here. 
+ isolate_->builtin_code_data_container_table()[Builtins::ToInt(builtin)] = + code.ptr(); +} + +CodeT Builtins::codet(Builtin builtin) { + Address* table = V8_EXTERNAL_CODE_SPACE_BOOL + ? isolate_->builtin_code_data_container_table() + : isolate_->builtin_table(); + Address ptr = table[Builtins::ToInt(builtin)]; + return CodeT::cast(Object(ptr)); +} + +Handle Builtins::codet_handle(Builtin builtin) { + Address* table = V8_EXTERNAL_CODE_SPACE_BOOL + ? isolate_->builtin_code_data_container_table() + : isolate_->builtin_table(); + Address* location = &table[Builtins::ToInt(builtin)]; + return Handle(location); +} + // static int Builtins::GetStackParameterCount(Builtin builtin) { DCHECK(Builtins::KindOf(builtin) == TFJ); @@ -296,6 +329,17 @@ bool Builtins::IsBuiltinHandle(Handle maybe_code, return true; } +bool Builtins::IsBuiltinCodeDataContainerHandle(Handle maybe_code, + Builtin* builtin) const { + Address* handle_location = maybe_code.location(); + Address* builtins_table = isolate_->builtin_code_data_container_table(); + if (handle_location < builtins_table) return false; + Address* builtins_table_end = &builtins_table[Builtins::kBuiltinCount]; + if (handle_location >= builtins_table_end) return false; + *builtin = FromInt(static_cast(handle_location - builtins_table)); + return true; +} + // static bool Builtins::IsIsolateIndependentBuiltin(const Code code) { const Builtin builtin = code.builtin_id(); @@ -373,7 +417,7 @@ class OffHeapTrampolineGenerator { FrameScope scope(&masm_, StackFrame::NO_FRAME_TYPE); if (type == TrampolineType::kJump) { masm_.CodeEntry(); - masm_.JumpToInstructionStream(off_heap_entry); + masm_.JumpToOffHeapInstructionStream(off_heap_entry); } else { DCHECK_EQ(type, TrampolineType::kAbort); masm_.Trap(); diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h index 79e4da840cd09b..ddb50d3230b885 100644 --- a/deps/v8/src/builtins/builtins.h +++ b/deps/v8/src/builtins/builtins.h @@ -36,6 +36,13 @@ static constexpr T FirstFromVarArgs(T x, ...) noexcept { #define BUILTIN_CODE(isolate, name) \ (isolate)->builtins()->code_handle(i::Builtin::k##name) +#ifdef V8_EXTERNAL_CODE_SPACE +#define BUILTIN_CODET(isolate, name) \ + (isolate)->builtins()->codet_handle(i::Builtin::k##name) +#else +#define BUILTIN_CODET(isolate, name) BUILTIN_CODE(isolate, name) +#endif // V8_EXTERNAL_CODE_SPACE + enum class Builtin : int32_t { kNoBuiltinId = -1, #define DEF_ENUM(Name, ...) k##Name, @@ -158,10 +165,14 @@ class Builtins { // Used by CreateOffHeapTrampolines in isolate.cc. void set_code(Builtin builtin, Code code); + void set_codet(Builtin builtin, CodeT code); V8_EXPORT_PRIVATE Code code(Builtin builtin); V8_EXPORT_PRIVATE Handle code_handle(Builtin builtin); + V8_EXPORT_PRIVATE CodeT codet(Builtin builtin); + V8_EXPORT_PRIVATE Handle codet_handle(Builtin builtin); + static CallInterfaceDescriptor CallInterfaceDescriptorFor(Builtin builtin); V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate, Builtin builtin); @@ -192,6 +203,11 @@ class Builtins { // by handle location. Similar to Heap::IsRootHandle. bool IsBuiltinHandle(Handle maybe_code, Builtin* index) const; + // Similar to IsBuiltinHandle but for respective CodeDataContainer handle. + // Can be used only when external code space is enabled. + bool IsBuiltinCodeDataContainerHandle(Handle maybe_code, + Builtin* index) const; + // True, iff the given code object is a builtin with off-heap embedded code. 
static bool IsIsolateIndependentBuiltin(const Code code); @@ -280,6 +296,8 @@ class Builtins { FullObjectSlot builtin_slot(Builtin builtin); // Returns given builtin's slot in the tier0 builtin table. FullObjectSlot builtin_tier0_slot(Builtin builtin); + // Returns given builtin's slot in the builtin code data container table. + FullObjectSlot builtin_code_data_container_slot(Builtin builtin); private: static void Generate_CallFunction(MacroAssembler* masm, diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq index c0d311a825f159..30444ddadc0ddc 100644 --- a/deps/v8/src/builtins/collections.tq +++ b/deps/v8/src/builtins/collections.tq @@ -33,12 +33,9 @@ macro LoadKeyValuePairNoSideEffects(implicit context: Context)(o: JSAny): } } } - case (JSReceiver): { + case (JSAny): { goto MayHaveSideEffects; } - case (o: JSAny): deferred { - ThrowTypeError(MessageTemplate::kIteratorValueNotAnObject, o); - } } } @@ -48,6 +45,8 @@ transitioning macro LoadKeyValuePair(implicit context: Context)(o: JSAny): try { return LoadKeyValuePairNoSideEffects(o) otherwise Generic; } label Generic { + const o = Cast(o) + otherwise ThrowTypeError(MessageTemplate::kIteratorValueNotAnObject, o); return KeyValuePair{ key: GetProperty(o, Convert(0)), value: GetProperty(o, Convert(1)) diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq index 2a36badfb74c87..64c81ca572342e 100644 --- a/deps/v8/src/builtins/convert.tq +++ b/deps/v8/src/builtins/convert.tq @@ -195,6 +195,12 @@ Convert(i: uint16): int32 { Convert(i: char16|char8): int32 { return Signed(Convert(i)); } +Convert(i: char16): intptr { + return Convert(i); +} +Convert(i: char8): intptr { + return Convert(i); +} Convert(i: uint31): int32 { return Signed(Convert(i)); } diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq index 4acc13b2239f0b..9bc4bd5f2e53dc 100644 --- a/deps/v8/src/builtins/data-view.tq +++ b/deps/v8/src/builtins/data-view.tq @@ -84,15 +84,32 @@ javascript builtin DataViewPrototypeGetBuffer( return dataView.buffer; } +extern macro IsJSArrayBufferViewDetachedOrOutOfBounds(JSArrayBufferView): + never labels DetachedOrOutOfBounds, NotDetachedNorOutOfBounds; +extern macro LoadVariableLengthJSArrayBufferViewByteLength( + JSArrayBufferView, JSArrayBuffer): uintptr labels DetachedOrOutOfBounds; + // ES6 section 24.2.4.2 get DataView.prototype.byteLength javascript builtin DataViewPrototypeGetByteLength( js-implicit context: NativeContext, receiver: JSAny)(...arguments): Number { const dataView: JSDataView = ValidateDataView(context, receiver, 'get DataView.prototype.byte_length'); - if (WasDetached(dataView)) { - ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameByteLength); + if (IsVariableLengthJSArrayBufferView(dataView)) { + try { + const byteLength = LoadVariableLengthJSArrayBufferViewByteLength( + dataView, dataView.buffer) otherwise DetachedOrOutOfBounds; + return Convert(byteLength); + } label DetachedOrOutOfBounds { + ThrowTypeError( + MessageTemplate::kDetachedOperation, kBuiltinNameByteLength); + } + } else { + if (WasDetached(dataView)) { + ThrowTypeError( + MessageTemplate::kDetachedOperation, kBuiltinNameByteLength); + } + return Convert(dataView.byte_length); } - return Convert(dataView.byte_length); } // ES6 section 24.2.4.3 get DataView.prototype.byteOffset @@ -100,10 +117,14 @@ javascript builtin DataViewPrototypeGetByteOffset( js-implicit context: NativeContext, receiver: JSAny)(...arguments): Number { const dataView: JSDataView = 
ValidateDataView(context, receiver, 'get DataView.prototype.byte_offset'); - if (WasDetached(dataView)) { + try { + IsJSArrayBufferViewDetachedOrOutOfBounds(dataView) + otherwise DetachedOrOutOfBounds, NotDetachedNorOutOfBounds; + } label DetachedOrOutOfBounds { ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameByteOffset); + } label NotDetachedNorOutOfBounds { + return Convert(dataView.byte_offset); } - return Convert(dataView.byte_offset); } extern macro BitcastInt32ToFloat32(uint32): float32; @@ -373,28 +394,40 @@ transitioning macro DataViewGet( // 5. Let buffer be view.[[ViewedArrayBuffer]]. const buffer: JSArrayBuffer = dataView.buffer; - // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. - if (IsDetachedBuffer(buffer)) { + // 6. Let getBufferByteLength be + // MakeIdempotentArrayBufferByteLengthGetter(Unordered). + // 7. If IsViewOutOfBounds(view, getBufferByteLength) is true, throw a + // TypeError exception. + try { + IsJSArrayBufferViewDetachedOrOutOfBounds(dataView) + otherwise DetachedOrOutOfBounds, NotDetachedNorOutOfBounds; + } label DetachedOrOutOfBounds { ThrowTypeError( MessageTemplate::kDetachedOperation, MakeDataViewGetterNameString(kind)); - } + } label NotDetachedNorOutOfBounds {} - // 7. Let viewOffset be view.[[ByteOffset]]. + // 8. Let viewOffset be view.[[ByteOffset]]. const viewOffset: uintptr = dataView.byte_offset; - // 8. Let viewSize be view.[[ByteLength]]. - const viewSize: uintptr = dataView.byte_length; + // 9. Let viewSize be GetViewByteLength(view, getBufferByteLength). + let viewSize: uintptr; + if (dataView.bit_field.is_length_tracking) { + viewSize = LoadVariableLengthJSArrayBufferViewByteLength( + dataView, dataView.buffer) otherwise unreachable; + } else { + viewSize = dataView.byte_length; + } - // 9. Let elementSize be the Element Size value specified in Table 62 + // 10. Let elementSize be the Element Size value specified in Table 62 // for Element Type type. const elementSize: uintptr = DataViewElementSize(kind); - // 10. If getIndex + elementSize > viewSize, throw a RangeError exception. + // 11. If getIndex + elementSize > viewSize, throw a RangeError exception. CheckIntegerIndexAdditionOverflow(getIndex, elementSize, viewSize) otherwise RangeError; - // 11. Let bufferIndex be getIndex + viewOffset. + // 12. Let bufferIndex be getIndex + viewOffset. const bufferIndex: uintptr = getIndex + viewOffset; if constexpr (kind == ElementsKind::UINT8_ELEMENTS) { @@ -654,9 +687,6 @@ transitioning macro DataViewSet( // 3. Let getIndex be ? ToIndex(requestIndex). const getIndex: uintptr = ToIndex(requestIndex) otherwise RangeError; - const littleEndian: bool = ToBoolean(requestedLittleEndian); - const buffer: JSArrayBuffer = dataView.buffer; - let numberValue: Numeric; if constexpr ( kind == ElementsKind::BIGUINT64_ELEMENTS || @@ -669,28 +699,54 @@ transitioning macro DataViewSet( numberValue = ToNumber(context, value); } + // 6. Set isLittleEndian to !ToBoolean(isLittleEndian). + const littleEndian: bool = ToBoolean(requestedLittleEndian); + + // 7. Let buffer be view.[[ViewedArrayBuffer]]. + const buffer: JSArrayBuffer = dataView.buffer; + // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. if (IsDetachedBuffer(buffer)) { ThrowTypeError( MessageTemplate::kDetachedOperation, MakeDataViewSetterNameString(kind)); } + // 8. Let getBufferByteLength be + // MakeIdempotentArrayBufferByteLengthGetter(Unordered). + // 9. 
NOTE: Bounds checking is not a synchronizing operation when view's + // backing buffer is a growable SharedArrayBuffer. + // 10. If IsViewOutOfBounds(view, getBufferByteLength) is true, throw a + // TypeError exception. + try { + IsJSArrayBufferViewDetachedOrOutOfBounds(dataView) + otherwise DetachedOrOutOfBounds, NotDetachedNorOutOfBounds; + } label DetachedOrOutOfBounds { + ThrowTypeError( + MessageTemplate::kDetachedOperation, + MakeDataViewGetterNameString(kind)); + } label NotDetachedNorOutOfBounds {} - // 9. Let viewOffset be view.[[ByteOffset]]. + // 11. Let viewOffset be view.[[ByteOffset]]. const viewOffset: uintptr = dataView.byte_offset; - // 10. Let viewSize be view.[[ByteLength]]. - const viewSize: uintptr = dataView.byte_length; + // 12. Let viewSize be GetViewByteLength(view, getBufferByteLength). + let viewSize: uintptr; + if (dataView.bit_field.is_length_tracking) { + viewSize = LoadVariableLengthJSArrayBufferViewByteLength( + dataView, dataView.buffer) otherwise unreachable; + } else { + viewSize = dataView.byte_length; + } - // 11. Let elementSize be the Element Size value specified in Table 62 + // 13. Let elementSize be the Element Size value specified in Table 62 // for Element Type type. const elementSize: uintptr = DataViewElementSize(kind); - // 12. If getIndex + elementSize > viewSize, throw a RangeError exception. + // 14. If getIndex + elementSize > viewSize, throw a RangeError exception. CheckIntegerIndexAdditionOverflow(getIndex, elementSize, viewSize) otherwise RangeError; - // 13. Let bufferIndex be getIndex + viewOffset. + // 15. Let bufferIndex be getIndex + viewOffset. const bufferIndex: uintptr = getIndex + viewOffset; if constexpr ( diff --git a/deps/v8/src/builtins/finalization-registry.tq b/deps/v8/src/builtins/finalization-registry.tq index 72db154a6f50fd..38cae7ed20b9ff 100644 --- a/deps/v8/src/builtins/finalization-registry.tq +++ b/deps/v8/src/builtins/finalization-registry.tq @@ -79,10 +79,10 @@ FinalizationRegistryCleanupLoop(implicit context: Context)( case (weakCell: WeakCell): { try { Call(context, callback, Undefined, weakCell.holdings); - } catch (e) { + } catch (e, message) { runtime::ShrinkFinalizationRegistryUnregisterTokenMap( context, finalizationRegistry); - ReThrow(context, e); + ReThrowWithMessage(context, e, message); } } } diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index aed3333c71ecbf..3beff0d53fdf29 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -214,8 +214,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kFlagsOffset)); __ DecodeField(eax); - __ JumpIfIsInRange(eax, kDefaultDerivedConstructor, kDerivedConstructor, ecx, - ¬_create_implicit_receiver, Label::kNear); + __ JumpIfIsInRange( + eax, static_cast(FunctionKind::kDefaultDerivedConstructor), + static_cast(FunctionKind::kDerivedConstructor), ecx, + ¬_create_implicit_receiver, Label::kNear); // If not derived class constructor: Allocate the new receiver object. 
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, @@ -837,7 +839,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, Runtime::FunctionId function_id) { ASM_CODE_COMMENT(masm); Label no_match; - __ cmp(actual_marker, expected_marker); + __ cmp(actual_marker, static_cast(expected_marker)); __ j(not_equal, &no_match, Label::kNear); GenerateTailCallToReturnedCode(masm, function_id); __ bind(&no_match); diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq index 0511c0aa690cb7..ad030a1e9c5b78 100644 --- a/deps/v8/src/builtins/iterator.tq +++ b/deps/v8/src/builtins/iterator.tq @@ -130,7 +130,7 @@ transitioning macro IteratorCloseOnException(implicit context: Context)( // c. Set innerResult to Call(return, iterator). // If an exception occurs, the original exception remains bound Call(context, method, iterator.object); - } catch (_e) { + } catch (_e, _message) { // Swallow the exception. } diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc index 30632232270ffd..2e533f6afd4df1 100644 --- a/deps/v8/src/builtins/loong64/builtins-loong64.cc +++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc @@ -186,8 +186,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Ld_d(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset)); __ DecodeField(t2); - __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor, - ¬_create_implicit_receiver); + __ JumpIfIsInRange( + t2, static_cast(FunctionKind::kDefaultDerivedConstructor), + static_cast(FunctionKind::kDerivedConstructor), + ¬_create_implicit_receiver); // If not derived class constructor: Allocate the new receiver object. __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, t2, @@ -871,7 +873,8 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, OptimizationMarker expected_marker, Runtime::FunctionId function_id) { Label no_match; - __ Branch(&no_match, ne, actual_marker, Operand(expected_marker)); + __ Branch(&no_match, ne, actual_marker, + Operand(static_cast(expected_marker))); GenerateTailCallToReturnedCode(masm, function_id); __ bind(&no_match); } @@ -2298,7 +2301,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // ----------------------------------- - __ AssertFunction(a1); + __ AssertCallableFunction(a1); Label class_constructor; __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc index 74493abad3228b..c1b1b4711dddeb 100644 --- a/deps/v8/src/builtins/mips/builtins-mips.cc +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -185,8 +185,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset)); __ DecodeField(t2); - __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor, - ¬_create_implicit_receiver); + __ JumpIfIsInRange( + t2, static_cast(FunctionKind::kDefaultDerivedConstructor), + static_cast(FunctionKind::kDerivedConstructor), + ¬_create_implicit_receiver); // If not derived class constructor: Allocate the new receiver object. 
__ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, @@ -865,7 +867,8 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, Runtime::FunctionId function_id) { ASM_CODE_COMMENT(masm); Label no_match; - __ Branch(&no_match, ne, actual_marker, Operand(expected_marker)); + __ Branch(&no_match, ne, actual_marker, + Operand(static_cast(expected_marker))); GenerateTailCallToReturnedCode(masm, function_id); __ bind(&no_match); } @@ -2241,7 +2244,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // ----------------------------------- - __ AssertFunction(a1); + __ AssertCallableFunction(a1); Label class_constructor; __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index a357877acf7060..2ad2fae5db5fae 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -186,8 +186,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset)); __ DecodeField(t2); - __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor, - ¬_create_implicit_receiver); + __ JumpIfIsInRange( + t2, static_cast(FunctionKind::kDefaultDerivedConstructor), + static_cast(FunctionKind::kDerivedConstructor), + ¬_create_implicit_receiver); // If not derived class constructor: Allocate the new receiver object. __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, @@ -876,7 +878,8 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, OptimizationMarker expected_marker, Runtime::FunctionId function_id) { Label no_match; - __ Branch(&no_match, ne, actual_marker, Operand(expected_marker)); + __ Branch(&no_match, ne, actual_marker, + Operand(static_cast(expected_marker))); GenerateTailCallToReturnedCode(masm, function_id); __ bind(&no_match); } @@ -2294,7 +2297,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // ----------------------------------- - __ AssertFunction(a1); + __ AssertCallableFunction(a1); Label class_constructor; __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq index 34ab73148f006b..cb43a1ea2a4d4e 100644 --- a/deps/v8/src/builtins/object-fromentries.tq +++ b/deps/v8/src/builtins/object-fromentries.tq @@ -69,9 +69,9 @@ ObjectFromEntries( CreateDataProperty(result, pair.key, pair.value); } return result; - } catch (e) deferred { + } catch (e, message) deferred { iterator::IteratorCloseOnException(i); - ReThrow(context, e); + ReThrowWithMessage(context, e, message); } } label Throw deferred { ThrowTypeError(MessageTemplate::kNotIterable); diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index 56dfcfa2627b3c..1c4f571e83f782 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -79,11 +79,16 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, ArgumentsElementType element_type) { DCHECK(!AreAliased(array, argc, scratch)); Label loop, done; - __ 
cmpi(argc, Operand::Zero()); + if (kJSArgcIncludesReceiver) { + __ subi(scratch, argc, Operand(kJSArgcReceiverSlots)); + } else { + __ mr(scratch, argc); + } + __ cmpi(scratch, Operand::Zero()); __ beq(&done); - __ ShiftLeftU64(scratch, argc, Operand(kSystemPointerSizeLog2)); + __ mtctr(scratch); + __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2)); __ add(scratch, array, scratch); - __ mtctr(argc); __ bind(&loop); __ LoadU64WithUpdate(ip, MemOperand(scratch, -kSystemPointerSize)); @@ -155,7 +160,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. __ DropArguments(scratch, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountExcludesReceiver); + kJSArgcIncludesReceiver + ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); __ blr(); __ bind(&stack_overflow); @@ -202,8 +209,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0); __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset)); __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7); - __ JumpIfIsInRange(r7, kDefaultDerivedConstructor, kDerivedConstructor, - &not_create_implicit_receiver); + __ JumpIfIsInRange( + r7, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor), + static_cast<uint32_t>(FunctionKind::kDerivedConstructor), + &not_create_implicit_receiver); // If not derived class constructor: Allocate the new receiver object. __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r7, @@ -315,7 +324,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Remove caller arguments from the stack and return. __ DropArguments(r4, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountExcludesReceiver); + kJSArgcIncludesReceiver + ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); __ blr(); __ bind(&check_receiver); @@ -423,6 +434,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0); __ LoadU16( r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset)); + if (kJSArgcIncludesReceiver) { + __ subi(r6, r6, Operand(kJSArgcReceiverSlots)); + } __ LoadTaggedPointerField( r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset), r0); @@ -732,7 +746,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Check if we have enough stack space to push all arguments. Label enough_stack_space, stack_overflow; - __ addi(r3, r7, Operand(1)); + if (kJSArgcIncludesReceiver) { + __ mr(r3, r7); + } else { + __ addi(r3, r7, Operand(1)); + } __ StackOverflowCheck(r3, r9, &stack_overflow); __ b(&enough_stack_space); __ bind(&stack_overflow); @@ -834,7 +852,10 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, MemOperand(fp, StandardFrameConstants::kArgCOffset)); __ ShiftLeftU64(actual_params_size, actual_params_size, Operand(kSystemPointerSizeLog2)); - __ addi(actual_params_size, actual_params_size, Operand(kSystemPointerSize)); + if (!kJSArgcIncludesReceiver) { + __ addi(actual_params_size, actual_params_size, + Operand(kSystemPointerSize)); + } // If actual is bigger than formal, then we should use it to free up the stack // arguments.
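The kJSArgcIncludesReceiver branches in the hunks above all implement a single convention change: the value carried in the argc register now counts the receiver as one of the arguments, so each port either subtracts kJSArgcReceiverSlots or drops the old add-one-for-the-receiver step. A minimal standalone sketch of that accounting, assuming kJSArgcReceiverSlots is 1 when the flag is set; the JSParameterCount helper mirrors the comparisons used further down in this patch, but its definition here is illustrative rather than V8's:

// Sketch of the argc convention, not V8 source. C++17, self-contained.
constexpr bool kJSArgcIncludesReceiver = true;  // assumed build configuration
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

// Immediate that argc is compared against at a call site written with n
// explicit (non-receiver) parameters, as in CmpS64(r3, JSParameterCount(2)).
constexpr int JSParameterCount(int param_count_without_receiver) {
  return param_count_without_receiver + kJSArgcReceiverSlots;
}

// Stack slots a builtin drops on frame teardown for a given argc value.
constexpr int SlotsToDrop(int argc) {
  return kJSArgcIncludesReceiver ? argc : argc + 1;  // receiver slot
}

static_assert(JSParameterCount(2) == 3, "apply(thisArg, argArray) threshold");
static_assert(SlotsToDrop(JSParameterCount(0)) == 1, "receiver always dropped");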
@@ -856,7 +877,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, OptimizationMarker expected_marker, Runtime::FunctionId function_id) { Label no_match; - __ cmpi(actual_marker, Operand(expected_marker)); + __ cmpi(actual_marker, Operand(static_cast<int>(expected_marker))); __ bne(&no_match); GenerateTailCallToReturnedCode(masm, function_id); __ bind(&no_match); @@ -1051,7 +1072,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( // stack left to right. // // The live registers are: -// o r3: actual argument count (not including the receiver) +// o r3: actual argument count // o r4: the JS function object being called. // o r6: the incoming new target or generator object // o cp: our context @@ -1302,7 +1323,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( InterpreterPushArgsMode mode) { DCHECK(mode != InterpreterPushArgsMode::kArrayFunction); // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r5 : the address of the first argument to be pushed. Subsequent // arguments should be consecutive above this, in the same order as // they are to be pushed onto the stack. @@ -1315,15 +1336,18 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( __ subi(r3, r3, Operand(1)); } - // Calculate number of arguments (add one for receiver). - __ addi(r6, r3, Operand(1)); - __ StackOverflowCheck(r6, ip, &stack_overflow); - - if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - // Don't copy receiver. Argument count is correct. + const bool skip_receiver = + receiver_mode == ConvertReceiverMode::kNullOrUndefined; + if (kJSArgcIncludesReceiver && skip_receiver) { + __ subi(r6, r3, Operand(kJSArgcReceiverSlots)); + } else if (!kJSArgcIncludesReceiver && !skip_receiver) { + __ addi(r6, r3, Operand(1)); + } else { __ mr(r6, r3); } + __ StackOverflowCheck(r6, ip, &stack_overflow); + // Push the arguments. GenerateInterpreterPushArgs(masm, r6, r5, r7); @@ -1359,23 +1383,28 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( void Builtins::Generate_InterpreterPushArgsThenConstructImpl( MacroAssembler* masm, InterpreterPushArgsMode mode) { // ----------- S t a t e ------------- - // -- r3 : argument count (not including receiver) + // -- r3 : argument count // -- r6 : new target // -- r4 : constructor to call // -- r5 : allocation site feedback if available, undefined otherwise. // -- r7 : address of the first argument // ----------------------------------- Label stack_overflow; - __ addi(r8, r3, Operand(1)); - __ StackOverflowCheck(r8, ip, &stack_overflow); + __ StackOverflowCheck(r3, ip, &stack_overflow); if (mode == InterpreterPushArgsMode::kWithFinalSpread) { // The spread argument should not be pushed. __ subi(r3, r3, Operand(1)); } + Register argc_without_receiver = r3; + if (kJSArgcIncludesReceiver) { + argc_without_receiver = ip; + __ subi(argc_without_receiver, r3, Operand(kJSArgcReceiverSlots)); + } + // Push the arguments. - GenerateInterpreterPushArgs(masm, r3, r7, r8); + GenerateInterpreterPushArgs(masm, argc_without_receiver, r7, r8); // Push a slot for the receiver to be constructed. __ li(r0, Operand::Zero()); @@ -1582,13 +1611,14 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, // Overwrite the hole inserted by the deoptimizer with the return value from // the LAZY deopt point. r0 contains the arguments count, the return value // from LAZY is always the last argument.
- __ addi(r3, r3, - Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + constexpr int return_value_offset = + BuiltinContinuationFrameConstants::kFixedSlotCount - + kJSArgcReceiverSlots; + __ addi(r3, r3, Operand(return_value_offset)); __ ShiftLeftU64(r0, r3, Operand(kSystemPointerSizeLog2)); __ StoreU64(scratch, MemOperand(sp, r0)); // Recover arguments count. - __ subi(r3, r3, - Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + __ subi(r3, r3, Operand(return_value_offset)); } __ LoadU64( fp, @@ -1703,16 +1733,18 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { Label done; __ LoadU64(r4, MemOperand(sp)); // receiver - __ cmpi(r3, Operand(1)); + __ CmpS64(r3, Operand(JSParameterCount(1)), r0); __ blt(&done); __ LoadU64(r8, MemOperand(sp, kSystemPointerSize)); // thisArg - __ cmpi(r3, Operand(2)); + __ CmpS64(r3, Operand(JSParameterCount(2)), r0); __ blt(&done); __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountExcludesReceiver); + __ DropArgumentsAndPushNewReceiver( + r3, r8, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1738,7 +1770,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // arguments to the receiver. __ bind(&no_arguments); { - __ li(r3, Operand::Zero()); + __ mov(r3, Operand(JSParameterCount(0))); __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); } } @@ -1752,7 +1784,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { // r3: actual number of arguments { Label done; - __ cmpi(r3, Operand::Zero()); + __ CmpS64(r3, Operand(JSParameterCount(0)), r0); __ bne(&done); __ PushRoot(RootIndex::kUndefinedValue); __ addi(r3, r3, Operand(1)); @@ -1784,19 +1816,21 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ mr(r5, r4); Label done; - __ cmpi(r3, Operand(1)); + __ CmpS64(r3, Operand(JSParameterCount(1)), r0); __ blt(&done); __ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // thisArg - __ cmpi(r3, Operand(2)); + __ CmpS64(r3, Operand(JSParameterCount(2)), r0); __ blt(&done); __ LoadU64(r8, MemOperand(sp, 2 * kSystemPointerSize)); // argArray - __ cmpi(r3, Operand(3)); + __ CmpS64(r3, Operand(JSParameterCount(3)), r0); __ blt(&done); __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountExcludesReceiver); + __ DropArgumentsAndPushNewReceiver( + r3, r8, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? 
TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1833,19 +1867,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { Label done; __ mr(r7, r4); - __ cmpi(r3, Operand(1)); + __ CmpS64(r3, Operand(JSParameterCount(1)), r0); __ blt(&done); __ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // thisArg __ mr(r6, r4); - __ cmpi(r3, Operand(2)); + __ CmpS64(r3, Operand(JSParameterCount(2)), r0); __ blt(&done); __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray - __ cmpi(r3, Operand(3)); + __ CmpS64(r3, Operand(JSParameterCount(3)), r0); __ blt(&done); __ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountExcludesReceiver); + __ DropArgumentsAndPushNewReceiver( + r3, r7, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1887,14 +1923,21 @@ void Generate_AllocateSpaceAndShiftExistingArguments( Register dest = pointer_to_new_space_out; __ addi(dest, sp, Operand(-kSystemPointerSize)); - __ addi(r0, argc_in_out, Operand(1)); + Label loop, skip; + if (!kJSArgcIncludesReceiver) { + __ addi(r0, argc_in_out, Operand(1)); + } else { + __ mr(r0, argc_in_out); + __ cmpi(r0, Operand::Zero()); + __ ble(&skip); + } __ mtctr(r0); - Label loop; __ bind(&loop); __ LoadU64WithUpdate(r0, MemOperand(old_sp, kSystemPointerSize)); __ StoreU64WithUpdate(r0, MemOperand(dest, kSystemPointerSize)); __ bdnz(&loop); + __ bind(&skip); // Update total number of arguments, restore dest. __ add(argc_in_out, argc_in_out, count); __ addi(dest, dest, Operand(kSystemPointerSize)); @@ -1908,7 +1951,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle<Code> code) { // ----------- S t a t e ------------- // -- r4 : target - // -- r3 : number of parameters on the stack (not including the receiver) + // -- r3 : number of parameters on the stack // -- r5 : arguments list (a FixedArray) // -- r7 : len (number of elements to push from args) // -- r6 : new.target (for [[Construct]]) @@ -1980,7 +2023,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, CallOrConstructMode mode, Handle<Code> code) { // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r6 : the new.target (for [[Construct]] calls) // -- r4 : the target to call (can be any Object) // -- r5 : start index (to support rest parameters) @@ -2008,12 +2051,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, Label stack_done, stack_overflow; __ LoadU64(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset)); + if (kJSArgcIncludesReceiver) { + __ subi(r8, r8, Operand(kJSArgcReceiverSlots)); + } __ sub(r8, r8, r5, LeaveOE, SetRC); __ ble(&stack_done, cr0); { // ----------- S t a t e ------------- - // -- r3 : the number of arguments already in the stack (not including the - // receiver) + // -- r3 : the number of arguments already in the stack // -- r4 : the target to call (can be any Object) // -- r5 : start index (to support rest parameters) // -- r6 : the new.target (for [[Construct]] calls) @@ -2069,7 +2114,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, void Builtins::Generate_CallFunction(MacroAssembler* 
masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r4 : the function to call (checked to be a JSFunction) // ----------------------------------- __ AssertFunction(r4); @@ -2095,7 +2140,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ bne(&done_convert, cr0); { // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r4 : the function to call (checked to be a JSFunction) // -- r5 : the shared function info. // -- cp : the function context. @@ -2148,7 +2193,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ bind(&done_convert); // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r4 : the function to call (checked to be a JSFunction) // -- r5 : the shared function info. // -- cp : the function context. @@ -2171,7 +2216,7 @@ namespace { void Generate_PushBoundArguments(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r4 : target (checked to be a JSBoundFunction) // -- r6 : new.target (only in case of [[Construct]]) // ----------------------------------- @@ -2184,7 +2229,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ beq(&no_bound_arguments, cr0); { // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r4 : target (checked to be a JSBoundFunction) // -- r5 : the [[BoundArguments]] (implemented as FixedArray) // -- r6 : new.target (only in case of [[Construct]]) @@ -2244,7 +2289,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // static void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r4 : the function to call (checked to be a JSBoundFunction) // ----------------------------------- __ AssertBoundFunction(r4); @@ -2267,7 +2312,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // static void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r4 : the target to call (can be any Object). 
// ----------------------------------- Register argc = r3; @@ -2337,7 +2382,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // static void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r4 : the constructor to call (checked to be a JSFunction) // -- r6 : the new target (checked to be a constructor) // ----------------------------------- @@ -2369,7 +2414,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // static void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r4 : the function to call (checked to be a JSBoundFunction) // -- r6 : the new target (checked to be a constructor) // ----------------------------------- @@ -2396,7 +2441,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // static void Builtins::Generate_Construct(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- r3 : the number of arguments (not including the receiver) + // -- r3 : the number of arguments // -- r4 : the constructor to call (can be any Object) // -- r6 : the new target (either the same as the constructor or // the JSFunction on which new was invoked initially) diff --git a/deps/v8/src/builtins/promise-abstract-operations.tq b/deps/v8/src/builtins/promise-abstract-operations.tq index 5c871d3ff0e761..2e2dd0e1ef71e9 100644 --- a/deps/v8/src/builtins/promise-abstract-operations.tq +++ b/deps/v8/src/builtins/promise-abstract-operations.tq @@ -113,7 +113,7 @@ transitioning macro MorphAndEnqueuePromiseReaction(implicit context: Context)( // Morph {current} from a PromiseReaction into a PromiseReactionJobTask // and schedule that on the microtask queue. We try to minimize the number - // of stores here to avoid screwing up the store buffer. + // of stores here to avoid write barrier overhead. static_assert( kPromiseReactionSize == kPromiseReactionJobTaskSizeOfAllPromiseReactionJobTasks); diff --git a/deps/v8/src/builtins/promise-all.tq b/deps/v8/src/builtins/promise-all.tq index 602908d7f66e27..cd55ec9f3b867c 100644 --- a/deps/v8/src/builtins/promise-all.tq +++ b/deps/v8/src/builtins/promise-all.tq @@ -139,7 +139,7 @@ transitioning macro PerformPromiseAll( constructor: Constructor, capability: PromiseCapability, promiseResolveFunction: JSAny, createResolveElementFunctor: F1, createRejectElementFunctor: F2): JSAny labels -Reject(Object) { +Reject(JSAny) { const promise = capability.promise; const resolve = capability.resolve; const reject = capability.reject; @@ -172,7 +172,7 @@ Reject(Object) { // to true. // ReturnIfAbrupt(nextValue). nextValue = iterator::IteratorValue(next, fastIteratorResultMap); - } catch (e) { + } catch (e, _message) { goto Reject(e); } @@ -262,7 +262,7 @@ Reject(Object) { // Set index to index + 1. 
index += 1; } - } catch (e) deferred { + } catch (e, _message) deferred { iterator::IteratorCloseOnException(iter); goto Reject(e); } label Done {} @@ -354,11 +354,9 @@ transitioning macro GeneratePromiseAll( nativeContext, i, constructor, capability, promiseResolveFunction, createResolveElementFunctor, createRejectElementFunctor) otherwise Reject; - } catch (e) deferred { + } catch (e, _message) deferred { goto Reject(e); - } label Reject(e: Object) deferred { - // Exception must be bound to a JS value. - const e = UnsafeCast<JSAny>(e); + } label Reject(e: JSAny) deferred { const reject = UnsafeCast<JSAny>(capability.reject); Call(context, reject, Undefined, e); return capability.promise; diff --git a/deps/v8/src/builtins/promise-any.tq b/deps/v8/src/builtins/promise-any.tq index 1555511eda315d..d50b8b5574e900 100644 --- a/deps/v8/src/builtins/promise-any.tq +++ b/deps/v8/src/builtins/promise-any.tq @@ -159,7 +159,7 @@ transitioning macro PerformPromiseAny(implicit context: Context)( nativeContext: NativeContext, iteratorRecord: iterator::IteratorRecord, constructor: Constructor, resultCapability: PromiseCapability, promiseResolveFunction: JSAny): JSAny labels -Reject(Object) { +Reject(JSAny) { // 1. Assert: ! IsConstructor(constructor) is true. // 2. Assert: resultCapability is a PromiseCapability Record. @@ -198,7 +198,7 @@ Reject(Object) { // g. ReturnIfAbrupt(nextValue). nextValue = iterator::IteratorValue(next, fastIteratorResultMap); - } catch (e) { + } catch (e, _message) { goto Reject(e); } @@ -280,7 +280,7 @@ Reject(Object) { context, rejectElement, kPromiseForwardingHandlerSymbol, True); } } - } catch (e) deferred { + } catch (e, _message) deferred { iterator::IteratorCloseOnException(iteratorRecord); goto Reject(e); } label Done {} @@ -361,9 +361,9 @@ PromiseAny( nativeContext, iteratorRecord, constructor, capability, promiseResolveFunction) otherwise Reject; - } catch (e) deferred { + } catch (e, _message) deferred { goto Reject(e); - } label Reject(e: Object) deferred { + } label Reject(e: JSAny) deferred { // Exception must be bound to a JS value.
dcheck(e != TheHole); Call( diff --git a/deps/v8/src/builtins/promise-constructor.tq b/deps/v8/src/builtins/promise-constructor.tq index b5f7292a77cc70..eec333f4ce8ea6 100644 --- a/deps/v8/src/builtins/promise-constructor.tq +++ b/deps/v8/src/builtins/promise-constructor.tq @@ -85,7 +85,7 @@ PromiseConstructor( const reject = funcs.reject; try { Call(context, UnsafeCast<Callable>(executor), Undefined, resolve, reject); - } catch (e) { + } catch (e, _message) { Call(context, reject, Undefined, e); } diff --git a/deps/v8/src/builtins/promise-jobs.tq b/deps/v8/src/builtins/promise-jobs.tq index 77d2e7cf9c4813..9a9d22af9474f3 100644 --- a/deps/v8/src/builtins/promise-jobs.tq +++ b/deps/v8/src/builtins/promise-jobs.tq @@ -66,7 +66,7 @@ PromiseResolveThenableJob(implicit context: Context)( try { return Call( context, UnsafeCast<Callable>(then), thenable, resolve, reject); - } catch (e) { + } catch (e, _message) { return Call(context, UnsafeCast<Callable>(reject), Undefined, e); } } diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq index e8b4842dd5e459..99c4006da250ca 100644 --- a/deps/v8/src/builtins/promise-misc.tq +++ b/deps/v8/src/builtins/promise-misc.tq @@ -112,7 +112,7 @@ transitioning macro RunContextPromiseHookInit(implicit context: Context)( try { Call(context, hook, Undefined, promise, parentObject); - } catch (e) { + } catch (e, _message) { runtime::ReportMessageFromMicrotask(e); } } @@ -189,7 +189,7 @@ transitioning macro RunContextPromiseHook(implicit context: Context)( try { Call(context, hook, Undefined, promise); - } catch (e) { + } catch (e, _message) { runtime::ReportMessageFromMicrotask(e); } } diff --git a/deps/v8/src/builtins/promise-race.tq b/deps/v8/src/builtins/promise-race.tq index eed1fae3890e4a..1d15dde666664e 100644 --- a/deps/v8/src/builtins/promise-race.tq +++ b/deps/v8/src/builtins/promise-race.tq @@ -47,7 +47,7 @@ PromiseRace( // Let iterator be GetIterator(iterable). // IfAbruptRejectPromise(iterator, promiseCapability). i = iterator::GetIterator(iterable); - } catch (e) deferred { + } catch (e, _message) deferred { goto Reject(e); } @@ -69,7 +69,7 @@ PromiseRace( // to true. // ReturnIfAbrupt(nextValue). nextValue = iterator::IteratorValue(next, fastIteratorResultMap); - } catch (e) { + } catch (e, _message) { goto Reject(e); } // Let nextPromise be ? 
Call(constructor, _promiseResolve_, « @@ -91,14 +91,12 @@ PromiseRace( context, thenResult, kPromiseHandledBySymbol, promise); } } - } catch (e) deferred { + } catch (e, _message) deferred { iterator::IteratorCloseOnException(i); goto Reject(e); } - } label Reject(exception: Object) deferred { - Call( - context, UnsafeCast<JSAny>(reject), Undefined, - UnsafeCast<JSAny>(exception)); + } label Reject(exception: JSAny) deferred { + Call(context, UnsafeCast<JSAny>(reject), Undefined, exception); return promise; } unreachable; diff --git a/deps/v8/src/builtins/promise-reaction-job.tq b/deps/v8/src/builtins/promise-reaction-job.tq index 0374b2a3fe0674..30283591075c17 100644 --- a/deps/v8/src/builtins/promise-reaction-job.tq +++ b/deps/v8/src/builtins/promise-reaction-job.tq @@ -60,7 +60,7 @@ macro FuflfillPromiseReactionJob( const resolve = UnsafeCast<JSAny>(capability.resolve); try { return Call(context, resolve, Undefined, result); - } catch (e) { + } catch (e, _message) { return RejectPromiseReactionJob( context, promiseOrCapability, e, reactionType); } @@ -98,7 +98,7 @@ macro PromiseReactionJob( return FuflfillPromiseReactionJob( context, promiseOrCapability, result, reactionType); } - } catch (e) { + } catch (e, _message) { return RejectPromiseReactionJob( context, promiseOrCapability, e, reactionType); } diff --git a/deps/v8/src/builtins/promise-resolve.tq b/deps/v8/src/builtins/promise-resolve.tq index 5b0a82ca3d14e0..114b1e922b1f48 100644 --- a/deps/v8/src/builtins/promise-resolve.tq +++ b/deps/v8/src/builtins/promise-resolve.tq @@ -165,7 +165,7 @@ ResolvePromise(implicit context: Context)( // 10. If then is an abrupt completion, then try { then = GetProperty(resolution, kThenString); - } catch (e) { + } catch (e, _message) { // a. Return RejectPromise(promise, then.[[Value]]). return RejectPromise(promise, e, False); } diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc index 51a08c12967366..f5c3600850b28c 100644 --- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc +++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc @@ -194,8 +194,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Lwu(func_info, FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset)); __ DecodeField<SharedFunctionInfo::FunctionKindBits>(func_info); - __ JumpIfIsInRange(func_info, kDefaultDerivedConstructor, - kDerivedConstructor, &not_create_implicit_receiver); + __ JumpIfIsInRange( + func_info, + static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor), + static_cast<uint32_t>(FunctionKind::kDerivedConstructor), + &not_create_implicit_receiver); Register scratch = func_info; Register scratch2 = temps.Acquire(); // If not derived class constructor: Allocate the new receiver object.
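Every Torque catch clause in the promise builtins above gains a second binding so the pending message travels with the thrown value and can be restored by ReThrowWithMessage rather than being clobbered while the iterator is closed. The embedder-visible counterpart of that pairing is v8::TryCatch, which captures the exception and its v8::Message together; a hedged sketch against the public API (the wrapper function and its name are invented for illustration):

// Illustration only: the exception/message pair that the catch (e, message)
// rewrite preserves inside the builtins, as seen through the embedder API.
#include <cstdio>
#include <v8.h>

v8::MaybeLocal<v8::Value> RunKeepingMessage(v8::Isolate* isolate,
                                            v8::Local<v8::Context> context,
                                            v8::Local<v8::Script> script) {
  v8::TryCatch try_catch(isolate);
  v8::MaybeLocal<v8::Value> result = script->Run(context);
  if (try_catch.HasCaught()) {
    v8::Local<v8::Value> exception = try_catch.Exception();  // thrown value
    v8::Local<v8::Message> message = try_catch.Message();    // its message
    if (!message.IsEmpty()) {
      v8::String::Utf8Value text(isolate, message->Get());
      std::fprintf(stderr, "caught: %s\n", *text ? *text : "<unprintable>");
    }
    (void)exception;
    try_catch.ReThrow();  // propagate without losing the message
  }
  return result;
}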
@@ -921,8 +924,8 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, Runtime::FunctionId function_id) { ASM_CODE_COMMENT(masm); Label no_match; - __ Branch(&no_match, ne, actual_marker, Operand(expected_marker), - Label::Distance::kNear); + __ Branch(&no_match, ne, actual_marker, + Operand(static_cast<int>(expected_marker)), Label::Distance::kNear); GenerateTailCallToReturnedCode(masm, function_id); __ bind(&no_match); } diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index 3b51a086ec0fcf..3fe9ebc68358b8 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -33,6 +33,254 @@ namespace internal { #define __ ACCESS_MASM(masm) +namespace { + +static void AssertCodeIsBaseline(MacroAssembler* masm, Register code, + Register scratch) { + DCHECK(!AreAliased(code, scratch)); + // Verify that the code kind is baseline code via the CodeKind. + __ LoadU64(scratch, FieldMemOperand(code, Code::kFlagsOffset)); + __ DecodeField<Code::KindField>(scratch); + __ CmpS64(scratch, Operand(static_cast<int>(CodeKind::BASELINE))); + __ Assert(eq, AbortReason::kExpectedBaselineData); +} + +static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, + Register sfi_data, + Register scratch1, + Label* is_baseline) { + USE(GetSharedFunctionInfoBytecodeOrBaseline); + ASM_CODE_COMMENT(masm); + Label done; + __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE); + if (FLAG_debug_code) { + Label not_baseline; + __ b(ne, &not_baseline); + AssertCodeIsBaseline(masm, sfi_data, scratch1); + __ beq(is_baseline); + __ bind(&not_baseline); + } else { + __ beq(is_baseline); + } + __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE)); + __ bne(&done); + __ LoadTaggedPointerField( + sfi_data, + FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); + + __ bind(&done); +} + +void Generate_OSREntry(MacroAssembler* masm, Register entry_address, + intptr_t offset) { + if (is_int20(offset)) { + __ lay(r14, MemOperand(entry_address, offset)); + } else { + __ AddS64(r14, entry_address, Operand(offset)); + } + + // "return" to the OSR entry point of the function. + __ Ret(); +} + +// Restarts execution either at the current or next (in execution order) +// bytecode. If there is baseline code on the shared function info, converts an +// interpreter frame into a baseline frame and continues execution in baseline +// code. Otherwise execution continues with bytecode. +void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, + bool next_bytecode, + bool is_osr = false) { + Label start; + __ bind(&start); + + // Get function from the frame. + Register closure = r3; + __ LoadU64(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + + // Get the Code object from the shared function info. + Register code_obj = r8; + __ LoadTaggedPointerField( + code_obj, + FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedPointerField( + code_obj, + FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + + // Check if we have baseline code. For OSR entry it is safe to assume we + // always have baseline code. + if (!is_osr) { + Label start_with_baseline; + __ CompareObjectType(code_obj, r5, r5, CODET_TYPE); + __ b(eq, &start_with_baseline); + + // Start with bytecode as there is no baseline code. + Builtin builtin_id = next_bytecode + ? 
Builtin::kInterpreterEnterAtNextBytecode + : Builtin::kInterpreterEnterAtBytecode; + __ Jump(masm->isolate()->builtins()->code_handle(builtin_id), + RelocInfo::CODE_TARGET); + + // Start with baseline code. + __ bind(&start_with_baseline); + } else if (FLAG_debug_code) { + __ CompareObjectType(code_obj, r5, r5, CODET_TYPE); + __ Assert(eq, AbortReason::kExpectedBaselineData); + } + + if (FLAG_debug_code) { + AssertCodeIsBaseline(masm, code_obj, r5); + } + + // Load the feedback vector. + Register feedback_vector = r4; + __ LoadTaggedPointerField( + feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedPointerField( + feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label install_baseline_code; + // Check if feedback vector is valid. If not, call prepare for baseline to + // allocate it. + __ CompareObjectType(feedback_vector, r5, r5, FEEDBACK_VECTOR_TYPE); + __ b(ne, &install_baseline_code); + + // Save BytecodeOffset from the stack frame. + __ LoadU64(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ SmiUntag(kInterpreterBytecodeOffsetRegister); + // Replace BytecodeOffset with the feedback vector. + __ StoreU64(feedback_vector, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + feedback_vector = no_reg; + + // Compute baseline pc for bytecode offset. + ExternalReference get_baseline_pc_extref; + if (next_bytecode || is_osr) { + get_baseline_pc_extref = + ExternalReference::baseline_pc_for_next_executed_bytecode(); + } else { + get_baseline_pc_extref = + ExternalReference::baseline_pc_for_bytecode_offset(); + } + Register get_baseline_pc = r5; + __ Move(get_baseline_pc, get_baseline_pc_extref); + + // If the code deoptimizes during the implicit function entry stack interrupt + // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is + // not a valid bytecode offset. + // TODO(pthier): Investigate if it is feasible to handle this special case + // in TurboFan instead of here. + Label valid_bytecode_offset, function_entry_bytecode; + if (!is_osr) { + __ CmpS64(kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + + kFunctionEntryBytecodeOffset)); + __ b(eq, &function_entry_bytecode); + } + + __ SubS64(kInterpreterBytecodeOffsetRegister, + kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); + + __ bind(&valid_bytecode_offset); + // Get bytecode array from the stack frame. + __ LoadU64(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + // Save the accumulator register, since it's clobbered by the below call. 
+ __ Push(kInterpreterAccumulatorRegister); + { + Register arg_reg_1 = r2; + Register arg_reg_2 = r3; + Register arg_reg_3 = r4; + __ mov(arg_reg_1, code_obj); + __ mov(arg_reg_2, kInterpreterBytecodeOffsetRegister); + __ mov(arg_reg_3, kInterpreterBytecodeArrayRegister); + FrameScope scope(masm, StackFrame::INTERNAL); + __ PrepareCallCFunction(3, 0, r1); + __ CallCFunction(get_baseline_pc, 3, 0); + } + __ AddS64(code_obj, code_obj, kReturnRegister0); + __ Pop(kInterpreterAccumulatorRegister); + + if (is_osr) { + Register scratch = r1; + __ mov(scratch, Operand(0)); + __ StoreU16(scratch, + FieldMemOperand(kInterpreterBytecodeArrayRegister, + BytecodeArray::kOsrLoopNestingLevelOffset)); + Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag); + } else { + __ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ Jump(code_obj); + } + __ Trap(); // Unreachable. + + if (!is_osr) { + __ bind(&function_entry_bytecode); + // If the bytecode offset is kFunctionEntryOffset, get the start address of + // the first bytecode. + __ mov(kInterpreterBytecodeOffsetRegister, Operand(0)); + if (next_bytecode) { + __ Move(get_baseline_pc, + ExternalReference::baseline_pc_for_bytecode_offset()); + } + __ b(&valid_bytecode_offset); + } + + __ bind(&install_baseline_code); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(kInterpreterAccumulatorRegister); + __ Push(closure); + __ CallRuntime(Runtime::kInstallBaselineCode, 1); + __ Pop(kInterpreterAccumulatorRegister); + } + // Retry from the start after installing baseline code. + __ b(&start); +} + +void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) { + ASM_CODE_COMMENT(masm); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kCompileForOnStackReplacement); + } + + // If the code object is null, just return to the caller. + Label skip; + __ CmpSmiLiteral(r2, Smi::zero(), r0); + __ bne(&skip); + __ Ret(); + + __ bind(&skip); + + if (is_interpreter) { + // Drop the handler frame that is sitting on top of the actual + // JavaScript frame. This is the case when OSR is triggered from bytecode. + __ LeaveFrame(StackFrame::STUB); + } + + // Load deoptimization data from the code object. + // <deopt_data> = <code>[#deoptimization_data_offset] + __ LoadTaggedPointerField( + r3, + FieldMemOperand(r2, Code::kDeoptimizationDataOrInterpreterDataOffset)); + + // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset] + __ SmiUntagField( + r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt( + DeoptimizationData::kOsrPcOffsetIndex))); + + // Compute the target address = code_obj + header_size + osr_offset + // <entry_addr> = <code_obj> + #header_size + <osr_offset> + __ AddS64(r2, r3); + Generate_OSREntry(masm, r2, Code::kHeaderSize - kHeapObjectTag); +} + +} // namespace + void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) { __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address)); __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), @@ -81,7 +329,11 @@ void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, Register counter = scratch; Register value = ip; Label loop, entry; - __ mov(counter, argc); + if (kJSArgcIncludesReceiver) { + __ SubS64(counter, argc, Operand(kJSArgcReceiverSlots)); + } else { + __ mov(counter, argc); + } __ b(&entry); __ bind(&loop); __ ShiftLeftU64(value, counter, Operand(kSystemPointerSizeLog2)); @@ -151,7 +403,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. __ DropArguments(scratch, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountExcludesReceiver); + kJSArgcIncludesReceiver + ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); __ Ret(); __ bind(&stack_overflow); @@ -198,8 +452,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset)); __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6); - __ JumpIfIsInRange(r6, kDefaultDerivedConstructor, kDerivedConstructor, - &not_create_implicit_receiver); + __ JumpIfIsInRange( + r6, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor), + static_cast<uint32_t>(FunctionKind::kDerivedConstructor), + &not_create_implicit_receiver); // If not derived class constructor: Allocate the new receiver object. __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r6, @@ -307,7 +563,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Remove caller arguments from the stack and return. __ DropArguments(r3, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountExcludesReceiver); + kJSArgcIncludesReceiver + ? 
TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); __ Ret(); __ bind(&check_receiver); @@ -339,19 +597,6 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { Generate_JSBuiltinsConstructStubHelper(masm); } -static void GetSharedFunctionInfoBytecode(MacroAssembler* masm, - Register sfi_data, - Register scratch1) { - Label done; - - __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE); - __ bne(&done, Label::kNear); - __ LoadTaggedPointerField( - sfi_data, - FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); - __ bind(&done); -} - // static void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // ----------- S t a t e ------------- @@ -416,6 +661,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset)); __ LoadU16( r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset)); + if (kJSArgcIncludesReceiver) { + __ SubS64(r5, r5, Operand(kJSArgcReceiverSlots)); + } __ LoadTaggedPointerField( r4, FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset)); @@ -440,13 +688,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Underlying function needs to have bytecode available. if (FLAG_debug_code) { + Label is_baseline; __ LoadTaggedPointerField( r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset)); __ LoadTaggedPointerField( r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset)); - GetSharedFunctionInfoBytecode(masm, r5, ip); + GetSharedFunctionInfoBytecodeOrBaseline(masm, r5, ip, &is_baseline); __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE); __ Assert(eq, AbortReason::kMissingBytecodeArray); + __ bind(&is_baseline); } // Resume (Ignition/TurboFan) generator object. @@ -780,7 +1030,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Check if we have enough stack space to push all arguments. Label enough_stack_space, stack_overflow; - __ AddS64(r7, r2, Operand(1)); + if (kJSArgcIncludesReceiver) { + __ mov(r7, r2); + } else { + __ AddS64(r7, r2, Operand(1)); + } __ StackOverflowCheck(r7, r1, &stack_overflow); __ b(&enough_stack_space); __ bind(&stack_overflow); @@ -887,8 +1141,10 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, MemOperand(fp, StandardFrameConstants::kArgCOffset)); __ ShiftLeftU64(actual_params_size, actual_params_size, Operand(kSystemPointerSizeLog2)); - __ AddS64(actual_params_size, actual_params_size, - Operand(kSystemPointerSize)); + if (!kJSArgcIncludesReceiver) { + __ AddS64(actual_params_size, actual_params_size, + Operand(kSystemPointerSize)); + } // If actual is bigger than formal, then we should use it to free up the stack // arguments. @@ -911,7 +1167,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, OptimizationMarker expected_marker, Runtime::FunctionId function_id) { Label no_match; - __ CmpS64(actual_marker, Operand(expected_marker)); + __ CmpS64(actual_marker, Operand(static_cast<int>(expected_marker))); __ bne(&no_match); GenerateTailCallToReturnedCode(masm, function_id); __ bind(&no_match); @@ -1097,12 +1353,177 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8); } +// Read off the optimization state in the feedback vector and check if there +// is optimized code or an optimization marker that needs to be processed.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing( + MacroAssembler* masm, Register optimization_state, Register feedback_vector, + Label* has_optimized_code_or_marker) { + ASM_CODE_COMMENT(masm); + USE(LoadOptimizationStateAndJumpIfNeedsProcessing); + DCHECK(!AreAliased(optimization_state, feedback_vector)); + __ LoadU32(optimization_state, + FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset)); + CHECK( + is_uint16(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask)); + __ tmll( + optimization_state, + Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask)); + __ b(Condition(7), has_optimized_code_or_marker); +} + +#if ENABLE_SPARKPLUG +// static +void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { + // UseScratchRegisterScope temps(masm); + // Need a few extra registers + // temps.Include(r8, r9); + + auto descriptor = + Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue); + Register closure = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kClosure); + // Load the feedback vector from the closure. + Register feedback_vector = ip; + __ LoadTaggedPointerField( + feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedPointerField( + feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + if (FLAG_debug_code) { + Register scratch = r1; + __ CompareObjectType(feedback_vector, scratch, scratch, + FEEDBACK_VECTOR_TYPE); + __ Assert(eq, AbortReason::kExpectedFeedbackVector); + } + + // Check for an optimization marker. + Label has_optimized_code_or_marker; + Register optimization_state = r9; + { + LoadOptimizationStateAndJumpIfNeedsProcessing( + masm, optimization_state, feedback_vector, + &has_optimized_code_or_marker); + } + + // Increment invocation count for the function. + { + Register invocation_count = r1; + __ LoadU64(invocation_count, + FieldMemOperand(feedback_vector, + FeedbackVector::kInvocationCountOffset)); + __ AddU64(invocation_count, Operand(1)); + __ StoreU64(invocation_count, + FieldMemOperand(feedback_vector, + FeedbackVector::kInvocationCountOffset)); + } + + FrameScope frame_scope(masm, StackFrame::MANUAL); + { + ASM_CODE_COMMENT_STRING(masm, "Frame Setup"); + // Normally the first thing we'd do here is Push(lr, fp), but we already + // entered the frame in BaselineCompiler::Prologue, as we had to use the + // value lr before the call to this BaselineOutOfLinePrologue builtin. + + Register callee_context = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kCalleeContext); + Register callee_js_function = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kClosure); + __ Push(callee_context, callee_js_function); + DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); + DCHECK_EQ(callee_js_function, kJSFunctionRegister); + + Register argc = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount); + // We'll use the bytecode for both code age/OSR resetting, and pushing onto + // the frame, so load it into a register. + Register bytecodeArray = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); + + // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset + // are 8-bit fields next to each other, so we could just optimize by writing + // a 16-bit. These static asserts guard our assumption is valid. 
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == + BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize); + STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); + { + Register scratch = r0; + __ mov(scratch, Operand(0)); + __ StoreU16(scratch, + FieldMemOperand(bytecodeArray, + BytecodeArray::kOsrLoopNestingLevelOffset)); + } + + __ Push(argc, bytecodeArray); + + // Baseline code frames store the feedback vector where interpreter would + // store the bytecode offset. + if (FLAG_debug_code) { + Register scratch = r1; + __ CompareObjectType(feedback_vector, scratch, scratch, + FEEDBACK_VECTOR_TYPE); + __ Assert(eq, AbortReason::kExpectedFeedbackVector); + } + __ Push(feedback_vector); + } + + Label call_stack_guard; + Register frame_size = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kStackFrameSize); + { + ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check"); + // Stack check. This folds the checks for both the interrupt stack limit + // check and the real stack limit into one by just checking for the + // interrupt limit. The interrupt limit is either equal to the real stack + // limit or tighter. By ensuring we have space until that limit after + // building the frame we can quickly precheck both at once. + + Register sp_minus_frame_size = r1; + Register interrupt_limit = r0; + __ SubS64(sp_minus_frame_size, sp, frame_size); + __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit); + __ CmpU64(sp_minus_frame_size, interrupt_limit); + __ blt(&call_stack_guard); + } + + // Do "fast" return to the caller pc in lr. + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + __ Ret(); + + __ bind(&has_optimized_code_or_marker); + { + ASM_CODE_COMMENT_STRING(masm, "Optimized marker check"); + + // Drop the frame created by the baseline call. + __ Pop(r14, fp); + MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state, + feedback_vector); + __ Trap(); + } + + __ bind(&call_stack_guard); + { + ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call"); + FrameScope frame_scope(masm, StackFrame::INTERNAL); + // Save incoming new target or generator + __ Push(kJavaScriptCallNewTargetRegister); + __ SmiTag(frame_size); + __ Push(frame_size); + __ CallRuntime(Runtime::kStackGuardWithGap); + __ Pop(kJavaScriptCallNewTargetRegister); + } + + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + __ Ret(); +} +#endif + // Generate code for entering a JS function with the interpreter. // On entry to the function the receiver and arguments have been pushed on the // stack left to right. // // The live registers are: -// o r2: actual argument count (not including the receiver) +// o r2: actual argument count // o r3: the JS function object being called. // o r5: the incoming new target or generator object // o cp: our context @@ -1125,7 +1546,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ LoadTaggedPointerField( kInterpreterBytecodeArrayRegister, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset)); - GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, ip); + + Label is_baseline; + GetSharedFunctionInfoBytecodeOrBaseline( + masm, kInterpreterBytecodeArrayRegister, ip, &is_baseline); // The bytecode array could have been flushed from the shared function info, // if so, call into CompileLazy. 
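With Sparkplug in the picture, SharedFunctionInfo::function_data can hold baseline code as well as a BytecodeArray or InterpreterData, and the GetSharedFunctionInfoBytecodeOrBaseline helper introduced above dispatches on those three shapes. A schematic restatement of that dispatch in plain C++, with invented stand-in types (this models the control flow only, not V8's object layout):

// Invented stand-ins for the three function_data shapes the helper checks.
#include <cassert>

enum class DataKind { kBytecodeArray, kInterpreterData, kBaselineCode };

struct FunctionData {
  DataKind kind;
  const FunctionData* interpreter_bytecode;  // set when kind == kInterpreterData
};

// Baseline code takes the is_baseline exit; InterpreterData is unwrapped to
// its BytecodeArray; a plain BytecodeArray is returned unchanged.
const FunctionData* BytecodeOrBaseline(const FunctionData* data,
                                       bool* is_baseline) {
  *is_baseline = false;
  if (data->kind == DataKind::kBaselineCode) {
    *is_baseline = true;  // caller continues at the baseline entry path
    return data;
  }
  if (data->kind == DataKind::kInterpreterData) {
    data = data->interpreter_bytecode;
  }
  assert(data->kind == DataKind::kBytecodeArray);
  return data;
}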
@@ -1320,6 +1744,39 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state, feedback_vector); + __ bind(&is_baseline); + { + // Load the feedback vector from the closure. + __ LoadTaggedPointerField( + feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedPointerField( + feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label install_baseline_code; + // Check if feedback vector is valid. If not, call prepare for baseline to + // allocate it. + __ LoadTaggedPointerField( + ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); + __ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE)); + __ b(ne, &install_baseline_code); + + // Check for an optimization marker. + LoadOptimizationStateAndJumpIfNeedsProcessing( + masm, optimization_state, feedback_vector, + &has_optimized_code_or_marker); + + // Load the baseline code into the closure. + __ mov(r4, kInterpreterBytecodeArrayRegister); + static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch"); + ReplaceClosureCodeWithOptimizedCode(masm, r4, closure, ip, r1); + __ JumpCodeObject(r4); + + __ bind(&install_baseline_code); + GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode); + } + __ bind(&compile_lazy); GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); @@ -1346,7 +1803,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( InterpreterPushArgsMode mode) { DCHECK(mode != InterpreterPushArgsMode::kArrayFunction); // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r4 : the address of the first argument to be pushed. Subsequent // arguments should be consecutive above this, in the same order as // they are to be pushed onto the stack. @@ -1358,15 +1815,18 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( __ SubS64(r2, r2, Operand(1)); } - // Calculate number of arguments (AddS64 one for receiver). - __ AddS64(r5, r2, Operand(1)); - __ StackOverflowCheck(r5, ip, &stack_overflow); - - if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - // Don't copy receiver. Argument count is correct. + const bool skip_receiver = + receiver_mode == ConvertReceiverMode::kNullOrUndefined; + if (kJSArgcIncludesReceiver && skip_receiver) { + __ SubS64(r5, r2, Operand(kJSArgcReceiverSlots)); + } else if (!kJSArgcIncludesReceiver && !skip_receiver) { + __ AddS64(r5, r2, Operand(1)); + } else { __ mov(r5, r2); } + __ StackOverflowCheck(r5, ip, &stack_overflow); + // Push the arguments. GenerateInterpreterPushArgs(masm, r5, r4, r6); @@ -1402,23 +1862,27 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( void Builtins::Generate_InterpreterPushArgsThenConstructImpl( MacroAssembler* masm, InterpreterPushArgsMode mode) { // ----------- S t a t e ------------- - // -- r2 : argument count (not including receiver) + // -- r2 : argument count // -- r5 : new target // -- r3 : constructor to call // -- r4 : allocation site feedback if available, undefined otherwise. // -- r6 : address of the first argument // ----------------------------------- Label stack_overflow; - __ AddS64(r7, r2, Operand(1)); - __ StackOverflowCheck(r7, ip, &stack_overflow); + __ StackOverflowCheck(r2, ip, &stack_overflow); if (mode == InterpreterPushArgsMode::kWithFinalSpread) { // The spread argument should not be pushed. 
__ SubS64(r2, r2, Operand(1)); } + Register argc_without_receiver = r2; + if (kJSArgcIncludesReceiver) { + argc_without_receiver = ip; + __ SubS64(argc_without_receiver, r2, Operand(kJSArgcReceiverSlots)); + } // Push the arguments. r4 and r5 will be modified. - GenerateInterpreterPushArgs(masm, r2, r6, r7); + GenerateInterpreterPushArgs(masm, argc_without_receiver, r6, r7); // Push a slot for the receiver to be constructed. __ mov(r0, Operand::Zero()); @@ -1621,13 +2085,14 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, // Overwrite the hole inserted by the deoptimizer with the return value from // the LAZY deopt point. r0 contains the arguments count, the return value // from LAZY is always the last argument. - __ AddS64(r2, r2, - Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + constexpr int return_value_offset = + BuiltinContinuationFrameConstants::kFixedSlotCount - + kJSArgcReceiverSlots; + __ AddS64(r2, r2, Operand(return_value_offset)); __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2)); __ StoreU64(scratch, MemOperand(sp, r1)); // Recover arguments count. - __ SubS64(r2, r2, - Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + __ SubS64(r2, r2, Operand(return_value_offset)); } __ LoadU64( fp, @@ -1675,46 +2140,6 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { __ Ret(); } -void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kCompileForOnStackReplacement); - } - - // If the code object is null, just return to the caller. - Label skip; - __ CmpSmiLiteral(r2, Smi::zero(), r0); - __ bne(&skip); - __ Ret(); - - __ bind(&skip); - - // Drop the handler frame that is be sitting on top of the actual - // JavaScript frame. This is the case then OSR is triggered from bytecode. - __ LeaveFrame(StackFrame::STUB); - - // Load deoptimization data from the code object. - // <deopt_data> = <code>[#deoptimization_data_offset] - __ LoadTaggedPointerField( - r3, - FieldMemOperand(r2, Code::kDeoptimizationDataOrInterpreterDataOffset)); - - // Load the OSR entrypoint offset from the deoptimization data. - // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset] - __ SmiUntagField( - r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt( - DeoptimizationData::kOsrPcOffsetIndex))); - - // Compute the target address = code_obj + header_size + osr_offset - // <entry_addr> = <code_obj> + #header_size + <osr_offset> - __ AddS64(r2, r3); - __ AddS64(r0, r2, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ mov(r14, r0); - - // And "return" to the OSR entry point of the function. - __ Ret(); -} - // static void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // ----------- S t a t e ------------- @@ -1733,16 +2158,18 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { Label done; __ LoadU64(r3, MemOperand(sp)); // receiver - __ cghi(r2, Operand(1)); + __ CmpS64(r2, Operand(JSParameterCount(1))); __ blt(&done); __ LoadU64(r7, MemOperand(sp, kSystemPointerSize)); // thisArg - __ cghi(r2, Operand(2)); + __ CmpS64(r2, Operand(JSParameterCount(2))); __ blt(&done); __ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountExcludesReceiver); + __ DropArgumentsAndPushNewReceiver( + r2, r7, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? 
TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1768,7 +2195,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // arguments to the receiver. __ bind(&no_arguments); { - __ mov(r2, Operand::Zero()); + __ mov(r2, Operand(JSParameterCount(0))); __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); } } @@ -1782,7 +2209,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { // r2: actual number of arguments { Label done; - __ cghi(r2, Operand::Zero()); + __ CmpS64(r2, Operand(JSParameterCount(0))); __ b(ne, &done); __ PushRoot(RootIndex::kUndefinedValue); __ AddS64(r2, r2, Operand(1)); @@ -1815,19 +2242,21 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { Label done; - __ cghi(r2, Operand(1)); + __ CmpS64(r2, Operand(JSParameterCount(1))); __ blt(&done); __ LoadU64(r3, MemOperand(sp, kSystemPointerSize)); // thisArg - __ cghi(r2, Operand(2)); + __ CmpS64(r2, Operand(JSParameterCount(2))); __ blt(&done); __ LoadU64(r7, MemOperand(sp, 2 * kSystemPointerSize)); // argArray - __ cghi(r2, Operand(3)); + __ CmpS64(r2, Operand(JSParameterCount(3))); __ blt(&done); __ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountExcludesReceiver); + __ DropArgumentsAndPushNewReceiver( + r2, r7, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1865,19 +2294,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { Label done; __ mov(r6, r3); - __ cghi(r2, Operand(1)); + __ CmpS64(r2, Operand(JSParameterCount(1))); __ blt(&done); __ LoadU64(r3, MemOperand(sp, kSystemPointerSize)); // thisArg __ mov(r5, r3); - __ cghi(r2, Operand(2)); + __ CmpS64(r2, Operand(JSParameterCount(2))); __ blt(&done); __ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray - __ cghi(r2, Operand(3)); + __ CmpS64(r2, Operand(JSParameterCount(3))); __ blt(&done); __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountExcludesReceiver); + __ DropArgumentsAndPushNewReceiver( + r2, r6, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? 
TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1926,7 +2357,11 @@ void Generate_AllocateSpaceAndShiftExistingArguments( Label loop, done; __ bind(&loop); __ CmpS64(old_sp, end); - __ bgt(&done); + if (kJSArgcIncludesReceiver) { + __ bge(&done); + } else { + __ bgt(&done); + } __ LoadU64(value, MemOperand(old_sp)); __ lay(old_sp, MemOperand(old_sp, kSystemPointerSize)); __ StoreU64(value, MemOperand(dest)); @@ -1946,7 +2381,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle<Code> code) { // ----------- S t a t e ------------- // -- r3 : target - // -- r2 : number of parameters on the stack (not including the receiver) + // -- r2 : number of parameters on the stack // -- r4 : arguments list (a FixedArray) // -- r6 : len (number of elements to push from args) // -- r5 : new.target (for [[Construct]]) @@ -2019,7 +2454,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, CallOrConstructMode mode, Handle<Code> code) { // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r5 : the new.target (for [[Construct]] calls) // -- r3 : the target to call (can be any Object) // -- r4 : start index (to support rest parameters) @@ -2047,12 +2482,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, Label stack_done, stack_overflow; __ LoadU64(r7, MemOperand(fp, StandardFrameConstants::kArgCOffset)); + if (kJSArgcIncludesReceiver) { + __ SubS64(r7, r7, Operand(kJSArgcReceiverSlots)); + } __ SubS64(r7, r7, r4); __ ble(&stack_done); { // ----------- S t a t e ------------- - // -- r2 : the number of arguments already in the stack (not including the - // receiver) + // -- r2 : the number of arguments already in the stack // -- r3 : the target to call (can be any Object) // -- r4 : start index (to support rest parameters) // -- r5 : the new.target (for [[Construct]] calls) @@ -2109,7 +2546,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, void Builtins::Generate_CallFunction(MacroAssembler* masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r3 : the function to call (checked to be a JSFunction) // ----------------------------------- __ AssertFunction(r3); @@ -2135,7 +2572,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ bne(&done_convert); { // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r3 : the function to call (checked to be a JSFunction) // -- r4 : the shared function info. // -- cp : the function context. @@ -2188,7 +2625,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ bind(&done_convert); // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r3 : the function to call (checked to be a JSFunction) // -- r4 : the shared function info. // -- cp : the function context. 
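The s390 hunks above and below all track the same migration: the argument count register may now include the receiver slot, so raw comparisons such as cghi(r2, Operand(1)) become CmpS64(r2, Operand(JSParameterCount(1))), and kJSArgcReceiverSlots is subtracted wherever a receiver-free count is needed. A minimal C++ sketch of the convention, assuming definitions along the lines of src/common/globals.h in this V8 version (the scaffolding below is illustrative, not the literal upstream source):

// When kJSArgcIncludesReceiver is true, the argc register counts the
// receiver slot, so a call that pushes N user-visible arguments reports N+1.
constexpr bool kJSArgcIncludesReceiver = true;  // build-time choice
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

// Maps a user-visible parameter count to the value kept in the argc
// register, which is why "argc == 0" checks become
// "argc == JSParameterCount(0)" in these builtins.
constexpr int JSParameterCount(int param_count_without_receiver) {
  return param_count_without_receiver + kJSArgcReceiverSlots;
}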
@@ -2211,7 +2648,7 @@ namespace { void Generate_PushBoundArguments(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r3 : target (checked to be a JSBoundFunction) // -- r5 : new.target (only in case of [[Construct]]) // ----------------------------------- @@ -2225,7 +2662,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ beq(&no_bound_arguments); { // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r3 : target (checked to be a JSBoundFunction) // -- r4 : the [[BoundArguments]] (implemented as FixedArray) // -- r5 : new.target (only in case of [[Construct]]) @@ -2282,7 +2719,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // static void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r3 : the function to call (checked to be a JSBoundFunction) // ----------------------------------- __ AssertBoundFunction(r3); @@ -2305,7 +2742,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // static void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r3 : the target to call (can be any Object). // ----------------------------------- Register argc = r2; @@ -2376,7 +2813,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // static void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r3 : the constructor to call (checked to be a JSFunction) // -- r5 : the new target (checked to be a constructor) // ----------------------------------- @@ -2407,7 +2844,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // static void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r3 : the function to call (checked to be a JSBoundFunction) // -- r5 : the new target (checked to be a constructor) // ----------------------------------- @@ -2434,7 +2871,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // static void Builtins::Generate_Construct(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- r2 : the number of arguments (not including the receiver) + // -- r2 : the number of arguments // -- r3 : the constructor to call (can be any Object) // -- r5 : the new target (either the same as the constructor or // the JSFunction on which new was invoked initially) @@ -3460,22 +3897,31 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy); } +void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { + return OnStackReplacement(masm, true); +} + +#if ENABLE_SPARKPLUG +void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) { + __ LoadU64(kContextRegister, + MemOperand(fp, 
BaselineFrameConstants::kContextOffset)); + return OnStackReplacement(masm, false); +} +#endif + void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( MacroAssembler* masm) { - // Implement on this platform, https://crrev.com/c/2695591. - __ bkpt(0); + Generate_BaselineOrInterpreterEntry(masm, false); } void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( MacroAssembler* masm) { - // Implement on this platform, https://crrev.com/c/2695591. - __ bkpt(0); + Generate_BaselineOrInterpreterEntry(masm, true); } void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( MacroAssembler* masm) { - // Implement on this platform, https://crrev.com/c/2800112. - __ bkpt(0); + Generate_BaselineOrInterpreterEntry(masm, false, true); } void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) { diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc index 9dcecdab3302e6..31537997937f2a 100644 --- a/deps/v8/src/builtins/setup-builtins-internal.cc +++ b/deps/v8/src/builtins/setup-builtins-internal.cc @@ -199,6 +199,9 @@ void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, Builtin builtin, Code code) { DCHECK_EQ(builtin, code.builtin_id()); builtins->set_code(builtin, code); + if (V8_EXTERNAL_CODE_SPACE_BOOL) { + builtins->set_codet(builtin, ToCodeT(code)); + } } // static @@ -220,7 +223,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) { // Replace references from all builtin code objects to placeholders. Builtins* builtins = isolate->builtins(); DisallowGarbageCollection no_gc; - CodeSpaceMemoryModificationScope modification_scope(isolate->heap()); + CodePageCollectionMemoryModificationScope modification_scope(isolate->heap()); static const int kRelocMask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) | RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) | @@ -230,6 +233,8 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) { for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast; ++builtin) { Code code = builtins->code(builtin); + isolate->heap()->UnprotectAndRegisterMemoryChunk( + code, UnprotectMemoryOrigin::kMainThread); bool flush_icache = false; for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) { RelocInfo* rinfo = it.rinfo(); diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq index 9fe503f5f5351c..8765a7b8ac4ef5 100644 --- a/deps/v8/src/builtins/torque-internal.tq +++ b/deps/v8/src/builtins/torque-internal.tq @@ -24,6 +24,20 @@ macro Subslice<T: type>(slice: MutableSlice<T>, start: intptr, length: intptr): slice.object, offset, length); } +namespace unsafe { + +macro AddOffset<T: type>(ref: &T, offset: intptr): &T { + return torque_internal::unsafe::NewReference<T>( + ref.object, ref.offset + torque_internal::TimesSizeOf<T>(offset)); +} + +macro AddOffset<T: type>(ref: const &T, offset: intptr): const &T { + return torque_internal::unsafe::NewReference<T>( + ref.object, ref.offset + torque_internal::TimesSizeOf<T>(offset)); +} + +} // namespace unsafe + namespace torque_internal { // Unsafe is a marker that we require to be passed when calling internal APIs // that might lead to unsoundness when used incorrectly. 
Unsafe markers should @@ -73,12 +87,15 @@ extern macro GCUnsafeReferenceToRawPtr( struct Slice<T: type, Reference: type> { macro TryAtIndex(index: intptr): Reference labels OutOfBounds { if (Convert<uintptr>(index) < Convert<uintptr>(this.length)) { - return unsafe::NewReference<T>( - this.object, this.offset + TimesSizeOf<T>(index)); + return this.UncheckedAtIndex(index); } else { goto OutOfBounds; } } + macro UncheckedAtIndex(index: intptr): Reference { + return unsafe::NewReference<T>( + this.object, this.offset + TimesSizeOf<T>(index)); + } macro AtIndex(index: intptr): Reference { return this.TryAtIndex(index) otherwise unreachable; @@ -317,6 +334,16 @@ intrinsic %IndexedFieldLength<T: type>(o: T, f: constexpr string): intptr; intrinsic %FieldSlice<T: type, TSlice: type>( o: T, f: constexpr string): TSlice; +extern macro GetPendingMessage(): TheHole|JSMessageObject; +extern macro SetPendingMessage(TheHole | JSMessageObject): void; + +// This is implicitly performed at the beginning of Torque catch-blocks. +macro GetAndResetPendingMessage(): TheHole|JSMessageObject { + const message = GetPendingMessage(); + SetPendingMessage(TheHole); + return message; +} + } // namespace torque_internal // Indicates that an array-field should not be initialized. diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq index 45a396afe63808..9004b32ef78e7e 100644 --- a/deps/v8/src/builtins/typed-array-createtypedarray.tq +++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq @@ -9,7 +9,7 @@ extern builtin IterableToListMayPreserveHoles( Context, Object, Callable): JSArray; extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( - implicit context: Context)(uintptr): JSArrayBuffer; + implicit context: Context)(): JSArrayBuffer; extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray; extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor( implicit context: Context)(JSTypedArray): JSFunction; @@ -93,7 +93,7 @@ transitioning macro TypedArrayInitialize(implicit context: Context)( if (byteLength > kMaxTypedArrayInHeap) goto AllocateOffHeap; - const buffer = AllocateEmptyOnHeapBuffer(byteLength); + const buffer = AllocateEmptyOnHeapBuffer(); const isOnHeap: constexpr bool = true; const isLengthTracking: constexpr bool = false; @@ -292,7 +292,7 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)( // in the step 12 branch. newByteLength = bufferByteLength - offset; newLength = elementsInfo.CalculateLength(newByteLength) - otherwise IfInvalidLength; + otherwise IfInvalidOffset; // 12. Else, } else { @@ -335,6 +335,7 @@ transitioning macro TypedArrayCreateByLength(implicit context: Context)( // ValidateTypedArray currently returns the array, not the ViewBuffer. const newTypedArray: JSTypedArray = ValidateTypedArray(context, newTypedArrayObj, methodName); + // TODO(v8:11111): bit_field should be initialized to 0. 
newTypedArray.bit_field.is_length_tracking = false; newTypedArray.bit_field.is_backed_by_rab = false; diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq index 5ddb1072ae1fab..c242851de2c40a 100644 --- a/deps/v8/src/builtins/typed-array.tq +++ b/deps/v8/src/builtins/typed-array.tq @@ -180,17 +180,18 @@ extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr( JSTypedArray, ByteArray, uintptr): void; extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr( JSTypedArray, RawPtr, uintptr): void; -extern macro IsJSTypedArrayDetachedOrOutOfBounds(JSTypedArray): - never labels Detached, NotDetached; +extern macro IsJSArrayBufferViewDetachedOrOutOfBounds(JSArrayBufferView): + never labels DetachedOrOutOfBounds, NotDetachedNorOutOfBounds; // AttachedJSTypedArray guards that the array's buffer is not detached. transient type AttachedJSTypedArray extends JSTypedArray; macro EnsureAttached(array: JSTypedArray): AttachedJSTypedArray - labels Detached { + labels DetachedOrOutOfBounds { try { - IsJSTypedArrayDetachedOrOutOfBounds(array) otherwise Detached, NotDetached; - } label NotDetached { + IsJSArrayBufferViewDetachedOrOutOfBounds(array) + otherwise DetachedOrOutOfBounds, NotDetachedNorOutOfBounds; + } label NotDetachedNorOutOfBounds { return %RawDownCast<AttachedJSTypedArray>(array); } } diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq index aadb17c3a04663..cfe17018bad9aa 100644 --- a/deps/v8/src/builtins/wasm.tq +++ b/deps/v8/src/builtins/wasm.tq @@ -64,7 +64,7 @@ extern macro WasmBuiltinsAssembler::LoadContextFromInstance(WasmInstanceObject): NativeContext; extern macro WasmBuiltinsAssembler::LoadTablesFromInstance(WasmInstanceObject): FixedArray; -extern macro WasmBuiltinsAssembler::LoadExternalFunctionsFromInstance( +extern macro WasmBuiltinsAssembler::LoadInternalFunctionsFromInstance( WasmInstanceObject): FixedArray; extern macro WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstance( WasmInstanceObject): FixedArray; @@ -227,7 +227,7 @@ builtin WasmTableSet(tableIndex: intptr, index: int32, value: Object): Object { builtin WasmRefFunc(index: uint32): Object { const instance: WasmInstanceObject = LoadInstanceFromFrame(); try { - const table: FixedArray = LoadExternalFunctionsFromInstance(instance); + const table: FixedArray = LoadInternalFunctionsFromInstance(instance); if (table == Undefined) goto CallRuntime; const functionIndex: intptr = Signed(ChangeUint32ToWord(index)); const result: Object = LoadFixedArrayElement(table, functionIndex); @@ -475,16 +475,11 @@ struct TargetAndInstance { instance: HeapObject; // WasmInstanceObject or WasmApiFunctionRef } -macro GetTargetAndInstance(funcref: JSFunction): TargetAndInstance { - const sfi = funcref.shared_function_info; - dcheck(Is<WasmFunctionData>(sfi.function_data)); - const funcData = UnsafeCast<WasmFunctionData>(sfi.function_data); - const ref = funcData.ref; - let target = funcData.foreign_address_ptr; +macro GetTargetAndInstance(funcref: WasmInternalFunction): TargetAndInstance { + const ref = funcref.ref; + let target = funcref.foreign_address_ptr; if (Signed(target) == IntPtrConstant(0)) { - const wrapper = - UnsafeCast<WasmJSFunctionData>(funcData).wasm_to_js_wrapper_code; - target = GetCodeEntry(wrapper); + target = GetCodeEntry(funcref.code); } return TargetAndInstance{target: target, instance: ref}; } @@ -493,19 +488,23 @@ macro GetTargetAndInstance(funcref: JSFunction): TargetAndInstance { // Two slots per call_ref instruction. These slots' values can be: // - uninitialized: (undefined, <unused>). 
Note: we use {undefined} as the // sentinel as an optimization, as it's the default value for FixedArrays. -// - monomorphic: (funcref, call_ref_data) +// - monomorphic: (funcref, count (smi)). The second slot is a counter for how +// often the funcref in the first slot has been seen. // - polymorphic: (fixed_array, <unused>). In this case, the array -// contains 2..4 pairs (funcref, call_ref_data) (like monomorphic data). +// contains 2..4 pairs (funcref, count (smi)) (like monomorphic data). // - megamorphic: ("megamorphic" sentinel, <unused>) - +// +// TODO(rstz): The counter might overflow if it exceeds the range of a Smi. +// This can lead to incorrect inlining decisions. builtin CallRefIC( - vector: FixedArray, index: intptr, funcref: JSFunction): TargetAndInstance { + vector: FixedArray, index: intptr, + funcref: WasmInternalFunction): TargetAndInstance { const value = vector.objects[index]; if (value == funcref) { // Monomorphic hit. Check for this case first to maximize its performance. - const data = UnsafeCast<CallRefData>(vector.objects[index + 1]); - data.count = data.count + 1; - return TargetAndInstance{target: data.target, instance: data.instance}; + const count = UnsafeCast<Smi>(vector.objects[index + 1]) + SmiConstant(1); + vector.objects[index + 1] = count; + return GetTargetAndInstance(funcref); } // Check for polymorphic hit; its performance is second-most-important. if (Is<FixedArray>(value)) { @@ -513,9 +512,9 @@ builtin CallRefIC( for (let i: intptr = 0; i < entries.length_intptr; i += 2) { if (entries.objects[i] == funcref) { // Polymorphic hit. - const data = UnsafeCast<CallRefData>(entries.objects[i + 1]); - data.count = data.count + 1; - return TargetAndInstance{target: data.target, instance: data.instance}; + const count = UnsafeCast<Smi>(entries.objects[i + 1]) + SmiConstant(1); + entries.objects[i + 1] = count; + return GetTargetAndInstance(funcref); } } } @@ -523,10 +522,8 @@ // instance. They all fall through to returning the computed data. const result = GetTargetAndInstance(funcref); if (TaggedEqual(value, Undefined)) { - const data = new - CallRefData{instance: result.instance, target: result.target, count: 1}; vector.objects[index] = funcref; - vector.objects[index + 1] = data; + vector.objects[index + 1] = SmiConstant(1); } else if (Is<FixedArray>(value)) { // Polymorphic miss. const entries = UnsafeCast<FixedArray>(value); @@ -534,8 +531,6 @@ vector.objects[index] = ic::kMegamorphicSymbol; vector.objects[index + 1] = ic::kMegamorphicSymbol; } else { - const data = new - CallRefData{instance: result.instance, target: result.target, count: 1}; const newEntries = UnsafeCast<FixedArray>(AllocateFixedArray( ElementsKind::PACKED_ELEMENTS, entries.length_intptr + 2, AllocationFlag::kNone)); @@ -544,22 +539,20 @@ const newIndex = entries.length_intptr; newEntries.objects[newIndex] = funcref; - newEntries.objects[newIndex + 1] = data; + newEntries.objects[newIndex + 1] = SmiConstant(1); vector.objects[index] = newEntries; } - } else if (Is<JSFunction>(value)) { + } else if (Is<WasmInternalFunction>(value)) { // Monomorphic miss. 
- const data = new - CallRefData{instance: result.instance, target: result.target, count: 1}; const newEntries = UnsafeCast<FixedArray>(AllocateFixedArray( ElementsKind::PACKED_ELEMENTS, 4, AllocationFlag::kNone)); newEntries.objects[0] = value; newEntries.objects[1] = vector.objects[index + 1]; newEntries.objects[2] = funcref; - newEntries.objects[3] = data; + newEntries.objects[3] = SmiConstant(1); vector.objects[index] = newEntries; - // Clear the old pointer to the first entry's data object; the specific - // value we write doesn't matter. + // Clear the first entry's counter; the specific value we write doesn't + // matter. vector.objects[index + 1] = Undefined; } // The "ic::IsMegamorphic(value)" case doesn't need to do anything. diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index 7beedbc3fd702a..125614fa3de4cf 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -78,7 +78,7 @@ static void GenerateTailCallToReturnedCode( __ Pop(kJavaScriptCallTargetRegister); } static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); - __ JumpCodeObject(rcx, jump_mode); + __ JumpCodeTObject(rcx, jump_mode); } namespace { @@ -212,8 +212,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ movl(rbx, FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset)); __ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx); - __ JumpIfIsInRange(rbx, kDefaultDerivedConstructor, kDerivedConstructor, - &not_create_implicit_receiver, Label::kNear); + __ JumpIfIsInRange( + rbx, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor), + static_cast<uint32_t>(FunctionKind::kDerivedConstructor), + &not_create_implicit_receiver, Label::kNear); // If not derived class constructor: Allocate the new receiver object. __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1); @@ -948,7 +950,7 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm, Runtime::FunctionId function_id) { ASM_CODE_COMMENT(masm); Label no_match; - __ Cmp(actual_marker, expected_marker); + __ Cmp(actual_marker, static_cast<int>(expected_marker)); __ j(not_equal, &no_match); GenerateTailCallToReturnedCode(masm, function_id); __ bind(&no_match); @@ -2974,15 +2976,9 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { // ------------------------------------------- // Compute offsets and prepare for GC. // ------------------------------------------- - // We will have to save a value indicating the GC the number - // of values on the top of the stack that have to be scanned before calling - // the Wasm function. - constexpr int kFrameMarkerOffset = -kSystemPointerSize; - constexpr int kGCScanSlotCountOffset = - kFrameMarkerOffset - kSystemPointerSize; // The number of parameters passed to this function. constexpr int kInParamCountOffset = - kGCScanSlotCountOffset - kSystemPointerSize; + BuiltinWasmWrapperConstants::kGCScanSlotCountOffset - kSystemPointerSize; // The number of parameters according to the signature. 
constexpr int kParamCountOffset = kInParamCountOffset - kSystemPointerSize; constexpr int kReturnCountOffset = kParamCountOffset - kSystemPointerSize; @@ -3389,17 +3385,20 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { Register function_entry = function_data; Register scratch = r12; + __ LoadAnyTaggedField( + function_entry, + FieldOperand(function_data, WasmExportedFunctionData::kInternalOffset)); __ LoadExternalPointerField( function_entry, - FieldOperand(function_data, - WasmExportedFunctionData::kForeignAddressOffset), + FieldOperand(function_entry, WasmInternalFunction::kForeignAddressOffset), kForeignForeignAddressTag, scratch); function_data = no_reg; scratch = no_reg; // We set the indicating value for the GC to the proper one for Wasm call. constexpr int kWasmCallGCScanSlotCount = 0; - __ Move(MemOperand(rbp, kGCScanSlotCountOffset), kWasmCallGCScanSlotCount); + __ Move(MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset), + kWasmCallGCScanSlotCount); // ------------------------------------------- // Call the Wasm function. @@ -3482,10 +3481,12 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { // The builtin expects the parameter to be in register param = rax. constexpr int kBuiltinCallGCScanSlotCount = 2; - PrepareForBuiltinCall(masm, MemOperand(rbp, kGCScanSlotCountOffset), - kBuiltinCallGCScanSlotCount, current_param, param_limit, - current_int_param_slot, current_float_param_slot, - valuetypes_array_ptr, wasm_instance, function_data); + PrepareForBuiltinCall( + masm, + MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset), + kBuiltinCallGCScanSlotCount, current_param, param_limit, + current_int_param_slot, current_float_param_slot, valuetypes_array_ptr, + wasm_instance, function_data); Label param_kWasmI32_not_smi; Label param_kWasmI64; @@ -3632,7 +3633,8 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { // ------------------------------------------- __ bind(&compile_wrapper); // Enable GC. - MemOperand GCScanSlotPlace = MemOperand(rbp, kGCScanSlotCountOffset); + MemOperand GCScanSlotPlace = + MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset); __ Move(GCScanSlotPlace, 4); // Save registers to the stack. __ pushq(wasm_instance); @@ -3656,6 +3658,7 @@ namespace { // Helper function for WasmReturnPromiseOnSuspend. void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf) { __ movq(rsp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset)); + __ movq(rbp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset)); // The stack limit is set separately under the ExecutionAccess lock. // TODO(thibaudm): Reload live registers. } @@ -3663,7 +3666,7 @@ void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf) { void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { // Set up the stackframe. - __ EnterFrame(StackFrame::JS_TO_WASM); + __ EnterFrame(StackFrame::RETURN_PROMISE_ON_SUSPEND); // Parameters. Register closure = kJSFunctionRegister; // rdi @@ -3672,14 +3675,11 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { __ decq(param_count); } - constexpr int kFrameMarkerOffset = -kSystemPointerSize; - constexpr int kParamCountOffset = kFrameMarkerOffset - kSystemPointerSize; - // The frame marker is not included in the slot count. 
- constexpr int kNumSpillSlots = - -(kParamCountOffset - kFrameMarkerOffset) / kSystemPointerSize; - __ subq(rsp, Immediate(kNumSpillSlots * kSystemPointerSize)); + __ subq(rsp, Immediate(ReturnPromiseOnSuspendFrameConstants::kSpillAreaSize)); - __ movq(MemOperand(rbp, kParamCountOffset), param_count); + __ movq( + MemOperand(rbp, ReturnPromiseOnSuspendFrameConstants::kParamCountOffset), + param_count); // ------------------------------------------- // Get the instance and wasm call target. @@ -3707,10 +3707,7 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { // ------------------------------------------- Register active_continuation = rax; Register foreign_jmpbuf = rbx; - __ LoadAnyTaggedField( - active_continuation, - FieldOperand(wasm_instance, - WasmInstanceObject::kActiveContinuationOffset)); + __ LoadRoot(active_continuation, RootIndex::kActiveContinuation); __ LoadAnyTaggedField( foreign_jmpbuf, FieldOperand(active_continuation, WasmContinuationObject::kJmpbufOffset)); @@ -3719,6 +3716,7 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset), kForeignForeignAddressTag, r8); __ movq(MemOperand(jmpbuf, wasm::kJmpBufSpOffset), rsp); + __ movq(MemOperand(jmpbuf, wasm::kJmpBufFpOffset), rbp); Register stack_limit_address = rcx; __ movq(stack_limit_address, FieldOperand(wasm_instance, @@ -3735,11 +3733,12 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { // ------------------------------------------- // Allocate a new continuation. // ------------------------------------------- + MemOperand GCScanSlotPlace = + MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset); + __ Move(GCScanSlotPlace, 2); __ Push(wasm_instance); __ Push(function_data); - __ Push(wasm_instance); __ Move(kContextRegister, Smi::zero()); - // TODO(thibaudm): Handle GC. __ CallRuntime(Runtime::kWasmAllocateContinuation); __ Pop(function_data); __ Pop(wasm_instance); @@ -3759,9 +3758,9 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { target_jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset), kForeignForeignAddressTag, r8); + __ Move(GCScanSlotPlace, 0); // Switch stack! LoadJumpBuffer(masm, target_jmpbuf); - __ movq(rbp, rsp); // New stack, there is no frame yet. foreign_jmpbuf = no_reg; target_jmpbuf = no_reg; // live: [rsi, rdi] @@ -3778,10 +3777,12 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset())); __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1)); Register function_entry = function_data; + __ LoadAnyTaggedField( + function_entry, + FieldOperand(function_entry, WasmExportedFunctionData::kInternalOffset)); __ LoadExternalPointerField( function_entry, - FieldOperand(function_data, - WasmExportedFunctionData::kForeignAddressOffset), + FieldOperand(function_data, WasmInternalFunction::kForeignAddressOffset), kForeignForeignAddressTag, r8); __ Push(wasm_instance); __ call(function_entry); @@ -3800,10 +3801,7 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { // Reload parent continuation. 
// ------------------------------------------- active_continuation = rbx; - __ LoadAnyTaggedField( - active_continuation, - FieldOperand(wasm_instance, - WasmInstanceObject::kActiveContinuationOffset)); + __ LoadRoot(active_continuation, RootIndex::kActiveContinuation); Register parent = rdx; __ LoadAnyTaggedField( parent, @@ -3814,20 +3812,7 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { // ------------------------------------------- // Update instance active continuation. // ------------------------------------------- - Register object = WriteBarrierDescriptor::ObjectRegister(); - Register slot_address = WriteBarrierDescriptor::SlotAddressRegister(); - DCHECK_EQ(object, rdi); - DCHECK((slot_address == rbx || slot_address == r8)); - // Save reg clobbered by the write barrier. - __ movq(rax, parent); - __ movq(object, wasm_instance); - __ StoreTaggedField( - FieldOperand(object, WasmInstanceObject::kActiveContinuationOffset), - parent); - __ RecordWriteField(object, WasmInstanceObject::kActiveContinuationOffset, - parent, slot_address, SaveFPRegsMode::kIgnore); - // Restore reg clobbered by the write barrier. - __ movq(parent, rax); + __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), parent); foreign_jmpbuf = rax; __ LoadAnyTaggedField( foreign_jmpbuf, @@ -3838,9 +3823,8 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { kForeignForeignAddressTag, r8); // Switch stack! LoadJumpBuffer(masm, jmpbuf); - __ leaq(rbp, Operand(rsp, (kNumSpillSlots + 1) * kSystemPointerSize)); + __ Move(GCScanSlotPlace, 1); __ Push(wasm_instance); // Spill. - __ Push(wasm_instance); // First arg. __ Move(kContextRegister, Smi::zero()); __ CallRuntime(Runtime::kWasmSyncStackLimit); __ Pop(wasm_instance); @@ -3852,8 +3836,10 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { // ------------------------------------------- // Epilogue. 
// ------------------------------------------- - __ movq(param_count, MemOperand(rbp, kParamCountOffset)); - __ LeaveFrame(StackFrame::JS_TO_WASM); + __ movq( + param_count, + MemOperand(rbp, ReturnPromiseOnSuspendFrameConstants::kParamCountOffset)); + __ LeaveFrame(StackFrame::RETURN_PROMISE_ON_SUSPEND); __ DropArguments(param_count, r8, TurboAssembler::kCountIsInteger, TurboAssembler::kCountExcludesReceiver); __ ret(0); diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS index a3c3ffdba6b36f..bf654f67893b40 100644 --- a/deps/v8/src/codegen/OWNERS +++ b/deps/v8/src/codegen/OWNERS @@ -10,3 +10,5 @@ mslekova@chromium.org mvstanton@chromium.org nicohartmann@chromium.org zhin@chromium.org + +per-file compiler.*=marja@chromium.org diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h index 0ee81b2f945de9..9080b3e0b3a682 100644 --- a/deps/v8/src/codegen/arm/assembler-arm-inl.h +++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h @@ -195,7 +195,7 @@ Operand::Operand(const ExternalReference& f) value_.immediate = static_cast(f.address()); } -Operand::Operand(Smi value) : rmode_(RelocInfo::NONE) { +Operand::Operand(Smi value) : rmode_(RelocInfo::NO_INFO) { value_.immediate = static_cast(value.ptr()); } diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc index 38d691007f3ff5..e434cac32dea4b 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.cc +++ b/deps/v8/src/codegen/arm/assembler-arm.cc @@ -1132,7 +1132,7 @@ bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) { if (RelocInfo::IsOnlyForSerializer(rmode)) { if (assembler->predictable_code_size()) return true; return assembler->options().record_reloc_info_for_serialization; - } else if (RelocInfo::IsNone(rmode)) { + } else if (RelocInfo::IsNoInfo(rmode)) { return false; } return true; @@ -1464,7 +1464,7 @@ int Assembler::branch_offset(Label* L) { // Branch instructions. void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) { - if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode); + if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode); DCHECK_EQ(branch_offset & 3, 0); int imm24 = branch_offset >> 2; const bool b_imm_check = is_int24(imm24); @@ -1478,7 +1478,7 @@ void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) { } void Assembler::bl(int branch_offset, Condition cond, RelocInfo::Mode rmode) { - if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode); + if (!RelocInfo::IsNoInfo(rmode)) RecordRelocInfo(rmode); DCHECK_EQ(branch_offset & 3, 0); int imm24 = branch_offset >> 2; const bool bl_imm_check = is_int24(imm24); @@ -5226,7 +5226,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) { // blocked before using dd. DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty()); CheckBuffer(); - if (!RelocInfo::IsNone(rmode)) { + if (!RelocInfo::IsNoInfo(rmode)) { DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); @@ -5240,7 +5240,7 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) { // blocked before using dq. 
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty()); CheckBuffer(); - if (!RelocInfo::IsNone(rmode)) { + if (!RelocInfo::IsNoInfo(rmode)) { DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h index a7d224a09457ed..4cce50f7957867 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.h +++ b/deps/v8/src/codegen/arm/assembler-arm.h @@ -87,7 +87,7 @@ class V8_EXPORT_PRIVATE Operand { public: // immediate V8_INLINE explicit Operand(int32_t immediate, - RelocInfo::Mode rmode = RelocInfo::NONE) + RelocInfo::Mode rmode = RelocInfo::NO_INFO) : rmode_(rmode) { value_.immediate = immediate; } @@ -405,9 +405,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Branch instructions void b(int branch_offset, Condition cond = al, - RelocInfo::Mode rmode = RelocInfo::NONE); + RelocInfo::Mode rmode = RelocInfo::NO_INFO); void bl(int branch_offset, Condition cond = al, - RelocInfo::Mode rmode = RelocInfo::NONE); + RelocInfo::Mode rmode = RelocInfo::NO_INFO); void blx(int branch_offset); // v5 and above void blx(Register target, Condition cond = al); // v5 and above void bx(Register target, Condition cond = al); // v5 and above, plus v4t @@ -1095,9 +1095,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // called before any use of db/dd/dq/dp to ensure that constant pools // are not emitted as part of the tables generated. void db(uint8_t data); - void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE); - void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE); - void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) { + void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO); + void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO); + void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) { dd(data, rmode); } diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index 5c46c64b3eebee..95eb8795e95b9f 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -2022,7 +2022,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, Jump(code, RelocInfo::CODE_TARGET); } -void MacroAssembler::JumpToInstructionStream(Address entry) { +void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) { mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); Jump(kOffHeapTrampolineRegister); } diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index 73efa120028f2c..e43aec485f10e8 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -800,7 +800,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { bool builtin_exit_frame = false); // Generates a trampoline to jump to the off-heap instruction stream. - void JumpToInstructionStream(Address entry); + void JumpToOffHeapInstructionStream(Address entry); // --------------------------------------------------------------------------- // In-place weak references. 
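The arm assembler hunks above (and the arm64 hunks that follow) are a mechanical rename: RelocInfo::NONE becomes RelocInfo::NO_INFO and RelocInfo::IsNone() becomes RelocInfo::IsNoInfo(), including in the db/dd/dq/dp default arguments. A hedged usage sketch under the renamed API (EmitRawConstant is a hypothetical helper, not part of this patch):

// Emits a pointer-sized constant with no relocation recorded; passing
// RelocInfo::NO_INFO explicitly is equivalent to the new default argument.
void EmitRawConstant(v8::internal::Assembler* assm, uintptr_t value) {
  assm->dp(value, v8::internal::RelocInfo::NO_INFO);
}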
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h index c5a1d4fd8ad0aa..40b9a94dd85d07 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h @@ -192,7 +192,7 @@ inline VRegister CPURegister::Q() const { // Default initializer is for int types template <typename T> struct ImmediateInitializer { - static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; } + static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NO_INFO; } static inline int64_t immediate_for(T t) { STATIC_ASSERT(sizeof(T) <= 8); STATIC_ASSERT(std::is_integral<T>::value || std::is_enum<T>::value); @@ -202,7 +202,7 @@ template <> struct ImmediateInitializer<Smi> { - static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NONE; } + static inline RelocInfo::Mode rmode_for(Smi t) { return RelocInfo::NO_INFO; } static inline int64_t immediate_for(Smi t) { return static_cast<int64_t>(t.ptr()); } diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index 627c7ae0213351..fd5cd326ecde55 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -314,7 +314,7 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const { return assembler->options().record_reloc_info_for_serialization; } - return !RelocInfo::IsNone(rmode); + return !RelocInfo::IsNoInfo(rmode); } // Assembler @@ -4375,13 +4375,15 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, void Assembler::near_jump(int offset, RelocInfo::Mode rmode) { BlockPoolsScope no_pool_before_b_instr(this); - if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY); + if (!RelocInfo::IsNoInfo(rmode)) + RecordRelocInfo(rmode, offset, NO_POOL_ENTRY); b(offset); } void Assembler::near_call(int offset, RelocInfo::Mode rmode) { BlockPoolsScope no_pool_before_bl_instr(this); - if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY); + if (!RelocInfo::IsNoInfo(rmode)) + RecordRelocInfo(rmode, offset, NO_POOL_ENTRY); bl(offset); } diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h index dac90f8058c9dd..df8fadf1f1bf02 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64.h @@ -2065,27 +2065,27 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Required by V8. 
void db(uint8_t data) { dc8(data); } - void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE) { + void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) { BlockPoolsScope no_pool_scope(this); - if (!RelocInfo::IsNone(rmode)) { + if (!RelocInfo::IsNoInfo(rmode)) { DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } dc32(data); } - void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE) { + void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) { BlockPoolsScope no_pool_scope(this); - if (!RelocInfo::IsNone(rmode)) { + if (!RelocInfo::IsNoInfo(rmode)) { DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } dc64(data); } - void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) { + void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) { BlockPoolsScope no_pool_scope(this); - if (!RelocInfo::IsNone(rmode)) { + if (!RelocInfo::IsNoInfo(rmode)) { DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index bcf2e4574ab030..58920c343a8d49 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -1655,7 +1655,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, Jump(code, RelocInfo::CODE_TARGET); } -void MacroAssembler::JumpToInstructionStream(Address entry) { +void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) { Ldr(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); Br(kOffHeapTrampolineRegister); } diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index 165d702c31e9a0..7c972bd307f1fb 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -1911,7 +1911,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { bool builtin_exit_frame = false); // Generates a trampoline to jump to the off-heap instruction stream. - void JumpToInstructionStream(Address entry); + void JumpToOffHeapInstructionStream(Address entry); // Registers used through the invocation chain are hard-coded. // We force passing the parameters to ensure the contracts are correctly diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h index ae6c4c920037c0..29a4212aacfb76 100644 --- a/deps/v8/src/codegen/arm64/register-arm64.h +++ b/deps/v8/src/codegen/arm64/register-arm64.h @@ -547,8 +547,6 @@ using Simd128Register = VRegister; // Lists of registers. class V8_EXPORT_PRIVATE CPURegList { public: - CPURegList() = default; - template explicit CPURegList(CPURegister reg0, CPURegisters... regs) : list_(CPURegister::ListOf(reg0, regs...)), diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h index 50711046e6b1f4..651952027893ed 100644 --- a/deps/v8/src/codegen/assembler.h +++ b/deps/v8/src/codegen/assembler.h @@ -39,6 +39,7 @@ #include #include +#include "src/base/macros.h" #include "src/base/memory.h" #include "src/codegen/code-comments.h" #include "src/codegen/cpu-features.h" @@ -64,7 +65,7 @@ using base::WriteUnalignedValue; // Forward declarations. 
class EmbeddedData; -class InstructionStream; +class OffHeapInstructionStream; class Isolate; class SCTableReference; class SourcePosition; @@ -387,7 +388,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { void RequestHeapObject(HeapObjectRequest request); bool ShouldRecordRelocInfo(RelocInfo::Mode rmode) const { - DCHECK(!RelocInfo::IsNone(rmode)); + DCHECK(!RelocInfo::IsNoInfo(rmode)); if (options().disable_reloc_info_for_patching) return false; if (RelocInfo::IsOnlyForSerializer(rmode) && !options().record_reloc_info_for_serialization && !FLAG_debug_code) { @@ -470,7 +471,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope { #ifdef V8_CODE_COMMENTS #define ASM_CODE_COMMENT(asm) ASM_CODE_COMMENT_STRING(asm, __func__) #define ASM_CODE_COMMENT_STRING(asm, comment) \ - AssemblerBase::CodeComment asm_code_comment(asm, comment) + AssemblerBase::CodeComment UNIQUE_IDENTIFIER(asm_code_comment)(asm, comment) #else #define ASM_CODE_COMMENT(asm) #define ASM_CODE_COMMENT_STRING(asm, ...) diff --git a/deps/v8/src/codegen/code-reference.cc b/deps/v8/src/codegen/code-reference.cc index 0c550fa0d300cb..27ff425a2f9dfa 100644 --- a/deps/v8/src/codegen/code-reference.cc +++ b/deps/v8/src/codegen/code-reference.cc @@ -86,26 +86,26 @@ struct CodeDescOps { ret CodeReference::method() const { \ DCHECK(!is_null()); \ switch (kind_) { \ - case JS: \ + case Kind::JS: \ return JSOps{js_code_}.method(); \ - case WASM: \ + case Kind::WASM: \ return WasmOps{wasm_code_}.method(); \ - case CODE_DESC: \ + case Kind::CODE_DESC: \ return CodeDescOps{code_desc_}.method(); \ default: \ UNREACHABLE(); \ } \ } #else -#define DISPATCH(ret, method) \ - ret CodeReference::method() const { \ - DCHECK(!is_null()); \ - DCHECK(kind_ == JS || kind_ == CODE_DESC); \ - if (kind_ == JS) { \ - return JSOps{js_code_}.method(); \ - } else { \ - return CodeDescOps{code_desc_}.method(); \ - } \ +#define DISPATCH(ret, method) \ + ret CodeReference::method() const { \ + DCHECK(!is_null()); \ + DCHECK(kind_ == Kind::JS || kind_ == Kind::CODE_DESC); \ + if (kind_ == Kind::JS) { \ + return JSOps{js_code_}.method(); \ + } else { \ + return CodeDescOps{code_desc_}.method(); \ + } \ } #endif // V8_ENABLE_WEBASSEMBLY diff --git a/deps/v8/src/codegen/code-reference.h b/deps/v8/src/codegen/code-reference.h index 8ff3581689f5ae..9b54b6074e4958 100644 --- a/deps/v8/src/codegen/code-reference.h +++ b/deps/v8/src/codegen/code-reference.h @@ -20,12 +20,13 @@ class WasmCode; class CodeReference { public: - CodeReference() : kind_(NONE), null_(nullptr) {} + CodeReference() : kind_(Kind::NONE), null_(nullptr) {} explicit CodeReference(const wasm::WasmCode* wasm_code) - : kind_(WASM), wasm_code_(wasm_code) {} + : kind_(Kind::WASM), wasm_code_(wasm_code) {} explicit CodeReference(const CodeDesc* code_desc) - : kind_(CODE_DESC), code_desc_(code_desc) {} - explicit CodeReference(Handle<Code> js_code) : kind_(JS), js_code_(js_code) {} + : kind_(Kind::CODE_DESC), code_desc_(code_desc) {} + explicit CodeReference(Handle<Code> js_code) + : kind_(Kind::JS), js_code_(js_code) {} Address constant_pool() const; Address instruction_start() const; @@ -37,22 +38,22 @@ class CodeReference { Address code_comments() const; int code_comments_size() const; - bool is_null() const { return kind_ == NONE; } - bool is_js() const { return kind_ == JS; } - bool is_wasm_code() const { return kind_ == WASM; } + bool is_null() const { return kind_ == Kind::NONE; } + bool is_js() const { return kind_ == Kind::JS; } + bool is_wasm_code() const { return kind_ == Kind::WASM; } Handle<Code> 
as_js_code() const { - DCHECK_EQ(JS, kind_); + DCHECK_EQ(Kind::JS, kind_); return js_code_; } const wasm::WasmCode* as_wasm_code() const { - DCHECK_EQ(WASM, kind_); + DCHECK_EQ(Kind::WASM, kind_); return wasm_code_; } private: - enum { NONE, JS, WASM, CODE_DESC } kind_; + enum class Kind { NONE, JS, WASM, CODE_DESC } kind_; union { std::nullptr_t null_; const wasm::WasmCode* wasm_code_; diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index 4a9c06bdd89d40..db50f7d3e4146c 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -22,6 +22,7 @@ #include "src/objects/descriptor-array.h" #include "src/objects/function-kind.h" #include "src/objects/heap-number.h" +#include "src/objects/instance-type.h" #include "src/objects/js-generator.h" #include "src/objects/oddball.h" #include "src/objects/ordered-hash-table-inl.h" @@ -1539,16 +1540,21 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(TNode value, } } -#ifdef V8_CAGED_POINTERS - -TNode CodeStubAssembler::LoadCagedPointerFromObject( +TNode CodeStubAssembler::LoadCagedPointerFromObject( TNode object, TNode field_offset) { - return LoadObjectField(object, field_offset); +#ifdef V8_CAGED_POINTERS + return ReinterpretCast( + LoadObjectField(object, field_offset)); +#else + return LoadObjectField(object, field_offset); +#endif // V8_CAGED_POINTERS } void CodeStubAssembler::StoreCagedPointerToObject(TNode object, TNode offset, - TNode pointer) { + TNode pointer) { +#ifdef V8_CAGED_POINTERS + TNode caged_pointer = ReinterpretCast(pointer); #ifdef DEBUG // Verify pointer points into the cage. TNode cage_base_address = @@ -1557,13 +1563,26 @@ void CodeStubAssembler::StoreCagedPointerToObject(TNode object, ExternalConstant(ExternalReference::virtual_memory_cage_end_address()); TNode cage_base = Load(cage_base_address); TNode cage_end = Load(cage_end_address); - CSA_CHECK(this, UintPtrGreaterThanOrEqual(pointer, cage_base)); - CSA_CHECK(this, UintPtrLessThan(pointer, cage_end)); -#endif - StoreObjectFieldNoWriteBarrier(object, offset, pointer); + CSA_DCHECK(this, UintPtrGreaterThanOrEqual(caged_pointer, cage_base)); + CSA_DCHECK(this, UintPtrLessThan(caged_pointer, cage_end)); +#endif // DEBUG + StoreObjectFieldNoWriteBarrier(object, offset, caged_pointer); +#else + StoreObjectFieldNoWriteBarrier(object, offset, pointer); +#endif // V8_CAGED_POINTERS } +TNode CodeStubAssembler::EmptyBackingStoreBufferConstant() { +#ifdef V8_CAGED_POINTERS + // TODO(chromium:1218005) consider creating a LoadCagedPointerConstant() if + // more of these constants are required later on. + TNode empty_backing_store_buffer = + ExternalConstant(ExternalReference::empty_backing_store_buffer()); + return Load(empty_backing_store_buffer); +#else + return ReinterpretCast(IntPtrConstant(0)); #endif // V8_CAGED_POINTERS +} TNode CodeStubAssembler::ChangeUint32ToExternalPointer( TNode value) { @@ -1679,6 +1698,11 @@ TNode CodeStubAssembler::LoadFromParentFrame(int offset) { return LoadFullTagged(frame_pointer, IntPtrConstant(offset)); } +TNode CodeStubAssembler::LoadUint8Ptr(TNode ptr, + TNode offset) { + return Load(IntPtrAdd(ReinterpretCast(ptr), offset)); +} + TNode CodeStubAssembler::LoadAndUntagObjectField( TNode object, int offset) { // Please use LoadMap(object) instead. @@ -2892,8 +2916,10 @@ TNode CodeStubAssembler::IsGeneratorFunction( SharedFunctionInfo::kFlagsOffset)); // See IsGeneratorFunction(FunctionKind kind). 
- return IsInRange(function_kind, FunctionKind::kAsyncConciseGeneratorMethod, - FunctionKind::kConciseGeneratorMethod); + return IsInRange( + function_kind, + static_cast(FunctionKind::kAsyncConciseGeneratorMethod), + static_cast(FunctionKind::kConciseGeneratorMethod)); } TNode CodeStubAssembler::IsJSFunctionWithPrototypeSlot( @@ -6142,6 +6168,20 @@ void CodeStubAssembler::ThrowTypeError(TNode context, Unreachable(); } +TNode CodeStubAssembler::GetPendingMessage() { + TNode pending_message = ExternalConstant( + ExternalReference::address_of_pending_message(isolate())); + return UncheckedCast(LoadFullTagged(pending_message)); +} +void CodeStubAssembler::SetPendingMessage(TNode message) { + CSA_DCHECK(this, Word32Or(IsTheHole(message), + InstanceTypeEqual(LoadInstanceType(message), + JS_MESSAGE_OBJECT_TYPE))); + TNode pending_message = ExternalConstant( + ExternalReference::address_of_pending_message(isolate())); + StoreFullTaggedNoWriteBarrier(pending_message, message); +} + TNode CodeStubAssembler::InstanceTypeEqual(TNode instance_type, int type) { return Word32Equal(instance_type, Int32Constant(type)); @@ -6362,8 +6402,8 @@ TNode CodeStubAssembler::IsSeqOneByteStringInstanceType( CSA_DCHECK(this, IsStringInstanceType(instance_type)); return Word32Equal( Word32And(instance_type, - Int32Constant(kStringRepresentationMask | kStringEncodingMask)), - Int32Constant(kSeqStringTag | kOneByteStringTag)); + Int32Constant(kStringRepresentationAndEncodingMask)), + Int32Constant(kSeqOneByteStringTag)); } TNode CodeStubAssembler::IsConsStringInstanceType( @@ -8089,6 +8129,25 @@ TNode> CodeStubAssembler::ExternalTwoByteStringGetChars( std::make_pair(MachineType::AnyTagged(), string))); } +TNode> CodeStubAssembler::IntlAsciiCollationWeightsL1() { +#ifdef V8_INTL_SUPPORT + TNode ptr = + ExternalConstant(ExternalReference::intl_ascii_collation_weights_l1()); + return ReinterpretCast>(ptr); +#else + UNREACHABLE(); +#endif +} +TNode> CodeStubAssembler::IntlAsciiCollationWeightsL3() { +#ifdef V8_INTL_SUPPORT + TNode ptr = + ExternalConstant(ExternalReference::intl_ascii_collation_weights_l3()); + return ReinterpretCast>(ptr); +#else + UNREACHABLE(); +#endif +} + void CodeStubAssembler::TryInternalizeString( TNode string, Label* if_index, TVariable* var_index, Label* if_internalized, TVariable* var_internalized, @@ -8561,7 +8620,9 @@ TNode CodeStubAssembler::BasicLoadNumberDictionaryElement( TNode details = LoadDetailsByKeyIndex(dictionary, index); TNode kind = DecodeWord32(details); // TODO(jkummerow): Support accessors without missing? - GotoIfNot(Word32Equal(kind, Int32Constant(kData)), not_data); + GotoIfNot( + Word32Equal(kind, Int32Constant(static_cast(PropertyKind::kData))), + not_data); // Finally, load the value. return LoadValueByKeyIndex(dictionary, index); } @@ -8607,7 +8668,7 @@ void CodeStubAssembler::InsertEntry( StoreValueByKeyIndex(dictionary, index, value); // Prepare details of the new property. 
- PropertyDetails d(kData, NONE, + PropertyDetails d(PropertyKind::kData, NONE, PropertyDetails::kConstIfDictConstnessTracking); enum_index = @@ -8677,10 +8738,10 @@ template <> void CodeStubAssembler::Add(TNode dictionary, TNode key, TNode value, Label* bailout) { - PropertyDetails d(kData, NONE, + PropertyDetails d(PropertyKind::kData, NONE, PropertyDetails::kConstIfDictConstnessTracking); - PropertyDetails d_dont_enum(kData, DONT_ENUM, + PropertyDetails d_dont_enum(PropertyKind::kData, DONT_ENUM, PropertyDetails::kConstIfDictConstnessTracking); TNode details_byte_enum = UncheckedCast(Uint32Constant(d.ToByte())); @@ -9517,7 +9578,9 @@ TNode CodeStubAssembler::CallGetterIfAccessor( Label done(this), if_accessor_info(this, Label::kDeferred); TNode kind = DecodeWord32(details); - GotoIf(Word32Equal(kind, Int32Constant(kData)), &done); + GotoIf( + Word32Equal(kind, Int32Constant(static_cast(PropertyKind::kData))), + &done); // Accessor case. GotoIfNot(IsAccessorPair(CAST(value)), &if_accessor_info); @@ -11399,7 +11462,7 @@ TNode CodeStubAssembler::CreateAllocationSiteInFeedbackVector( // Store an empty fixed array for the code dependency. StoreObjectFieldRoot(site, AllocationSite::kDependentCodeOffset, - RootIndex::kEmptyWeakFixedArray); + DependentCode::kEmptyDependentCode); // Link the object to the allocation site list TNode site_list = ExternalConstant( @@ -13830,8 +13893,8 @@ void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached( TNode CodeStubAssembler::LoadJSArrayBufferBackingStorePtr( TNode array_buffer) { - return LoadObjectField(array_buffer, - JSArrayBuffer::kBackingStoreOffset); + return LoadCagedPointerFromObject(array_buffer, + JSArrayBuffer::kBackingStoreOffset); } TNode CodeStubAssembler::LoadJSArrayBufferViewBuffer( @@ -13858,7 +13921,7 @@ TNode CodeStubAssembler::LoadJSTypedArrayLengthAndCheckDetached( TNode buffer = LoadJSArrayBufferViewBuffer(typed_array); Label variable_length(this), fixed_length(this), end(this); - Branch(IsVariableLengthTypedArray(typed_array), &variable_length, + Branch(IsVariableLengthJSArrayBufferView(typed_array), &variable_length, &fixed_length); BIND(&variable_length); { @@ -13881,36 +13944,55 @@ TNode CodeStubAssembler::LoadJSTypedArrayLengthAndCheckDetached( // ES #sec-integerindexedobjectlength TNode CodeStubAssembler::LoadVariableLengthJSTypedArrayLength( - TNode array, TNode buffer, Label* miss) { + TNode array, TNode buffer, + Label* detached_or_out_of_bounds) { + // byte_length already takes array's offset into account. + TNode byte_length = LoadVariableLengthJSArrayBufferViewByteLength( + array, buffer, detached_or_out_of_bounds); + TNode element_size = + RabGsabElementsKindToElementByteSize(LoadElementsKind(array)); + return Unsigned(IntPtrDiv(Signed(byte_length), element_size)); +} + +TNode +CodeStubAssembler::LoadVariableLengthJSArrayBufferViewByteLength( + TNode array, TNode buffer, + Label* detached_or_out_of_bounds) { Label is_gsab(this), is_rab(this), end(this); TVARIABLE(UintPtrT, result); + TNode array_byte_offset = LoadJSArrayBufferViewByteOffset(array); Branch(IsSharedArrayBuffer(buffer), &is_gsab, &is_rab); BIND(&is_gsab); { - // Non-length-tracking GSAB-backed TypedArrays shouldn't end up here. - CSA_DCHECK(this, IsLengthTrackingTypedArray(array)); + // Non-length-tracking GSAB-backed ArrayBufferViews shouldn't end up here. + CSA_DCHECK(this, IsLengthTrackingJSArrayBufferView(array)); // Read the byte length from the BackingStore. 
-    const TNode<ExternalReference> length_function = ExternalConstant(
-        ExternalReference::length_tracking_gsab_backed_typed_array_length());
+    const TNode<ExternalReference> byte_length_function =
+        ExternalConstant(ExternalReference::gsab_byte_length());
     TNode<ExternalReference> isolate_ptr =
         ExternalConstant(ExternalReference::isolate_address(isolate()));
-    result = UncheckedCast<UintPtrT>(
-        CallCFunction(length_function, MachineType::UintPtr(),
+    TNode<UintPtrT> buffer_byte_length = UncheckedCast<UintPtrT>(
+        CallCFunction(byte_length_function, MachineType::UintPtr(),
                       std::make_pair(MachineType::Pointer(), isolate_ptr),
-                      std::make_pair(MachineType::AnyTagged(), array)));
+                      std::make_pair(MachineType::AnyTagged(), buffer)));
+    // Since the SharedArrayBuffer can't shrink, and we've managed to create
+    // this JSArrayBufferDataView without throwing an exception, we know that
+    // buffer_byte_length >= array_byte_offset.
+    CSA_CHECK(this,
+              UintPtrGreaterThanOrEqual(buffer_byte_length, array_byte_offset));
+    result = UintPtrSub(buffer_byte_length, array_byte_offset);
     Goto(&end);
   }
 
   BIND(&is_rab);
   {
-    GotoIf(IsDetachedBuffer(buffer), miss);
+    GotoIf(IsDetachedBuffer(buffer), detached_or_out_of_bounds);
 
     TNode<UintPtrT> buffer_byte_length = LoadJSArrayBufferByteLength(buffer);
-    TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
 
     Label is_length_tracking(this), not_length_tracking(this);
-    Branch(IsLengthTrackingTypedArray(array), &is_length_tracking,
+    Branch(IsLengthTrackingJSArrayBufferView(array), &is_length_tracking,
           &not_length_tracking);
 
     BIND(&is_length_tracking);
@@ -13918,16 +14000,8 @@ TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
       // The backing RAB might have been shrunk so that the start of the
       // TypedArray is already out of bounds.
       GotoIfNot(UintPtrLessThanOrEqual(array_byte_offset, buffer_byte_length),
-                miss);
-      // length = (buffer_byte_length - byte_offset) / element_size
-      // Conversion to signed is OK since buffer_byte_length <
-      // JSArrayBuffer::kMaxByteLength.
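// [Illustrative sketch] The GSAB arm above now computes a view's byte length
// directly: ask the C++ runtime for the shared buffer's current byte length
// and subtract the view's offset. Because a growable SharedArrayBuffer can
// only grow, a view created with a valid offset can never go out of bounds,
// which is what the CSA_CHECK asserts. A plain-C++ restatement under the same
// invariant (all names here are hypothetical):

#include <cassert>
#include <cstddef>

struct GrowableSharedBuffer {
  size_t byte_length;  // only ever increases after creation
};

size_t ViewByteLength(const GrowableSharedBuffer& buffer,
                      size_t view_byte_offset) {
  // Mirrors the CSA_CHECK: view creation already guaranteed this invariant.
  assert(buffer.byte_length >= view_byte_offset);
  return buffer.byte_length - view_byte_offset;
}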
-      TNode<IntPtrT> element_size =
-          RabGsabElementsKindToElementByteSize(LoadElementsKind(array));
-      TNode<IntPtrT> length =
-          IntPtrDiv(Signed(UintPtrSub(buffer_byte_length, array_byte_offset)),
-                    element_size);
-      result = Unsigned(length);
+                detached_or_out_of_bounds);
+      result = UintPtrSub(buffer_byte_length, array_byte_offset);
       Goto(&end);
     }
 
@@ -13940,8 +14014,8 @@
       GotoIfNot(UintPtrGreaterThanOrEqual(
                     buffer_byte_length,
                     UintPtrAdd(array_byte_offset, array_byte_length)),
-                miss);
-      result = LoadJSTypedArrayLength(array);
+                detached_or_out_of_bounds);
+      result = array_byte_length;
       Goto(&end);
     }
   }
@@ -13949,13 +14023,13 @@
   return result.value();
 }
 
-void CodeStubAssembler::IsJSTypedArrayDetachedOrOutOfBounds(
-    TNode<JSTypedArray> array, Label* detached_or_oob,
+void CodeStubAssembler::IsJSArrayBufferViewDetachedOrOutOfBounds(
+    TNode<JSArrayBufferView> array, Label* detached_or_oob,
     Label* not_detached_nor_oob) {
   TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
 
   GotoIf(IsDetachedBuffer(buffer), detached_or_oob);
-  GotoIfNot(IsVariableLengthTypedArray(array), not_detached_nor_oob);
+  GotoIfNot(IsVariableLengthJSArrayBufferView(array), not_detached_nor_oob);
   GotoIf(IsSharedArrayBuffer(buffer), not_detached_nor_oob);
 
   {
@@ -13963,7 +14037,7 @@ void CodeStubAssembler::IsJSTypedArrayDetachedOrOutOfBounds(
     TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
 
     Label length_tracking(this), not_length_tracking(this);
-    Branch(IsLengthTrackingTypedArray(array), &length_tracking,
+    Branch(IsLengthTrackingJSArrayBufferView(array), &length_tracking,
           &not_length_tracking);
 
     BIND(&length_tracking);
@@ -14066,10 +14140,10 @@ TNode<JSArrayBuffer> CodeStubAssembler::GetTypedArrayBuffer(
   Label call_runtime(this), done(this);
   TVARIABLE(Object, var_result);
 
+  GotoIf(IsOnHeapTypedArray(array), &call_runtime);
+
   TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
   GotoIf(IsDetachedBuffer(buffer), &call_runtime);
-  TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStorePtr(buffer);
-  GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime);
   var_result = buffer;
   Goto(&done);
@@ -14332,24 +14406,30 @@ TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
   return Word32NotEqual(flags, Int32Constant(0));
 }
 
-TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
+TNode<CodeT> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
   CSA_DCHECK(this, SmiBelow(builtin_id, SmiConstant(Builtins::kBuiltinCount)));
 
   TNode<IntPtrT> offset =
      ElementOffsetFromIndex(SmiToBInt(builtin_id), SYSTEM_POINTER_ELEMENTS);
 
-  return CAST(BitcastWordToTagged(Load<RawPtrT>(
-      ExternalConstant(ExternalReference::builtins_address(isolate())),
-      offset)));
+  TNode<ExternalReference> table = ExternalConstant(
+#ifdef V8_EXTERNAL_CODE_SPACE
+      ExternalReference::builtins_code_data_container_table(isolate())
+#else
+      ExternalReference::builtins_table(isolate())
+#endif  // V8_EXTERNAL_CODE_SPACE
+  );  // NOLINT(whitespace/parens)
+
+  return CAST(BitcastWordToTagged(Load<RawPtrT>(table, offset)));
 }
 
-TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
+TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
     TNode<SharedFunctionInfo> shared_info, TVariable<Uint16T>* data_type_out,
     Label* if_compile_lazy) {
   TNode<Object> sfi_data =
       LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
 
-  TVARIABLE(Code, sfi_code);
+  TVARIABLE(CodeT, sfi_code);
 
   Label done(this);
   Label check_instance_type(this);
@@ -14378,6 +14458,8 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
       CODET_TYPE,
       UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
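// [Illustrative sketch] For resizable (non-shared) buffers the arithmetic in
// the RAB arm collapses to two bounds checks: a length-tracking view is out
// of bounds only when the shrunken buffer no longer reaches its offset, while
// a fixed-length view needs its whole window to fit. A hypothetical plain-C++
// model, with std::nullopt standing in for the detached_or_out_of_bounds
// label:

#include <cstddef>
#include <optional>

struct ResizableBufferView {
  size_t byte_offset;
  size_t byte_length;   // ignored when length_tracking is true
  bool length_tracking;
};

std::optional<size_t> ViewByteLength(size_t buffer_byte_length,
                                     const ResizableBufferView& view) {
  if (view.length_tracking) {
    // Tracks the buffer: out of bounds only if the start moved past the end.
    if (view.byte_offset > buffer_byte_length) return std::nullopt;
    return buffer_byte_length - view.byte_offset;
  }
  // Fixed length: the whole [offset, offset + length) window must fit.
  if (buffer_byte_length < view.byte_offset + view.byte_length) {
    return std::nullopt;
  }
  return view.byte_length;
}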
       UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
+      UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_WITH_JOB_TYPE,
+      UNCOMPILED_DATA_WITH_PREPARSE_DATA_AND_JOB_TYPE,
       FUNCTION_TEMPLATE_INFO_TYPE,
 #if V8_ENABLE_WEBASSEMBLY
       WASM_CAPI_FUNCTION_DATA_TYPE,
@@ -14389,16 +14471,17 @@
   Label check_is_bytecode_array(this);
   Label check_is_baseline_data(this);
   Label check_is_asm_wasm_data(this);
-  Label check_is_uncompiled_data_without_preparse_data(this);
-  Label check_is_uncompiled_data_with_preparse_data(this);
+  Label check_is_uncompiled_data(this);
   Label check_is_function_template_info(this);
   Label check_is_interpreter_data(this);
   Label check_is_wasm_function_data(this);
   Label* case_labels[] = {
       &check_is_bytecode_array,
       &check_is_baseline_data,
-      &check_is_uncompiled_data_without_preparse_data,
-      &check_is_uncompiled_data_with_preparse_data,
+      &check_is_uncompiled_data,
+      &check_is_uncompiled_data,
+      &check_is_uncompiled_data,
+      &check_is_uncompiled_data,
       &check_is_function_template_info,
 #if V8_ENABLE_WEBASSEMBLY
       &check_is_wasm_function_data,
@@ -14413,28 +14496,26 @@
 
   // IsBytecodeArray: Interpret bytecode
   BIND(&check_is_bytecode_array);
-  sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
+  sfi_code = HeapConstant(BUILTIN_CODET(isolate(), InterpreterEntryTrampoline));
   Goto(&done);
 
   // IsBaselineData: Execute baseline code
   BIND(&check_is_baseline_data);
   {
     TNode<CodeT> baseline_code = CAST(sfi_data);
-    sfi_code = FromCodeT(baseline_code);
+    sfi_code = baseline_code;
     Goto(&done);
   }
 
   // IsUncompiledDataWithPreparseData | IsUncompiledDataWithoutPreparseData:
   // Compile lazy
-  BIND(&check_is_uncompiled_data_with_preparse_data);
-  Goto(&check_is_uncompiled_data_without_preparse_data);
-  BIND(&check_is_uncompiled_data_without_preparse_data);
-  sfi_code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
+  BIND(&check_is_uncompiled_data);
+  sfi_code = HeapConstant(BUILTIN_CODET(isolate(), CompileLazy));
   Goto(if_compile_lazy ? if_compile_lazy : &done);
 
   // IsFunctionTemplateInfo: API call
   BIND(&check_is_function_template_info);
-  sfi_code = HeapConstant(BUILTIN_CODE(isolate(), HandleApiCall));
+  sfi_code = HeapConstant(BUILTIN_CODET(isolate(), HandleApiCall));
   Goto(&done);
 
   // IsInterpreterData: Interpret bytecode
@@ -14445,7 +14526,7 @@
   {
     TNode<CodeT> trampoline =
         LoadInterpreterDataInterpreterTrampoline(CAST(sfi_data));
-    sfi_code = FromCodeT(trampoline);
+    sfi_code = trampoline;
   }
   Goto(&done);
 
@@ -14458,7 +14539,7 @@
 
   // IsAsmWasmData: Instantiate using AsmWasmData
   BIND(&check_is_asm_wasm_data);
-  sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InstantiateAsmJs));
+  sfi_code = HeapConstant(BUILTIN_CODET(isolate(), InstantiateAsmJs));
   Goto(&done);
 #endif  // V8_ENABLE_WEBASSEMBLY
 
@@ -14482,8 +14563,7 @@ TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<CodeT> code) {
 
 TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
     TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
     TNode<Context> context) {
-  // TODO(v8:11880): avoid roundtrips between cdc and code.
-  const TNode<Code> code = GetSharedFunctionInfoCode(shared_info);
+  const TNode<CodeT> code = GetSharedFunctionInfoCode(shared_info);
 
   // TODO(ishell): All the callers of this function pass map loaded from
   // Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX. So we can remove
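// [Illustrative sketch] LoadBuiltin above is just an indexed, pointer-sized
// load from a flat per-isolate table; V8_EXTERNAL_CODE_SPACE only changes
// which table is consulted (CodeDataContainer entries instead of Code). A
// rough model of the lookup (names and the bounds handling are hypothetical):

#include <cstdint>

using Address = std::uintptr_t;

Address LoadBuiltinEntry(const Address* builtins_table, int builtin_id,
                         int builtin_count) {
  // Analogue of CSA_DCHECK(SmiBelow(builtin_id, kBuiltinCount)).
  if (builtin_id < 0 || builtin_id >= builtin_count) return 0;
  // ElementOffsetFromIndex(id, SYSTEM_POINTER_ELEMENTS) + Load, in one step.
  return builtins_table[builtin_id];
}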
@@ -14502,7 +14582,7 @@ TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
   StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
                                  shared_info);
   StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
-  StoreObjectField(fun, JSFunction::kCodeOffset, ToCodeT(code));
+  StoreObjectField(fun, JSFunction::kCodeOffset, code);
   return CAST(fun);
 }
 
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 4d16af8a3d63e9..109bd9cfa42348 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -9,6 +9,7 @@
 
 #include "src/base/macros.h"
 #include "src/codegen/bailout-reason.h"
+#include "src/codegen/tnode.h"
 #include "src/common/globals.h"
 #include "src/common/message-template.h"
 #include "src/compiler/code-assembler.h"
@@ -1042,32 +1043,29 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
   void GotoIfForceSlowPath(Label* if_true);
 
-#ifdef V8_CAGED_POINTERS
-
   //
   // Caged pointer related functionality.
   //
 
   // Load a caged pointer value from an object.
-  TNode<CagedPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
-                                              int offset) {
+  TNode<RawPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
+                                            int offset) {
     return LoadCagedPointerFromObject(object, IntPtrConstant(offset));
   }
 
-  TNode<CagedPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
-                                              TNode<IntPtrT> offset);
+  TNode<RawPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
+                                            TNode<IntPtrT> offset);
 
   // Stored a caged pointer value to an object.
   void StoreCagedPointerToObject(TNode<HeapObject> object, int offset,
-                                 TNode<CagedPtrT> pointer) {
+                                 TNode<RawPtrT> pointer) {
     StoreCagedPointerToObject(object, IntPtrConstant(offset), pointer);
   }
 
   void StoreCagedPointerToObject(TNode<HeapObject> object,
-                                 TNode<IntPtrT> offset,
-                                 TNode<CagedPtrT> pointer);
+                                 TNode<IntPtrT> offset, TNode<RawPtrT> pointer);
 
-#endif  // V8_CAGED_POINTERS
+  TNode<RawPtrT> EmptyBackingStoreBufferConstant();
 
   //
   // ExternalPointerT-related functionality.
@@ -1147,14 +1145,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   TNode<RawPtrT> LoadJSTypedArrayExternalPointerPtr(
       TNode<JSTypedArray> holder) {
-    return LoadObjectField<RawPtrT>(holder,
-                                    JSTypedArray::kExternalPointerOffset);
+    return LoadCagedPointerFromObject(holder,
+                                      JSTypedArray::kExternalPointerOffset);
   }
 
   void StoreJSTypedArrayExternalPointerPtr(TNode<JSTypedArray> holder,
                                            TNode<RawPtrT> value) {
-    StoreObjectFieldNoWriteBarrier<RawPtrT>(
-        holder, JSTypedArray::kExternalPointerOffset, value);
+    StoreCagedPointerToObject(holder, JSTypedArray::kExternalPointerOffset,
+                              value);
   }
 
   // Load value from current parent frame by given offset in bytes.
@@ -1178,6 +1176,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<IntPtrT> LoadBufferIntptr(TNode<RawPtrT> buffer, int offset) {
     return LoadBufferData<IntPtrT>(buffer, offset);
   }
+  TNode<Uint8T> LoadUint8Ptr(TNode<RawPtrT> ptr, TNode<IntPtrT> offset);
+
   // Load a field from an object on the heap.
   template <class T, typename std::enable_if<
                          std::is_convertible<TNode<T>, TNode<Object>>::value &&
@@ -2461,6 +2461,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
       base::Optional<TNode<Object>> arg1 = base::nullopt,
       base::Optional<TNode<Object>> arg2 = base::nullopt);
 
+  TNode<HeapObject> GetPendingMessage();
+  void SetPendingMessage(TNode<HeapObject> message);
+
   // Type checks.
   // Check whether the map is for an object with special properties, such as a
   // JSProxy or an object with interceptors.
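// [Illustrative sketch] The header now exposes the caged-pointer accessors
// unconditionally, and JSTypedArray's external pointer goes through them. The
// idea, roughly: such fields hold values that can only decode to an address
// inside a reserved virtual-memory cage, so a corrupted field cannot escape
// it. One plausible encoding is base-plus-masked-offset; the real scheme is
// V8-internal and may differ, and every constant below is an assumption:

#include <cstdint>

constexpr std::uintptr_t kCageSize = std::uintptr_t{1} << 40;  // assumption
constexpr std::uintptr_t kOffsetMask = kCageSize - 1;

// Decoding: whatever the field contains, the result lands inside the cage.
std::uintptr_t LoadCagedPointer(std::uintptr_t cage_base,
                                std::uintptr_t raw_field) {
  return cage_base + (raw_field & kOffsetMask);
}

// Encoding: store the pointer as an offset relative to the cage base.
std::uintptr_t EncodeCagedPointer(std::uintptr_t cage_base,
                                  std::uintptr_t pointer) {
  return (pointer - cage_base) & kOffsetMask;
}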
@@ -2937,6 +2940,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<RawPtr<Uint16T>> ExternalTwoByteStringGetChars(
       TNode<ExternalTwoByteString> string);
 
+  TNode<RawPtr<Uint8T>> IntlAsciiCollationWeightsL1();
+  TNode<RawPtr<Uint8T>> IntlAsciiCollationWeightsL3();
+
   // Performs a hash computation and string table lookup for the given string,
   // and jumps to:
   // - |if_index| if the string is an array index like "123"; |var_index|
@@ -3603,15 +3609,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Helper for length tracking JSTypedArrays and JSTypedArrays backed by
   // ResizableArrayBuffer.
   TNode<UintPtrT> LoadVariableLengthJSTypedArrayLength(
-      TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer, Label* miss);
+      TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer,
+      Label* detached_or_out_of_bounds);
 
   // Helper for length tracking JSTypedArrays and JSTypedArrays backed by
   // ResizableArrayBuffer.
   TNode<UintPtrT> LoadVariableLengthJSTypedArrayByteLength(
       TNode<Context> context, TNode<JSTypedArray> array,
       TNode<JSArrayBuffer> buffer);
-  void IsJSTypedArrayDetachedOrOutOfBounds(TNode<JSTypedArray> array,
-                                           Label* detached_or_oob,
-                                           Label* not_detached_nor_oob);
+  TNode<UintPtrT> LoadVariableLengthJSArrayBufferViewByteLength(
+      TNode<JSArrayBufferView> array, TNode<JSArrayBuffer> buffer,
+      Label* detached_or_out_of_bounds);
+
+  void IsJSArrayBufferViewDetachedOrOutOfBounds(TNode<JSArrayBufferView> array,
+                                                Label* detached_or_oob,
+                                                Label* not_detached_nor_oob);
 
   TNode<IntPtrT> RabGsabElementsKindToElementByteSize(
       TNode<Int32T> elementsKind);
@@ -3629,7 +3640,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
       ElementsKind kind = HOLEY_ELEMENTS);
 
   // Load a builtin's code from the builtin array in the isolate.
-  TNode<Code> LoadBuiltin(TNode<Smi> builtin_id);
+  TNode<CodeT> LoadBuiltin(TNode<Smi> builtin_id);
 
   // Figure out the SFI's code object using its data field.
   // If |data_type_out| is provided, the instance type of the function data will
   // be stored in it. In case the code object is a builtin (data is a Smi),
   // data_type_out will be set to 0.
   // If |if_compile_lazy| is provided then the execution will go to the given
   // label in case of an CompileLazy code object.
-  TNode<Code> GetSharedFunctionInfoCode(
+  TNode<CodeT> GetSharedFunctionInfoCode(
       TNode<SharedFunctionInfo> shared_info,
       TVariable<Uint16T>* data_type_out = nullptr,
       Label* if_compile_lazy = nullptr);
 
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index 861bd2904f271f..725f054c4edb5a 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -136,12 +136,16 @@ bool HasOrigin(Isolate* isolate, Handle<SharedFunctionInfo> function_info,
     return false;
   }
 
-  Handle<FixedArray> host_defined_options;
-  if (!script_details.host_defined_options.ToHandle(&host_defined_options)) {
-    host_defined_options = isolate->factory()->empty_fixed_array();
+  // TODO(cbruni, chromium:1244145): Remove once migrated to the context
+  Handle<Object> maybe_host_defined_options;
+  if (!script_details.host_defined_options.ToHandle(
+          &maybe_host_defined_options)) {
+    maybe_host_defined_options = isolate->factory()->empty_fixed_array();
   }
-
-  Handle<FixedArray> script_options(script->host_defined_options(), isolate);
+  Handle<FixedArray> host_defined_options =
+      Handle<FixedArray>::cast(maybe_host_defined_options);
+  Handle<FixedArray> script_options(
+      FixedArray::cast(script->host_defined_options()), isolate);
   int length = host_defined_options->length();
   if (length != script_options->length()) return false;
 
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index b7eafaf0d984aa..d6032988971fc4 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -36,7 +36,9 @@
 #include "src/execution/local-isolate.h"
 #include "src/execution/runtime-profiler.h"
 #include "src/execution/vm-state-inl.h"
+#include "src/handles/handles.h"
 #include "src/handles/maybe-handles.h"
+#include "src/handles/persistent-handles.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/local-factory-inl.h"
 #include "src/heap/local-heap-inl.h"
@@ -551,7 +553,7 @@ void InstallInterpreterTrampolineCopy(
           INTERPRETER_DATA_TYPE, AllocationType::kOld));
 
   interpreter_data->set_bytecode_array(*bytecode_array);
-  interpreter_data->set_interpreter_trampoline(*code);
+  interpreter_data->set_interpreter_trampoline(ToCodeT(*code));
 
   shared_info->set_interpreter_data(*interpreter_data);
 
@@ -637,16 +639,18 @@ void UpdateSharedFunctionFlagsAfterCompilation(FunctionLiteral* literal,
                                                SharedFunctionInfo shared_info) {
   DCHECK_EQ(shared_info.language_mode(), literal->language_mode());
 
+  // These fields are all initialised in ParseInfo from the SharedFunctionInfo,
+  // and then set back on the literal after parse. Hence, they should already
+  // match.
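// [Illustrative sketch] After the hunk above, both sides of the HasOrigin
// check are plain FixedArrays (an empty one when absent), and the code that
// follows compares them element-wise. A hypothetical restatement of that
// comparison, with std::vector standing in for FixedArray:

#include <cstddef>
#include <vector>

bool OptionsMatch(const std::vector<int>& host_defined_options,
                  const std::vector<int>& script_options) {
  if (host_defined_options.size() != script_options.size()) return false;
  for (size_t i = 0; i < host_defined_options.size(); ++i) {
    if (host_defined_options[i] != script_options[i]) return false;
  }
  return true;
}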
+  DCHECK_EQ(shared_info.requires_instance_members_initializer(),
+            literal->requires_instance_members_initializer());
+  DCHECK_EQ(shared_info.class_scope_has_private_brand(),
+            literal->class_scope_has_private_brand());
+  DCHECK_EQ(shared_info.has_static_private_methods_or_accessors(),
+            literal->has_static_private_methods_or_accessors());
+
   shared_info.set_has_duplicate_parameters(literal->has_duplicate_parameters());
   shared_info.UpdateAndFinalizeExpectedNofPropertiesFromEstimate(literal);
 
-  if (literal->dont_optimize_reason() != BailoutReason::kNoReason) {
-    shared_info.DisableOptimization(literal->dont_optimize_reason());
-  }
-
-  shared_info.set_class_scope_has_private_brand(
-      literal->class_scope_has_private_brand());
-  shared_info.set_has_static_private_methods_or_accessors(
-      literal->has_static_private_methods_or_accessors());
 
   shared_info.SetScopeInfo(*literal->scope()->scope_info());
 }
@@ -683,7 +687,7 @@ CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
 
 std::unique_ptr<UnoptimizedCompilationJob>
 ExecuteSingleUnoptimizedCompilationJob(
-    ParseInfo* parse_info, FunctionLiteral* literal,
+    ParseInfo* parse_info, FunctionLiteral* literal, Handle