diff --git a/common.gypi b/common.gypi
index cc916c6c226ecb..9c5efa905dfd3c 100644
--- a/common.gypi
+++ b/common.gypi
@@ -38,7 +38,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
     # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.5',
+    'v8_embedder_string': '-node.7',
 
     ##### V8 defaults for Node.js #####
 
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index fdd96f9b820f3d..a87c01e49d0fe7 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -9,6 +9,9 @@ vars = {
   'download_jsfunfuzz': False,
   'download_mips_toolchain': False,
   'check_v8_header_includes': False,
+
+  # luci-go CIPD package version.
+  'luci_go': 'git_revision:fdf05508e8a66c773a41521e0243c9d11b9a2a1c',
 }
 
 deps = {
@@ -51,7 +54,7 @@ deps = {
   'v8/third_party/markupsafe':
     Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
   'v8/tools/swarming_client':
-    Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '486c9b53c4d54dd4b95bb6ce0e31160e600dfc11',
+    Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '0e3e1c4dc4e79f25a5b58fcbc135dc93183c0c54',
   'v8/test/benchmarks/data':
     Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
   'v8/test/mozilla/data':
@@ -82,8 +85,24 @@ deps = {
   },
   'v8/tools/clang':
     Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '7792d28b069af6dd3a86d1ba83b7f5c4ede605dc',
-  'v8/tools/luci-go':
-    Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '445d7c4b6a4f10e188edb395b132e3996b127691',
+  'v8/tools/luci-go': {
+      'packages': [
+        {
+          'package': 'infra/tools/luci/isolate/${{platform}}',
+          'version': Var('luci_go'),
+        },
+        {
+          'package': 'infra/tools/luci/isolated/${{platform}}',
+          'version': Var('luci_go'),
+        },
+        {
+          'package': 'infra/tools/luci/swarming/${{platform}}',
+          'version': Var('luci_go'),
+        },
+      ],
+      'condition': 'host_cpu != "s390"',
+      'dep_type': 'cipd',
+  },
   'v8/test/wasm-js':
     Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'db9cd40808a90ecc5f4a23e88fb375c8f60b8d52',
 }
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 8624767047c2f1..114e57c58eaf7d 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -11,7 +11,7 @@
 #define V8_MAJOR_VERSION 7
 #define V8_MINOR_VERSION 1
 #define V8_BUILD_NUMBER 302
-#define V8_PATCH_LEVEL 28
+#define V8_PATCH_LEVEL 33
 
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/builtins/array-splice.tq b/deps/v8/src/builtins/array-splice.tq
index 16a192d2c001c9..5746f4cdf6714a 100644
--- a/deps/v8/src/builtins/array-splice.tq
+++ b/deps/v8/src/builtins/array-splice.tq
@@ -9,8 +9,12 @@ module array {
   // zero-length input FixedArray is handled here.
   macro Extract<FixedArrayType: type>(
       elements: FixedArrayBase, first: Smi, count: Smi,
-      capacity: Smi): FixedArrayType {
-    return UnsafeCast<FixedArrayType>(
+      capacity: Smi): FixedArrayType;
+
+  Extract<FixedArray>(
+      elements: FixedArrayBase, first: Smi, count: Smi,
+      capacity: Smi): FixedArray {
+    return UnsafeCast<FixedArray>(
         ExtractFixedArray(elements, first, count, capacity));
   }
 
@@ -24,31 +28,73 @@ module array {
         ExtractFixedArray(elements, first, count, capacity));
   }
 
+  macro DoMoveElements<FixedArrayType: type>(
+      elements: FixedArrayType, dstIndex: Smi, srcIndex: Smi,
+      count: Smi): void {
+    TorqueMoveElements(
+        elements, Convert<intptr>(dstIndex), Convert<intptr>(srcIndex),
+        Convert<intptr>(count));
+  }
+
+  macro StoreHoles<FixedArrayType: type>(
+      elements: FixedArrayType, holeStartIndex: Smi, holeEndIndex: Smi): void {
+    for (let i: Smi = holeStartIndex; i < holeEndIndex; i++) {
+      StoreArrayHole(elements, i);
+    }
+  }
+
+  macro DoCopyElements<FixedArrayType: type>(
+      dstElements: FixedArrayType, dstIndex: Smi, srcElements: FixedArrayType,
+      srcIndex: Smi, count: Smi): void {
+    TorqueCopyElements(
+        dstElements, Convert<intptr>(dstIndex), srcElements,
+        Convert<intptr>(srcIndex), Convert<intptr>(count));
+  }
+
   macro FastSplice<FixedArrayType: type, ElementType: type>(
       args: constexpr Arguments, a: JSArray, length: Smi, newLength: Smi,
       lengthDelta: Smi, actualStart: Smi, insertCount: Smi,
       actualDeleteCount: Smi): void labels Bailout {
-    const elements: FixedArrayBase = a.elements;
-    const elementsMap: Map = elements.map;
-
-    // If the spliced array is larger then the
-    // source array, then allocate a new FixedArrayType to hold the result.
-    let newElements: FixedArrayBase = elements;
-    if (elementsMap == kCOWMap || lengthDelta > 0) {
-      newElements =
-          Extract<FixedArrayType>(elements, 0, actualStart, newLength);
-      if (elementsMap == kCOWMap) {
-        newElements.map = elementsMap;
+    // Make sure elements are writable.
+    EnsureWriteableFastElements(a);
+
+    if (insertCount != actualDeleteCount) {
+      const elements: FixedArrayBase = a.elements;
+      const dstIndex: Smi = actualStart + insertCount;
+      const srcIndex: Smi = actualStart + actualDeleteCount;
+      const count: Smi = length - actualDeleteCount - actualStart;
+      if (insertCount < actualDeleteCount) {
+        // Shrink.
+        DoMoveElements<FixedArrayType>(
+            UnsafeCast<FixedArrayType>(elements), dstIndex, srcIndex, count);
+        StoreHoles<FixedArrayType>(
+            UnsafeCast<FixedArrayType>(elements), newLength, length);
+      } else if (insertCount > actualDeleteCount) {
+        // If the backing store is big enough, then moving elements is enough.
+        if (newLength <= elements.length) {
+          DoMoveElements<FixedArrayType>(
+              UnsafeCast<FixedArrayType>(elements), dstIndex, srcIndex, count);
+        } else {
+          // Grow.
+          let capacity: Smi = CalculateNewElementsCapacity(newLength);
+          const newElements: FixedArrayType =
+              Extract<FixedArrayType>(elements, 0, actualStart, capacity);
+          a.elements = newElements;
+          if (elements.length > 0) {
+            DoCopyElements<FixedArrayType>(
+                newElements, dstIndex, UnsafeCast<FixedArrayType>(elements),
+                srcIndex, count);
+          }
+        }
       }
-      a.elements = newElements;
     }
 
-    // Copy over inserted elements.
+    // Copy arguments.
     let k: Smi = actualStart;
     if (insertCount > 0) {
       const typedNewElements: FixedArrayType =
-          UnsafeCast<FixedArrayType>(newElements);
+          UnsafeCast<FixedArrayType>(a.elements);
       for (let e: Object of args [2: ]) {
         // The argument elements were already validated to be an appropriate
         // {ElementType} to store in {FixedArrayType}.
@@ -56,31 +102,6 @@ module array {
       }
     }
 
-    // Copy over elements after deleted elements.
-    let count: Smi = length - actualStart - actualDeleteCount;
-    while (count > 0) {
-      const typedElements: FixedArrayType =
-          UnsafeCast<FixedArrayType>(elements);
-      const typedNewElements: FixedArrayType =
-          UnsafeCast<FixedArrayType>(newElements);
-      CopyArrayElement(typedElements, typedNewElements, k - lengthDelta, k);
-      k++;
-      count--;
-    }
-
-    // Fill rest of spliced FixedArray with the hole, but only if the
-    // destination FixedArray is the original array's, since otherwise the array
-    // is pre-filled with holes.
-    if (elements == newElements) {
-      const typedNewElements: FixedArrayType =
-          UnsafeCast<FixedArrayType>(newElements);
-      const limit: Smi = elements.length;
-      while (k < limit) {
-        StoreArrayHole(typedNewElements, k);
-        k++;
-      }
-    }
-
     // Update the array's length after all the FixedArray shuffling is done.
     a.length = newLength;
   }
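The rewritten `FastSplice` above replaces the old "copy everything into a fresh store" strategy with three cases: shrink in place (move the tail left, then store holes over the freed slots), move in place when the existing backing store already has room, and grow into a freshly extracted store. The JavaScript sketch below models that control flow on ordinary arrays purely for illustration; the helper name and shapes are invented here and are not how V8's internals are invoked.

```js
// Illustrative model of the three FastSplice cases (hypothetical helper;
// V8 operates on FixedArray backing stores, not JS arrays).
function fastSpliceModel(backing, length, start, insertCount, deleteCount, args) {
  const newLength = length + insertCount - deleteCount;
  const count = length - deleteCount - start;   // size of the tail to move
  if (insertCount !== deleteCount) {
    const dst = start + insertCount;
    const src = start + deleteCount;
    if (insertCount < deleteCount) {
      // Shrink: move the tail left (memmove-style), then hole out the rest.
      backing.copyWithin(dst, src, src + count);
      for (let i = newLength; i < length; i++) delete backing[i];
    } else if (newLength <= backing.length) {
      // Enough capacity: move the tail right in place.
      backing.copyWithin(dst, src, src + count);
    } else {
      // Grow: a new store gets the head [0, start), then the tail is copied.
      const grown = backing.slice(0, start);
      grown.length = newLength;                 // stands in for capacity growth
      for (let i = 0; i < count; i++) grown[dst + i] = backing[src + i];
      backing = grown;
    }
  }
  // Finally, the inserted arguments land in the gap.
  for (let i = 0; i < insertCount; i++) backing[start + i] = args[i];
  return backing;
}
```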
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 20c4f4b9e03dd8..3f5029834db820 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -844,6 +844,8 @@ macro AllowNonNumberElements(kind: ElementsKind): ElementsKind {
 extern macro AllocateZeroedFixedArray(intptr): FixedArray;
 extern macro AllocateZeroedFixedDoubleArray(intptr): FixedDoubleArray;
 
+extern macro CalculateNewElementsCapacity(Smi): Smi;
+
 extern macro CopyFixedArrayElements(
     constexpr ElementsKind, FixedArray, constexpr ElementsKind, FixedArray,
     intptr, intptr, intptr): void;
@@ -879,6 +881,36 @@ extern macro ExtractFixedArray(
 
 extern builtin ExtractFastJSArray(Context, JSArray, Smi, Smi): JSArray;
 
+extern macro MoveElements(
+    constexpr ElementsKind, FixedArrayBase, intptr, intptr, intptr): void;
+macro TorqueMoveElements(
+    elements: FixedArray, dstIndex: intptr, srcIndex: intptr,
+    count: intptr): void {
+  MoveElements(HOLEY_ELEMENTS, elements, dstIndex, srcIndex, count);
+}
+macro TorqueMoveElements(
+    elements: FixedDoubleArray, dstIndex: intptr, srcIndex: intptr,
+    count: intptr): void {
+  MoveElements(HOLEY_DOUBLE_ELEMENTS, elements, dstIndex, srcIndex, count);
+}
+
+extern macro CopyElements(
+    constexpr ElementsKind, FixedArrayBase, intptr, FixedArrayBase, intptr,
+    intptr): void;
+macro TorqueCopyElements(
+    dstElements: FixedArray, dstIndex: intptr, srcElements: FixedArray,
+    srcIndex: intptr, count: intptr): void {
+  CopyElements(
+      HOLEY_ELEMENTS, dstElements, dstIndex, srcElements, srcIndex, count);
+}
+macro TorqueCopyElements(
+    dstElements: FixedDoubleArray, dstIndex: intptr,
+    srcElements: FixedDoubleArray, srcIndex: intptr, count: intptr): void {
+  CopyElements(
+      HOLEY_DOUBLE_ELEMENTS, dstElements, dstIndex, srcElements, srcIndex,
+      count);
+}
+
 macro LoadElementNoHole<T: type>(a: JSArray, index: Smi): Object
     labels IfHole;
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 26c97ba6813f69..d34236bab77051 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -534,8 +534,7 @@ Node* ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
   VARIABLE(offset, MachineType::PointerRepresentation(),
            IntPtrConstant(JSObject::kHeaderSize));
   // Mutable heap numbers only occur on 32-bit platforms.
-  bool may_use_mutable_heap_numbers =
-      FLAG_track_double_fields && !FLAG_unbox_double_fields;
+  bool may_use_mutable_heap_numbers = !FLAG_unbox_double_fields;
   {
     Comment("Copy in-object properties fast");
     Label continue_fast(this, &offset);
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index e307ca5cc3796b..6a70ee825e9035 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -3067,6 +3067,24 @@ TNode<MutableHeapNumber> CodeStubAssembler::AllocateMutableHeapNumber() {
   return UncheckedCast<MutableHeapNumber>(result);
 }
 
+TNode<Object> CodeStubAssembler::CloneIfMutablePrimitive(TNode<Object> object) {
+  TVARIABLE(Object, result, object);
+  Label done(this);
+
+  GotoIf(TaggedIsSmi(object), &done);
+  GotoIfNot(IsMutableHeapNumber(UncheckedCast<HeapObject>(object)), &done);
+  {
+    // Mutable heap number found --- allocate a clone.
+    TNode<Float64T> value =
+        LoadHeapNumberValue(UncheckedCast<HeapNumber>(object));
+    result = AllocateMutableHeapNumberWithValue(value);
+    Goto(&done);
+  }
+
+  BIND(&done);
+  return result.value();
+}
+
 TNode<MutableHeapNumber> CodeStubAssembler::AllocateMutableHeapNumberWithValue(
     SloppyTNode<Float64T> value) {
   TNode<MutableHeapNumber> result = AllocateMutableHeapNumber();
@@ -4543,6 +4561,179 @@ void CodeStubAssembler::FillFixedDoubleArrayWithZero(
       backing_store, IntPtrConstant(0), byte_length);
 }
 
+void CodeStubAssembler::JumpIfPointersFromHereAreInteresting(
+    TNode<Object> object, Label* interesting) {
+  Label finished(this);
+  TNode<IntPtrT> object_word = BitcastTaggedToWord(object);
+  TNode<IntPtrT> object_page = PageFromAddress(object_word);
+  TNode<IntPtrT> page_flags = UncheckedCast<IntPtrT>(Load(
+      MachineType::IntPtr(), object_page, IntPtrConstant(Page::kFlagsOffset)));
+  Branch(
+      WordEqual(WordAnd(page_flags,
+                        IntPtrConstant(
+                            MemoryChunk::kPointersFromHereAreInterestingMask)),
+                IntPtrConstant(0)),
+      &finished, interesting);
+  BIND(&finished);
+}
+
+void CodeStubAssembler::MoveElements(ElementsKind kind,
+                                     TNode<FixedArrayBase> elements,
+                                     TNode<IntPtrT> dst_index,
+                                     TNode<IntPtrT> src_index,
+                                     TNode<IntPtrT> length) {
+  Label finished(this);
+  Label needs_barrier(this);
+  const bool needs_barrier_check = IsObjectElementsKind(kind);
+
+  DCHECK(IsFastElementsKind(kind));
+  CSA_ASSERT(this, IsFixedArrayWithKind(elements, kind));
+  CSA_ASSERT(this,
+             IntPtrLessThanOrEqual(IntPtrAdd(dst_index, length),
+                                   LoadAndUntagFixedArrayBaseLength(elements)));
+  CSA_ASSERT(this,
+             IntPtrLessThanOrEqual(IntPtrAdd(src_index, length),
+                                   LoadAndUntagFixedArrayBaseLength(elements)));
+
+  // The write barrier can be ignored if {elements} is in new space, or if
+  // we have a SMI or double ElementsKind.
+  if (needs_barrier_check) {
+    JumpIfPointersFromHereAreInteresting(elements, &needs_barrier);
+  }
+
+  const TNode<IntPtrT> source_byte_length =
+      IntPtrMul(length, IntPtrConstant(ElementsKindToByteSize(kind)));
+  static const int32_t fa_base_data_offset =
+      FixedArrayBase::kHeaderSize - kHeapObjectTag;
+  TNode<IntPtrT> elements_intptr = BitcastTaggedToWord(elements);
+  TNode<IntPtrT> target_data_ptr =
+      IntPtrAdd(elements_intptr,
+                ElementOffsetFromIndex(dst_index, kind, INTPTR_PARAMETERS,
+                                       fa_base_data_offset));
+  TNode<IntPtrT> source_data_ptr =
+      IntPtrAdd(elements_intptr,
+                ElementOffsetFromIndex(src_index, kind, INTPTR_PARAMETERS,
+                                       fa_base_data_offset));
+  TNode<ExternalReference> memmove =
+      ExternalConstant(ExternalReference::libc_memmove_function());
+  CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
+                 MachineType::Pointer(), MachineType::UintPtr(), memmove,
+                 target_data_ptr, source_data_ptr, source_byte_length);
+
+  if (needs_barrier_check) {
+    Goto(&finished);
+
+    BIND(&needs_barrier);
+    {
+      const TNode<IntPtrT> begin = src_index;
+      const TNode<IntPtrT> end = IntPtrAdd(begin, length);
+
+      // If dst_index is less than src_index, then walk forward.
+      const TNode<IntPtrT> delta =
+          IntPtrMul(IntPtrSub(dst_index, begin),
+                    IntPtrConstant(ElementsKindToByteSize(kind)));
+      auto loop_body = [&](Node* array, Node* offset) {
+        Node* const element = Load(MachineType::AnyTagged(), array, offset);
+        Node* const delta_offset = IntPtrAdd(offset, delta);
+        Store(array, delta_offset, element);
+      };
+
+      Label iterate_forward(this);
+      Label iterate_backward(this);
+      Branch(IntPtrLessThan(delta, IntPtrConstant(0)), &iterate_forward,
+             &iterate_backward);
+      BIND(&iterate_forward);
+      {
+        // Make a loop for the stores.
+        BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body,
+                                   INTPTR_PARAMETERS,
+                                   ForEachDirection::kForward);
+        Goto(&finished);
+      }
+
+      BIND(&iterate_backward);
+      {
+        BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body,
+                                   INTPTR_PARAMETERS,
+                                   ForEachDirection::kReverse);
+        Goto(&finished);
+      }
+    }
+    BIND(&finished);
+  }
+}
+
+void CodeStubAssembler::CopyElements(ElementsKind kind,
+                                     TNode<FixedArrayBase> dst_elements,
+                                     TNode<IntPtrT> dst_index,
+                                     TNode<FixedArrayBase> src_elements,
+                                     TNode<IntPtrT> src_index,
+                                     TNode<IntPtrT> length) {
+  Label finished(this);
+  Label needs_barrier(this);
+  const bool needs_barrier_check = IsObjectElementsKind(kind);
+
+  DCHECK(IsFastElementsKind(kind));
+  CSA_ASSERT(this, IsFixedArrayWithKind(dst_elements, kind));
+  CSA_ASSERT(this, IsFixedArrayWithKind(src_elements, kind));
+  CSA_ASSERT(this, IntPtrLessThanOrEqual(
+                       IntPtrAdd(dst_index, length),
+                       LoadAndUntagFixedArrayBaseLength(dst_elements)));
+  CSA_ASSERT(this, IntPtrLessThanOrEqual(
+                       IntPtrAdd(src_index, length),
+                       LoadAndUntagFixedArrayBaseLength(src_elements)));
+  CSA_ASSERT(this, WordNotEqual(dst_elements, src_elements));
+
+  // The write barrier can be ignored if {dst_elements} is in new space, or if
+  // we have a SMI or double ElementsKind.
+  if (needs_barrier_check) {
+    JumpIfPointersFromHereAreInteresting(dst_elements, &needs_barrier);
+  }
+
+  TNode<IntPtrT> source_byte_length =
+      IntPtrMul(length, IntPtrConstant(ElementsKindToByteSize(kind)));
+  static const int32_t fa_base_data_offset =
+      FixedArrayBase::kHeaderSize - kHeapObjectTag;
+  TNode<IntPtrT> src_offset_start = ElementOffsetFromIndex(
+      src_index, kind, INTPTR_PARAMETERS, fa_base_data_offset);
+  TNode<IntPtrT> dst_offset_start = ElementOffsetFromIndex(
+      dst_index, kind, INTPTR_PARAMETERS, fa_base_data_offset);
+  TNode<IntPtrT> src_elements_intptr = BitcastTaggedToWord(src_elements);
+  TNode<IntPtrT> source_data_ptr =
+      IntPtrAdd(src_elements_intptr, src_offset_start);
+  TNode<IntPtrT> dst_elements_intptr = BitcastTaggedToWord(dst_elements);
+  TNode<IntPtrT> dst_data_ptr =
+      IntPtrAdd(dst_elements_intptr, dst_offset_start);
+  TNode<ExternalReference> memcpy =
+      ExternalConstant(ExternalReference::libc_memcpy_function());
+  CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
+                 MachineType::Pointer(), MachineType::UintPtr(), memcpy,
+                 dst_data_ptr, source_data_ptr, source_byte_length);
+
+  if (needs_barrier_check) {
+    Goto(&finished);
+
+    BIND(&needs_barrier);
+    {
+      const TNode<IntPtrT> begin = src_index;
+      const TNode<IntPtrT> end = IntPtrAdd(begin, length);
+      const TNode<IntPtrT> delta =
+          IntPtrMul(IntPtrSub(dst_index, src_index),
+                    IntPtrConstant(ElementsKindToByteSize(kind)));
+      BuildFastFixedArrayForEach(
+          src_elements, kind, begin, end,
+          [&](Node* array, Node* offset) {
+            Node* const element = Load(MachineType::AnyTagged(), array, offset);
+            Node* const delta_offset = IntPtrAdd(offset, delta);
+            Store(dst_elements, delta_offset, element);
+          },
+          INTPTR_PARAMETERS, ForEachDirection::kForward);
+      Goto(&finished);
+    }
+    BIND(&finished);
+  }
+}
+
 void CodeStubAssembler::CopyFixedArrayElements(
     ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
     Node* to_array, Node* first_element, Node* element_count, Node* capacity,
@@ -4731,7 +4922,8 @@ void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
                                                 Node* to_array,
                                                 Node* property_count,
                                                 WriteBarrierMode barrier_mode,
-                                                ParameterMode mode) {
+                                                ParameterMode mode,
+                                                DestroySource destroy_source) {
   CSA_SLOW_ASSERT(this, MatchesParameterMode(property_count, mode));
   CSA_SLOW_ASSERT(this, Word32Or(IsPropertyArray(from_array),
                                  IsEmptyFixedArray(from_array)));
@@ -4739,13 +4931,25 @@ void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
   Comment("[ CopyPropertyArrayValues");
 
   bool needs_write_barrier = barrier_mode == UPDATE_WRITE_BARRIER;
+
+  if (destroy_source == DestroySource::kNo) {
+    // PropertyArray may contain MutableHeapNumbers, which will be cloned on the
+    // heap, requiring a write barrier.
+    needs_write_barrier = true;
+  }
+
   Node* start = IntPtrOrSmiConstant(0, mode);
   ElementsKind kind = PACKED_ELEMENTS;
   BuildFastFixedArrayForEach(
       from_array, kind, start, property_count,
-      [this, to_array, needs_write_barrier](Node* array, Node* offset) {
+      [this, to_array, needs_write_barrier, destroy_source](Node* array,
+                                                            Node* offset) {
         Node* value = Load(MachineType::AnyTagged(), array, offset);
+
+        if (destroy_source == DestroySource::kNo) {
+          value = CloneIfMutablePrimitive(CAST(value));
+        }
+
         if (needs_write_barrier) {
           Store(to_array, offset, value);
         } else {
@@ -4754,6 +4958,18 @@ void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
         }
       },
       mode);
+
+#ifdef DEBUG
+  // Zap {from_array} if the copying above has made it invalid.
+  if (destroy_source == DestroySource::kYes) {
+    Label did_zap(this);
+    GotoIf(IsEmptyFixedArray(from_array), &did_zap);
+    FillPropertyArrayWithUndefined(from_array, start, property_count, mode);
+
+    Goto(&did_zap);
+    BIND(&did_zap);
+  }
+#endif
   Comment("] CopyPropertyArrayValues");
 }
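For context on the `DestroySource` distinction above: when doubles are not unboxed in object fields, a double-valued property is stored as a MutableHeapNumber, a heap-allocated box that is mutated in place when the property changes. If `CopyPropertyArrayValues` copied such a box verbatim while the source object stays alive (`DestroySource::kNo`), source and clone would share the box and writes to one would leak into the other, which is why the copy detours through `CloneIfMutablePrimitive`. The sharing itself is not expressible from JavaScript, but the observable bug class it prevents looks like this:

```js
// With a shared MutableHeapNumber box, the write to src.x after the
// spread would also change clone.x. Correct engines keep them separate.
const src = { a: 1, b: 2, c: 3 };
src.x = 2.3;               // double field, held out-of-line in a PropertyArray
const clone = { ...src };  // CloneObjectIC copies the property array

src.x = 99.9;              // must only update src's box
console.log(clone.x);      // expected: 2.3 (never 99.9)
```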
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 69ac5e27bb2fb3..2be168af8513d7 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -1512,10 +1512,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
                               Node* to_index,
                               ParameterMode mode = INTPTR_PARAMETERS);
 
-  void CopyPropertyArrayValues(
-      Node* from_array, Node* to_array, Node* length,
-      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
-      ParameterMode mode = INTPTR_PARAMETERS);
+  enum class DestroySource { kNo, kYes };
+
+  // Specify DestroySource::kYes if {from_array} is being supplanted by
+  // {to_array}. This offers a slight performance benefit by simply copying the
+  // array word by word. The source may be destroyed at the end of this macro.
+  //
+  // Otherwise, specify DestroySource::kNo for operations where an Object is
+  // being cloned, to ensure that MutableHeapNumbers are unique between the
+  // source and cloned object.
+  void CopyPropertyArrayValues(Node* from_array, Node* to_array, Node* length,
+                               WriteBarrierMode barrier_mode,
+                               ParameterMode mode,
+                               DestroySource destroy_source);
 
   // Copies all elements from |from_array| of |length| size to
   // |to_array| of the same size respecting the elements kind.
@@ -1567,6 +1576,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
                            SMI_PARAMETERS);
   }
 
+  void JumpIfPointersFromHereAreInteresting(TNode<Object> object,
+                                            Label* interesting);
+
+  // Efficiently copy elements within a single array. The regions
+  // [src_index, src_index + length) and [dst_index, dst_index + length)
+  // can be overlapping.
+  void MoveElements(ElementsKind kind, TNode<FixedArrayBase> elements,
+                    TNode<IntPtrT> dst_index, TNode<IntPtrT> src_index,
+                    TNode<IntPtrT> length);
+
+  // Efficiently copy elements from one array to another. The ElementsKind
+  // needs to be the same. Copy from src_elements at
+  // [src_index, src_index + length) to dst_elements at
+  // [dst_index, dst_index + length).
+  void CopyElements(ElementsKind kind, TNode<FixedArrayBase> dst_elements,
+                    TNode<IntPtrT> dst_index,
+                    TNode<FixedArrayBase> src_elements,
+                    TNode<IntPtrT> src_index, TNode<IntPtrT> length);
+
   TNode<FixedArray> HeapObjectToFixedArray(TNode<HeapObject> base,
                                            Label* cast_fail);
 
@@ -1740,6 +1768,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
   Node* CalculateNewElementsCapacity(Node* old_capacity,
                                      ParameterMode mode = INTPTR_PARAMETERS);
 
+  TNode<Smi> CalculateNewElementsCapacity(TNode<Smi> old_capacity) {
+    return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS));
+  }
+
   // Tries to grow the |elements| array of given |object| to store the |key|
   // or bails out if the growing gap is too big. Returns new elements.
   Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
@@ -3050,6 +3082,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
   void InitializeFunctionContext(Node* native_context, Node* context,
                                  int slots);
 
+  // Allocate a clone of a mutable primitive, if {object} is a
+  // MutableHeapNumber.
+  TNode<Object> CloneIfMutablePrimitive(TNode<Object> object);
+
  private:
   friend class CodeStubArguments;
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index ad4c5c916c088a..b141cad7736031 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -586,7 +586,7 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
   } else if (output_rep == MachineRepresentation::kFloat64) {
     op = machine()->TruncateFloat64ToFloat32();
   } else if (output_rep == MachineRepresentation::kWord64) {
-    if (output_type.Is(Type::Signed32())) {
+    if (output_type.Is(cache_.kSafeInteger)) {
       // int64 -> float64 -> float32
      op = machine()->ChangeInt64ToFloat64();
       node = jsgraph()->graph()->NewNode(op, node);
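The `representation-change.cc` fix above widens the int64-to-float32 path from `Signed32` to any safe integer, so a word64-typed value is first converted with `ChangeInt64ToFloat64` and then truncated to float32. A minimal JavaScript trigger of that conversion path, in the same spirit as `regress-895691.js` added below but without natives syntax, might look like:

```js
// i + 2**32 is a safe integer wider than 32 bits; storing it into a
// Float32Array forces the int64 -> float64 -> float32 representation change.
const n = 2 ** 32;
const x = new Float32Array(128);

function store(i) {
  x[i] = i + n;
  return x[i];
}

for (let i = 0; i < 1e4; i++) store(96);  // warm up so the store is optimized
console.log(store(96));                   // 4294967296 after float32 rounding
```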
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index f730c505552151..0cb326763c9cb8 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -1683,7 +1683,8 @@ Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
     // |new_properties| is guaranteed to be in new space, so we can skip
     // the write barrier.
     CopyPropertyArrayValues(var_properties.value(), new_properties,
-                            var_length.value(), SKIP_WRITE_BARRIER, mode);
+                            var_length.value(), SKIP_WRITE_BARRIER, mode,
+                            DestroySource::kYes);
 
     // TODO(gsathya): Clean up the type conversions by creating smarter
     // helpers that do the correct op based on the mode.
@@ -3565,7 +3566,7 @@ void AccessorAssembler::GenerateCloneObjectIC_Slow() {
 
 void AccessorAssembler::GenerateCloneObjectIC() {
   typedef CloneObjectWithVectorDescriptor Descriptor;
-  Node* source = Parameter(Descriptor::kSource);
+  TNode<Object> source = CAST(Parameter(Descriptor::kSource));
   Node* flags = Parameter(Descriptor::kFlags);
   Node* slot = Parameter(Descriptor::kSlot);
   Node* vector = Parameter(Descriptor::kVector);
@@ -3575,8 +3576,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
   Label miss(this, Label::kDeferred), try_polymorphic(this, Label::kDeferred),
       try_megamorphic(this, Label::kDeferred);
 
-  CSA_SLOW_ASSERT(this, TaggedIsNotSmi(source));
-  Node* source_map = LoadMap(UncheckedCast<HeapObject>(source));
+  TNode<Map> source_map = LoadMap(UncheckedCast<HeapObject>(source));
   GotoIf(IsDeprecatedMap(source_map), &miss);
   TNode<MaybeObject> feedback = TryMonomorphicCase(
       slot, vector, source_map, &if_handler, &var_handler, &try_polymorphic);
@@ -3597,7 +3597,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
 
     // The IC fast case should only be taken if the result map a compatible
     // elements kind with the source object.
-    TNode<FixedArrayBase> source_elements = LoadElements(source);
+    TNode<FixedArrayBase> source_elements = LoadElements(CAST(source));
 
     auto flags = ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW;
     var_elements = CAST(CloneFixedArray(source_elements, flags));
@@ -3620,7 +3620,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
     auto mode = INTPTR_PARAMETERS;
     var_properties = CAST(AllocatePropertyArray(length, mode));
     CopyPropertyArrayValues(source_properties, var_properties.value(), length,
-                            SKIP_WRITE_BARRIER, mode);
+                            SKIP_WRITE_BARRIER, mode, DestroySource::kNo);
   }
 
   Goto(&allocate_object);
@@ -3632,21 +3632,45 @@ void AccessorAssembler::GenerateCloneObjectIC() {
   // Lastly, clone any in-object properties.
   // Determine the inobject property capacity of both objects, and copy the
   // smaller number into the resulting object.
-  Node* source_start = LoadMapInobjectPropertiesStartInWords(source_map);
-  Node* source_size = LoadMapInstanceSizeInWords(source_map);
-  Node* result_start = LoadMapInobjectPropertiesStartInWords(result_map);
-  Node* field_offset_difference =
+  TNode<IntPtrT> source_start =
+      LoadMapInobjectPropertiesStartInWords(source_map);
+  TNode<IntPtrT> source_size = LoadMapInstanceSizeInWords(source_map);
+  TNode<IntPtrT> result_start =
+      LoadMapInobjectPropertiesStartInWords(result_map);
+  TNode<IntPtrT> field_offset_difference =
       TimesPointerSize(IntPtrSub(result_start, source_start));
-  BuildFastLoop(source_start, source_size,
-                [=](Node* field_index) {
-                  Node* field_offset = TimesPointerSize(field_index);
-                  Node* field = LoadObjectField(source, field_offset);
-                  Node* result_offset =
-                      IntPtrAdd(field_offset, field_offset_difference);
-                  StoreObjectFieldNoWriteBarrier(object, result_offset,
-                                                 field);
-                },
-                1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+
+  // If MutableHeapNumbers may be present in-object, allocations may occur
+  // within this loop, thus the write barrier is required.
+  //
+  // TODO(caitp): skip the write barrier until the first MutableHeapNumber
+  // field is found
+  const bool may_use_mutable_heap_numbers = !FLAG_unbox_double_fields;
+
+  BuildFastLoop(
+      source_start, source_size,
+      [=](Node* field_index) {
+        TNode<IntPtrT> field_offset =
+            TimesPointerSize(UncheckedCast<IntPtrT>(field_index));
+
+        if (may_use_mutable_heap_numbers) {
+          TNode<Object> field = LoadObjectField(source, field_offset);
+          field = CloneIfMutablePrimitive(field);
+          TNode<IntPtrT> result_offset =
+              IntPtrAdd(field_offset, field_offset_difference);
+          StoreObjectField(object, result_offset, field);
+        } else {
+          // Copy fields as raw data.
+          TNode<IntPtrT> field = UncheckedCast<IntPtrT>(
+              LoadObjectField(source, field_offset, MachineType::IntPtr()));
+          TNode<IntPtrT> result_offset =
+              IntPtrAdd(field_offset, field_offset_difference);
+          StoreObjectFieldNoWriteBarrier(
+              object, result_offset, field,
+              MachineType::IntPtr().representation());
+        }
+      },
+      1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
 
   Return(object);
 }
diff --git a/deps/v8/test/mjsunit/es9/object-spread-ic.js b/deps/v8/test/mjsunit/es9/object-spread-ic.js
index d76ffd4eb80de8..55d60f2cf8ae29 100644
--- a/deps/v8/test/mjsunit/es9/object-spread-ic.js
+++ b/deps/v8/test/mjsunit/es9/object-spread-ic.js
@@ -99,3 +99,25 @@
   // Megamorphic
   assertEquals({ boop: 1 }, f({ boop: 1 }));
 })();
+
+// There are 2 paths in CloneObjectIC's handler which need to handle double
+// fields specially --- in object properties, and copying the property array.
+function testMutableInlineProperties() {
+  function inobject() { "use strict"; this.x = 1.1; }
+  const src = new inobject();
+  const x0 = src.x;
+  const clone = { ...src, x: x0 + 1 };
+  assertEquals(x0, src.x);
+  assertEquals({ x: 2.1 }, clone);
+}
+testMutableInlineProperties()
+
+function testMutableOutOfLineProperties() {
+  const src = { a: 1, b: 2, c: 3 };
+  src.x = 2.3;
+  const x0 = src.x;
+  const clone = { ...src, x: x0 + 1 };
+  assertEquals(x0, src.x);
+  assertEquals({ a: 1, b: 2, c: 3, x: 3.3 }, clone);
+}
+testMutableOutOfLineProperties();
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-902965.js b/deps/v8/test/mjsunit/es9/regress/regress-902965.js
new file mode 100644
index 00000000000000..e2035b242f25b4
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/regress/regress-902965.js
@@ -0,0 +1,12 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Previously, spreading in-object properties would always treat double fields
+// as tagged, potentially dereferencing a Float64.
+function inobjectDouble() {
+  "use strict";
+  this.x = -3.9;
+}
+const instance = new inobjectDouble();
+const clone = { ...instance, };
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-903070.js b/deps/v8/test/mjsunit/es9/regress/regress-903070.js
new file mode 100644
index 00000000000000..cca02ee0c41de2
--- /dev/null
+++ b/deps/v8/test/mjsunit/es9/regress/regress-903070.js
@@ -0,0 +1,15 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function clone(src) {
+  return { ...src };
+}
+
+function inobjectDoubles() {
+  "use strict";
+  this.p0 = -6400510997704731;
+}
+
+// Check that unboxed double is not treated as tagged
+assertEquals({ p0: -6400510997704731 }, clone(new inobjectDoubles()));
diff --git a/deps/v8/test/mjsunit/regress/regress-895691.js b/deps/v8/test/mjsunit/regress/regress-895691.js
new file mode 100644
index 00000000000000..6f63ac6c9b025f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-895691.js
@@ -0,0 +1,18 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const n = 2**32;
+const x = new Float32Array();
+
+function f() {
+  for (var i = 96; i < 100; i += 4) {
+    x[i] = i + n;
+  }
+}
+
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/tools/__init__.py b/deps/v8/tools/__init__.py
new file mode 100644
index 00000000000000..3841a861c8396a
--- /dev/null
+++ b/deps/v8/tools/__init__.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/deps/v8/tools/unittests/__init__.py b/deps/v8/tools/unittests/__init__.py
new file mode 100644
index 00000000000000..3841a861c8396a
--- /dev/null
+++ b/deps/v8/tools/unittests/__init__.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/deps/v8/tools/unittests/v8_presubmit_test.py b/deps/v8/tools/unittests/v8_presubmit_test.py
new file mode 100755
index 00000000000000..2c66d1891b8ff8
--- /dev/null
+++ b/deps/v8/tools/unittests/v8_presubmit_test.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import tempfile
+import unittest
+
+# Configuring the path for the v8_presubmit module
+TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.append(TOOLS_ROOT)
+
+from v8_presubmit import FileContentsCache, CacheableSourceFileProcessor
+
+
+class FakeCachedProcessor(CacheableSourceFileProcessor):
+  def __init__(self, cache_file_path):
+    super(FakeCachedProcessor, self).__init__(
+      use_cache=True, cache_file_path=cache_file_path, file_type='.test')
+  def GetProcessorWorker(self):
+    return object
+  def GetProcessorScript(self):
+    return "echo", []
+  def DetectUnformattedFiles(_, cmd, worker, files):
+    raise NotImplementedError
+
+class FileContentsCacheTest(unittest.TestCase):
+  def setUp(self):
+    _, self.cache_file_path = tempfile.mkstemp()
+    cache = FileContentsCache(self.cache_file_path)
+    cache.Load()
+
+    def generate_file():
+      _, file_name = tempfile.mkstemp()
+      with open(file_name, "w") as f:
+        f.write(file_name)
+
+      return file_name
+
+    self.target_files = [generate_file() for _ in range(2)]
+    unchanged_files = cache.FilterUnchangedFiles(self.target_files)
+    self.assertEqual(len(unchanged_files), 2)
+    cache.Save()
+
+  def tearDown(self):
+    for file in [self.cache_file_path] + self.target_files:
+      os.remove(file)
+
+  def testCachesFiles(self):
+    cache = FileContentsCache(self.cache_file_path)
+    cache.Load()
+
+    changed_files = cache.FilterUnchangedFiles(self.target_files)
+    self.assertListEqual(changed_files, [])
+
+    modified_file = self.target_files[0]
+    with open(modified_file, "w") as f:
+      f.write("modification")
+
+    changed_files = cache.FilterUnchangedFiles(self.target_files)
+    self.assertListEqual(changed_files, [modified_file])
+
+  def testCacheableSourceFileProcessor(self):
+    class CachedProcessor(FakeCachedProcessor):
+      def DetectFilesToChange(_, files):
+        self.assertListEqual(files, [])
+        return []
+
+    cached_processor = CachedProcessor(cache_file_path=self.cache_file_path)
+    cached_processor.ProcessFiles(self.target_files)
+
+  def testCacheableSourceFileProcessorWithModifications(self):
+    modified_file = self.target_files[0]
+    with open(modified_file, "w") as f:
+      f.write("modification")
+
+    class CachedProcessor(FakeCachedProcessor):
+      def DetectFilesToChange(_, files):
+        self.assertListEqual(files, [modified_file])
+        return []
+
+    cached_processor = CachedProcessor(
+      cache_file_path=self.cache_file_path,
+    )
+    cached_processor.ProcessFiles(self.target_files)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index f35bd9a2eec3f9..22d6bf5389c6df 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -228,17 +228,98 @@ def FindFilesIn(self, path):
     return result
 
 
-class CppLintProcessor(SourceFileProcessor):
+class CacheableSourceFileProcessor(SourceFileProcessor):
+  """Utility class that allows caching ProcessFiles() method calls.
+
+  In order to use it, create a ProcessFilesWithoutCaching method that returns
+  the files requiring intervention after processing the source files.
+ """ + + def __init__(self, use_cache, cache_file_path, file_type): + self.use_cache = use_cache + self.cache_file_path = cache_file_path + self.file_type = file_type + + def GetProcessorWorker(self): + """Expected to return the worker function to run the formatter.""" + raise NotImplementedError + + def GetProcessorScript(self): + """Expected to return a tuple + (path to the format processor script, list of arguments).""" + raise NotImplementedError + + def GetProcessorCommand(self): + format_processor, options = self.GetProcessorScript() + if not format_processor: + print('Could not find the formatter for % files' % self.file_type) + sys.exit(1) + + command = [sys.executable, format_processor] + command.extend(options) + + return command + + def ProcessFiles(self, files): + if self.use_cache: + cache = FileContentsCache(self.cache_file_path) + cache.Load() + files = cache.FilterUnchangedFiles(files) + + if len(files) == 0: + print 'No changes in %s files detected. Skipping check' % self.file_type + return True + + files_requiring_changes = self.DetectFilesToChange(files) + print ( + 'Total %s files found that require formatting: %d' % + (self.file_type, len(files_requiring_changes))) + if self.use_cache: + for file in files_requiring_changes: + cache.RemoveFile(file) + + cache.Save() + + return files_requiring_changes == [] + + def DetectFilesToChange(self, files): + command = self.GetProcessorCommand() + worker = self.GetProcessorWorker() + + commands = [command + [file] for file in files] + count = multiprocessing.cpu_count() + pool = multiprocessing.Pool(count) + try: + results = pool.map_async(worker, commands).get(timeout=240) + except KeyboardInterrupt: + print "\nCaught KeyboardInterrupt, terminating workers." + pool.terminate() + pool.join() + sys.exit(1) + + unformatted_files = [] + for index, errors in enumerate(results): + if errors > 0: + unformatted_files.append(files[index]) + + return unformatted_files + + +class CppLintProcessor(CacheableSourceFileProcessor): """ Lint files to check that they follow the google code style. """ + def __init__(self, use_cache=True): + super(CppLintProcessor, self).__init__( + use_cache=use_cache, cache_file_path='.cpplint-cache', file_type='C/C++') + def IsRelevant(self, name): return name.endswith('.cc') or name.endswith('.h') def IgnoreDir(self, name): return (super(CppLintProcessor, self).IgnoreDir(name) - or (name == 'third_party')) + or (name == 'third_party')) IGNORE_LINT = ['export-template.h', 'flag-definitions.h'] @@ -251,55 +332,30 @@ def GetPathsToSearch(self): test_dirs = ['cctest', 'common', 'fuzzer', 'inspector', 'unittests'] return dirs + [join('test', dir) for dir in test_dirs] - def GetCpplintScript(self, prio_path): - for path in [prio_path] + os.environ["PATH"].split(os.pathsep): + def GetProcessorWorker(self): + return CppLintWorker + + def GetProcessorScript(self): + filters = ','.join([n for n in LINT_RULES]) + arguments = ['--filter', filters] + for path in [TOOLS_PATH] + os.environ["PATH"].split(os.pathsep): path = path.strip('"') - cpplint = os.path.join(path, "cpplint.py") + cpplint = os.path.join(path, 'cpplint.py') if os.path.isfile(cpplint): - return cpplint - - return None - - def ProcessFiles(self, files): - good_files_cache = FileContentsCache('.cpplint-cache') - good_files_cache.Load() - files = good_files_cache.FilterUnchangedFiles(files) - if len(files) == 0: - print 'No changes in C/C++ files detected. Skipping cpplint check.' 
-      return True
-
-    filters = ",".join([n for n in LINT_RULES])
-    cpplint = self.GetCpplintScript(TOOLS_PATH)
-    if cpplint is None:
-      print('Could not find cpplint.py. Make sure '
-            'depot_tools is installed and in the path.')
-      sys.exit(1)
-
-    command = [sys.executable, cpplint, '--filter', filters]
-
-    commands = [command + [file] for file in files]
-    count = multiprocessing.cpu_count()
-    pool = multiprocessing.Pool(count)
-    try:
-      results = pool.map_async(CppLintWorker, commands).get(999999)
-    except KeyboardInterrupt:
-      print "\nCaught KeyboardInterrupt, terminating workers."
-      sys.exit(1)
+        return cpplint, arguments
 
-    for i in range(len(files)):
-      if results[i] > 0:
-        good_files_cache.RemoveFile(files[i])
+    return None, arguments
 
-    total_errors = sum(results)
-    print "Total C/C++ files found that require formatting: %d" % total_errors
-    good_files_cache.Save()
-    return total_errors == 0
 
-class TorqueFormatProcessor(SourceFileProcessor):
+class TorqueFormatProcessor(CacheableSourceFileProcessor):
   """
   Check .tq files to verify they follow the Torque style guide.
   """
 
+  def __init__(self, use_cache=True):
+    super(TorqueFormatProcessor, self).__init__(
+      use_cache=use_cache, cache_file_path='.torquelint-cache',
+      file_type='Torque')
+
   def IsRelevant(self, name):
     return name.endswith('.tq')
 
@@ -308,47 +364,17 @@ def GetPathsToSearch(self):
     test_dirs = ['torque']
     return dirs + [join('test', dir) for dir in test_dirs]
 
-  def GetTorquelintScript(self):
+  def GetProcessorWorker(self):
+    return TorqueLintWorker
+
+  def GetProcessorScript(self):
     torque_tools = os.path.join(TOOLS_PATH, "torque")
     torque_path = os.path.join(torque_tools, "format-torque.py")
-
+    arguments = ['-l']
     if os.path.isfile(torque_path):
-      return torque_path
-
-    return None
-
-  def ProcessFiles(self, files):
-    good_files_cache = FileContentsCache('.torquelint-cache')
-    good_files_cache.Load()
-    files = good_files_cache.FilterUnchangedFiles(files)
-    if len(files) == 0:
-      print 'No changes in Torque files detected. Skipping Torque lint check.'
-      return True
-
-    torquelint = self.GetTorquelintScript()
-    if torquelint is None:
-      print('Could not find format-torque.')
-      sys.exit(1)
-
-    command = [sys.executable, torquelint, '-l']
-
-    commands = [command + [file] for file in files]
-    count = multiprocessing.cpu_count()
-    pool = multiprocessing.Pool(count)
-    try:
-      results = pool.map_async(TorqueLintWorker, commands).get()
-    except KeyboardInterrupt:
-      print "\nCaught KeyboardInterrupt, terminating workers."
-      sys.exit(1)
-
-    for i in range(len(files)):
-      if results[i] > 0:
-        good_files_cache.RemoveFile(files[i])
+      return torque_path, arguments
 
-    total_errors = sum(results)
-    print "Total Torque files requiring formatting: %d" % total_errors
-    good_files_cache.Save()
-    return total_errors == 0
+    return None, arguments
 
 COPYRIGHT_HEADER_PATTERN = re.compile(
     r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
@@ -639,6 +665,7 @@ def PyTests(workspace):
     print 'Running ' + script
     result &= subprocess.call(
         [sys.executable, script], stdout=subprocess.PIPE) == 0
+
   return result
 
 
@@ -646,6 +673,9 @@ def GetOptions():
   result = optparse.OptionParser()
   result.add_option('--no-lint', help="Do not run cpplint", default=False,
                     action="store_true")
+  result.add_option('--no-linter-cache', help="Do not cache linter results",
+                    default=False, action="store_true")
+
   return result
 
 
@@ -656,11 +686,13 @@ def Main():
   success = True
   print "Running checkdeps..."
   success &= CheckDeps(workspace)
+  use_linter_cache = not options.no_linter_cache
   if not options.no_lint:
     print "Running C++ lint check..."
-    success &= CppLintProcessor().RunOnPath(workspace)
+    success &= CppLintProcessor(use_cache=use_linter_cache).RunOnPath(workspace)
+
   print "Running Torque formatting check..."
-  success &= TorqueFormatProcessor().RunOnPath(workspace)
+  success &= TorqueFormatProcessor(use_cache=use_linter_cache).RunOnPath(workspace)
   print "Running copyright header, trailing whitespaces and " \
         "two empty lines between declarations check..."
   success &= SourceProcessor().RunOnPath(workspace)