Revert of [heap] Uncommit pooled pages concurrently (patchset #5 id:120001 of https://codereview.chromium.org/1913083002/ )

Reason for revert:
Creates a spike of OOM(v8) crashes on Win32.

Original issue's description:
> [heap] Uncommit pooled pages concurrently
>
> - Move the concurrent unmapping to MemoryAllocator
> - Hide (private) members where possible
> - MemoryAllocator::Free is now the bottleneck for freeing
> - Pooled pages are either allocated from a set of pooled pages or obtained
>   through work stealing from the concurrent unmapper
>
> BUG=chromium:605866, chromium:581412
> LOG=N
>
> Committed: https://crrev.com/2158df87116906160cebc3ad20c97f454822da03
> Cr-Commit-Position: refs/heads/master@{#35797}

TBR=hpayer@chromium.org,mlippautz@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:605866, chromium:581412

Review URL: https://codereview.chromium.org/1925563003

Cr-Commit-Position: refs/heads/master@{#35819}
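
The reverted CL's description above notes that pooled pages were either allocated from a set of pooled pages or obtained through work stealing from the concurrent unmapper. The stand-alone C++ sketch below illustrates that allocation order under stated assumptions: the class, method, and member names are invented for the illustration and are not the actual V8 MemoryAllocator/Unmapper API.

// Hypothetical sketch of "allocate from the pool, else steal from the
// unmapper, else allocate fresh". None of these names are the real V8 ones.
#include <mutex>
#include <vector>

struct Chunk {};  // stand-in for a committed-but-currently-unused page

class Unmapper {
 public:
  // Called by the allocating thread: grab a chunk that is still queued for
  // uncommitting before the background task gets to it (work stealing).
  Chunk* TryStealQueuedChunk() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (queued_.empty()) return nullptr;
    Chunk* chunk = queued_.back();
    queued_.pop_back();
    return chunk;
  }

  void Queue(Chunk* chunk) {
    std::lock_guard<std::mutex> guard(mutex_);
    queued_.push_back(chunk);
  }

 private:
  std::mutex mutex_;
  std::vector<Chunk*> queued_;  // chunks waiting to be uncommitted
};

class Allocator {
 public:
  explicit Allocator(Unmapper* unmapper) : unmapper_(unmapper) {}

  Chunk* AllocatePooled() {
    if (!pool_.empty()) {  // 1. reuse a page from the local pool
      Chunk* chunk = pool_.back();
      pool_.pop_back();
      return chunk;
    }
    if (Chunk* stolen = unmapper_->TryStealQueuedChunk()) {
      return stolen;       // 2. steal work from the concurrent unmapper
    }
    return new Chunk();    // 3. fall back to a fresh allocation
  }

 private:
  Unmapper* unmapper_;
  std::vector<Chunk*> pool_;  // pages kept committed for reuse
};

int main() {
  Unmapper unmapper;
  Allocator allocator(&unmapper);
  unmapper.Queue(new Chunk());  // pretend a page was queued for uncommitting
  Chunk* page = allocator.AllocatePooled();  // pool is empty, so this steals
  delete page;
}
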
natorion authored and Commit bot committed Apr 27, 2016
1 parent ff19726 commit 25ff296
Showing 7 changed files with 149 additions and 210 deletions.
src/deoptimizer.cc: 2 changes (1 addition & 1 deletion)
@@ -47,7 +47,7 @@ DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
 
 DeoptimizerData::~DeoptimizerData() {
   for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
-    allocator_->Free<MemoryAllocator::kFull>(deopt_entry_code_[i]);
+    allocator_->Free(deopt_entry_code_[i]);
     deopt_entry_code_[i] = NULL;
   }
 }
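
The removed call above, allocator_->Free<MemoryAllocator::kFull>(...), together with the bullet "MemoryAllocator::Free is now the bottleneck for freeing", suggests the reverted CL routed all freeing through one Free entry point parameterized by a free mode; the revert restores the plain Free(chunk) call. A minimal sketch of such a mode-templated Free follows; only the kFull name is taken from the diff, while the kPooled mode, the helper names, and the bodies are assumptions for illustration.

// Hypothetical sketch of a free-mode-templated Free(); not the real
// MemoryAllocator. Only the kFull name comes from the diff above.
class MemoryChunk;

class MemoryAllocator {
 public:
  enum FreeMode {
    kFull,   // uncommit and release the chunk right away
    kPooled  // keep the chunk committed and return it to the page pool
  };

  template <FreeMode mode>
  void Free(MemoryChunk* chunk) {
    if (mode == kFull) {
      ReleaseMemory(chunk);
    } else {
      AddToPool(chunk);
    }
  }

 private:
  void ReleaseMemory(MemoryChunk* /*chunk*/) { /* unmap the backing memory */ }
  void AddToPool(MemoryChunk* /*chunk*/) { /* keep the chunk for reuse */ }
};

int main() {
  MemoryAllocator allocator;
  MemoryChunk* chunk = nullptr;  // placeholder; a real chunk would come from the allocator
  allocator.Free<MemoryAllocator::kFull>(chunk);  // release immediately
}
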
src/heap/heap.cc: 74 changes (74 additions & 0 deletions)
@@ -157,6 +157,9 @@ Heap::Heap()
       current_gc_flags_(Heap::kNoGCFlags),
       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
       external_string_table_(this),
+      chunks_queued_for_free_(NULL),
+      concurrent_unmapping_tasks_active_(0),
+      pending_unmapping_tasks_semaphore_(0),
       gc_callbacks_depth_(0),
       deserialization_complete_(false),
       strong_roots_list_(NULL),
@@ -5444,6 +5447,8 @@ void Heap::TearDown() {
   delete scavenge_job_;
   scavenge_job_ = nullptr;
 
+  WaitUntilUnmappingOfFreeChunksCompleted();
+
   delete array_buffer_tracker_;
   array_buffer_tracker_ = nullptr;
 
@@ -6249,6 +6254,75 @@ void Heap::ExternalStringTable::TearDown() {
 }
 
 
+class Heap::UnmapFreeMemoryTask : public v8::Task {
+ public:
+  UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
+      : heap_(heap), head_(head) {}
+  virtual ~UnmapFreeMemoryTask() {}
+
+ private:
+  // v8::Task overrides.
+  void Run() override {
+    heap_->FreeQueuedChunks(head_);
+    heap_->pending_unmapping_tasks_semaphore_.Signal();
+  }
+
+  Heap* heap_;
+  MemoryChunk* head_;
+
+  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
+
+
+void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
+  while (concurrent_unmapping_tasks_active_ > 0) {
+    pending_unmapping_tasks_semaphore_.Wait();
+    concurrent_unmapping_tasks_active_--;
+  }
+}
+
+
+void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
+  // PreFree logically frees the memory chunk. However, the actual freeing
+  // will happen on a separate thread sometime later.
+  memory_allocator()->PreFreeMemory(chunk);
+
+  // The chunks added to this queue will be freed by a concurrent thread.
+  chunk->set_next_chunk(chunks_queued_for_free_);
+  chunks_queued_for_free_ = chunk;
+}
+
+
+void Heap::FreeQueuedChunks() {
+  if (chunks_queued_for_free_ != NULL) {
+    if (FLAG_concurrent_sweeping) {
+      V8::GetCurrentPlatform()->CallOnBackgroundThread(
+          new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
+          v8::Platform::kShortRunningTask);
+    } else {
+      FreeQueuedChunks(chunks_queued_for_free_);
+      pending_unmapping_tasks_semaphore_.Signal();
+    }
+    chunks_queued_for_free_ = NULL;
+  } else {
+    // If we do not have anything to unmap, we just signal the semaphore
+    // that we are done.
+    pending_unmapping_tasks_semaphore_.Signal();
+  }
+  concurrent_unmapping_tasks_active_++;
+}
+
+
+void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
+  MemoryChunk* next;
+  MemoryChunk* chunk;
+  for (chunk = list_head; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    memory_allocator()->PerformFreeMemory(chunk);
+  }
+}
+
+
 void Heap::RememberUnmappedPage(Address page, bool compacted) {
   uintptr_t p = reinterpret_cast<uintptr_t>(page);
   // Tag the page pointer to make it findable in the dump file.
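
The restored heap.cc code above relies on a simple counting contract: every FreeQueuedChunks() call increments concurrent_unmapping_tasks_active_ and guarantees exactly one Signal() on the semaphore, whether the queued chunks are unmapped by a background UnmapFreeMemoryTask, unmapped synchronously, or there was nothing to do; WaitUntilUnmappingOfFreeChunksCompleted() can then simply Wait() once per outstanding task. A minimal stand-alone re-expression of that pattern, assuming standard C++20 (std::counting_semaphore and std::thread) rather than V8's base::Semaphore and platform task runner, might look like this:

// Sketch only: the same dispatch-and-wait pattern with standard library types.
#include <cstddef>
#include <semaphore>
#include <thread>
#include <vector>

class ChunkFreer {
 public:
  // Dispatch one round of freeing. Whether the work runs on a background
  // thread, runs synchronously, or there is nothing to free, exactly one
  // release() happens for this call.
  void FreeQueuedChunks(bool has_work, bool concurrent) {
    if (has_work && concurrent) {
      workers_.emplace_back([this] {
        // ... unmap the queued chunks on the background thread ...
        done_.release();
      });
    } else {
      // ... unmap synchronously, or nothing to do ...
      done_.release();
    }
    ++active_tasks_;
  }

  // Block until every dispatched round has signalled completion.
  void WaitUntilCompleted() {
    while (active_tasks_ > 0) {
      done_.acquire();
      --active_tasks_;
    }
    for (std::thread& worker : workers_) worker.join();
    workers_.clear();
  }

 private:
  std::counting_semaphore<> done_{0};
  std::size_t active_tasks_ = 0;
  std::vector<std::thread> workers_;
};

int main() {
  ChunkFreer freer;
  freer.FreeQueuedChunks(/*has_work=*/true, /*concurrent=*/true);
  freer.FreeQueuedChunks(/*has_work=*/false, /*concurrent=*/false);
  freer.WaitUntilCompleted();  // returns only after both rounds have signalled
}
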
src/heap/heap.h: 12 changes (12 additions & 0 deletions)
@@ -778,6 +778,11 @@ class Heap {
 
   inline bool OldGenerationAllocationLimitReached();
 
+  void QueueMemoryChunkForFree(MemoryChunk* chunk);
+  void FreeQueuedChunks(MemoryChunk* list_head);
+  void FreeQueuedChunks();
+  void WaitUntilUnmappingOfFreeChunksCompleted();
+
   // Completely clear the Instanceof cache (to stop it keeping objects alive
   // around a GC).
   inline void CompletelyClearInstanceofCache();
@@ -1385,6 +1390,7 @@
 
  private:
   class PretenuringScope;
+  class UnmapFreeMemoryTask;
 
   // External strings table is a place where all external strings are
   // registered. We need to keep track of such strings to properly
@@ -2202,6 +2208,12 @@
 
   ExternalStringTable external_string_table_;
 
+  MemoryChunk* chunks_queued_for_free_;
+
+  size_t concurrent_unmapping_tasks_active_;
+
+  base::Semaphore pending_unmapping_tasks_semaphore_;
+
   base::Mutex relocation_mutex_;
 
   int gc_callbacks_depth_;
src/heap/mark-compact.cc: 6 changes (3 additions & 3 deletions)
@@ -846,7 +846,7 @@ void MarkCompactCollector::Prepare() {
 
   // If concurrent unmapping tasks are still running, we should wait for
   // them here.
-  heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
+  heap()->WaitUntilUnmappingOfFreeChunksCompleted();
 
   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
@@ -3539,7 +3539,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   // slots only handles old space (for unboxed doubles), and thus map space can
   // still contain stale pointers. We only free the chunks after pointer updates
   // to still have access to page headers.
-  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+  heap()->FreeQueuedChunks();
 
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
@@ -3727,7 +3727,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
-  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+  heap()->FreeQueuedChunks();
 }
 
 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,