Revert of [heap] Introduce parallel compaction algorithm. (patchset #9 id:160001 of https://codereview.chromium.org/1343333002/ )

Reason for revert:
Check failed: https://chromegw.corp.google.com/i/client.v8/builders/V8%20Win64/builds/5535/steps/Check%20%28flakes%29/logs/IndependentWeakHandle

Original issue's description:
> [heap] Introduce parallel compaction algorithm.
>
> - The number of parallel tasks is still 1, i.e., we only compact on the main
>   thread.
> - Remove emergency memory (PagedSpace, and CodeRange)
> - Introduce partial compaction of pages.
> - Logic for multiple tasks is in place.
>
> BUG=chromium:524425
> LOG=N
>
> Committed: https://crrev.com/61ea4f55616d3f7bc2ce049a678f16f7475e03e0
> Cr-Commit-Position: refs/heads/master@{#30787}

TBR=hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:524425

Review URL: https://codereview.chromium.org/1347873003

Cr-Commit-Position: refs/heads/master@{#30788}
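
Note for readers: the reverted patch coordinated compaction by giving each task its own compaction spaces, letting the main thread contribute as task 0, and joining all tasks on a counting semaphore. The standalone C++20 sketch below mirrors that flow in outline only; LocalSpace and CompactInto are hypothetical stand-ins, not V8's CompactionSpaceCollection or Platform APIs.

```cpp
#include <cstdio>
#include <semaphore>
#include <thread>
#include <vector>

// Hypothetical stand-in for a per-task CompactionSpaceCollection: each task
// allocates into private state, so compaction itself needs no locks.
struct LocalSpace {
  long bytes_moved = 0;
};

// Placeholder for evacuating live objects into the task-local space.
static void CompactInto(LocalSpace* space, int task_id) {
  space->bytes_moved += 1000 * (task_id + 1);
}

int main() {
  const int num_tasks = 4;  // the reverted patch still hard-coded this to 1
  std::vector<LocalSpace> spaces(num_tasks);
  std::counting_semaphore<> done(0);

  // Kick off background tasks 1..N-1; each owns spaces[i] exclusively.
  std::vector<std::thread> workers;
  for (int i = 1; i < num_tasks; i++) {
    workers.emplace_back([&spaces, &done, i] {
      CompactInto(&spaces[i], i);
      done.release();  // analogous to pending_compaction_tasks_semaphore_.Signal()
    });
  }

  // The main thread contributes as task 0 and signals like any other task.
  CompactInto(&spaces[0], 0);
  done.release();

  // WaitUntilCompactionCompleted(): one Wait() per active task.
  for (int i = 0; i < num_tasks; i++) done.acquire();

  // Merge the task-local spaces back sequentially, as the patch did with
  // MergeCompactionSpace().
  long total = 0;
  for (const LocalSpace& s : spaces) total += s.bytes_moved;
  std::printf("moved %ld bytes across %d tasks\n", total, num_tasks);

  for (std::thread& t : workers) t.join();
  return 0;
}
```
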
mlippautz authored and Commit bot committed Sep 17, 2015
1 parent 61ea4f5 commit 7a0a0b8
Showing 4 changed files with 195 additions and 218 deletions.
219 changes: 84 additions & 135 deletions src/heap/mark-compact.cc
@@ -57,8 +57,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
sweeping_in_progress_(false),
parallel_compaction_in_progress_(false),
pending_sweeper_jobs_semaphore_(0),
pending_compaction_tasks_semaphore_(0),
concurrent_compaction_tasks_active_(0),
pending_compaction_jobs_semaphore_(0),
evacuation_(false),
slots_buffer_allocator_(nullptr),
migration_slots_buffer_(nullptr),
@@ -475,21 +474,21 @@ void MarkCompactCollector::ClearMarkbits() {

class MarkCompactCollector::CompactionTask : public v8::Task {
public:
explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
: heap_(heap), spaces_(spaces) {}
explicit CompactionTask(Heap* heap) : heap_(heap) {}

virtual ~CompactionTask() {}

private:
// v8::Task overrides.
void Run() override {
heap_->mark_compact_collector()->EvacuatePages(spaces_);
// TODO(mlippautz, hpayer): EvacuatePages is not thread-safe and may only be
// called by one thread at a time.
heap_->mark_compact_collector()->EvacuatePages();
heap_->mark_compact_collector()
->pending_compaction_tasks_semaphore_.Signal();
->pending_compaction_jobs_semaphore_.Signal();
}

Heap* heap_;
CompactionSpaceCollection* spaces_;

DISALLOW_COPY_AND_ASSIGN(CompactionTask);
};
@@ -3352,10 +3351,11 @@ void MarkCompactCollector::EvacuateNewSpace() {
}


bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
void MarkCompactCollector::EvacuateLiveObjectsFromPage(
Page* p, PagedSpace* target_space) {
AlwaysAllocateScope always_allocate(isolate());
DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
p->SetWasSwept();

int offsets[16];

@@ -3376,8 +3376,17 @@ bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
HeapObject* target_object = nullptr;
AllocationResult allocation = target_space->AllocateRaw(size, alignment);
if (!allocation.To(&target_object)) {
return false;
// If allocation failed, use emergency memory and re-try allocation.
CHECK(target_space->HasEmergencyMemory());
target_space->UseEmergencyMemory();
allocation = target_space->AllocateRaw(size, alignment);
}
if (!allocation.To(&target_object)) {
// OS refused to give us memory.
V8::FatalProcessOutOfMemory("Evacuation");
return;
}

MigrateObject(target_object, object, size, target_space->identity());
DCHECK(object->map_word().IsForwardingAddress());
}
@@ -3386,142 +3395,80 @@ bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
*cell = 0;
}
p->ResetLiveBytes();
return true;
}


void MarkCompactCollector::EvacuatePagesInParallel() {
if (evacuation_candidates_.length() == 0) return;

int num_tasks = 1;
if (FLAG_parallel_compaction) {
num_tasks = NumberOfParallelCompactionTasks();
}

// Set up compaction spaces.
CompactionSpaceCollection** compaction_spaces_for_tasks =
new CompactionSpaceCollection*[num_tasks];
for (int i = 0; i < num_tasks; i++) {
compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
}

compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
heap()->old_space());
compaction_spaces_for_tasks[0]
->Get(CODE_SPACE)
->MoveOverFreeMemory(heap()->code_space());

parallel_compaction_in_progress_ = true;
// Kick off parallel tasks.
for (int i = 1; i < num_tasks; i++) {
concurrent_compaction_tasks_active_++;
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
v8::Platform::kShortRunningTask);
}

// Contribute on the main thread. Counter and signal are in principle not needed.
concurrent_compaction_tasks_active_++;
EvacuatePages(compaction_spaces_for_tasks[0]);
pending_compaction_tasks_semaphore_.Signal();

WaitUntilCompactionCompleted();

// Merge back memory (compacted and unused) from compaction spaces.
for (int i = 0; i < num_tasks; i++) {
heap()->old_space()->MergeCompactionSpace(
compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
heap()->code_space()->MergeCompactionSpace(
compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
delete compaction_spaces_for_tasks[i];
}
delete[] compaction_spaces_for_tasks;

// Finalize sequentially.
const int num_pages = evacuation_candidates_.length();
int abandoned_pages = 0;
for (int i = 0; i < num_pages; i++) {
Page* p = evacuation_candidates_[i];
switch (p->parallel_compaction_state().Value()) {
case MemoryChunk::ParallelCompactingState::kCompactingAborted:
// We have partially compacted the page, i.e., some objects may have
// moved, others are still in place.
// We need to:
// - Leave the evacuation candidate flag for later processing of
// slots buffer entries.
// - Leave the slots buffer there for processing of entries added by
// the write barrier.
// - Rescan the page as slot recording in the migration buffer only
// happens upon moving (which we potentially didn't do).
// - Leave the page in the list of pages of a space since we could not
// fully evacuate it.
DCHECK(p->IsEvacuationCandidate());
p->SetFlag(Page::RESCAN_ON_EVACUATION);
abandoned_pages++;
break;
case MemoryChunk::kCompactingFinalize:
DCHECK(p->IsEvacuationCandidate());
p->SetWasSwept();
p->Unlink();
break;
case MemoryChunk::kCompactingDone:
DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
break;
default:
// We should not observe kCompactingInProgress here; the other states are
// handled above.
UNREACHABLE();
}
p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
}
if (num_pages > 0) {
if (FLAG_trace_fragmentation) {
if (abandoned_pages != 0) {
PrintF(
" Abandoned (at least partially) %d out of %d page compactions due"
" to lack of memory\n",
abandoned_pages, num_pages);
} else {
PrintF(" Compacted %d pages\n", num_pages);
}
}
}
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new CompactionTask(heap()), v8::Platform::kShortRunningTask);
}


void MarkCompactCollector::WaitUntilCompactionCompleted() {
while (concurrent_compaction_tasks_active_-- > 0) {
pending_compaction_tasks_semaphore_.Wait();
}
pending_compaction_jobs_semaphore_.Wait();
parallel_compaction_in_progress_ = false;
}


void MarkCompactCollector::EvacuatePages(
CompactionSpaceCollection* compaction_spaces) {
for (int i = 0; i < evacuation_candidates_.length(); i++) {
void MarkCompactCollector::EvacuatePages() {
int npages = evacuation_candidates_.length();
int abandoned_pages = 0;
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
DCHECK(static_cast<int>(p->parallel_sweeping()) ==
MemoryChunk::SWEEPING_DONE);
if (p->parallel_compaction_state().TrySetValue(
MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
if (p->IsEvacuationCandidate()) {
DCHECK_EQ(p->parallel_compaction_state().Value(),
MemoryChunk::kCompactingInProgress);
if (EvacuateLiveObjectsFromPage(
p, compaction_spaces->Get(p->owner()->identity()))) {
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingFinalize);
} else {
p->parallel_compaction_state().SetValue(
MemoryChunk::kCompactingAborted);
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
// Allocate emergency memory for the case when compaction fails due to out
// of memory.
if (!space->HasEmergencyMemory()) {
space->CreateEmergencyMemory(); // If the OS lets us.
}
if (p->IsEvacuationCandidate()) {
// During compaction we might have to request a new page in order to free
// up a page. Check that we actually got an emergency page above so we
// can guarantee that this succeeds.
if (space->HasEmergencyMemory()) {
EvacuateLiveObjectsFromPage(p, static_cast<PagedSpace*>(p->owner()));
// Unlink the page from the list of pages here. We must not iterate
// over that page later (e.g. when scan on scavenge pages are
// processed). The page itself will be freed later and is still
// reachable from the evacuation candidates list.
p->Unlink();
} else {
// Without room for expansion evacuation is not guaranteed to succeed.
// Pessimistically abandon unevacuated pages.
for (int j = i; j < npages; j++) {
Page* page = evacuation_candidates_[j];
slots_buffer_allocator_->DeallocateChain(
page->slots_buffer_address());
page->ClearEvacuationCandidate();
page->SetFlag(Page::RESCAN_ON_EVACUATION);
}
abandoned_pages = npages - i;
break;
}
}
}
if (npages > 0) {
// Release emergency memory.
PagedSpaces spaces(heap());
for (PagedSpace* space = spaces.next(); space != NULL;
space = spaces.next()) {
if (space->HasEmergencyMemory()) {
space->FreeEmergencyMemory();
}
}
if (FLAG_trace_fragmentation) {
if (abandoned_pages != 0) {
PrintF(
" Abandon %d out of %d page defragmentations due to lack of "
"memory\n",
abandoned_pages, npages);
} else {
// There could be popular pages in the list of evacuation candidates
// which we do not compact.
p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
PrintF(" Defragmented %d pages\n", npages);
}
}
}
@@ -3710,7 +3657,12 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_PAGES);
EvacuationScope evacuation_scope(this);
EvacuatePagesInParallel();
if (FLAG_parallel_compaction) {
EvacuatePagesInParallel();
WaitUntilCompactionCompleted();
} else {
EvacuatePages();
}
}

// Second pass: find pointers to new space and update them.
@@ -3770,15 +3722,13 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
SlotsBuffer::SizeOfChain(p->slots_buffer()));
}
slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());

// Important: skip list should be cleared only after roots were updated
// because root iteration traverses the stack and might have to find
// code objects from non-updated pc pointing into evacuation candidate.
SkipList* list = p->skip_list();
if (list != NULL) list->Clear();
}
if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
} else {
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
reinterpret_cast<intptr_t>(p));
Expand Down Expand Up @@ -3808,12 +3758,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
break;
}
}
if (p->IsEvacuationCandidate() &&
p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
// Case where we've aborted compacting a page. Clear the flag here to
// avoid releasing the page later on.
p->ClearEvacuationCandidate();
}
}
}

@@ -3860,6 +3804,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->area_start(), p->area_size());
p->set_scan_on_scavenge(false);
slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
p->ResetLiveBytes();
space->ReleasePage(p);
}
@@ -4475,6 +4420,10 @@ void MarkCompactCollector::SweepSpaces() {

// Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
CodeRange* code_range = heap()->isolate()->code_range();
if (code_range != NULL && code_range->valid()) {
code_range->ReserveEmergencyBlock();
}

if (FLAG_print_cumulative_gc_stat) {
heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
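The mark-compact.cc hunks above restore the pre-patch emergency-memory scheme that the reverted change had removed: before evacuating a candidate page, the collector sets aside a reserve so evacuation is guaranteed to succeed, and if no reserve can be obtained it pessimistically abandons the remaining candidates and flags them for re-scan. Below is a minimal sketch of that control flow, with hypothetical Space and Page types standing in for V8's real ones.

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <vector>

struct Page {
  bool evacuated = false;
  bool rescan = false;  // stands in for Page::RESCAN_ON_EVACUATION
};

// Hypothetical space holding a single emergency reserve block.
struct Space {
  void* emergency = nullptr;
  bool HasEmergencyMemory() const { return emergency != nullptr; }
  void CreateEmergencyMemory() { emergency = std::malloc(4096); }  // may fail
  void FreeEmergencyMemory() { std::free(emergency); emergency = nullptr; }
};

// Evacuate candidates while a reserve page can be guaranteed; otherwise
// pessimistically abandon the rest and mark them for re-scan, mirroring
// the restored EvacuatePages() control flow.
static int EvacuatePages(Space& space, std::vector<Page>& candidates) {
  int abandoned = 0;
  for (std::size_t i = 0; i < candidates.size(); i++) {
    if (!space.HasEmergencyMemory()) space.CreateEmergencyMemory();
    if (!space.HasEmergencyMemory()) {
      // Without room for expansion, evacuation is not guaranteed to succeed.
      for (std::size_t j = i; j < candidates.size(); j++)
        candidates[j].rescan = true;
      abandoned = static_cast<int>(candidates.size() - i);
      break;
    }
    candidates[i].evacuated = true;  // EvacuateLiveObjectsFromPage(...)
    // Pretend every other evacuation consumed the reserve; a fresh one is
    // created at the top of the next iteration.
    if (i % 2 == 1) space.FreeEmergencyMemory();
  }
  if (space.HasEmergencyMemory()) space.FreeEmergencyMemory();
  return abandoned;
}

int main() {
  Space space;
  std::vector<Page> pages(8);
  std::printf("abandoned %d of %zu pages\n", EvacuatePages(space, pages),
              pages.size());
  return 0;
}
```
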
18 changes: 5 additions & 13 deletions src/heap/mark-compact.h
@@ -553,11 +553,8 @@ class MarkCompactCollector {
// Synchronize sweeper threads.
base::Semaphore pending_sweeper_jobs_semaphore_;

// Synchronize compaction tasks.
base::Semaphore pending_compaction_tasks_semaphore_;

// Number of active compaction tasks (including main thread).
intptr_t concurrent_compaction_tasks_active_;
// Synchronize compaction threads.
base::Semaphore pending_compaction_jobs_semaphore_;

bool evacuation_;

@@ -715,16 +712,11 @@

void EvacuateNewSpace();

bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space);
void EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space);

void EvacuatePages(CompactionSpaceCollection* compaction_spaces);
void EvacuatePagesInParallel();
void EvacuatePages();

int NumberOfParallelCompactionTasks() {
// TODO(hpayer, mlippautz): Figure out some logic to determine the number
// of compaction tasks.
return 1;
}
void EvacuatePagesInParallel();

void WaitUntilCompactionCompleted();

Expand Down
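With the revert applied, mark-compact.h again pairs one background job with one semaphore: EvacuatePagesInParallel posts a single CompactionTask, and WaitUntilCompactionCompleted performs exactly one Wait. A compressed C++20 sketch of that handshake, using std::binary_semaphore and std::thread as stand-ins for base::Semaphore and Platform::CallOnBackgroundThread:

```cpp
#include <semaphore>
#include <thread>

// Stand-ins for base::Semaphore and the collector's progress flag.
std::binary_semaphore pending_compaction_jobs(0);
bool parallel_compaction_in_progress = false;

// Hypothetical placeholder for the single-threaded EvacuatePages().
static void EvacuatePages() { /* move live objects off candidate pages */ }

static void EvacuatePagesInParallel() {
  parallel_compaction_in_progress = true;
  // Analogous to CallOnBackgroundThread(new CompactionTask(heap), ...).
  std::thread([] {
    EvacuatePages();                    // runs off the main thread
    pending_compaction_jobs.release();  // CompactionTask::Run() -> Signal()
  }).detach();
}

static void WaitUntilCompactionCompleted() {
  pending_compaction_jobs.acquire();    // exactly one Wait() for one job
  parallel_compaction_in_progress = false;
}

int main() {
  EvacuatePagesInParallel();
  WaitUntilCompactionCompleted();
  return 0;
}
```
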
[Diffs for the remaining two changed files did not load.]
