src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

changeset 7285:1d6eb209432a
parent    7257:e7d0505c8a30
child     7333:b12a2a9b05ca
child     7481:ee10217e3d03
comparing 7284:1bd99e1dc168 with 7285:1d6eb209432a
@@ -2476,20 +2476,22 @@
 void G1CollectedHeap::collect(GCCause::Cause cause) {
   assert_heap_not_locked();
 
   unsigned int gc_count_before;
   unsigned int old_marking_count_before;
+  unsigned int full_gc_count_before;
   bool retry_gc;
 
   do {
     retry_gc = false;
 
     {
       MutexLocker ml(Heap_lock);
 
       // Read the GC count while holding the Heap_lock
       gc_count_before = total_collections();
+      full_gc_count_before = total_full_collections();
       old_marking_count_before = _old_marking_cycles_started;
     }
 
     if (should_do_concurrent_full_gc(cause)) {
       // Schedule an initial-mark evacuation pause that will start a
@@ -2530,11 +2532,11 @@
                                    g1_policy()->max_pause_time_ms(),
                                    cause);
         VMThread::execute(&op);
       } else {
         // Schedule a Full GC.
-        VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
+        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
         VMThread::execute(&op);
       }
     }
   } while (retry_gc);
 }
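
The change above snapshots total_full_collections() while Heap_lock is held and passes that value (instead of old_marking_count_before) to VM_G1CollectFull, presumably so the scheduled operation can tell whether a full collection already completed in the meantime. Below is a minimal, standalone sketch of that counter-snapshot idiom; it is not HotSpot code, and all names in it (Heap, read_full_gc_count, collect_full, the std::mutex standing in for Heap_lock) are hypothetical and used only for illustration.

// Sketch of the "snapshot a full-GC counter, then skip the request if the
// counter has moved" pattern. Hypothetical types and names, not HotSpot code.
#include <iostream>
#include <mutex>

class Heap {
 public:
  // Samples the full-GC counter under the lock, mirroring how the patch
  // reads total_full_collections() while holding Heap_lock.
  unsigned int read_full_gc_count() {
    std::lock_guard<std::mutex> guard(_heap_lock);
    return _total_full_collections;
  }

  // Runs a full collection only if no other full GC has completed since
  // the caller sampled `full_gc_count_before`; otherwise the request is
  // dropped because the heap was already collected.
  bool collect_full(unsigned int full_gc_count_before) {
    std::lock_guard<std::mutex> guard(_heap_lock);
    if (_total_full_collections != full_gc_count_before) {
      return false;  // another full GC already satisfied this request
    }
    ++_total_full_collections;  // the actual collection work would go here
    return true;
  }

 private:
  std::mutex _heap_lock;  // stands in for Heap_lock
  unsigned int _total_full_collections = 0;
};

int main() {
  Heap heap;
  unsigned int before = heap.read_full_gc_count();
  std::cout << "first request ran: " << heap.collect_full(before) << "\n";  // 1
  std::cout << "stale request ran: " << heap.collect_full(before) << "\n";  // 0
  return 0;
}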
