src/share/vm/gc_implementation/g1/concurrentMark.cpp

changeset 6992:2c6ef90f030a
parent    6977:4dfab3faf5e7
child     6996:f3aeae1f9fc5
@@ -22,10 +22,11 @@
  *
  */

 #include "precompiled.hpp"
 #include "classfile/symbolTable.hpp"
+#include "code/codeCache.hpp"
 #include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
@@ -37,10 +38,11 @@
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/gcTraceTime.hpp"
+#include "memory/allocation.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -55,12 +57,12 @@
   _shifter(shifter) {
   _bmStartWord = 0;
   _bmWordSize = 0;
 }

-HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
-                                               HeapWord* limit) const {
+HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
+                                               const HeapWord* limit) const {
   // First we must round addr *up* to a possible object boundary.
   addr = (HeapWord*)align_size_up((intptr_t)addr,
                                   HeapWordSize << _shifter);
   size_t addrOffset = heapWordToOffset(addr);
   if (limit == NULL) {
@@ -73,12 +75,12 @@
   assert(nextAddr == limit || isMarked(nextAddr),
          "get_next_one postcondition");
   return nextAddr;
 }

-HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
-                                                 HeapWord* limit) const {
+HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
+                                                 const HeapWord* limit) const {
   size_t addrOffset = heapWordToOffset(addr);
   if (limit == NULL) {
     limit = _bmStartWord + _bmWordSize;
   }
   size_t limitOffset = heapWordToOffset(limit);
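A note on the bitmap arithmetic above: each mark bit covers (1 << _shifter) heap words, so heapWordToOffset() is a subtract-and-shift and align_size_up() rounds addr up to the next bit boundary. A minimal standalone sketch of that math, with simplified stand-ins rather than the HotSpot declarations:

#include <cstddef>
#include <cstdint>

// Same formula HotSpot's align_size_up() uses for power-of-two
// alignments: round size up to the next multiple of alignment.
inline intptr_t align_size_up(intptr_t size, intptr_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

// Sketch of heapWordToOffset(): distance from the bitmap's base address
// in words, scaled down so each bit position covers (1 << shifter) words.
inline size_t heap_word_to_offset(const intptr_t* bm_start,
                                  const intptr_t* addr, int shifter) {
  return (size_t)(addr - bm_start) >> shifter;
}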
@@ -1220,10 +1222,13 @@
     }
   }
 };

 void ConcurrentMark::scanRootRegions() {
+  // Start of concurrent marking.
+  ClassLoaderDataGraph::clear_claimed_marks();
+
   // scan_in_progress() will have been set to true only if there was
   // at least one root region to scan. So, if it's false, we
   // should not attempt to do any further work.
   if (root_regions()->scan_in_progress()) {
     _parallel_marking_threads = calc_parallel_marking_threads();
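The new ClassLoaderDataGraph::clear_claimed_marks() call resets the per-ClassLoaderData claim flags at the start of each concurrent cycle, so marking workers can race to process each class loader exactly once. A simplified, self-contained sketch of that claiming idiom (illustrative types, not HotSpot's):

#include <atomic>

// Stand-in for a ClassLoaderData's claim flag: many marking workers may
// call claim() concurrently, but the compare-exchange lets exactly one
// of them win per marking cycle.
struct ClaimableCLD {
  std::atomic<int> _claimed{0};

  // What clear_claimed_marks() does for every CLD before marking starts.
  void clear_claimed() { _claimed.store(0, std::memory_order_relaxed); }

  // Returns true for exactly one claiming worker per cycle.
  bool claim() {
    int expected = 0;
    return _claimed.compare_exchange_strong(expected, 1);
  }
};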
@@ -1268,11 +1273,11 @@
   set_concurrency_and_phase(active_workers, true /* concurrent */);

   CMConcurrentMarkingTask markingTask(this, cmThread());
   if (use_parallel_marking_threads()) {
     _parallel_workers->set_active_workers((int)active_workers);
-    // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
+    // Don't set _n_par_threads because it affects MT in process_roots()
     // and the decisions on that MT processing is made elsewhere.
     assert(_parallel_workers->active_workers() > 0, "Should have been set");
     _parallel_workers->run_task(&markingTask);
   } else {
     markingTask.work(0);
@@ -2136,26 +2141,31 @@

   // Clean up will have freed any regions completely full of garbage.
   // Update the soft reference policy with the new heap occupancy.
   Universe::update_heap_info_at_gc();

-  // We need to make this be a "collection" so any collection pause that
-  // races with it goes around and waits for completeCleanup to finish.
-  g1h->increment_total_collections();
-
-  // We reclaimed old regions so we should calculate the sizes to make
-  // sure we update the old gen/space data.
-  g1h->g1mm()->update_sizes();
-
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     Universe::heap()->prepare_for_verify();
     Universe::verify(VerifyOption_G1UsePrevMarking,
                      " VerifyDuringGC:(after)");
   }

   g1h->verify_region_sets_optional();
+
+  // We need to make this be a "collection" so any collection pause that
+  // races with it goes around and waits for completeCleanup to finish.
+  g1h->increment_total_collections();
+
+  // Clean out dead classes and update Metaspace sizes.
+  ClassLoaderDataGraph::purge();
+  MetaspaceGC::compute_new_size();
+
+  // We reclaimed old regions so we should calculate the sizes to make
+  // sure we update the old gen/space data.
+  g1h->g1mm()->update_sizes();

   g1h->trace_heap_after_concurrent_cycle();
 }

 void ConcurrentMark::completeCleanup() {
   if (has_aborted()) return;
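The relocated increment_total_collections() call implements the handshake its comment describes: a pause that races with cleanup notices the collection counter has moved and retries, which makes it wait for completeCleanup() to finish. A hypothetical sketch of that counter protocol (names are illustrative, not HotSpot's):

#include <atomic>

// A pause samples the counter at its start; if concurrent cleanup bumps
// it in between, the pause "goes around" and retries.
static std::atomic<unsigned> total_collections{0};

unsigned pause_prologue() {                 // sampled when a pause begins
  return total_collections.load();
}

bool pause_must_retry(unsigned sampled) {   // cleanup raced with the pause
  return total_collections.load() != sampled;
}

void cleanup_done() {                       // models increment_total_collections()
  total_collections.fetch_add(1);
}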
@@ -2438,10 +2448,30 @@
   _g1h->set_par_threads(_active_workers);
   _workers->run_task(&enq_task_proxy);
   _g1h->set_par_threads(0);
 }

+void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
+  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
+}
+
+// Helper class to get rid of some boilerplate code.
+class G1RemarkGCTraceTime : public GCTraceTime {
+  static bool doit_and_prepend(bool doit) {
+    if (doit) {
+      gclog_or_tty->put(' ');
+    }
+    return doit;
+  }
+
+ public:
+  G1RemarkGCTraceTime(const char* title, bool doit)
+    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
+                  G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
+  }
+};
+
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   if (has_overflown()) {
     // Skip processing the discovered references if we have
     // overflown the global marking stack. Reference objects
     // only get discovered once so it is OK to not
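G1RemarkGCTraceTime is an RAII phase timer: doit_and_prepend() emits a leading space before the inherited GCTraceTime constructor runs, so nested phase timings line up in the GC log. A self-contained sketch of the underlying pattern, with the GC-timer and GC-id plumbing omitted:

#include <chrono>
#include <cstdio>

// Construction starts the clock, destruction prints the elapsed time, so
// wrapping a phase in braces times exactly that phase (simplified output;
// GCTraceTime's real format differs).
class ScopedPhaseTrace {
  const char* _title;
  bool _enabled;
  std::chrono::steady_clock::time_point _start;
 public:
  ScopedPhaseTrace(const char* title, bool enabled)
      : _title(title), _enabled(enabled),
        _start(std::chrono::steady_clock::now()) {
    if (_enabled) std::printf(" [%s", _title);  // leading space, as in doit_and_prepend()
  }
  ~ScopedPhaseTrace() {
    if (_enabled) {
      double secs = std::chrono::duration<double>(
                        std::chrono::steady_clock::now() - _start).count();
      std::printf(", %.7f secs]", secs);
    }
  }
};

// Usage mirrors the remark code below:
//   { ScopedPhaseTrace t("System Dictionary Unloading", verbose); /* work */ }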
@@ -2550,20 +2580,90 @@
   if (has_overflown()) {
     // We can not trust g1_is_alive if the marking stack overflowed
     return;
   }

-  g1h->unlink_string_and_symbol_table(&g1_is_alive,
-                                      /* process_strings */ false, // currently strings are always roots
-                                      /* process_symbols */ true);
+  assert(_markStack.isEmpty(), "Marking should have completed");
+
+  // Unload Klasses, String, Symbols, Code Cache, etc.
+
+  G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
+
+  bool purged_classes;
+
+  {
+    G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
+    purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
+  }
+
+  {
+    G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
+    weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
+  }
+
+  if (G1StringDedup::is_enabled()) {
+    G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
+    G1StringDedup::unlink(&g1_is_alive);
+  }
 }

 void ConcurrentMark::swapMarkBitMaps() {
   CMBitMapRO* temp = _prevMarkBitMap;
   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
   _nextMarkBitMap  = (CMBitMap*) temp;
 }
+
+class CMObjectClosure;
+
+// Closure for iterating over objects, currently only used for
+// processing SATB buffers.
+class CMObjectClosure : public ObjectClosure {
+ private:
+  CMTask* _task;
+
+ public:
+  void do_object(oop obj) {
+    _task->deal_with_reference(obj);
+  }
+
+  CMObjectClosure(CMTask* task) : _task(task) { }
+};
+
+class G1RemarkThreadsClosure : public ThreadClosure {
+  CMObjectClosure _cm_obj;
+  G1CMOopClosure _cm_cl;
+  MarkingCodeBlobClosure _code_cl;
+  int _thread_parity;
+  bool _is_par;
+
+ public:
+  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
+    _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
+    _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
+
+  void do_thread(Thread* thread) {
+    if (thread->is_Java_thread()) {
+      if (thread->claim_oops_do(_is_par, _thread_parity)) {
+        JavaThread* jt = (JavaThread*)thread;
+
+        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking;
+        // however, oops reachable from nmethods have very complex lifecycles:
+        // * Alive if on the stack of an executing method
+        // * Weakly reachable otherwise
+        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver, should be
+        // live by the SATB invariant, but other oops recorded in nmethods may behave differently.
+        jt->nmethods_do(&_code_cl);
+
+        jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
+      }
+    } else if (thread->is_VM_thread()) {
+      if (thread->claim_oops_do(_is_par, _thread_parity)) {
+        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
+      }
+    }
+  }
+};

 class CMRemarkTask: public AbstractGangTask {
  private:
   ConcurrentMark* _cm;
   bool _is_serial;
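G1RemarkThreadsClosure depends on Thread::claim_oops_do() so that when several remark workers walk the thread list concurrently, each thread's nmethods and SATB buffer are processed exactly once. An illustrative sketch of the parity-based claiming it relies on (simplified, not the HotSpot implementation):

#include <atomic>

// Each remark round uses a fresh global parity value; a worker claims a
// thread by CASing the thread's stored parity up to the current value, so
// exactly one worker wins each thread per round.
struct ThreadStub {
  std::atomic<int> _oops_do_parity{0};

  bool claim_oops_do(bool is_par, int parity) {
    if (!is_par) {                  // serial case: claim unconditionally
      _oops_do_parity.store(parity);
      return true;
    }
    int seen = _oops_do_parity.load();
    return seen != parity &&
           _oops_do_parity.compare_exchange_strong(seen, parity);
  }
};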
@@ -2572,10 +2672,18 @@
     // Since all available tasks are actually started, we should
     // only proceed if we're supposed to be actived.
     if (worker_id < _cm->active_tasks()) {
       CMTask* task = _cm->task(worker_id);
       task->record_start_time();
+      {
+        ResourceMark rm;
+        HandleMark hm;
+
+        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
+        Threads::threads_do(&threads_f);
+      }
+
       do {
         task->do_marking_step(1000000000.0 /* something very large */,
                               true /* do_termination */,
                               _is_serial);
       } while (task->has_aborted() && !_cm->has_overflown());
@@ -2593,10 +2701,12 @@

 void ConcurrentMark::checkpointRootsFinalWork() {
   ResourceMark rm;
   HandleMark hm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());

   g1h->ensure_parsability(false);

   if (G1CollectedHeap::use_parallel_gc_threads()) {
     G1CollectedHeap::StrongRootsScope srs(g1h);
@@ -3419,24 +3529,10 @@
     // the iteration
     return !_task->has_aborted();
   }
 };

-// Closure for iterating over objects, currently only used for
-// processing SATB buffers.
-class CMObjectClosure : public ObjectClosure {
- private:
-  CMTask* _task;
-
- public:
-  void do_object(oop obj) {
-    _task->deal_with_reference(obj);
-  }
-
-  CMObjectClosure(CMTask* task) : _task(task) { }
-};
-
 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                                ConcurrentMark* cm,
                                CMTask* task)
   : _g1h(g1h), _cm(cm), _task(task) {
   assert(_ref_processor == NULL, "should be initialized to NULL");
@@ -3895,19 +3991,10 @@
       if (_cm->verbose_medium()) {
         gclog_or_tty->print_cr("[%u] processed an SATB buffer", _worker_id);
       }
       statsOnly( ++_satb_buffers_processed );
       regular_clock_call();
-    }
-  }
-
-  if (!concurrent() && !has_aborted()) {
-    // We should only do this during remark.
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
-      satb_mq_set.par_iterate_closure_all_threads(_worker_id);
-    } else {
-      satb_mq_set.iterate_closure_all_threads();
     }
   }

   _draining_satb_buffers = false;

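The deleted iterate_closure_all_threads() pass is what the G1RemarkThreadsClosure added above replaces: per-thread SATB buffers are now drained while walking the threads during remark, so a second global pass over all threads' partially filled buffers is no longer needed. For context, a self-contained sketch of the snapshot-at-the-beginning pre-write barrier that fills these buffers in the first place (illustrative only; HotSpot's barrier is emitted by the compilers and uses per-thread queues):

#include <vector>

typedef void* oop;   // stand-in for HotSpot's oop

struct SATBQueue {
  std::vector<oop> _buf;
  bool _active;                       // true while concurrent marking runs

  void enqueue(oop old_val) {
    if (_active && old_val != NULL) {
      _buf.push_back(old_val);        // drained later by drain_satb_buffers()
    }
  }
};

// Before a reference field is overwritten during marking, record the old
// value so marking still visits the object graph as of the snapshot taken
// at mark start.
inline void pre_write_barrier(SATBQueue& q, oop* field, oop new_val) {
  q.enqueue(*field);
  *field = new_val;
}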
