src/share/vm/gc_implementation/g1/concurrentMark.cpp

changeset 6992
2c6ef90f030a
parent 6977
4dfab3faf5e7
child 6996
f3aeae1f9fc5
     1.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Jul 01 09:03:55 2014 +0200
     1.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Jul 07 10:12:40 2014 +0200
     1.3 @@ -24,6 +24,7 @@
     1.4  
     1.5  #include "precompiled.hpp"
     1.6  #include "classfile/symbolTable.hpp"
     1.7 +#include "code/codeCache.hpp"
     1.8  #include "gc_implementation/g1/concurrentMark.inline.hpp"
     1.9  #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
    1.10  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    1.11 @@ -39,6 +40,7 @@
    1.12  #include "gc_implementation/shared/gcTimer.hpp"
    1.13  #include "gc_implementation/shared/gcTrace.hpp"
    1.14  #include "gc_implementation/shared/gcTraceTime.hpp"
    1.15 +#include "memory/allocation.hpp"
    1.16  #include "memory/genOopClosures.inline.hpp"
    1.17  #include "memory/referencePolicy.hpp"
    1.18  #include "memory/resourceArea.hpp"
    1.19 @@ -57,8 +59,8 @@
    1.20    _bmWordSize = 0;
    1.21  }
    1.22  
    1.23 -HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
    1.24 -                                               HeapWord* limit) const {
    1.25 +HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
    1.26 +                                               const HeapWord* limit) const {
    1.27    // First we must round addr *up* to a possible object boundary.
    1.28    addr = (HeapWord*)align_size_up((intptr_t)addr,
    1.29                                    HeapWordSize << _shifter);
    1.30 @@ -75,8 +77,8 @@
    1.31    return nextAddr;
    1.32  }
    1.33  
    1.34 -HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(HeapWord* addr,
    1.35 -                                                 HeapWord* limit) const {
    1.36 +HeapWord* CMBitMapRO::getNextUnmarkedWordAddress(const HeapWord* addr,
    1.37 +                                                 const HeapWord* limit) const {
    1.38    size_t addrOffset = heapWordToOffset(addr);
    1.39    if (limit == NULL) {
    1.40      limit = _bmStartWord + _bmWordSize;
    1.41 @@ -1222,6 +1224,9 @@
    1.42  };
    1.43  
    1.44  void ConcurrentMark::scanRootRegions() {
    1.45 +  // Start of concurrent marking.
    1.46 +  ClassLoaderDataGraph::clear_claimed_marks();
    1.47 +
    1.48    // scan_in_progress() will have been set to true only if there was
    1.49    // at least one root region to scan. So, if it's false, we
    1.50    // should not attempt to do any further work.
    1.51 @@ -1270,7 +1275,7 @@
    1.52    CMConcurrentMarkingTask markingTask(this, cmThread());
    1.53    if (use_parallel_marking_threads()) {
    1.54      _parallel_workers->set_active_workers((int)active_workers);
    1.55 -    // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
    1.56 +    // Don't set _n_par_threads because it affects MT in process_roots()
    1.57      // and the decisions on that MT processing is made elsewhere.
    1.58      assert(_parallel_workers->active_workers() > 0, "Should have been set");
    1.59      _parallel_workers->run_task(&markingTask);
    1.60 @@ -2138,14 +2143,6 @@
    1.61    // Update the soft reference policy with the new heap occupancy.
    1.62    Universe::update_heap_info_at_gc();
    1.63  
    1.64 -  // We need to make this be a "collection" so any collection pause that
    1.65 -  // races with it goes around and waits for completeCleanup to finish.
    1.66 -  g1h->increment_total_collections();
    1.67 -
    1.68 -  // We reclaimed old regions so we should calculate the sizes to make
    1.69 -  // sure we update the old gen/space data.
    1.70 -  g1h->g1mm()->update_sizes();
    1.71 -
    1.72    if (VerifyDuringGC) {
    1.73      HandleMark hm;  // handle scope
    1.74      Universe::heap()->prepare_for_verify();
    1.75 @@ -2154,6 +2151,19 @@
    1.76    }
    1.77  
    1.78    g1h->verify_region_sets_optional();
    1.79 +
    1.80 +  // We need to make this be a "collection" so any collection pause that
    1.81 +  // races with it goes around and waits for completeCleanup to finish.
    1.82 +  g1h->increment_total_collections();
    1.83 +
    1.84 +  // Clean out dead classes and update Metaspace sizes.
    1.85 +  ClassLoaderDataGraph::purge();
    1.86 +  MetaspaceGC::compute_new_size();
    1.87 +
    1.88 +  // We reclaimed old regions so we should calculate the sizes to make
    1.89 +  // sure we update the old gen/space data.
    1.90 +  g1h->g1mm()->update_sizes();
    1.91 +
    1.92    g1h->trace_heap_after_concurrent_cycle();
    1.93  }
    1.94  
    1.95 @@ -2440,6 +2450,26 @@
    1.96    _g1h->set_par_threads(0);
    1.97  }
    1.98  
    1.99 +void ConcurrentMark::weakRefsWorkParallelPart(BoolObjectClosure* is_alive, bool purged_classes) {
   1.100 +  G1CollectedHeap::heap()->parallel_cleaning(is_alive, true, true, purged_classes);
   1.101 +}
   1.102 +
   1.103 +// Helper class to get rid of some boilerplate code.
   1.104 +class G1RemarkGCTraceTime : public GCTraceTime {
   1.105 +  static bool doit_and_prepend(bool doit) {
   1.106 +    if (doit) {
   1.107 +      gclog_or_tty->put(' ');
   1.108 +    }
   1.109 +    return doit;
   1.110 +  }
   1.111 +
   1.112 + public:
   1.113 +  G1RemarkGCTraceTime(const char* title, bool doit)
   1.114 +    : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
   1.115 +        G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
   1.116 +  }
   1.117 +};
   1.118 +
   1.119  void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   1.120    if (has_overflown()) {
   1.121      // Skip processing the discovered references if we have
   1.122 @@ -2552,9 +2582,28 @@
   1.123      return;
   1.124    }
   1.125  
   1.126 -  g1h->unlink_string_and_symbol_table(&g1_is_alive,
   1.127 -                                      /* process_strings */ false, // currently strings are always roots
   1.128 -                                      /* process_symbols */ true);
   1.129 +  assert(_markStack.isEmpty(), "Marking should have completed");
   1.130 +
   1.131 +  // Unload Klasses, String, Symbols, Code Cache, etc.
   1.132 +
   1.133 +  G1RemarkGCTraceTime trace("Unloading", G1Log::finer());
   1.134 +
   1.135 +  bool purged_classes;
   1.136 +
   1.137 +  {
   1.138 +    G1RemarkGCTraceTime trace("System Dictionary Unloading", G1Log::finest());
   1.139 +    purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
   1.140 +  }
   1.141 +
   1.142 +  {
   1.143 +    G1RemarkGCTraceTime trace("Parallel Unloading", G1Log::finest());
   1.144 +    weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
   1.145 +  }
   1.146 +
   1.147 +  if (G1StringDedup::is_enabled()) {
   1.148 +    G1RemarkGCTraceTime trace("String Deduplication Unlink", G1Log::finest());
   1.149 +    G1StringDedup::unlink(&g1_is_alive);
   1.150 +  }
   1.151  }
   1.152  
   1.153  void ConcurrentMark::swapMarkBitMaps() {
   1.154 @@ -2563,6 +2612,57 @@
   1.155    _nextMarkBitMap  = (CMBitMap*)  temp;
   1.156  }
   1.157  
   1.158 +class CMObjectClosure;
   1.159 +
   1.160 +// Closure for iterating over objects, currently only used for
   1.161 +// processing SATB buffers.
   1.162 +class CMObjectClosure : public ObjectClosure {
   1.163 +private:
   1.164 +  CMTask* _task;
   1.165 +
   1.166 +public:
   1.167 +  void do_object(oop obj) {
   1.168 +    _task->deal_with_reference(obj);
   1.169 +  }
   1.170 +
   1.171 +  CMObjectClosure(CMTask* task) : _task(task) { }
   1.172 +};
   1.173 +
   1.174 +class G1RemarkThreadsClosure : public ThreadClosure {
   1.175 +  CMObjectClosure _cm_obj;
   1.176 +  G1CMOopClosure _cm_cl;
   1.177 +  MarkingCodeBlobClosure _code_cl;
   1.178 +  int _thread_parity;
   1.179 +  bool _is_par;
   1.180 +
   1.181 + public:
   1.182 +  G1RemarkThreadsClosure(G1CollectedHeap* g1h, CMTask* task, bool is_par) :
   1.183 +    _cm_obj(task), _cm_cl(g1h, g1h->concurrent_mark(), task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations),
   1.184 +    _thread_parity(SharedHeap::heap()->strong_roots_parity()), _is_par(is_par) {}
   1.185 +
   1.186 +  void do_thread(Thread* thread) {
   1.187 +    if (thread->is_Java_thread()) {
   1.188 +      if (thread->claim_oops_do(_is_par, _thread_parity)) {
   1.189 +        JavaThread* jt = (JavaThread*)thread;
   1.190 +
    1.191 +        // In theory it should not be necessary to explicitly walk the nmethods to find roots for concurrent marking
    1.192 +        // however, oops reachable from nmethods have very complex lifecycles:
   1.193 +        // * Alive if on the stack of an executing method
   1.194 +        // * Weakly reachable otherwise
   1.195 +        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the receiver should be
   1.196 +        // live by the SATB invariant but other oops recorded in nmethods may behave differently.
   1.197 +        jt->nmethods_do(&_code_cl);
   1.198 +
   1.199 +        jt->satb_mark_queue().apply_closure_and_empty(&_cm_obj);
   1.200 +      }
   1.201 +    } else if (thread->is_VM_thread()) {
   1.202 +      if (thread->claim_oops_do(_is_par, _thread_parity)) {
   1.203 +        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(&_cm_obj);
   1.204 +      }
   1.205 +    }
   1.206 +  }
   1.207 +};
   1.208 +
   1.209  class CMRemarkTask: public AbstractGangTask {
   1.210  private:
   1.211    ConcurrentMark* _cm;
   1.212 @@ -2574,6 +2674,14 @@
   1.213      if (worker_id < _cm->active_tasks()) {
   1.214        CMTask* task = _cm->task(worker_id);
   1.215        task->record_start_time();
   1.216 +      {
   1.217 +        ResourceMark rm;
   1.218 +        HandleMark hm;
   1.219 +
   1.220 +        G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task, !_is_serial);
   1.221 +        Threads::threads_do(&threads_f);
   1.222 +      }
   1.223 +
   1.224        do {
   1.225          task->do_marking_step(1000000000.0 /* something very large */,
   1.226                                true         /* do_termination       */,
   1.227 @@ -2596,6 +2704,8 @@
   1.228    HandleMark   hm;
   1.229    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   1.230  
   1.231 +  G1RemarkGCTraceTime trace("Finalize Marking", G1Log::finer());
   1.232 +
   1.233    g1h->ensure_parsability(false);
   1.234  
   1.235    if (G1CollectedHeap::use_parallel_gc_threads()) {
   1.236 @@ -3421,20 +3531,6 @@
   1.237    }
   1.238  };
   1.239  
   1.240 -// Closure for iterating over objects, currently only used for
   1.241 -// processing SATB buffers.
   1.242 -class CMObjectClosure : public ObjectClosure {
   1.243 -private:
   1.244 -  CMTask* _task;
   1.245 -
   1.246 -public:
   1.247 -  void do_object(oop obj) {
   1.248 -    _task->deal_with_reference(obj);
   1.249 -  }
   1.250 -
   1.251 -  CMObjectClosure(CMTask* task) : _task(task) { }
   1.252 -};
   1.253 -
   1.254  G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
   1.255                                 ConcurrentMark* cm,
   1.256                                 CMTask* task)
   1.257 @@ -3900,15 +3996,6 @@
   1.258      }
   1.259    }
   1.260  
   1.261 -  if (!concurrent() && !has_aborted()) {
   1.262 -    // We should only do this during remark.
   1.263 -    if (G1CollectedHeap::use_parallel_gc_threads()) {
   1.264 -      satb_mq_set.par_iterate_closure_all_threads(_worker_id);
   1.265 -    } else {
   1.266 -      satb_mq_set.iterate_closure_all_threads();
   1.267 -    }
   1.268 -  }
   1.269 -
   1.270    _draining_satb_buffers = false;
   1.271  
   1.272    assert(has_aborted() ||

mercurial