src/share/vm/memory/sharedHeap.cpp

changeset 6992:2c6ef90f030a
parent 6972:64ac9c55d666
child 6996:f3aeae1f9fc5
--- a/src/share/vm/memory/sharedHeap.cpp	Tue Jul 01 09:03:55 2014 +0200
+++ b/src/share/vm/memory/sharedHeap.cpp	Mon Jul 07 10:12:40 2014 +0200
@@ -29,6 +29,7 @@
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/sharedHeap.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/java.hpp"
 #include "services/management.hpp"
@@ -39,8 +40,8 @@
 
 SharedHeap* SharedHeap::_sh;
 
-// The set of potentially parallel tasks in strong root scanning.
-enum SH_process_strong_roots_tasks {
+// The set of potentially parallel tasks in root scanning.
+enum SH_process_roots_tasks {
   SH_PS_Universe_oops_do,
   SH_PS_JNIHandles_oops_do,
   SH_PS_ObjectSynchronizer_oops_do,
@@ -58,6 +59,7 @@
   CollectedHeap(),
   _collector_policy(policy_),
   _rem_set(NULL),
+  _strong_roots_scope(NULL),
   _strong_roots_parity(0),
   _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
   _workers(NULL)
@@ -114,6 +116,19 @@
 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
 #endif
 
+SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
+  return _strong_roots_scope;
+}
+void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
+  assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
+  assert(scope != NULL, "Illegal argument");
+  _strong_roots_scope = scope;
+}
+void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
+  assert(_strong_roots_scope == scope, "Wrong scope unregistered");
+  _strong_roots_scope = NULL;
+}
+
 void SharedHeap::change_strong_roots_parity() {
   // Also set the new collection parity.
   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
@@ -124,111 +139,160 @@
          "Not in range.");
 }
 
-SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
-  : MarkScope(activate)
+SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
+  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
 {
   if (_active) {
-    outer->change_strong_roots_parity();
+    _sh->register_strong_roots_scope(this);
+    _sh->change_strong_roots_parity();
     // Zero the claimed high water mark in the StringTable
     StringTable::clear_parallel_claimed_index();
   }
 }
 
 SharedHeap::StrongRootsScope::~StrongRootsScope() {
-  // nothing particular
+  if (_active) {
+    _sh->unregister_strong_roots_scope(this);
+  }
+}
+
+Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
+
+void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
+  // The Thread work barrier is only needed by G1.
+  // No need to use the barrier if this is single-threaded code.
+  if (UseG1GC && n_workers > 0) {
+    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
+    if (new_value == n_workers) {
+      // This thread is last. Notify the others.
+      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+      _lock->notify_all();
+    }
+  }
+}
+
+void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
+  // No need to use the barrier if this is single-threaded code.
+  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
+    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+    while ((uint)_n_workers_done_with_threads != n_workers) {
+      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
+    }
+  }
+}
+
+void SharedHeap::process_roots(bool activate_scope,
+                               ScanningOption so,
+                               OopClosure* strong_roots,
+                               OopClosure* weak_roots,
+                               CLDClosure* strong_cld_closure,
+                               CLDClosure* weak_cld_closure,
+                               CodeBlobClosure* code_roots) {
+  StrongRootsScope srs(this, activate_scope);
+
+  // General roots.
+  assert(_strong_roots_parity != 0, "must have called prologue code");
+  assert(code_roots != NULL, "code root closure should always be set");
+  // _n_termination for _process_strong_tasks should be set up stream
+  // in a method not running in a GC worker.  Otherwise the GC worker
+  // could be trying to change the termination condition while the task
+  // is executing in another GC worker.
+
+  // Iterating over the CLDG and the Threads are done early to allow G1 to
+  // first process the strong CLDs and nmethods and then, after a barrier,
+  // let the thread process the weak CLDs and nmethods.
+
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
+    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
+  }
+
+  // Some CLDs contained in the thread frames should be considered strong.
+  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
+  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
+  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
+  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
+
+  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
+
+  // This is the point where this worker thread will not find more strong CLDs/nmethods.
+  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
+  active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());
+
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
+    Universe::oops_do(strong_roots);
+  }
+  // Global (strong) JNI handles
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
+    JNIHandles::oops_do(strong_roots);
+
+  if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
+    ObjectSynchronizer::oops_do(strong_roots);
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
+    FlatProfiler::oops_do(strong_roots);
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
+    Management::oops_do(strong_roots);
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
+    JvmtiExport::oops_do(strong_roots);
+
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
+    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
+  }
+
+  // All threads execute the following. A specific chunk of buckets
+  // from the StringTable are the individual tasks.
+  if (weak_roots != NULL) {
+    if (CollectedHeap::use_parallel_gc_threads()) {
+      StringTable::possibly_parallel_oops_do(weak_roots);
+    } else {
+      StringTable::oops_do(weak_roots);
+    }
+  }
+
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
+    if (so & SO_ScavengeCodeCache) {
+      assert(code_roots != NULL, "must supply closure for code cache");
+
+      // We only visit parts of the CodeCache when scavenging.
+      CodeCache::scavenge_root_nmethods_do(code_roots);
+    }
+    if (so & SO_AllCodeCache) {
+      assert(code_roots != NULL, "must supply closure for code cache");
+
+      // CMSCollector uses this to do intermediate-strength collections.
+      // We scan the entire code cache, since CodeCache::do_unloading is not called.
+      CodeCache::blobs_do(code_roots);
+    }
+    // Verify that the code cache contents are not subject to
+    // movement by a scavenging collection.
+    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
+    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
+  }
+
+  _process_strong_tasks->all_tasks_completed();
+}
+
+void SharedHeap::process_all_roots(bool activate_scope,
+                                   ScanningOption so,
+                                   OopClosure* roots,
+                                   CLDClosure* cld_closure,
+                                   CodeBlobClosure* code_closure) {
+  process_roots(activate_scope, so,
+                roots, roots,
+                cld_closure, cld_closure,
+                code_closure);
 }
 
 void SharedHeap::process_strong_roots(bool activate_scope,
                                       ScanningOption so,
                                       OopClosure* roots,
-                                      KlassClosure* klass_closure) {
-  StrongRootsScope srs(this, activate_scope);
+                                      CLDClosure* cld_closure,
+                                      CodeBlobClosure* code_closure) {
+  process_roots(activate_scope, so,
+                roots, NULL,
+                cld_closure, NULL,
+                code_closure);
+}
 
-  // General strong roots.
-  assert(_strong_roots_parity != 0, "must have called prologue code");
-  // _n_termination for _process_strong_tasks should be set up stream
-  // in a method not running in a GC worker.  Otherwise the GC worker
-  // could be trying to change the termination condition while the task
-  // is executing in another GC worker.
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
-    Universe::oops_do(roots);
-  }
-  // Global (strong) JNI handles
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
-    JNIHandles::oops_do(roots);
-
-  CodeBlobToOopClosure code_roots(roots, true);
-
-  CLDToOopClosure roots_from_clds(roots);
-  // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
-  // CLDs which are strongly reachable from the thread stacks.
-  CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
-  // All threads execute this; the individual threads are task groups.
-  if (CollectedHeap::use_parallel_gc_threads()) {
-    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, &code_roots);
-  } else {
-    Threads::oops_do(roots, roots_from_clds_p, &code_roots);
-  }
-
-  if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
-    ObjectSynchronizer::oops_do(roots);
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
-    FlatProfiler::oops_do(roots);
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
-    Management::oops_do(roots);
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
-    JvmtiExport::oops_do(roots);
-
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
-    if (so & SO_AllClasses) {
-      SystemDictionary::oops_do(roots);
-    } else if (so & SO_SystemClasses) {
-      SystemDictionary::always_strong_oops_do(roots);
-    } else {
-      fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
-    }
-  }
-
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
-    if (so & SO_AllClasses) {
-      ClassLoaderDataGraph::oops_do(roots, klass_closure, /* must_claim */ false);
-    } else if (so & SO_SystemClasses) {
-      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, /* must_claim */ true);
-    }
-  }
-
-  // All threads execute the following. A specific chunk of buckets
-  // from the StringTable are the individual tasks.
-  if (so & SO_Strings) {
-    if (CollectedHeap::use_parallel_gc_threads()) {
-      StringTable::possibly_parallel_oops_do(roots);
-    } else {
-      StringTable::oops_do(roots);
-    }
-  }
-
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
-    if (so & SO_ScavengeCodeCache) {
-      assert(&code_roots != NULL, "must supply closure for code cache");
-
-      // We only visit parts of the CodeCache when scavenging.
-      CodeCache::scavenge_root_nmethods_do(&code_roots);
-    }
-    if (so & SO_AllCodeCache) {
-      assert(&code_roots != NULL, "must supply closure for code cache");
-
-      // CMSCollector uses this to do intermediate-strength collections.
-      // We scan the entire code cache, since CodeCache::do_unloading is not called.
-      CodeCache::blobs_do(&code_roots);
-    }
-    // Verify that the code cache contents are not subject to
-    // movement by a scavenging collection.
-    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
-    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
-  }
-
-  _process_strong_tasks->all_tasks_completed();
-}
 
 class AlwaysTrueClosure: public BoolObjectClosure {
 public:
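
Notes on the mechanisms this change introduces. First, StrongRootsScope gains a small RAII registration handshake: the constructor registers the scope with the heap, the destructor unregisters it, and the asserts enforce that at most one scope is active at a time, so active_strong_roots_scope() is non-NULL exactly while root processing is in progress. A minimal standalone sketch of the same pattern (hypothetical Heap/RootsScope names, not the HotSpot classes):

#include <cassert>

class RootsScope;

// Holder that knows which scope, if any, is currently active.
class Heap {
  RootsScope* _scope = nullptr;
public:
  RootsScope* active_scope() const { return _scope; }
  void register_scope(RootsScope* s) {
    assert(_scope == nullptr && "only one scope may be active");
    _scope = s;
  }
  void unregister_scope(RootsScope* s) {
    assert(_scope == s && "wrong scope unregistered");
    _scope = nullptr;
  }
};

// RAII scope: registered for exactly its lifetime, even on early return.
class RootsScope {
  Heap* _heap;
  bool  _active;
public:
  RootsScope(Heap* heap, bool activate) : _heap(heap), _active(activate) {
    if (_active) _heap->register_scope(this);
  }
  ~RootsScope() {
    if (_active) _heap->unregister_scope(this);
  }
};

Tying registration to the constructor/destructor pair means the scope is unregistered on every exit path, which is what lets process_roots() rely on active_strong_roots_scope() being valid for its whole body.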

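Second, mark_worker_done_with_threads() and wait_until_all_workers_done_with_threads() form a one-shot counting barrier: each G1 worker atomically increments a shared counter once it will publish no more strong CLDs/nmethods from thread stacks, the last worker to arrive notifies, and the waiter blocks on the monitor until the count reaches n_workers. The same counter-plus-monitor shape in portable C++11, with std::atomic and std::condition_variable standing in for HotSpot's Atomic::add and Monitor (a sketch under those substitutions, not the VM code):

#include <atomic>
#include <condition_variable>
#include <mutex>

class WorkersDoneBarrier {
  std::atomic<unsigned>   _done_count{0};
  std::mutex              _lock;
  std::condition_variable _cv;
public:
  // Worker side: count this worker as done; the last one notifies.
  void mark_done(unsigned n_workers) {
    if (n_workers == 0) return;               // single-threaded: no barrier needed
    unsigned new_value = _done_count.fetch_add(1) + 1;
    if (new_value == n_workers) {
      std::lock_guard<std::mutex> g(_lock);   // serialize with the waiter
      _cv.notify_all();
    }
  }
  // Consumer side: block until all n_workers have called mark_done().
  void wait_all_done(unsigned n_workers) {
    if (n_workers == 0) return;
    std::unique_lock<std::mutex> g(_lock);
    _cv.wait(g, [&] { return _done_count.load() == n_workers; });
  }
};

As in the patch, the notifier takes the lock before notifying and the waiter rechecks the count under the lock, which closes the window where the last increment could land between the waiter's check and its wait.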
mercurial
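Third, the repeated is_task_claimed() guards are what make process_roots() safe to run in every worker simultaneously: all workers walk the same list of root groups, but only the first to claim a given task id gets false back and performs that group's oops_do; later callers see true and skip it. A compare-and-swap sketch of that claim protocol (illustrative only, not HotSpot's SubTasksDone):

#include <atomic>
#include <cstddef>

class TaskClaimTable {
  static const std::size_t MAX_TASKS = 16;
  std::atomic<bool> _claimed[MAX_TASKS];
public:
  TaskClaimTable() {
    for (std::size_t i = 0; i < MAX_TASKS; i++) _claimed[i].store(false);
  }
  // Returns true if another worker already claimed this task;
  // exactly one caller per task id ever sees false.
  bool is_task_claimed(std::size_t task_id) {
    bool expected = false;
    return !_claimed[task_id].compare_exchange_strong(expected, true);
  }
};

With process_roots() carrying the full strong/weak split, process_all_roots() and process_strong_roots() reduce to thin wrappers: the former passes the same closures for the strong and weak arguments, while the latter passes NULL for the weak ones so weak roots and weak CLDs are simply skipped.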