src/share/vm/memory/sharedHeap.cpp

Mon, 07 Jul 2014 10:12:40 +0200

author
stefank
date
Mon, 07 Jul 2014 10:12:40 +0200
changeset 6992
2c6ef90f030a
parent 6972
64ac9c55d666
child 6996
f3aeae1f9fc5
permissions
-rw-r--r--

8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com

duke@435 1 /*
drchase@6680 2 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "classfile/symbolTable.hpp"
stefank@2314 27 #include "classfile/systemDictionary.hpp"
stefank@2314 28 #include "code/codeCache.hpp"
stefank@2314 29 #include "gc_interface/collectedHeap.inline.hpp"
stefank@2314 30 #include "memory/sharedHeap.hpp"
stefank@2314 31 #include "oops/oop.inline.hpp"
stefank@6992 32 #include "runtime/atomic.inline.hpp"
stefank@2314 33 #include "runtime/fprofiler.hpp"
stefank@2314 34 #include "runtime/java.hpp"
stefank@2314 35 #include "services/management.hpp"
stefank@2314 36 #include "utilities/copy.hpp"
stefank@2314 37 #include "utilities/workgroup.hpp"
duke@435 38
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// The single SharedHeap instance; assigned exactly once, in the
// SharedHeap constructor.
SharedHeap* SharedHeap::_sh;
duke@435 42
// The set of potentially parallel tasks in root scanning.
// Each enumerator is a sub-task id claimed at most once per strong-roots
// iteration through the _process_strong_tasks SubTasksDone (created with
// SH_PS_NumElements slots in the SharedHeap constructor).
enum SH_process_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,
  SH_PS_FlatProfiler_oops_do,
  SH_PS_Management_oops_do,
  SH_PS_SystemDictionary_oops_do,
  SH_PS_ClassLoaderDataGraph_oops_do,
  SH_PS_jvmti_oops_do,
  SH_PS_CodeCache_oops_do,
  // Leave this one last: it counts the entries above.
  SH_PS_NumElements
};
duke@435 57
// Constructs the shared heap: records the collector policy, allocates the
// strong-roots SubTasksDone, and — for the parallel-capable collectors
// (ParNew, CMS with parallel initial-mark/remark, G1) — creates and
// initializes the parallel GC worker gang.
SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _rem_set(NULL),
  _strong_roots_scope(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  _workers(NULL)
{
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  _sh = this;  // _sh is static, should be set only once.
  // Worker threads are only needed when a parallel collector is selected
  // and ParallelGCThreads is non-zero.
  if ((UseParNewGC ||
       (UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled ||
                               CMSParallelRemarkEnabled)) ||
       UseG1GC) &&
      ParallelGCThreads > 0) {
    _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    if (_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _workers->initialize_workers();
    }
  }
}
duke@435 86
jmasa@3294 87 int SharedHeap::n_termination() {
jmasa@3294 88 return _process_strong_tasks->n_threads();
jmasa@3294 89 }
jmasa@3294 90
// Sets the number of threads that must reach termination for the
// strong-roots SubTasksDone.
void SharedHeap::set_n_termination(int t) {
  _process_strong_tasks->set_n_threads(t);
}
jmasa@3294 94
ysr@777 95 bool SharedHeap::heap_lock_held_for_gc() {
ysr@777 96 Thread* t = Thread::current();
ysr@777 97 return Heap_lock->owned_by_self()
ysr@777 98 || ( (t->is_GC_task_thread() || t->is_VM_thread())
ysr@777 99 && _thread_holds_heap_lock_for_gc);
ysr@777 100 }
duke@435 101
// Records the number of parallel GC threads for the upcoming operation and
// propagates it to the strong-roots SubTasksDone. Must be 0 under serial GC.
void SharedHeap::set_par_threads(uint t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  _n_par_threads = t;
  _process_strong_tasks->set_n_threads(t);
}
duke@435 107
#ifdef ASSERT
// Debug-only closure asserting that no oop it visits is in the part of the
// heap subject to a partial (scavenging) collection. Used below to verify
// the non-scavengable code-cache contents.
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable."); }
  // Narrow oops are not expected from these roots.
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif
jmasa@2909 118
// Returns the currently registered StrongRootsScope, or NULL when no
// strong-roots iteration is in progress.
SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
  return _strong_roots_scope;
}
// Installs 'scope' as the single active StrongRootsScope for this heap.
// At most one scope may be active at a time.
void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
  assert(scope != NULL, "Illegal argument");
  _strong_roots_scope = scope;
}
// Clears the active StrongRootsScope; 'scope' must be the one registered.
void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == scope, "Wrong scope unregistered");
  _strong_roots_scope = NULL;
}
stefank@6992 131
duke@435 132 void SharedHeap::change_strong_roots_parity() {
duke@435 133 // Also set the new collection parity.
duke@435 134 assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
duke@435 135 "Not in range.");
duke@435 136 _strong_roots_parity++;
duke@435 137 if (_strong_roots_parity == 3) _strong_roots_parity = 1;
duke@435 138 assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
duke@435 139 "Not in range.");
duke@435 140 }
duke@435 141
// When 'activate' is true (MarkScope sets _active), this scope registers
// itself with the heap, bumps the strong-roots parity so threads are
// re-claimed for this iteration, and resets the StringTable's parallel
// claim index.
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
{
  if (_active) {
    _sh->register_strong_roots_scope(this);
    _sh->change_strong_roots_parity();
    // Zero the claimed high water mark in the StringTable
    StringTable::clear_parallel_claimed_index();
  }
}
jrose@1424 152
jrose@1424 153 SharedHeap::StrongRootsScope::~StrongRootsScope() {
stefank@6992 154 if (_active) {
stefank@6992 155 _sh->unregister_strong_roots_scope(this);
stefank@6992 156 }
stefank@6992 157 }
stefank@6992 158
stefank@6992 159 Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
stefank@6992 160
// Atomically counts this worker as done scanning thread stacks. The last
// of the 'n_workers' to arrive notifies any waiters blocked in
// wait_until_all_workers_done_with_threads().
void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
  // The Thread work barrier is only needed by G1.
  // No need to use the barrier if this is single-threaded code.
  if (UseG1GC && n_workers > 0) {
    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
    if (new_value == n_workers) {
      // This thread is last. Notify the others.
      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
      _lock->notify_all();
    }
  }
}
stefank@6992 173
// Blocks until all 'n_workers' have called mark_worker_done_with_threads().
// The initial lock-free check avoids acquiring the monitor once the barrier
// has already been passed; the condition is re-checked under the lock.
void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
  // No need to use the barrier if this is single-threaded code.
  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    while ((uint)_n_workers_done_with_threads != n_workers) {
      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}
stefank@6992 183
// Processes all root groups: 'strong_roots'/'weak_roots' are applied to
// oops, the CLD closures to class-loader data, and 'code_roots' to code
// blobs selected by 'so'. The weak closures may be NULL for a strong-only
// scan (see process_strong_roots). Safe to run from multiple GC workers:
// each SH_PS_* group is claimed by exactly one worker via
// _process_strong_tasks; the StringTable chunk work is shared by all.
void SharedHeap::process_roots(bool activate_scope,
                               ScanningOption so,
                               OopClosure* strong_roots,
                               OopClosure* weak_roots,
                               CLDClosure* strong_cld_closure,
                               CLDClosure* weak_cld_closure,
                               CodeBlobClosure* code_roots) {
  StrongRootsScope srs(this, activate_scope);

  // General roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set up stream
  // in a method not running in a GC worker. Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.

  // Iterating over the CLDG and the Threads are done early to allow G1 to
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the thread process the weak CLDs and nmethods.

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Some CLDs contained in the thread frames should be considered strong.
  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);

  // This is the point where this worker thread will not find more strong CLDs/nmethods.
  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
  active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());

  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(strong_roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(strong_roots);

  // SystemDictionary is the one group that distinguishes strong from weak
  // roots internally, so both closures are handed down.
  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    if (CollectedHeap::use_parallel_gc_threads()) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(weak_roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

  // Reset the SubTasksDone so it can be reused for the next iteration.
  _process_strong_tasks->all_tasks_completed();
}
stefank@6992 273
// Treats every root as strong: forwards to process_roots() with the same
// oop closure and the same CLD closure for both the strong and weak slots.
void SharedHeap::process_all_roots(bool activate_scope,
                                   ScanningOption so,
                                   OopClosure* roots,
                                   CLDClosure* cld_closure,
                                   CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, roots,
                cld_closure, cld_closure,
                code_closure);
}
jrose@1424 284
// Strong-only scan: forwards to process_roots() with NULL for the weak oop
// and weak CLD closures, so weak roots are skipped entirely.
void SharedHeap::process_strong_roots(bool activate_scope,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CLDClosure* cld_closure,
                                      CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, NULL,
                cld_closure, NULL,
                code_closure);
}
coleenp@4037 295
duke@435 296
// Trivial liveness predicate: reports every object as live. Used below so
// JNIHandles::weak_oops_do visits all weak JNI handles unconditionally.
class AlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;
duke@435 302
// Applies 'root_closure' to the weak roots handled here: currently only
// the global (weak) JNI handles.
void SharedHeap::process_weak_roots(OopClosure* root_closure) {
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, root_closure);
}
duke@435 307
// Installs the barrier set on the heap and caches it in oopDesc so oop
// update code can reach it without going through the heap.
void SharedHeap::set_barrier_set(BarrierSet* bs) {
  _barrier_set = bs;
  // Cached barrier set for fast access in oops
  oopDesc::set_bs(bs);
}
duke@435 313
// Post-initialization hook: runs the base class's post-initialization,
// then sets up reference processing.
void SharedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
}
duke@435 318
coleenp@4037 319 void SharedHeap::ref_processing_init() {}
duke@435 320
duke@435 321 // Some utilities.
ysr@777 322 void SharedHeap::print_size_transition(outputStream* out,
ysr@777 323 size_t bytes_before,
duke@435 324 size_t bytes_after,
duke@435 325 size_t capacity) {
ysr@777 326 out->print(" %d%s->%d%s(%d%s)",
duke@435 327 byte_size_in_proper_unit(bytes_before),
duke@435 328 proper_unit_for_byte_size(bytes_before),
duke@435 329 byte_size_in_proper_unit(bytes_after),
duke@435 330 proper_unit_for_byte_size(bytes_after),
duke@435 331 byte_size_in_proper_unit(capacity),
duke@435 332 proper_unit_for_byte_size(capacity));
duke@435 333 }

mercurial