Mon, 07 Jul 2014 10:12:40 +0200
8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com
/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/workgroup.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

SharedHeap* SharedHeap::_sh;

// The set of potentially parallel tasks in root scanning.
enum SH_process_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,
  SH_PS_FlatProfiler_oops_do,
  SH_PS_Management_oops_do,
  SH_PS_SystemDictionary_oops_do,
  SH_PS_ClassLoaderDataGraph_oops_do,
  SH_PS_jvmti_oops_do,
  SH_PS_CodeCache_oops_do,
  // Leave this one last.
  SH_PS_NumElements
};
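// Each value above indexes a subtask slot in the SubTasksDone instance
// (_process_strong_tasks) created in the SharedHeap constructor below.
// Parallel workers race on is_task_claimed(<task>) in process_roots(), so
// each of these root groups is scanned by exactly one worker per
// strong-roots iteration.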
SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _rem_set(NULL),
  _strong_roots_scope(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  _workers(NULL)
{
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  _sh = this;  // _sh is static, should be set only once.
  if ((UseParNewGC ||
      (UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled ||
                              CMSParallelRemarkEnabled)) ||
       UseG1GC) &&
      ParallelGCThreads > 0) {
    _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
    if (_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _workers->initialize_workers();
    }
  }
}
int SharedHeap::n_termination() {
  return _process_strong_tasks->n_threads();
}

void SharedHeap::set_n_termination(int t) {
  _process_strong_tasks->set_n_threads(t);
}

bool SharedHeap::heap_lock_held_for_gc() {
  Thread* t = Thread::current();
  return    Heap_lock->owned_by_self()
         || (   (t->is_GC_task_thread() || t->is_VM_thread())
             && _thread_holds_heap_lock_for_gc);
}

void SharedHeap::set_par_threads(uint t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  _n_par_threads = t;
  _process_strong_tasks->set_n_threads(t);
}
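// set_par_threads() (and set_n_termination()) must be called before the GC
// workers run, from a thread that is not itself a GC worker; see the comment
// about _n_termination in process_roots() below. The value set here is also
// what n_par_threads() returns when process_roots() reports completion to the
// thread-scan barrier in StrongRootsScope.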
#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif
SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
  return _strong_roots_scope;
}

void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
  assert(scope != NULL, "Illegal argument");
  _strong_roots_scope = scope;
}

void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == scope, "Wrong scope unregistered");
  _strong_roots_scope = NULL;
}
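// The strong roots parity alternates between 1 and 2 for successive
// strong-roots iterations (it is 0 only before the first iteration). Each
// thread records the parity under which its stack was last claimed, so even
// with several parallel workers every thread stack is scanned at most once
// per iteration.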
void SharedHeap::change_strong_roots_parity() {
  // Also set the new collection parity.
  assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
         "Not in range.");
  _strong_roots_parity++;
  if (_strong_roots_parity == 3) _strong_roots_parity = 1;
  assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
         "Not in range.");
}
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
{
  if (_active) {
    _sh->register_strong_roots_scope(this);
    _sh->change_strong_roots_parity();
    // Zero the claimed high water mark in the StringTable
    StringTable::clear_parallel_claimed_index();
  }
}

SharedHeap::StrongRootsScope::~StrongRootsScope() {
  if (_active) {
    _sh->unregister_strong_roots_scope(this);
  }
}

Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
  // The Thread work barrier is only needed by G1.
  // No need to use the barrier if this is single-threaded code.
  if (UseG1GC && n_workers > 0) {
    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
    if (new_value == n_workers) {
      // This thread is last. Notify the others.
      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
      _lock->notify_all();
    }
  }
}

void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
  // No need to use the barrier if this is single-threaded code.
  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    while ((uint)_n_workers_done_with_threads != n_workers) {
      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}
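// Taken together, the two methods above form a barrier over
// _n_workers_done_with_threads: every worker calls
// mark_worker_done_with_threads() once it has finished scanning the thread
// stacks in process_roots(), and the last worker to arrive notifies _lock.
// The waiting side is only needed by G1 (see the comment above), which uses
// it to make sure all strong CLDs/nmethods discovered on thread stacks have
// been handed out before any worker starts on the weak ones.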
void SharedHeap::process_roots(bool activate_scope,
                               ScanningOption so,
                               OopClosure* strong_roots,
                               OopClosure* weak_roots,
                               CLDClosure* strong_cld_closure,
                               CLDClosure* weak_cld_closure,
                               CodeBlobClosure* code_roots) {
  StrongRootsScope srs(this, activate_scope);

  // General roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set upstream,
  // in a method not running in a GC worker. Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.

  // Iterating over the CLDG and the Threads is done early to allow G1 to
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the threads process the weak CLDs and nmethods.
  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Some CLDs contained in the thread frames should be considered strong.
  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);

  // This is the point where this worker thread will not find more strong CLDs/nmethods.
  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
  active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());

  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(strong_roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(strong_roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    if (CollectedHeap::use_parallel_gc_threads()) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(weak_roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

  _process_strong_tasks->all_tasks_completed();
}
void SharedHeap::process_all_roots(bool activate_scope,
                                   ScanningOption so,
                                   OopClosure* roots,
                                   CLDClosure* cld_closure,
                                   CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, roots,
                cld_closure, cld_closure,
                code_closure);
}

void SharedHeap::process_strong_roots(bool activate_scope,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CLDClosure* cld_closure,
                                      CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, NULL,
                cld_closure, NULL,
                code_closure);
}
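// Both wrappers above delegate to process_roots(). Passing the same closures
// for the strong and weak slots (process_all_roots) visits everything with one
// closure, while passing NULL for the weak slots (process_strong_roots) skips
// the weak CLDs, the weak SystemDictionary oops and the StringTable.
//
// Illustrative sketch only, not a call site in this file; the closure types
// are hypothetical stand-ins for collector-specific closures:
//
//   MyOopClosure      mark_oops;   // work to apply to each root oop*
//   MyCLDClosure      mark_clds;   // applied to strong ClassLoaderData
//   MyCodeBlobClosure mark_blobs;  // applied to nmethods on thread stacks
//   SharedHeap::heap()->process_strong_roots(true /* activate_scope */,
//                                            SharedHeap::SO_None,
//                                            &mark_oops, &mark_clds, &mark_blobs);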
class AlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;

void SharedHeap::process_weak_roots(OopClosure* root_closure) {
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, root_closure);
}
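// Because always_true reports every referent as live, the call above applies
// root_closure to all weak global JNI handles and clears none of them;
// collectors that want dead weak handles cleared pass their own is_alive
// closure to JNIHandles::weak_oops_do elsewhere.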
void SharedHeap::set_barrier_set(BarrierSet* bs) {
  _barrier_set = bs;
  // Cached barrier set for fast access in oops
  oopDesc::set_bs(bs);
}

void SharedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();
}

void SharedHeap::ref_processing_init() {}
// Some utilities.
void SharedHeap::print_size_transition(outputStream* out,
                                       size_t bytes_before,
                                       size_t bytes_after,
                                       size_t capacity) {
  out->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
             byte_size_in_proper_unit(bytes_before),
             proper_unit_for_byte_size(bytes_before),
             byte_size_in_proper_unit(bytes_after),
             proper_unit_for_byte_size(bytes_after),
             byte_size_in_proper_unit(capacity),
             proper_unit_for_byte_size(capacity));
}