Thu, 24 Mar 2011 15:47:01 -0700
7029036: Card-table verification hangs with all framework collectors, except G1, even before the first GC
Summary: When verifying clean card ranges, use memory-range-bounded iteration over oops of objects overlapping that range, thus avoiding the otherwise quadratic worst-case cost of scanning large object arrays.
Reviewed-by: jmasa, jwilhelm, tonyp
1 /*
2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/symbolTable.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "code/codeCache.hpp"
29 #include "gc_interface/collectedHeap.inline.hpp"
30 #include "memory/sharedHeap.hpp"
31 #include "oops/oop.inline.hpp"
32 #include "runtime/fprofiler.hpp"
33 #include "runtime/java.hpp"
34 #include "services/management.hpp"
35 #include "utilities/copy.hpp"
36 #include "utilities/workgroup.hpp"
// The single SharedHeap instance; assigned exactly once, in the
// SharedHeap constructor.
SharedHeap* SharedHeap::_sh;
// The set of potentially parallel tasks in strong root scanning.
// Each enumerator names one root group; during process_strong_roots()
// exactly one worker thread claims each group via the SubTasksDone
// mechanism, so every group is scanned once per pass.
enum SH_process_strong_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,
  SH_PS_FlatProfiler_oops_do,
  SH_PS_Management_oops_do,
  SH_PS_SystemDictionary_oops_do,
  SH_PS_jvmti_oops_do,
  SH_PS_SymbolTable_oops_do,
  SH_PS_StringTable_oops_do,
  SH_PS_CodeCache_oops_do,
  // Leave this one last.
  SH_PS_NumElements   // task count; used to size the SubTasksDone instance
};
56 SharedHeap::SharedHeap(CollectorPolicy* policy_) :
57 CollectedHeap(),
58 _collector_policy(policy_),
59 _perm_gen(NULL), _rem_set(NULL),
60 _strong_roots_parity(0),
61 _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
62 _n_par_threads(0),
63 _workers(NULL)
64 {
65 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
66 vm_exit_during_initialization("Failed necessary allocation.");
67 }
68 _sh = this; // ch is static, should be set only once.
69 if ((UseParNewGC ||
70 (UseConcMarkSweepGC && CMSParallelRemarkEnabled) ||
71 UseG1GC) &&
72 ParallelGCThreads > 0) {
73 _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
74 /* are_GC_task_threads */true,
75 /* are_ConcurrentGC_threads */false);
76 if (_workers == NULL) {
77 vm_exit_during_initialization("Failed necessary allocation.");
78 } else {
79 _workers->initialize_workers();
80 }
81 }
82 }
84 bool SharedHeap::heap_lock_held_for_gc() {
85 Thread* t = Thread::current();
86 return Heap_lock->owned_by_self()
87 || ( (t->is_GC_task_thread() || t->is_VM_thread())
88 && _thread_holds_heap_lock_for_gc);
89 }
// Record the number of parallel GC worker threads (t) that will take
// part in the next root-scanning pass, and size the strong-roots
// subtask accounting to match. t must be 0 under the serial collector.
void SharedHeap::set_par_threads(int t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  _n_par_threads = t;
  _process_strong_tasks->set_n_threads(t);
}
97 class AssertIsPermClosure: public OopClosure {
98 public:
99 virtual void do_oop(oop* p) {
100 assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
101 }
102 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
103 };
104 static AssertIsPermClosure assert_is_perm_closure;
106 void SharedHeap::change_strong_roots_parity() {
107 // Also set the new collection parity.
108 assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
109 "Not in range.");
110 _strong_roots_parity++;
111 if (_strong_roots_parity == 3) _strong_roots_parity = 1;
112 assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
113 "Not in range.");
114 }
// Opening an active StrongRootsScope advances the strong-roots parity,
// invalidating the per-thread root claims made during the previous
// scanning pass.
SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
  : MarkScope(activate)
{
  if (_active) {
    outer->change_strong_roots_parity();
  }
}
// No cleanup is needed when a strong-roots scope closes.
SharedHeap::StrongRootsScope::~StrongRootsScope() {
  // nothing particular
}
// Scan all strong roots of the VM, applying "roots" to each discovered
// oop. In a parallel collection each SH_PS_* task below is claimed by
// exactly one worker thread through _process_strong_tasks, so every
// root group is scanned once per pass.
//   activate_scope      - open a new StrongRootsScope (advances parity)
//   collecting_perm_gen - true if the perm gen is being collected too
//   so                  - selects optional root groups (classes, strings,
//                         code cache) to include in this pass
//   roots               - closure applied to each strong root oop
//   code_roots          - closure applied to code-cache blobs
//   perm_blk            - closure for younger-refs iteration over perm gen
//                         (used only when !collecting_perm_gen)
void SharedHeap::process_strong_roots(bool activate_scope,
                                      bool collecting_perm_gen,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CodeBlobClosure* code_roots,
                                      OopsInGenClosure* perm_blk) {
  StrongRootsScope srs(this, activate_scope);
  // General strong roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(roots);
    ReferenceProcessor::oops_do(roots);
    // Consider perm-gen discovered lists to be strong.
    perm_gen()->ref_processor()->weak_oops_do(roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(roots);
  // All threads execute this; the individual threads are task groups.
  // (Thread stacks are partitioned among the workers internally, so no
  // SH_PS_* claim is used here.)
  if (ParallelGCThreads > 0) {
    Threads::possibly_parallel_oops_do(roots, code_roots);
  } else {
    Threads::oops_do(roots, code_roots);
  }
  if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    if (so & SO_AllClasses) {
      SystemDictionary::oops_do(roots);
    } else
      if (so & SO_SystemClasses) {
        SystemDictionary::always_strong_oops_do(roots);
      }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SymbolTable_oops_do)) {
    // Intentionally empty: the task is still claimed so SubTasksDone's
    // completion accounting stays balanced across all enumerated tasks.
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
    if (so & SO_Strings || (!collecting_perm_gen && !JavaObjectsInPerm)) {
      StringTable::oops_do(roots);
    }
    if (JavaObjectsInPerm) {
      // Verify the string table contents are in the perm gen
      NOT_PRODUCT(StringTable::oops_do(&assert_is_perm_closure));
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_CodeCache) {
      // (Currently, CMSCollector uses this to do intermediate-strength collections.)
      assert(collecting_perm_gen, "scanning all of code cache");
      assert(code_roots != NULL, "must supply closure for code cache");
      if (code_roots != NULL) {
        CodeCache::blobs_do(code_roots);
      }
    } else if (so & (SO_SystemClasses|SO_AllClasses)) {
      if (!collecting_perm_gen) {
        // If we are collecting from class statics, but we are not going to
        // visit all of the CodeCache, collect from the non-perm roots if any.
        // This makes the code cache function temporarily as a source of strong
        // roots for oops, until the next major collection.
        //
        // If collecting_perm_gen is true, we require that this phase will call
        // CodeCache::do_unloading. This will kill off nmethods with expired
        // weak references, such as stale invokedynamic targets.
        CodeCache::scavenge_root_nmethods_do(code_roots);
      }
    }
    // Verify if the code cache contents are in the perm gen
    NOT_PRODUCT(CodeBlobToOopClosure assert_code_is_perm(&assert_is_perm_closure, /*do_marking=*/ false));
    NOT_PRODUCT(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_perm));
  }

  if (!collecting_perm_gen) {
    // All threads perform this; coordination is handled internally.
    rem_set()->younger_refs_iterate(perm_gen(), perm_blk);
  }
  _process_strong_tasks->all_tasks_completed();
}
217 class AlwaysTrueClosure: public BoolObjectClosure {
218 public:
219 void do_object(oop p) { ShouldNotReachHere(); }
220 bool do_object_b(oop p) { return true; }
221 };
222 static AlwaysTrueClosure always_true;
224 class SkipAdjustingSharedStrings: public OopClosure {
225 OopClosure* _clo;
226 public:
227 SkipAdjustingSharedStrings(OopClosure* clo) : _clo(clo) {}
229 virtual void do_oop(oop* p) {
230 oop o = (*p);
231 if (!o->is_shared_readwrite()) {
232 _clo->do_oop(p);
233 }
234 }
235 virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
236 };
238 // Unmarked shared Strings in the StringTable (which got there due to
239 // being in the constant pools of as-yet unloaded shared classes) were
240 // not marked and therefore did not have their mark words preserved.
241 // These entries are also deliberately not purged from the string
242 // table during unloading of unmarked strings. If an identity hash
243 // code was computed for any of these objects, it will not have been
244 // cleared to zero during the forwarding process or by the
245 // RecursiveAdjustSharedObjectClosure, and will be confused by the
246 // adjusting process as a forwarding pointer. We need to skip
247 // forwarding StringTable entries which contain unmarked shared
248 // Strings. Actually, since shared strings won't be moving, we can
249 // just skip adjusting any shared entries in the string table.
251 void SharedHeap::process_weak_roots(OopClosure* root_closure,
252 CodeBlobClosure* code_roots,
253 OopClosure* non_root_closure) {
254 // Global (weak) JNI handles
255 JNIHandles::weak_oops_do(&always_true, root_closure);
257 CodeCache::blobs_do(code_roots);
258 if (UseSharedSpaces && !DumpSharedSpaces) {
259 SkipAdjustingSharedStrings skip_closure(root_closure);
260 StringTable::oops_do(&skip_closure);
261 } else {
262 StringTable::oops_do(root_closure);
263 }
264 }
// Install the heap's barrier set and publish it to oopDesc so that oop
// barrier code can reach it without going through the heap.
void SharedHeap::set_barrier_set(BarrierSet* bs) {
  _barrier_set = bs;
  // Cached barrier set for fast access in oops
  oopDesc::set_bs(bs);
}
// Post-heap-initialization hook: sets up reference processing.
void SharedHeap::post_initialize() {
  ref_processing_init();
}
// Initialize the reference processor of the permanent generation.
void SharedHeap::ref_processing_init() {
  perm_gen()->ref_processor_init();
}
280 // Some utilities.
281 void SharedHeap::print_size_transition(outputStream* out,
282 size_t bytes_before,
283 size_t bytes_after,
284 size_t capacity) {
285 out->print(" %d%s->%d%s(%d%s)",
286 byte_size_in_proper_unit(bytes_before),
287 proper_unit_for_byte_size(bytes_before),
288 byte_size_in_proper_unit(bytes_after),
289 proper_unit_for_byte_size(bytes_after),
290 byte_size_in_proper_unit(capacity),
291 proper_unit_for_byte_size(capacity));
292 }