src/share/vm/memory/sharedHeap.cpp

changeset 6992:2c6ef90f030a
parent    6972:64ac9c55d666
child     6996:f3aeae1f9fc5
@@ -27,22 +27,23 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/sharedHeap.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/java.hpp"
 #include "services/management.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/workgroup.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
 SharedHeap* SharedHeap::_sh;
 
-// The set of potentially parallel tasks in strong root scanning.
-enum SH_process_strong_roots_tasks {
+// The set of potentially parallel tasks in root scanning.
+enum SH_process_roots_tasks {
   SH_PS_Universe_oops_do,
   SH_PS_JNIHandles_oops_do,
   SH_PS_ObjectSynchronizer_oops_do,
   SH_PS_FlatProfiler_oops_do,
   SH_PS_Management_oops_do,
@@ -56,10 +57,11 @@
 
 SharedHeap::SharedHeap(CollectorPolicy* policy_) :
   CollectedHeap(),
   _collector_policy(policy_),
   _rem_set(NULL),
+  _strong_roots_scope(NULL),
   _strong_roots_parity(0),
   _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
   _workers(NULL)
 {
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
@@ -112,14 +114,27 @@
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 };
 static AssertNonScavengableClosure assert_is_non_scavengable_closure;
 #endif
 
+SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
+  return _strong_roots_scope;
+}
+void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
+  assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
+  assert(scope != NULL, "Illegal argument");
+  _strong_roots_scope = scope;
+}
+void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
+  assert(_strong_roots_scope == scope, "Wrong scope unregistered");
+  _strong_roots_scope = NULL;
+}
+
 void SharedHeap::change_strong_roots_parity() {
   // Also set the new collection parity.
   assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
          "Not in range.");
   _strong_roots_parity++;
   if (_strong_roots_parity == 3) _strong_roots_parity = 1;
   assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
          "Not in range.");
 }
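Note on the parity scheme: the value deliberately cycles 1, 2, 1, 2, ... and
never returns to 0, so each new StrongRootsScope is guaranteed to carry a
parity different from the previous scope's. GC workers compare it against the
parity recorded in each Java thread to claim every thread stack exactly once
per scope. A minimal sketch of that claim, modeled on HotSpot's
Thread::claim_oops_do_par_case(); the sketch is illustrative and not part of
this changeset:

  // First worker to flip the thread's recorded parity to the scope's
  // parity wins the right to scan that thread's stack.
  bool claim_thread(volatile jint* oops_do_parity, jint strong_roots_parity) {
    jint thread_parity = *oops_do_parity;
    if (thread_parity != strong_roots_parity) {
      jint res = Atomic::cmpxchg(strong_roots_parity, oops_do_parity, thread_parity);
      if (res == thread_parity) return true;  // claimed by this worker
    }
    return false;  // already claimed, or another worker won the race
  }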
@@ -126,19 +141,43 @@
 
-SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
-  : MarkScope(activate)
+SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
+  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
 {
   if (_active) {
-    outer->change_strong_roots_parity();
+    _sh->register_strong_roots_scope(this);
+    _sh->change_strong_roots_parity();
     // Zero the claimed high water mark in the StringTable
     StringTable::clear_parallel_claimed_index();
   }
 }
 
 SharedHeap::StrongRootsScope::~StrongRootsScope() {
-  // nothing particular
-}
-
-void SharedHeap::process_strong_roots(bool activate_scope,
-                                      ScanningOption so,
-                                      OopClosure* roots,
-                                      KlassClosure* klass_closure) {
+  if (_active) {
+    _sh->unregister_strong_roots_scope(this);
+  }
+}
+
+Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
+
+void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
+  // The Thread work barrier is only needed by G1.
+  // No need to use the barrier if this is single-threaded code.
+  if (UseG1GC && n_workers > 0) {
+    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
+    if (new_value == n_workers) {
+      // This thread is last. Notify the others.
+      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+      _lock->notify_all();
+    }
+  }
+}
+
+void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
+  // No need to use the barrier if this is single-threaded code.
+  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
+    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
+    while ((uint)_n_workers_done_with_threads != n_workers) {
+      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
+    }
+  }
+}
+
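Note on the thread-work barrier: mark_worker_done_with_threads() and
wait_until_all_workers_done_with_threads() form a one-shot barrier. Each
worker bumps the counter once it has finished scanning its share of thread
stacks, and the last arrival notifies all waiters on _lock. A hypothetical
G1-style worker outline (the call sites shown are illustrative; the real
callers live in G1's root-processing code, which is not part of this hunk):

  void worker_root_processing(SharedHeap* sh, uint n_workers) {
    // 1. Scan thread stacks, discovering strong CLDs and nmethods
    //    (done inside process_roots via Threads::possibly_parallel_oops_do).
    // 2. Report that this worker will find no more strong CLDs/nmethods:
    sh->active_strong_roots_scope()->mark_worker_done_with_threads(n_workers);
    // 3. Before touching weak CLDs/nmethods, wait for every other worker so
    //    the strong/weak classification can no longer change:
    sh->active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_workers);
  }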
@@ -145,87 +184,89 @@
+void SharedHeap::process_roots(bool activate_scope,
+                               ScanningOption so,
+                               OopClosure* strong_roots,
+                               OopClosure* weak_roots,
+                               CLDClosure* strong_cld_closure,
+                               CLDClosure* weak_cld_closure,
+                               CodeBlobClosure* code_roots) {
   StrongRootsScope srs(this, activate_scope);
 
-  // General strong roots.
+  // General roots.
   assert(_strong_roots_parity != 0, "must have called prologue code");
+  assert(code_roots != NULL, "code root closure should always be set");
   // _n_termination for _process_strong_tasks should be set up stream
   // in a method not running in a GC worker. Otherwise the GC worker
   // could be trying to change the termination condition while the task
   // is executing in another GC worker.
+
+  // Iterating over the CLDG and the Threads are done early to allow G1 to
+  // first process the strong CLDs and nmethods and then, after a barrier,
+  // let the thread process the weak CLDs and nmethods.
+
+  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
+    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
+  }
+
+  // Some CLDs contained in the thread frames should be considered strong.
+  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
+  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
+  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
+  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
+
+  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
+
+  // This is the point where this worker thread will not find more strong CLDs/nmethods.
+  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
+  active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());
+
   if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
-    Universe::oops_do(roots);
+    Universe::oops_do(strong_roots);
   }
   // Global (strong) JNI handles
   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
-    JNIHandles::oops_do(roots);
-
-  CodeBlobToOopClosure code_roots(roots, true);
-
-  CLDToOopClosure roots_from_clds(roots);
-  // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
-  // CLDs which are strongly reachable from the thread stacks.
-  CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
-  // All threads execute this; the individual threads are task groups.
-  if (CollectedHeap::use_parallel_gc_threads()) {
-    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, &code_roots);
-  } else {
-    Threads::oops_do(roots, roots_from_clds_p, &code_roots);
-  }
+    JNIHandles::oops_do(strong_roots);
 
   if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
-    ObjectSynchronizer::oops_do(roots);
+    ObjectSynchronizer::oops_do(strong_roots);
   if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
-    FlatProfiler::oops_do(roots);
+    FlatProfiler::oops_do(strong_roots);
   if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
-    Management::oops_do(roots);
+    Management::oops_do(strong_roots);
   if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
-    JvmtiExport::oops_do(roots);
+    JvmtiExport::oops_do(strong_roots);
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
-    if (so & SO_AllClasses) {
-      SystemDictionary::oops_do(roots);
-    } else if (so & SO_SystemClasses) {
-      SystemDictionary::always_strong_oops_do(roots);
-    } else {
-      fatal("We should always have selected either SO_AllClasses or SO_SystemClasses");
-    }
-  }
-
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
-    if (so & SO_AllClasses) {
-      ClassLoaderDataGraph::oops_do(roots, klass_closure, /* must_claim */ false);
-    } else if (so & SO_SystemClasses) {
-      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, /* must_claim */ true);
-    }
+    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
   }
 
   // All threads execute the following. A specific chunk of buckets
   // from the StringTable are the individual tasks.
-  if (so & SO_Strings) {
+  if (weak_roots != NULL) {
     if (CollectedHeap::use_parallel_gc_threads()) {
-      StringTable::possibly_parallel_oops_do(roots);
+      StringTable::possibly_parallel_oops_do(weak_roots);
     } else {
-      StringTable::oops_do(roots);
+      StringTable::oops_do(weak_roots);
     }
   }
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
     if (so & SO_ScavengeCodeCache) {
-      assert(&code_roots != NULL, "must supply closure for code cache");
+      assert(code_roots != NULL, "must supply closure for code cache");
 
       // We only visit parts of the CodeCache when scavenging.
-      CodeCache::scavenge_root_nmethods_do(&code_roots);
+      CodeCache::scavenge_root_nmethods_do(code_roots);
     }
     if (so & SO_AllCodeCache) {
-      assert(&code_roots != NULL, "must supply closure for code cache");
+      assert(code_roots != NULL, "must supply closure for code cache");
 
       // CMSCollector uses this to do intermediate-strength collections.
       // We scan the entire code cache, since CodeCache::do_unloading is not called.
-      CodeCache::blobs_do(&code_roots);
+      CodeCache::blobs_do(code_roots);
     }
     // Verify that the code cache contents are not subject to
     // movement by a scavenging collection.
-    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
+    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
     DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
   }
 
   _process_strong_tasks->all_tasks_completed();
 }
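Note on the StringTable pass: possibly_parallel_oops_do() depends on the
claimed index that the StrongRootsScope constructor resets through
clear_parallel_claimed_index(). A sketch of the chunked claiming pattern it
implies (the chunk size and field names here are assumptions for
illustration, not this file's code):

  // Workers race to claim disjoint runs of buckets; Atomic::add hands out
  // the next chunk until the whole table has been covered.
  void parallel_oops_do_sketch(OopClosure* f) {
    const int ClaimChunkSize = 32;                     // illustrative
    while (true) {
      int end   = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx);
      int start = end - ClaimChunkSize;
      if (start >= _table_size) break;                 // nothing left to claim
      for (int i = start; i < MIN2(end, _table_size); i++) {
        // ... apply f to each interned String oop in bucket i ...
      }
    }
  }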
@@ -232,5 +273,28 @@
+
+void SharedHeap::process_all_roots(bool activate_scope,
+                                   ScanningOption so,
+                                   OopClosure* roots,
+                                   CLDClosure* cld_closure,
+                                   CodeBlobClosure* code_closure) {
+  process_roots(activate_scope, so,
+                roots, roots,
+                cld_closure, cld_closure,
+                code_closure);
+}
+
+void SharedHeap::process_strong_roots(bool activate_scope,
+                                      ScanningOption so,
+                                      OopClosure* roots,
+                                      CLDClosure* cld_closure,
+                                      CodeBlobClosure* code_closure) {
+  process_roots(activate_scope, so,
+                roots, NULL,
+                cld_closure, NULL,
+                code_closure);
+}
+
 
 class AlwaysTrueClosure: public BoolObjectClosure {
 public:
   bool do_object_b(oop p) { return true; }
 };
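Note on the new entry points: process_all_roots() and process_strong_roots()
express the old interface in terms of process_roots(). Passing the same
closures in both the strong and weak slots visits all roots, while passing
NULL for the weak slots restricts the pass to strong roots. An illustrative
caller (the closure names and the SO_None scanning option are assumptions
based on sharedHeap.hpp, not shown in this hunk):

  MyOopClosure roots;          // hypothetical collector oop closure
  MyCLDClosure cld_closure;    // hypothetical CLD closure
  CodeBlobToOopClosure code_closure(&roots, !CodeBlobToOopClosure::FixRelocations);

  // Full scan: strong and weak roots treated alike.
  sh->process_all_roots(true /* activate_scope */, SharedHeap::SO_None,
                        &roots, &cld_closure, &code_closure);

  // Strong-only scan: the weak oop/CLD slots become NULL internally.
  sh->process_strong_roots(true /* activate_scope */, SharedHeap::SO_None,
                           &roots, &cld_closure, &code_closure);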
