|
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
|
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
|
elapsedTimer        PSMarkSweep::_accumulated_time;
jlong               PSMarkSweep::_time_of_last_gc   = 0;
CollectorCounters*  PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);     // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}
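
// Note: the "vanilla" ReferenceProcessor above discovers references over the
// heap's whole reserved region with the default (serial, non-MT) settings;
// PSMarkSweep is the single-threaded full-GC path of the ParallelScavenge heap.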
|
// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or perform any other specialized
// behavior, it needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}
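
// Note: UIntFlagSetting is a scoped (RAII) flag override; it restores the
// previous value of MarkSweepAlwaysCompactCount when invoke() returns, so
// forcing the count to 1 (compact fully on this collection) only affects
// this single maximum-compaction invocation.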
|
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }
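  // (check_active_before_gc() returns true when a thread is inside a JNI
  // critical region; the collection is deferred and GC_locker will request
  // it again once the last thread leaves its critical region.)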
|
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);
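
    // The collection proper runs in four phases: phase 1 marks all live
    // objects by tracing from the strong roots; phase 2 computes the new
    // (compacted) address of every live object; phase 3 adjusts all
    // pointers to refer to the new addresses; phase 4 moves the objects.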
|
    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();
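
    // restore_marks() reinstates the object headers saved during marking:
    // mark words that carried a lock, hash code or GC age were pushed on the
    // preserved-mark stacks set up in allocate_stacks() and are now put back.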
|
    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }
|
    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      if (young_gen_empty) {
        modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
      }
    }
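    // (If the young gen is empty there can be no old-to-young pointers, so
    // the card table over the old gen can simply be cleared; otherwise all
    // of its cards are dirtied, since compaction may have created or moved
    // old-to-young references anywhere in the old gen.)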
|
    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
|
    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                       heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT,
                              old_gen->capacity_in_bytes(),
                              young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
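
        // (max_eden_size is the room left for eden if the survivor spaces
        // keep their current capacities: eden may use whatever part of the
        // young gen the two survivor spaces do not.)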
|
        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                       heap->total_collections());
      }
    }
|
    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }
|
  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}
|
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }
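
  // (The absorb is a generation-boundary move: the old gen's virtual space
  // expands into the young gen's so eden's live data is taken over in place
  // rather than copied, hence the UseAdaptiveGCBoundary requirement above.)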
|
  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();
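
  // (Illustrative arithmetic, with assumed numbers: 60M of live eden data
  // plus a 10M padded promotion average gives 70M, which align_size_up()
  // rounds up to the next multiple of the generations' alignment.)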
|
  if (absorb_size >= eden_capacity) {
    return false; // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false; // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }
|
  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }
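  // (fill_with_objects() plants dummy filler objects so the gap stays
  // parseable by heap walkers; a gap smaller than min_fill_size() cannot
  // hold even the smallest filler object, so the absorb must be abandoned.)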
|
  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}
|
void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}
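
// (pointer_delta(..., sizeof(jbyte)) yields the free space in bytes; e.g. an
// assumed 1M of free to_space and an 8-byte PreservedMark -- its real size is
// platform-dependent -- leave room for 131072 in-place preserved marks before
// the overflow stacks must be used.)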
|
void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}
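
// (Passing true to clear() frees the stacks' backing segments as well as
// emptying them; _marking_stack.clear() keeps its segments cached for reuse.)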
|
void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true);
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
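
// (Ordering in phase 1 matters: reference objects are processed only after
// the strong-root transitive closure is complete, and class, nmethod, string
// and symbol unloading runs last, once liveness is fully settled, each step
// consulting is_alive_closure().)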
|
void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so.  See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}
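
// (precompact() walks the live objects and installs each one's destination
// address as a forwarding pointer in its mark word; phases 3 and 4 read
// those forwarding pointers to retarget references and slide the objects.)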
|
// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;
|
void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}
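
// (Phase 3 revisits the same strong roots as phase 1, plus the weak roots,
// but with the adjust-pointer closure: every root slot is rewritten to the
// destination address computed in phase 2.)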
|
void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}
|
jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}
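
// (os::javaTimeNanos() is monotonic, and NANOSECS_PER_MILLISEC (1,000,000)
// converts the reading to milliseconds; the negative-delta check is kept as
// a safety net, per the note referenced above.)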
|
void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}