Thu, 22 Sep 2011 10:57:37 -0700
6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
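
For illustration, a minimal sketch of the idea described in the summary: the closure used to scan oops during an evacuation pause holds a reference processor and lets it "discover" each java.lang.ref.Reference it visits. The closure name and structure below are hypothetical, not the actual G1 code; the helper calls shown (ReferenceProcessor::discover_reference(), oopDesc::is_instanceRef(), instanceKlass::reference_type()) are real HotSpot APIs of this era.

    // Hypothetical sketch only - not part of this changeset's sources.
    class ScanAndDiscoverClosure : public OopClosure {
      ReferenceProcessor* _rp;  // the STW reference processor
    public:
      ScanAndDiscoverClosure(ReferenceProcessor* rp) : _rp(rp) { }
      virtual void do_oop(oop* p) {
        oop obj = *p;
        // ... evacuate/copy obj as usual ...
        // If obj is a java.lang.ref.Reference, ask the reference
        // processor to record ("discover") it so its referent can be
        // handled at the end of the pause.
        if (obj != NULL && obj->is_instanceRef()) {
          _rp->discover_reference(obj,
              instanceKlass::cast(obj->klass())->reference_type());
        }
      }
      virtual void do_oop(narrowOop* p) { /* same, for compressed oops */ }
    };
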
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPermGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer       PSMarkSweep::_accumulated_time;
unsigned int       PSMarkSweep::_total_invocations = 0;
jlong              PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);  // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity.  For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();
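
  // Setting MarkSweepAlwaysCompactCount to 1 makes this collection compact
  // maximally; IntFlagSetting below restores the previous value when the
  // scope ends.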
  int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output the
    // customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());
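
    // Reference discovery happens as a side effect of marking.  The two
    // 'verify' arguments ask the reference processor to sanity-check that
    // discovery was previously disabled and that no discovered references
    // are left over from an earlier cycle.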
    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
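
    // Move the references discovered during this collection onto the
    // java.lang.ref pending list; passing NULL selects the serial
    // (single-threaded) enqueue path rather than a parallel task executor.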
    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
            " young_gen_capacity: " SIZE_FORMAT
            " perm_gen_capacity: " SIZE_FORMAT " ",
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
            perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 perm_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause,
                                 heap->collector_policy());

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC, so
        // it would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do perm gen after heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}
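
// If the adaptive generation boundary is in use, the full GC may be able to
// avoid copying eden's live data into the old gen by instead moving the
// old/young boundary up so that the live data simply becomes part of the
// old gen.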
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden.  Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false;  // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;  // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K:  "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top.  (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear.  Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
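
  // The region between to_space->top() and to_space->end() is unallocated
  // at this point, so it can serve as scratch space for the preserved-mark
  // records.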
  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}

void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
  _revisit_klass_stack.clear(true);
  _revisit_mdo_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());  // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
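    // is_alive distinguishes live referents, mark_and_push keeps (marks)
    // referents that must survive, and follow_stack drains the marking
    // stack; the trailing NULL selects serial reference processing.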
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack();  // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit memoized mdo's and clear unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(is_alive_closure());
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(_marking_stack.is_empty(), "stack should be empty by now");
}

void PSMarkSweep::mark_sweep_phase2() {
  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST.  If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array.  If perm_gen is not traversed last, a klassOop may get
  // overwritten.  This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so.  See comment under phase4.
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();

  perm_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
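// Used for the weak JNI handle roots in phase 3: every remaining handle is
// treated as live, since handles that referred to non-surviving objects
// have already been cleared by this point.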
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());  // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4.  All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  perm_gen->compact();
  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  _time_of_last_gc = os::javaTimeMillis();
}