Wed, 23 Sep 2009 23:56:15 -0700
Merge
/*
 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_psMarkSweep.cpp.incl"

elapsedTimer PSMarkSweep::_accumulated_time;
unsigned int PSMarkSweep::_total_invocations = 0;
jlong PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr,
                                          true,    // atomic_discovery
                                          false);  // mt_discovery
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  // Before each allocation/collection attempt, find out from the
  // policy object if GCs are, on the whole, taking too long. If so,
  // bail out without attempting a collection. The exceptions are
  // for explicitly requested GCs.
  if (!policy->gc_time_limit_exceeded() ||
      GCCause::is_user_requested_gc(gc_cause) ||
      GCCause::is_serviceability_requested_gc(gc_cause)) {
    IsGCActiveMark mark;

    if (ScavengeBeforeFullGC) {
      PSScavenge::invoke_no_policy();
    }
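
    // IntFlagSetting is a scoped (RAII) helper: it installs the new value of
    // MarkSweepAlwaysCompactCount for the duration of this block and restores
    // the previous value on exit. A count of 1 means every space is compacted
    // fully, which is what a maximal compaction asks for.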
    int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
    IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
    PSMarkSweep::invoke_no_policy(maximum_heap_compaction);
  }
}

// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
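  // (Retiring a TLAB fills its unused tail with a dead filler object, so
  // afterwards every space parses as a contiguous sequence of objects.)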
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output that
    // the customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);
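
    // The collection proper runs in four phases: phase 1 marks all live
    // objects, phase 2 computes each live object's new address, phase 3
    // adjusts every pointer to refer to the new addresses, and phase 4
    // actually moves (compacts) the objects.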
    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;
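
    // If the young gen is completely empty there can be no old-to-young
    // references left, so the mod-ref information covering the old and perm
    // gens can simply be cleared. Otherwise it is invalidated (marked dirty)
    // so the next scavenge rescans those generations.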
    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT
                              " perm_gen_capacity: " SIZE_FORMAT " ",
                              old_gen->capacity_in_bytes(),
                              young_gen->capacity_in_bytes(),
                              perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here. Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
           UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
                               young_gen->from_space()->capacity_in_bytes() -
                               young_gen->to_space()->capacity_in_bytes();
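        // Eden may grow to whatever the survivor spaces leave of the maximum
        // young gen size; feed that bound plus the current occupancy and
        // capacity figures to the policy so it can compute the desired free
        // space in each generation.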
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                                   young_gen->eden_space()->used_in_bytes(),
                                                   old_gen->used_in_bytes(),
                                                   perm_gen->used_in_bytes(),
                                                   young_gen->eden_space()->capacity_in_bytes(),
                                                   old_gen->max_gen_size(),
                                                   max_eden_size,
                                                   true /* full gc */,
                                                   gc_cause);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection. A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation. Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // it would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do the perm gen after the heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    if (PrintGCDetails) {
      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
        if (size_policy->gc_time_limit_exceeded()) {
          gclog_or_tty->print_cr(" GC time is exceeding GCTimeLimit "
                                 "of " UINTX_FORMAT "%%", GCTimeLimit);
        } else {
          gclog_or_tty->print_cr(" GC time would exceed GCTimeLimit "
                                 "of " UINTX_FORMAT "%%", GCTimeLimit);
        }
      }
      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
    }
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden. Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false;  // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;  // Respect young gen minimum size.
  }
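
  // Worked example (illustrative numbers only): with eden_used = 60M, a
  // padded promoted average of 8M and a 64K alignment,
  //   absorb_size = align_size_up(60M + 8M, 64K) = 68M
  // The move goes ahead only if eden's capacity exceeds 68M and the young
  // gen stays at or above its minimum size after shrinking by 68M.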
  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);
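
  // A filler object keeps the old gen parsable across the gap between the
  // old top and the absorbed eden data; if the gap is too small to hold even
  // a minimal filler object, give up on the absorption entirely.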
  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top. (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
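
  // The unused tail of to-space (top to end) doubles as scratch storage for
  // preserved marks, so no separate allocation is needed for them; anything
  // beyond _preserved_count_max spills into the C-heap stacks below.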
  _preserved_mark_stack = NULL;
  _preserved_oop_stack = NULL;

  _marking_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);

  int size = SystemDictionary::number_of_classes() * 2;
  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
  // (#klasses/k)^2, for k ~ 10, appears to be a better initial size, but
  // this will have to do for now until we investigate a more optimal setting.
  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}

void PSMarkSweep::deallocate_stacks() {
  if (_preserved_oop_stack) {
    delete _preserved_mark_stack;
    _preserved_mark_stack = NULL;
    delete _preserved_oop_stack;
    _preserved_oop_stack = NULL;
  }

  delete _marking_stack;
  delete _revisit_klass_stack;
  delete _revisit_mdo_stack;
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  EventMark m("1 mark object");
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    ReferenceProcessor::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());  // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    vmSymbols::oops_do(mark_and_push_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
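  // (Soft, weak, final and phantom references discovered above are either
  // cleared or kept alive here according to the soft ref policy installed
  // for this collection; clear_all_softrefs forces all soft refs to clear.)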
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack();  // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack->is_empty(), "just drained");

  // Visit memoized mdo's and clear unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack->is_empty(), "just drained");

  // Visit symbol and interned string tables and delete unmarked oops
  SymbolTable::unlink(is_alive_closure());
  StringTable::unlink(is_alive_closure());

  assert(_marking_stack->is_empty(), "stack should be empty by now");
}

void PSMarkSweep::mark_sweep_phase2() {
  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  PSMarkSweepDecorator::set_destination_decorator_perm_gen();

  perm_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
 public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;
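
// PSAlwaysTrueClosure answers "alive" for every object. It is safe to use
// for the weak JNI handles below because by phase 3 every weak root that
// referred to a non-surviving object has already been cleared; the survivors
// only need their pointers adjusted.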

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  EventMark m("3 adjust pointers");
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  ReferenceProcessor::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());  // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  vmSymbols::oops_do(adjust_root_pointer_closure());
  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  SymbolTable::oops_do(adjust_root_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  perm_gen->compact();
  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  _time_of_last_gc = os::javaTimeMillis();
}