Fri, 16 Mar 2012 16:14:04 +0100
7154517: Build error in hotspot-gc without precompiled headers
Reviewed-by: jcoomes, brutisso
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPermGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer PSMarkSweep::_accumulated_time;
unsigned int PSMarkSweep::_total_invocations = 0;
jlong PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);  // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before full gc, or any other specialized behavior, it
// needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity. For example when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();
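
  // MarkSweepAlwaysCompactCount controls how often mark-sweep compacts the
  // heap fully rather than tolerating some dead space. Temporarily forcing
  // it to 1 via the IntFlagSetting below guarantees that this collection
  // compacts fully when maximum compaction was requested.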
  int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output the
    // customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);
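
    // The four phases below implement a classic LISP-2 style sliding
    // compaction: phase 1 marks live objects, phase 2 computes each live
    // object's forwarding address, phase 3 adjusts all pointers to the new
    // addresses, and phase 4 actually moves the objects.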
    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;
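
    // If the young gen is completely empty there can be no old-to-young
    // pointers left, so the cards covering the old and perm ranges can
    // simply be cleared; otherwise conservatively dirty them all so the
    // next scavenge rescans the entire old/perm range.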
    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          // Use SIZE_FORMAT for the size_t capacities; %d truncates on LP64.
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT
                              " perm_gen_capacity: " SIZE_FORMAT " ",
                              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
                              perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here. Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
           UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
                               young_gen->from_space()->capacity_in_bytes() -
                               young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                                   young_gen->eden_space()->used_in_bytes(),
                                                   old_gen->used_in_bytes(),
                                                   perm_gen->used_in_bytes(),
                                                   young_gen->eden_space()->capacity_in_bytes(),
                                                   old_gen->max_gen_size(),
                                                   max_eden_size,
                                                   true /* full gc*/,
                                                   gc_cause,
                                                   heap->collector_policy());

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection. A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation. Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // it would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do the perm gen after the heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  return true;
}

bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden. Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();
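
  // Worked example with hypothetical numbers: if eden_used is 48M, the padded
  // average promoted is 8M, and the generation alignment is 64K, then
  // absorb_size = align_size_up(48M + 8M, 64K) = 56M. The checks below reject
  // the absorption if that amount would fill eden completely or shrink the
  // young gen below its minimum size.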
  if (absorb_size >= eden_capacity) {
    return false;  // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;  // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);
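
  // The heap must remain parseable: a gap smaller than the minimum filler
  // object size cannot be covered by a filler object, so give up rather than
  // leave an unwalkable hole between old_space->top() and old_space->end().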
  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top. (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;
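
  // The preserved-mark buffer lives in the unused tail of to_space, between
  // top() and end(). Illustrative sizing (hypothetical numbers): with 1M of
  // free space in to_space and sizeof(PreservedMark) == 8 bytes, up to
  // 131072 marks fit in the buffer before overflowing to the preserved
  // stacks.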

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}


void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
  _revisit_klass_stack.clear(true);
  _revisit_mdo_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());  // Global (strong) JNI handles
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    ref_processor()->process_discovered_references(
      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
  }

  // Follow system dictionary roots and unload classes
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Follow code cache roots
  CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
                          purged_class);
  follow_stack();  // Flush marking stack

  // Update subklass/sibling/implementor links of live klasses
  follow_weak_klass_links();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit memoized mdo's and clear unmarked weak refs
  follow_mdo_weak_refs();
  assert(_marking_stack.is_empty(), "just drained");

  // Visit interned string tables and delete unmarked oops
  StringTable::unlink(is_alive_closure());
  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  assert(_marking_stack.is_empty(), "stack should be empty by now");
}


void PSMarkSweep::mark_sweep_phase2() {
  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();

  // Compact the perm gen into the perm gen
  perm_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
 public:
  void do_object(oop p) { ShouldNotReachHere(); }
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // General strong roots.
  Universe::oops_do(adjust_root_pointer_closure());
  JNIHandles::oops_do(adjust_root_pointer_closure());  // Global (strong) JNI handles
  Threads::oops_do(adjust_root_pointer_closure(), NULL);
  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
  FlatProfiler::oops_do(adjust_root_pointer_closure());
  Management::oops_do(adjust_root_pointer_closure());
  JvmtiExport::oops_do(adjust_root_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_root_pointer_closure());
  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_root_pointer_closure());
  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
  perm_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  // It is imperative that we traverse perm_gen first in phase4. All
  // classes must be allocated earlier than their instances, and traversing
  // perm_gen first makes sure that all klassOops have moved to their new
  // location before any instance does a dispatch through its klass!
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  perm_gen->compact();
  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
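  // os::javaTimeNanos() is monotonic, so dividing it by NANOSECS_PER_MILLISEC
  // (1,000,000) yields a monotone millisecond clock suitable for deltas.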
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: "INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}