Fri, 31 May 2013 14:32:44 +0200
8022880: False sharing between PSPromotionManager instances
Summary: Pad the PSPromotionManager instances in the manager array.
Reviewed-by: brutisso, jmasa
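
For reference, the fix works by padding each element of the manager array so
that two worker threads never write fields that live on the same cache line.
Below is a minimal sketch of the technique, illustrative only and not the
actual patch: the 64-byte line size, the Manager fields, and the array length
are all assumptions.

    #include <cstddef>

    // Hypothetical stand-in for PSPromotionManager: one hot, frequently
    // written field per GC worker thread.
    struct Manager {
      volatile size_t _claimed;
    };

    // Assumed cache line size; real code would query the platform.
    const size_t kCacheLineSize = 64;

    // Pad each element up to a cache-line multiple so adjacent array slots
    // cannot share a line, eliminating false sharing between workers.
    struct PaddedManager : public Manager {
      char _pad[kCacheLineSize - (sizeof(Manager) % kCacheLineSize)];
    };

    // One padded manager per worker; each thread touches only its own slot.
    static PaddedManager _manager_array[16];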
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/generationSizer.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
#include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/gcCause.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "services/memoryService.hpp"
#include "utilities/events.hpp"
#include "utilities/stack.inline.hpp"

elapsedTimer PSMarkSweep::_accumulated_time;
jlong PSMarkSweep::_time_of_last_gc = 0;
CollectorCounters* PSMarkSweep::_counters = NULL;

void PSMarkSweep::initialize() {
  MemRegion mr = Universe::heap()->reserved_region();
  _ref_processor = new ReferenceProcessor(mr);  // a vanilla ref proc
  _counters = new CollectorCounters("PSMarkSweep", 1);
}

// This method contains all heap specific policy for invoking mark sweep.
// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
// the heap. It will do nothing further. If we need to bail out for policy
// reasons, scavenge before the full gc, or perform any other specialized
// behavior, it needs to be added here.
//
// Note that this method should only be called from the vm_thread while
// at a safepoint!
//
// Note that the all_soft_refs_clear flag in the collector policy
// may be true because this method can be called without intervening
// activity. For example, when the heap space is tight and full measures
// are being taken to free space.

void PSMarkSweep::invoke(bool maximum_heap_compaction) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  PSAdaptiveSizePolicy* policy = heap->size_policy();
  IsGCActiveMark mark;

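  // Optionally run a scavenge first. A young collection is much cheaper than
  // a full collection and typically empties eden, so the mark-compact that
  // follows has less live data to process.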
  if (ScavengeBeforeFullGC) {
    PSScavenge::invoke_no_policy();
  }

  const bool clear_all_soft_refs =
    heap->collector_policy()->should_clear_all_soft_refs();

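  // For a maximally compacting collection, force MarkSweepAlwaysCompactCount
  // to 1 for the duration of this call; UIntFlagSetting is an RAII helper
  // that restores the previous flag value when this scope exits.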
  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
}

// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start(os::elapsed_counter());
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    ref_processor()->setup_policy(clear_all_softrefs);

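    // The collection proper runs as four serial phases: mark the live
    // objects, compute their new (compacted) addresses, adjust all pointers
    // to refer to the new addresses, and finally move the objects.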
    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

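    // The card table contents are stale once compaction has moved objects.
    // If the young gen is now empty there can be no old-to-young pointers,
    // so the old gen's cards can simply be cleared; otherwise dirty them all
    // so the next scavenge re-examines the old gen.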
    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      if (young_gen_empty) {
        modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
      }
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                               heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
                              " young_gen_capacity: " SIZE_FORMAT,
                              old_gen->capacity_in_bytes(),
                              young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here. Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
           UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
               young_gen->from_space()->capacity_in_bytes() +
               young_gen->to_space()->capacity_in_bytes(),
               "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
                               young_gen->from_space()->capacity_in_bytes() -
                               young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection. A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation. Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                               heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here. This is after the GC so
        // it would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end(os::elapsed_counter());

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}

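// When the adaptive GC boundary is in use, try to absorb eden's remaining
// live data into the old gen by moving the young/old boundary up, rather
// than by copying objects. On success eden (and hence the young gen, if the
// survivor spaces are also empty) ends the collection empty.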
bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
                                             PSYoungGen* young_gen,
                                             PSOldGen* old_gen) {
  MutableSpace* const eden_space = young_gen->eden_space();
  assert(!eden_space->is_empty(), "eden must be non-empty");
  assert(young_gen->virtual_space()->alignment() ==
         old_gen->virtual_space()->alignment(), "alignments do not match");

  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
    return false;
  }

  // Both generations must be completely committed.
  if (young_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }
  if (old_gen->virtual_space()->uncommitted_size() != 0) {
    return false;
  }

  // Figure out how much to take from eden. Include the average amount promoted
  // in the total; otherwise the next young gen GC will simply bail out to a
  // full GC.
  const size_t alignment = old_gen->virtual_space()->alignment();
  const size_t eden_used = eden_space->used_in_bytes();
  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
  const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
  const size_t eden_capacity = eden_space->capacity_in_bytes();

  if (absorb_size >= eden_capacity) {
    return false;  // Must leave some space in eden.
  }

  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
  if (new_young_size < young_gen->min_gen_size()) {
    return false;  // Respect young gen minimum size.
  }

  if (TraceAdaptiveGCBoundary && Verbose) {
    gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
                        "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
                        "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
                        "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
                        absorb_size / K,
                        eden_capacity / K, (eden_capacity - absorb_size) / K,
                        young_gen->from_space()->used_in_bytes() / K,
                        young_gen->to_space()->used_in_bytes() / K,
                        young_gen->capacity_in_bytes() / K, new_young_size / K);
  }

  // Fill the unused part of the old gen.
  MutableSpace* const old_space = old_gen->object_space();
  HeapWord* const unused_start = old_space->top();
  size_t const unused_words = pointer_delta(old_space->end(), unused_start);

  if (unused_words > 0) {
    if (unused_words < CollectedHeap::min_fill_size()) {
      return false;  // If the old gen cannot be filled, must give up.
    }
    CollectedHeap::fill_with_objects(unused_start, unused_words);
  }

  // Take the live data from eden and set both top and end in the old gen to
  // eden top. (Need to set end because reset_after_change() mangles the region
  // from end to virtual_space->high() in debug builds).
  HeapWord* const new_top = eden_space->top();
  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
                                        absorb_size);
  young_gen->reset_after_change();
  old_space->set_top(new_top);
  old_space->set_end(new_top);
  old_gen->reset_after_change();

  // Update the object start array for the filler object and the data from eden.
  ObjectStartArray* const start_array = old_gen->start_array();
  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
    start_array->allocate_block(p);
  }

  // Could update the promoted average here, but it is not typically updated at
  // full GCs and the value to use is unclear. Something like
  //
  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.

  size_policy->set_bytes_absorbed_from_eden(absorb_size);
  return true;
}

void PSMarkSweep::allocate_stacks() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();

  MutableSpace* to_space = young_gen->to_space();
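  // Use the unused part of to_space as scratch for preserved marks: object
  // headers that the collection overwrites and must put back afterwards in
  // restore_marks(). Anything beyond this capacity overflows into the
  // preserved mark/oop stacks.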
  _preserved_marks = (PreservedMark*)to_space->top();
  _preserved_count = 0;

  // We want to calculate the size in bytes first.
  _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
  // Now divide by the size of a PreservedMark
  _preserved_count_max /= sizeof(PreservedMark);
}

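// Release the per-collection stacks. Passing true to clear() also releases
// the stacks' backing memory, rather than just resetting them for reuse.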
void PSMarkSweep::deallocate_stacks() {
  _preserved_mark_stack.clear(true);
  _preserved_oop_stack.clear(true);
  _marking_stack.clear();
  _objarray_stack.clear(true);
}

void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
  trace(" 1");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  {
    ParallelScavengeHeap::ParStrongRootsScope psrs;
    Universe::oops_do(mark_and_push_closure());
    JNIHandles::oops_do(mark_and_push_closure());  // Global (strong) JNI handles
    CLDToOopClosure mark_and_push_from_cld(mark_and_push_closure());
    CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
    Threads::oops_do(mark_and_push_closure(), &mark_and_push_from_cld, &each_active_code_blob);
    ObjectSynchronizer::oops_do(mark_and_push_closure());
    FlatProfiler::oops_do(mark_and_push_closure());
    Management::oops_do(mark_and_push_closure());
    JvmtiExport::oops_do(mark_and_push_closure());
    SystemDictionary::always_strong_oops_do(mark_and_push_closure());
    ClassLoaderDataGraph::always_strong_oops_do(mark_and_push_closure(), follow_klass_closure(), true);
    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
    //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
  }

  // Flush marking stack.
  follow_stack();

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(is_alive_closure());

  // Unload nmethods.
  CodeCache::do_unloading(is_alive_closure(), purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(is_alive_closure());

  // Delete entries for dead interned strings.
  StringTable::unlink(is_alive_closure());

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();
  _gc_tracer->report_object_count_after_gc(is_alive_closure());
}

void PSMarkSweep::mark_sweep_phase2() {
  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
  trace("2");

  // Now all live objects are marked, compute the new object addresses.

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSOldGen* old_gen = heap->old_gen();

  // Begin compacting into the old gen
  PSMarkSweepDecorator::set_destination_decorator_tenured();

  // This will also compact the young gen spaces.
  old_gen->precompact();
}

// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;

void PSMarkSweep::mark_sweep_phase3() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
  trace("3");

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Need to clear claim bits before the tracing starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());  // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  // SO_AllClasses
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);

  // Now adjust pointers in remaining weak roots. (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  CodeCache::oops_do(adjust_pointer_closure());
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());

  adjust_marks();

  young_gen->adjust_pointers();
  old_gen->adjust_pointers();
}

void PSMarkSweep::mark_sweep_phase4() {
  EventMark m("4 compact heap");
  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
  trace("4");

  // All pointers are now adjusted, move objects accordingly

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  old_gen->compact();
  young_gen->compact();
}

jlong PSMarkSweep::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong ret_val = now - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (ret_val < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
    return 0;
  }
  return ret_val;
}

void PSMarkSweep::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}