src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp

author:      coleenp
date:        Thu, 27 Jan 2011 16:11:27 -0800
changeset:   2497:3582bf76420e
parent:      2467:9afee0b9fc1d
child:       2651:92da084fefc9
permissions: -rw-r--r--

6990754: Use native memory and reference counting to implement SymbolTable
Summary: move symbols from permgen into C heap and reference count them
Reviewed-by: never, acorn, jmasa, stefank

duke@435 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "classfile/symbolTable.hpp"
stefank@2314 27 #include "classfile/systemDictionary.hpp"
stefank@2314 28 #include "code/codeCache.hpp"
stefank@2314 29 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
stefank@2314 30 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
stefank@2314 31 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
stefank@2314 32 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
stefank@2314 33 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
stefank@2314 34 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
stefank@2314 35 #include "gc_implementation/parallelScavenge/psPermGen.hpp"
stefank@2314 36 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
stefank@2314 37 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
stefank@2314 38 #include "gc_implementation/shared/isGCActiveMark.hpp"
stefank@2314 39 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 40 #include "gc_interface/gcCause.hpp"
stefank@2314 41 #include "memory/gcLocker.inline.hpp"
stefank@2314 42 #include "memory/referencePolicy.hpp"
stefank@2314 43 #include "memory/referenceProcessor.hpp"
stefank@2314 44 #include "oops/oop.inline.hpp"
stefank@2314 45 #include "runtime/biasedLocking.hpp"
stefank@2314 46 #include "runtime/fprofiler.hpp"
stefank@2314 47 #include "runtime/safepoint.hpp"
stefank@2314 48 #include "runtime/vmThread.hpp"
stefank@2314 49 #include "services/management.hpp"
stefank@2314 50 #include "services/memoryService.hpp"
stefank@2314 51 #include "utilities/events.hpp"
stefank@2314 52 #include "utilities/stack.inline.hpp"
duke@435 53
duke@435 54 elapsedTimer PSMarkSweep::_accumulated_time;
duke@435 55 unsigned int PSMarkSweep::_total_invocations = 0;
duke@435 56 jlong PSMarkSweep::_time_of_last_gc = 0;
duke@435 57 CollectorCounters* PSMarkSweep::_counters = NULL;
duke@435 58
duke@435 59 void PSMarkSweep::initialize() {
duke@435 60 MemRegion mr = Universe::heap()->reserved_region();
duke@435 61 _ref_processor = new ReferenceProcessor(mr,
duke@435 62 true, // atomic_discovery
duke@435 63 false); // mt_discovery
jcoomes@809 64 _counters = new CollectorCounters("PSMarkSweep", 1);
duke@435 65 }
duke@435 66
duke@435 67 // This method contains all heap specific policy for invoking mark sweep.
duke@435 68 // PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
duke@435 69 // the heap. It will do nothing further. If we need to bail out for policy
duke@435 70 // reasons, scavenge before full gc, or any other specialized behavior, it
duke@435 71 // needs to be added here.
duke@435 72 //
duke@435 73 // Note that this method should only be called from the vm_thread while
duke@435 74 // at a safepoint!
jmasa@1822 75 //
jmasa@1822 76 // Note that the all_soft_refs_clear flag in the collector policy
jmasa@1822 77 // may be true because this method can be called without intervening
jmasa@1822 78 // activity. For example, when the heap space is tight and full measures
jmasa@1822 79 // are being taken to free space.
jmasa@1822 80
duke@435 81 void PSMarkSweep::invoke(bool maximum_heap_compaction) {
duke@435 82 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
duke@435 83 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
duke@435 84 assert(!Universe::heap()->is_gc_active(), "not reentrant");
duke@435 85
duke@435 86 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 87 GCCause::Cause gc_cause = heap->gc_cause();
duke@435 88 PSAdaptiveSizePolicy* policy = heap->size_policy();
jmasa@1822 89 IsGCActiveMark mark;
duke@435 90
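// If requested via -XX:+ScavengeBeforeFullGC, run a young collection first so
// the full collection starts with less live data in the young gen.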
jmasa@1822 91 if (ScavengeBeforeFullGC) {
jmasa@1822 92 PSScavenge::invoke_no_policy();
jmasa@1822 93 }
duke@435 94
jmasa@1822 95 const bool clear_all_soft_refs =
jmasa@1822 96 heap->collector_policy()->should_clear_all_soft_refs();
duke@435 97
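// When maximum compaction is requested, force a full compaction by temporarily
// setting MarkSweepAlwaysCompactCount to 1; IntFlagSetting restores the
// previous value when this scope exits.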
jmasa@1822 98 int count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
jmasa@1822 99 IntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
jmasa@1822 100 PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
duke@435 101 }
duke@435 102
duke@435 103 // This method contains no policy. You should probably
duke@435 104 // be calling invoke() instead.
duke@435 105 void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
duke@435 106 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
duke@435 107 assert(ref_processor() != NULL, "Sanity");
duke@435 108
duke@435 109 if (GC_locker::check_active_before_gc()) {
duke@435 110 return;
duke@435 111 }
duke@435 112
duke@435 113 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 114 GCCause::Cause gc_cause = heap->gc_cause();
duke@435 115 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 116 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
duke@435 117
jmasa@1822 118 // The scope of casr should end after code that can change
jmasa@1822 119 // CollectorPolicy::_should_clear_all_soft_refs.
jmasa@1822 120 ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());
jmasa@1822 121
duke@435 122 PSYoungGen* young_gen = heap->young_gen();
duke@435 123 PSOldGen* old_gen = heap->old_gen();
duke@435 124 PSPermGen* perm_gen = heap->perm_gen();
duke@435 125
duke@435 126 // Increment the invocation count
duke@435 127 heap->increment_total_collections(true /* full */);
duke@435 128
jmasa@698 129 // Save information needed to minimize mangling
jmasa@698 130 heap->record_gen_tops_before_GC();
jmasa@698 131
duke@435 132 // We need to track unique mark sweep invocations as well.
duke@435 133 _total_invocations++;
duke@435 134
duke@435 135 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
duke@435 136
duke@435 137 if (PrintHeapAtGC) {
duke@435 138 Universe::print_heap_before_gc();
duke@435 139 }
duke@435 140
duke@435 141 // Fill in TLABs
duke@435 142 heap->accumulate_statistics_all_tlabs();
duke@435 143 heap->ensure_parsability(true); // retire TLABs
duke@435 144
duke@435 145 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
duke@435 146 HandleMark hm; // Discard invalid handles created during verification
duke@435 147 gclog_or_tty->print(" VerifyBeforeGC:");
duke@435 148 Universe::verify(true);
duke@435 149 }
duke@435 150
duke@435 151 // Verify object start arrays
duke@435 152 if (VerifyObjectStartArray &&
duke@435 153 VerifyBeforeGC) {
duke@435 154 old_gen->verify_object_start_array();
duke@435 155 perm_gen->verify_object_start_array();
duke@435 156 }
duke@435 157
ysr@1050 158 heap->pre_full_gc_dump();
ysr@1050 159
duke@435 160 // Filled in below to track the state of the young gen after the collection.
duke@435 161 bool eden_empty;
duke@435 162 bool survivors_empty;
duke@435 163 bool young_gen_empty;
duke@435 164
duke@435 165 {
duke@435 166 HandleMark hm;
duke@435 167 const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
duke@435 168 // This is useful for debugging but don't change the output
duke@435 169 // the customer sees.
duke@435 170 const char* gc_cause_str = "Full GC";
duke@435 171 if (is_system_gc && PrintGCDetails) {
duke@435 172 gc_cause_str = "Full GC (System)";
duke@435 173 }
duke@435 174 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
duke@435 175 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
duke@435 176 TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
duke@435 177 TraceCollectorStats tcs(counters());
duke@435 178 TraceMemoryManagerStats tms(true /* Full GC */);
duke@435 179
duke@435 180 if (TraceGen1Time) accumulated_time()->start();
duke@435 181
duke@435 182 // Let the size policy know we're starting
duke@435 183 size_policy->major_collection_begin();
duke@435 184
duke@435 185 // When collecting the permanent generation methodOops may be moving,
duke@435 186 // so we either have to flush all bcp data or convert it into bci.
duke@435 187 CodeCache::gc_prologue();
duke@435 188 Threads::gc_prologue();
duke@435 189 BiasedLocking::preserve_marks();
duke@435 190
duke@435 191 // Capture heap size before collection for printing.
duke@435 192 size_t prev_used = heap->used();
duke@435 193
duke@435 194 // Capture perm gen size before collection for sizing.
duke@435 195 size_t perm_gen_prev_used = perm_gen->used_in_bytes();
duke@435 196
duke@435 197 // For PrintGCDetails
duke@435 198 size_t old_gen_prev_used = old_gen->used_in_bytes();
duke@435 199 size_t young_gen_prev_used = young_gen->used_in_bytes();
duke@435 200
duke@435 201 allocate_stacks();
duke@435 202
duke@435 203 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
duke@435 204 COMPILER2_PRESENT(DerivedPointerTable::clear());
duke@435 205
duke@435 206 ref_processor()->enable_discovery();
ysr@892 207 ref_processor()->setup_policy(clear_all_softrefs);
duke@435 208
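// The collection proper runs in four phases: phase 1 marks live objects,
// phase 2 computes new addresses, phase 3 adjusts pointers, and phase 4
// moves objects to their new locations.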
duke@435 209 mark_sweep_phase1(clear_all_softrefs);
duke@435 210
duke@435 211 mark_sweep_phase2();
duke@435 212
duke@435 213 // Don't add any more derived pointers during phase3
duke@435 214 COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
duke@435 215 COMPILER2_PRESENT(DerivedPointerTable::set_active(false));
duke@435 216
duke@435 217 mark_sweep_phase3();
duke@435 218
duke@435 219 mark_sweep_phase4();
duke@435 220
duke@435 221 restore_marks();
duke@435 222
duke@435 223 deallocate_stacks();
duke@435 224
jmasa@698 225 if (ZapUnusedHeapArea) {
jmasa@698 226 // Do a complete mangle (top to end) because the usage for
jmasa@698 227 // scratch does not maintain a top pointer.
jmasa@698 228 young_gen->to_space()->mangle_unused_area_complete();
jmasa@698 229 }
jmasa@698 230
duke@435 231 eden_empty = young_gen->eden_space()->is_empty();
duke@435 232 if (!eden_empty) {
duke@435 233 eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
duke@435 234 }
duke@435 235
duke@435 236 // Update heap occupancy information which is used as
duke@435 237 // input to soft ref clearing policy at the next gc.
duke@435 238 Universe::update_heap_info_at_gc();
duke@435 239
duke@435 240 survivors_empty = young_gen->from_space()->is_empty() &&
jmasa@698 241 young_gen->to_space()->is_empty();
duke@435 242 young_gen_empty = eden_empty && survivors_empty;
duke@435 243
duke@435 244 BarrierSet* bs = heap->barrier_set();
duke@435 245 if (bs->is_a(BarrierSet::ModRef)) {
duke@435 246 ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
duke@435 247 MemRegion old_mr = heap->old_gen()->reserved();
duke@435 248 MemRegion perm_mr = heap->perm_gen()->reserved();
duke@435 249 assert(perm_mr.end() <= old_mr.start(), "Generations out of order");
duke@435 250
duke@435 251 if (young_gen_empty) {
duke@435 252 modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
duke@435 253 } else {
duke@435 254 modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
duke@435 255 }
duke@435 256 }
duke@435 257
duke@435 258 BiasedLocking::restore_marks();
duke@435 259 Threads::gc_epilogue();
duke@435 260 CodeCache::gc_epilogue();
kamg@2467 261 JvmtiExport::gc_epilogue();
duke@435 262
duke@435 263 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
duke@435 264
duke@435 265 ref_processor()->enqueue_discovered_references(NULL);
duke@435 266
duke@435 267 // Update time of last GC
duke@435 268 reset_millis_since_last_gc();
duke@435 269
duke@435 270 // Let the size policy know we're done
duke@435 271 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
duke@435 272
duke@435 273 if (UseAdaptiveSizePolicy) {
duke@435 274
duke@435 275 if (PrintAdaptiveSizePolicy) {
duke@435 276 gclog_or_tty->print("AdaptiveSizeStart: ");
duke@435 277 gclog_or_tty->stamp();
duke@435 278 gclog_or_tty->print_cr(" collection: %d ",
duke@435 279 heap->total_collections());
duke@435 280 if (Verbose) {
duke@435 281 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
duke@435 282 " perm_gen_capacity: %d ",
duke@435 283 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
duke@435 284 perm_gen->capacity_in_bytes());
duke@435 285 }
duke@435 286 }
duke@435 287
duke@435 288 // Don't check if the size_policy is ready here. Let
duke@435 289 // the size_policy check that internally.
duke@435 290 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
duke@435 291 ((gc_cause != GCCause::_java_lang_system_gc) ||
duke@435 292 UseAdaptiveSizePolicyWithSystemGC)) {
duke@435 293 // Calculate optimal free space amounts
duke@435 294 assert(young_gen->max_size() >
duke@435 295 young_gen->from_space()->capacity_in_bytes() +
duke@435 296 young_gen->to_space()->capacity_in_bytes(),
duke@435 297 "Sizes of space in young gen are out-of-bounds");
duke@435 298 size_t max_eden_size = young_gen->max_size() -
duke@435 299 young_gen->from_space()->capacity_in_bytes() -
duke@435 300 young_gen->to_space()->capacity_in_bytes();
duke@435 301 size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
duke@435 302 young_gen->eden_space()->used_in_bytes(),
duke@435 303 old_gen->used_in_bytes(),
duke@435 304 perm_gen->used_in_bytes(),
duke@435 305 young_gen->eden_space()->capacity_in_bytes(),
duke@435 306 old_gen->max_gen_size(),
duke@435 307 max_eden_size,
duke@435 308 true /* full gc*/,
jmasa@1822 309 gc_cause,
jmasa@1822 310 heap->collector_policy());
duke@435 311
duke@435 312 heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
duke@435 313
duke@435 314 // Don't resize the young generation at a major collection. A
duke@435 315 // desired young generation size may have been calculated but
duke@435 316 // resizing the young generation complicates the code because the
duke@435 317 // resizing of the old generation may have moved the boundary
duke@435 318 // between the young generation and the old generation. Let the
duke@435 319 // young generation resizing happen at the minor collections.
duke@435 320 }
duke@435 321 if (PrintAdaptiveSizePolicy) {
duke@435 322 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
duke@435 323 heap->total_collections());
duke@435 324 }
duke@435 325 }
duke@435 326
duke@435 327 if (UsePerfData) {
duke@435 328 heap->gc_policy_counters()->update_counters();
duke@435 329 heap->gc_policy_counters()->update_old_capacity(
duke@435 330 old_gen->capacity_in_bytes());
duke@435 331 heap->gc_policy_counters()->update_young_capacity(
duke@435 332 young_gen->capacity_in_bytes());
duke@435 333 }
duke@435 334
duke@435 335 heap->resize_all_tlabs();
duke@435 336
duke@435 337 // We collected the perm gen, so we'll resize it here.
duke@435 338 perm_gen->compute_new_size(perm_gen_prev_used);
duke@435 339
duke@435 340 if (TraceGen1Time) accumulated_time()->stop();
duke@435 341
duke@435 342 if (PrintGC) {
duke@435 343 if (PrintGCDetails) {
duke@435 344 // Don't print a GC timestamp here. This is after the GC so
duke@435 345 // would be confusing.
duke@435 346 young_gen->print_used_change(young_gen_prev_used);
duke@435 347 old_gen->print_used_change(old_gen_prev_used);
duke@435 348 }
duke@435 349 heap->print_heap_change(prev_used);
duke@435 350 // Do perm gen after heap because prev_used does
duke@435 351 // not include the perm gen (done this way in the other
duke@435 352 // collectors).
duke@435 353 if (PrintGCDetails) {
duke@435 354 perm_gen->print_used_change(perm_gen_prev_used);
duke@435 355 }
duke@435 356 }
duke@435 357
duke@435 358 // Track memory usage and detect low memory
duke@435 359 MemoryService::track_memory_usage();
duke@435 360 heap->update_counters();
duke@435 361 }
duke@435 362
duke@435 363 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
duke@435 364 HandleMark hm; // Discard invalid handles created during verification
duke@435 365 gclog_or_tty->print(" VerifyAfterGC:");
duke@435 366 Universe::verify(false);
duke@435 367 }
duke@435 368
duke@435 369 // Re-verify object start arrays
duke@435 370 if (VerifyObjectStartArray &&
duke@435 371 VerifyAfterGC) {
duke@435 372 old_gen->verify_object_start_array();
duke@435 373 perm_gen->verify_object_start_array();
duke@435 374 }
duke@435 375
jmasa@698 376 if (ZapUnusedHeapArea) {
jmasa@698 377 old_gen->object_space()->check_mangled_unused_area_complete();
jmasa@698 378 perm_gen->object_space()->check_mangled_unused_area_complete();
jmasa@698 379 }
jmasa@698 380
duke@435 381 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
duke@435 382
duke@435 383 if (PrintHeapAtGC) {
duke@435 384 Universe::print_heap_after_gc();
duke@435 385 }
jmasa@981 386
ysr@1050 387 heap->post_full_gc_dump();
ysr@1050 388
jmasa@981 389 #ifdef TRACESPINNING
jmasa@981 390 ParallelTaskTerminator::print_termination_counts();
jmasa@981 391 #endif
duke@435 392 }
duke@435 393
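// Try to absorb the live data in eden directly into the old gen by moving the
// boundary between the generations, instead of copying it. Returns true if the
// boundary was moved and eden is now effectively empty.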
duke@435 394 bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
duke@435 395 PSYoungGen* young_gen,
duke@435 396 PSOldGen* old_gen) {
duke@435 397 MutableSpace* const eden_space = young_gen->eden_space();
duke@435 398 assert(!eden_space->is_empty(), "eden must be non-empty");
duke@435 399 assert(young_gen->virtual_space()->alignment() ==
duke@435 400 old_gen->virtual_space()->alignment(), "alignments do not match");
duke@435 401
duke@435 402 if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
duke@435 403 return false;
duke@435 404 }
duke@435 405
duke@435 406 // Both generations must be completely committed.
duke@435 407 if (young_gen->virtual_space()->uncommitted_size() != 0) {
duke@435 408 return false;
duke@435 409 }
duke@435 410 if (old_gen->virtual_space()->uncommitted_size() != 0) {
duke@435 411 return false;
duke@435 412 }
duke@435 413
duke@435 414 // Figure out how much to take from eden. Include the average amount promoted
duke@435 415 // in the total; otherwise the next young gen GC will simply bail out to a
duke@435 416 // full GC.
duke@435 417 const size_t alignment = old_gen->virtual_space()->alignment();
duke@435 418 const size_t eden_used = eden_space->used_in_bytes();
jcoomes@916 419 const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
duke@435 420 const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
duke@435 421 const size_t eden_capacity = eden_space->capacity_in_bytes();
duke@435 422
duke@435 423 if (absorb_size >= eden_capacity) {
duke@435 424 return false; // Must leave some space in eden.
duke@435 425 }
duke@435 426
duke@435 427 const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
duke@435 428 if (new_young_size < young_gen->min_gen_size()) {
duke@435 429 return false; // Respect young gen minimum size.
duke@435 430 }
duke@435 431
duke@435 432 if (TraceAdaptiveGCBoundary && Verbose) {
duke@435 433 gclog_or_tty->print(" absorbing " SIZE_FORMAT "K: "
duke@435 434 "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
duke@435 435 "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
duke@435 436 "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
duke@435 437 absorb_size / K,
duke@435 438 eden_capacity / K, (eden_capacity - absorb_size) / K,
duke@435 439 young_gen->from_space()->used_in_bytes() / K,
duke@435 440 young_gen->to_space()->used_in_bytes() / K,
duke@435 441 young_gen->capacity_in_bytes() / K, new_young_size / K);
duke@435 442 }
duke@435 443
duke@435 444 // Fill the unused part of the old gen.
duke@435 445 MutableSpace* const old_space = old_gen->object_space();
jcoomes@916 446 HeapWord* const unused_start = old_space->top();
jcoomes@916 447 size_t const unused_words = pointer_delta(old_space->end(), unused_start);
duke@435 448
jcoomes@916 449 if (unused_words > 0) {
jcoomes@916 450 if (unused_words < CollectedHeap::min_fill_size()) {
jcoomes@916 451 return false; // If the old gen cannot be filled, must give up.
jcoomes@916 452 }
jcoomes@916 453 CollectedHeap::fill_with_objects(unused_start, unused_words);
duke@435 454 }
duke@435 455
duke@435 456 // Take the live data from eden and set both top and end in the old gen to
duke@435 457 // eden top. (Need to set end because reset_after_change() mangles the region
duke@435 458 // from end to virtual_space->high() in debug builds).
duke@435 459 HeapWord* const new_top = eden_space->top();
duke@435 460 old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
duke@435 461 absorb_size);
duke@435 462 young_gen->reset_after_change();
duke@435 463 old_space->set_top(new_top);
duke@435 464 old_space->set_end(new_top);
duke@435 465 old_gen->reset_after_change();
duke@435 466
duke@435 467 // Update the object start array for the filler object and the data from eden.
duke@435 468 ObjectStartArray* const start_array = old_gen->start_array();
jcoomes@916 469 for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
jcoomes@916 470 start_array->allocate_block(p);
duke@435 471 }
duke@435 472
duke@435 473 // Could update the promoted average here, but it is not typically updated at
duke@435 474 // full GCs and the value to use is unclear. Something like
duke@435 475 //
duke@435 476 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
duke@435 477
duke@435 478 size_policy->set_bytes_absorbed_from_eden(absorb_size);
duke@435 479 return true;
duke@435 480 }
duke@435 481
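// The preserved-mark area is carved out of the unused to_space: its top() is
// used as the base, and the space remaining determines how many marks fit.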
duke@435 482 void PSMarkSweep::allocate_stacks() {
duke@435 483 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 484 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 485
duke@435 486 PSYoungGen* young_gen = heap->young_gen();
duke@435 487
duke@435 488 MutableSpace* to_space = young_gen->to_space();
duke@435 489 _preserved_marks = (PreservedMark*)to_space->top();
duke@435 490 _preserved_count = 0;
duke@435 491
duke@435 492 // We want to calculate the size in bytes first.
duke@435 493 _preserved_count_max = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
duke@435 494 // Now divide by the size of a PreservedMark
duke@435 495 _preserved_count_max /= sizeof(PreservedMark);
duke@435 496 }
duke@435 497
duke@435 498
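// Drop the contents of the marking and preserved-mark stacks; passing true to
// clear() also releases the segments cached by the stack implementation.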
duke@435 499 void PSMarkSweep::deallocate_stacks() {
jcoomes@2191 500 _preserved_mark_stack.clear(true);
jcoomes@2191 501 _preserved_oop_stack.clear(true);
jcoomes@2191 502 _marking_stack.clear();
jcoomes@2191 503 _objarray_stack.clear(true);
jcoomes@2191 504 _revisit_klass_stack.clear(true);
jcoomes@2191 505 _revisit_mdo_stack.clear(true);
duke@435 506 }
duke@435 507
duke@435 508 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
duke@435 509 // Recursively traverse all live objects and mark them
duke@435 510 EventMark m("1 mark object");
duke@435 511 TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
duke@435 512 trace(" 1");
duke@435 513
duke@435 514 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 515 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 516
duke@435 517 // General strong roots.
jrose@1424 518 {
jrose@1424 519 ParallelScavengeHeap::ParStrongRootsScope psrs;
jrose@1424 520 Universe::oops_do(mark_and_push_closure());
jrose@1424 521 ReferenceProcessor::oops_do(mark_and_push_closure());
jrose@1424 522 JNIHandles::oops_do(mark_and_push_closure()); // Global (strong) JNI handles
jrose@1424 523 CodeBlobToOopClosure each_active_code_blob(mark_and_push_closure(), /*do_marking=*/ true);
jrose@1424 524 Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
jrose@1424 525 ObjectSynchronizer::oops_do(mark_and_push_closure());
jrose@1424 526 FlatProfiler::oops_do(mark_and_push_closure());
jrose@1424 527 Management::oops_do(mark_and_push_closure());
jrose@1424 528 JvmtiExport::oops_do(mark_and_push_closure());
jrose@1424 529 SystemDictionary::always_strong_oops_do(mark_and_push_closure());
jrose@1424 530 // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
jrose@1424 531 //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
jrose@1424 532 }
duke@435 533
duke@435 534 // Flush marking stack.
duke@435 535 follow_stack();
duke@435 536
duke@435 537 // Process reference objects found during marking
duke@435 538 {
ysr@892 539 ref_processor()->setup_policy(clear_all_softrefs);
duke@435 540 ref_processor()->process_discovered_references(
ysr@888 541 is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
duke@435 542 }
duke@435 543
duke@435 544 // Follow system dictionary roots and unload classes
duke@435 545 bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
duke@435 546
duke@435 547 // Follow code cache roots
duke@435 548 CodeCache::do_unloading(is_alive_closure(), mark_and_push_closure(),
duke@435 549 purged_class);
duke@435 550 follow_stack(); // Flush marking stack
duke@435 551
duke@435 552 // Update subklass/sibling/implementor links of live klasses
duke@435 553 follow_weak_klass_links();
jcoomes@2191 554 assert(_marking_stack.is_empty(), "just drained");
duke@435 555
ysr@1376 556 // Visit memoized mdo's and clear unmarked weak refs
ysr@1376 557 follow_mdo_weak_refs();
jcoomes@2191 558 assert(_marking_stack.is_empty(), "just drained");
ysr@1376 559
coleenp@2497 560 // Visit interned string tables and delete unmarked oops
duke@435 561 StringTable::unlink(is_alive_closure());
coleenp@2497 562 // Clean up unreferenced symbols in symbol table.
coleenp@2497 563 SymbolTable::unlink();
duke@435 564
jcoomes@2191 565 assert(_marking_stack.is_empty(), "stack should be empty by now");
duke@435 566 }
duke@435 567
duke@435 568
duke@435 569 void PSMarkSweep::mark_sweep_phase2() {
duke@435 570 EventMark m("2 compute new addresses");
duke@435 571 TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
duke@435 572 trace("2");
duke@435 573
duke@435 574 // Now all live objects are marked, compute the new object addresses.
duke@435 575
duke@435 576 // It is imperative that we traverse perm_gen LAST. If dead space is
duke@435 577 // allowed, a range of dead objects may get overwritten by a dead int
duke@435 578 // array. If perm_gen is not traversed last a klassOop may get
duke@435 579 // overwritten. This is fine since it is dead, but if the class has dead
duke@435 580 // instances we have to skip them, and in order to find their size we
duke@435 581 // need the klassOop!
duke@435 582 //
duke@435 583 // It is not required that we traverse spaces in the same order in
duke@435 584 // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
duke@435 585 // tracking expects us to do so. See comment under phase4.
duke@435 586
duke@435 587 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 588 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 589
duke@435 590 PSOldGen* old_gen = heap->old_gen();
duke@435 591 PSPermGen* perm_gen = heap->perm_gen();
duke@435 592
duke@435 593 // Begin compacting into the old gen
duke@435 594 PSMarkSweepDecorator::set_destination_decorator_tenured();
duke@435 595
duke@435 596 // This will also compact the young gen spaces.
duke@435 597 old_gen->precompact();
duke@435 598
duke@435 599 // Compact the perm gen into the perm gen
duke@435 600 PSMarkSweepDecorator::set_destination_decorator_perm_gen();
duke@435 601
duke@435 602 perm_gen->precompact();
duke@435 603 }
duke@435 604
duke@435 605 // This should be moved to the shared markSweep code!
duke@435 606 class PSAlwaysTrueClosure: public BoolObjectClosure {
duke@435 607 public:
duke@435 608 void do_object(oop p) { ShouldNotReachHere(); }
duke@435 609 bool do_object_b(oop p) { return true; }
duke@435 610 };
duke@435 611 static PSAlwaysTrueClosure always_true;
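// always_true is passed as the is-alive predicate when walking the weak JNI
// handles in phase 3 below, so every remaining handle has its pointer adjusted.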
duke@435 612
duke@435 613 void PSMarkSweep::mark_sweep_phase3() {
duke@435 614 // Adjust the pointers to reflect the new locations
duke@435 615 EventMark m("3 adjust pointers");
duke@435 616 TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
duke@435 617 trace("3");
duke@435 618
duke@435 619 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 620 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 621
duke@435 622 PSYoungGen* young_gen = heap->young_gen();
duke@435 623 PSOldGen* old_gen = heap->old_gen();
duke@435 624 PSPermGen* perm_gen = heap->perm_gen();
duke@435 625
duke@435 626 // General strong roots.
duke@435 627 Universe::oops_do(adjust_root_pointer_closure());
duke@435 628 ReferenceProcessor::oops_do(adjust_root_pointer_closure());
duke@435 629 JNIHandles::oops_do(adjust_root_pointer_closure()); // Global (strong) JNI handles
jrose@1424 630 Threads::oops_do(adjust_root_pointer_closure(), NULL);
duke@435 631 ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
duke@435 632 FlatProfiler::oops_do(adjust_root_pointer_closure());
duke@435 633 Management::oops_do(adjust_root_pointer_closure());
duke@435 634 JvmtiExport::oops_do(adjust_root_pointer_closure());
duke@435 635 // SO_AllClasses
duke@435 636 SystemDictionary::oops_do(adjust_root_pointer_closure());
jrose@1424 637 //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());
duke@435 638
duke@435 639 // Now adjust pointers in remaining weak roots. (All of which should
duke@435 640 // have been cleared if they pointed to non-surviving objects.)
duke@435 641 // Global (weak) JNI handles
duke@435 642 JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
duke@435 643
duke@435 644 CodeCache::oops_do(adjust_pointer_closure());
duke@435 645 StringTable::oops_do(adjust_root_pointer_closure());
duke@435 646 ref_processor()->weak_oops_do(adjust_root_pointer_closure());
duke@435 647 PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());
duke@435 648
duke@435 649 adjust_marks();
duke@435 650
duke@435 651 young_gen->adjust_pointers();
duke@435 652 old_gen->adjust_pointers();
duke@435 653 perm_gen->adjust_pointers();
duke@435 654 }
duke@435 655
duke@435 656 void PSMarkSweep::mark_sweep_phase4() {
duke@435 657 EventMark m("4 compact heap");
duke@435 658 TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
duke@435 659 trace("4");
duke@435 660
duke@435 661 // All pointers are now adjusted, move objects accordingly
duke@435 662
duke@435 663 // It is imperative that we traverse perm_gen first in phase4. All
duke@435 664 // classes must be allocated earlier than their instances, and traversing
duke@435 665 // perm_gen first makes sure that all klassOops have moved to their new
duke@435 666 // location before any instance does a dispatch through its klass!
duke@435 667 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 668 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 669
duke@435 670 PSYoungGen* young_gen = heap->young_gen();
duke@435 671 PSOldGen* old_gen = heap->old_gen();
duke@435 672 PSPermGen* perm_gen = heap->perm_gen();
duke@435 673
duke@435 674 perm_gen->compact();
duke@435 675 old_gen->compact();
duke@435 676 young_gen->compact();
duke@435 677 }
duke@435 678
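// Milliseconds since the last GC; clamped to 0 if the system clock appears to
// have moved backwards (see the note in the body).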
duke@435 679 jlong PSMarkSweep::millis_since_last_gc() {
duke@435 680 jlong ret_val = os::javaTimeMillis() - _time_of_last_gc;
duke@435 681 // XXX See note in genCollectedHeap::millis_since_last_gc().
duke@435 682 if (ret_val < 0) {
duke@435 683 NOT_PRODUCT(warning("time warp: " INT64_FORMAT, ret_val);)
duke@435 684 return 0;
duke@435 685 }
duke@435 686 return ret_val;
duke@435 687 }
duke@435 688
duke@435 689 void PSMarkSweep::reset_millis_since_last_gc() {
duke@435 690 _time_of_last_gc = os::javaTimeMillis();
duke@435 691 }
