src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp

Fri, 29 Apr 2016 00:06:10 +0800

author
aoqi
date
Fri, 29 Apr 2016 00:06:10 +0800
changeset 1
2d8a650513c2
parent 0
f90c822e73f8
child 25
873fd82b133d
permissions
-rw-r--r--

Added MIPS 64-bit port.

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@1 25 /*
aoqi@1 26 * This file has been modified by Loongson Technology in 2015. These
aoqi@1 27 * modifications are Copyright (c) 2015 Loongson Technology, and are made
aoqi@1 28 * available on the same license terms set forth above.
aoqi@1 29 */
aoqi@1 30
aoqi@0 31 #include "precompiled.hpp"
aoqi@0 32 #include "classfile/symbolTable.hpp"
aoqi@0 33 #include "code/codeCache.hpp"
aoqi@0 34 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
aoqi@0 35 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
aoqi@0 36 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
aoqi@0 37 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
aoqi@0 38 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
aoqi@0 39 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
aoqi@0 40 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
aoqi@0 41 #include "gc_implementation/parallelScavenge/psTasks.hpp"
aoqi@0 42 #include "gc_implementation/shared/gcHeapSummary.hpp"
aoqi@0 43 #include "gc_implementation/shared/gcTimer.hpp"
aoqi@0 44 #include "gc_implementation/shared/gcTrace.hpp"
aoqi@0 45 #include "gc_implementation/shared/gcTraceTime.hpp"
aoqi@0 46 #include "gc_implementation/shared/isGCActiveMark.hpp"
aoqi@1 47 #include "gc_implementation/shared/mutableNUMASpace.hpp"
aoqi@0 48 #include "gc_implementation/shared/spaceDecorator.hpp"
aoqi@0 49 #include "gc_interface/gcCause.hpp"
aoqi@0 50 #include "memory/collectorPolicy.hpp"
aoqi@0 51 #include "memory/gcLocker.inline.hpp"
aoqi@0 52 #include "memory/referencePolicy.hpp"
aoqi@0 53 #include "memory/referenceProcessor.hpp"
aoqi@0 54 #include "memory/resourceArea.hpp"
aoqi@0 55 #include "oops/oop.inline.hpp"
aoqi@0 56 #include "oops/oop.psgc.inline.hpp"
aoqi@0 57 #include "runtime/biasedLocking.hpp"
aoqi@0 58 #include "runtime/fprofiler.hpp"
aoqi@0 59 #include "runtime/handles.inline.hpp"
aoqi@0 60 #include "runtime/threadCritical.hpp"
aoqi@0 61 #include "runtime/vmThread.hpp"
aoqi@0 62 #include "runtime/vm_operations.hpp"
aoqi@0 63 #include "services/memoryService.hpp"
aoqi@0 64 #include "utilities/stack.inline.hpp"
aoqi@0 65
aoqi@0 66 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
aoqi@0 67
// Definitions of PSScavenge's static state (declared in psScavenge.hpp).
HeapWord* PSScavenge::_to_space_top_before_gc = NULL;   // to-space top sampled before the scavenge (see save_to_space_top_before_gc())
int PSScavenge::_consecutive_skipped_scavenges = 0;     // count of back-to-back scavenges that were skipped
ReferenceProcessor* PSScavenge::_ref_processor = NULL;  // reference (soft/weak/final/phantom) processor for the young gen
CardTableExtension* PSScavenge::_card_table = NULL;     // card table used for old->young root scanning
bool PSScavenge::_survivor_overflow = false;            // true if survivors did not fit in to-space last scavenge
uint PSScavenge::_tenuring_threshold = 0;               // current age at which objects are promoted to the old gen
HeapWord* PSScavenge::_young_generation_boundary = NULL;            // lowest address of the young gen
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;    // same boundary, in compressed-oop form
elapsedTimer PSScavenge::_accumulated_time;             // total time spent in scavenges (TraceGen0Time)
STWGCTimer PSScavenge::_gc_timer;                       // phase timer for the current stop-the-world scavenge
ParallelScavengeTracer PSScavenge::_gc_tracer;          // event tracer for the current scavenge
Stack<markOop, mtGC> PSScavenge::_preserved_mark_stack; // saved mark words for promotion-failure recovery
Stack<oop, mtGC> PSScavenge::_preserved_oop_stack;      // oops whose marks were preserved above
CollectorCounters* PSScavenge::_counters = NULL;        // perf counters for this collector
aoqi@0 82
aoqi@0 83 // Define before use
aoqi@0 84 class PSIsAliveClosure: public BoolObjectClosure {
aoqi@0 85 public:
aoqi@0 86 bool do_object_b(oop p) {
aoqi@0 87 return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
aoqi@0 88 }
aoqi@0 89 };
aoqi@0 90
aoqi@0 91 PSIsAliveClosure PSScavenge::_is_alive_closure;
aoqi@0 92
aoqi@0 93 class PSKeepAliveClosure: public OopClosure {
aoqi@0 94 protected:
aoqi@0 95 MutableSpace* _to_space;
aoqi@0 96 PSPromotionManager* _promotion_manager;
aoqi@0 97
aoqi@0 98 public:
aoqi@0 99 PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
aoqi@0 100 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
aoqi@0 101 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
aoqi@0 102 _to_space = heap->young_gen()->to_space();
aoqi@0 103
aoqi@0 104 assert(_promotion_manager != NULL, "Sanity");
aoqi@0 105 }
aoqi@0 106
aoqi@0 107 template <class T> void do_oop_work(T* p) {
aoqi@0 108 assert (!oopDesc::is_null(*p), "expected non-null ref");
aoqi@0 109 assert ((oopDesc::load_decode_heap_oop_not_null(p))->is_oop(),
aoqi@0 110 "expected an oop while scanning weak refs");
aoqi@0 111
aoqi@0 112 // Weak refs may be visited more than once.
aoqi@0 113 if (PSScavenge::should_scavenge(p, _to_space)) {
aoqi@0 114 PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
aoqi@0 115 }
aoqi@0 116 }
aoqi@0 117 virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
aoqi@0 118 virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
aoqi@0 119 };
aoqi@0 120
aoqi@0 121 class PSEvacuateFollowersClosure: public VoidClosure {
aoqi@0 122 private:
aoqi@0 123 PSPromotionManager* _promotion_manager;
aoqi@0 124 public:
aoqi@0 125 PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
aoqi@0 126
aoqi@0 127 virtual void do_void() {
aoqi@0 128 assert(_promotion_manager != NULL, "Sanity");
aoqi@0 129 _promotion_manager->drain_stacks(true);
aoqi@0 130 guarantee(_promotion_manager->stacks_empty(),
aoqi@0 131 "stacks should be empty at this point");
aoqi@0 132 }
aoqi@0 133 };
aoqi@0 134
aoqi@0 135 class PSPromotionFailedClosure : public ObjectClosure {
aoqi@0 136 virtual void do_object(oop obj) {
aoqi@0 137 if (obj->is_forwarded()) {
aoqi@0 138 obj->init_mark();
aoqi@0 139 }
aoqi@0 140 }
aoqi@0 141 };
aoqi@0 142
// GCTask wrapper that lets the GC task manager run one worker's share of
// a reference-processing ProcessTask.
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task; // the reference-processing work to perform
  uint _work_id;          // this proxy's worker index within the task
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  // Name reported in GC task timing/tracing output.
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  // Runs _rp_task for GC worker 'which'; defined out-of-line below.
  virtual void do_it(GCTaskManager* manager, uint which);
};
aoqi@0 157
aoqi@0 158 void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
aoqi@0 159 {
aoqi@0 160 PSPromotionManager* promotion_manager =
aoqi@0 161 PSPromotionManager::gc_thread_promotion_manager(which);
aoqi@0 162 assert(promotion_manager != NULL, "sanity check");
aoqi@0 163 PSKeepAliveClosure keep_alive(promotion_manager);
aoqi@0 164 PSEvacuateFollowersClosure evac_followers(promotion_manager);
aoqi@0 165 PSIsAliveClosure is_alive;
aoqi@0 166 _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
aoqi@0 167 }
aoqi@0 168
aoqi@0 169 class PSRefEnqueueTaskProxy: public GCTask {
aoqi@0 170 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
aoqi@0 171 EnqueueTask& _enq_task;
aoqi@0 172 uint _work_id;
aoqi@0 173
aoqi@0 174 public:
aoqi@0 175 PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
aoqi@0 176 : _enq_task(enq_task),
aoqi@0 177 _work_id(work_id)
aoqi@0 178 { }
aoqi@0 179
aoqi@0 180 virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
aoqi@0 181 virtual void do_it(GCTaskManager* manager, uint which)
aoqi@0 182 {
aoqi@0 183 _enq_task.work(_work_id);
aoqi@0 184 }
aoqi@0 185 };
aoqi@0 186
// Executes reference processing / enqueueing tasks on the parallel GC
// task manager's workers (one proxy task per active worker).
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};
aoqi@0 191
aoqi@0 192 void PSRefProcTaskExecutor::execute(ProcessTask& task)
aoqi@0 193 {
aoqi@0 194 GCTaskQueue* q = GCTaskQueue::create();
aoqi@0 195 GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
aoqi@0 196 for(uint i=0; i < manager->active_workers(); i++) {
aoqi@0 197 q->enqueue(new PSRefProcTaskProxy(task, i));
aoqi@0 198 }
aoqi@0 199 ParallelTaskTerminator terminator(manager->active_workers(),
aoqi@0 200 (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
aoqi@0 201 if (task.marks_oops_alive() && manager->active_workers() > 1) {
aoqi@0 202 for (uint j = 0; j < manager->active_workers(); j++) {
aoqi@0 203 q->enqueue(new StealTask(&terminator));
aoqi@0 204 }
aoqi@0 205 }
aoqi@0 206 manager->execute_and_wait(q);
aoqi@0 207 }
aoqi@0 208
aoqi@0 209
aoqi@0 210 void PSRefProcTaskExecutor::execute(EnqueueTask& task)
aoqi@0 211 {
aoqi@0 212 GCTaskQueue* q = GCTaskQueue::create();
aoqi@0 213 GCTaskManager* manager = ParallelScavengeHeap::gc_task_manager();
aoqi@0 214 for(uint i=0; i < manager->active_workers(); i++) {
aoqi@0 215 q->enqueue(new PSRefEnqueueTaskProxy(task, i));
aoqi@0 216 }
aoqi@0 217 manager->execute_and_wait(q);
aoqi@0 218 }
aoqi@0 219
aoqi@0 220 // This method contains all heap specific policy for invoking scavenge.
aoqi@0 221 // PSScavenge::invoke_no_policy() will do nothing but attempt to
aoqi@0 222 // scavenge. It will not clean up after failed promotions, bail out if
aoqi@0 223 // we've exceeded policy time limits, or any other special behavior.
aoqi@0 224 // All such policy should be placed here.
aoqi@0 225 //
aoqi@0 226 // Note that this method should only be called from the vm_thread while
aoqi@0 227 // at a safepoint!
aoqi@0 228 bool PSScavenge::invoke() {
aoqi@0 229 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
aoqi@0 230 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
aoqi@0 231 assert(!Universe::heap()->is_gc_active(), "not reentrant");
aoqi@0 232
aoqi@0 233 ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
aoqi@0 234 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
aoqi@0 235
aoqi@0 236 PSAdaptiveSizePolicy* policy = heap->size_policy();
aoqi@0 237 IsGCActiveMark mark;
aoqi@0 238
aoqi@0 239 const bool scavenge_done = PSScavenge::invoke_no_policy();
aoqi@1 240 bool need_full_gc;
aoqi@1 241 if(UseOldNUMA) {
aoqi@1 242 need_full_gc = !scavenge_done ||
aoqi@1 243 policy->should_full_GC(heap->old_gen()->free_in_bytes_numa());
aoqi@1 244 }
aoqi@1 245 else {
aoqi@1 246 need_full_gc = !scavenge_done ||
aoqi@1 247 policy->should_full_GC(heap->old_gen()->free_in_bytes());
aoqi@1 248 }
aoqi@0 249 bool full_gc_done = false;
aoqi@0 250
aoqi@0 251 if (UsePerfData) {
aoqi@0 252 PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
aoqi@0 253 const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
aoqi@0 254 counters->update_full_follows_scavenge(ffs_val);
aoqi@0 255 }
aoqi@0 256
aoqi@0 257 if (need_full_gc) {
aoqi@0 258 GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
aoqi@0 259 CollectorPolicy* cp = heap->collector_policy();
aoqi@0 260 const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
aoqi@0 261
aoqi@0 262 if (UseParallelOldGC) {
aoqi@0 263 full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
aoqi@0 264 } else {
aoqi@0 265 full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
aoqi@0 266 }
aoqi@0 267 }
aoqi@0 268
aoqi@0 269 return full_gc_done;
aoqi@0 270 }
aoqi@0 271
/* 2014/7/7 Liao: Variables used to collect detailed statistics during GC. */
/* Per-GC-thread object-copy statistics. */
/* NOTE(review): these arrays have a fixed capacity of 16 but are indexed
 * by ParallelGCThreads in invoke_no_policy() below — assumes
 * ParallelGCThreads <= 16; TODO confirm or add a bounds guard. */
float each_gc_copy_time[16];
int each_gc_copy_fre[16];

/* Aggregate GC time/frequency statistics. */
float total_gc_time = 0;
int total_gc_fre = 0;

/* Used to track the ThreadRoots optimization. */
int task_tag[16];
// Per-CPU (per-GC-thread) counters.
int each_total_num[16];
int each_eden_total_num[3][16];
int each_eden_aligned_num[3][16];
// Counters for the current GC only.
int every_total_num;
int every_eden_total_num[3];
int every_eden_aligned_num[3];
// Cumulative counters across all GCs.
int all_total_num;
int all_eden_total_num[3];
int all_eden_aligned_num[3];
aoqi@1 295
aoqi@0 296 // This method contains no policy. You should probably
aoqi@0 297 // be calling invoke() instead.
aoqi@0 298 bool PSScavenge::invoke_no_policy() {
aoqi@0 299 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
aoqi@0 300 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
aoqi@0 301
aoqi@0 302 assert(_preserved_mark_stack.is_empty(), "should be empty");
aoqi@0 303 assert(_preserved_oop_stack.is_empty(), "should be empty");
aoqi@0 304
aoqi@0 305 _gc_timer.register_gc_start();
aoqi@0 306
aoqi@0 307 TimeStamp scavenge_entry;
aoqi@0 308 TimeStamp scavenge_midpoint;
aoqi@0 309 TimeStamp scavenge_exit;
aoqi@0 310
aoqi@0 311 scavenge_entry.update();
aoqi@0 312
aoqi@0 313 if (GC_locker::check_active_before_gc()) {
aoqi@0 314 return false;
aoqi@0 315 }
aoqi@0 316
aoqi@0 317 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
aoqi@0 318 GCCause::Cause gc_cause = heap->gc_cause();
aoqi@0 319 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
aoqi@0 320
aoqi@0 321 // Check for potential problems.
aoqi@0 322 if (!should_attempt_scavenge()) {
aoqi@0 323 return false;
aoqi@0 324 }
aoqi@0 325
aoqi@0 326 _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
aoqi@0 327
aoqi@0 328 bool promotion_failure_occurred = false;
aoqi@0 329
aoqi@0 330 PSYoungGen* young_gen = heap->young_gen();
aoqi@0 331 PSOldGen* old_gen = heap->old_gen();
aoqi@0 332 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
aoqi@0 333
aoqi@0 334 heap->increment_total_collections();
aoqi@0 335
aoqi@0 336 AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
aoqi@0 337
aoqi@0 338 if ((gc_cause != GCCause::_java_lang_system_gc) ||
aoqi@0 339 UseAdaptiveSizePolicyWithSystemGC) {
aoqi@0 340 // Gather the feedback data for eden occupancy.
aoqi@0 341 young_gen->eden_space()->accumulate_statistics();
aoqi@0 342 }
aoqi@0 343
aoqi@0 344 if (ZapUnusedHeapArea) {
aoqi@0 345 // Save information needed to minimize mangling
aoqi@0 346 heap->record_gen_tops_before_GC();
aoqi@0 347 }
aoqi@0 348
aoqi@1 349 if(UseStasticCopy) {
aoqi@1 350 for(uint i = 0; i < ParallelGCThreads; i++) {
aoqi@1 351 each_gc_copy_time[i] = 0;
aoqi@1 352 each_gc_copy_fre[i] = 0;
aoqi@1 353 }
aoqi@1 354 }
aoqi@1 355
aoqi@1 356 if(UseStasticScavenge) {
aoqi@1 357 for(int j = 0; j < 3; j++) {
aoqi@1 358 for(uint i = 0; i < ParallelGCThreads; i++) {
aoqi@1 359 task_tag[i] = 0;
aoqi@1 360
aoqi@1 361 each_total_num[i] = 0;
aoqi@1 362 each_eden_total_num[j][i] = 0;
aoqi@1 363 each_eden_aligned_num[j][i] = 0;
aoqi@1 364
aoqi@1 365 every_total_num = 0;
aoqi@1 366 every_eden_total_num[j] = 0;
aoqi@1 367 every_eden_aligned_num[j] = 0;
aoqi@1 368 }
aoqi@1 369 }
aoqi@1 370 }
aoqi@1 371
aoqi@0 372 heap->print_heap_before_gc();
aoqi@0 373 heap->trace_heap_before_gc(&_gc_tracer);
aoqi@0 374
aoqi@0 375 assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
aoqi@0 376 assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
aoqi@0 377
aoqi@0 378 size_t prev_used = heap->used();
aoqi@0 379
aoqi@0 380 // Fill in TLABs
aoqi@0 381 heap->accumulate_statistics_all_tlabs();
aoqi@0 382 heap->ensure_parsability(true); // retire TLABs
aoqi@0 383
aoqi@0 384 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
aoqi@0 385 HandleMark hm; // Discard invalid handles created during verification
aoqi@0 386 Universe::verify(" VerifyBeforeGC:");
aoqi@0 387 }
aoqi@0 388
aoqi@0 389 {
aoqi@0 390 ResourceMark rm;
aoqi@0 391 HandleMark hm;
aoqi@0 392
aoqi@0 393 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
aoqi@0 394 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
aoqi@0 395 GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
aoqi@0 396 TraceCollectorStats tcs(counters());
aoqi@0 397 TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
aoqi@0 398
aoqi@0 399 if (TraceGen0Time) accumulated_time()->start();
aoqi@0 400
aoqi@0 401 // Let the size policy know we're starting
aoqi@0 402 size_policy->minor_collection_begin();
aoqi@0 403
aoqi@0 404 // Verify the object start arrays.
aoqi@0 405 if (VerifyObjectStartArray &&
aoqi@0 406 VerifyBeforeGC) {
aoqi@0 407 old_gen->verify_object_start_array();
aoqi@0 408 }
aoqi@0 409
aoqi@0 410 // Verify no unmarked old->young roots
aoqi@0 411 if (VerifyRememberedSets) {
aoqi@0 412 CardTableExtension::verify_all_young_refs_imprecise();
aoqi@0 413 }
aoqi@0 414
aoqi@0 415 if (!ScavengeWithObjectsInToSpace) {
aoqi@0 416 assert(young_gen->to_space()->is_empty(),
aoqi@0 417 "Attempt to scavenge with live objects in to_space");
aoqi@0 418 young_gen->to_space()->clear(SpaceDecorator::Mangle);
aoqi@0 419 } else if (ZapUnusedHeapArea) {
aoqi@0 420 young_gen->to_space()->mangle_unused_area();
aoqi@0 421 }
aoqi@0 422 save_to_space_top_before_gc();
aoqi@0 423
aoqi@0 424 COMPILER2_PRESENT(DerivedPointerTable::clear());
aoqi@0 425
aoqi@0 426 reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
aoqi@0 427 reference_processor()->setup_policy(false);
aoqi@0 428
aoqi@0 429 // We track how much was promoted to the next generation for
aoqi@0 430 // the AdaptiveSizePolicy.
aoqi@0 431 size_t old_gen_used_before = old_gen->used_in_bytes();
aoqi@0 432
aoqi@0 433 // For PrintGCDetails
aoqi@0 434 size_t young_gen_used_before = young_gen->used_in_bytes();
aoqi@0 435
aoqi@0 436 // Reset our survivor overflow.
aoqi@0 437 set_survivor_overflow(false);
aoqi@0 438
aoqi@0 439 // We need to save the old top values before
aoqi@0 440 // creating the promotion_manager. We pass the top
aoqi@0 441 // values to the card_table, to prevent it from
aoqi@0 442 // straying into the promotion labs.
aoqi@0 443 HeapWord* old_top = old_gen->object_space()->top();
aoqi@0 444
aoqi@0 445 // Release all previously held resources
aoqi@0 446 gc_task_manager()->release_all_resources();
aoqi@0 447
aoqi@0 448 // Set the number of GC threads to be used in this collection
aoqi@0 449 gc_task_manager()->set_active_gang();
aoqi@0 450 gc_task_manager()->task_idle_workers();
aoqi@0 451 // Get the active number of workers here and use that value
aoqi@0 452 // throughout the methods.
aoqi@0 453 uint active_workers = gc_task_manager()->active_workers();
aoqi@0 454 heap->set_par_threads(active_workers);
aoqi@0 455
aoqi@0 456 PSPromotionManager::pre_scavenge();
aoqi@0 457
aoqi@0 458 // We'll use the promotion manager again later.
aoqi@0 459 PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
aoqi@0 460 {
aoqi@0 461 GCTraceTime tm("Scavenge", false, false, &_gc_timer);
aoqi@0 462 ParallelScavengeHeap::ParStrongRootsScope psrs;
aoqi@0 463
aoqi@0 464 GCTaskQueue* q = GCTaskQueue::create();
aoqi@0 465
aoqi@1 466 if(UseOldNUMA) {
aoqi@1 467 MutableSpace* sp;
aoqi@1 468 MutableNUMASpace::LGRPSpace *ls;
aoqi@1 469 MutableNUMASpace* s = (MutableNUMASpace*) old_gen->object_space();
aoqi@1 470 int i, j;
aoqi@1 471 i = s->lgrp_spaces()->length();
aoqi@1 472 HeapWord** gen_top = (HeapWord**) malloc (i * sizeof(HeapWord));
aoqi@1 473 for(j = 0; j < i; j++) {
aoqi@1 474 ls = s->lgrp_spaces()->at(j);
aoqi@1 475 sp = ls->space();
aoqi@1 476 *(gen_top + j) = sp->top();
aoqi@1 477 }
aoqi@1 478
aoqi@1 479 if (!old_gen->object_space()->is_empty()) {
aoqi@1 480 uint stripe_total = active_workers;
aoqi@1 481 for(uint i=0; i < stripe_total; i++) {
aoqi@1 482 q->enqueue(new OldToYoungRootsTask_OldNUMA(old_gen, gen_top, i, stripe_total));
aoqi@1 483 }
aoqi@1 484 }
aoqi@1 485 }
aoqi@1 486 else {
aoqi@1 487 if (!old_gen->object_space()->is_empty()) {
aoqi@1 488 // There are only old-to-young pointers if there are objects
aoqi@1 489 // in the old gen.
aoqi@1 490 uint stripe_total = active_workers;
aoqi@1 491 for(uint i=0; i < stripe_total; i++) {
aoqi@1 492 q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
aoqi@1 493 }
aoqi@0 494 }
aoqi@0 495 }
aoqi@0 496
aoqi@0 497 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
aoqi@0 498 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
aoqi@0 499 // We scan the thread roots in parallel
aoqi@0 500 Threads::create_thread_roots_tasks(q);
aoqi@0 501 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
aoqi@0 502 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
aoqi@0 503 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
aoqi@0 504 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
aoqi@0 505 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
aoqi@0 506 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
aoqi@0 507 q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));
aoqi@0 508
aoqi@0 509 ParallelTaskTerminator terminator(
aoqi@0 510 active_workers,
aoqi@0 511 (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
aoqi@0 512 if (active_workers > 1) {
aoqi@0 513 for (uint j = 0; j < active_workers; j++) {
aoqi@0 514 q->enqueue(new StealTask(&terminator));
aoqi@0 515 }
aoqi@0 516 }
aoqi@0 517
aoqi@0 518 gc_task_manager()->execute_and_wait(q);
aoqi@0 519 }
aoqi@0 520
aoqi@0 521 scavenge_midpoint.update();
aoqi@0 522
aoqi@0 523 // Process reference objects discovered during scavenge
aoqi@0 524 {
aoqi@0 525 GCTraceTime tm("References", false, false, &_gc_timer);
aoqi@0 526
aoqi@0 527 reference_processor()->setup_policy(false); // not always_clear
aoqi@0 528 reference_processor()->set_active_mt_degree(active_workers);
aoqi@0 529 PSKeepAliveClosure keep_alive(promotion_manager);
aoqi@0 530 PSEvacuateFollowersClosure evac_followers(promotion_manager);
aoqi@0 531 ReferenceProcessorStats stats;
aoqi@0 532 if (reference_processor()->processing_is_mt()) {
aoqi@0 533 PSRefProcTaskExecutor task_executor;
aoqi@0 534 stats = reference_processor()->process_discovered_references(
aoqi@0 535 &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
aoqi@0 536 &_gc_timer);
aoqi@0 537 } else {
aoqi@0 538 stats = reference_processor()->process_discovered_references(
aoqi@0 539 &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
aoqi@0 540 }
aoqi@0 541
aoqi@0 542 _gc_tracer.report_gc_reference_stats(stats);
aoqi@0 543
aoqi@0 544 // Enqueue reference objects discovered during scavenge.
aoqi@0 545 if (reference_processor()->processing_is_mt()) {
aoqi@0 546 PSRefProcTaskExecutor task_executor;
aoqi@0 547 reference_processor()->enqueue_discovered_references(&task_executor);
aoqi@0 548 } else {
aoqi@0 549 reference_processor()->enqueue_discovered_references(NULL);
aoqi@0 550 }
aoqi@0 551 }
aoqi@0 552
aoqi@0 553 {
aoqi@0 554 GCTraceTime tm("StringTable", false, false, &_gc_timer);
aoqi@0 555 // Unlink any dead interned Strings and process the remaining live ones.
aoqi@0 556 PSScavengeRootsClosure root_closure(promotion_manager);
aoqi@0 557 StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
aoqi@0 558 }
aoqi@0 559
aoqi@0 560 // Finally, flush the promotion_manager's labs, and deallocate its stacks.
aoqi@0 561 promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
aoqi@0 562 if (promotion_failure_occurred) {
aoqi@0 563 clean_up_failed_promotion();
aoqi@0 564 if (PrintGC) {
aoqi@0 565 gclog_or_tty->print("--");
aoqi@0 566 }
aoqi@0 567 }
aoqi@0 568
aoqi@0 569 // Let the size policy know we're done. Note that we count promotion
aoqi@0 570 // failure cleanup time as part of the collection (otherwise, we're
aoqi@0 571 // implicitly saying it's mutator time).
aoqi@0 572 size_policy->minor_collection_end(gc_cause);
aoqi@0 573
aoqi@0 574 if (!promotion_failure_occurred) {
aoqi@0 575 // Swap the survivor spaces.
aoqi@0 576 young_gen->eden_space()->clear(SpaceDecorator::Mangle);
aoqi@0 577 young_gen->from_space()->clear(SpaceDecorator::Mangle);
aoqi@0 578 young_gen->swap_spaces();
aoqi@0 579
aoqi@0 580 size_t survived = young_gen->from_space()->used_in_bytes();
aoqi@0 581 size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
aoqi@0 582 size_policy->update_averages(_survivor_overflow, survived, promoted);
aoqi@0 583
aoqi@0 584 // A successful scavenge should restart the GC time limit count which is
aoqi@0 585 // for full GC's.
aoqi@0 586 size_policy->reset_gc_overhead_limit_count();
aoqi@0 587 if (UseAdaptiveSizePolicy) {
aoqi@0 588 // Calculate the new survivor size and tenuring threshold
aoqi@0 589
aoqi@0 590 if (PrintAdaptiveSizePolicy) {
aoqi@0 591 gclog_or_tty->print("AdaptiveSizeStart: ");
aoqi@0 592 gclog_or_tty->stamp();
aoqi@0 593 gclog_or_tty->print_cr(" collection: %d ",
aoqi@0 594 heap->total_collections());
aoqi@0 595
aoqi@0 596 if (Verbose) {
aoqi@0 597 gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
aoqi@0 598 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
aoqi@0 599 }
aoqi@0 600 }
aoqi@0 601
aoqi@0 602
aoqi@0 603 if (UsePerfData) {
aoqi@0 604 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
aoqi@0 605 counters->update_old_eden_size(
aoqi@0 606 size_policy->calculated_eden_size_in_bytes());
aoqi@0 607 counters->update_old_promo_size(
aoqi@0 608 size_policy->calculated_promo_size_in_bytes());
aoqi@0 609 counters->update_old_capacity(old_gen->capacity_in_bytes());
aoqi@0 610 counters->update_young_capacity(young_gen->capacity_in_bytes());
aoqi@0 611 counters->update_survived(survived);
aoqi@0 612 counters->update_promoted(promoted);
aoqi@0 613 counters->update_survivor_overflowed(_survivor_overflow);
aoqi@0 614 }
aoqi@0 615
aoqi@0 616 size_t max_young_size = young_gen->max_size();
aoqi@0 617
aoqi@0 618 // Deciding a free ratio in the young generation is tricky, so if
aoqi@0 619 // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implicating
aoqi@0 620 // that the old generation size may have been limited because of them) we
aoqi@0 621 // should then limit our young generation size using NewRatio to have it
aoqi@0 622 // follow the old generation size.
aoqi@0 623 if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
aoqi@0 624 max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
aoqi@0 625 }
aoqi@0 626
aoqi@0 627 size_t survivor_limit =
aoqi@0 628 size_policy->max_survivor_size(max_young_size);
aoqi@0 629 _tenuring_threshold =
aoqi@0 630 size_policy->compute_survivor_space_size_and_threshold(
aoqi@0 631 _survivor_overflow,
aoqi@0 632 _tenuring_threshold,
aoqi@0 633 survivor_limit);
aoqi@0 634
aoqi@0 635 if (PrintTenuringDistribution) {
aoqi@0 636 gclog_or_tty->cr();
aoqi@0 637 gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
aoqi@0 638 size_policy->calculated_survivor_size_in_bytes(),
aoqi@0 639 _tenuring_threshold, MaxTenuringThreshold);
aoqi@0 640 }
aoqi@0 641
aoqi@0 642 if (UsePerfData) {
aoqi@0 643 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
aoqi@0 644 counters->update_tenuring_threshold(_tenuring_threshold);
aoqi@0 645 counters->update_survivor_size_counters();
aoqi@0 646 }
aoqi@0 647
aoqi@0 648 // Do call at minor collections?
aoqi@0 649 // Don't check if the size_policy is ready at this
aoqi@0 650 // level. Let the size_policy check that internally.
aoqi@0 651 if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
aoqi@0 652 ((gc_cause != GCCause::_java_lang_system_gc) ||
aoqi@0 653 UseAdaptiveSizePolicyWithSystemGC)) {
aoqi@0 654
aoqi@0 655 // Calculate optimal free space amounts
aoqi@0 656 assert(young_gen->max_size() >
aoqi@0 657 young_gen->from_space()->capacity_in_bytes() +
aoqi@0 658 young_gen->to_space()->capacity_in_bytes(),
aoqi@0 659 "Sizes of space in young gen are out-of-bounds");
aoqi@0 660
aoqi@0 661 size_t young_live = young_gen->used_in_bytes();
aoqi@0 662 size_t eden_live = young_gen->eden_space()->used_in_bytes();
aoqi@0 663 size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
aoqi@0 664 size_t max_old_gen_size = old_gen->max_gen_size();
aoqi@0 665 size_t max_eden_size = max_young_size -
aoqi@0 666 young_gen->from_space()->capacity_in_bytes() -
aoqi@0 667 young_gen->to_space()->capacity_in_bytes();
aoqi@0 668
aoqi@0 669 // Used for diagnostics
aoqi@0 670 size_policy->clear_generation_free_space_flags();
aoqi@0 671
aoqi@0 672 size_policy->compute_eden_space_size(young_live,
aoqi@0 673 eden_live,
aoqi@0 674 cur_eden,
aoqi@0 675 max_eden_size,
aoqi@0 676 false /* not full gc*/);
aoqi@0 677
aoqi@0 678 size_policy->check_gc_overhead_limit(young_live,
aoqi@0 679 eden_live,
aoqi@0 680 max_old_gen_size,
aoqi@0 681 max_eden_size,
aoqi@0 682 false /* not full gc*/,
aoqi@0 683 gc_cause,
aoqi@0 684 heap->collector_policy());
aoqi@0 685
aoqi@0 686 size_policy->decay_supplemental_growth(false /* not full gc*/);
aoqi@0 687 }
aoqi@0 688 // Resize the young generation at every collection
aoqi@0 689 // even if new sizes have not been calculated. This is
aoqi@0 690 // to allow resizes that may have been inhibited by the
aoqi@0 691 // relative location of the "to" and "from" spaces.
aoqi@0 692
aoqi@0 693 // Resizing the old gen at minor collects can cause increases
aoqi@0 694 // that don't feed back to the generation sizing policy until
aoqi@0 695 // a major collection. Don't resize the old gen here.
aoqi@0 696
aoqi@0 697 heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
aoqi@0 698 size_policy->calculated_survivor_size_in_bytes());
aoqi@0 699
aoqi@0 700 if (PrintAdaptiveSizePolicy) {
aoqi@0 701 gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
aoqi@0 702 heap->total_collections());
aoqi@0 703 }
aoqi@0 704 }
aoqi@0 705
aoqi@0 706 // Update the structure of the eden. With NUMA-eden CPU hotplugging or offlining can
aoqi@0 707 // cause the change of the heap layout. Make sure eden is reshaped if that's the case.
aoqi@0 708 // Also update() will cause adaptive NUMA chunk resizing.
aoqi@0 709 assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
aoqi@0 710 young_gen->eden_space()->update();
aoqi@0 711
aoqi@0 712 heap->gc_policy_counters()->update_counters();
aoqi@0 713
aoqi@0 714 heap->resize_all_tlabs();
aoqi@0 715
aoqi@0 716 assert(young_gen->to_space()->is_empty(), "to space should be empty now");
aoqi@0 717 }
aoqi@0 718
aoqi@0 719 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
aoqi@0 720
aoqi@0 721 NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
aoqi@0 722
aoqi@0 723 {
aoqi@0 724 GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
aoqi@0 725
aoqi@0 726 CodeCache::prune_scavenge_root_nmethods();
aoqi@0 727 }
aoqi@0 728
aoqi@0 729 // Re-verify object start arrays
aoqi@0 730 if (VerifyObjectStartArray &&
aoqi@0 731 VerifyAfterGC) {
aoqi@0 732 old_gen->verify_object_start_array();
aoqi@0 733 }
aoqi@0 734
aoqi@0 735 // Verify all old -> young cards are now precise
aoqi@0 736 if (VerifyRememberedSets) {
aoqi@0 737 // Precise verification will give false positives. Until this is fixed,
aoqi@0 738 // use imprecise verification.
aoqi@0 739 // CardTableExtension::verify_all_young_refs_precise();
aoqi@0 740 CardTableExtension::verify_all_young_refs_imprecise();
aoqi@0 741 }
aoqi@0 742
aoqi@0 743 if (TraceGen0Time) accumulated_time()->stop();
aoqi@0 744
aoqi@0 745 if (PrintGC) {
aoqi@0 746 if (PrintGCDetails) {
aoqi@0 747 // Don't print a GC timestamp here. This is after the GC so
aoqi@0 748 // would be confusing.
aoqi@0 749 young_gen->print_used_change(young_gen_used_before);
aoqi@0 750 }
aoqi@0 751 heap->print_heap_change(prev_used);
aoqi@0 752 }
aoqi@0 753
aoqi@0 754 // Track memory usage and detect low memory
aoqi@0 755 MemoryService::track_memory_usage();
aoqi@0 756 heap->update_counters();
aoqi@0 757
aoqi@0 758 gc_task_manager()->release_idle_workers();
aoqi@0 759 }
aoqi@0 760
aoqi@0 761 if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
aoqi@0 762 HandleMark hm; // Discard invalid handles created during verification
aoqi@0 763 Universe::verify(" VerifyAfterGC:");
aoqi@0 764 }
aoqi@0 765
aoqi@0 766 heap->print_heap_after_gc();
aoqi@0 767 heap->trace_heap_after_gc(&_gc_tracer);
aoqi@0 768 _gc_tracer.report_tenuring_threshold(tenuring_threshold());
aoqi@0 769
aoqi@0 770 if (ZapUnusedHeapArea) {
aoqi@0 771 young_gen->eden_space()->check_mangled_unused_area_complete();
aoqi@0 772 young_gen->from_space()->check_mangled_unused_area_complete();
aoqi@0 773 young_gen->to_space()->check_mangled_unused_area_complete();
aoqi@0 774 }
aoqi@0 775
aoqi@0 776 scavenge_exit.update();
aoqi@0 777
aoqi@0 778 if (PrintGCTaskTimeStamps) {
aoqi@0 779 tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
aoqi@0 780 scavenge_entry.ticks(), scavenge_midpoint.ticks(),
aoqi@0 781 scavenge_exit.ticks());
aoqi@0 782 gc_task_manager()->print_task_time_stamps();
aoqi@0 783 }
aoqi@1 784
aoqi@1 785 if(PrintGCDetails) {
aoqi@1 786 float young_gc_time;
aoqi@1 787 total_gc_fre++;
aoqi@1 788 young_gc_time = ((float)(scavenge_exit.ticks() - scavenge_entry.ticks()))/1e9;
aoqi@1 789 total_gc_time = total_gc_time + ((float)(scavenge_exit.ticks() - scavenge_entry.ticks()))/1e9;
aoqi@1 790 tty->print_cr("total_gc_fre = %d, young_gc_time = %f, total_gc_time = %f", total_gc_fre, young_gc_time, total_gc_time);
aoqi@1 791 }
aoqi@1 792
aoqi@1 793 if(UseStasticCopy) {
aoqi@1 794 for(uint i = 0; i < ParallelGCThreads; i++) {
aoqi@1 795 tty->print_cr("each_gc_copy_time[%d] = %f", i, each_gc_copy_time[i]/each_gc_copy_fre[i]);
aoqi@1 796 }
aoqi@1 797 tty->print_cr("");
aoqi@1 798 for(uint i = 0; i < ParallelGCThreads; i++) {
aoqi@1 799 tty->print_cr("each_gc_copy_fre[%d] = %d", i, each_gc_copy_fre[i]);
aoqi@1 800 }
aoqi@1 801 }
aoqi@1 802
aoqi@1 803 if(UseStasticScavenge) {
aoqi@1 804 for(int i = 0; i < 3; i++) {
aoqi@1 805 for(uint j = 0; j < ParallelGCThreads; j++) {
aoqi@1 806 every_eden_total_num[i] += each_eden_total_num[i][j];
aoqi@1 807 every_eden_aligned_num[i] += each_eden_aligned_num[i][j];
aoqi@1 808 }
aoqi@1 809 }
aoqi@1 810
aoqi@1 811 for(uint i = 0; i < ParallelGCThreads; i++) {
aoqi@1 812 every_total_num += each_total_num[i];
aoqi@1 813 }
aoqi@1 814
aoqi@1 815 all_total_num += every_total_num;
aoqi@1 816
aoqi@1 817 for(int i = 0; i < 3; i++) {
aoqi@1 818 all_eden_total_num[i] += every_eden_total_num[i];
aoqi@1 819 all_eden_aligned_num[i] += every_eden_aligned_num[i];
aoqi@1 820 }
aoqi@1 821
aoqi@1 822 tty->print_cr("============= Every GCDetails: =============");
aoqi@1 823 tty->print_cr("ThreadRootTask: prop of all = %f, prop of aligned = %f", (float)every_eden_total_num[0]/(float)every_total_num, (float)every_eden_aligned_num[0]/(float)every_eden_total_num[0]);
aoqi@1 824 tty->print_cr("OldToYoungRootTask: prop of all = %f, prop of aligned = %f", (float)every_eden_total_num[1]/(float)every_total_num, (float)every_eden_aligned_num[1]/(float)every_eden_total_num[1]);
aoqi@1 825 tty->print_cr("StealTask: prop of all = %f, prop of aligned = %f", (float)every_eden_total_num[2]/(float)every_total_num, (float)every_eden_aligned_num[2]/(float)every_eden_total_num[2]);
aoqi@1 826 tty->print_cr("");
aoqi@1 827
aoqi@1 828 tty->print_cr("============= Total GCDetails: =============");
aoqi@1 829 tty->print_cr("ThreadRootTask: prop of all = %f, prop of aligned = %f", (float)all_eden_total_num[0]/(float)all_total_num, (float)all_eden_aligned_num[0]/(float)all_eden_total_num[0]);
aoqi@1 830 tty->print_cr("OldToYoungRootTask: prop of all = %f, prop of aligned = %f", (float)all_eden_total_num[1]/(float)all_total_num, (float)all_eden_aligned_num[1]/(float)all_eden_total_num[1]);
aoqi@1 831 tty->print_cr("StealTask: prop of all = %f, prop of aligned = %f", (float)all_eden_total_num[2]/(float)all_total_num, (float)all_eden_aligned_num[2]/(float)all_eden_total_num[2]);
aoqi@1 832 tty->print_cr("");
aoqi@1 833 }
aoqi@0 834
aoqi@0 835 #ifdef TRACESPINNING
aoqi@0 836 ParallelTaskTerminator::print_termination_counts();
aoqi@0 837 #endif
aoqi@0 838
aoqi@0 839
aoqi@0 840 _gc_timer.register_gc_end();
aoqi@0 841
aoqi@0 842 _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
aoqi@0 843
aoqi@0 844 return !promotion_failure_occurred;
aoqi@0 845 }
aoqi@0 846
aoqi@0 847 // This method iterates over all objects in the young generation,
aoqi@0 848 // unforwarding markOops. It then restores any preserved mark oops,
aoqi@0 849 // and clears the _preserved_mark_stack.
aoqi@0 850 void PSScavenge::clean_up_failed_promotion() {
aoqi@0 851 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
aoqi@0 852 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
aoqi@0 853
aoqi@0 854 PSYoungGen* young_gen = heap->young_gen();
aoqi@0 855
aoqi@0 856 {
aoqi@0 857 ResourceMark rm;
aoqi@0 858
aoqi@0 859 // Unforward all pointers in the young gen.
aoqi@0 860 PSPromotionFailedClosure unforward_closure;
aoqi@0 861 young_gen->object_iterate(&unforward_closure);
aoqi@0 862
aoqi@0 863 if (PrintGC && Verbose) {
aoqi@0 864 gclog_or_tty->print_cr("Restoring %d marks", _preserved_oop_stack.size());
aoqi@0 865 }
aoqi@0 866
aoqi@0 867 // Restore any saved marks.
aoqi@0 868 while (!_preserved_oop_stack.is_empty()) {
aoqi@0 869 oop obj = _preserved_oop_stack.pop();
aoqi@0 870 markOop mark = _preserved_mark_stack.pop();
aoqi@0 871 obj->set_mark(mark);
aoqi@0 872 }
aoqi@0 873
aoqi@0 874 // Clear the preserved mark and oop stack caches.
aoqi@0 875 _preserved_mark_stack.clear(true);
aoqi@0 876 _preserved_oop_stack.clear(true);
aoqi@0 877 }
aoqi@0 878
aoqi@0 879 // Reset the PromotionFailureALot counters.
aoqi@0 880 NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
aoqi@0 881 }
aoqi@0 882
aoqi@0 883 // This method is called whenever an attempt to promote an object
aoqi@0 884 // fails. Some markOops will need preservation, some will not. Note
aoqi@0 885 // that the entire eden is traversed after a failed promotion, with
aoqi@0 886 // all forwarded headers replaced by the default markOop. This means
aoqi@0 887 // it is not necessary to preserve most markOops.
aoqi@0 888 void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
aoqi@0 889 if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
aoqi@0 890 // Should use per-worker private stacks here rather than
aoqi@0 891 // locking a common pair of stacks.
aoqi@0 892 ThreadCritical tc;
aoqi@0 893 _preserved_oop_stack.push(obj);
aoqi@0 894 _preserved_mark_stack.push(obj_mark);
aoqi@0 895 }
aoqi@0 896 }
aoqi@0 897
aoqi@0 898 bool PSScavenge::should_attempt_scavenge() {
aoqi@0 899 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
aoqi@0 900 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
aoqi@0 901 PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
aoqi@0 902
aoqi@0 903 if (UsePerfData) {
aoqi@0 904 counters->update_scavenge_skipped(not_skipped);
aoqi@0 905 }
aoqi@0 906
aoqi@0 907 PSYoungGen* young_gen = heap->young_gen();
aoqi@0 908 PSOldGen* old_gen = heap->old_gen();
aoqi@0 909
aoqi@0 910 if (!ScavengeWithObjectsInToSpace) {
aoqi@0 911 // Do not attempt to promote unless to_space is empty
aoqi@0 912 if (!young_gen->to_space()->is_empty()) {
aoqi@0 913 _consecutive_skipped_scavenges++;
aoqi@0 914 if (UsePerfData) {
aoqi@0 915 counters->update_scavenge_skipped(to_space_not_empty);
aoqi@0 916 }
aoqi@0 917 return false;
aoqi@0 918 }
aoqi@0 919 }
aoqi@0 920
aoqi@0 921 // Test to see if the scavenge will likely fail.
aoqi@0 922 PSAdaptiveSizePolicy* policy = heap->size_policy();
aoqi@0 923
aoqi@0 924 // A similar test is done in the policy's should_full_GC(). If this is
aoqi@0 925 // changed, decide if that test should also be changed.
aoqi@0 926 size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
aoqi@0 927 size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
aoqi@0 928 bool result = promotion_estimate < old_gen->free_in_bytes();
aoqi@0 929
aoqi@0 930 if (PrintGCDetails && Verbose) {
aoqi@0 931 gclog_or_tty->print(result ? " do scavenge: " : " skip scavenge: ");
aoqi@0 932 gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
aoqi@0 933 " padded_average_promoted " SIZE_FORMAT
aoqi@0 934 " free in old gen " SIZE_FORMAT,
aoqi@0 935 (size_t) policy->average_promoted_in_bytes(),
aoqi@0 936 (size_t) policy->padded_average_promoted_in_bytes(),
aoqi@0 937 old_gen->free_in_bytes());
aoqi@0 938 if (young_gen->used_in_bytes() <
aoqi@0 939 (size_t) policy->padded_average_promoted_in_bytes()) {
aoqi@0 940 gclog_or_tty->print_cr(" padded_promoted_average is greater"
aoqi@0 941 " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
aoqi@0 942 }
aoqi@0 943 }
aoqi@0 944
aoqi@0 945 if (result) {
aoqi@0 946 _consecutive_skipped_scavenges = 0;
aoqi@0 947 } else {
aoqi@0 948 _consecutive_skipped_scavenges++;
aoqi@0 949 if (UsePerfData) {
aoqi@0 950 counters->update_scavenge_skipped(promoted_too_large);
aoqi@0 951 }
aoqi@0 952 }
aoqi@0 953 return result;
aoqi@0 954 }
aoqi@0 955
aoqi@0 956 // Used to add tasks
aoqi@0 957 GCTaskManager* const PSScavenge::gc_task_manager() {
aoqi@0 958 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
aoqi@0 959 "shouldn't return NULL");
aoqi@0 960 return ParallelScavengeHeap::gc_task_manager();
aoqi@0 961 }
aoqi@0 962
aoqi@0 963 void PSScavenge::initialize() {
aoqi@0 964 // Arguments must have been parsed
aoqi@0 965
aoqi@0 966 if (AlwaysTenure) {
aoqi@0 967 _tenuring_threshold = 0;
aoqi@0 968 } else if (NeverTenure) {
aoqi@0 969 _tenuring_threshold = markOopDesc::max_age + 1;
aoqi@0 970 } else {
aoqi@0 971 // We want to smooth out our startup times for the AdaptiveSizePolicy
aoqi@0 972 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
aoqi@0 973 MaxTenuringThreshold;
aoqi@0 974 }
aoqi@0 975
aoqi@0 976 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
aoqi@0 977 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
aoqi@0 978
aoqi@0 979 PSYoungGen* young_gen = heap->young_gen();
aoqi@0 980 PSOldGen* old_gen = heap->old_gen();
aoqi@0 981
aoqi@0 982 // Set boundary between young_gen and old_gen
aoqi@0 983 assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
aoqi@0 984 "old above young");
aoqi@0 985 set_young_generation_boundary(young_gen->eden_space()->bottom());
aoqi@0 986
aoqi@0 987 // Initialize ref handling object for scavenging.
aoqi@0 988 MemRegion mr = young_gen->reserved();
aoqi@0 989
aoqi@0 990 _ref_processor =
aoqi@0 991 new ReferenceProcessor(mr, // span
aoqi@0 992 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
aoqi@0 993 (int) ParallelGCThreads, // mt processing degree
aoqi@0 994 true, // mt discovery
aoqi@0 995 (int) ParallelGCThreads, // mt discovery degree
aoqi@0 996 true, // atomic_discovery
aoqi@0 997 NULL); // header provides liveness info
aoqi@0 998
aoqi@0 999 // Cache the cardtable
aoqi@0 1000 BarrierSet* bs = Universe::heap()->barrier_set();
aoqi@0 1001 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
aoqi@0 1002 _card_table = (CardTableExtension*)bs;
aoqi@0 1003
aoqi@0 1004 _counters = new CollectorCounters("PSScavenge", 0);
aoqi@0 1005 }

mercurial