src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp

changeset 435
a61af66fc99e
child 548
ba764ed4b6f2
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,788 @@
     1.4 +/*
     1.5 + * Copyright 2002-2007 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +
    1.29 +# include "incls/_precompiled.incl"
    1.30 +# include "incls/_psScavenge.cpp.incl"
    1.31 +
// Static state for the parallel copying (minor) collector.
HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;  // to_space top recorded at GC start (see save_to_space_top_before_gc)
int                        PSScavenge::_consecutive_skipped_scavenges = 0;  // scavenges skipped in a row (see should_attempt_scavenge)
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;  // processes soft/weak/final/phantom references
CardTableExtension*        PSScavenge::_card_table = NULL;  // card table used for old->young root scanning
bool                       PSScavenge::_survivor_overflow = false;  // true if survivors overflowed to_space last scavenge
int                        PSScavenge::_tenuring_threshold = 0;  // age at which survivors are promoted to the old gen
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;  // lowest address of the young generation
elapsedTimer               PSScavenge::_accumulated_time;  // total time spent in scavenges (TraceGen0Time)
GrowableArray<markOop>*    PSScavenge::_preserved_mark_stack = NULL;  // marks saved on promotion failure (C-heap; see oop_promotion_failed)
GrowableArray<oop>*        PSScavenge::_preserved_oop_stack = NULL;  // oops whose marks were saved (parallel to the mark stack)
CollectorCounters*         PSScavenge::_counters = NULL;  // perf counters for this collector
    1.43 +
    1.44 +// Define before use
    1.45 +class PSIsAliveClosure: public BoolObjectClosure {
    1.46 +public:
    1.47 +  void do_object(oop p) {
    1.48 +    assert(false, "Do not call.");
    1.49 +  }
    1.50 +  bool do_object_b(oop p) {
    1.51 +    return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
    1.52 +  }
    1.53 +};
    1.54 +
    1.55 +PSIsAliveClosure PSScavenge::_is_alive_closure;
    1.56 +
    1.57 +class PSKeepAliveClosure: public OopClosure {
    1.58 +protected:
    1.59 +  MutableSpace* _to_space;
    1.60 +  PSPromotionManager* _promotion_manager;
    1.61 +
    1.62 +public:
    1.63 +  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    1.64 +    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    1.65 +    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    1.66 +    _to_space = heap->young_gen()->to_space();
    1.67 +
    1.68 +    assert(_promotion_manager != NULL, "Sanity");
    1.69 +  }
    1.70 +
    1.71 +  void do_oop(oop* p) {
    1.72 +    assert (*p != NULL, "expected non-null ref");
    1.73 +    assert ((*p)->is_oop(), "expected an oop while scanning weak refs");
    1.74 +
    1.75 +    oop obj = oop(*p);
    1.76 +    // Weak refs may be visited more than once.
    1.77 +    if (PSScavenge::should_scavenge(obj, _to_space)) {
    1.78 +      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
    1.79 +    }
    1.80 +  }
    1.81 +};
    1.82 +
    1.83 +class PSEvacuateFollowersClosure: public VoidClosure {
    1.84 + private:
    1.85 +  PSPromotionManager* _promotion_manager;
    1.86 + public:
    1.87 +  PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
    1.88 +
    1.89 +  void do_void() {
    1.90 +    assert(_promotion_manager != NULL, "Sanity");
    1.91 +    _promotion_manager->drain_stacks(true);
    1.92 +    guarantee(_promotion_manager->stacks_empty(),
    1.93 +              "stacks should be empty at this point");
    1.94 +  }
    1.95 +};
    1.96 +
    1.97 +class PSPromotionFailedClosure : public ObjectClosure {
    1.98 +  virtual void do_object(oop obj) {
    1.99 +    if (obj->is_forwarded()) {
   1.100 +      obj->init_mark();
   1.101 +    }
   1.102 +  }
   1.103 +};
   1.104 +
// GCTask wrapper that lets the parallel GC task manager run one
// worker's share of a reference-processing ProcessTask.
class PSRefProcTaskProxy: public GCTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask & _rp_task;  // the underlying reference-processing task
  uint          _work_id;  // which slice of the task this proxy executes
public:
  PSRefProcTaskProxy(ProcessTask & rp_task, uint work_id)
    : _rp_task(rp_task),
      _work_id(work_id)
  { }

private:
  virtual char* name() { return (char *)"Process referents by policy in parallel"; }
  // Runs _rp_task's slice _work_id on GC worker "which"; defined below.
  virtual void do_it(GCTaskManager* manager, uint which);
};
   1.119 +
   1.120 +void PSRefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
   1.121 +{
   1.122 +  PSPromotionManager* promotion_manager =
   1.123 +    PSPromotionManager::gc_thread_promotion_manager(which);
   1.124 +  assert(promotion_manager != NULL, "sanity check");
   1.125 +  PSKeepAliveClosure keep_alive(promotion_manager);
   1.126 +  PSEvacuateFollowersClosure evac_followers(promotion_manager);
   1.127 +  PSIsAliveClosure is_alive;
   1.128 +  _rp_task.work(_work_id, is_alive, keep_alive, evac_followers);
   1.129 +}
   1.130 +
   1.131 +class PSRefEnqueueTaskProxy: public GCTask {
   1.132 +  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
   1.133 +  EnqueueTask& _enq_task;
   1.134 +  uint         _work_id;
   1.135 +
   1.136 +public:
   1.137 +  PSRefEnqueueTaskProxy(EnqueueTask& enq_task, uint work_id)
   1.138 +    : _enq_task(enq_task),
   1.139 +      _work_id(work_id)
   1.140 +  { }
   1.141 +
   1.142 +  virtual char* name() { return (char *)"Enqueue reference objects in parallel"; }
   1.143 +  virtual void do_it(GCTaskManager* manager, uint which)
   1.144 +  {
   1.145 +    _enq_task.work(_work_id);
   1.146 +  }
   1.147 +};
   1.148 +
// Executes reference processing/enqueueing work on the parallel GC
// worker threads by fanning one proxy task out per worker; the two
// overloads are defined below.
class PSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};
   1.153 +
   1.154 +void PSRefProcTaskExecutor::execute(ProcessTask& task)
   1.155 +{
   1.156 +  GCTaskQueue* q = GCTaskQueue::create();
   1.157 +  for(uint i=0; i<ParallelGCThreads; i++) {
   1.158 +    q->enqueue(new PSRefProcTaskProxy(task, i));
   1.159 +  }
   1.160 +  ParallelTaskTerminator terminator(
   1.161 +    ParallelScavengeHeap::gc_task_manager()->workers(),
   1.162 +    UseDepthFirstScavengeOrder ?
   1.163 +        (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()
   1.164 +      : (TaskQueueSetSuper*) PSPromotionManager::stack_array_breadth());
   1.165 +  if (task.marks_oops_alive() && ParallelGCThreads > 1) {
   1.166 +    for (uint j=0; j<ParallelGCThreads; j++) {
   1.167 +      q->enqueue(new StealTask(&terminator));
   1.168 +    }
   1.169 +  }
   1.170 +  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
   1.171 +}
   1.172 +
   1.173 +
   1.174 +void PSRefProcTaskExecutor::execute(EnqueueTask& task)
   1.175 +{
   1.176 +  GCTaskQueue* q = GCTaskQueue::create();
   1.177 +  for(uint i=0; i<ParallelGCThreads; i++) {
   1.178 +    q->enqueue(new PSRefEnqueueTaskProxy(task, i));
   1.179 +  }
   1.180 +  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
   1.181 +}
   1.182 +
   1.183 +// This method contains all heap specific policy for invoking scavenge.
   1.184 +// PSScavenge::invoke_no_policy() will do nothing but attempt to
   1.185 +// scavenge. It will not clean up after failed promotions, bail out if
   1.186 +// we've exceeded policy time limits, or any other special behavior.
   1.187 +// All such policy should be placed here.
   1.188 +//
   1.189 +// Note that this method should only be called from the vm_thread while
   1.190 +// at a safepoint!
   1.191 +void PSScavenge::invoke()
   1.192 +{
   1.193 +  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   1.194 +  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   1.195 +  assert(!Universe::heap()->is_gc_active(), "not reentrant");
   1.196 +
   1.197 +  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   1.198 +  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   1.199 +
   1.200 +  PSAdaptiveSizePolicy* policy = heap->size_policy();
   1.201 +
   1.202 +  // Before each allocation/collection attempt, find out from the
   1.203 +  // policy object if GCs are, on the whole, taking too long. If so,
   1.204 +  // bail out without attempting a collection.
   1.205 +  if (!policy->gc_time_limit_exceeded()) {
   1.206 +    IsGCActiveMark mark;
   1.207 +
   1.208 +    bool scavenge_was_done = PSScavenge::invoke_no_policy();
   1.209 +
   1.210 +    PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
   1.211 +    if (UsePerfData)
   1.212 +      counters->update_full_follows_scavenge(0);
   1.213 +    if (!scavenge_was_done ||
   1.214 +        policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
   1.215 +      if (UsePerfData)
   1.216 +        counters->update_full_follows_scavenge(full_follows_scavenge);
   1.217 +
   1.218 +      GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
   1.219 +      if (UseParallelOldGC) {
   1.220 +        PSParallelCompact::invoke_no_policy(false);
   1.221 +      } else {
   1.222 +        PSMarkSweep::invoke_no_policy(false);
   1.223 +      }
   1.224 +    }
   1.225 +  }
   1.226 +}
   1.227 +
// This method contains no policy. You should probably
// be calling invoke() instead.
//
// Performs one parallel scavenge of the young generation: scans roots
// and old->young references, copies live young objects to to_space or
// promotes them to the old gen, processes discovered references, then
// (absent promotion failure) swaps the survivor spaces and lets the
// adaptive size policy resize the young generation.
//
// Returns true if the scavenge completed without promotion failure,
// false if it was skipped (GC locker active, should_attempt_scavenge()
// said no) or a promotion failed.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  // Bail out if a JNI critical section is active.
  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // The actual collection. The scope bounds the lifetimes of the
  // trace/stat RAII objects below.
  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear();
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();

    // Root scanning phase: enqueue one task per root group (plus the
    // old->young card scanning tasks) and run them on the GC workers.
    {
      // TraceTime("Roots");

      GCTaskQueue* q = GCTaskQueue::create();

      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));

      ParallelTaskTerminator terminator(
        gc_task_manager()->workers(),
        promotion_manager->depth_first() ?
            (TaskQueueSetSuper*) promotion_manager->stack_array_depth()
          : (TaskQueueSetSuper*) promotion_manager->stack_array_breadth());
      if (ParallelGCThreads>1) {
        for (uint j=0; j<ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
#ifdef COMPILER2
      ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
#else
      ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
#endif // COMPILER2

      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      assert(soft_ref_policy != NULL,"No soft reference policy");
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
          &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
          NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack_empty(), "Sanity");
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear();
      young_gen->from_space()->clear();
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
              " perm_gen_capacity: %d ",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
              perm_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

       if (PrintTenuringDistribution) {
         gclog_or_tty->cr();
         gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
                                size_policy->calculated_survivor_size_in_bytes(),
                                _tenuring_threshold, MaxTenuringThreshold);
       }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimial free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                   young_gen->eden_space()->used_in_bytes(),
                                   old_gen->used_in_bytes(),
                                   perm_gen->used_in_bytes(),
                                   young_gen->eden_space()->capacity_in_bytes(),
                                   old_gen->max_gen_size(),
                                   max_eden_size,
                                   false  /* full gc*/,
                                   gc_cause);

        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collects can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden CPU hotplugging or offlining can
      // cause the change of the heap layout. Make sure eden is reshaped if that's the case.
      // Also update() will case adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

  return !promotion_failure_occurred;
}
   1.615 +
   1.616 +// This method iterates over all objects in the young generation,
   1.617 +// unforwarding markOops. It then restores any preserved mark oops,
   1.618 +// and clears the _preserved_mark_stack.
   1.619 +void PSScavenge::clean_up_failed_promotion() {
   1.620 +  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   1.621 +  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   1.622 +  assert(promotion_failed(), "Sanity");
   1.623 +
   1.624 +  PSYoungGen* young_gen = heap->young_gen();
   1.625 +
   1.626 +  {
   1.627 +    ResourceMark rm;
   1.628 +
   1.629 +    // Unforward all pointers in the young gen.
   1.630 +    PSPromotionFailedClosure unforward_closure;
   1.631 +    young_gen->object_iterate(&unforward_closure);
   1.632 +
   1.633 +    if (PrintGC && Verbose) {
   1.634 +      gclog_or_tty->print_cr("Restoring %d marks",
   1.635 +                              _preserved_oop_stack->length());
   1.636 +    }
   1.637 +
   1.638 +    // Restore any saved marks.
   1.639 +    for (int i=0; i < _preserved_oop_stack->length(); i++) {
   1.640 +      oop obj       = _preserved_oop_stack->at(i);
   1.641 +      markOop mark  = _preserved_mark_stack->at(i);
   1.642 +      obj->set_mark(mark);
   1.643 +    }
   1.644 +
   1.645 +    // Deallocate the preserved mark and oop stacks.
   1.646 +    // The stacks were allocated as CHeap objects, so
   1.647 +    // we must call delete to prevent mem leaks.
   1.648 +    delete _preserved_mark_stack;
   1.649 +    _preserved_mark_stack = NULL;
   1.650 +    delete _preserved_oop_stack;
   1.651 +    _preserved_oop_stack = NULL;
   1.652 +  }
   1.653 +
   1.654 +  // Reset the PromotionFailureALot counters.
   1.655 +  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
   1.656 +}
   1.657 +
// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preserving, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not neccessary to preserve most markOops.
//
// May be called concurrently by multiple GC threads; all accesses to
// the preservation stacks are serialized via ThreadCritical.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  // Lazily allocate the stacks on the first failure. Double-checked
  // locking: the unlocked test is an optimization only -- the decision
  // is remade while holding ThreadCritical.
  if (_preserved_mark_stack == NULL) {
    ThreadCritical tc; // Lock and retest
    if (_preserved_mark_stack == NULL) {
      assert(_preserved_oop_stack == NULL, "Sanity");
      // C-heap allocation: the stacks must outlive any resource area
      // and are deleted in clean_up_failed_promotion().
      _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true);
      _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
    }
  }

  // Because we must hold the ThreadCritical lock before using
  // the stacks, we should be safe from observing partial allocations,
  // which are also guarded by the ThreadCritical lock.
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    ThreadCritical tc;
    _preserved_oop_stack->push(obj);
    _preserved_mark_stack->push(obj_mark);
  }
}
   1.682 +
   1.683 +bool PSScavenge::should_attempt_scavenge() {
   1.684 +  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   1.685 +  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   1.686 +  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
   1.687 +
   1.688 +  if (UsePerfData) {
   1.689 +    counters->update_scavenge_skipped(not_skipped);
   1.690 +  }
   1.691 +
   1.692 +  PSYoungGen* young_gen = heap->young_gen();
   1.693 +  PSOldGen* old_gen = heap->old_gen();
   1.694 +
   1.695 +  if (!ScavengeWithObjectsInToSpace) {
   1.696 +    // Do not attempt to promote unless to_space is empty
   1.697 +    if (!young_gen->to_space()->is_empty()) {
   1.698 +      _consecutive_skipped_scavenges++;
   1.699 +      if (UsePerfData) {
   1.700 +        counters->update_scavenge_skipped(to_space_not_empty);
   1.701 +      }
   1.702 +      return false;
   1.703 +    }
   1.704 +  }
   1.705 +
   1.706 +  // Test to see if the scavenge will likely fail.
   1.707 +  PSAdaptiveSizePolicy* policy = heap->size_policy();
   1.708 +
   1.709 +  // A similar test is done in the policy's should_full_GC().  If this is
   1.710 +  // changed, decide if that test should also be changed.
   1.711 +  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
   1.712 +  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
   1.713 +  bool result = promotion_estimate < old_gen->free_in_bytes();
   1.714 +
   1.715 +  if (PrintGCDetails && Verbose) {
   1.716 +    gclog_or_tty->print(result ? "  do scavenge: " : "  skip scavenge: ");
   1.717 +    gclog_or_tty->print_cr(" average_promoted " SIZE_FORMAT
   1.718 +      " padded_average_promoted " SIZE_FORMAT
   1.719 +      " free in old gen " SIZE_FORMAT,
   1.720 +      (size_t) policy->average_promoted_in_bytes(),
   1.721 +      (size_t) policy->padded_average_promoted_in_bytes(),
   1.722 +      old_gen->free_in_bytes());
   1.723 +    if (young_gen->used_in_bytes() <
   1.724 +        (size_t) policy->padded_average_promoted_in_bytes()) {
   1.725 +      gclog_or_tty->print_cr(" padded_promoted_average is greater"
   1.726 +        " than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes());
   1.727 +    }
   1.728 +  }
   1.729 +
   1.730 +  if (result) {
   1.731 +    _consecutive_skipped_scavenges = 0;
   1.732 +  } else {
   1.733 +    _consecutive_skipped_scavenges++;
   1.734 +    if (UsePerfData) {
   1.735 +      counters->update_scavenge_skipped(promoted_too_large);
   1.736 +    }
   1.737 +  }
   1.738 +  return result;
   1.739 +}
   1.740 +
   1.741 +  // Used to add tasks
   1.742 +GCTaskManager* const PSScavenge::gc_task_manager() {
   1.743 +  assert(ParallelScavengeHeap::gc_task_manager() != NULL,
   1.744 +   "shouldn't return NULL");
   1.745 +  return ParallelScavengeHeap::gc_task_manager();
   1.746 +}
   1.747 +
   1.748 +void PSScavenge::initialize() {
   1.749 +  // Arguments must have been parsed
   1.750 +
   1.751 +  if (AlwaysTenure) {
   1.752 +    _tenuring_threshold = 0;
   1.753 +  } else if (NeverTenure) {
   1.754 +    _tenuring_threshold = markOopDesc::max_age + 1;
   1.755 +  } else {
   1.756 +    // We want to smooth out our startup times for the AdaptiveSizePolicy
   1.757 +    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
   1.758 +                                                    MaxTenuringThreshold;
   1.759 +  }
   1.760 +
   1.761 +  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   1.762 +  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   1.763 +
   1.764 +  PSYoungGen* young_gen = heap->young_gen();
   1.765 +  PSOldGen* old_gen = heap->old_gen();
   1.766 +  PSPermGen* perm_gen = heap->perm_gen();
   1.767 +
   1.768 +  // Set boundary between young_gen and old_gen
   1.769 +  assert(perm_gen->reserved().end() <= old_gen->object_space()->bottom(),
   1.770 +         "perm above old");
   1.771 +  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
   1.772 +         "old above young");
   1.773 +  _young_generation_boundary = young_gen->eden_space()->bottom();
   1.774 +
   1.775 +  // Initialize ref handling object for scavenging.
   1.776 +  MemRegion mr = young_gen->reserved();
   1.777 +  _ref_processor = ReferenceProcessor::create_ref_processor(
   1.778 +    mr,                         // span
   1.779 +    true,                       // atomic_discovery
   1.780 +    true,                       // mt_discovery
   1.781 +    NULL,                       // is_alive_non_header
   1.782 +    ParallelGCThreads,
   1.783 +    ParallelRefProcEnabled);
   1.784 +
   1.785 +  // Cache the cardtable
   1.786 +  BarrierSet* bs = Universe::heap()->barrier_set();
   1.787 +  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
   1.788 +  _card_table = (CardTableExtension*)bs;
   1.789 +
   1.790 +  _counters = new CollectorCounters("PSScavenge", 0);
   1.791 +}

mercurial