src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

changeset 888:c96030fff130
parent    704:850fdf70db2b
child     889:df4305d4c1a1
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Nov 20 12:27:41 2008 -0800
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Nov 20 16:56:09 2008 -0800
@@ -759,17 +759,12 @@
                thread_state_set.steals(),
                thread_state_set.pops()+thread_state_set.steals());
   }
-  assert(thread_state_set.pushes() == thread_state_set.pops() + thread_state_set.steals(),
+  assert(thread_state_set.pushes() == thread_state_set.pops()
+                                    + thread_state_set.steals(),
          "Or else the queues are leaky.");
 
-  // For now, process discovered weak refs sequentially.
-#ifdef COMPILER2
-  ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();
-#else
-  ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy();
-#endif // COMPILER2
-
   // Process (weak) reference objects found during scavenge.
+  ReferenceProcessor* rp = ref_processor();
   IsAliveClosure is_alive(this);
   ScanWeakRefClosure scan_weak_ref(this);
   KeepAliveClosure keep_alive(&scan_weak_ref);
@@ -778,18 +773,17 @@
   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
     &scan_without_gc_barrier, &scan_with_gc_barrier);
-  if (ref_processor()->processing_is_mt()) {
+  rp->snap_policy(clear_all_soft_refs);
+  if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
-    ref_processor()->process_discovered_references(
-        soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
-        &task_executor);
+    rp->process_discovered_references(&is_alive, &keep_alive,
+                                      &evacuate_followers, &task_executor);
   } else {
     thread_state_set.flush();
     gch->set_par_threads(0);  // 0 ==> non-parallel.
     gch->save_marks();
-    ref_processor()->process_discovered_references(
-      soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers,
-      NULL);
+    rp->process_discovered_references(&is_alive, &keep_alive,
+                                      &evacuate_followers, NULL);
   }
   if (!promotion_failed()) {
     // Swap the survivor spaces.
@@ -851,14 +845,14 @@
 
   SpecializationStats::print();
 
-  ref_processor()->set_enqueuing_is_done(true);
-  if (ref_processor()->processing_is_mt()) {
+  rp->set_enqueuing_is_done(true);
+  if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
-    ref_processor()->enqueue_discovered_references(&task_executor);
+    rp->enqueue_discovered_references(&task_executor);
   } else {
-    ref_processor()->enqueue_discovered_references(NULL);
+    rp->enqueue_discovered_references(NULL);
   }
-  ref_processor()->verify_no_references_recorded();
+  rp->verify_no_references_recorded();
 }
 
 static int sum;
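
For orientation, a minimal self-contained sketch (not HotSpot code) of the pattern this change introduces: cache the ReferenceProcessor in a local rp, snap the soft-reference clearing policy once via snap_policy(clear_all_soft_refs) instead of constructing an LRU policy object at the call site, and let the MT and serial paths differ only in whether a task executor is supplied. The TaskExecutor stub and the simplified method signatures below are assumptions for illustration; the real classes and argument lists are as shown in the diff above.

    // --- Illustrative sketch only; stub types, not the HotSpot classes. ---
    #include <cstdio>

    struct TaskExecutor { };   // stands in for ParNewRefProcTaskExecutor

    struct ReferenceProcessor {
      bool _is_mt;
      bool _clear_all_soft_refs;

      bool processing_is_mt() const { return _is_mt; }

      // Decide the soft-reference policy once, up front (the role of
      // rp->snap_policy(clear_all_soft_refs) in the patch), instead of
      // new'ing an LRU policy object at the call site.
      void snap_policy(bool clear_all_soft_refs) {
        _clear_all_soft_refs = clear_all_soft_refs;
      }

      // executor == NULL means "process serially"; the real method also
      // takes the is_alive / keep_alive / evacuate_followers closures.
      void process_discovered_references(TaskExecutor* executor) {
        std::printf("processing refs %s, clear_all_soft_refs=%d\n",
                    executor != NULL ? "in parallel" : "serially",
                    (int)_clear_all_soft_refs);
      }
    };

    void process_refs_after_scavenge(ReferenceProcessor* rp,
                                     bool clear_all_soft_refs) {
      rp->snap_policy(clear_all_soft_refs);   // policy chosen once, here
      if (rp->processing_is_mt()) {
        TaskExecutor task_executor;
        rp->process_discovered_references(&task_executor);
      } else {
        rp->process_discovered_references(NULL);
      }
    }

    int main() {
      ReferenceProcessor rp = { /*is_mt=*/ true, false };
      process_refs_after_scavenge(&rp, /*clear_all_soft_refs=*/ false);
      return 0;
    }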
