changeset:   1721:ab75c83d7c37
parent:      1716:7d236a9688c5
parent:      1720:a1c410de27e4
child:       1723:c76ca382971b
author:      johnc
date:        Tue, 02 Mar 2010 13:57:46 -0800
summary:     Merge

     1.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Mon Mar 01 12:12:35 2010 -0800
     1.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Tue Mar 02 13:57:46 2010 -0800
     1.3 @@ -46,9 +46,9 @@
     1.4  
     1.5    _processor_count = os::active_processor_count();
     1.6  
     1.7 -  if (CMSConcurrentMTEnabled && (ParallelCMSThreads > 1)) {
     1.8 +  if (CMSConcurrentMTEnabled && (ConcGCThreads > 1)) {
     1.9      assert(_processor_count > 0, "Processor count is suspect");
    1.10 -    _concurrent_processor_count = MIN2((uint) ParallelCMSThreads,
    1.11 +    _concurrent_processor_count = MIN2((uint) ConcGCThreads,
    1.12                                         (uint) _processor_count);
    1.13    } else {
    1.14      _concurrent_processor_count = 1;
     2.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Mar 01 12:12:35 2010 -0800
     2.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Mar 02 13:57:46 2010 -0800
     2.3 @@ -606,7 +606,7 @@
     2.4      assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
     2.5    }
     2.6  
     2.7 -  if (!_markStack.allocate(CMSMarkStackSize)) {
     2.8 +  if (!_markStack.allocate(MarkStackSize)) {
     2.9      warning("Failed to allocate CMS Marking Stack");
    2.10      return;
    2.11    }
    2.12 @@ -617,13 +617,13 @@
    2.13  
    2.14    // Support for multi-threaded concurrent phases
    2.15    if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
    2.16 -    if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
    2.17 +    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
    2.18        // just for now
    2.19 -      FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
    2.20 -    }
    2.21 -    if (ParallelCMSThreads > 1) {
    2.22 +      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    2.23 +    }
    2.24 +    if (ConcGCThreads > 1) {
    2.25        _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
    2.26 -                                 ParallelCMSThreads, true);
    2.27 +                                 ConcGCThreads, true);
    2.28        if (_conc_workers == NULL) {
    2.29          warning("GC/CMS: _conc_workers allocation failure: "
    2.30                "forcing -CMSConcurrentMTEnabled");
    2.31 @@ -634,13 +634,13 @@
    2.32      }
    2.33    }
    2.34    if (!CMSConcurrentMTEnabled) {
    2.35 -    ParallelCMSThreads = 0;
    2.36 +    ConcGCThreads = 0;
    2.37    } else {
    2.38      // Turn off CMSCleanOnEnter optimization temporarily for
    2.39      // the MT case where it's not fixed yet; see 6178663.
    2.40      CMSCleanOnEnter = false;
    2.41    }
    2.42 -  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
    2.43 +  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
    2.44           "Inconsistency");
    2.45  
    2.46    // Parallel task queues; these are shared for the
    2.47 @@ -648,7 +648,7 @@
    2.48    // are not shared with parallel scavenge (ParNew).
    2.49    {
    2.50      uint i;
    2.51 -    uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);
    2.52 +    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
    2.53  
    2.54      if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
    2.55           || ParallelRefProcEnabled)
    2.56 @@ -3657,7 +3657,7 @@
    2.57    assert(_revisitStack.isEmpty(), "tabula rasa");
    2.58    DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
    2.59    bool result = false;
    2.60 -  if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
    2.61 +  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
    2.62      result = do_marking_mt(asynch);
    2.63    } else {
    2.64      result = do_marking_st(asynch);
    2.65 @@ -4174,10 +4174,10 @@
    2.66  }
    2.67  
    2.68  bool CMSCollector::do_marking_mt(bool asynch) {
    2.69 -  assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
    2.70 +  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
    2.71    // In the future this would be determined ergonomically, based
    2.72    // on #cpu's, # active mutator threads (and load), and mutation rate.
    2.73 -  int num_workers = ParallelCMSThreads;
    2.74 +  int num_workers = ConcGCThreads;
    2.75  
    2.76    CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
    2.77    CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
    2.78 @@ -6429,8 +6429,8 @@
    2.79  // For now we take the expedient path of just disabling the
    2.80  // messages for the problematic case.)
    2.81  void CMSMarkStack::expand() {
    2.82 -  assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
    2.83 -  if (_capacity == CMSMarkStackSizeMax) {
    2.84 +  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
    2.85 +  if (_capacity == MarkStackSizeMax) {
    2.86      if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
    2.87        // We print a warning message only once per CMS cycle.
    2.88        gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
    2.89 @@ -6438,7 +6438,7 @@
    2.90      return;
    2.91    }
    2.92    // Double capacity if possible
    2.93 -  size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
    2.94 +  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
    2.95    // Do not give up existing stack until we have managed to
    2.96    // get the double capacity that we desired.
    2.97    ReservedSpace rs(ReservedSpace::allocation_align_size_up(
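
The expand() hunk above keeps the existing doubling policy but clamps it to the renamed MarkStackSizeMax flag. A minimal standalone sketch of that policy (plain C++ with std::min, not HotSpot's MIN2/ReservedSpace machinery):

    #include <algorithm>
    #include <cstddef>

    // Capped doubling, as in CMSMarkStack::expand(): grow toward the maximum,
    // never past it; at the limit the caller just reports a (benign) hit.
    size_t next_mark_stack_capacity(size_t capacity, size_t max_capacity) {
      if (capacity >= max_capacity) {
        return capacity;                           // already at MarkStackSizeMax
      }
      return std::min(capacity * 2, max_capacity); // double, but clamp to the max
    }
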
     3.1 --- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Mon Mar 01 12:12:35 2010 -0800
     3.2 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Tue Mar 02 13:57:46 2010 -0800
     3.3 @@ -44,20 +44,20 @@
     3.4  {
     3.5  
     3.6    // Ergomonically select initial concurrent refinement parameters
     3.7 -  if (FLAG_IS_DEFAULT(G1ConcRefineGreenZone)) {
     3.8 -    FLAG_SET_DEFAULT(G1ConcRefineGreenZone, MAX2<int>(ParallelGCThreads, 1));
     3.9 +  if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
    3.10 +    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
    3.11    }
    3.12 -  set_green_zone(G1ConcRefineGreenZone);
    3.13 +  set_green_zone(G1ConcRefinementGreenZone);
    3.14  
    3.15 -  if (FLAG_IS_DEFAULT(G1ConcRefineYellowZone)) {
    3.16 -    FLAG_SET_DEFAULT(G1ConcRefineYellowZone, green_zone() * 3);
    3.17 +  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
    3.18 +    FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
    3.19    }
    3.20 -  set_yellow_zone(MAX2<int>(G1ConcRefineYellowZone, green_zone()));
    3.21 +  set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
    3.22  
    3.23 -  if (FLAG_IS_DEFAULT(G1ConcRefineRedZone)) {
    3.24 -    FLAG_SET_DEFAULT(G1ConcRefineRedZone, yellow_zone() * 2);
    3.25 +  if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
    3.26 +    FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
    3.27    }
    3.28 -  set_red_zone(MAX2<int>(G1ConcRefineRedZone, yellow_zone()));
    3.29 +  set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
    3.30    _n_worker_threads = thread_num();
    3.31    // We need one extra thread to do the young gen rset size sampling.
    3.32    _n_threads = _n_worker_threads + 1;
    3.33 @@ -76,15 +76,15 @@
    3.34  }
    3.35  
    3.36  void ConcurrentG1Refine::reset_threshold_step() {
    3.37 -  if (FLAG_IS_DEFAULT(G1ConcRefineThresholdStep)) {
    3.38 +  if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
    3.39      _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
    3.40    } else {
    3.41 -    _thread_threshold_step = G1ConcRefineThresholdStep;
    3.42 +    _thread_threshold_step = G1ConcRefinementThresholdStep;
    3.43    }
    3.44  }
    3.45  
    3.46  int ConcurrentG1Refine::thread_num() {
    3.47 -  return MAX2<int>((G1ParallelRSetThreads > 0) ? G1ParallelRSetThreads : ParallelGCThreads, 1);
    3.48 +  return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
    3.49  }
    3.50  
    3.51  void ConcurrentG1Refine::init() {
     4.1 --- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Mon Mar 01 12:12:35 2010 -0800
     4.2 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Tue Mar 02 13:57:46 2010 -0800
     4.3 @@ -39,7 +39,8 @@
     4.4    * running. If the length becomes red (max queue length) the mutators start
     4.5    * processing the buffers.
     4.6    *
     4.7 -  * There are some interesting cases (with G1AdaptiveConcRefine turned off):
     4.8 +  * There are some interesting cases (when G1UseAdaptiveConcRefinement
     4.9 +  * is turned off):
    4.10    * 1) green = yellow = red = 0. In this case the mutator will process all
    4.11    *    buffers. Except for those that are created by the deferred updates
    4.12    *    machinery during a collection.
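
The renamed refinement-zone flags default ergonomically in the ConcurrentG1Refine constructor above: green follows ParallelGCThreads, yellow is three times green, and red is twice yellow. A hedged standalone sketch of just that default relationship (hypothetical helper, not HotSpot code):

    #include <algorithm>

    struct RefinementZones { int green; int yellow; int red; };

    // Default zone selection when none of the G1ConcRefinement*Zone flags are
    // set, mirroring the FLAG_SET_DEFAULT calls shown above.
    RefinementZones default_refinement_zones(int parallel_gc_threads) {
      RefinementZones z;
      z.green  = std::max(parallel_gc_threads, 1); // G1ConcRefinementGreenZone
      z.yellow = z.green * 3;                      // G1ConcRefinementYellowZone
      z.red    = z.yellow * 2;                     // G1ConcRefinementRedZone
      return z;
    }
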
     5.1 --- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Mon Mar 01 12:12:35 2010 -0800
     5.2 +++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Tue Mar 02 13:57:46 2010 -0800
     5.3 @@ -107,7 +107,7 @@
     5.4      if (_should_terminate) {
     5.5        break;
     5.6      }
     5.7 -    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefineServiceInterval);
     5.8 +    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefinementServiceIntervalMillis);
     5.9    }
    5.10  }
    5.11  
    5.12 @@ -127,7 +127,7 @@
    5.13  void ConcurrentG1RefineThread::activate() {
    5.14    MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
    5.15    if (_worker_id > 0) {
    5.16 -    if (G1TraceConcurrentRefinement) {
    5.17 +    if (G1TraceConcRefinement) {
    5.18        DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
    5.19        gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",
    5.20                               _worker_id, _threshold, (int)dcqs.completed_buffers_num());
    5.21 @@ -143,7 +143,7 @@
    5.22  void ConcurrentG1RefineThread::deactivate() {
    5.23    MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
    5.24    if (_worker_id > 0) {
    5.25 -    if (G1TraceConcurrentRefinement) {
    5.26 +    if (G1TraceConcRefinement) {
    5.27        DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
    5.28        gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",
    5.29                               _worker_id, _deactivation_threshold, (int)dcqs.completed_buffers_num());
    5.30 @@ -218,9 +218,13 @@
    5.31  
    5.32  
    5.33  void ConcurrentG1RefineThread::yield() {
    5.34 -  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield");
    5.35 +  if (G1TraceConcRefinement) {
    5.36 +    gclog_or_tty->print_cr("G1-Refine-yield");
    5.37 +  }
    5.38    _sts.yield("G1 refine");
    5.39 -  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield-end");
    5.40 +  if (G1TraceConcRefinement) {
    5.41 +    gclog_or_tty->print_cr("G1-Refine-yield-end");
    5.42 +  }
    5.43  }
    5.44  
    5.45  void ConcurrentG1RefineThread::stop() {
    5.46 @@ -241,7 +245,9 @@
    5.47        Terminator_lock->wait();
    5.48      }
    5.49    }
    5.50 -  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-stop");
    5.51 +  if (G1TraceConcRefinement) {
    5.52 +    gclog_or_tty->print_cr("G1-Refine-stop");
    5.53 +  }
    5.54  }
    5.55  
    5.56  void ConcurrentG1RefineThread::print() const {
     6.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Mar 01 12:12:35 2010 -0800
     6.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Mar 02 13:57:46 2010 -0800
     6.3 @@ -447,7 +447,7 @@
     6.4      gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
     6.5                             "heap end = "PTR_FORMAT, _heap_start, _heap_end);
     6.6  
     6.7 -  _markStack.allocate(G1MarkStackSize);
     6.8 +  _markStack.allocate(MarkStackSize);
     6.9    _regionStack.allocate(G1MarkRegionStackSize);
    6.10  
    6.11    // Create & start a ConcurrentMark thread.
    6.12 @@ -461,7 +461,7 @@
    6.13    assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
    6.14  
    6.15    SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
    6.16 -  satb_qs.set_buffer_size(G1SATBLogBufferSize);
    6.17 +  satb_qs.set_buffer_size(G1SATBBufferSize);
    6.18  
    6.19    int size = (int) MAX2(ParallelGCThreads, (size_t)1);
    6.20    _par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size);
    6.21 @@ -483,8 +483,8 @@
    6.22      _accum_task_vtime[i] = 0.0;
    6.23    }
    6.24  
    6.25 -  if (ParallelMarkingThreads > ParallelGCThreads) {
    6.26 -    vm_exit_during_initialization("Can't have more ParallelMarkingThreads "
    6.27 +  if (ConcGCThreads > ParallelGCThreads) {
    6.28 +    vm_exit_during_initialization("Can't have more ConcGCThreads "
    6.29                                    "than ParallelGCThreads.");
    6.30    }
    6.31    if (ParallelGCThreads == 0) {
    6.32 @@ -494,11 +494,11 @@
    6.33      _sleep_factor             = 0.0;
    6.34      _marking_task_overhead    = 1.0;
    6.35    } else {
    6.36 -    if (ParallelMarkingThreads > 0) {
    6.37 -      // notice that ParallelMarkingThreads overwrites G1MarkingOverheadPercent
    6.38 +    if (ConcGCThreads > 0) {
    6.39 +      // notice that ConcGCThreads overwrites G1MarkingOverheadPercent
    6.40        // if both are set
    6.41  
    6.42 -      _parallel_marking_threads = ParallelMarkingThreads;
    6.43 +      _parallel_marking_threads = ConcGCThreads;
    6.44        _sleep_factor             = 0.0;
    6.45        _marking_task_overhead    = 1.0;
    6.46      } else if (G1MarkingOverheadPercent > 0) {
     7.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Mar 01 12:12:35 2010 -0800
     7.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Mar 02 13:57:46 2010 -0800
     7.3 @@ -583,7 +583,7 @@
     7.4             res->zero_fill_state() == HeapRegion::Allocated)),
     7.5           "Non-young alloc Regions must be zero filled (and non-H)");
     7.6  
     7.7 -  if (G1PrintRegions) {
     7.8 +  if (G1PrintHeapRegions) {
     7.9      if (res != NULL) {
    7.10        gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
    7.11                               "top "PTR_FORMAT,
    7.12 @@ -2477,7 +2477,7 @@
    7.13    if (G1SummarizeRSetStats) {
    7.14      g1_rem_set()->print_summary_info();
    7.15    }
    7.16 -  if (G1SummarizeConcurrentMark) {
    7.17 +  if (G1SummarizeConcMark) {
    7.18      concurrent_mark()->print_summary_info();
    7.19    }
    7.20    if (G1SummarizeZFStats) {
    7.21 @@ -3480,7 +3480,7 @@
    7.22    HeapRegion* r = heap_region_containing(old);
    7.23    if (!r->evacuation_failed()) {
    7.24      r->set_evacuation_failed(true);
    7.25 -    if (G1PrintRegions) {
    7.26 +    if (G1PrintHeapRegions) {
    7.27        gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
    7.28                            "["PTR_FORMAT","PTR_FORMAT")\n",
    7.29                            r, r->bottom(), r->end());
    7.30 @@ -4002,9 +4002,7 @@
    7.31        _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
    7.32        _g1h->g1_policy()->record_termination_time(i, term_ms);
    7.33      }
    7.34 -    if (G1UseSurvivorSpaces) {
    7.35 -      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
    7.36 -    }
    7.37 +    _g1h->g1_policy()->record_thread_age_table(pss.age_table());
    7.38      _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
    7.39  
    7.40      // Clean up any par-expanded rem sets.
     8.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Mar 01 12:12:35 2010 -0800
     8.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Mar 02 13:57:46 2010 -0800
     8.3 @@ -270,14 +270,10 @@
     8.4    _concurrent_mark_cleanup_times_ms->add(0.20);
     8.5    _tenuring_threshold = MaxTenuringThreshold;
     8.6  
     8.7 -  if (G1UseSurvivorSpaces) {
     8.8 -    // if G1FixedSurvivorSpaceSize is 0 which means the size is not
     8.9 -    // fixed, then _max_survivor_regions will be calculated at
    8.10 -    // calculate_young_list_target_config during initialization
    8.11 -    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
    8.12 -  } else {
    8.13 -    _max_survivor_regions = 0;
    8.14 -  }
    8.15 +  // if G1FixedSurvivorSpaceSize is 0 which means the size is not
    8.16 +  // fixed, then _max_survivor_regions will be calculated at
    8.17 +  // calculate_young_list_target_config during initialization
    8.18 +  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
    8.19  
    8.20    initialize_all();
    8.21  }
    8.22 @@ -296,28 +292,54 @@
    8.23    CollectorPolicy::initialize_flags();
    8.24  }
    8.25  
    8.26 +// The easiest way to deal with the parsing of the NewSize /
    8.27 +// MaxNewSize / etc. parameteres is to re-use the code in the
    8.28 +// TwoGenerationCollectorPolicy class. This is similar to what
    8.29 +// ParallelScavenge does with its GenerationSizer class (see
    8.30 +// ParallelScavengeHeap::initialize()). We might change this in the
    8.31 +// future, but it's a good start.
    8.32 +class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
    8.33 +  size_t size_to_region_num(size_t byte_size) {
    8.34 +    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
    8.35 +  }
    8.36 +
    8.37 +public:
    8.38 +  G1YoungGenSizer() {
    8.39 +    initialize_flags();
    8.40 +    initialize_size_info();
    8.41 +  }
    8.42 +
    8.43 +  size_t min_young_region_num() {
    8.44 +    return size_to_region_num(_min_gen0_size);
    8.45 +  }
    8.46 +  size_t initial_young_region_num() {
    8.47 +    return size_to_region_num(_initial_gen0_size);
    8.48 +  }
    8.49 +  size_t max_young_region_num() {
    8.50 +    return size_to_region_num(_max_gen0_size);
    8.51 +  }
    8.52 +};
    8.53 +
    8.54  void G1CollectorPolicy::init() {
    8.55    // Set aside an initial future to_space.
    8.56    _g1 = G1CollectedHeap::heap();
    8.57 -  size_t regions = Universe::heap()->capacity() / HeapRegion::GrainBytes;
    8.58  
    8.59    assert(Heap_lock->owned_by_self(), "Locking discipline.");
    8.60  
    8.61 -  if (G1SteadyStateUsed < 50) {
    8.62 -    vm_exit_during_initialization("G1SteadyStateUsed must be at least 50%.");
    8.63 -  }
    8.64 -
    8.65    initialize_gc_policy_counters();
    8.66  
    8.67    if (G1Gen) {
    8.68      _in_young_gc_mode = true;
    8.69  
    8.70 -    if (G1YoungGenSize == 0) {
    8.71 +    G1YoungGenSizer sizer;
    8.72 +    size_t initial_region_num = sizer.initial_young_region_num();
    8.73 +
    8.74 +    if (UseAdaptiveSizePolicy) {
    8.75        set_adaptive_young_list_length(true);
    8.76        _young_list_fixed_length = 0;
    8.77      } else {
    8.78        set_adaptive_young_list_length(false);
    8.79 -      _young_list_fixed_length = (G1YoungGenSize / HeapRegion::GrainBytes);
    8.80 +      _young_list_fixed_length = initial_region_num;
    8.81      }
    8.82       _free_regions_at_end_of_collection = _g1->free_regions();
    8.83       _scan_only_regions_at_end_of_collection = 0;
    8.84 @@ -455,7 +477,7 @@
    8.85    guarantee( adaptive_young_list_length(), "pre-condition" );
    8.86  
    8.87    double start_time_sec = os::elapsedTime();
    8.88 -  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1MinReservePercent);
    8.89 +  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
    8.90    min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
    8.91    size_t reserve_regions =
    8.92      (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
    8.93 @@ -1110,10 +1132,7 @@
    8.94    size_t short_lived_so_length = _young_list_so_prefix_length;
    8.95    _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
    8.96    tag_scan_only(short_lived_so_length);
    8.97 -
    8.98 -  if (G1UseSurvivorSpaces) {
    8.99 -    _survivors_age_table.clear();
   8.100 -  }
   8.101 +  _survivors_age_table.clear();
   8.102  
   8.103    assert( verify_young_ages(), "region age verification" );
   8.104  }
   8.105 @@ -1432,7 +1451,7 @@
   8.106        record_concurrent_mark_init_end_pre(0.0);
   8.107  
   8.108      size_t min_used_targ =
   8.109 -      (_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta);
   8.110 +      (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
   8.111  
   8.112      if (cur_used_bytes > min_used_targ) {
   8.113        if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) {
   8.114 @@ -1916,7 +1935,7 @@
   8.115    calculate_young_list_target_config();
   8.116  
   8.117    // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   8.118 -  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSUpdatePauseFractionPercent / 100.0;
   8.119 +  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   8.120    adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
   8.121  
   8.122    // </NEW PREDICTION>
   8.123 @@ -1932,7 +1951,7 @@
   8.124    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   8.125    ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
   8.126  
   8.127 -  if (G1AdaptiveConcRefine) {
   8.128 +  if (G1UseAdaptiveConcRefinement) {
   8.129      const int k_gy = 3, k_gr = 6;
   8.130      const double inc_k = 1.1, dec_k = 0.9;
   8.131  
   8.132 @@ -2607,9 +2626,6 @@
   8.133  // Calculates survivor space parameters.
   8.134  void G1CollectorPolicy::calculate_survivors_policy()
   8.135  {
   8.136 -  if (!G1UseSurvivorSpaces) {
   8.137 -    return;
   8.138 -  }
   8.139    if (G1FixedSurvivorSpaceSize == 0) {
   8.140      _max_survivor_regions = _young_list_target_length / SurvivorRatio;
   8.141    } else {
   8.142 @@ -2628,13 +2644,6 @@
   8.143  G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
   8.144                                                                 word_size) {
   8.145    assert(_g1->regions_accounted_for(), "Region leakage!");
   8.146 -  // Initiate a pause when we reach the steady-state "used" target.
   8.147 -  size_t used_hard = (_g1->capacity() / 100) * G1SteadyStateUsed;
   8.148 -  size_t used_soft =
   8.149 -   MAX2((_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta),
   8.150 -        used_hard/2);
   8.151 -  size_t used = _g1->used();
   8.152 -
   8.153    double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
   8.154  
   8.155    size_t young_list_length = _g1->young_list_length();
   8.156 @@ -2867,7 +2876,7 @@
   8.157  // estimate of the number of live bytes.
   8.158  void G1CollectorPolicy::
   8.159  add_to_collection_set(HeapRegion* hr) {
   8.160 -  if (G1PrintRegions) {
   8.161 +  if (G1PrintHeapRegions) {
   8.162      gclog_or_tty->print_cr("added region to cset %d:["PTR_FORMAT", "PTR_FORMAT"], "
   8.163                    "top "PTR_FORMAT", young %s",
   8.164                    hr->hrs_index(), hr->bottom(), hr->end(),
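
The G1YoungGenSizer helper added earlier in this file's hunks converts the gen0 sizes computed by TwoGenerationCollectorPolicy into region counts. A hedged worked example of its size_to_region_num() logic, assuming a hypothetical 1 MB region (grain) size:

    #include <algorithm>
    #include <cstddef>

    // Mirrors G1YoungGenSizer::size_to_region_num(): at least one region,
    // otherwise the byte size divided by the region (grain) size.
    size_t size_to_region_num(size_t byte_size, size_t grain_bytes) {
      return std::max((size_t)1, byte_size / grain_bytes);
    }

    // With a hypothetical 1 MB region: a 64 MB young gen maps to 64 regions,
    // and anything smaller than one region still maps to a single region.
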
     9.1 --- a/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Mon Mar 01 12:12:35 2010 -0800
     9.2 +++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Tue Mar 02 13:57:46 2010 -0800
     9.3 @@ -88,13 +88,13 @@
     9.4      //     the time slice than what's allowed)
     9.5      //   consolidate the two entries with the minimum gap between them
     9.6      //     (this might allow less GC time than what's allowed)
     9.7 -    guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
     9.8 -              "array full, currently we can't recover unless +G1ForgetfulMMUTracker");
     9.9 +    guarantee(NOT_PRODUCT(ScavengeALot ||) G1UseFixedWindowMMUTracker,
    9.10 +              "array full, currently we can't recover unless +G1UseFixedWindowMMUTracker");
    9.11      // In the case where ScavengeALot is true, such overflow is not
    9.12      // uncommon; in such cases, we can, without much loss of precision
    9.13      // or performance (we are GC'ing most of the time anyway!),
    9.14      // simply overwrite the oldest entry in the tracker: this
    9.15 -    // is also the behaviour when G1ForgetfulMMUTracker is enabled.
    9.16 +    // is also the behaviour when G1UseFixedWindowMMUTracker is enabled.
    9.17      _head_index = trim_index(_head_index + 1);
    9.18      assert(_head_index == _tail_index, "Because we have a full circular buffer");
    9.19      _tail_index = trim_index(_tail_index + 1);
    10.1 --- a/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Mon Mar 01 12:12:35 2010 -0800
    10.2 +++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Tue Mar 02 13:57:46 2010 -0800
    10.3 @@ -101,7 +101,7 @@
    10.4    // If the array is full, an easy fix is to look for the pauses with
    10.5    // the shortest gap between them and consolidate them.
    10.6    // For now, we have taken the expedient alternative of forgetting
    10.7 -  // the oldest entry in the event that +G1ForgetfulMMUTracker, thus
    10.8 +  // the oldest entry in the event that +G1UseFixedWindowMMUTracker, thus
    10.9    // potentially violating MMU specs for some time thereafter.
   10.10  
   10.11    G1MMUTrackerQueueElem _array[QueueLength];
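
With the renamed G1UseFixedWindowMMUTracker flag, a full tracker simply overwrites its oldest entry rather than failing. A minimal sketch of that fixed-window behaviour (hypothetical standalone ring buffer, not the G1MMUTracker class itself):

    #include <cstddef>

    template <size_t N>
    struct FixedWindow {
      double entries[N];
      size_t head;   // next slot to write
      size_t count;  // number of valid entries, at most N

      FixedWindow() : head(0), count(0) {}

      // When the window is full, this write silently drops the oldest entry --
      // the +G1UseFixedWindowMMUTracker behaviour described above.
      void add(double v) {
        entries[head] = v;
        head = (head + 1) % N;
        if (count < N) {
          count++;
        }
      }
    };
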
    11.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Mon Mar 01 12:12:35 2010 -0800
    11.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Mar 02 13:57:46 2010 -0800
    11.3 @@ -467,7 +467,7 @@
    11.4      // and they are causing failures. When we resolve said race
    11.5      // conditions, we'll revert back to parallel remembered set
    11.6      // updating and scanning. See CRs 6677707 and 6677708.
    11.7 -    if (G1ParallelRSetUpdatingEnabled || (worker_i == 0)) {
    11.8 +    if (G1UseParallelRSetUpdating || (worker_i == 0)) {
    11.9        updateRS(worker_i);
   11.10        scanNewRefsRS(oc, worker_i);
   11.11      } else {
   11.12 @@ -476,7 +476,7 @@
   11.13        _g1p->record_update_rs_time(worker_i, 0.0);
   11.14        _g1p->record_scan_new_refs_time(worker_i, 0.0);
   11.15      }
   11.16 -    if (G1ParallelRSetScanningEnabled || (worker_i == 0)) {
   11.17 +    if (G1UseParallelRSetScanning || (worker_i == 0)) {
   11.18        scanRS(oc, worker_i);
   11.19      } else {
   11.20        _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
    12.1 --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Mon Mar 01 12:12:35 2010 -0800
    12.2 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Tue Mar 02 13:57:46 2010 -0800
    12.3 @@ -37,9 +37,6 @@
    12.4    develop(intx, G1MarkingOverheadPercent, 0,                                \
    12.5            "Overhead of concurrent marking")                                 \
    12.6                                                                              \
    12.7 -  product(uintx, G1YoungGenSize, 0,                                         \
    12.8 -          "Size of the G1 young generation, 0 is the adaptive policy")      \
    12.9 -                                                                            \
   12.10    develop(bool, G1Gen, true,                                                \
   12.11            "If true, it will enable the generational G1")                    \
   12.12                                                                              \
   12.13 @@ -70,7 +67,7 @@
   12.14    develop(intx, G1PausesBtwnConcMark, -1,                                   \
   12.15            "If positive, fixed number of pauses between conc markings")      \
   12.16                                                                              \
   12.17 -  diagnostic(bool, G1SummarizeConcurrentMark, false,                        \
   12.18 +  diagnostic(bool, G1SummarizeConcMark, false,                              \
   12.19            "Summarize concurrent mark info")                                 \
   12.20                                                                              \
   12.21    diagnostic(bool, G1SummarizeRSetStats, false,                             \
   12.22 @@ -85,12 +82,9 @@
   12.23    diagnostic(bool, G1SummarizeZFStats, false,                               \
   12.24            "Summarize zero-filling info")                                    \
   12.25                                                                              \
   12.26 -  diagnostic(bool, G1TraceConcurrentRefinement, false,                      \
   12.27 +  diagnostic(bool, G1TraceConcRefinement, false,                            \
   12.28            "Trace G1 concurrent refinement")                                 \
   12.29                                                                              \
   12.30 -  product(intx, G1MarkStackSize, 2 * 1024 * 1024,                           \
   12.31 -          "Size of the mark stack for concurrent marking.")                 \
   12.32 -                                                                            \
   12.33    product(intx, G1MarkRegionStackSize, 1024 * 1024,                         \
   12.34            "Size of the region stack for concurrent marking.")               \
   12.35                                                                              \
   12.36 @@ -100,20 +94,13 @@
   12.37    develop(intx, G1ConcZFMaxRegions, 1,                                      \
   12.38            "Stop zero-filling when # of zf'd regions reaches")               \
   12.39                                                                              \
   12.40 -  product(intx, G1SteadyStateUsed, 90,                                      \
   12.41 -          "If non-0, try to maintain 'used' at this pct (of max)")          \
   12.42 -                                                                            \
   12.43 -  product(intx, G1SteadyStateUsedDelta, 30,                                 \
   12.44 -          "If G1SteadyStateUsed is non-0, then do pause this number of "    \
   12.45 -          "of percentage points earlier if no marking is in progress.")     \
   12.46 -                                                                            \
   12.47    develop(bool, G1SATBBarrierPrintNullPreVals, false,                       \
   12.48            "If true, count frac of ptr writes with null pre-vals.")          \
   12.49                                                                              \
   12.50 -  product(intx, G1SATBLogBufferSize, 1*K,                                   \
   12.51 +  product(intx, G1SATBBufferSize, 1*K,                                      \
   12.52            "Number of entries in an SATB log buffer.")                       \
   12.53                                                                              \
   12.54 -  product(intx, G1SATBProcessCompletedThreshold, 20,                        \
   12.55 +  develop(intx, G1SATBProcessCompletedThreshold, 20,                        \
   12.56            "Number of completed buffers that triggers log processing.")      \
   12.57                                                                              \
   12.58    develop(intx, G1ExtraRegionSurvRate, 33,                                  \
   12.59 @@ -127,7 +114,7 @@
   12.60    develop(bool, G1SATBPrintStubs, false,                                    \
   12.61            "If true, print generated stubs for the SATB barrier")            \
   12.62                                                                              \
   12.63 -  product(intx, G1ExpandByPercentOfAvailable, 20,                           \
   12.64 +  experimental(intx, G1ExpandByPercentOfAvailable, 20,                      \
   12.65            "When expanding, % of uncommitted space to claim.")               \
   12.66                                                                              \
   12.67    develop(bool, G1RSBarrierRegionFilter, true,                              \
   12.68 @@ -165,36 +152,36 @@
   12.69    product(intx, G1UpdateBufferSize, 256,                                    \
   12.70            "Size of an update buffer")                                       \
   12.71                                                                              \
   12.72 -  product(intx, G1ConcRefineYellowZone, 0,                                  \
   12.73 +  product(intx, G1ConcRefinementYellowZone, 0,                              \
   12.74            "Number of enqueued update buffers that will "                    \
   12.75            "trigger concurrent processing. Will be selected ergonomically "  \
   12.76            "by default.")                                                    \
   12.77                                                                              \
   12.78 -  product(intx, G1ConcRefineRedZone, 0,                                     \
   12.79 +  product(intx, G1ConcRefinementRedZone, 0,                                 \
   12.80            "Maximum number of enqueued update buffers before mutator "       \
   12.81            "threads start processing new ones instead of enqueueing them. "  \
   12.82            "Will be selected ergonomically by default. Zero will disable "   \
   12.83            "concurrent processing.")                                         \
   12.84                                                                              \
   12.85 -  product(intx, G1ConcRefineGreenZone, 0,                                   \
   12.86 +  product(intx, G1ConcRefinementGreenZone, 0,                               \
   12.87            "The number of update buffers that are left in the queue by the " \
   12.88            "concurrent processing threads. Will be selected ergonomically "  \
   12.89            "by default.")                                                    \
   12.90                                                                              \
   12.91 -  product(intx, G1ConcRefineServiceInterval, 300,                           \
   12.92 +  product(intx, G1ConcRefinementServiceIntervalMillis, 300,                 \
   12.93            "The last concurrent refinement thread wakes up every "           \
   12.94            "specified number of milliseconds to do miscellaneous work.")     \
   12.95                                                                              \
   12.96 -  product(intx, G1ConcRefineThresholdStep, 0,                               \
   12.97 +  product(intx, G1ConcRefinementThresholdStep, 0,                           \
   12.98            "Each time the rset update queue increases by this amount "       \
   12.99            "activate the next refinement thread if available. "              \
  12.100            "Will be selected ergonomically by default.")                     \
  12.101                                                                              \
  12.102 -  product(intx, G1RSUpdatePauseFractionPercent, 10,                         \
  12.103 +  product(intx, G1RSetUpdatingPauseTimePercent, 10,                         \
   12.104 +          "A target percentage of time that is allowed to be spent on "     \
   12.105 +          "processing RS update buffers during the collection pause.")      \
  12.106                                                                              \
  12.107 -  product(bool, G1AdaptiveConcRefine, true,                                 \
  12.108 +  product(bool, G1UseAdaptiveConcRefinement, true,                          \
  12.109            "Select green, yellow and red zones adaptively to meet the "      \
   12.110 +          "pause requirements.")                                            \
  12.111                                                                              \
  12.112 @@ -245,15 +232,15 @@
  12.113            "the number of regions for which we'll print a surv rate "        \
  12.114            "summary.")                                                       \
  12.115                                                                              \
  12.116 -  product(bool, G1UseScanOnlyPrefix, false,                                 \
  12.117 +  develop(bool, G1UseScanOnlyPrefix, false,                                 \
  12.118            "It determines whether the system will calculate an optimum "     \
  12.119            "scan-only set.")                                                 \
  12.120                                                                              \
  12.121 -  product(intx, G1MinReservePercent, 10,                                    \
  12.122 +  product(intx, G1ReservePercent, 10,                                       \
  12.123            "It determines the minimum reserve we should have in the heap "   \
  12.124            "to minimize the probability of promotion failure.")              \
  12.125                                                                              \
  12.126 -  diagnostic(bool, G1PrintRegions, false,                                   \
  12.127 +  diagnostic(bool, G1PrintHeapRegions, false,                               \
  12.128            "If set G1 will print information on which regions are being "    \
  12.129            "allocated and which are reclaimed.")                             \
  12.130                                                                              \
  12.131 @@ -263,9 +250,6 @@
  12.132    develop(bool, G1HRRSFlushLogBuffersOnVerify, false,                       \
  12.133            "Forces flushing of log buffers before verification.")            \
  12.134                                                                              \
  12.135 -  product(bool, G1UseSurvivorSpaces, true,                                  \
  12.136 -          "When true, use survivor space.")                                 \
  12.137 -                                                                            \
  12.138    develop(bool, G1FailOnFPError, false,                                     \
  12.139            "When set, G1 will fail when it encounters an FP 'error', "       \
  12.140            "so as to allow debugging")                                       \
  12.141 @@ -280,21 +264,21 @@
  12.142            "If non-0 is the size of the G1 survivor space, "                 \
  12.143            "otherwise SurvivorRatio is used to determine the size")          \
  12.144                                                                              \
  12.145 -  product(bool, G1ForgetfulMMUTracker, false,                               \
  12.146 +  product(bool, G1UseFixedWindowMMUTracker, false,                          \
  12.147            "If the MMU tracker's memory is full, forget the oldest entry")   \
  12.148                                                                              \
  12.149    product(uintx, G1HeapRegionSize, 0,                                       \
  12.150            "Size of the G1 regions.")                                        \
  12.151                                                                              \
  12.152 -  experimental(bool, G1ParallelRSetUpdatingEnabled, false,                  \
  12.153 +  experimental(bool, G1UseParallelRSetUpdating, false,                      \
  12.154            "Enables the parallelization of remembered set updating "         \
  12.155            "during evacuation pauses")                                       \
  12.156                                                                              \
  12.157 -  experimental(bool, G1ParallelRSetScanningEnabled, false,                  \
  12.158 +  experimental(bool, G1UseParallelRSetScanning, false,                      \
  12.159            "Enables the parallelization of remembered set scanning "         \
  12.160            "during evacuation pauses")                                       \
  12.161                                                                              \
  12.162 -  product(uintx, G1ParallelRSetThreads, 0,                                  \
  12.163 +  product(uintx, G1ConcRefinementThreads, 0,                                \
  12.164            "If non-0 is the number of parallel rem set update threads, "     \
  12.165            "otherwise the value is determined ergonomically.")               \
  12.166                                                                              \
    13.1 --- a/src/share/vm/includeDB_core	Mon Mar 01 12:12:35 2010 -0800
    13.2 +++ b/src/share/vm/includeDB_core	Tue Mar 02 13:57:46 2010 -0800
    13.3 @@ -176,6 +176,7 @@
    13.4  arguments.cpp                           oop.inline.hpp
    13.5  arguments.cpp                           os_<os_family>.inline.hpp
    13.6  arguments.cpp                           referenceProcessor.hpp
    13.7 +arguments.cpp                           taskqueue.hpp
    13.8  arguments.cpp                           universe.inline.hpp
    13.9  arguments.cpp                           vm_version_<arch>.hpp
   13.10  
    14.1 --- a/src/share/vm/runtime/arguments.cpp	Mon Mar 01 12:12:35 2010 -0800
    14.2 +++ b/src/share/vm/runtime/arguments.cpp	Tue Mar 02 13:57:46 2010 -0800
    14.3 @@ -1203,6 +1203,11 @@
    14.4    if (!FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
    14.5      CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
    14.6    }
    14.7 +  if (PrintGCDetails && Verbose) {
    14.8 +    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
    14.9 +      MarkStackSize / K, MarkStackSizeMax / K);
   14.10 +    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
   14.11 +  }
   14.12  }
   14.13  #endif // KERNEL
   14.14  
   14.15 @@ -1339,6 +1344,17 @@
   14.16    if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
   14.17      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
   14.18    }
   14.19 +
   14.20 +  if (FLAG_IS_DEFAULT(MarkStackSize)) {
   14.21 +    // Size as a multiple of TaskQueueSuper::N which is larger
   14.22 +    // for 64-bit.
   14.23 +    FLAG_SET_DEFAULT(MarkStackSize, 128 * TaskQueueSuper::total_size());
   14.24 +  }
   14.25 +  if (PrintGCDetails && Verbose) {
   14.26 +    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
   14.27 +      MarkStackSize / K, MarkStackSizeMax / K);
   14.28 +    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
   14.29 +  }
   14.30  }
   14.31  
   14.32  void Arguments::set_heap_size() {
   14.33 @@ -1737,6 +1753,11 @@
   14.34      status = false;
   14.35    }
   14.36  
   14.37 +  if (UseG1GC) {
   14.38 +    status = status && verify_percentage(InitiatingHeapOccupancyPercent,
   14.39 +                                         "InitiatingHeapOccupancyPercent");
   14.40 +  }
   14.41 +
   14.42    status = status && verify_interval(RefDiscoveryPolicy,
   14.43                                       ReferenceProcessor::DiscoveryPolicyMin,
   14.44                                       ReferenceProcessor::DiscoveryPolicyMax,
   14.45 @@ -1795,6 +1816,29 @@
   14.46    return false;
   14.47  }
   14.48  
   14.49 +bool Arguments::parse_uintx(const char* value,
   14.50 +                            uintx* uintx_arg,
   14.51 +                            uintx min_size) {
   14.52 +
   14.53 +  // Check the sign first since atomull() parses only unsigned values.
   14.54 +  bool value_is_positive = !(*value == '-');
   14.55 +
   14.56 +  if (value_is_positive) {
   14.57 +    julong n;
   14.58 +    bool good_return = atomull(value, &n);
   14.59 +    if (good_return) {
   14.60 +      bool above_minimum = n >= min_size;
   14.61 +      bool value_is_too_large = n > max_uintx;
   14.62 +
   14.63 +      if (above_minimum && !value_is_too_large) {
   14.64 +        *uintx_arg = n;
   14.65 +        return true;
   14.66 +      }
   14.67 +    }
   14.68 +  }
   14.69 +  return false;
   14.70 +}
   14.71 +
   14.72  Arguments::ArgsRange Arguments::parse_memory_size(const char* s,
   14.73                                                    julong* long_arg,
   14.74                                                    julong min_size) {
   14.75 @@ -2453,6 +2497,37 @@
   14.76        jio_fprintf(defaultStream::error_stream(),
   14.77                    "Please use -XX:YoungPLABSize in place of "
   14.78                    "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
   14.79 +    } else if (match_option(option, "-XX:CMSMarkStackSize=", &tail) ||
   14.80 +               match_option(option, "-XX:G1MarkStackSize=", &tail)) {
   14.81 +      julong stack_size = 0;
   14.82 +      ArgsRange errcode = parse_memory_size(tail, &stack_size, 1);
   14.83 +      if (errcode != arg_in_range) {
   14.84 +        jio_fprintf(defaultStream::error_stream(),
   14.85 +                    "Invalid mark stack size: %s\n", option->optionString);
   14.86 +        describe_range_error(errcode);
   14.87 +        return JNI_EINVAL;
   14.88 +      }
   14.89 +      FLAG_SET_CMDLINE(uintx, MarkStackSize, stack_size);
   14.90 +    } else if (match_option(option, "-XX:CMSMarkStackSizeMax=", &tail)) {
   14.91 +      julong max_stack_size = 0;
   14.92 +      ArgsRange errcode = parse_memory_size(tail, &max_stack_size, 1);
   14.93 +      if (errcode != arg_in_range) {
   14.94 +        jio_fprintf(defaultStream::error_stream(),
   14.95 +                    "Invalid maximum mark stack size: %s\n",
   14.96 +                    option->optionString);
   14.97 +        describe_range_error(errcode);
   14.98 +        return JNI_EINVAL;
   14.99 +      }
  14.100 +      FLAG_SET_CMDLINE(uintx, MarkStackSizeMax, max_stack_size);
  14.101 +    } else if (match_option(option, "-XX:ParallelMarkingThreads=", &tail) ||
  14.102 +               match_option(option, "-XX:ParallelCMSThreads=", &tail)) {
  14.103 +      uintx conc_threads = 0;
  14.104 +      if (!parse_uintx(tail, &conc_threads, 1)) {
  14.105 +        jio_fprintf(defaultStream::error_stream(),
  14.106 +                    "Invalid concurrent threads: %s\n", option->optionString);
  14.107 +        return JNI_EINVAL;
  14.108 +      }
  14.109 +      FLAG_SET_CMDLINE(uintx, ConcGCThreads, conc_threads);
  14.110      } else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
  14.111        // Skip -XX:Flags= since that case has already been handled
  14.112        if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) {
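
The deprecated -XX:ParallelMarkingThreads= and -XX:ParallelCMSThreads= options are routed through the new parse_uintx() helper into ConcGCThreads, while -XX:CMSMarkStackSize=/-XX:G1MarkStackSize= feed MarkStackSize. A standalone sketch of the helper's contract (plain C++ using strtoull instead of HotSpot's atomull; names are illustrative only):

    #include <cstdlib>
    #include <climits>

    // Accepts only an unsigned decimal value >= min_size; rejects a leading '-',
    // trailing junk, and values that do not fit in unsigned long.
    bool parse_uintx_sketch(const char* value, unsigned long* out, unsigned long min_size) {
      if (*value == '-') {
        return false;                               // negative values rejected up front
      }
      char* end = NULL;
      unsigned long long n = strtoull(value, &end, 10);
      if (end == value || *end != '\0') {
        return false;                               // not a pure number
      }
      if (n < min_size || n > (unsigned long long)ULONG_MAX) {
        return false;                               // below the minimum or too large
      }
      *out = (unsigned long)n;
      return true;
    }

    // parse_uintx_sketch("4", &v, 1)  -> true,  v == 4
    // parse_uintx_sketch("-2", &v, 1) -> false (sign rejected, as in the code above)
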
    15.1 --- a/src/share/vm/runtime/arguments.hpp	Mon Mar 01 12:12:35 2010 -0800
    15.2 +++ b/src/share/vm/runtime/arguments.hpp	Tue Mar 02 13:57:46 2010 -0800
    15.3 @@ -343,6 +343,12 @@
    15.4    static ArgsRange check_memory_size(julong size, julong min_size);
    15.5    static ArgsRange parse_memory_size(const char* s, julong* long_arg,
    15.6                                       julong min_size);
    15.7 +  // Parse a string for a unsigned integer.  Returns true if value
    15.8 +  // is an unsigned integer greater than or equal to the minimum
    15.9 +  // parameter passed and returns the value in uintx_arg.  Returns
   15.10 +  // false otherwise, with uintx_arg undefined.
   15.11 +  static bool parse_uintx(const char* value, uintx* uintx_arg,
   15.12 +                          uintx min_size);
   15.13  
   15.14    // methods to build strings from individual args
   15.15    static void build_jvm_args(const char* arg);
    16.1 --- a/src/share/vm/runtime/globals.hpp	Mon Mar 01 12:12:35 2010 -0800
    16.2 +++ b/src/share/vm/runtime/globals.hpp	Tue Mar 02 13:57:46 2010 -0800
    16.3 @@ -1245,9 +1245,6 @@
    16.4    product(uintx, ParallelGCThreads, 0,                                      \
    16.5            "Number of parallel threads parallel gc will use")                \
    16.6                                                                              \
    16.7 -  product(uintx, ParallelCMSThreads, 0,                                     \
    16.8 -          "Max number of threads CMS will use for concurrent work")         \
    16.9 -                                                                            \
   16.10    develop(bool, ParallelOldGCSplitALot, false,                              \
   16.11            "Provoke splitting (copying data from a young gen space to"       \
   16.12            "multiple destination spaces)")                                   \
   16.13 @@ -1258,8 +1255,8 @@
   16.14    develop(bool, TraceRegionTasksQueuing, false,                             \
   16.15            "Trace the queuing of the region tasks")                          \
   16.16                                                                              \
   16.17 -  product(uintx, ParallelMarkingThreads, 0,                                 \
   16.18 -          "Number of marking threads concurrent gc will use")               \
   16.19 +  product(uintx, ConcGCThreads, 0,                                          \
   16.20 +          "Number of threads concurrent gc will use")                       \
   16.21                                                                              \
   16.22    product(uintx, YoungPLABSize, 4096,                                       \
   16.23            "Size of young gen promotion labs (in HeapWords)")                \
   16.24 @@ -1535,11 +1532,11 @@
   16.25    develop(bool, CMSOverflowEarlyRestoration, false,                         \
   16.26            "Whether preserved marks should be restored early")               \
   16.27                                                                              \
   16.28 -  product(uintx, CMSMarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M),           \
   16.29 -          "Size of CMS marking stack")                                      \
   16.30 -                                                                            \
   16.31 -  product(uintx, CMSMarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M),       \
   16.32 -          "Max size of CMS marking stack")                                  \
   16.33 +  product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M),              \
   16.34 +          "Size of marking stack")                                          \
   16.35 +                                                                            \
   16.36 +  product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M),          \
   16.37 +          "Max size of marking stack")                                      \
   16.38                                                                              \
   16.39    notproduct(bool, CMSMarkStackOverflowALot, false,                         \
   16.40            "Whether we should simulate frequent marking stack / work queue"  \
   16.41 @@ -1724,6 +1721,13 @@
   16.42            "Percentage CMS generation occupancy to start a CMS collection "  \
   16.43            "cycle. A negative value means that CMSTriggerRatio is used")     \
   16.44                                                                              \
   16.45 +  product(uintx, InitiatingHeapOccupancyPercent, 45,                        \
   16.46 +          "Percentage of the (entire) heap occupancy to start a "           \
    16.47 +          "concurrent GC cycle. It is used by GCs that trigger a "          \
   16.48 +          "concurrent GC cycle based on the occupancy of the entire heap, " \
   16.49 +          "not just one of the generations (e.g., G1). A value of 0 "       \
   16.50 +          "denotes 'do constant GC cycles'.")                               \
   16.51 +                                                                            \
   16.52    product(intx, CMSInitiatingPermOccupancyFraction, -1,                     \
   16.53            "Percentage CMS perm generation occupancy to start a "            \
   16.54            "CMScollection cycle. A negative value means that "               \
    17.1 --- a/src/share/vm/utilities/taskqueue.hpp	Mon Mar 01 12:12:35 2010 -0800
    17.2 +++ b/src/share/vm/utilities/taskqueue.hpp	Tue Mar 02 13:57:46 2010 -0800
    17.3 @@ -133,6 +133,9 @@
    17.4    // Maximum number of elements allowed in the queue.  This is two less
    17.5    // than the actual queue size, for somewhat complicated reasons.
    17.6    uint max_elems() { return N - 2; }
    17.7 +
    17.8 +  // Total size of queue.
    17.9 +  static const uint total_size() { return N; }
   17.10  };
   17.11  
   17.12  template<class E> class GenericTaskQueue: public TaskQueueSuper {
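
With the new total_size() accessor, the G1 ergonomics in arguments.cpp size the mark stack as 128 times the task-queue capacity, so the larger 64-bit queue yields a proportionally larger default. A hedged arithmetic sketch (the capacity below is a hypothetical stand-in for TaskQueueSuper::N, not the actual build-time value):

    #include <cstdio>

    int main() {
      const unsigned queue_capacity = 16384;                     // hypothetical stand-in for N
      const unsigned default_mark_stack = 128 * queue_capacity;  // FLAG_SET_DEFAULT(MarkStackSize, ...)
      printf("MarkStackSize default: %u entries\n", default_mark_stack); // 2097152 for this example
      return 0;
    }
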
