src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp

changeset 3294:bca17e38de00
parent    2314:f95d63e2154a
child     4129:22b8d3d181d9
     1.1 --- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Tue Nov 22 04:47:10 2011 -0500
     1.2 +++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Tue Aug 09 10:16:01 2011 -0700
     1.3 @@ -28,8 +28,10 @@
     1.4  #include "memory/collectorPolicy.hpp"
     1.5  #include "runtime/timer.hpp"
     1.6  #include "utilities/ostream.hpp"
     1.7 +#include "utilities/workgroup.hpp"
     1.8  elapsedTimer AdaptiveSizePolicy::_minor_timer;
     1.9  elapsedTimer AdaptiveSizePolicy::_major_timer;
    1.10 +bool AdaptiveSizePolicy::_debug_perturbation = false;
    1.11  
    1.12  // The throughput goal is implemented as
    1.13  //      _throughput_goal = 1 - ( 1 / (1 + gc_cost_ratio))
    1.14 @@ -88,6 +90,134 @@
    1.15    _young_gen_policy_is_ready = false;
    1.16  }
    1.17  
    1.18 +//  If the number of GC threads was set on the command line,
    1.19 +// use it.
    1.20 +//  Else
    1.21 +//    Calculate the number of GC threads based on the number of Java threads.
    1.22 +//    Calculate the number of GC threads based on the size of the heap.
    1.23 +//    Use the larger.
    1.24 +
    1.25 +int AdaptiveSizePolicy::calc_default_active_workers(uintx total_workers,
    1.26 +                                            const uintx min_workers,
    1.27 +                                            uintx active_workers,
    1.28 +                                            uintx application_workers) {
    1.29 +  // If the user has specifically set the number of
    1.30 +  // GC threads, use them.
    1.31 +
    1.32 +  // If the user has turned off using a dynamic number of GC threads
     1.33 +  // or the user has requested a specific number, set the active
    1.34 +  // number of workers to all the workers.
    1.35 +
    1.36 +  uintx new_active_workers = total_workers;
    1.37 +  uintx prev_active_workers = active_workers;
    1.38 +  uintx active_workers_by_JT = 0;
    1.39 +  uintx active_workers_by_heap_size = 0;
    1.40 +
    1.41 +  // Always use at least min_workers but use up to
     1.42 +  // GCWorkersPerJavaThread * application threads.
    1.43 +  active_workers_by_JT =
    1.44 +    MAX2((uintx) GCWorkersPerJavaThread * application_workers,
    1.45 +         min_workers);
    1.46 +
    1.47 +  // Choose a number of GC threads based on the current size
    1.48 +  // of the heap.  This may be complicated because the size of
     1.49 +  // the heap depends on factors such as the throughput goal.
     1.50 +  // Still, a large heap should be collected by more GC threads.
    1.51 +  active_workers_by_heap_size =
    1.52 +      MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
    1.53 +
    1.54 +  uintx max_active_workers =
    1.55 +    MAX2(active_workers_by_JT, active_workers_by_heap_size);
    1.56 +
     1.57 +  // Limit the number of workers to the number created,
    1.58 +  // (workers()).
    1.59 +  new_active_workers = MIN2(max_active_workers,
    1.60 +                                (uintx) total_workers);
    1.61 +
    1.62 +  // Increase GC workers instantly but decrease them more
    1.63 +  // slowly.
    1.64 +  if (new_active_workers < prev_active_workers) {
    1.65 +    new_active_workers =
    1.66 +      MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
    1.67 +  }
    1.68 +
    1.69 +  // Check once more that the number of workers is within the limits.
    1.70 +  assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
    1.71 +  assert(new_active_workers >= min_workers, "Minimum workers not observed");
    1.72 +  assert(new_active_workers <= total_workers, "Total workers not observed");
    1.73 +
    1.74 +  if (ForceDynamicNumberOfGCThreads) {
    1.75 +    // Assume this is debugging and jiggle the number of GC threads.
    1.76 +    if (new_active_workers == prev_active_workers) {
    1.77 +      if (new_active_workers < total_workers) {
    1.78 +        new_active_workers++;
    1.79 +      } else if (new_active_workers > min_workers) {
    1.80 +        new_active_workers--;
    1.81 +      }
    1.82 +    }
    1.83 +    if (new_active_workers == total_workers) {
    1.84 +      if (_debug_perturbation) {
    1.85 +        new_active_workers =  min_workers;
    1.86 +      }
    1.87 +      _debug_perturbation = !_debug_perturbation;
    1.88 +    }
    1.89 +    assert((new_active_workers <= (uintx) ParallelGCThreads) &&
    1.90 +           (new_active_workers >= min_workers),
    1.91 +      "Jiggled active workers too much");
    1.92 +  }
    1.93 +
    1.94 +  if (TraceDynamicGCThreads) {
     1.95 +     gclog_or_tty->print_cr("AdaptiveSizePolicy::calc_default_active_workers() : "
     1.96 +       "active_workers(): %d  new_active_workers: %d  "
    1.97 +       "prev_active_workers: %d\n"
    1.98 +       " active_workers_by_JT: %d  active_workers_by_heap_size: %d",
    1.99 +       active_workers, new_active_workers, prev_active_workers,
   1.100 +       active_workers_by_JT, active_workers_by_heap_size);
   1.101 +  }
   1.102 +  assert(new_active_workers > 0, "Always need at least 1");
   1.103 +  return new_active_workers;
   1.104 +}
   1.105 +
   1.106 +int AdaptiveSizePolicy::calc_active_workers(uintx total_workers,
   1.107 +                                            uintx active_workers,
   1.108 +                                            uintx application_workers) {
   1.109 +  // If the user has specifically set the number of
   1.110 +  // GC threads, use them.
   1.111 +
   1.112 +  // If the user has turned off using a dynamic number of GC threads
    1.113 +  // or the user has requested a specific number, set the active
   1.114 +  // number of workers to all the workers.
   1.115 +
   1.116 +  int new_active_workers;
   1.117 +  if (!UseDynamicNumberOfGCThreads ||
   1.118 +     (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
   1.119 +    new_active_workers = total_workers;
   1.120 +  } else {
   1.121 +    new_active_workers = calc_default_active_workers(total_workers,
   1.122 +                                                     2, /* Minimum number of workers */
   1.123 +                                                     active_workers,
   1.124 +                                                     application_workers);
   1.125 +  }
   1.126 +  assert(new_active_workers > 0, "Always need at least 1");
   1.127 +  return new_active_workers;
   1.128 +}
   1.129 +
   1.130 +int AdaptiveSizePolicy::calc_active_conc_workers(uintx total_workers,
   1.131 +                                                 uintx active_workers,
   1.132 +                                                 uintx application_workers) {
   1.133 +  if (!UseDynamicNumberOfGCThreads ||
   1.134 +     (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
   1.135 +    return ConcGCThreads;
   1.136 +  } else {
   1.137 +    int no_of_gc_threads = calc_default_active_workers(
   1.138 +                             total_workers,
   1.139 +                             1, /* Minimum number of workers */
   1.140 +                             active_workers,
   1.141 +                             application_workers);
   1.142 +    return no_of_gc_threads;
   1.143 +  }
   1.144 +}
   1.145 +
   1.146  bool AdaptiveSizePolicy::tenuring_threshold_change() const {
   1.147    return decrement_tenuring_threshold_for_gc_cost() ||
   1.148           increment_tenuring_threshold_for_gc_cost() ||
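
The heuristic added in calc_default_active_workers() reduces to: take the larger of a per-Java-thread estimate (GCWorkersPerJavaThread * application threads, but at least min_workers) and a heap-size estimate (capacity / HeapSizePerGCThread, but at least 2), cap the result at the number of created workers, and, when the target falls below the current count, move only halfway toward it. The standalone sketch below is not part of this changeset; it mirrors that shape with plain constants standing in for the GCWorkersPerJavaThread and HeapSizePerGCThread flags, so the "increase instantly, decrease slowly" behavior can be observed in isolation.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Stand-ins for the GCWorkersPerJavaThread and HeapSizePerGCThread flags;
// the values are illustrative only.
static const uint64_t kGCWorkersPerJavaThread = 2;
static const uint64_t kHeapSizePerGCThread    = 64ULL << 20;   // 64 MB per GC thread

// Same shape as AdaptiveSizePolicy::calc_default_active_workers(): take the
// larger of the thread-based and heap-based estimates, cap it at the number
// of created workers, and move down only halfway per invocation.
static uint64_t default_active_workers(uint64_t total_workers,
                                       uint64_t min_workers,
                                       uint64_t prev_active_workers,
                                       uint64_t application_workers,
                                       uint64_t heap_capacity_bytes) {
  uint64_t by_java_threads =
      std::max(kGCWorkersPerJavaThread * application_workers, min_workers);
  uint64_t by_heap_size =
      std::max<uint64_t>(2, heap_capacity_bytes / kHeapSizePerGCThread);
  uint64_t wanted =
      std::min(std::max(by_java_threads, by_heap_size), total_workers);
  if (wanted < prev_active_workers) {
    // Decrease slowly: average the previous count and the new target.
    wanted = std::max(min_workers, (prev_active_workers + wanted) / 2);
  }
  return wanted;
}

int main() {
  uint64_t active = 2;                      // start at the minimum
  const uint64_t heap[]    = {128ULL << 20, 1024ULL << 20, 1024ULL << 20,
                              128ULL << 20,  128ULL << 20,  128ULL << 20};
  const uint64_t app_thr[] = {4, 8, 8, 1, 1, 1};
  for (int i = 0; i < 6; i++) {
    active = default_active_workers(/* total_workers = */ 16,
                                    /* min_workers   = */ 2,
                                    active, app_thr[i], heap[i]);
    printf("step %d: active workers = %llu\n", i, (unsigned long long) active);
  }
  return 0;
}

With these illustrative inputs the active count should jump from 2 straight to 8 and then 16 while the heap and application thread count grow, and step back down through 9, 5, 3 once the load drops, matching the averaging in the patch.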
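
calc_active_workers() and calc_active_conc_workers() only fall through to that heuristic when dynamic sizing is actually in effect. Below is a minimal sketch of the dispatch, again not taken from the patch, with plain booleans standing in for UseDynamicNumberOfGCThreads, ForceDynamicNumberOfGCThreads, and the FLAG_IS_DEFAULT(ParallelGCThreads) check, and a stub in place of the heuristic above.

#include <cstdio>

// Stand-ins for the VM flags consulted by calc_active_workers().
static bool UseDynamicNumberOfGCThreads   = true;
static bool ForceDynamicNumberOfGCThreads = false;
static bool ParallelGCThreads_is_default  = true;   // i.e. FLAG_IS_DEFAULT(ParallelGCThreads)

// Placeholder for the heuristic sketched above.
static unsigned dynamic_worker_count(unsigned total_workers) {
  return total_workers / 2 < 2 ? 2 : total_workers / 2;
}

// Mirrors the dispatch in calc_active_workers(): use every created worker
// unless dynamic sizing is on and the user has not pinned ParallelGCThreads
// (ForceDynamicNumberOfGCThreads overrides a pinned value).
static unsigned active_workers(unsigned total_workers) {
  if (!UseDynamicNumberOfGCThreads ||
      (!ParallelGCThreads_is_default && !ForceDynamicNumberOfGCThreads)) {
    return total_workers;
  }
  return dynamic_worker_count(total_workers);
}

int main() {
  printf("defaults:    %u of 16 workers\n", active_workers(16));
  ParallelGCThreads_is_default = false;  // as if -XX:ParallelGCThreads=16 were given
  printf("user-pinned: %u of 16 workers\n", active_workers(16));
  return 0;
}

The concurrent variant in the patch differs only in consulting ConcGCThreads instead of ParallelGCThreads and in allowing a minimum of one worker rather than two.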
