src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp

changeset 3176:8229bd737950
parent    2821:b52782ae3880
child     3180:81aa07130d30
     1.1 --- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Thu Sep 22 10:57:37 2011 -0700
     1.2 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Fri Sep 23 16:07:49 2011 -0400
     1.3 @@ -28,101 +28,93 @@
     1.4  #include "gc_implementation/shared/hSpaceCounters.hpp"
     1.5  
     1.6  class G1CollectedHeap;
     1.7 -class G1SpaceMonitoringSupport;
     1.8  
     1.9 -// Class for monitoring logical spaces in G1.
    1.10 -// G1 defines a set of regions as a young
    1.11 -// collection (analogous to a young generation).
    1.12 -// The young collection is a logical generation
    1.13 -// with no fixed chunk (see space.hpp) reflecting
    1.14 -// the address space for the generation.  In addition
    1.15 -// to the young collection there is its complement
    1.16 -// the non-young collection that is simply the regions
    1.17 -// not in the young collection.  The non-young collection
    1.18 -// is treated here as a logical old generation only
    1.19 -// because the monitoring tools expect a generational
    1.20 -// heap.  The monitoring tools expect that a Space
    1.21 -// (see space.hpp) exists that describe the
    1.22 -// address space of young collection and non-young
    1.23 -// collection and such a view is provided here.
     1.24 +// Class for monitoring logical spaces in G1. It provides data for
     1.25 +// both G1's jstat counters and G1's memory pools.
    1.26  //
    1.27 -// This class provides interfaces to access
    1.28 -// the value of variables for the young collection
    1.29 -// that include the "capacity" and "used" of the
    1.30 -// young collection along with constant values
    1.31 -// for the minimum and maximum capacities for
    1.32 -// the logical spaces.  Similarly for the non-young
    1.33 -// collection.
    1.34 +// G1 splits the heap into heap regions and each heap region belongs
    1.35 +// to one of the following categories:
    1.36  //
    1.37 -// Also provided are counters for G1 concurrent collections
    1.38 -// and stop-the-world full heap collecitons.
    1.39 +// * eden      : regions that have been allocated since the last GC
    1.40 +// * survivors : regions with objects that survived the last few GCs
    1.41 +// * old       : long-lived non-humongous regions
    1.42 +// * humongous : humongous regions
    1.43 +// * free      : free regions
    1.44  //
    1.45 -// Below is a description of how "used" and "capactiy"
    1.46 -// (or committed) is calculated for the logical spaces.
    1.47 +// The combination of eden and survivor regions form the equivalent of
    1.48 +// the young generation in the other GCs. The combination of old and
    1.49 +// humongous regions form the equivalent of the old generation in the
    1.50 +// other GCs. Free regions do not have a good equivalent in the other
    1.51 +// GCs given that they can be allocated as any of the other region types.
    1.52  //
    1.53 -// 1) The used space calculation for a pool is not necessarily
    1.54 -// independent of the others. We can easily get from G1 the overall
    1.55 -// used space in the entire heap, the number of regions in the young
    1.56 -// generation (includes both eden and survivors), and the number of
    1.57 -// survivor regions. So, from that we calculate:
    1.58 +// The monitoring tools expect the heap to contain a number of
    1.59 +// generations (young, old, perm) and each generation to contain a
    1.60 +// number of spaces (young: eden, survivors, old). Given that G1 does
    1.61 +// not maintain those spaces physically (e.g., the set of
    1.62 +// non-contiguous eden regions can be considered as a "logical"
    1.63 +// space), we'll provide the illusion that those generations and
    1.64 +// spaces exist. In reality, each generation and space refers to a set
    1.65 +// of heap regions that are potentially non-contiguous.
    1.66  //
    1.67 -//  survivor_used = survivor_num * region_size
    1.68 -//  eden_used     = young_region_num * region_size - survivor_used
    1.69 -//  old_gen_used  = overall_used - eden_used - survivor_used
    1.70 +// This class provides interfaces to access the min, current, and max
    1.71 +// capacity and current occupancy for each of G1's logical spaces and
    1.72 +// generations we expose to the monitoring tools. Also provided are
    1.73 +// counters for G1 concurrent collections and stop-the-world full heap
    1.74 +// collections.
    1.75  //
    1.76 -// Note that survivor_used and eden_used are upper bounds. To get the
    1.77 -// actual value we would have to iterate over the regions and add up
    1.78 -// ->used(). But that'd be expensive. So, we'll accept some lack of
    1.79 -// accuracy for those two. But, we have to be careful when calculating
    1.80 -// old_gen_used, in case we subtract from overall_used more then the
    1.81 -// actual number and our result goes negative.
    1.82 +// Below is a description of how the various sizes are calculated.
    1.83  //
    1.84 -// 2) Calculating the used space is straightforward, as described
    1.85 -// above. However, how do we calculate the committed space, given that
    1.86 -// we allocate space for the eden, survivor, and old gen out of the
    1.87 -// same pool of regions? One way to do this is to use the used value
    1.88 -// as also the committed value for the eden and survivor spaces and
    1.89 -// then calculate the old gen committed space as follows:
    1.90 +// * Current Capacity
    1.91  //
    1.92 -//  old_gen_committed = overall_committed - eden_committed - survivor_committed
     1.93 +//    - heap_capacity = current heap capacity (i.e., current committed size)
    1.94 +//    - young_gen_capacity = current max young gen target capacity
    1.95 +//          (i.e., young gen target capacity + max allowed expansion capacity)
    1.96 +//    - survivor_capacity = current survivor region capacity
    1.97 +//    - eden_capacity = young_gen_capacity - survivor_capacity
    1.98 +//    - old_capacity = heap_capacity - young_gen_capacity
    1.99  //
   1.100 -// Maybe a better way to do that would be to calculate used for eden
   1.101 -// and survivor as a sum of ->used() over their regions and then
   1.102 -// calculate committed as region_num * region_size (i.e., what we use
   1.103 -// to calculate the used space now). This is something to consider
   1.104 -// in the future.
    1.105 +//    In effect, the above distributes the free regions between
    1.106 +//    eden_capacity and old_capacity.
   1.107  //
   1.108 -// 3) Another decision that is again not straightforward is what is
   1.109 -// the max size that each memory pool can grow to. One way to do this
   1.110 -// would be to use the committed size for the max for the eden and
   1.111 -// survivors and calculate the old gen max as follows (basically, it's
   1.112 -// a similar pattern to what we use for the committed space, as
   1.113 -// described above):
   1.114 +// * Occupancy
   1.115  //
   1.116 -//  old_gen_max = overall_max - eden_max - survivor_max
   1.117 +//    - young_gen_used = current young region capacity
   1.118 +//    - survivor_used = survivor_capacity
   1.119 +//    - eden_used = young_gen_used - survivor_used
   1.120 +//    - old_used = overall_used - young_gen_used
   1.121  //
   1.122 -// Unfortunately, the above makes the max of each pool fluctuate over
   1.123 -// time and, even though this is allowed according to the spec, it
   1.124 -// broke several assumptions in the M&M framework (there were cases
   1.125 -// where used would reach a value greater than max). So, for max we
   1.126 -// use -1, which means "undefined" according to the spec.
    1.127 +//    Unfortunately, we only keep track of the number of currently
    1.128 +//    allocated young and survivor regions plus the overall used
    1.129 +//    bytes in the heap, so the above can be a little inaccurate.
   1.130  //
   1.131 -// 4) Now, there is a very subtle issue with all the above. The
   1.132 -// framework will call get_memory_usage() on the three pools
   1.133 -// asynchronously. As a result, each call might get a different value
   1.134 -// for, say, survivor_num which will yield inconsistent values for
   1.135 -// eden_used, survivor_used, and old_gen_used (as survivor_num is used
   1.136 -// in the calculation of all three). This would normally be
   1.137 -// ok. However, it's possible that this might cause the sum of
   1.138 -// eden_used, survivor_used, and old_gen_used to go over the max heap
   1.139 -// size and this seems to sometimes cause JConsole (and maybe other
   1.140 -// clients) to get confused. There's not a really an easy / clean
   1.141 -// solution to this problem, due to the asynchrounous nature of the
   1.142 -// framework.
   1.143 +// * Min Capacity
   1.144 +//
   1.145 +//    We set this to 0 for all spaces. We could consider setting the old
   1.146 +//    min capacity to the min capacity of the heap (see 7078465).
   1.147 +//
   1.148 +// * Max Capacity
   1.149 +//
   1.150 +//    For jstat, we set the max capacity of all spaces to heap_capacity,
    1.151 +//    given that we don't always have a reasonable upper bound on how big
   1.152 +//    each space can grow. For the memory pools, we actually make the max
   1.153 +//    capacity undefined. We could consider setting the old max capacity
   1.154 +//    to the max capacity of the heap (see 7078465).
   1.155 +//
    1.156 +// If we had more accurate occupancy / capacity information per
    1.157 +// region set, the above calculations would be greatly simplified
    1.158 +// and made more accurate.
   1.159 +//
   1.160 +// We update all the above synchronously and we store the results in
   1.161 +// fields so that we just read said fields when needed. A subtle point
   1.162 +// is that all the above sizes need to be recalculated when the old
   1.163 +// gen changes capacity (after a GC or after a humongous allocation)
   1.164 +// but only the eden occupancy changes when a new eden region is
    1.165 +// allocated. So, in the latter case we have minimal recalculation to
    1.166 +// do, which is important as we want to keep the eden region allocation
   1.167 +// path as low-overhead as possible.
   1.168  
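
As an illustration of the arithmetic described in the new comment block above, here is a minimal standalone C++ sketch. It is not the HotSpot code: the HeapSnapshot struct, its sample values, and the sub_to_zero helper are hypothetical stand-ins for values G1 obtains from the heap and its region sets, and the clamping mirrors the guarded-subtraction helper declared further down in this class.

    #include <cstddef>
    #include <cstdio>

    // Hypothetical inputs; in G1 these come from the heap and region sets.
    struct HeapSnapshot {
      size_t heap_capacity;       // current committed heap size
      size_t young_gen_capacity;  // young gen target + allowed expansion
      size_t survivor_capacity;   // survivor region num * region size
      size_t young_gen_used;      // young region num * region size
      size_t overall_used;        // total used bytes in the heap
    };

    // x - y clamped at 0: the inputs may be sampled at slightly
    // different times, so the difference could otherwise underflow.
    static size_t sub_to_zero(size_t x, size_t y) { return x > y ? x - y : 0; }

    int main() {
      HeapSnapshot s = { 256u << 20, 64u << 20, 8u << 20, 40u << 20, 120u << 20 };

      // Capacities: the free regions end up split implicitly between
      // eden_capacity and old_capacity.
      size_t eden_capacity = sub_to_zero(s.young_gen_capacity, s.survivor_capacity);
      size_t old_capacity  = sub_to_zero(s.heap_capacity, s.young_gen_capacity);

      // Occupancy: survivor regions are counted as fully used.
      size_t survivor_used = s.survivor_capacity;
      size_t eden_used     = sub_to_zero(s.young_gen_used, survivor_used);
      size_t old_used      = sub_to_zero(s.overall_used, s.young_gen_used);

      // With the sample numbers: eden 32M/56M, old 80M/192M, survivor 8M.
      printf("eden %zu/%zu old %zu/%zu survivor %zu\n",
             eden_used, eden_capacity, old_used, old_capacity, survivor_used);
      return 0;
    }
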
   1.169  class G1MonitoringSupport : public CHeapObj {
   1.170    G1CollectedHeap* _g1h;
   1.171 -  VirtualSpace* _g1_storage_addr;
   1.172  
   1.173    // jstat performance counters
   1.174    //  incremental collections both fully and partially young
   1.175 @@ -133,9 +125,9 @@
   1.176    // _from_counters, and _to_counters are associated with
   1.177    // this "generational" counter.
   1.178    GenerationCounters*  _young_collection_counters;
   1.179 -  //  non-young collection set counters. The _old_space_counters
   1.180 +  //  old collection set counters. The _old_space_counters
   1.181    // below are associated with this "generational" counter.
   1.182 -  GenerationCounters*  _non_young_collection_counters;
   1.183 +  GenerationCounters*  _old_collection_counters;
   1.184    // Counters for the capacity and used for
   1.185    //   the whole heap
   1.186    HSpaceCounters*      _old_space_counters;
   1.187 @@ -145,6 +137,27 @@
   1.188    HSpaceCounters*      _from_counters;
   1.189    HSpaceCounters*      _to_counters;
   1.190  
   1.191 +  // When it's appropriate to recalculate the various sizes (at the
   1.192 +  // end of a GC, when a new eden region is allocated, etc.) we store
   1.193 +  // them here so that we can easily report them when needed and not
   1.194 +  // have to recalculate them every time.
   1.195 +
   1.196 +  size_t _overall_reserved;
   1.197 +  size_t _overall_committed;
   1.198 +  size_t _overall_used;
   1.199 +
   1.200 +  size_t _young_region_num;
   1.201 +  size_t _young_gen_committed;
   1.202 +  size_t _eden_committed;
   1.203 +  size_t _eden_used;
   1.204 +  size_t _survivor_committed;
   1.205 +  size_t _survivor_used;
   1.206 +
   1.207 +  size_t _old_committed;
   1.208 +  size_t _old_used;
   1.209 +
   1.210 +  G1CollectedHeap* g1h() { return _g1h; }
   1.211 +
   1.212    // It returns x - y if x > y, 0 otherwise.
   1.213    // As described in the comment above, some of the inputs to the
   1.214    // calculations we have to do are obtained concurrently and hence
   1.215 @@ -160,15 +173,35 @@
   1.216      }
   1.217    }
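
The closing braces above belong to the guarded-subtraction helper described in the comment just before the hunk break; its signature falls in the gap between the two hunks, so it is not shown. A generic sketch of the pattern looks like the following (the name clamped_sub is hypothetical, not the elided one):

    // Returns x - y if x > y, 0 otherwise. Needed because the inputs
    // may be read concurrently and a plain size_t subtraction of
    // inconsistent values would wrap around.
    template <typename T>
    static T clamped_sub(T x, T y) {
      if (x > y) {
        return x - y;  // normal case
      } else {
        return 0;      // inputs raced; avoid unsigned underflow
      }
    }
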
   1.218  
   1.219 +  // Recalculate all the sizes.
   1.220 +  void recalculate_sizes();
   1.221 +  // Recalculate only what's necessary when a new eden region is allocated.
   1.222 +  void recalculate_eden_size();
   1.223 +
   1.224   public:
   1.225 -  G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr);
   1.226 +  G1MonitoringSupport(G1CollectedHeap* g1h);
   1.227  
   1.228 -  G1CollectedHeap* g1h() { return _g1h; }
   1.229 -  VirtualSpace* g1_storage_addr() { return _g1_storage_addr; }
   1.230 +  // Unfortunately, the jstat tool assumes that no space has 0
   1.231 +  // capacity. In our case, given that each space is logical, it's
    1.232 +// possible that no regions will be allocated to it, hence it may have
    1.233 +// 0 capacity (e.g., if there are no survivor regions, the survivor
   1.234 +  // space has 0 capacity). The way we deal with this is to always pad
   1.235 +  // each capacity value we report to jstat by a very small amount to
   1.236 +  // make sure that it's never zero. Given that we sometimes have to
   1.237 +  // report a capacity of a generation that contains several spaces
   1.238 +  // (e.g., young gen includes one eden, two survivor spaces), the
    1.239 +// mult parameter is provided in order to add the appropriate
   1.240 +  // padding multiple times so that the capacities add up correctly.
   1.241 +  static size_t pad_capacity(size_t size_bytes, size_t mult = 1) {
   1.242 +    return size_bytes + MinObjAlignmentInBytes * mult;
   1.243 +  }
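
To make the role of mult concrete, here is a small self-contained model of the padding arithmetic. It is a sketch, not the actual update code: MinObjAlignmentInBytes is assumed to be 8 (its typical value), and the committed sizes are made up, with young_gen_committed taken to be the eden plus survivor committed sizes as the surrounding code suggests.

    #include <cassert>
    #include <cstddef>

    static const size_t MinObjAlignmentInBytes = 8;  // typical value, assumed

    static size_t pad_capacity(size_t size_bytes, size_t mult = 1) {
      return size_bytes + MinObjAlignmentInBytes * mult;
    }

    int main() {
      size_t eden_committed = 48u << 20, survivor_committed = 8u << 20;
      size_t young_gen_committed = eden_committed + survivor_committed;

      size_t eden_c  = pad_capacity(eden_committed);          // eden     + 8
      size_t from_c  = pad_capacity(survivor_committed);      // survivor + 8
      size_t to_c    = pad_capacity(0);                       // 0        + 8, never zero
      size_t young_c = pad_capacity(young_gen_committed, 3);  // young    + 24

      // mult = 3 makes the padded young gen capacity equal the sum of its
      // three padded spaces (one eden, two survivors); with mult = 1 it
      // would come up 16 bytes short.
      assert(young_c == eden_c + from_c + to_c);
      return 0;
    }
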
   1.244  
   1.245 -  // Performance Counter accessors
   1.246 -  void update_counters();
   1.247 -  void update_eden_counters();
   1.248 +  // Recalculate all the sizes from scratch and update all the jstat
   1.249 +  // counters accordingly.
   1.250 +  void update_sizes();
   1.251 +  // Recalculate only what's necessary when a new eden region is
   1.252 +  // allocated and update any jstat counters that need to be updated.
   1.253 +  void update_eden_size();
   1.254  
   1.255    CollectorCounters* incremental_collection_counters() {
   1.256      return _incremental_collection_counters;
   1.257 @@ -176,8 +209,11 @@
   1.258    CollectorCounters* full_collection_counters() {
   1.259      return _full_collection_counters;
   1.260    }
   1.261 -  GenerationCounters* non_young_collection_counters() {
   1.262 -    return _non_young_collection_counters;
   1.263 +  GenerationCounters* young_collection_counters() {
   1.264 +    return _young_collection_counters;
   1.265 +  }
   1.266 +  GenerationCounters* old_collection_counters() {
   1.267 +    return _old_collection_counters;
   1.268    }
   1.269    HSpaceCounters*      old_space_counters() { return _old_space_counters; }
   1.270    HSpaceCounters*      eden_counters() { return _eden_counters; }
   1.271 @@ -187,17 +223,45 @@
   1.272    // Monitoring support used by
   1.273    //   MemoryService
   1.274    //   jstat counters
   1.275 -  size_t overall_committed();
   1.276 -  size_t overall_used();
   1.277  
   1.278 -  size_t eden_space_committed();
   1.279 -  size_t eden_space_used();
   1.280 +  size_t overall_reserved()           { return _overall_reserved;     }
   1.281 +  size_t overall_committed()          { return _overall_committed;    }
   1.282 +  size_t overall_used()               { return _overall_used;         }
   1.283  
   1.284 -  size_t survivor_space_committed();
   1.285 -  size_t survivor_space_used();
   1.286 +  size_t young_gen_committed()        { return _young_gen_committed;  }
   1.287 +  size_t young_gen_max()              { return overall_reserved();    }
   1.288 +  size_t eden_space_committed()       { return _eden_committed;       }
   1.289 +  size_t eden_space_used()            { return _eden_used;            }
   1.290 +  size_t survivor_space_committed()   { return _survivor_committed;   }
   1.291 +  size_t survivor_space_used()        { return _survivor_used;        }
   1.292  
   1.293 -  size_t old_space_committed();
   1.294 -  size_t old_space_used();
   1.295 +  size_t old_gen_committed()          { return old_space_committed(); }
   1.296 +  size_t old_gen_max()                { return overall_reserved();    }
   1.297 +  size_t old_space_committed()        { return _old_committed;        }
   1.298 +  size_t old_space_used()             { return _old_used;             }
   1.299 +};
   1.300 +
   1.301 +class G1GenerationCounters: public GenerationCounters {
   1.302 +protected:
   1.303 +  G1MonitoringSupport* _g1mm;
   1.304 +
   1.305 +public:
   1.306 +  G1GenerationCounters(G1MonitoringSupport* g1mm,
   1.307 +                       const char* name, int ordinal, int spaces,
   1.308 +                       size_t min_capacity, size_t max_capacity,
   1.309 +                       size_t curr_capacity);
   1.310 +};
   1.311 +
   1.312 +class G1YoungGenerationCounters: public G1GenerationCounters {
   1.313 +public:
   1.314 +  G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
   1.315 +  virtual void update_all();
   1.316 +};
   1.317 +
   1.318 +class G1OldGenerationCounters: public G1GenerationCounters {
   1.319 +public:
   1.320 +  G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
   1.321 +  virtual void update_all();
   1.322  };
   1.323  
   1.324  #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
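
Finally, a plausible shape for the update_all() overrides declared above. This is a hedged sketch rather than the changeset's .cpp side: it assumes that GenerationCounters exposes an inherited _current_size performance variable with a set_value() method.

    // Sketch only; assumes _current_size is a size performance variable
    // inherited from GenerationCounters.

    void G1YoungGenerationCounters::update_all() {
      // The young gen spans one eden and two survivor spaces, so its
      // capacity is padded three times to stay consistent with them.
      size_t committed =
          G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3);
      _current_size->set_value(committed);
    }

    void G1OldGenerationCounters::update_all() {
      size_t committed =
          G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed());
      _current_size->set_value(committed);
    }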
