src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175:4dfb2df418f2
parent       2783:eda9eb483d29
child        3298:7913e93dca52
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

     1 /*
     2  * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
    26 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
    28 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
    29 #include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
    30 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
    31 #include "gc_implementation/shared/collectorCounters.hpp"
    32 #include "gc_implementation/shared/markSweep.hpp"
    33 #include "gc_implementation/shared/mutableSpace.hpp"
    34 #include "memory/sharedHeap.hpp"
    35 #include "oops/oop.hpp"
    37 class ParallelScavengeHeap;
    38 class PSAdaptiveSizePolicy;
    39 class PSYoungGen;
    40 class PSOldGen;
    41 class PSPermGen;
    42 class ParCompactionManager;
    43 class ParallelTaskTerminator;
    44 class PSParallelCompact;
    45 class GCTaskManager;
    46 class GCTaskQueue;
    47 class PreGCValues;
    48 class MoveAndUpdateClosure;
    49 class RefProcTaskExecutor;
    51 // The SplitInfo class holds the information needed to 'split' a source region
    52 // so that the live data can be copied to two destination *spaces*.  Normally,
    53 // all the live data in a region is copied to a single destination space (e.g.,
    54 // everything live in a region in eden is copied entirely into the old gen).
    55 // However, when the heap is nearly full, all the live data in eden may not fit
    56 // into the old gen.  Copying only some of the regions from eden to old gen
    57 // requires finding a region that does not contain a partial object (i.e., no
    58 // live object crosses the region boundary) somewhere near the last object that
    59 // does fit into the old gen.  Since it's not always possible to find such a
    60 // region, splitting is necessary for predictable behavior.
    61 //
    62 // A region is always split at the end of the partial object.  This avoids
    63 // additional tests when calculating the new location of a pointer, which is a
    64 // very hot code path.  The partial object and everything to its left will be
    65 // copied to another space (call it dest_space_1).  The live data to the right
    66 // of the partial object will be copied either within the space itself, or to a
    67 // different destination space (distinct from dest_space_1).
    68 //
    69 // Split points are identified during the summary phase, when region
    70 // destinations are computed:  data about the split, including the
    71 // partial_object_size, is recorded in a SplitInfo record and the
    72 // partial_object_size field in the summary data is set to zero.  The zeroing is
    73 // possible (and necessary) since the partial object will move to a different
    74 // destination space than anything to its right, thus the partial object should
    75 // not affect the locations of any objects to its right.
    76 //
    77 // The recorded data is used during the compaction phase, but only rarely:  when
    78 // the partial object on the split region will be copied across a destination
    79 // region boundary.  This test is made once each time a region is filled, and is
    80 // a simple address comparison, so the overhead is negligible (see
    81 // PSParallelCompact::first_src_addr()).
    82 //
    83 // Notes:
    84 //
    85 // Only regions with partial objects are split; a region without a partial
    86 // object does not need any extra bookkeeping.
    87 //
    88 // At most one region is split per space, so the amount of data required is
    89 // constant.
    90 //
    91 // A region is split only when the destination space would overflow.  Once that
    92 // happens, the destination space is abandoned and no other data (even from
    93 // other source spaces) is targeted to that destination space.  Abandoning the
    94 // destination space may leave a somewhat large unused area at the end, if a
    95 // large object caused the overflow.
    96 //
    97 // Future work:
    98 //
    99 // More bookkeeping would be required to continue to use the destination space.
   100 // The most general solution would allow data from regions in two different
   101 // source spaces to be "joined" in a single destination region.  At the very
   102 // least, additional code would be required in next_src_region() to detect the
   103 // join and skip to an out-of-order source region.  If the join region was also
   104 // the last destination region to which a split region was copied (the most
   105 // likely case), then additional work would be needed to get fill_region() to
   106 // stop iteration and switch to a new source region at the right point.  Basic
   107 // idea would be to use a fake value for the top of the source space.  It is
   108 // doable, if a bit tricky.
   109 //
   110 // A simpler (but less general) solution would fill the remainder of the
   111 // destination region with a dummy object and continue filling the next
   112 // destination region.
   114 class SplitInfo
   115 {
   116 public:
   117   // Return true if this split info is valid (i.e., if a split has been
   118   // recorded).  The very first region cannot have a partial object and thus is
   119   // never split, so 0 is the 'invalid' value.
   120   bool is_valid() const { return _src_region_idx > 0; }
   122   // Return true if this split holds data for the specified source region.
   123   inline bool is_split(size_t source_region) const;
   125   // The index of the split region, the size of the partial object on that
   126   // region and the destination of the partial object.
   127   size_t    src_region_idx() const   { return _src_region_idx; }
   128   size_t    partial_obj_size() const { return _partial_obj_size; }
   129   HeapWord* destination() const      { return _destination; }
   131   // The destination count of the partial object referenced by this split
   132   // (either 1 or 2).  This must be added to the destination count of the
   133   // remainder of the source region.
   134   unsigned int destination_count() const { return _destination_count; }
   136   // If a word within the partial object will be written to the first word of a
   137   // destination region, this is the address of the destination region;
   138   // otherwise this is NULL.
   139   HeapWord* dest_region_addr() const     { return _dest_region_addr; }
   141   // If a word within the partial object will be written to the first word of a
   142   // destination region, this is the address of that word within the partial
   143   // object; otherwise this is NULL.
   144   HeapWord* first_src_addr() const       { return _first_src_addr; }
   146   // Record the data necessary to split the region src_region_idx.
   147   void record(size_t src_region_idx, size_t partial_obj_size,
   148               HeapWord* destination);
   150   void clear();
   152   DEBUG_ONLY(void verify_clear();)
   154 private:
   155   size_t       _src_region_idx;
   156   size_t       _partial_obj_size;
   157   HeapWord*    _destination;
   158   unsigned int _destination_count;
   159   HeapWord*    _dest_region_addr;
   160   HeapWord*    _first_src_addr;
   161 };
   163 inline bool SplitInfo::is_split(size_t region_idx) const
   164 {
   165   return _src_region_idx == region_idx && is_valid();
   166 }
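// Editorial sketch (not part of the original header): one way the summary and
// compaction phases might use SplitInfo, based only on the declarations above.
// The region index, word count and 'dest' pointer are hypothetical values.
//
//   SplitInfo si;
//   // Summary phase: source region 17 holds a 6-word partial object whose
//   // destination is 'dest'; record the split (the region's partial_obj_size
//   // in the summary data is zeroed elsewhere).
//   si.record(17, 6, dest);
//
//   // Compaction phase: when filling from source region 17, check whether it
//   // is the split region and, if so, where copying should start.
//   if (si.is_split(17)) {
//     HeapWord* const    first = si.first_src_addr();    // may be NULL
//     const unsigned int dc    = si.destination_count(); // 1 or 2
//   }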
   168 class SpaceInfo
   169 {
   170  public:
   171   MutableSpace* space() const { return _space; }
   173   // Where the free space will start after the collection.  Valid only after the
   174   // summary phase completes.
   175   HeapWord* new_top() const { return _new_top; }
   177   // Allows new_top to be set.
   178   HeapWord** new_top_addr() { return &_new_top; }
   180   // Where the smallest allowable dense prefix ends (used only for perm gen).
   181   HeapWord* min_dense_prefix() const { return _min_dense_prefix; }
   183   // Where the dense prefix ends, or the compacted region begins.
   184   HeapWord* dense_prefix() const { return _dense_prefix; }
   186   // The start array for the (generation containing the) space, or NULL if there
   187   // is no start array.
   188   ObjectStartArray* start_array() const { return _start_array; }
   190   SplitInfo& split_info() { return _split_info; }
   192   void set_space(MutableSpace* s)           { _space = s; }
   193   void set_new_top(HeapWord* addr)          { _new_top = addr; }
   194   void set_min_dense_prefix(HeapWord* addr) { _min_dense_prefix = addr; }
   195   void set_dense_prefix(HeapWord* addr)     { _dense_prefix = addr; }
   196   void set_start_array(ObjectStartArray* s) { _start_array = s; }
   198   void publish_new_top() const              { _space->set_top(_new_top); }
   200  private:
   201   MutableSpace*     _space;
   202   HeapWord*         _new_top;
   203   HeapWord*         _min_dense_prefix;
   204   HeapWord*         _dense_prefix;
   205   ObjectStartArray* _start_array;
   206   SplitInfo         _split_info;
   207 };
   209 class ParallelCompactData
   210 {
   211 public:
   212   // Sizes are in HeapWords, unless indicated otherwise.
   213   static const size_t Log2RegionSize;
   214   static const size_t RegionSize;
   215   static const size_t RegionSizeBytes;
   217   // Mask for the bits in a size_t to get an offset within a region.
   218   static const size_t RegionSizeOffsetMask;
   219   // Mask for the bits in a pointer to get an offset within a region.
   220   static const size_t RegionAddrOffsetMask;
   221   // Mask for the bits in a pointer to get the address of the start of a region.
   222   static const size_t RegionAddrMask;
   224   class RegionData
   225   {
   226   public:
   227     // Destination address of the region.
   228     HeapWord* destination() const { return _destination; }
   230     // The first region containing data destined for this region.
   231     size_t source_region() const { return _source_region; }
   233     // The object (if any) starting in this region and ending in a different
   234     // region that could not be updated during the main (parallel) compaction
   235     // phase.  This is different from _partial_obj_addr, which is an object that
   236     // extends onto a source region.  However, the two uses do not overlap in
   237     // time, so the same field is used to save space.
   238     HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
   240     // The starting address of the partial object extending onto the region.
   241     HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
   243     // Size of the partial object extending onto the region (words).
   244     size_t partial_obj_size() const { return _partial_obj_size; }
   246     // Size of live data that lies within this region due to objects that start
   247     // in this region (words).  This does not include the partial object
   248     // extending onto the region (if any), or the part of an object that extends
   249     // onto the next region (if any).
   250     size_t live_obj_size() const { return _dc_and_los & los_mask; }
   252     // Total live data that lies within the region (words).
   253     size_t data_size() const { return partial_obj_size() + live_obj_size(); }
   255     // The destination_count is the number of other regions to which data from
   256     // this region will be copied.  At the end of the summary phase, the valid
   257     // values of destination_count are
   258     //
   259     // 0 - data from the region will be compacted completely into itself, or the
   260     //     region is empty.  The region can be claimed and then filled.
   261     // 1 - data from the region will be compacted into 1 other region; some
   262     //     data from the region may also be compacted into the region itself.
   263     // 2 - data from the region will be copied to 2 other regions.
   264     //
   265     // During compaction as regions are emptied, the destination_count is
   266     // decremented (atomically) and when it reaches 0, it can be claimed and
   267     // then filled.
   268     //
   269     // A region is claimed for processing by atomically changing the
   270     // destination_count to the claimed value (dc_claimed).  After a region has
   271     // been filled, the destination_count should be set to the completed value
   272     // (dc_completed).
   273     inline uint destination_count() const;
   274     inline uint destination_count_raw() const;
   276     // The location of the java heap data that corresponds to this region.
   277     inline HeapWord* data_location() const;
   279     // The highest address referenced by objects in this region.
   280     inline HeapWord* highest_ref() const;
   282     // Whether this region is available to be claimed, has been claimed, or has
   283     // been completed.
   284     //
   285     // Minor subtlety:  claimed() returns true if the region is marked
   286     // completed(), which is desirable since a region must be claimed before it
   287     // can be completed.
   288     bool available() const { return _dc_and_los < dc_one; }
   289     bool claimed() const   { return _dc_and_los >= dc_claimed; }
   290     bool completed() const { return _dc_and_los >= dc_completed; }
   292     // These are not atomic.
   293     void set_destination(HeapWord* addr)       { _destination = addr; }
   294     void set_source_region(size_t region)      { _source_region = region; }
   295     void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
   296     void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
   297     void set_partial_obj_size(size_t words)    {
   298       _partial_obj_size = (region_sz_t) words;
   299     }
   301     inline void set_destination_count(uint count);
   302     inline void set_live_obj_size(size_t words);
   303     inline void set_data_location(HeapWord* addr);
   304     inline void set_completed();
   305     inline bool claim_unsafe();
   307     // These are atomic.
   308     inline void add_live_obj(size_t words);
   309     inline void set_highest_ref(HeapWord* addr);
   310     inline void decrement_destination_count();
   311     inline bool claim();
   313   private:
   314     // The type used to represent object sizes within a region.
   315     typedef uint region_sz_t;
   317     // Constants for manipulating the _dc_and_los field, which holds both the
   318     // destination count and live obj size.  The live obj size lives at the
   319     // least significant end so no masking is necessary when adding.
   320     static const region_sz_t dc_shift;           // Shift amount.
   321     static const region_sz_t dc_mask;            // Mask for destination count.
   322     static const region_sz_t dc_one;             // 1, shifted appropriately.
   323     static const region_sz_t dc_claimed;         // Region has been claimed.
   324     static const region_sz_t dc_completed;       // Region has been completed.
   325     static const region_sz_t los_mask;           // Mask for live obj size.
   327     HeapWord*            _destination;
   328     size_t               _source_region;
   329     HeapWord*            _partial_obj_addr;
   330     region_sz_t          _partial_obj_size;
   331     region_sz_t volatile _dc_and_los;
   332 #ifdef ASSERT
   333     // These enable optimizations that are only partially implemented.  Use
   334     // debug builds to prevent the code fragments from breaking.
   335     HeapWord*            _data_location;
   336     HeapWord*            _highest_ref;
   337 #endif  // #ifdef ASSERT
   339 #ifdef ASSERT
   340    public:
   341     uint            _pushed;   // 0 until region is pushed onto a worker's stack
   342    private:
   343 #endif
   344   };
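  // Editorial sketch (not part of the original header): how _dc_and_los packs
  // the destination count and the live-object size into one word, per the
  // comments above.  dc_shift == 27 is an assumption for illustration; the
  // real constants are defined in psParallelCompact.cpp.
  //
  //   region_sz_t pack(uint count, size_t live_words) {
  //     const region_sz_t dc_shift = 27;                  // assumed value
  //     const region_sz_t dc_mask  = ~region_sz_t(0) << dc_shift;
  //     const region_sz_t los_mask = ~dc_mask;
  //     return (count << dc_shift) | (region_sz_t)(live_words & los_mask);
  //   }
  //   // destination_count_raw() == _dc_and_los & dc_mask
  //   // live_obj_size()         == _dc_and_los & los_mask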
   346 public:
   347   ParallelCompactData();
   348   bool initialize(MemRegion covered_region);
   350   size_t region_count() const { return _region_count; }
   352   // Convert region indices to/from RegionData pointers.
   353   inline RegionData* region(size_t region_idx) const;
   354   inline size_t     region(const RegionData* const region_ptr) const;
   356   // Returns true if the given address is contained within the region
   357   bool region_contains(size_t region_index, HeapWord* addr);
   359   void add_obj(HeapWord* addr, size_t len);
   360   void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
   362   // Fill in the regions covering [beg, end) so that no data moves; i.e., the
   363   // destination of region n is simply the start of region n.  The argument beg
   364   // must be region-aligned; end need not be.
   365   void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
   367   HeapWord* summarize_split_space(size_t src_region, SplitInfo& split_info,
   368                                   HeapWord* destination, HeapWord* target_end,
   369                                   HeapWord** target_next);
   370   bool summarize(SplitInfo& split_info,
   371                  HeapWord* source_beg, HeapWord* source_end,
   372                  HeapWord** source_next,
   373                  HeapWord* target_beg, HeapWord* target_end,
   374                  HeapWord** target_next);
   376   void clear();
   377   void clear_range(size_t beg_region, size_t end_region);
   378   void clear_range(HeapWord* beg, HeapWord* end) {
   379     clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
   380   }
   382   // Return the number of words between addr and the start of the region
   383   // containing addr.
   384   inline size_t     region_offset(const HeapWord* addr) const;
   386   // Convert addresses to/from a region index or region pointer.
   387   inline size_t     addr_to_region_idx(const HeapWord* addr) const;
   388   inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
   389   inline HeapWord*  region_to_addr(size_t region) const;
   390   inline HeapWord*  region_to_addr(size_t region, size_t offset) const;
   391   inline HeapWord*  region_to_addr(const RegionData* region) const;
   393   inline HeapWord*  region_align_down(HeapWord* addr) const;
   394   inline HeapWord*  region_align_up(HeapWord* addr) const;
   395   inline bool       is_region_aligned(HeapWord* addr) const;
   397   // Return the address one past the end of the partial object.
   398   HeapWord* partial_obj_end(size_t region_idx) const;
    400   // Return the new location of the object p after
    401   // the compaction.
   402   HeapWord* calc_new_pointer(HeapWord* addr);
   404   HeapWord* calc_new_pointer(oop p) {
   405     return calc_new_pointer((HeapWord*) p);
   406   }
   408   // Return the updated address for the given klass
   409   klassOop calc_new_klass(klassOop);
   411 #ifdef  ASSERT
   412   void verify_clear(const PSVirtualSpace* vspace);
   413   void verify_clear();
   414 #endif  // #ifdef ASSERT
   416 private:
   417   bool initialize_region_data(size_t region_size);
   418   PSVirtualSpace* create_vspace(size_t count, size_t element_size);
   420 private:
   421   HeapWord*       _region_start;
   422 #ifdef  ASSERT
   423   HeapWord*       _region_end;
   424 #endif  // #ifdef ASSERT
   426   PSVirtualSpace* _region_vspace;
   427   RegionData*     _region_data;
   428   size_t          _region_count;
   429 };
   431 inline uint
   432 ParallelCompactData::RegionData::destination_count_raw() const
   433 {
   434   return _dc_and_los & dc_mask;
   435 }
   437 inline uint
   438 ParallelCompactData::RegionData::destination_count() const
   439 {
   440   return destination_count_raw() >> dc_shift;
   441 }
   443 inline void
   444 ParallelCompactData::RegionData::set_destination_count(uint count)
   445 {
   446   assert(count <= (dc_completed >> dc_shift), "count too large");
   447   const region_sz_t live_sz = (region_sz_t) live_obj_size();
   448   _dc_and_los = (count << dc_shift) | live_sz;
   449 }
   451 inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
   452 {
   453   assert(words <= los_mask, "would overflow");
   454   _dc_and_los = destination_count_raw() | (region_sz_t)words;
   455 }
   457 inline void ParallelCompactData::RegionData::decrement_destination_count()
   458 {
   459   assert(_dc_and_los < dc_claimed, "already claimed");
   460   assert(_dc_and_los >= dc_one, "count would go negative");
   461   Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
   462 }
   464 inline HeapWord* ParallelCompactData::RegionData::data_location() const
   465 {
   466   DEBUG_ONLY(return _data_location;)
   467   NOT_DEBUG(return NULL;)
   468 }
   470 inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
   471 {
   472   DEBUG_ONLY(return _highest_ref;)
   473   NOT_DEBUG(return NULL;)
   474 }
   476 inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
   477 {
   478   DEBUG_ONLY(_data_location = addr;)
   479 }
   481 inline void ParallelCompactData::RegionData::set_completed()
   482 {
   483   assert(claimed(), "must be claimed first");
   484   _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
   485 }
   487 // MT-unsafe claiming of a region.  Should only be used during single threaded
   488 // execution.
   489 inline bool ParallelCompactData::RegionData::claim_unsafe()
   490 {
   491   if (available()) {
   492     _dc_and_los |= dc_claimed;
   493     return true;
   494   }
   495   return false;
   496 }
   498 inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
   499 {
   500   assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
   501   Atomic::add((int) words, (volatile int*) &_dc_and_los);
   502 }
   504 inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
   505 {
   506 #ifdef ASSERT
   507   HeapWord* tmp = _highest_ref;
   508   while (addr > tmp) {
   509     tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
   510   }
   511 #endif  // #ifdef ASSERT
   512 }
   514 inline bool ParallelCompactData::RegionData::claim()
   515 {
   516   const int los = (int) live_obj_size();
   517   const int old = Atomic::cmpxchg(dc_claimed | los,
   518                                   (volatile int*) &_dc_and_los, los);
   519   return old == los;
   520 }
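// Editorial sketch (not part of the original header): the claim/fill/complete
// protocol described in the RegionData comments, written as a hypothetical
// worker helper.  fill_region_contents() is a stand-in for the real filling
// code in PSParallelCompact::fill_region().
//
//   void try_fill_region(ParallelCompactData& sd, size_t region_idx) {
//     ParallelCompactData::RegionData* const rd = sd.region(region_idx);
//     if (rd->available() && rd->claim()) {   // atomically take ownership
//       fill_region_contents(region_idx);     // copy live data into the region
//       rd->set_completed();                  // publish; claimed() stays true
//     }
//   }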
   522 inline ParallelCompactData::RegionData*
   523 ParallelCompactData::region(size_t region_idx) const
   524 {
   525   assert(region_idx <= region_count(), "bad arg");
   526   return _region_data + region_idx;
   527 }
   529 inline size_t
   530 ParallelCompactData::region(const RegionData* const region_ptr) const
   531 {
   532   assert(region_ptr >= _region_data, "bad arg");
   533   assert(region_ptr <= _region_data + region_count(), "bad arg");
   534   return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
   535 }
   537 inline size_t
   538 ParallelCompactData::region_offset(const HeapWord* addr) const
   539 {
   540   assert(addr >= _region_start, "bad addr");
   541   assert(addr <= _region_end, "bad addr");
   542   return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
   543 }
   545 inline size_t
   546 ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
   547 {
   548   assert(addr >= _region_start, "bad addr");
   549   assert(addr <= _region_end, "bad addr");
   550   return pointer_delta(addr, _region_start) >> Log2RegionSize;
   551 }
   553 inline ParallelCompactData::RegionData*
   554 ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
   555 {
   556   return region(addr_to_region_idx(addr));
   557 }
   559 inline HeapWord*
   560 ParallelCompactData::region_to_addr(size_t region) const
   561 {
   562   assert(region <= _region_count, "region out of range");
   563   return _region_start + (region << Log2RegionSize);
   564 }
   566 inline HeapWord*
   567 ParallelCompactData::region_to_addr(const RegionData* region) const
   568 {
   569   return region_to_addr(pointer_delta(region, _region_data,
   570                                       sizeof(RegionData)));
   571 }
   573 inline HeapWord*
   574 ParallelCompactData::region_to_addr(size_t region, size_t offset) const
   575 {
   576   assert(region <= _region_count, "region out of range");
   577   assert(offset < RegionSize, "offset too big");  // This may be too strict.
   578   return region_to_addr(region) + offset;
   579 }
   581 inline HeapWord*
   582 ParallelCompactData::region_align_down(HeapWord* addr) const
   583 {
   584   assert(addr >= _region_start, "bad addr");
   585   assert(addr < _region_end + RegionSize, "bad addr");
   586   return (HeapWord*)(size_t(addr) & RegionAddrMask);
   587 }
   589 inline HeapWord*
   590 ParallelCompactData::region_align_up(HeapWord* addr) const
   591 {
   592   assert(addr >= _region_start, "bad addr");
   593   assert(addr <= _region_end, "bad addr");
   594   return region_align_down(addr + RegionSizeOffsetMask);
   595 }
   597 inline bool
   598 ParallelCompactData::is_region_aligned(HeapWord* addr) const
   599 {
   600   return region_offset(addr) == 0;
   601 }
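// Editorial sketch (not part of the original header): a worked example of the
// address arithmetic above, assuming (for illustration only) Log2RegionSize
// == 9, i.e. RegionSize == 512 HeapWords.
//
//   HeapWord* addr = _region_start + 1000;   // hypothetical, unaligned address
//   addr_to_region_idx(addr) == 1000 >> 9         == 1
//   region_offset(addr)      == 1000 - 512        == 488
//   region_align_down(addr)  == _region_start + 512
//   region_align_up(addr)    == _region_start + 1024
//   is_region_aligned(addr)  == false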
   603 // Abstract closure for use with ParMarkBitMap::iterate(), which will invoke the
   604 // do_addr() method.
   605 //
   606 // The closure is initialized with the number of heap words to process
   607 // (words_remaining()), and becomes 'full' when it reaches 0.  The do_addr()
   608 // methods in subclasses should update the total as words are processed.  Since
   609 // only one subclass actually uses this mechanism to terminate iteration, the
   610 // default initial value is > 0.  The implementation is here and not in the
   611 // single subclass that uses it to avoid making is_full() virtual, and thus
   612 // adding a virtual call per live object.
   614 class ParMarkBitMapClosure: public StackObj {
   615  public:
   616   typedef ParMarkBitMap::idx_t idx_t;
   617   typedef ParMarkBitMap::IterationStatus IterationStatus;
   619  public:
   620   inline ParMarkBitMapClosure(ParMarkBitMap* mbm, ParCompactionManager* cm,
   621                               size_t words = max_uintx);
   623   inline ParCompactionManager* compaction_manager() const;
   624   inline ParMarkBitMap*        bitmap() const;
   625   inline size_t                words_remaining() const;
   626   inline bool                  is_full() const;
   627   inline HeapWord*             source() const;
   629   inline void                  set_source(HeapWord* addr);
   631   virtual IterationStatus do_addr(HeapWord* addr, size_t words) = 0;
   633  protected:
   634   inline void decrement_words_remaining(size_t words);
   636  private:
   637   ParMarkBitMap* const        _bitmap;
   638   ParCompactionManager* const _compaction_manager;
   639   DEBUG_ONLY(const size_t     _initial_words_remaining;) // Useful in debugger.
   640   size_t                      _words_remaining; // Words left to copy.
   642  protected:
   643   HeapWord*                   _source;          // Next addr that would be read.
   644 };
   646 inline
   647 ParMarkBitMapClosure::ParMarkBitMapClosure(ParMarkBitMap* bitmap,
   648                                            ParCompactionManager* cm,
   649                                            size_t words):
   650   _bitmap(bitmap), _compaction_manager(cm)
   651 #ifdef  ASSERT
   652   , _initial_words_remaining(words)
   653 #endif
   654 {
   655   _words_remaining = words;
   656   _source = NULL;
   657 }
   659 inline ParCompactionManager* ParMarkBitMapClosure::compaction_manager() const {
   660   return _compaction_manager;
   661 }
   663 inline ParMarkBitMap* ParMarkBitMapClosure::bitmap() const {
   664   return _bitmap;
   665 }
   667 inline size_t ParMarkBitMapClosure::words_remaining() const {
   668   return _words_remaining;
   669 }
   671 inline bool ParMarkBitMapClosure::is_full() const {
   672   return words_remaining() == 0;
   673 }
   675 inline HeapWord* ParMarkBitMapClosure::source() const {
   676   return _source;
   677 }
   679 inline void ParMarkBitMapClosure::set_source(HeapWord* addr) {
   680   _source = addr;
   681 }
   683 inline void ParMarkBitMapClosure::decrement_words_remaining(size_t words) {
   684   assert(_words_remaining >= words, "processed too many words");
   685   _words_remaining -= words;
   686 }
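// Editorial sketch (not part of the original header): a minimal
// ParMarkBitMapClosure subclass of the kind described above.  The class name
// is hypothetical, and the IterationStatus values ('full', 'incomplete') are
// assumed to be those declared in ParMarkBitMap.
//
//   class CountLiveWordsClosure: public ParMarkBitMapClosure {
//    public:
//     CountLiveWordsClosure(ParMarkBitMap* bm, ParCompactionManager* cm,
//                           size_t budget):
//       ParMarkBitMapClosure(bm, cm, budget) { }
//
//     virtual IterationStatus do_addr(HeapWord* addr, size_t words) {
//       decrement_words_remaining(words);   // update the running total
//       return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
//     }
//   };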
   688 // The UseParallelOldGC collector is a stop-the-world garbage collector that
   689 // does parts of the collection using parallel threads.  The collection includes
   690 // the tenured generation and the young generation.  The permanent generation is
   691 // collected at the same time as the other two generations but the permanent
    692 // generation is collected by a single GC thread.  The permanent generation is
   693 // collected serially because of the requirement that during the processing of a
    694 // klass AAA, any objects referenced by AAA must already have been processed.
   695 // This requirement is enforced by a left (lower address) to right (higher
   696 // address) sliding compaction.
   697 //
   698 // There are four phases of the collection.
   699 //
   700 //      - marking phase
   701 //      - summary phase
   702 //      - compacting phase
   703 //      - clean up phase
   704 //
   705 // Roughly speaking these phases correspond, respectively, to
   706 //      - mark all the live objects
   707 //      - calculate the destination of each object at the end of the collection
   708 //      - move the objects to their destination
   709 //      - update some references and reinitialize some variables
   710 //
    711 // These phases are invoked in PSParallelCompact::invoke_no_policy().  The
   712 // marking phase is implemented in PSParallelCompact::marking_phase() and does a
   713 // complete marking of the heap.  The summary phase is implemented in
   714 // PSParallelCompact::summary_phase().  The move and update phase is implemented
   715 // in PSParallelCompact::compact().
   716 //
   717 // A space that is being collected is divided into regions and with each region
   718 // is associated an object of type ParallelCompactData.  Each region is of a
   719 // fixed size and typically will contain more than 1 object and may have parts
   720 // of objects at the front and back of the region.
   721 //
   722 // region            -----+---------------------+----------
   723 // objects covered   [ AAA  )[ BBB )[ CCC   )[ DDD     )
   724 //
   725 // The marking phase does a complete marking of all live objects in the heap.
   726 // The marking also compiles the size of the data for all live objects covered
   727 // by the region.  This size includes the part of any live object spanning onto
   728 // the region (part of AAA if it is live) from the front, all live objects
   729 // contained in the region (BBB and/or CCC if they are live), and the part of
   730 // any live objects covered by the region that extends off the region (part of
   731 // DDD if it is live).  The marking phase uses multiple GC threads and marking
   732 // is done in a bit array of type ParMarkBitMap.  The marking of the bit map is
   733 // done atomically as is the accumulation of the size of the live objects
   734 // covered by a region.
   735 //
   736 // The summary phase calculates the total live data to the left of each region
   737 // XXX.  Based on that total and the bottom of the space, it can calculate the
   738 // starting location of the live data in XXX.  The summary phase calculates for
    739 // each region XXX quantities such as
   740 //
   741 //      - the amount of live data at the beginning of a region from an object
   742 //        entering the region.
   743 //      - the location of the first live data on the region
   744 //      - a count of the number of regions receiving live data from XXX.
   745 //
   746 // See ParallelCompactData for precise details.  The summary phase also
   747 // calculates the dense prefix for the compaction.  The dense prefix is a
   748 // portion at the beginning of the space that is not moved.  The objects in the
   749 // dense prefix do need to have their object references updated.  See method
   750 // summarize_dense_prefix().
   751 //
   752 // The summary phase is done using 1 GC thread.
   753 //
   754 // The compaction phase moves objects to their new location and updates all
   755 // references in the object.
   756 //
   757 // A current exception is that objects that cross a region boundary are moved
   758 // but do not have their references updated.  References are not updated because
   759 // it cannot easily be determined if the klass pointer KKK for the object AAA
   760 // has been updated.  KKK likely resides in a region to the left of the region
    761 // containing AAA.  These AAA's have their references updated at the end in a
   762 // clean up phase.  See the method PSParallelCompact::update_deferred_objects().
   763 // An alternate strategy is being investigated for this deferral of updating.
   764 //
   765 // Compaction is done on a region basis.  A region that is ready to be filled is
    766 // put on a ready list and GC threads take regions off the list and fill them.  A
    767 // region is ready to be filled if it is empty of live objects.  Such a region may
   768 // have been initially empty (only contained dead objects) or may have had all
   769 // its live objects copied out already.  A region that compacts into itself is
   770 // also ready for filling.  The ready list is initially filled with empty
   771 // regions and regions compacting into themselves.  There is always at least 1
   772 // region that can be put on the ready list.  The regions are atomically added
   773 // and removed from the ready list.
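// Editorial sketch (not part of the original header): the flow described above
// reduced to the top-level calls declared later in this class.  The real
// invoke_no_policy() performs additional bookkeeping (pre_compact(),
// adjust_roots(), compact_perm(), etc.) around these steps.
//
//   marking_phase(cm, maximum_heap_compaction);   // mark all live objects
//   summary_phase(cm, maximum_heap_compaction);   // per-region destinations
//   compact();                                    // move objects, update refs
//   post_compact();                               // deferred updates, cleanup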
   775 class PSParallelCompact : AllStatic {
   776  public:
   777   // Convenient access to type names.
   778   typedef ParMarkBitMap::idx_t idx_t;
   779   typedef ParallelCompactData::RegionData RegionData;
   781   typedef enum {
   782     perm_space_id, old_space_id, eden_space_id,
   783     from_space_id, to_space_id, last_space_id
   784   } SpaceId;
   786  public:
   787   // Inline closure decls
   788   //
   789   class IsAliveClosure: public BoolObjectClosure {
   790    public:
   791     virtual void do_object(oop p);
   792     virtual bool do_object_b(oop p);
   793   };
   795   class KeepAliveClosure: public OopClosure {
   796    private:
   797     ParCompactionManager* _compaction_manager;
   798    protected:
   799     template <class T> inline void do_oop_work(T* p);
   800    public:
   801     KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
   802     virtual void do_oop(oop* p);
   803     virtual void do_oop(narrowOop* p);
   804   };
    806   // Currently unused
   807   class FollowRootClosure: public OopsInGenClosure {
   808    private:
   809     ParCompactionManager* _compaction_manager;
   810    public:
   811     FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
   812     virtual void do_oop(oop* p);
   813     virtual void do_oop(narrowOop* p);
   814  };
   816   class FollowStackClosure: public VoidClosure {
   817    private:
   818     ParCompactionManager* _compaction_manager;
   819    public:
   820     FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
   821     virtual void do_void();
   822   };
   824   class AdjustPointerClosure: public OopsInGenClosure {
   825    private:
   826     bool _is_root;
   827    public:
   828     AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
   829     virtual void do_oop(oop* p);
   830     virtual void do_oop(narrowOop* p);
    831     // do not walk from thread stacks to the code cache during this phase
   832     virtual void do_code_blob(CodeBlob* cb) const { }
   833   };
   835   // Closure for verifying update of pointers.  Does not
   836   // have any side effects.
   837   class VerifyUpdateClosure: public ParMarkBitMapClosure {
   838     const MutableSpace* _space; // Is this ever used?
   840    public:
   841     VerifyUpdateClosure(ParCompactionManager* cm, const MutableSpace* sp) :
   842       ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm), _space(sp)
   843     { }
   845     virtual IterationStatus do_addr(HeapWord* addr, size_t words);
   847     const MutableSpace* space() { return _space; }
   848   };
   850   // Closure for updating objects altered for debug checking
   851   class ResetObjectsClosure: public ParMarkBitMapClosure {
   852    public:
   853     ResetObjectsClosure(ParCompactionManager* cm):
   854       ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm)
   855     { }
   857     virtual IterationStatus do_addr(HeapWord* addr, size_t words);
   858   };
   860   friend class KeepAliveClosure;
   861   friend class FollowStackClosure;
   862   friend class AdjustPointerClosure;
   863   friend class FollowRootClosure;
   864   friend class instanceKlassKlass;
   865   friend class RefProcTaskProxy;
   867  private:
   868   static elapsedTimer         _accumulated_time;
   869   static unsigned int         _total_invocations;
   870   static unsigned int         _maximum_compaction_gc_num;
   871   static jlong                _time_of_last_gc;   // ms
   872   static CollectorCounters*   _counters;
   873   static ParMarkBitMap        _mark_bitmap;
   874   static ParallelCompactData  _summary_data;
   875   static IsAliveClosure       _is_alive_closure;
   876   static SpaceInfo            _space_info[last_space_id];
   877   static bool                 _print_phases;
   878   static AdjustPointerClosure _adjust_root_pointer_closure;
   879   static AdjustPointerClosure _adjust_pointer_closure;
   881   // Reference processing (used in ...follow_contents)
   882   static ReferenceProcessor*  _ref_processor;
   884   // Updated location of intArrayKlassObj.
   885   static klassOop _updated_int_array_klass_obj;
   887   // Values computed at initialization and used by dead_wood_limiter().
   888   static double _dwl_mean;
   889   static double _dwl_std_dev;
   890   static double _dwl_first_term;
   891   static double _dwl_adjustment;
   892 #ifdef  ASSERT
   893   static bool   _dwl_initialized;
   894 #endif  // #ifdef ASSERT
   896  private:
   897   // Closure accessors
   898   static OopClosure* adjust_pointer_closure()      { return (OopClosure*)&_adjust_pointer_closure; }
   899   static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
   900   static BoolObjectClosure* is_alive_closure()     { return (BoolObjectClosure*)&_is_alive_closure; }
   902   static void initialize_space_info();
   904   // Return true if details about individual phases should be printed.
   905   static inline bool print_phases();
   907   // Clear the marking bitmap and summary data that cover the specified space.
   908   static void clear_data_covering_space(SpaceId id);
   910   static void pre_compact(PreGCValues* pre_gc_values);
   911   static void post_compact();
   913   // Mark live objects
   914   static void marking_phase(ParCompactionManager* cm,
   915                             bool maximum_heap_compaction);
   916   static void follow_weak_klass_links();
   917   static void follow_mdo_weak_refs();
   919   template <class T> static inline void adjust_pointer(T* p, bool is_root);
   920   static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
   922   template <class T>
   923   static inline void follow_root(ParCompactionManager* cm, T* p);
   925   // Compute the dense prefix for the designated space.  This is an experimental
   926   // implementation currently not used in production.
   927   static HeapWord* compute_dense_prefix_via_density(const SpaceId id,
   928                                                     bool maximum_compaction);
   930   // Methods used to compute the dense prefix.
   932   // Compute the value of the normal distribution at x = density.  The mean and
   933   // standard deviation are values saved by initialize_dead_wood_limiter().
   934   static inline double normal_distribution(double density);
   936   // Initialize the static vars used by dead_wood_limiter().
   937   static void initialize_dead_wood_limiter();
   939   // Return the percentage of space that can be treated as "dead wood" (i.e.,
   940   // not reclaimed).
   941   static double dead_wood_limiter(double density, size_t min_percent);
   943   // Find the first (left-most) region in the range [beg, end) that has at least
   944   // dead_words of dead space to the left.  The argument beg must be the first
   945   // region in the space that is not completely live.
   946   static RegionData* dead_wood_limit_region(const RegionData* beg,
   947                                             const RegionData* end,
   948                                             size_t dead_words);
   950   // Return a pointer to the first region in the range [beg, end) that is not
   951   // completely full.
   952   static RegionData* first_dead_space_region(const RegionData* beg,
   953                                              const RegionData* end);
   955   // Return a value indicating the benefit or 'yield' if the compacted region
   956   // were to start (or equivalently if the dense prefix were to end) at the
   957   // candidate region.  Higher values are better.
   958   //
   959   // The value is based on the amount of space reclaimed vs. the costs of (a)
   960   // updating references in the dense prefix plus (b) copying objects and
   961   // updating references in the compacted region.
   962   static inline double reclaimed_ratio(const RegionData* const candidate,
   963                                        HeapWord* const bottom,
   964                                        HeapWord* const top,
   965                                        HeapWord* const new_top);
   967   // Compute the dense prefix for the designated space.
   968   static HeapWord* compute_dense_prefix(const SpaceId id,
   969                                         bool maximum_compaction);
   971   // Return true if dead space crosses onto the specified Region; bit must be
   972   // the bit index corresponding to the first word of the Region.
   973   static inline bool dead_space_crosses_boundary(const RegionData* region,
   974                                                  idx_t bit);
   976   // Summary phase utility routine to fill dead space (if any) at the dense
    977   // prefix boundary.  Should only be called if the dense prefix is
   978   // non-empty.
   979   static void fill_dense_prefix_end(SpaceId id);
   981   // Clear the summary data source_region field for the specified addresses.
   982   static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);
   984 #ifndef PRODUCT
   985   // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).
   987   // Fill the region [start, start + words) with live object(s).  Only usable
   988   // for the old and permanent generations.
   989   static void fill_with_live_objects(SpaceId id, HeapWord* const start,
   990                                      size_t words);
   991   // Include the new objects in the summary data.
   992   static void summarize_new_objects(SpaceId id, HeapWord* start);
   994   // Add live objects to a survivor space since it's rare that both survivors
   995   // are non-empty.
   996   static void provoke_split_fill_survivor(SpaceId id);
   998   // Add live objects and/or choose the dense prefix to provoke splitting.
   999   static void provoke_split(bool & maximum_compaction);
  1000 #endif
  1002   static void summarize_spaces_quick();
  1003   static void summarize_space(SpaceId id, bool maximum_compaction);
  1004   static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
  1006   // Adjust addresses in roots.  Does not adjust addresses in heap.
  1007   static void adjust_roots();
  1009   // Serial code executed in preparation for the compaction phase.
  1010   static void compact_prologue();
  1012   // Move objects to new locations.
  1013   static void compact_perm(ParCompactionManager* cm);
  1014   static void compact();
  1016   // Add available regions to the stack and draining tasks to the task queue.
  1017   static void enqueue_region_draining_tasks(GCTaskQueue* q,
  1018                                             uint parallel_gc_threads);
  1020   // Add dense prefix update tasks to the task queue.
  1021   static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
  1022                                          uint parallel_gc_threads);
  1024   // Add region stealing tasks to the task queue.
  1025   static void enqueue_region_stealing_tasks(
  1026                                        GCTaskQueue* q,
  1027                                        ParallelTaskTerminator* terminator_ptr,
  1028                                        uint parallel_gc_threads);
  1030   // If objects are left in eden after a collection, try to move the boundary
  1031   // and absorb them into the old gen.  Returns true if eden was emptied.
  1032   static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
  1033                                          PSYoungGen* young_gen,
  1034                                          PSOldGen* old_gen);
  1036   // Reset time since last full gc
  1037   static void reset_millis_since_last_gc();
  1039  protected:
  1040 #ifdef VALIDATE_MARK_SWEEP
  1041   static GrowableArray<void*>*           _root_refs_stack;
  1042   static GrowableArray<oop> *            _live_oops;
  1043   static GrowableArray<oop> *            _live_oops_moved_to;
  1044   static GrowableArray<size_t>*          _live_oops_size;
  1045   static size_t                          _live_oops_index;
  1046   static size_t                          _live_oops_index_at_perm;
  1047   static GrowableArray<void*>*           _other_refs_stack;
  1048   static GrowableArray<void*>*           _adjusted_pointers;
  1049   static bool                            _pointer_tracking;
  1050   static bool                            _root_tracking;
  1052   // The following arrays are saved since the time of the last GC and
  1053   // assist in tracking down problems where someone has done an errant
  1054   // store into the heap, usually to an oop that wasn't properly
  1055   // handleized across a GC. If we crash or otherwise fail before the
  1056   // next GC, we can query these arrays to find out the object we had
  1057   // intended to do the store to (assuming it is still alive) and the
  1058   // offset within that object. Covered under RecordMarkSweepCompaction.
  1059   static GrowableArray<HeapWord*> *      _cur_gc_live_oops;
  1060   static GrowableArray<HeapWord*> *      _cur_gc_live_oops_moved_to;
  1061   static GrowableArray<size_t>*          _cur_gc_live_oops_size;
  1062   static GrowableArray<HeapWord*> *      _last_gc_live_oops;
  1063   static GrowableArray<HeapWord*> *      _last_gc_live_oops_moved_to;
  1064   static GrowableArray<size_t>*          _last_gc_live_oops_size;
  1065 #endif
  1067  public:
  1068   class MarkAndPushClosure: public OopClosure {
  1069    private:
  1070     ParCompactionManager* _compaction_manager;
  1071    public:
  1072     MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
  1073     virtual void do_oop(oop* p);
  1074     virtual void do_oop(narrowOop* p);
  1075   };
  1077   PSParallelCompact();
  1079   // Convenient accessor for Universe::heap().
  1080   static ParallelScavengeHeap* gc_heap() {
   1081     return (ParallelScavengeHeap*)Universe::heap();
   1082   }
  1084   static void invoke(bool maximum_heap_compaction);
  1085   static void invoke_no_policy(bool maximum_heap_compaction);
  1087   static void post_initialize();
  1088   // Perform initialization for PSParallelCompact that requires
  1089   // allocations.  This should be called during the VM initialization
   1090   // at a point where it would be appropriate to return a JNI_ENOMEM
  1091   // in the event of a failure.
  1092   static bool initialize();
  1094   // Public accessors
  1095   static elapsedTimer* accumulated_time() { return &_accumulated_time; }
  1096   static unsigned int total_invocations() { return _total_invocations; }
  1097   static CollectorCounters* counters()    { return _counters; }
  1099   // Used to add tasks
  1100   static GCTaskManager* const gc_task_manager();
  1101   static klassOop updated_int_array_klass_obj() {
   1102     return _updated_int_array_klass_obj;
   1103   }
  1105   // Marking support
  1106   static inline bool mark_obj(oop obj);
  1107   // Check mark and maybe push on marking stack
  1108   template <class T> static inline void mark_and_push(ParCompactionManager* cm,
  1109                                                       T* p);
  1111   // Compaction support.
  1112   // Return true if p is in the range [beg_addr, end_addr).
  1113   static inline bool is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr);
  1114   static inline bool is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr);
  1116   // Convenience wrappers for per-space data kept in _space_info.
  1117   static inline MutableSpace*     space(SpaceId space_id);
  1118   static inline HeapWord*         new_top(SpaceId space_id);
  1119   static inline HeapWord*         dense_prefix(SpaceId space_id);
  1120   static inline ObjectStartArray* start_array(SpaceId space_id);
  1122   // Return true if the klass should be updated.
  1123   static inline bool should_update_klass(klassOop k);
  1125   // Move and update the live objects in the specified space.
  1126   static void move_and_update(ParCompactionManager* cm, SpaceId space_id);
  1128   // Process the end of the given region range in the dense prefix.
  1129   // This includes saving any object not updated.
  1130   static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
  1131                                             size_t region_start_index,
  1132                                             size_t region_end_index,
  1133                                             idx_t exiting_object_offset,
  1134                                             idx_t region_offset_start,
  1135                                             idx_t region_offset_end);
  1137   // Update a region in the dense prefix.  For each live object
   1138   // in the region, update its interior references.  For each
  1139   // dead object, fill it with deadwood. Dead space at the end
  1140   // of a region range will be filled to the start of the next
  1141   // live object regardless of the region_index_end.  None of the
  1142   // objects in the dense prefix move and dead space is dead
  1143   // (holds only dead objects that don't need any processing), so
  1144   // dead space can be filled in any order.
  1145   static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
  1146                                                   SpaceId space_id,
  1147                                                   size_t region_index_start,
  1148                                                   size_t region_index_end);
  1150   // Return the address of the count + 1st live word in the range [beg, end).
  1151   static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);
  1153   // Return the address of the word to be copied to dest_addr, which must be
  1154   // aligned to a region boundary.
  1155   static HeapWord* first_src_addr(HeapWord* const dest_addr,
  1156                                   SpaceId src_space_id,
  1157                                   size_t src_region_idx);
  1159   // Determine the next source region, set closure.source() to the start of the
   1160   // new region, and return the region index.  Parameter end_addr is the address one
  1161   // beyond the end of source range just processed.  If necessary, switch to a
  1162   // new source space and set src_space_id (in-out parameter) and src_space_top
  1163   // (out parameter) accordingly.
  1164   static size_t next_src_region(MoveAndUpdateClosure& closure,
  1165                                 SpaceId& src_space_id,
  1166                                 HeapWord*& src_space_top,
  1167                                 HeapWord* end_addr);
  1169   // Decrement the destination count for each non-empty source region in the
  1170   // range [beg_region, region(region_align_up(end_addr))).  If the destination
  1171   // count for a region goes to 0 and it needs to be filled, enqueue it.
  1172   static void decrement_destination_counts(ParCompactionManager* cm,
  1173                                            SpaceId src_space_id,
  1174                                            size_t beg_region,
  1175                                            HeapWord* end_addr);
  1177   // Fill a region, copying objects from one or more source regions.
  1178   static void fill_region(ParCompactionManager* cm, size_t region_idx);
  1179   static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
   1180     fill_region(cm, region);
   1181   }
  1183   // Update the deferred objects in the space.
  1184   static void update_deferred_objects(ParCompactionManager* cm, SpaceId id);
  1186   // Mark pointer and follow contents.
  1187   template <class T>
  1188   static inline void mark_and_follow(ParCompactionManager* cm, T* p);
  1190   static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
  1191   static ParallelCompactData& summary_data() { return _summary_data; }
  1193   static inline void adjust_pointer(oop* p)       { adjust_pointer(p, false); }
  1194   static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
  1196   // Reference Processing
  1197   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
  1199   // Return the SpaceId for the given address.
  1200   static SpaceId space_id(HeapWord* addr);
  1202   // Time since last full gc (in milliseconds).
  1203   static jlong millis_since_last_gc();
  1205 #ifdef VALIDATE_MARK_SWEEP
  1206   static void track_adjusted_pointer(void* p, bool isroot);
  1207   static void check_adjust_pointer(void* p);
  1208   static void track_interior_pointers(oop obj);
  1209   static void check_interior_pointers();
  1211   static void reset_live_oop_tracking(bool at_perm);
  1212   static void register_live_oop(oop p, size_t size);
  1213   static void validate_live_oop(oop p, size_t size);
  1214   static void live_oop_moved_to(HeapWord* q, size_t size, HeapWord* compaction_top);
  1215   static void compaction_complete();
  1217   // Query the results of RecordMarkSweepCompaction:
  1218   // finds and prints the current base oop and offset for a word
  1219   // within an oop that was live during the last GC.  Helpful for
  1220   // tracking down heap stomps.
  1221   static void print_new_location_of_heap_address(HeapWord* q);
  1222 #endif  // #ifdef VALIDATE_MARK_SWEEP
  1224   // Callbacks for class unloading.
  1225   // Update subklass/sibling/implementor links at the end of marking.
  1226   static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k);
  1228   // Clear unmarked oops in MDOs at the end of marking.
  1229   static void revisit_mdo(ParCompactionManager* cm, DataLayout* p);
  1231 #ifndef PRODUCT
  1232   // Debugging support.
  1233   static const char* space_names[last_space_id];
  1234   static void print_region_ranges();
  1235   static void print_dense_prefix_stats(const char* const algorithm,
  1236                                        const SpaceId id,
  1237                                        const bool maximum_compaction,
  1238                                        HeapWord* const addr);
  1239   static void summary_phase_msg(SpaceId dst_space_id,
  1240                                 HeapWord* dst_beg, HeapWord* dst_end,
  1241                                 SpaceId src_space_id,
  1242                                 HeapWord* src_beg, HeapWord* src_end);
  1243 #endif  // #ifndef PRODUCT
  1245 #ifdef  ASSERT
  1246   // Sanity check the new location of a word in the heap.
  1247   static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
  1248   // Verify that all the regions have been emptied.
  1249   static void verify_complete(SpaceId space_id);
  1250 #endif  // #ifdef ASSERT
  1251 };
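       // mark_obj() below marks obj in the bitmap and, when this thread wins the
       // race to mark it, records the object's size in the summary data.  The
       // boolean result lets callers decide whether they should go on to follow
       // the object's contents.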
  1253 inline bool PSParallelCompact::mark_obj(oop obj) {
  1254   const int obj_size = obj->size();
  1255   if (mark_bitmap()->mark_obj(obj, obj_size)) {
  1256     _summary_data.add_obj(obj, obj_size);
  1257     return true;
  1258   } else {
  1259     return false;
  1260   }
  1261 }
  1263 template <class T>
  1264 inline void PSParallelCompact::follow_root(ParCompactionManager* cm, T* p) {
  1265   assert(!Universe::heap()->is_in_reserved(p),
  1266          "roots shouldn't be things within the heap");
  1267 #ifdef VALIDATE_MARK_SWEEP
  1268   if (ValidateMarkSweep) {
  1269     guarantee(!_root_refs_stack->contains(p), "should only be in here once");
  1270     _root_refs_stack->push(p);
  1271   }
  1272 #endif
  1273   T heap_oop = oopDesc::load_heap_oop(p);
  1274   if (!oopDesc::is_null(heap_oop)) {
  1275     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  1276     if (mark_bitmap()->is_unmarked(obj)) {
  1277       if (mark_obj(obj)) {
  1278         obj->follow_contents(cm);
  1279       }
  1280     }
  1281   }
  1282   cm->follow_marking_stacks();
  1283 }
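       // mark_and_follow() performs the same mark-then-trace step as follow_root(),
       // but for interior pointers: it omits the "roots are outside the heap"
       // assertion and does not drain the marking stacks afterwards.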
  1285 template <class T>
  1286 inline void PSParallelCompact::mark_and_follow(ParCompactionManager* cm,
  1287                                                T* p) {
  1288   T heap_oop = oopDesc::load_heap_oop(p);
  1289   if (!oopDesc::is_null(heap_oop)) {
  1290     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  1291     if (mark_bitmap()->is_unmarked(obj)) {
  1292       if (mark_obj(obj)) {
  1293         obj->follow_contents(cm);
  1294       }
  1295     }
  1296   }
  1297 }
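       // mark_and_push() defers tracing: a newly marked object is pushed onto the
       // compaction manager's marking stack rather than followed immediately,
       // which keeps marking iterative instead of deeply recursive.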
  1299 template <class T>
  1300 inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
  1301   T heap_oop = oopDesc::load_heap_oop(p);
  1302   if (!oopDesc::is_null(heap_oop)) {
  1303     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  1304     if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
  1305       cm->push(obj);
  1306     }
  1307   }
  1308 }
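       // adjust_pointer() rewrites *p to the object's post-compaction address as
       // computed from the summary data; a NULL result from calc_new_pointer() is
       // tolerated only for shared objects, which are never forwarded.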
  1310 template <class T>
  1311 inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
  1312   T heap_oop = oopDesc::load_heap_oop(p);
  1313   if (!oopDesc::is_null(heap_oop)) {
  1314     oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
  1315     oop new_obj = (oop)summary_data().calc_new_pointer(obj);
  1316     assert(new_obj != NULL ||                     // is forwarding ptr?
  1317            obj->is_shared(),                      // never forwarded?
  1318            "should be forwarded");
  1319     // Just always do the update unconditionally?
  1320     if (new_obj != NULL) {
  1321       assert(Universe::heap()->is_in_reserved(new_obj),
  1322              "should be in object space");
  1323       oopDesc::encode_store_heap_oop_not_null(p, new_obj);
  1324     }
  1325   }
  1326   VALIDATE_MARK_SWEEP_ONLY(track_adjusted_pointer(p, isroot));
  1327 }
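       // KeepAliveClosure::do_oop_work() simply marks and pushes the oop at p;
       // reference processing applies it to referents (and whatever they reach)
       // that must be kept alive.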
  1329 template <class T>
  1330 inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
  1331 #ifdef VALIDATE_MARK_SWEEP
  1332   if (ValidateMarkSweep) {
  1333     if (!Universe::heap()->is_in_reserved(p)) {
  1334       _root_refs_stack->push(p);
  1335     } else {
  1336       _other_refs_stack->push(p);
  1337     }
  1338   }
  1339 #endif
  1340   mark_and_push(_compaction_manager, p);
  1341 }
  1343 inline bool PSParallelCompact::print_phases() {
  1344   return _print_phases;
  1345 }
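       // normal_distribution() evaluates a Gaussian density at the given density:
       //
       //   _dwl_first_term * exp(-0.5 * ((density - _dwl_mean) / _dwl_std_dev)^2)
       //
       // The _dwl_ fields suggest it feeds the dead wood limiter used when choosing
       // the dense prefix.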
  1347 inline double PSParallelCompact::normal_distribution(double density) {
  1348   assert(_dwl_initialized, "uninitialized");
  1349   const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
  1350   return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
  1351 }
  1353 inline bool
  1354 PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
  1355                                                idx_t bit)
  1356 {
  1357   assert(bit > 0, "cannot call this for the first bit/region");
  1358   assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
  1359          "sanity check");
  1361   // Dead space crosses the boundary if (1) a partial object does not extend
  1362   // onto the region, (2) an object does not start at the beginning of the
  1363   // region, and (3) an object does not end at the end of the prior region.
  1364   return region->partial_obj_size() == 0 &&
  1365     !_mark_bitmap.is_obj_beg(bit) &&
  1366     !_mark_bitmap.is_obj_end(bit - 1);
  1367 }
  1369 inline bool
  1370 PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
  1371   return p >= beg_addr && p < end_addr;
  1372 }
  1374 inline bool
  1375 PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
  1376   return is_in((HeapWord*)p, beg_addr, end_addr);
  1377 }
  1379 inline MutableSpace* PSParallelCompact::space(SpaceId id) {
  1380   assert(id < last_space_id, "id out of range");
  1381   return _space_info[id].space();
  1382 }
  1384 inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
  1385   assert(id < last_space_id, "id out of range");
  1386   return _space_info[id].new_top();
  1387 }
  1389 inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
  1390   assert(id < last_space_id, "id out of range");
  1391   return _space_info[id].dense_prefix();
  1392 }
  1394 inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
  1395   assert(id < last_space_id, "id out of range");
  1396   return _space_info[id].start_array();
  1397 }
  1399 inline bool PSParallelCompact::should_update_klass(klassOop k) {
  1400   return ((HeapWord*) k) >= dense_prefix(perm_space_id);
  1401 }
  1403 #ifdef ASSERT
  1404 inline void
  1405 PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
  1406 {
  1407   assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
  1408          "must move left or to a different space");
  1409   assert(is_object_aligned((intptr_t)old_addr) && is_object_aligned((intptr_t)new_addr),
  1410          "checking alignment");
  1411 }
  1412 #endif // ASSERT
  1414 class MoveAndUpdateClosure: public ParMarkBitMapClosure {
  1415  public:
  1416   inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
  1417                               ObjectStartArray* start_array,
  1418                               HeapWord* destination, size_t words);
  1420   // Accessors.
  1421   HeapWord* destination() const         { return _destination; }
  1423   // If the object will fit (size <= words_remaining()), copy it to the current
  1424   // destination, update the interior oops and the start array, and return
  1425   // either full (if the closure is full) or incomplete.  If the object will
  1426   // not fit, return would_overflow.
  1427   virtual IterationStatus do_addr(HeapWord* addr, size_t size);
  1429   // Copy enough words to fill this closure, starting at source().  Interior
  1430   // oops and the start array are not updated.  Return full.
  1431   IterationStatus copy_until_full();
  1433   // Copy enough words to fill this closure or to the end of an object,
  1434   // whichever is smaller, starting at source().  Interior oops and the start
  1435   // array are not updated.
  1436   void copy_partial_obj();
  1438  protected:
  1439   // Update variables to indicate that word_count words were processed.
  1440   inline void update_state(size_t word_count);
  1442  protected:
  1443   ObjectStartArray* const _start_array;
  1444   HeapWord*               _destination;         // Next addr to be written.
  1445 };
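       // Roughly, a MoveAndUpdateClosure is iterated over the live words of a
       // source region: each do_addr() call copies one object to destination(),
       // updates its interior oops and the start array, and advances the closure's
       // state until its word budget (words_remaining()) is used up.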
  1447 inline
  1448 MoveAndUpdateClosure::MoveAndUpdateClosure(ParMarkBitMap* bitmap,
  1449                                            ParCompactionManager* cm,
  1450                                            ObjectStartArray* start_array,
  1451                                            HeapWord* destination,
  1452                                            size_t words) :
  1453   ParMarkBitMapClosure(bitmap, cm, words), _start_array(start_array)
  1454 {
  1455   _destination = destination;
  1456 }
  1458 inline void MoveAndUpdateClosure::update_state(size_t words)
  1459 {
  1460   decrement_words_remaining(words);
  1461   _source += words;
  1462   _destination += words;
  1463 }
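       // UpdateOnlyClosure updates interior oops in place without copying the
       // objects themselves, e.g. for objects in the dense prefix, which keep
       // their addresses.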
  1465 class UpdateOnlyClosure: public ParMarkBitMapClosure {
  1466  private:
  1467   const PSParallelCompact::SpaceId _space_id;
  1468   ObjectStartArray* const          _start_array;
  1470  public:
  1471   UpdateOnlyClosure(ParMarkBitMap* mbm,
  1472                     ParCompactionManager* cm,
  1473                     PSParallelCompact::SpaceId space_id);
  1475   // Update the object.
  1476   virtual IterationStatus do_addr(HeapWord* addr, size_t words);
  1478   inline void do_addr(HeapWord* addr);
  1479 };
  1481 inline void UpdateOnlyClosure::do_addr(HeapWord* addr)
  1482 {
  1483   _start_array->allocate_block(addr);
  1484   oop(addr)->update_contents(compaction_manager());
  1485 }
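       // FillClosure overwrites a range of dead space with filler objects via
       // CollectedHeap::fill_with_objects() and records each filler block in the
       // ObjectStartArray, keeping the old and perm spaces parseable.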
  1487 class FillClosure: public ParMarkBitMapClosure
  1488 {
  1489 public:
  1490   FillClosure(ParCompactionManager* cm, PSParallelCompact::SpaceId space_id) :
  1491     ParMarkBitMapClosure(PSParallelCompact::mark_bitmap(), cm),
  1492     _start_array(PSParallelCompact::start_array(space_id))
  1493   {
  1494     assert(space_id == PSParallelCompact::perm_space_id ||
  1495            space_id == PSParallelCompact::old_space_id,
  1496            "cannot use FillClosure in the young gen");
  1497   }
  1499   virtual IterationStatus do_addr(HeapWord* addr, size_t size) {
  1500     CollectedHeap::fill_with_objects(addr, size);
  1501     HeapWord* const end = addr + size;
  1502     do {
  1503       _start_array->allocate_block(addr);
  1504       addr += oop(addr)->size();
  1505     } while (addr < end);
  1506     return ParMarkBitMap::incomplete;
  1507   }
  1509 private:
  1510   ObjectStartArray* const _start_array;
  1511 };
  1513 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_HPP
