src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp

changeset 810:81cd571500b0
parent    704:850fdf70db2b
child     811:0166ac265d53
     1.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Sep 30 11:49:31 2008 -0700
     1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Sep 30 12:20:22 2008 -0700
     1.3 @@ -76,87 +76,87 @@
     1.4  {
     1.5  public:
     1.6    // Sizes are in HeapWords, unless indicated otherwise.
     1.7 -  static const size_t Log2ChunkSize;
     1.8 -  static const size_t ChunkSize;
     1.9 -  static const size_t ChunkSizeBytes;
    1.10 +  static const size_t Log2RegionSize;
    1.11 +  static const size_t RegionSize;
    1.12 +  static const size_t RegionSizeBytes;
    1.13  
    1.14 -  // Mask for the bits in a size_t to get an offset within a chunk.
    1.15 -  static const size_t ChunkSizeOffsetMask;
    1.16 -  // Mask for the bits in a pointer to get an offset within a chunk.
    1.17 -  static const size_t ChunkAddrOffsetMask;
    1.18 -  // Mask for the bits in a pointer to get the address of the start of a chunk.
    1.19 -  static const size_t ChunkAddrMask;
    1.20 +  // Mask for the bits in a size_t to get an offset within a region.
    1.21 +  static const size_t RegionSizeOffsetMask;
    1.22 +  // Mask for the bits in a pointer to get an offset within a region.
    1.23 +  static const size_t RegionAddrOffsetMask;
    1.24 +  // Mask for the bits in a pointer to get the address of the start of a region.
    1.25 +  static const size_t RegionAddrMask;
    1.26  
    1.27    static const size_t Log2BlockSize;
    1.28    static const size_t BlockSize;
    1.29    static const size_t BlockOffsetMask;
    1.30    static const size_t BlockMask;
    1.31  
    1.32 -  static const size_t BlocksPerChunk;
    1.33 +  static const size_t BlocksPerRegion;
    1.34  
    1.35 -  class ChunkData
    1.36 +  class RegionData
    1.37    {
    1.38    public:
    1.39 -    // Destination address of the chunk.
    1.40 +    // Destination address of the region.
    1.41      HeapWord* destination() const { return _destination; }
    1.42  
    1.43 -    // The first chunk containing data destined for this chunk.
    1.44 -    size_t source_chunk() const { return _source_chunk; }
    1.45 +    // The first region containing data destined for this region.
    1.46 +    size_t source_region() const { return _source_region; }
    1.47  
    1.48 -    // The object (if any) starting in this chunk and ending in a different
    1.49 -    // chunk that could not be updated during the main (parallel) compaction
    1.50 +    // The object (if any) starting in this region and ending in a different
    1.51 +    // region that could not be updated during the main (parallel) compaction
    1.52      // phase.  This is different from _partial_obj_addr, which is an object that
    1.53 -    // extends onto a source chunk.  However, the two uses do not overlap in
    1.54 +    // extends onto a source region.  However, the two uses do not overlap in
    1.55      // time, so the same field is used to save space.
    1.56      HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
    1.57  
    1.58 -    // The starting address of the partial object extending onto the chunk.
    1.59 +    // The starting address of the partial object extending onto the region.
    1.60      HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
    1.61  
    1.62 -    // Size of the partial object extending onto the chunk (words).
    1.63 +    // Size of the partial object extending onto the region (words).
    1.64      size_t partial_obj_size() const { return _partial_obj_size; }
    1.65  
    1.66 -    // Size of live data that lies within this chunk due to objects that start
    1.67 -    // in this chunk (words).  This does not include the partial object
    1.68 -    // extending onto the chunk (if any), or the part of an object that extends
    1.69 -    // onto the next chunk (if any).
    1.70 +    // Size of live data that lies within this region due to objects that start
    1.71 +    // in this region (words).  This does not include the partial object
    1.72 +    // extending onto the region (if any), or the part of an object that extends
    1.73 +    // onto the next region (if any).
    1.74      size_t live_obj_size() const { return _dc_and_los & los_mask; }
    1.75  
    1.76 -    // Total live data that lies within the chunk (words).
    1.77 +    // Total live data that lies within the region (words).
    1.78      size_t data_size() const { return partial_obj_size() + live_obj_size(); }
    1.79  
    1.80 -    // The destination_count is the number of other chunks to which data from
    1.81 -    // this chunk will be copied.  At the end of the summary phase, the valid
    1.82 +    // The destination_count is the number of other regions to which data from
    1.83 +    // this region will be copied.  At the end of the summary phase, the valid
    1.84      // values of destination_count are
    1.85      //
    1.86 -    // 0 - data from the chunk will be compacted completely into itself, or the
    1.87 -    //     chunk is empty.  The chunk can be claimed and then filled.
    1.88 -    // 1 - data from the chunk will be compacted into 1 other chunk; some
    1.89 -    //     data from the chunk may also be compacted into the chunk itself.
    1.90 -    // 2 - data from the chunk will be copied to 2 other chunks.
    1.91 +    // 0 - data from the region will be compacted completely into itself, or the
    1.92 +    //     region is empty.  The region can be claimed and then filled.
    1.93 +    // 1 - data from the region will be compacted into 1 other region; some
    1.94 +    //     data from the region may also be compacted into the region itself.
    1.95 +    // 2 - data from the region will be copied to 2 other regions.
    1.96      //
    1.97 -    // During compaction as chunks are emptied, the destination_count is
    1.98 +    // During compaction as regions are emptied, the destination_count is
    1.99      // decremented (atomically) and when it reaches 0, it can be claimed and
   1.100      // then filled.
   1.101      //
   1.102 -    // A chunk is claimed for processing by atomically changing the
   1.103 -    // destination_count to the claimed value (dc_claimed).  After a chunk has
   1.104 +    // A region is claimed for processing by atomically changing the
   1.105 +    // destination_count to the claimed value (dc_claimed).  After a region has
   1.106      // been filled, the destination_count should be set to the completed value
   1.107      // (dc_completed).
   1.108      inline uint destination_count() const;
   1.109      inline uint destination_count_raw() const;
   1.110  
   1.111 -    // The location of the java heap data that corresponds to this chunk.
   1.112 +    // The location of the java heap data that corresponds to this region.
   1.113      inline HeapWord* data_location() const;
   1.114  
   1.115 -    // The highest address referenced by objects in this chunk.
   1.116 +    // The highest address referenced by objects in this region.
   1.117      inline HeapWord* highest_ref() const;
   1.118  
   1.119 -    // Whether this chunk is available to be claimed, has been claimed, or has
   1.120 +    // Whether this region is available to be claimed, has been claimed, or has
   1.121      // been completed.
   1.122      //
   1.123 -    // Minor subtlety:  claimed() returns true if the chunk is marked
   1.124 -    // completed(), which is desirable since a chunk must be claimed before it
   1.125 +    // Minor subtlety:  claimed() returns true if the region is marked
   1.126 +    // completed(), which is desirable since a region must be claimed before it
   1.127      // can be completed.
   1.128      bool available() const { return _dc_and_los < dc_one; }
   1.129      bool claimed() const   { return _dc_and_los >= dc_claimed; }
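
To make the encoding behind these predicates concrete, here is a minimal self-contained sketch, assuming the usual layout in which the destination count occupies the high bits of a 32-bit word and the live-object size the low bits. The shift and sentinel values are assumptions for illustration; the real constants are defined in psParallelCompact.cpp.

    #include <cstdint>

    typedef uint32_t region_sz_t;

    // Assumed layout: bits 31..27 hold the destination count (or a claimed /
    // completed sentinel), bits 26..0 hold the live-object size in words.
    const region_sz_t dc_shift     = 27;
    const region_sz_t dc_mask      = ~region_sz_t(0) << dc_shift;
    const region_sz_t dc_one       = region_sz_t(1) << dc_shift;
    const region_sz_t dc_claimed   = region_sz_t(0x8) << dc_shift;
    const region_sz_t dc_completed = region_sz_t(0xc) << dc_shift;
    const region_sz_t los_mask     = ~dc_mask;

    // With this layout the predicates reduce to unsigned comparisons, and
    // claimed() holds for completed regions because dc_completed > dc_claimed,
    // which is exactly the "minor subtlety" noted above.
    bool available(region_sz_t v) { return v <  dc_one;       }  // count == 0
    bool claimed  (region_sz_t v) { return v >= dc_claimed;   }
    bool completed(region_sz_t v) { return v >= dc_completed; }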
   1.130 @@ -164,11 +164,11 @@
   1.131  
   1.132      // These are not atomic.
   1.133      void set_destination(HeapWord* addr)       { _destination = addr; }
   1.134 -    void set_source_chunk(size_t chunk)        { _source_chunk = chunk; }
   1.135 +    void set_source_region(size_t region)      { _source_region = region; }
   1.136      void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
   1.137      void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
   1.138      void set_partial_obj_size(size_t words)    {
   1.139 -      _partial_obj_size = (chunk_sz_t) words;
   1.140 +      _partial_obj_size = (region_sz_t) words;
   1.141      }
   1.142  
   1.143      inline void set_destination_count(uint count);
   1.144 @@ -184,44 +184,44 @@
   1.145      inline bool claim();
   1.146  
   1.147    private:
   1.148 -    // The type used to represent object sizes within a chunk.
   1.149 -    typedef uint chunk_sz_t;
   1.150 +    // The type used to represent object sizes within a region.
   1.151 +    typedef uint region_sz_t;
   1.152  
   1.153      // Constants for manipulating the _dc_and_los field, which holds both the
   1.154      // destination count and live obj size.  The live obj size lives at the
   1.155      // least significant end so no masking is necessary when adding.
   1.156 -    static const chunk_sz_t dc_shift;           // Shift amount.
   1.157 -    static const chunk_sz_t dc_mask;            // Mask for destination count.
   1.158 -    static const chunk_sz_t dc_one;             // 1, shifted appropriately.
   1.159 -    static const chunk_sz_t dc_claimed;         // Chunk has been claimed.
   1.160 -    static const chunk_sz_t dc_completed;       // Chunk has been completed.
   1.161 -    static const chunk_sz_t los_mask;           // Mask for live obj size.
   1.162 +    static const region_sz_t dc_shift;           // Shift amount.
   1.163 +    static const region_sz_t dc_mask;            // Mask for destination count.
   1.164 +    static const region_sz_t dc_one;             // 1, shifted appropriately.
   1.165 +    static const region_sz_t dc_claimed;         // Region has been claimed.
   1.166 +    static const region_sz_t dc_completed;       // Region has been completed.
   1.167 +    static const region_sz_t los_mask;           // Mask for live obj size.
   1.168  
   1.169 -    HeapWord*           _destination;
   1.170 -    size_t              _source_chunk;
   1.171 -    HeapWord*           _partial_obj_addr;
   1.172 -    chunk_sz_t          _partial_obj_size;
   1.173 -    chunk_sz_t volatile _dc_and_los;
   1.174 +    HeapWord*            _destination;
   1.175 +    size_t               _source_region;
   1.176 +    HeapWord*            _partial_obj_addr;
   1.177 +    region_sz_t          _partial_obj_size;
   1.178 +    region_sz_t volatile _dc_and_los;
   1.179  #ifdef ASSERT
   1.180      // These enable optimizations that are only partially implemented.  Use
   1.181      // debug builds to prevent the code fragments from breaking.
   1.182 -    HeapWord*           _data_location;
   1.183 -    HeapWord*           _highest_ref;
   1.184 +    HeapWord*            _data_location;
   1.185 +    HeapWord*            _highest_ref;
   1.186  #endif  // #ifdef ASSERT
   1.187  
   1.188  #ifdef ASSERT
   1.189     public:
   1.190 -    uint            _pushed;    // 0 until chunk is pushed onto a worker's stack
   1.191 +    uint            _pushed;   // 0 until region is pushed onto a worker's stack
   1.192     private:
   1.193  #endif
   1.194    };
   1.195  
   1.196    // 'Blocks' allow shorter sections of the bitmap to be searched.  Each Block
   1.197 -  // holds an offset, which is the amount of live data in the Chunk to the left
   1.198 +  // holds an offset, which is the amount of live data in the Region to the left
   1.199    // of the first live object in the Block.  This amount of live data will
   1.200    // include any object extending into the block. The first block in
   1.201 -  // a chunk does not include any partial object extending into the
   1.202 -  // the chunk.
   1.203 +  // a region does not include any partial object extending into the
    1.204 +  // region.
   1.205    //
   1.206    // The offset also encodes the
   1.207    // 'parity' of the first 1 bit in the Block:  a positive offset means the
   1.208 @@ -286,27 +286,27 @@
   1.209    ParallelCompactData();
   1.210    bool initialize(MemRegion covered_region);
   1.211  
   1.212 -  size_t chunk_count() const { return _chunk_count; }
   1.213 +  size_t region_count() const { return _region_count; }
   1.214  
   1.215 -  // Convert chunk indices to/from ChunkData pointers.
   1.216 -  inline ChunkData* chunk(size_t chunk_idx) const;
   1.217 -  inline size_t     chunk(const ChunkData* const chunk_ptr) const;
   1.218 +  // Convert region indices to/from RegionData pointers.
   1.219 +  inline RegionData* region(size_t region_idx) const;
   1.220 +  inline size_t     region(const RegionData* const region_ptr) const;
   1.221  
   1.222 -  // Returns true if the given address is contained within the chunk
   1.223 -  bool chunk_contains(size_t chunk_index, HeapWord* addr);
   1.224 +  // Returns true if the given address is contained within the region
   1.225 +  bool region_contains(size_t region_index, HeapWord* addr);
   1.226  
   1.227    size_t block_count() const { return _block_count; }
   1.228    inline BlockData* block(size_t n) const;
   1.229  
   1.230 -  // Returns true if the given block is in the given chunk.
   1.231 -  static bool chunk_contains_block(size_t chunk_index, size_t block_index);
   1.232 +  // Returns true if the given block is in the given region.
   1.233 +  static bool region_contains_block(size_t region_index, size_t block_index);
   1.234  
   1.235    void add_obj(HeapWord* addr, size_t len);
   1.236    void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
   1.237  
   1.238 -  // Fill in the chunks covering [beg, end) so that no data moves; i.e., the
   1.239 -  // destination of chunk n is simply the start of chunk n.  The argument beg
   1.240 -  // must be chunk-aligned; end need not be.
   1.241 +  // Fill in the regions covering [beg, end) so that no data moves; i.e., the
   1.242 +  // destination of region n is simply the start of region n.  The argument beg
   1.243 +  // must be region-aligned; end need not be.
   1.244    void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
   1.245  
   1.246    bool summarize(HeapWord* target_beg, HeapWord* target_end,
   1.247 @@ -314,27 +314,27 @@
   1.248                   HeapWord** target_next, HeapWord** source_next = 0);
   1.249  
   1.250    void clear();
   1.251 -  void clear_range(size_t beg_chunk, size_t end_chunk);
   1.252 +  void clear_range(size_t beg_region, size_t end_region);
   1.253    void clear_range(HeapWord* beg, HeapWord* end) {
   1.254 -    clear_range(addr_to_chunk_idx(beg), addr_to_chunk_idx(end));
   1.255 +    clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
   1.256    }
   1.257  
   1.258 -  // Return the number of words between addr and the start of the chunk
   1.259 +  // Return the number of words between addr and the start of the region
   1.260    // containing addr.
   1.261 -  inline size_t     chunk_offset(const HeapWord* addr) const;
   1.262 +  inline size_t     region_offset(const HeapWord* addr) const;
   1.263  
   1.264 -  // Convert addresses to/from a chunk index or chunk pointer.
   1.265 -  inline size_t     addr_to_chunk_idx(const HeapWord* addr) const;
   1.266 -  inline ChunkData* addr_to_chunk_ptr(const HeapWord* addr) const;
   1.267 -  inline HeapWord*  chunk_to_addr(size_t chunk) const;
   1.268 -  inline HeapWord*  chunk_to_addr(size_t chunk, size_t offset) const;
   1.269 -  inline HeapWord*  chunk_to_addr(const ChunkData* chunk) const;
   1.270 +  // Convert addresses to/from a region index or region pointer.
   1.271 +  inline size_t     addr_to_region_idx(const HeapWord* addr) const;
   1.272 +  inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
   1.273 +  inline HeapWord*  region_to_addr(size_t region) const;
   1.274 +  inline HeapWord*  region_to_addr(size_t region, size_t offset) const;
   1.275 +  inline HeapWord*  region_to_addr(const RegionData* region) const;
   1.276  
   1.277 -  inline HeapWord*  chunk_align_down(HeapWord* addr) const;
   1.278 -  inline HeapWord*  chunk_align_up(HeapWord* addr) const;
   1.279 -  inline bool       is_chunk_aligned(HeapWord* addr) const;
   1.280 +  inline HeapWord*  region_align_down(HeapWord* addr) const;
   1.281 +  inline HeapWord*  region_align_up(HeapWord* addr) const;
   1.282 +  inline bool       is_region_aligned(HeapWord* addr) const;
   1.283  
   1.284 -  // Analogous to chunk_offset() for blocks.
   1.285 +  // Analogous to region_offset() for blocks.
   1.286    size_t     block_offset(const HeapWord* addr) const;
   1.287    size_t     addr_to_block_idx(const HeapWord* addr) const;
   1.288    size_t     addr_to_block_idx(const oop obj) const {
   1.289 @@ -344,7 +344,7 @@
   1.290    inline HeapWord*  block_to_addr(size_t block) const;
   1.291  
   1.292    // Return the address one past the end of the partial object.
   1.293 -  HeapWord* partial_obj_end(size_t chunk_idx) const;
   1.294 +  HeapWord* partial_obj_end(size_t region_idx) const;
   1.295  
   1.296    // Return the new location of the object p after the
    1.297    // compaction.
   1.298 @@ -353,8 +353,8 @@
   1.299    // Same as calc_new_pointer() using blocks.
   1.300    HeapWord* block_calc_new_pointer(HeapWord* addr);
   1.301  
   1.302 -  // Same as calc_new_pointer() using chunks.
   1.303 -  HeapWord* chunk_calc_new_pointer(HeapWord* addr);
   1.304 +  // Same as calc_new_pointer() using regions.
   1.305 +  HeapWord* region_calc_new_pointer(HeapWord* addr);
   1.306  
   1.307    HeapWord* calc_new_pointer(oop p) {
   1.308      return calc_new_pointer((HeapWord*) p);
   1.309 @@ -364,7 +364,7 @@
   1.310    klassOop calc_new_klass(klassOop);
   1.311  
   1.312    // Given a block returns true if the partial object for the
   1.313 -  // corresponding chunk ends in the block.  Returns false, otherwise
    1.314 +  // corresponding region ends in the block.  Returns false otherwise.
   1.315    // If there is no partial object, returns false.
   1.316    bool partial_obj_ends_in_block(size_t block_index);
   1.317  
   1.318 @@ -378,7 +378,7 @@
   1.319  
   1.320  private:
   1.321    bool initialize_block_data(size_t region_size);
   1.322 -  bool initialize_chunk_data(size_t region_size);
   1.323 +  bool initialize_region_data(size_t region_size);
   1.324    PSVirtualSpace* create_vspace(size_t count, size_t element_size);
   1.325  
   1.326  private:
   1.327 @@ -387,9 +387,9 @@
   1.328    HeapWord*       _region_end;
   1.329  #endif  // #ifdef ASSERT
   1.330  
   1.331 -  PSVirtualSpace* _chunk_vspace;
   1.332 -  ChunkData*      _chunk_data;
   1.333 -  size_t          _chunk_count;
   1.334 +  PSVirtualSpace* _region_vspace;
   1.335 +  RegionData*     _region_data;
   1.336 +  size_t          _region_count;
   1.337  
   1.338    PSVirtualSpace* _block_vspace;
   1.339    BlockData*      _block_data;
   1.340 @@ -397,64 +397,64 @@
   1.341  };
   1.342  
   1.343  inline uint
   1.344 -ParallelCompactData::ChunkData::destination_count_raw() const
   1.345 +ParallelCompactData::RegionData::destination_count_raw() const
   1.346  {
   1.347    return _dc_and_los & dc_mask;
   1.348  }
   1.349  
   1.350  inline uint
   1.351 -ParallelCompactData::ChunkData::destination_count() const
   1.352 +ParallelCompactData::RegionData::destination_count() const
   1.353  {
   1.354    return destination_count_raw() >> dc_shift;
   1.355  }
   1.356  
   1.357  inline void
   1.358 -ParallelCompactData::ChunkData::set_destination_count(uint count)
   1.359 +ParallelCompactData::RegionData::set_destination_count(uint count)
   1.360  {
   1.361    assert(count <= (dc_completed >> dc_shift), "count too large");
   1.362 -  const chunk_sz_t live_sz = (chunk_sz_t) live_obj_size();
   1.363 +  const region_sz_t live_sz = (region_sz_t) live_obj_size();
   1.364    _dc_and_los = (count << dc_shift) | live_sz;
   1.365  }
   1.366  
   1.367 -inline void ParallelCompactData::ChunkData::set_live_obj_size(size_t words)
   1.368 +inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
   1.369  {
   1.370    assert(words <= los_mask, "would overflow");
   1.371 -  _dc_and_los = destination_count_raw() | (chunk_sz_t)words;
   1.372 +  _dc_and_los = destination_count_raw() | (region_sz_t)words;
   1.373  }
   1.374  
   1.375 -inline void ParallelCompactData::ChunkData::decrement_destination_count()
   1.376 +inline void ParallelCompactData::RegionData::decrement_destination_count()
   1.377  {
   1.378    assert(_dc_and_los < dc_claimed, "already claimed");
   1.379    assert(_dc_and_los >= dc_one, "count would go negative");
   1.380    Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
   1.381  }
   1.382  
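An aside on decrement_destination_count() above: adding dc_mask looks odd, but if dc_mask = ~0u << dc_shift then in 32-bit two's-complement arithmetic dc_mask equals (uint32_t)(-dc_one), so the atomic add subtracts exactly one from the count field while leaving the live-obj size bits untouched. A small sketch under the same assumed constants as before:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t dc_shift = 27;                        // assumed, as above
      const uint32_t dc_mask  = ~uint32_t(0) << dc_shift;  // == (uint32_t)(-dc_one)
      const uint32_t dc_one   = uint32_t(1)  << dc_shift;

      // Count 2, live size 12345; adding dc_mask wraps around and decrements
      // the count.  The low bits are safe because the count is nonzero, so
      // the subtraction cannot borrow out of the high field.
      uint32_t dc_and_los = (2 * dc_one) | 12345;
      dc_and_los += dc_mask;
      assert(dc_and_los == ((1 * dc_one) | 12345));
      return 0;
    }
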
   1.383 -inline HeapWord* ParallelCompactData::ChunkData::data_location() const
   1.384 +inline HeapWord* ParallelCompactData::RegionData::data_location() const
   1.385  {
   1.386    DEBUG_ONLY(return _data_location;)
   1.387    NOT_DEBUG(return NULL;)
   1.388  }
   1.389  
   1.390 -inline HeapWord* ParallelCompactData::ChunkData::highest_ref() const
   1.391 +inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
   1.392  {
   1.393    DEBUG_ONLY(return _highest_ref;)
   1.394    NOT_DEBUG(return NULL;)
   1.395  }
   1.396  
   1.397 -inline void ParallelCompactData::ChunkData::set_data_location(HeapWord* addr)
   1.398 +inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
   1.399  {
   1.400    DEBUG_ONLY(_data_location = addr;)
   1.401  }
   1.402  
   1.403 -inline void ParallelCompactData::ChunkData::set_completed()
   1.404 +inline void ParallelCompactData::RegionData::set_completed()
   1.405  {
   1.406    assert(claimed(), "must be claimed first");
   1.407 -  _dc_and_los = dc_completed | (chunk_sz_t) live_obj_size();
   1.408 +  _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
   1.409  }
   1.410  
   1.411 -// MT-unsafe claiming of a chunk.  Should only be used during single threaded
   1.412 +// MT-unsafe claiming of a region.  Should only be used during single threaded
   1.413  // execution.
   1.414 -inline bool ParallelCompactData::ChunkData::claim_unsafe()
   1.415 +inline bool ParallelCompactData::RegionData::claim_unsafe()
   1.416  {
   1.417    if (available()) {
   1.418      _dc_and_los |= dc_claimed;
   1.419 @@ -463,13 +463,13 @@
   1.420    return false;
   1.421  }
   1.422  
   1.423 -inline void ParallelCompactData::ChunkData::add_live_obj(size_t words)
   1.424 +inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
   1.425  {
   1.426    assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
   1.427    Atomic::add((int) words, (volatile int*) &_dc_and_los);
   1.428  }
   1.429  
   1.430 -inline void ParallelCompactData::ChunkData::set_highest_ref(HeapWord* addr)
   1.431 +inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
   1.432  {
   1.433  #ifdef ASSERT
   1.434    HeapWord* tmp = _highest_ref;
   1.435 @@ -479,7 +479,7 @@
   1.436  #endif  // #ifdef ASSERT
   1.437  }
   1.438  
   1.439 -inline bool ParallelCompactData::ChunkData::claim()
   1.440 +inline bool ParallelCompactData::RegionData::claim()
   1.441  {
   1.442    const int los = (int) live_obj_size();
   1.443    const int old = Atomic::cmpxchg(dc_claimed | los,
   1.444 @@ -487,19 +487,19 @@
   1.445    return old == los;
   1.446  }
   1.447  
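The claim() above boils down to one compare-and-swap: a region is claimable only while its word holds exactly its live-object size (destination count zero), and the winning thread installs the dc_claimed sentinel. A hedged equivalent using std::atomic in place of HotSpot's Atomic::cmpxchg, with the hypothetical constants from the earlier sketch:

    #include <atomic>
    #include <cstdint>

    // Hypothetical stand-in for RegionData::claim().
    bool claim(std::atomic<uint32_t>& dc_and_los,
               uint32_t dc_claimed, uint32_t los_mask) {
      // The expected value is the bare live-object size: all count bits zero.
      uint32_t expected = dc_and_los.load(std::memory_order_relaxed) & los_mask;
      // Exactly one thread swaps in the claimed sentinel; losers observe a
      // changed word and return false.
      return dc_and_los.compare_exchange_strong(expected, dc_claimed | expected);
    }
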
   1.448 -inline ParallelCompactData::ChunkData*
   1.449 -ParallelCompactData::chunk(size_t chunk_idx) const
   1.450 +inline ParallelCompactData::RegionData*
   1.451 +ParallelCompactData::region(size_t region_idx) const
   1.452  {
   1.453 -  assert(chunk_idx <= chunk_count(), "bad arg");
   1.454 -  return _chunk_data + chunk_idx;
   1.455 +  assert(region_idx <= region_count(), "bad arg");
   1.456 +  return _region_data + region_idx;
   1.457  }
   1.458  
   1.459  inline size_t
   1.460 -ParallelCompactData::chunk(const ChunkData* const chunk_ptr) const
   1.461 +ParallelCompactData::region(const RegionData* const region_ptr) const
   1.462  {
   1.463 -  assert(chunk_ptr >= _chunk_data, "bad arg");
   1.464 -  assert(chunk_ptr <= _chunk_data + chunk_count(), "bad arg");
   1.465 -  return pointer_delta(chunk_ptr, _chunk_data, sizeof(ChunkData));
   1.466 +  assert(region_ptr >= _region_data, "bad arg");
   1.467 +  assert(region_ptr <= _region_data + region_count(), "bad arg");
   1.468 +  return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
   1.469  }
   1.470  
   1.471  inline ParallelCompactData::BlockData*
   1.472 @@ -509,68 +509,69 @@
   1.473  }
   1.474  
   1.475  inline size_t
   1.476 -ParallelCompactData::chunk_offset(const HeapWord* addr) const
   1.477 +ParallelCompactData::region_offset(const HeapWord* addr) const
   1.478  {
   1.479    assert(addr >= _region_start, "bad addr");
   1.480    assert(addr <= _region_end, "bad addr");
   1.481 -  return (size_t(addr) & ChunkAddrOffsetMask) >> LogHeapWordSize;
   1.482 +  return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
   1.483  }
   1.484  
   1.485  inline size_t
   1.486 -ParallelCompactData::addr_to_chunk_idx(const HeapWord* addr) const
   1.487 +ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
   1.488  {
   1.489    assert(addr >= _region_start, "bad addr");
   1.490    assert(addr <= _region_end, "bad addr");
   1.491 -  return pointer_delta(addr, _region_start) >> Log2ChunkSize;
   1.492 +  return pointer_delta(addr, _region_start) >> Log2RegionSize;
   1.493  }
   1.494  
   1.495 -inline ParallelCompactData::ChunkData*
   1.496 -ParallelCompactData::addr_to_chunk_ptr(const HeapWord* addr) const
   1.497 +inline ParallelCompactData::RegionData*
   1.498 +ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
   1.499  {
   1.500 -  return chunk(addr_to_chunk_idx(addr));
   1.501 +  return region(addr_to_region_idx(addr));
   1.502  }
   1.503  
   1.504  inline HeapWord*
   1.505 -ParallelCompactData::chunk_to_addr(size_t chunk) const
   1.506 +ParallelCompactData::region_to_addr(size_t region) const
   1.507  {
   1.508 -  assert(chunk <= _chunk_count, "chunk out of range");
   1.509 -  return _region_start + (chunk << Log2ChunkSize);
   1.510 +  assert(region <= _region_count, "region out of range");
   1.511 +  return _region_start + (region << Log2RegionSize);
   1.512  }
   1.513  
   1.514  inline HeapWord*
   1.515 -ParallelCompactData::chunk_to_addr(const ChunkData* chunk) const
   1.516 +ParallelCompactData::region_to_addr(const RegionData* region) const
   1.517  {
   1.518 -  return chunk_to_addr(pointer_delta(chunk, _chunk_data, sizeof(ChunkData)));
   1.519 +  return region_to_addr(pointer_delta(region, _region_data,
   1.520 +                                      sizeof(RegionData)));
   1.521  }
   1.522  
   1.523  inline HeapWord*
   1.524 -ParallelCompactData::chunk_to_addr(size_t chunk, size_t offset) const
   1.525 +ParallelCompactData::region_to_addr(size_t region, size_t offset) const
   1.526  {
   1.527 -  assert(chunk <= _chunk_count, "chunk out of range");
   1.528 -  assert(offset < ChunkSize, "offset too big");  // This may be too strict.
   1.529 -  return chunk_to_addr(chunk) + offset;
   1.530 +  assert(region <= _region_count, "region out of range");
   1.531 +  assert(offset < RegionSize, "offset too big");  // This may be too strict.
   1.532 +  return region_to_addr(region) + offset;
   1.533  }
   1.534  
   1.535  inline HeapWord*
   1.536 -ParallelCompactData::chunk_align_down(HeapWord* addr) const
   1.537 +ParallelCompactData::region_align_down(HeapWord* addr) const
   1.538  {
   1.539    assert(addr >= _region_start, "bad addr");
   1.540 -  assert(addr < _region_end + ChunkSize, "bad addr");
   1.541 -  return (HeapWord*)(size_t(addr) & ChunkAddrMask);
   1.542 +  assert(addr < _region_end + RegionSize, "bad addr");
   1.543 +  return (HeapWord*)(size_t(addr) & RegionAddrMask);
   1.544  }
   1.545  
   1.546  inline HeapWord*
   1.547 -ParallelCompactData::chunk_align_up(HeapWord* addr) const
   1.548 +ParallelCompactData::region_align_up(HeapWord* addr) const
   1.549  {
   1.550    assert(addr >= _region_start, "bad addr");
   1.551    assert(addr <= _region_end, "bad addr");
   1.552 -  return chunk_align_down(addr + ChunkSizeOffsetMask);
   1.553 +  return region_align_down(addr + RegionSizeOffsetMask);
   1.554  }
   1.555  
   1.556  inline bool
   1.557 -ParallelCompactData::is_chunk_aligned(HeapWord* addr) const
   1.558 +ParallelCompactData::is_region_aligned(HeapWord* addr) const
   1.559  {
   1.560 -  return chunk_offset(addr) == 0;
   1.561 +  return region_offset(addr) == 0;
   1.562  }
   1.563  
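The address arithmetic above works because a region is a power of two in size, so index, offset, and alignment are all shifts and masks. A self-contained sketch with assumed sizes (the real Log2RegionSize is defined in psParallelCompact.cpp):

    #include <cstddef>
    #include <cstdint>

    const size_t LogHeapWordSize      = 3;   // assumption: 8-byte heap words
    const size_t Log2RegionSize       = 9;   // assumption: 512-word regions
    const size_t RegionSize           = size_t(1) << Log2RegionSize;
    const size_t RegionSizeOffsetMask = RegionSize - 1;                      // words
    const size_t RegionAddrOffsetMask = (RegionSize << LogHeapWordSize) - 1; // bytes
    const size_t RegionAddrMask       = ~RegionAddrOffsetMask;

    size_t region_offset(uintptr_t addr) {     // words past the region start
      return (addr & RegionAddrOffsetMask) >> LogHeapWordSize;
    }
    uintptr_t region_align_down(uintptr_t addr) {
      return addr & RegionAddrMask;
    }
    // Aligning up adds one-less-than-a-region (in bytes) and truncates, so an
    // already-aligned address maps to itself.
    uintptr_t region_align_up(uintptr_t addr) {
      return region_align_down(addr + (RegionSizeOffsetMask << LogHeapWordSize));
    }
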
   1.564  inline size_t
   1.565 @@ -692,40 +693,39 @@
   1.566    // ParallelCompactData::BlockData::blk_ofs_t _live_data_left;
   1.567    size_t    _live_data_left;
   1.568    size_t    _cur_block;
   1.569 -  HeapWord* _chunk_start;
   1.570 -  HeapWord* _chunk_end;
   1.571 -  size_t    _chunk_index;
   1.572 +  HeapWord* _region_start;
   1.573 +  HeapWord* _region_end;
   1.574 +  size_t    _region_index;
   1.575  
   1.576   public:
   1.577    BitBlockUpdateClosure(ParMarkBitMap* mbm,
   1.578                          ParCompactionManager* cm,
   1.579 -                        size_t chunk_index);
   1.580 +                        size_t region_index);
   1.581  
   1.582    size_t cur_block() { return _cur_block; }
   1.583 -  size_t chunk_index() { return _chunk_index; }
   1.584 +  size_t region_index() { return _region_index; }
   1.585    size_t live_data_left() { return _live_data_left; }
    1.586    // Returns true if the first bit in the current block (cur_block) is
   1.587    // a start bit.
   1.588 -  // Returns true if the current block is within the chunk for the closure;
   1.589 -  bool chunk_contains_cur_block();
    1.590 +  // Returns true if the current block is within the region for the closure.
   1.591 +  bool region_contains_cur_block();
   1.592  
   1.593 -  // Set the chunk index and related chunk values for
   1.594 -  // a new chunk.
   1.595 -  void reset_chunk(size_t chunk_index);
   1.596 +  // Set the region index and related region values for
   1.597 +  // a new region.
   1.598 +  void reset_region(size_t region_index);
   1.599  
   1.600    virtual IterationStatus do_addr(HeapWord* addr, size_t words);
   1.601  };
   1.602  
   1.603 -// The UseParallelOldGC collector is a stop-the-world garbage
   1.604 -// collector that does parts of the collection using parallel threads.
   1.605 -// The collection includes the tenured generation and the young
   1.606 -// generation.  The permanent generation is collected at the same
   1.607 -// time as the other two generations but the permanent generation
   1.608 -// is collect by a single GC thread.  The permanent generation is
   1.609 -// collected serially because of the requirement that during the
   1.610 -// processing of a klass AAA, any objects reference by AAA must
   1.611 -// already have been processed.  This requirement is enforced by
   1.612 -// a left (lower address) to right (higher address) sliding compaction.
   1.613 +// The UseParallelOldGC collector is a stop-the-world garbage collector that
   1.614 +// does parts of the collection using parallel threads.  The collection includes
   1.615 +// the tenured generation and the young generation.  The permanent generation is
   1.616 +// collected at the same time as the other two generations but the permanent
    1.617 +// generation is collected by a single GC thread.  The permanent generation is
   1.618 +// collected serially because of the requirement that during the processing of a
    1.619 +// klass AAA, any objects referenced by AAA must already have been processed.
   1.620 +// This requirement is enforced by a left (lower address) to right (higher
   1.621 +// address) sliding compaction.
   1.622  //
   1.623  // There are four phases of the collection.
   1.624  //
   1.625 @@ -740,80 +740,75 @@
   1.626  //      - move the objects to their destination
   1.627  //      - update some references and reinitialize some variables
   1.628  //
   1.629 -// These three phases are invoked in PSParallelCompact::invoke_no_policy().
   1.630 -// The marking phase is implemented in PSParallelCompact::marking_phase()
   1.631 -// and does a complete marking of the heap.
   1.632 -// The summary phase is implemented in PSParallelCompact::summary_phase().
   1.633 -// The move and update phase is implemented in PSParallelCompact::compact().
   1.634 +// These three phases are invoked in PSParallelCompact::invoke_no_policy().  The
   1.635 +// marking phase is implemented in PSParallelCompact::marking_phase() and does a
   1.636 +// complete marking of the heap.  The summary phase is implemented in
   1.637 +// PSParallelCompact::summary_phase().  The move and update phase is implemented
   1.638 +// in PSParallelCompact::compact().
   1.639  //
   1.640 -// A space that is being collected is divided into chunks and with
   1.641 -// each chunk is associated an object of type ParallelCompactData.
   1.642 -// Each chunk is of a fixed size and typically will contain more than
   1.643 -// 1 object and may have parts of objects at the front and back of the
   1.644 -// chunk.
    1.645 +// A space that is being collected is divided into regions, and with each region
   1.646 +// is associated an object of type ParallelCompactData.  Each region is of a
   1.647 +// fixed size and typically will contain more than 1 object and may have parts
   1.648 +// of objects at the front and back of the region.
   1.649  //
   1.650 -// chunk            -----+---------------------+----------
   1.651 +// region            -----+---------------------+----------
   1.652  // objects covered   [ AAA  )[ BBB )[ CCC   )[ DDD     )
   1.653  //
   1.654 -// The marking phase does a complete marking of all live objects in the
   1.655 -// heap.  The marking also compiles the size of the data for
   1.656 -// all live objects covered by the chunk.  This size includes the
   1.657 -// part of any live object spanning onto the chunk (part of AAA
   1.658 -// if it is live) from the front, all live objects contained in the chunk
   1.659 -// (BBB and/or CCC if they are live), and the part of any live objects
   1.660 -// covered by the chunk that extends off the chunk (part of DDD if it is
   1.661 -// live).  The marking phase uses multiple GC threads and marking is
   1.662 -// done in a bit array of type ParMarkBitMap.  The marking of the
   1.663 -// bit map is done atomically as is the accumulation of the size of the
   1.664 -// live objects covered by a chunk.
   1.665 +// The marking phase does a complete marking of all live objects in the heap.
   1.666 +// The marking also compiles the size of the data for all live objects covered
   1.667 +// by the region.  This size includes the part of any live object spanning onto
   1.668 +// the region (part of AAA if it is live) from the front, all live objects
   1.669 +// contained in the region (BBB and/or CCC if they are live), and the part of
   1.670 +// any live objects covered by the region that extends off the region (part of
   1.671 +// DDD if it is live).  The marking phase uses multiple GC threads and marking
   1.672 +// is done in a bit array of type ParMarkBitMap.  The marking of the bit map is
   1.673 +// done atomically as is the accumulation of the size of the live objects
   1.674 +// covered by a region.
   1.675  //
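As an illustration of the atomic marking the previous paragraph describes, here is a minimal sketch of the usual lock-free set-bit idiom; this is not the ParMarkBitMap API, just the general technique:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    const size_t BitsPerWord = 8 * sizeof(uintptr_t);

    // Returns true for the one thread whose fetch_or actually flipped the bit,
    // so concurrent markers never double-count an object.
    bool mark_bit(std::atomic<uintptr_t>* map, size_t bit) {
      std::atomic<uintptr_t>& word = map[bit / BitsPerWord];
      const uintptr_t mask = uintptr_t(1) << (bit % BitsPerWord);
      return (word.fetch_or(mask) & mask) == 0;
    }
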
   1.676 -// The summary phase calculates the total live data to the left of
   1.677 -// each chunk XXX.  Based on that total and the bottom of the space,
   1.678 -// it can calculate the starting location of the live data in XXX.
   1.679 -// The summary phase calculates for each chunk XXX quantites such as
   1.680 +// The summary phase calculates the total live data to the left of each region
   1.681 +// XXX.  Based on that total and the bottom of the space, it can calculate the
   1.682 +// starting location of the live data in XXX.  The summary phase calculates for
    1.683 +// each region XXX quantities such as
   1.684  //
   1.685 -//      - the amount of live data at the beginning of a chunk from an object
   1.686 -//      entering the chunk.
   1.687 -//      - the location of the first live data on the chunk
   1.688 -//      - a count of the number of chunks receiving live data from XXX.
   1.689 +//      - the amount of live data at the beginning of a region from an object
   1.690 +//        entering the region.
   1.691 +//      - the location of the first live data on the region
   1.692 +//      - a count of the number of regions receiving live data from XXX.
   1.693  //
   1.694  // See ParallelCompactData for precise details.  The summary phase also
   1.695 -// calculates the dense prefix for the compaction.  The dense prefix
   1.696 -// is a portion at the beginning of the space that is not moved.  The
   1.697 -// objects in the dense prefix do need to have their object references
   1.698 -// updated.  See method summarize_dense_prefix().
   1.699 +// calculates the dense prefix for the compaction.  The dense prefix is a
   1.700 +// portion at the beginning of the space that is not moved.  The objects in the
   1.701 +// dense prefix do need to have their object references updated.  See method
   1.702 +// summarize_dense_prefix().
   1.703  //
   1.704  // The summary phase is done using 1 GC thread.
   1.705  //
   1.706 -// The compaction phase moves objects to their new location and updates
   1.707 -// all references in the object.
   1.708 +// The compaction phase moves objects to their new location and updates all
   1.709 +// references in the object.
   1.710  //
   1.711 -// A current exception is that objects that cross a chunk boundary
   1.712 -// are moved but do not have their references updated.  References are
   1.713 -// not updated because it cannot easily be determined if the klass
   1.714 -// pointer KKK for the object AAA has been updated.  KKK likely resides
   1.715 -// in a chunk to the left of the chunk containing AAA.  These AAA's
   1.716 -// have there references updated at the end in a clean up phase.
   1.717 -// See the method PSParallelCompact::update_deferred_objects().  An
   1.718 -// alternate strategy is being investigated for this deferral of updating.
   1.719 +// A current exception is that objects that cross a region boundary are moved
   1.720 +// but do not have their references updated.  References are not updated because
   1.721 +// it cannot easily be determined if the klass pointer KKK for the object AAA
   1.722 +// has been updated.  KKK likely resides in a region to the left of the region
    1.723 +// containing AAA.  These AAA's have their references updated at the end in a
   1.724 +// clean up phase.  See the method PSParallelCompact::update_deferred_objects().
   1.725 +// An alternate strategy is being investigated for this deferral of updating.
   1.726  //
   1.727 -// Compaction is done on a chunk basis.  A chunk that is ready to be
   1.728 -// filled is put on a ready list and GC threads take chunk off the list
   1.729 -// and fill them.  A chunk is ready to be filled if it
   1.730 -// empty of live objects.  Such a chunk may have been initially
   1.731 -// empty (only contained
   1.732 -// dead objects) or may have had all its live objects copied out already.
   1.733 -// A chunk that compacts into itself is also ready for filling.  The
   1.734 -// ready list is initially filled with empty chunks and chunks compacting
   1.735 -// into themselves.  There is always at least 1 chunk that can be put on
   1.736 -// the ready list.  The chunks are atomically added and removed from
   1.737 -// the ready list.
   1.738 -//
   1.739 +// Compaction is done on a region basis.  A region that is ready to be filled is
    1.740 +// put on a ready list and GC threads take regions off the list and fill them.
    1.741 +// A region is ready to be filled if it is empty of live objects.  Such a region may
   1.742 +// have been initially empty (only contained dead objects) or may have had all
   1.743 +// its live objects copied out already.  A region that compacts into itself is
   1.744 +// also ready for filling.  The ready list is initially filled with empty
   1.745 +// regions and regions compacting into themselves.  There is always at least 1
   1.746 +// region that can be put on the ready list.  The regions are atomically added
   1.747 +// and removed from the ready list.
   1.748 +
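Pulling the last few paragraphs together, here is a deliberately simplified, single-threaded sketch of the region-draining loop; all the types are hypothetical stand-ins, and the real code claims regions atomically and handles multiple source regions per destination:

    #include <cstddef>
    #include <deque>

    // Hypothetical stand-in for the region table queried by GC threads.
    struct RegionTable {
      void   fill(size_t dst);            // copy live data into region dst
      size_t first_source(size_t dst);    // first source region feeding dst
      // Decrement a source region's destination count; returns true when it
      // reaches zero, i.e. the region is fully evacuated and claimable.
      bool   release_source(size_t src);
    };

    void drain_ready_regions(std::deque<size_t>& ready, RegionTable& regions) {
      while (!ready.empty()) {
        const size_t dst = ready.front();
        ready.pop_front();
        regions.fill(dst);                // claiming is implicit in this sketch
        // Filling dst empties (part of) a source region; once a source's
        // destination count drops to zero it becomes ready to be filled too,
        // which is why the ready list never runs dry mid-compaction.
        const size_t src = regions.first_source(dst);
        if (regions.release_source(src)) {
          ready.push_back(src);
        }
      }
    }
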
   1.749  class PSParallelCompact : AllStatic {
   1.750   public:
   1.751    // Convenient access to type names.
   1.752    typedef ParMarkBitMap::idx_t idx_t;
   1.753 -  typedef ParallelCompactData::ChunkData ChunkData;
   1.754 +  typedef ParallelCompactData::RegionData RegionData;
   1.755    typedef ParallelCompactData::BlockData BlockData;
   1.756  
   1.757    typedef enum {
   1.758 @@ -977,26 +972,26 @@
   1.759    // not reclaimed).
   1.760    static double dead_wood_limiter(double density, size_t min_percent);
   1.761  
   1.762 -  // Find the first (left-most) chunk in the range [beg, end) that has at least
   1.763 +  // Find the first (left-most) region in the range [beg, end) that has at least
   1.764    // dead_words of dead space to the left.  The argument beg must be the first
   1.765 -  // chunk in the space that is not completely live.
   1.766 -  static ChunkData* dead_wood_limit_chunk(const ChunkData* beg,
   1.767 -                                          const ChunkData* end,
   1.768 -                                          size_t dead_words);
   1.769 +  // region in the space that is not completely live.
   1.770 +  static RegionData* dead_wood_limit_region(const RegionData* beg,
   1.771 +                                            const RegionData* end,
   1.772 +                                            size_t dead_words);
   1.773  
   1.774 -  // Return a pointer to the first chunk in the range [beg, end) that is not
   1.775 +  // Return a pointer to the first region in the range [beg, end) that is not
   1.776    // completely full.
   1.777 -  static ChunkData* first_dead_space_chunk(const ChunkData* beg,
   1.778 -                                           const ChunkData* end);
   1.779 +  static RegionData* first_dead_space_region(const RegionData* beg,
   1.780 +                                             const RegionData* end);
   1.781  
   1.782    // Return a value indicating the benefit or 'yield' if the compacted region
   1.783    // were to start (or equivalently if the dense prefix were to end) at the
   1.784 -  // candidate chunk.  Higher values are better.
   1.785 +  // candidate region.  Higher values are better.
   1.786    //
   1.787    // The value is based on the amount of space reclaimed vs. the costs of (a)
   1.788    // updating references in the dense prefix plus (b) copying objects and
   1.789    // updating references in the compacted region.
   1.790 -  static inline double reclaimed_ratio(const ChunkData* const candidate,
   1.791 +  static inline double reclaimed_ratio(const RegionData* const candidate,
   1.792                                         HeapWord* const bottom,
   1.793                                         HeapWord* const top,
   1.794                                         HeapWord* const new_top);
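As a rough sketch of the yield computation described above (the cost weights here are invented for illustration; the real heuristic and its tuning live in psParallelCompact.cpp):

    // Benefit = words reclaimed; cost = updating references in the dense
    // prefix (a) plus copying and updating in the compacted region (b).
    double reclaimed_ratio_sketch(double reclaimed_words,
                                  double dense_prefix_live_words,  // cost (a)
                                  double compacted_live_words) {   // cost (b)
      const double update_cost = 1.0;  // assumed relative cost per live word
      const double copy_cost   = 1.8;  // assumed: copying costs more than updating
      return reclaimed_words / (update_cost * dense_prefix_live_words +
                                copy_cost   * compacted_live_words);
    }
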
   1.795 @@ -1005,9 +1000,9 @@
   1.796    static HeapWord* compute_dense_prefix(const SpaceId id,
   1.797                                          bool maximum_compaction);
   1.798  
   1.799 -  // Return true if dead space crosses onto the specified Chunk; bit must be the
   1.800 -  // bit index corresponding to the first word of the Chunk.
   1.801 -  static inline bool dead_space_crosses_boundary(const ChunkData* chunk,
   1.802 +  // Return true if dead space crosses onto the specified Region; bit must be
   1.803 +  // the bit index corresponding to the first word of the Region.
   1.804 +  static inline bool dead_space_crosses_boundary(const RegionData* region,
   1.805                                                   idx_t bit);
   1.806  
   1.807    // Summary phase utility routine to fill dead space (if any) at the dense
   1.808 @@ -1038,16 +1033,16 @@
   1.809    static void compact_perm(ParCompactionManager* cm);
   1.810    static void compact();
   1.811  
   1.812 -  // Add available chunks to the stack and draining tasks to the task queue.
   1.813 -  static void enqueue_chunk_draining_tasks(GCTaskQueue* q,
   1.814 -                                           uint parallel_gc_threads);
   1.815 +  // Add available regions to the stack and draining tasks to the task queue.
   1.816 +  static void enqueue_region_draining_tasks(GCTaskQueue* q,
   1.817 +                                            uint parallel_gc_threads);
   1.818  
   1.819    // Add dense prefix update tasks to the task queue.
   1.820    static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
   1.821                                           uint parallel_gc_threads);
   1.822  
   1.823 -  // Add chunk stealing tasks to the task queue.
   1.824 -  static void enqueue_chunk_stealing_tasks(
   1.825 +  // Add region stealing tasks to the task queue.
   1.826 +  static void enqueue_region_stealing_tasks(
   1.827                                         GCTaskQueue* q,
   1.828                                         ParallelTaskTerminator* terminator_ptr,
   1.829                                         uint parallel_gc_threads);
   1.830 @@ -1154,56 +1149,56 @@
   1.831    // Move and update the live objects in the specified space.
   1.832    static void move_and_update(ParCompactionManager* cm, SpaceId space_id);
   1.833  
   1.834 -  // Process the end of the given chunk range in the dense prefix.
   1.835 +  // Process the end of the given region range in the dense prefix.
   1.836    // This includes saving any object not updated.
   1.837 -  static void dense_prefix_chunks_epilogue(ParCompactionManager* cm,
   1.838 -                                           size_t chunk_start_index,
   1.839 -                                           size_t chunk_end_index,
   1.840 -                                           idx_t exiting_object_offset,
   1.841 -                                           idx_t chunk_offset_start,
   1.842 -                                           idx_t chunk_offset_end);
   1.843 +  static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
   1.844 +                                            size_t region_start_index,
   1.845 +                                            size_t region_end_index,
   1.846 +                                            idx_t exiting_object_offset,
   1.847 +                                            idx_t region_offset_start,
   1.848 +                                            idx_t region_offset_end);
   1.849  
   1.850 -  // Update a chunk in the dense prefix.  For each live object
   1.851 -  // in the chunk, update it's interior references.  For each
   1.852 +  // Update a region in the dense prefix.  For each live object
    1.853 +  // in the region, update its interior references.  For each
   1.854    // dead object, fill it with deadwood. Dead space at the end
   1.855 -  // of a chunk range will be filled to the start of the next
   1.856 -  // live object regardless of the chunk_index_end.  None of the
   1.857 +  // of a region range will be filled to the start of the next
   1.858 +  // live object regardless of the region_index_end.  None of the
   1.859    // objects in the dense prefix move and dead space is dead
   1.860    // (holds only dead objects that don't need any processing), so
   1.861    // dead space can be filled in any order.
   1.862    static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
   1.863                                                    SpaceId space_id,
   1.864 -                                                  size_t chunk_index_start,
   1.865 -                                                  size_t chunk_index_end);
   1.866 +                                                  size_t region_index_start,
   1.867 +                                                  size_t region_index_end);
   1.868  
   1.869    // Return the address of the count + 1st live word in the range [beg, end).
   1.870    static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);
   1.871  
   1.872    // Return the address of the word to be copied to dest_addr, which must be
   1.873 -  // aligned to a chunk boundary.
   1.874 +  // aligned to a region boundary.
   1.875    static HeapWord* first_src_addr(HeapWord* const dest_addr,
   1.876 -                                  size_t src_chunk_idx);
   1.877 +                                  size_t src_region_idx);
   1.878  
   1.879 -  // Determine the next source chunk, set closure.source() to the start of the
   1.880 -  // new chunk return the chunk index.  Parameter end_addr is the address one
   1.881 +  // Determine the next source region, set closure.source() to the start of the
    1.882 +  // new region and return the region index.  Parameter end_addr is the address one
   1.883    // beyond the end of source range just processed.  If necessary, switch to a
   1.884    // new source space and set src_space_id (in-out parameter) and src_space_top
   1.885    // (out parameter) accordingly.
   1.886 -  static size_t next_src_chunk(MoveAndUpdateClosure& closure,
   1.887 -                               SpaceId& src_space_id,
   1.888 -                               HeapWord*& src_space_top,
   1.889 -                               HeapWord* end_addr);
   1.890 +  static size_t next_src_region(MoveAndUpdateClosure& closure,
   1.891 +                                SpaceId& src_space_id,
   1.892 +                                HeapWord*& src_space_top,
   1.893 +                                HeapWord* end_addr);
   1.894  
   1.895 -  // Decrement the destination count for each non-empty source chunk in the
   1.896 -  // range [beg_chunk, chunk(chunk_align_up(end_addr))).
   1.897 +  // Decrement the destination count for each non-empty source region in the
   1.898 +  // range [beg_region, region(region_align_up(end_addr))).
   1.899    static void decrement_destination_counts(ParCompactionManager* cm,
   1.900 -                                           size_t beg_chunk,
   1.901 +                                           size_t beg_region,
   1.902                                             HeapWord* end_addr);
   1.903  
   1.904 -  // Fill a chunk, copying objects from one or more source chunks.
   1.905 -  static void fill_chunk(ParCompactionManager* cm, size_t chunk_idx);
   1.906 -  static void fill_and_update_chunk(ParCompactionManager* cm, size_t chunk) {
   1.907 -    fill_chunk(cm, chunk);
   1.908 +  // Fill a region, copying objects from one or more source regions.
   1.909 +  static void fill_region(ParCompactionManager* cm, size_t region_idx);
   1.910 +  static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
   1.911 +    fill_region(cm, region);
   1.912    }
   1.913  
   1.914    // Update the deferred objects in the space.
   1.915 @@ -1259,7 +1254,7 @@
   1.916  #ifndef PRODUCT
   1.917    // Debugging support.
   1.918    static const char* space_names[last_space_id];
   1.919 -  static void print_chunk_ranges();
   1.920 +  static void print_region_ranges();
   1.921    static void print_dense_prefix_stats(const char* const algorithm,
   1.922                                         const SpaceId id,
   1.923                                         const bool maximum_compaction,
   1.924 @@ -1267,7 +1262,7 @@
   1.925  #endif  // #ifndef PRODUCT
   1.926  
   1.927  #ifdef  ASSERT
   1.928 -  // Verify that all the chunks have been emptied.
   1.929 +  // Verify that all the regions have been emptied.
   1.930    static void verify_complete(SpaceId space_id);
   1.931  #endif  // #ifdef ASSERT
   1.932  };
   1.933 @@ -1376,17 +1371,17 @@
   1.934  }
   1.935  
   1.936  inline bool
   1.937 -PSParallelCompact::dead_space_crosses_boundary(const ChunkData* chunk,
   1.938 +PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
   1.939                                                 idx_t bit)
   1.940  {
   1.941 -  assert(bit > 0, "cannot call this for the first bit/chunk");
   1.942 -  assert(_summary_data.chunk_to_addr(chunk) == _mark_bitmap.bit_to_addr(bit),
   1.943 +  assert(bit > 0, "cannot call this for the first bit/region");
   1.944 +  assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
   1.945           "sanity check");
   1.946  
   1.947    // Dead space crosses the boundary if (1) a partial object does not extend
   1.948 -  // onto the chunk, (2) an object does not start at the beginning of the chunk,
   1.949 -  // and (3) an object does not end at the end of the prior chunk.
   1.950 -  return chunk->partial_obj_size() == 0 &&
   1.951 +  // onto the region, (2) an object does not start at the beginning of the
   1.952 +  // region, and (3) an object does not end at the end of the prior region.
   1.953 +  return region->partial_obj_size() == 0 &&
   1.954      !_mark_bitmap.is_obj_beg(bit) &&
   1.955      !_mark_bitmap.is_obj_end(bit - 1);
   1.956  }
