changeset:   810:81cd571500b0
parent:      809:a4b729f5b611
child:       811:0166ac265d53
author:      jcoomes
date:        Tue, 30 Sep 2008 12:20:22 -0700

6725697: par compact - rename class ChunkData to RegionData
Reviewed-by: iveresov, tonyp

src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
src/share/vm/runtime/globals.hpp
src/share/vm/utilities/taskqueue.cpp
src/share/vm/utilities/taskqueue.hpp
     1.1 --- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Tue Sep 30 11:49:31 2008 -0700
     1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Tue Sep 30 12:20:22 2008 -0700
     1.3 @@ -146,7 +146,7 @@
     1.4  {
     1.5    ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
     1.6    uint parallel_gc_threads = heap->gc_task_manager()->workers();
     1.7 -  ChunkTaskQueueSet* qset = ParCompactionManager::chunk_array();
     1.8 +  RegionTaskQueueSet* qset = ParCompactionManager::region_array();
     1.9    ParallelTaskTerminator terminator(parallel_gc_threads, qset);
    1.10    GCTaskQueue* q = GCTaskQueue::create();
    1.11    for(uint i=0; i<parallel_gc_threads; i++) {
    1.12 @@ -205,38 +205,38 @@
    1.13  }
    1.14  
    1.15  //
    1.16 -// StealChunkCompactionTask
    1.17 +// StealRegionCompactionTask
    1.18  //
    1.19  
    1.20  
    1.21 -StealChunkCompactionTask::StealChunkCompactionTask(ParallelTaskTerminator* t) :
    1.22 -  _terminator(t) {};
    1.23 +StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
    1.24 +  _terminator(t) {}
    1.25  
    1.26 -void StealChunkCompactionTask::do_it(GCTaskManager* manager, uint which) {
    1.27 +void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
    1.28    assert(Universe::heap()->is_gc_active(), "called outside gc");
    1.29  
    1.30 -  NOT_PRODUCT(TraceTime tm("StealChunkCompactionTask",
    1.31 +  NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask",
    1.32      PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
    1.33  
    1.34    ParCompactionManager* cm =
    1.35      ParCompactionManager::gc_thread_compaction_manager(which);
    1.36  
    1.37 -  // Has to drain stacks first because there may be chunks on
    1.38 +  // Has to drain stacks first because there may be regions on
    1.39    // preloaded onto the stack and this thread may never have
    1.40    // done a draining task.  Are the draining tasks needed?
    1.41  
    1.42 -  cm->drain_chunk_stacks();
    1.43 +  cm->drain_region_stacks();
    1.44  
    1.45 -  size_t chunk_index = 0;
    1.46 +  size_t region_index = 0;
    1.47    int random_seed = 17;
    1.48  
    1.49    // If we're the termination task, try 10 rounds of stealing before
    1.50    // setting the termination flag
    1.51  
    1.52    while(true) {
    1.53 -    if (ParCompactionManager::steal(which, &random_seed, chunk_index)) {
    1.54 -      PSParallelCompact::fill_and_update_chunk(cm, chunk_index);
    1.55 -      cm->drain_chunk_stacks();
    1.56 +    if (ParCompactionManager::steal(which, &random_seed, region_index)) {
    1.57 +      PSParallelCompact::fill_and_update_region(cm, region_index);
    1.58 +      cm->drain_region_stacks();
    1.59      } else {
    1.60        if (terminator()->offer_termination()) {
    1.61          break;
    1.62 @@ -249,11 +249,10 @@
    1.63  
    1.64  UpdateDensePrefixTask::UpdateDensePrefixTask(
    1.65                                     PSParallelCompact::SpaceId space_id,
    1.66 -                                   size_t chunk_index_start,
    1.67 -                                   size_t chunk_index_end) :
    1.68 -  _space_id(space_id), _chunk_index_start(chunk_index_start),
    1.69 -  _chunk_index_end(chunk_index_end)
    1.70 -{}
    1.71 +                                   size_t region_index_start,
    1.72 +                                   size_t region_index_end) :
    1.73 +  _space_id(space_id), _region_index_start(region_index_start),
    1.74 +  _region_index_end(region_index_end) {}
    1.75  
    1.76  void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
    1.77  
    1.78 @@ -265,8 +264,8 @@
    1.79  
    1.80    PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
    1.81                                                           _space_id,
    1.82 -                                                         _chunk_index_start,
    1.83 -                                                         _chunk_index_end);
    1.84 +                                                         _region_index_start,
    1.85 +                                                         _region_index_end);
    1.86  }
    1.87  
    1.88  void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
    1.89 @@ -278,6 +277,6 @@
    1.90    ParCompactionManager* cm =
    1.91      ParCompactionManager::gc_thread_compaction_manager(which);
    1.92  
    1.93 -  // Process any chunks already in the compaction managers stacks.
    1.94 -  cm->drain_chunk_stacks();
    1.95 +  // Process any regions already in the compaction managers stacks.
    1.96 +  cm->drain_region_stacks();
    1.97  }
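
The hunks above rename the work-stealing compaction task but keep its control flow: StealRegionCompactionTask::do_it() first drains any region indices preloaded onto this worker's stack, then repeatedly steals a region index from another worker's queue, fills and updates that region, drains again, and offers termination once stealing fails. The following is a minimal, single-threaded sketch of that pattern, not HotSpot code; RegionQueue, steal_region() and the stand-in fill_and_update_region() are illustrative names only.

// Minimal sketch of the steal/drain/terminate pattern used by
// StealRegionCompactionTask; all types and helpers here are hypothetical.
#include <cstddef>
#include <cstdio>
#include <deque>
#include <vector>

using RegionQueue = std::deque<size_t>;

// Stand-in for PSParallelCompact::fill_and_update_region(cm, region_index).
static void fill_and_update_region(size_t region_index) {
  std::printf("fill_and_update_region(%zu)\n", region_index);
}

// Stand-in for ParCompactionManager::drain_region_stacks().
static void drain_region_stack(RegionQueue& local) {
  while (!local.empty()) {
    size_t region_index = local.back();
    local.pop_back();
    fill_and_update_region(region_index);
  }
}

// Stand-in for ParCompactionManager::steal(which, seed, region_index):
// try to take one region index from any queue other than our own.
static bool steal_region(std::vector<RegionQueue>& all, size_t self,
                         size_t& region_index) {
  for (size_t i = 0; i < all.size(); ++i) {
    if (i == self || all[i].empty()) continue;
    region_index = all[i].front();
    all[i].pop_front();
    return true;
  }
  return false;                        // nothing left to steal
}

int main() {
  std::vector<RegionQueue> queues(3);
  queues[0] = {0, 1};                  // regions preloaded onto worker 0's stack
  queues[1] = {2, 3, 4};
  queues[2] = {5};

  const size_t which = 0;              // this worker's id
  drain_region_stack(queues[which]);   // drain preloaded regions first

  size_t region_index = 0;
  while (steal_region(queues, which, region_index)) {
    fill_and_update_region(region_index);
    drain_region_stack(queues[which]);
  }
  return 0;                            // real code would offer_termination() here
}
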
     2.1 --- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Tue Sep 30 11:49:31 2008 -0700
     2.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Tue Sep 30 12:20:22 2008 -0700
     2.3 @@ -188,18 +188,18 @@
     2.4  };
     2.5  
     2.6  //
     2.7 -// StealChunkCompactionTask
     2.8 +// StealRegionCompactionTask
     2.9  //
    2.10  // This task is used to distribute work to idle threads.
    2.11  //
    2.12  
    2.13 -class StealChunkCompactionTask : public GCTask {
    2.14 +class StealRegionCompactionTask : public GCTask {
    2.15   private:
    2.16     ParallelTaskTerminator* const _terminator;
    2.17   public:
    2.18 -  StealChunkCompactionTask(ParallelTaskTerminator* t);
    2.19 +  StealRegionCompactionTask(ParallelTaskTerminator* t);
    2.20  
    2.21 -  char* name() { return (char *)"steal-chunk-task"; }
    2.22 +  char* name() { return (char *)"steal-region-task"; }
    2.23    ParallelTaskTerminator* terminator() { return _terminator; }
    2.24  
    2.25    virtual void do_it(GCTaskManager* manager, uint which);
    2.26 @@ -215,15 +215,15 @@
    2.27  class UpdateDensePrefixTask : public GCTask {
    2.28   private:
    2.29    PSParallelCompact::SpaceId _space_id;
    2.30 -  size_t _chunk_index_start;
    2.31 -  size_t _chunk_index_end;
    2.32 +  size_t _region_index_start;
    2.33 +  size_t _region_index_end;
    2.34  
    2.35   public:
    2.36    char* name() { return (char *)"update-dense_prefix-task"; }
    2.37  
    2.38    UpdateDensePrefixTask(PSParallelCompact::SpaceId space_id,
    2.39 -                        size_t chunk_index_start,
    2.40 -                        size_t chunk_index_end);
    2.41 +                        size_t region_index_start,
    2.42 +                        size_t region_index_end);
    2.43  
    2.44    virtual void do_it(GCTaskManager* manager, uint which);
    2.45  };
    2.46 @@ -231,17 +231,17 @@
    2.47  //
    2.48  // DrainStacksCompactionTask
    2.49  //
    2.50 -// This task processes chunks that have been added to the stacks of each
    2.51 +// This task processes regions that have been added to the stacks of each
    2.52  // compaction manager.
    2.53  //
    2.54  // Trying to use one draining thread does not work because there are no
    2.55  // guarantees about which task will be picked up by which thread.  For example,
    2.56 -// if thread A gets all the preloaded chunks, thread A may not get a draining
    2.57 +// if thread A gets all the preloaded regions, thread A may not get a draining
    2.58  // task (they may all be done by other threads).
    2.59  //
    2.60  
    2.61  class DrainStacksCompactionTask : public GCTask {
    2.62   public:
    2.63 -  char* name() { return (char *)"drain-chunk-task"; }
    2.64 +  char* name() { return (char *)"drain-region-task"; }
    2.65    virtual void do_it(GCTaskManager* manager, uint which);
    2.66  };
     3.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Tue Sep 30 11:49:31 2008 -0700
     3.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Tue Sep 30 12:20:22 2008 -0700
     3.3 @@ -30,7 +30,7 @@
     3.4  OopTaskQueueSet*     ParCompactionManager::_stack_array = NULL;
     3.5  ObjectStartArray*    ParCompactionManager::_start_array = NULL;
     3.6  ParMarkBitMap*       ParCompactionManager::_mark_bitmap = NULL;
     3.7 -ChunkTaskQueueSet*   ParCompactionManager::_chunk_array = NULL;
     3.8 +RegionTaskQueueSet*   ParCompactionManager::_region_array = NULL;
     3.9  
    3.10  ParCompactionManager::ParCompactionManager() :
    3.11      _action(CopyAndUpdate) {
    3.12 @@ -46,13 +46,13 @@
    3.13  
    3.14    // We want the overflow stack to be permanent
    3.15    _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
    3.16 -#ifdef USE_ChunkTaskQueueWithOverflow
    3.17 -  chunk_stack()->initialize();
    3.18 +#ifdef USE_RegionTaskQueueWithOverflow
    3.19 +  region_stack()->initialize();
    3.20  #else
    3.21 -  chunk_stack()->initialize();
    3.22 +  region_stack()->initialize();
    3.23  
    3.24    // We want the overflow stack to be permanent
    3.25 -  _chunk_overflow_stack =
    3.26 +  _region_overflow_stack =
    3.27      new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
    3.28  #endif
    3.29  
    3.30 @@ -86,18 +86,18 @@
    3.31  
    3.32    _stack_array = new OopTaskQueueSet(parallel_gc_threads);
    3.33    guarantee(_stack_array != NULL, "Count not initialize promotion manager");
    3.34 -  _chunk_array = new ChunkTaskQueueSet(parallel_gc_threads);
    3.35 -  guarantee(_chunk_array != NULL, "Count not initialize promotion manager");
    3.36 +  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
    3.37 +  guarantee(_region_array != NULL, "Count not initialize promotion manager");
    3.38  
    3.39    // Create and register the ParCompactionManager(s) for the worker threads.
    3.40    for(uint i=0; i<parallel_gc_threads; i++) {
    3.41      _manager_array[i] = new ParCompactionManager();
    3.42      guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    3.43      stack_array()->register_queue(i, _manager_array[i]->marking_stack());
    3.44 -#ifdef USE_ChunkTaskQueueWithOverflow
    3.45 -    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()->task_queue());
    3.46 +#ifdef USE_RegionTaskQueueWithOverflow
    3.47 +    region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
    3.48  #else
    3.49 -    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack());
    3.50 +    region_array()->register_queue(i, _manager_array[i]->region_stack());
    3.51  #endif
    3.52    }
    3.53  
    3.54 @@ -153,31 +153,31 @@
    3.55    return NULL;
    3.56  }
    3.57  
    3.58 -// Save chunk on a stack
    3.59 -void ParCompactionManager::save_for_processing(size_t chunk_index) {
    3.60 +// Save region on a stack
    3.61 +void ParCompactionManager::save_for_processing(size_t region_index) {
    3.62  #ifdef ASSERT
    3.63    const ParallelCompactData& sd = PSParallelCompact::summary_data();
    3.64 -  ParallelCompactData::ChunkData* const chunk_ptr = sd.chunk(chunk_index);
    3.65 -  assert(chunk_ptr->claimed(), "must be claimed");
    3.66 -  assert(chunk_ptr->_pushed++ == 0, "should only be pushed once");
    3.67 +  ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
    3.68 +  assert(region_ptr->claimed(), "must be claimed");
    3.69 +  assert(region_ptr->_pushed++ == 0, "should only be pushed once");
    3.70  #endif
    3.71 -  chunk_stack_push(chunk_index);
    3.72 +  region_stack_push(region_index);
    3.73  }
    3.74  
    3.75 -void ParCompactionManager::chunk_stack_push(size_t chunk_index) {
    3.76 +void ParCompactionManager::region_stack_push(size_t region_index) {
    3.77  
    3.78 -#ifdef USE_ChunkTaskQueueWithOverflow
    3.79 -  chunk_stack()->save(chunk_index);
    3.80 +#ifdef USE_RegionTaskQueueWithOverflow
    3.81 +  region_stack()->save(region_index);
    3.82  #else
    3.83 -  if(!chunk_stack()->push(chunk_index)) {
    3.84 -    chunk_overflow_stack()->push(chunk_index);
    3.85 +  if(!region_stack()->push(region_index)) {
    3.86 +    region_overflow_stack()->push(region_index);
    3.87    }
    3.88  #endif
    3.89  }
    3.90  
    3.91 -bool ParCompactionManager::retrieve_for_processing(size_t& chunk_index) {
    3.92 -#ifdef USE_ChunkTaskQueueWithOverflow
    3.93 -  return chunk_stack()->retrieve(chunk_index);
    3.94 +bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
    3.95 +#ifdef USE_RegionTaskQueueWithOverflow
    3.96 +  return region_stack()->retrieve(region_index);
    3.97  #else
    3.98    // Should not be used in the parallel case
    3.99    ShouldNotReachHere();
   3.100 @@ -230,14 +230,14 @@
   3.101    assert(overflow_stack()->length() == 0, "Sanity");
   3.102  }
   3.103  
   3.104 -void ParCompactionManager::drain_chunk_overflow_stack() {
   3.105 -  size_t chunk_index = (size_t) -1;
   3.106 -  while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
   3.107 -    PSParallelCompact::fill_and_update_chunk(this, chunk_index);
   3.108 +void ParCompactionManager::drain_region_overflow_stack() {
   3.109 +  size_t region_index = (size_t) -1;
   3.110 +  while(region_stack()->retrieve_from_overflow(region_index)) {
   3.111 +    PSParallelCompact::fill_and_update_region(this, region_index);
   3.112    }
   3.113  }
   3.114  
   3.115 -void ParCompactionManager::drain_chunk_stacks() {
   3.116 +void ParCompactionManager::drain_region_stacks() {
   3.117  #ifdef ASSERT
   3.118    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   3.119    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   3.120 @@ -249,42 +249,42 @@
   3.121  #if 1 // def DO_PARALLEL - the serial code hasn't been updated
   3.122    do {
   3.123  
   3.124 -#ifdef USE_ChunkTaskQueueWithOverflow
   3.125 +#ifdef USE_RegionTaskQueueWithOverflow
   3.126      // Drain overflow stack first, so other threads can steal from
   3.127      // claimed stack while we work.
   3.128 -    size_t chunk_index = (size_t) -1;
   3.129 -    while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
   3.130 -      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
   3.131 +    size_t region_index = (size_t) -1;
   3.132 +    while(region_stack()->retrieve_from_overflow(region_index)) {
   3.133 +      PSParallelCompact::fill_and_update_region(this, region_index);
   3.134      }
   3.135  
   3.136 -    while (chunk_stack()->retrieve_from_stealable_queue(chunk_index)) {
   3.137 -      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
   3.138 +    while (region_stack()->retrieve_from_stealable_queue(region_index)) {
   3.139 +      PSParallelCompact::fill_and_update_region(this, region_index);
   3.140      }
   3.141 -  } while (!chunk_stack()->is_empty());
   3.142 +  } while (!region_stack()->is_empty());
   3.143  #else
   3.144      // Drain overflow stack first, so other threads can steal from
   3.145      // claimed stack while we work.
   3.146 -    while(!chunk_overflow_stack()->is_empty()) {
   3.147 -      size_t chunk_index = chunk_overflow_stack()->pop();
   3.148 -      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
   3.149 +    while(!region_overflow_stack()->is_empty()) {
   3.150 +      size_t region_index = region_overflow_stack()->pop();
   3.151 +      PSParallelCompact::fill_and_update_region(this, region_index);
   3.152      }
   3.153  
   3.154 -    size_t chunk_index = -1;
   3.155 +    size_t region_index = -1;
   3.156      // obj is a reference!!!
   3.157 -    while (chunk_stack()->pop_local(chunk_index)) {
   3.158 +    while (region_stack()->pop_local(region_index)) {
   3.159        // It would be nice to assert about the type of objects we might
   3.160        // pop, but they can come from anywhere, unfortunately.
   3.161 -      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
   3.162 +      PSParallelCompact::fill_and_update_region(this, region_index);
   3.163      }
   3.164 -  } while((chunk_stack()->size() != 0) ||
   3.165 -          (chunk_overflow_stack()->length() != 0));
   3.166 +  } while((region_stack()->size() != 0) ||
   3.167 +          (region_overflow_stack()->length() != 0));
   3.168  #endif
   3.169  
   3.170 -#ifdef USE_ChunkTaskQueueWithOverflow
   3.171 -  assert(chunk_stack()->is_empty(), "Sanity");
   3.172 +#ifdef USE_RegionTaskQueueWithOverflow
   3.173 +  assert(region_stack()->is_empty(), "Sanity");
   3.174  #else
   3.175 -  assert(chunk_stack()->size() == 0, "Sanity");
   3.176 -  assert(chunk_overflow_stack()->length() == 0, "Sanity");
   3.177 +  assert(region_stack()->size() == 0, "Sanity");
   3.178 +  assert(region_overflow_stack()->length() == 0, "Sanity");
   3.179  #endif
   3.180  #else
   3.181    oop obj;
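
The psCompactionManager changes above preserve the existing two-level structure: region indices are pushed onto a bounded, stealable task queue and spill onto a private growable overflow stack when the queue is full, and draining empties the overflow stack first so other threads can keep stealing from the claimed queue while this thread works. Below is a self-contained sketch of that push-with-overflow idea using hypothetical types (RegionStackWithOverflow and std::vector stand-ins), not the real RegionTaskQueue and GrowableArray.

// Sketch of the push-with-overflow pattern behind region_stack_push() and
// drain_region_stacks(); illustrative types only, not HotSpot code.
#include <cstddef>
#include <cstdio>
#include <vector>

class RegionStackWithOverflow {          // hypothetical helper type
 public:
  explicit RegionStackWithOverflow(size_t capacity) : _capacity(capacity) {}

  // Mirrors region_stack_push(): bounded queue first, overflow second.
  void push(size_t region_index) {
    if (_queue.size() < _capacity) {
      _queue.push_back(region_index);    // the stealable, bounded part
    } else {
      _overflow.push_back(region_index); // private spill area, never full
    }
  }

  // Mirrors drain_region_stacks(): overflow first, then the local queue.
  template <typename Fn>
  void drain(Fn fill_and_update_region) {
    do {
      while (!_overflow.empty()) {
        size_t r = _overflow.back();
        _overflow.pop_back();
        fill_and_update_region(r);
      }
      while (!_queue.empty()) {
        size_t r = _queue.back();
        _queue.pop_back();
        fill_and_update_region(r);
      }
    } while (!_overflow.empty() || !_queue.empty());
  }

 private:
  size_t _capacity;
  std::vector<size_t> _queue;     // stand-in for the bounded RegionTaskQueue
  std::vector<size_t> _overflow;  // stand-in for GrowableArray<size_t>
};

int main() {
  RegionStackWithOverflow stack(2);
  for (size_t r = 0; r < 5; ++r) stack.push(r);   // regions 2..4 overflow
  stack.drain([](size_t r) { std::printf("region %zu\n", r); });
  return 0;
}
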
     4.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Tue Sep 30 11:49:31 2008 -0700
     4.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Tue Sep 30 12:20:22 2008 -0700
     4.3 @@ -52,7 +52,7 @@
     4.4    friend class ParallelTaskTerminator;
     4.5    friend class ParMarkBitMap;
     4.6    friend class PSParallelCompact;
     4.7 -  friend class StealChunkCompactionTask;
     4.8 +  friend class StealRegionCompactionTask;
     4.9    friend class UpdateAndFillClosure;
    4.10    friend class RefProcTaskExecutor;
    4.11  
    4.12 @@ -72,27 +72,27 @@
    4.13  // ------------------------  End don't putback if not needed
    4.14  
    4.15   private:
    4.16 -  static ParCompactionManager**  _manager_array;
    4.17 -  static OopTaskQueueSet*      _stack_array;
    4.18 -  static ObjectStartArray*     _start_array;
    4.19 -  static ChunkTaskQueueSet*    _chunk_array;
    4.20 -  static PSOldGen*             _old_gen;
    4.21 +  static ParCompactionManager** _manager_array;
    4.22 +  static OopTaskQueueSet*       _stack_array;
    4.23 +  static ObjectStartArray*      _start_array;
    4.24 +  static RegionTaskQueueSet*    _region_array;
    4.25 +  static PSOldGen*              _old_gen;
    4.26  
    4.27 -  OopTaskQueue                 _marking_stack;
    4.28 -  GrowableArray<oop>*          _overflow_stack;
    4.29 +  OopTaskQueue                  _marking_stack;
    4.30 +  GrowableArray<oop>*           _overflow_stack;
    4.31    // Is there a way to reuse the _marking_stack for the
    4.32 -  // saving empty chunks?  For now just create a different
    4.33 +  // saving empty regions?  For now just create a different
    4.34    // type of TaskQueue.
    4.35  
    4.36 -#ifdef USE_ChunkTaskQueueWithOverflow
    4.37 -  ChunkTaskQueueWithOverflow   _chunk_stack;
    4.38 +#ifdef USE_RegionTaskQueueWithOverflow
    4.39 +  RegionTaskQueueWithOverflow   _region_stack;
    4.40  #else
    4.41 -  ChunkTaskQueue               _chunk_stack;
    4.42 -  GrowableArray<size_t>*       _chunk_overflow_stack;
    4.43 +  RegionTaskQueue               _region_stack;
    4.44 +  GrowableArray<size_t>*        _region_overflow_stack;
    4.45  #endif
    4.46  
    4.47  #if 1  // does this happen enough to need a per thread stack?
    4.48 -  GrowableArray<Klass*>*       _revisit_klass_stack;
    4.49 +  GrowableArray<Klass*>*        _revisit_klass_stack;
    4.50  #endif
    4.51    static ParMarkBitMap* _mark_bitmap;
    4.52  
    4.53 @@ -100,21 +100,22 @@
    4.54  
    4.55    static PSOldGen* old_gen()             { return _old_gen; }
    4.56    static ObjectStartArray* start_array() { return _start_array; }
    4.57 -  static OopTaskQueueSet* stack_array()   { return _stack_array; }
    4.58 +  static OopTaskQueueSet* stack_array()  { return _stack_array; }
    4.59  
    4.60    static void initialize(ParMarkBitMap* mbm);
    4.61  
    4.62   protected:
    4.63    // Array of tasks.  Needed by the ParallelTaskTerminator.
    4.64 -  static ChunkTaskQueueSet* chunk_array()   { return _chunk_array; }
    4.65 -
    4.66 -  OopTaskQueue*  marking_stack()          { return &_marking_stack; }
    4.67 -  GrowableArray<oop>* overflow_stack()    { return _overflow_stack; }
    4.68 -#ifdef USE_ChunkTaskQueueWithOverflow
    4.69 -  ChunkTaskQueueWithOverflow* chunk_stack() { return &_chunk_stack; }
    4.70 +  static RegionTaskQueueSet* region_array()      { return _region_array; }
    4.71 +  OopTaskQueue*  marking_stack()                 { return &_marking_stack; }
    4.72 +  GrowableArray<oop>* overflow_stack()           { return _overflow_stack; }
    4.73 +#ifdef USE_RegionTaskQueueWithOverflow
    4.74 +  RegionTaskQueueWithOverflow* region_stack()    { return &_region_stack; }
    4.75  #else
    4.76 -  ChunkTaskQueue*  chunk_stack()          { return &_chunk_stack; }
    4.77 -  GrowableArray<size_t>* chunk_overflow_stack() { return _chunk_overflow_stack; }
    4.78 +  RegionTaskQueue*  region_stack()               { return &_region_stack; }
    4.79 +  GrowableArray<size_t>* region_overflow_stack() {
    4.80 +    return _region_overflow_stack;
    4.81 +  }
    4.82  #endif
    4.83  
    4.84    // Pushes onto the marking stack.  If the marking stack is full,
    4.85 @@ -123,9 +124,9 @@
    4.86    // Do not implement an equivalent stack_pop.  Deal with the
    4.87    // marking stack and overflow stack directly.
    4.88  
    4.89 -  // Pushes onto the chunk stack.  If the chunk stack is full,
    4.90 -  // pushes onto the chunk overflow stack.
    4.91 -  void chunk_stack_push(size_t chunk_index);
    4.92 +  // Pushes onto the region stack.  If the region stack is full,
    4.93 +  // pushes onto the region overflow stack.
    4.94 +  void region_stack_push(size_t region_index);
    4.95   public:
    4.96  
    4.97    Action action() { return _action; }
    4.98 @@ -160,10 +161,10 @@
    4.99    // Get a oop for scanning.  If returns null, no oop were found.
   4.100    oop retrieve_for_scanning();
   4.101  
   4.102 -  // Save chunk for later processing.  Must not fail.
   4.103 -  void save_for_processing(size_t chunk_index);
   4.104 -  // Get a chunk for processing.  If returns null, no chunk were found.
   4.105 -  bool retrieve_for_processing(size_t& chunk_index);
   4.106 +  // Save region for later processing.  Must not fail.
   4.107 +  void save_for_processing(size_t region_index);
   4.108 +  // Get a region for processing.  If returns null, no region were found.
   4.109 +  bool retrieve_for_processing(size_t& region_index);
   4.110  
   4.111    // Access function for compaction managers
   4.112    static ParCompactionManager* gc_thread_compaction_manager(int index);
   4.113 @@ -172,18 +173,18 @@
   4.114      return stack_array()->steal(queue_num, seed, t);
   4.115    }
   4.116  
   4.117 -  static bool steal(int queue_num, int* seed, ChunkTask& t) {
   4.118 -    return chunk_array()->steal(queue_num, seed, t);
   4.119 +  static bool steal(int queue_num, int* seed, RegionTask& t) {
   4.120 +    return region_array()->steal(queue_num, seed, t);
   4.121    }
   4.122  
   4.123    // Process tasks remaining on any stack
   4.124    void drain_marking_stacks(OopClosure *blk);
   4.125  
   4.126    // Process tasks remaining on any stack
   4.127 -  void drain_chunk_stacks();
   4.128 +  void drain_region_stacks();
   4.129  
   4.130    // Process tasks remaining on any stack
   4.131 -  void drain_chunk_overflow_stack();
   4.132 +  void drain_region_overflow_stack();
   4.133  
   4.134    // Debugging support
   4.135  #ifdef ASSERT
     5.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Sep 30 11:49:31 2008 -0700
     5.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Sep 30 12:20:22 2008 -0700
     5.3 @@ -28,12 +28,13 @@
     5.4  #include <math.h>
     5.5  
     5.6  // All sizes are in HeapWords.
     5.7 -const size_t ParallelCompactData::Log2ChunkSize  = 9; // 512 words
     5.8 -const size_t ParallelCompactData::ChunkSize      = (size_t)1 << Log2ChunkSize;
     5.9 -const size_t ParallelCompactData::ChunkSizeBytes = ChunkSize << LogHeapWordSize;
    5.10 -const size_t ParallelCompactData::ChunkSizeOffsetMask = ChunkSize - 1;
    5.11 -const size_t ParallelCompactData::ChunkAddrOffsetMask = ChunkSizeBytes - 1;
    5.12 -const size_t ParallelCompactData::ChunkAddrMask  = ~ChunkAddrOffsetMask;
    5.13 +const size_t ParallelCompactData::Log2RegionSize  = 9; // 512 words
    5.14 +const size_t ParallelCompactData::RegionSize      = (size_t)1 << Log2RegionSize;
    5.15 +const size_t ParallelCompactData::RegionSizeBytes =
    5.16 +  RegionSize << LogHeapWordSize;
    5.17 +const size_t ParallelCompactData::RegionSizeOffsetMask = RegionSize - 1;
    5.18 +const size_t ParallelCompactData::RegionAddrOffsetMask = RegionSizeBytes - 1;
    5.19 +const size_t ParallelCompactData::RegionAddrMask  = ~RegionAddrOffsetMask;
    5.20  
    5.21  // 32-bit:  128 words covers 4 bitmap words
    5.22  // 64-bit:  128 words covers 2 bitmap words
    5.23 @@ -42,25 +43,25 @@
    5.24  const size_t ParallelCompactData::BlockOffsetMask = BlockSize - 1;
    5.25  const size_t ParallelCompactData::BlockMask       = ~BlockOffsetMask;
    5.26  
    5.27 -const size_t ParallelCompactData::BlocksPerChunk = ChunkSize / BlockSize;
    5.28 -
    5.29 -const ParallelCompactData::ChunkData::chunk_sz_t
    5.30 -ParallelCompactData::ChunkData::dc_shift = 27;
    5.31 -
    5.32 -const ParallelCompactData::ChunkData::chunk_sz_t
    5.33 -ParallelCompactData::ChunkData::dc_mask = ~0U << dc_shift;
    5.34 -
    5.35 -const ParallelCompactData::ChunkData::chunk_sz_t
    5.36 -ParallelCompactData::ChunkData::dc_one = 0x1U << dc_shift;
    5.37 -
    5.38 -const ParallelCompactData::ChunkData::chunk_sz_t
    5.39 -ParallelCompactData::ChunkData::los_mask = ~dc_mask;
    5.40 -
    5.41 -const ParallelCompactData::ChunkData::chunk_sz_t
    5.42 -ParallelCompactData::ChunkData::dc_claimed = 0x8U << dc_shift;
    5.43 -
    5.44 -const ParallelCompactData::ChunkData::chunk_sz_t
    5.45 -ParallelCompactData::ChunkData::dc_completed = 0xcU << dc_shift;
    5.46 +const size_t ParallelCompactData::BlocksPerRegion = RegionSize / BlockSize;
    5.47 +
    5.48 +const ParallelCompactData::RegionData::region_sz_t
    5.49 +ParallelCompactData::RegionData::dc_shift = 27;
    5.50 +
    5.51 +const ParallelCompactData::RegionData::region_sz_t
    5.52 +ParallelCompactData::RegionData::dc_mask = ~0U << dc_shift;
    5.53 +
    5.54 +const ParallelCompactData::RegionData::region_sz_t
    5.55 +ParallelCompactData::RegionData::dc_one = 0x1U << dc_shift;
    5.56 +
    5.57 +const ParallelCompactData::RegionData::region_sz_t
    5.58 +ParallelCompactData::RegionData::los_mask = ~dc_mask;
    5.59 +
    5.60 +const ParallelCompactData::RegionData::region_sz_t
    5.61 +ParallelCompactData::RegionData::dc_claimed = 0x8U << dc_shift;
    5.62 +
    5.63 +const ParallelCompactData::RegionData::region_sz_t
    5.64 +ParallelCompactData::RegionData::dc_completed = 0xcU << dc_shift;
    5.65  
    5.66  #ifdef ASSERT
    5.67  short   ParallelCompactData::BlockData::_cur_phase = 0;
    5.68 @@ -105,7 +106,7 @@
    5.69    "perm", "old ", "eden", "from", "to  "
    5.70  };
    5.71  
    5.72 -void PSParallelCompact::print_chunk_ranges()
    5.73 +void PSParallelCompact::print_region_ranges()
    5.74  {
    5.75    tty->print_cr("space  bottom     top        end        new_top");
    5.76    tty->print_cr("------ ---------- ---------- ---------- ----------");
    5.77 @@ -116,31 +117,31 @@
    5.78                    SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " "
    5.79                    SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) " ",
    5.80                    id, space_names[id],
    5.81 -                  summary_data().addr_to_chunk_idx(space->bottom()),
    5.82 -                  summary_data().addr_to_chunk_idx(space->top()),
    5.83 -                  summary_data().addr_to_chunk_idx(space->end()),
    5.84 -                  summary_data().addr_to_chunk_idx(_space_info[id].new_top()));
    5.85 +                  summary_data().addr_to_region_idx(space->bottom()),
    5.86 +                  summary_data().addr_to_region_idx(space->top()),
    5.87 +                  summary_data().addr_to_region_idx(space->end()),
    5.88 +                  summary_data().addr_to_region_idx(_space_info[id].new_top()));
    5.89    }
    5.90  }
    5.91  
    5.92  void
    5.93 -print_generic_summary_chunk(size_t i, const ParallelCompactData::ChunkData* c)
    5.94 +print_generic_summary_region(size_t i, const ParallelCompactData::RegionData* c)
    5.95  {
    5.96 -#define CHUNK_IDX_FORMAT        SIZE_FORMAT_W(7)
    5.97 -#define CHUNK_DATA_FORMAT       SIZE_FORMAT_W(5)
    5.98 +#define REGION_IDX_FORMAT        SIZE_FORMAT_W(7)
    5.99 +#define REGION_DATA_FORMAT       SIZE_FORMAT_W(5)
   5.100  
   5.101    ParallelCompactData& sd = PSParallelCompact::summary_data();
   5.102 -  size_t dci = c->destination() ? sd.addr_to_chunk_idx(c->destination()) : 0;
   5.103 -  tty->print_cr(CHUNK_IDX_FORMAT " " PTR_FORMAT " "
   5.104 -                CHUNK_IDX_FORMAT " " PTR_FORMAT " "
   5.105 -                CHUNK_DATA_FORMAT " " CHUNK_DATA_FORMAT " "
   5.106 -                CHUNK_DATA_FORMAT " " CHUNK_IDX_FORMAT " %d",
   5.107 +  size_t dci = c->destination() ? sd.addr_to_region_idx(c->destination()) : 0;
   5.108 +  tty->print_cr(REGION_IDX_FORMAT " " PTR_FORMAT " "
   5.109 +                REGION_IDX_FORMAT " " PTR_FORMAT " "
   5.110 +                REGION_DATA_FORMAT " " REGION_DATA_FORMAT " "
   5.111 +                REGION_DATA_FORMAT " " REGION_IDX_FORMAT " %d",
   5.112                  i, c->data_location(), dci, c->destination(),
   5.113                  c->partial_obj_size(), c->live_obj_size(),
   5.114 -                c->data_size(), c->source_chunk(), c->destination_count());
   5.115 -
   5.116 -#undef  CHUNK_IDX_FORMAT
   5.117 -#undef  CHUNK_DATA_FORMAT
   5.118 +                c->data_size(), c->source_region(), c->destination_count());
   5.119 +
   5.120 +#undef  REGION_IDX_FORMAT
   5.121 +#undef  REGION_DATA_FORMAT
   5.122  }
   5.123  
   5.124  void
   5.125 @@ -149,14 +150,14 @@
   5.126                             HeapWord* const end_addr)
   5.127  {
   5.128    size_t total_words = 0;
   5.129 -  size_t i = summary_data.addr_to_chunk_idx(beg_addr);
   5.130 -  const size_t last = summary_data.addr_to_chunk_idx(end_addr);
   5.131 +  size_t i = summary_data.addr_to_region_idx(beg_addr);
   5.132 +  const size_t last = summary_data.addr_to_region_idx(end_addr);
   5.133    HeapWord* pdest = 0;
   5.134  
   5.135    while (i <= last) {
   5.136 -    ParallelCompactData::ChunkData* c = summary_data.chunk(i);
   5.137 +    ParallelCompactData::RegionData* c = summary_data.region(i);
   5.138      if (c->data_size() != 0 || c->destination() != pdest) {
   5.139 -      print_generic_summary_chunk(i, c);
   5.140 +      print_generic_summary_region(i, c);
   5.141        total_words += c->data_size();
   5.142        pdest = c->destination();
   5.143      }
   5.144 @@ -178,16 +179,16 @@
   5.145  }
   5.146  
   5.147  void
   5.148 -print_initial_summary_chunk(size_t i,
   5.149 -                            const ParallelCompactData::ChunkData* c,
   5.150 -                            bool newline = true)
   5.151 +print_initial_summary_region(size_t i,
   5.152 +                             const ParallelCompactData::RegionData* c,
   5.153 +                             bool newline = true)
   5.154  {
   5.155    tty->print(SIZE_FORMAT_W(5) " " PTR_FORMAT " "
   5.156               SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " "
   5.157               SIZE_FORMAT_W(5) " " SIZE_FORMAT_W(5) " %d",
   5.158               i, c->destination(),
   5.159               c->partial_obj_size(), c->live_obj_size(),
   5.160 -             c->data_size(), c->source_chunk(), c->destination_count());
   5.161 +             c->data_size(), c->source_region(), c->destination_count());
   5.162    if (newline) tty->cr();
   5.163  }
   5.164  
   5.165 @@ -198,47 +199,48 @@
   5.166      return;
   5.167    }
   5.168  
   5.169 -  const size_t chunk_size = ParallelCompactData::ChunkSize;
   5.170 -  HeapWord* const top_aligned_up = summary_data.chunk_align_up(space->top());
   5.171 -  const size_t end_chunk = summary_data.addr_to_chunk_idx(top_aligned_up);
   5.172 -  const ParallelCompactData::ChunkData* c = summary_data.chunk(end_chunk - 1);
   5.173 +  const size_t region_size = ParallelCompactData::RegionSize;
   5.174 +  typedef ParallelCompactData::RegionData RegionData;
   5.175 +  HeapWord* const top_aligned_up = summary_data.region_align_up(space->top());
   5.176 +  const size_t end_region = summary_data.addr_to_region_idx(top_aligned_up);
   5.177 +  const RegionData* c = summary_data.region(end_region - 1);
   5.178    HeapWord* end_addr = c->destination() + c->data_size();
   5.179    const size_t live_in_space = pointer_delta(end_addr, space->bottom());
   5.180  
   5.181 -  // Print (and count) the full chunks at the beginning of the space.
   5.182 -  size_t full_chunk_count = 0;
   5.183 -  size_t i = summary_data.addr_to_chunk_idx(space->bottom());
   5.184 -  while (i < end_chunk && summary_data.chunk(i)->data_size() == chunk_size) {
   5.185 -    print_initial_summary_chunk(i, summary_data.chunk(i));
   5.186 -    ++full_chunk_count;
   5.187 +  // Print (and count) the full regions at the beginning of the space.
   5.188 +  size_t full_region_count = 0;
   5.189 +  size_t i = summary_data.addr_to_region_idx(space->bottom());
   5.190 +  while (i < end_region && summary_data.region(i)->data_size() == region_size) {
   5.191 +    print_initial_summary_region(i, summary_data.region(i));
   5.192 +    ++full_region_count;
   5.193      ++i;
   5.194    }
   5.195  
   5.196 -  size_t live_to_right = live_in_space - full_chunk_count * chunk_size;
   5.197 +  size_t live_to_right = live_in_space - full_region_count * region_size;
   5.198  
   5.199    double max_reclaimed_ratio = 0.0;
   5.200 -  size_t max_reclaimed_ratio_chunk = 0;
   5.201 +  size_t max_reclaimed_ratio_region = 0;
   5.202    size_t max_dead_to_right = 0;
   5.203    size_t max_live_to_right = 0;
   5.204  
   5.205 -  // Print the 'reclaimed ratio' for chunks while there is something live in the
   5.206 -  // chunk or to the right of it.  The remaining chunks are empty (and
   5.207 +  // Print the 'reclaimed ratio' for regions while there is something live in
   5.208 +  // the region or to the right of it.  The remaining regions are empty (and
   5.209    // uninteresting), and computing the ratio will result in division by 0.
   5.210 -  while (i < end_chunk && live_to_right > 0) {
   5.211 -    c = summary_data.chunk(i);
   5.212 -    HeapWord* const chunk_addr = summary_data.chunk_to_addr(i);
   5.213 -    const size_t used_to_right = pointer_delta(space->top(), chunk_addr);
   5.214 +  while (i < end_region && live_to_right > 0) {
   5.215 +    c = summary_data.region(i);
   5.216 +    HeapWord* const region_addr = summary_data.region_to_addr(i);
   5.217 +    const size_t used_to_right = pointer_delta(space->top(), region_addr);
   5.218      const size_t dead_to_right = used_to_right - live_to_right;
   5.219      const double reclaimed_ratio = double(dead_to_right) / live_to_right;
   5.220  
   5.221      if (reclaimed_ratio > max_reclaimed_ratio) {
   5.222              max_reclaimed_ratio = reclaimed_ratio;
   5.223 -            max_reclaimed_ratio_chunk = i;
   5.224 +            max_reclaimed_ratio_region = i;
   5.225              max_dead_to_right = dead_to_right;
   5.226              max_live_to_right = live_to_right;
   5.227      }
   5.228  
   5.229 -    print_initial_summary_chunk(i, c, false);
   5.230 +    print_initial_summary_region(i, c, false);
   5.231      tty->print_cr(" %12.10f " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10),
   5.232                    reclaimed_ratio, dead_to_right, live_to_right);
   5.233  
   5.234 @@ -246,14 +248,14 @@
   5.235      ++i;
   5.236    }
   5.237  
   5.238 -  // Any remaining chunks are empty.  Print one more if there is one.
   5.239 -  if (i < end_chunk) {
   5.240 -    print_initial_summary_chunk(i, summary_data.chunk(i));
   5.241 +  // Any remaining regions are empty.  Print one more if there is one.
   5.242 +  if (i < end_region) {
   5.243 +    print_initial_summary_region(i, summary_data.region(i));
   5.244    }
   5.245  
   5.246    tty->print_cr("max:  " SIZE_FORMAT_W(4) " d2r=" SIZE_FORMAT_W(10) " "
   5.247                  "l2r=" SIZE_FORMAT_W(10) " max_ratio=%14.12f",
   5.248 -                max_reclaimed_ratio_chunk, max_dead_to_right,
   5.249 +                max_reclaimed_ratio_region, max_dead_to_right,
   5.250                  max_live_to_right, max_reclaimed_ratio);
   5.251  }
   5.252  
   5.253 @@ -285,9 +287,9 @@
   5.254  {
   5.255    _region_start = 0;
   5.256  
   5.257 -  _chunk_vspace = 0;
   5.258 -  _chunk_data = 0;
   5.259 -  _chunk_count = 0;
   5.260 +  _region_vspace = 0;
   5.261 +  _region_data = 0;
   5.262 +  _region_count = 0;
   5.263  
   5.264    _block_vspace = 0;
   5.265    _block_data = 0;
   5.266 @@ -300,16 +302,16 @@
   5.267    const size_t region_size = covered_region.word_size();
   5.268    DEBUG_ONLY(_region_end = _region_start + region_size;)
   5.269  
   5.270 -  assert(chunk_align_down(_region_start) == _region_start,
   5.271 +  assert(region_align_down(_region_start) == _region_start,
   5.272           "region start not aligned");
   5.273 -  assert((region_size & ChunkSizeOffsetMask) == 0,
   5.274 -         "region size not a multiple of ChunkSize");
   5.275 -
   5.276 -  bool result = initialize_chunk_data(region_size);
   5.277 +  assert((region_size & RegionSizeOffsetMask) == 0,
   5.278 +         "region size not a multiple of RegionSize");
   5.279 +
   5.280 +  bool result = initialize_region_data(region_size);
   5.281  
   5.282    // Initialize the block data if it will be used for updating pointers, or if
   5.283    // this is a debug build.
   5.284 -  if (!UseParallelOldGCChunkPointerCalc || trueInDebug) {
   5.285 +  if (!UseParallelOldGCRegionPointerCalc || trueInDebug) {
   5.286      result = result && initialize_block_data(region_size);
   5.287    }
   5.288  
   5.289 @@ -342,13 +344,13 @@
   5.290    return 0;
   5.291  }
   5.292  
   5.293 -bool ParallelCompactData::initialize_chunk_data(size_t region_size)
   5.294 +bool ParallelCompactData::initialize_region_data(size_t region_size)
   5.295  {
   5.296 -  const size_t count = (region_size + ChunkSizeOffsetMask) >> Log2ChunkSize;
   5.297 -  _chunk_vspace = create_vspace(count, sizeof(ChunkData));
   5.298 -  if (_chunk_vspace != 0) {
   5.299 -    _chunk_data = (ChunkData*)_chunk_vspace->reserved_low_addr();
   5.300 -    _chunk_count = count;
   5.301 +  const size_t count = (region_size + RegionSizeOffsetMask) >> Log2RegionSize;
   5.302 +  _region_vspace = create_vspace(count, sizeof(RegionData));
   5.303 +  if (_region_vspace != 0) {
   5.304 +    _region_data = (RegionData*)_region_vspace->reserved_low_addr();
   5.305 +    _region_count = count;
   5.306      return true;
   5.307    }
   5.308    return false;
   5.309 @@ -371,35 +373,35 @@
   5.310    if (_block_data) {
   5.311      memset(_block_data, 0, _block_vspace->committed_size());
   5.312    }
   5.313 -  memset(_chunk_data, 0, _chunk_vspace->committed_size());
   5.314 +  memset(_region_data, 0, _region_vspace->committed_size());
   5.315  }
   5.316  
   5.317 -void ParallelCompactData::clear_range(size_t beg_chunk, size_t end_chunk) {
   5.318 -  assert(beg_chunk <= _chunk_count, "beg_chunk out of range");
   5.319 -  assert(end_chunk <= _chunk_count, "end_chunk out of range");
   5.320 -  assert(ChunkSize % BlockSize == 0, "ChunkSize not a multiple of BlockSize");
   5.321 -
   5.322 -  const size_t chunk_cnt = end_chunk - beg_chunk;
   5.323 +void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
   5.324 +  assert(beg_region <= _region_count, "beg_region out of range");
   5.325 +  assert(end_region <= _region_count, "end_region out of range");
   5.326 +  assert(RegionSize % BlockSize == 0, "RegionSize not a multiple of BlockSize");
   5.327 +
   5.328 +  const size_t region_cnt = end_region - beg_region;
   5.329  
   5.330    if (_block_data) {
   5.331 -    const size_t blocks_per_chunk = ChunkSize / BlockSize;
   5.332 -    const size_t beg_block = beg_chunk * blocks_per_chunk;
   5.333 -    const size_t block_cnt = chunk_cnt * blocks_per_chunk;
   5.334 +    const size_t blocks_per_region = RegionSize / BlockSize;
   5.335 +    const size_t beg_block = beg_region * blocks_per_region;
   5.336 +    const size_t block_cnt = region_cnt * blocks_per_region;
   5.337      memset(_block_data + beg_block, 0, block_cnt * sizeof(BlockData));
   5.338    }
   5.339 -  memset(_chunk_data + beg_chunk, 0, chunk_cnt * sizeof(ChunkData));
   5.340 +  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
   5.341  }
   5.342  
   5.343 -HeapWord* ParallelCompactData::partial_obj_end(size_t chunk_idx) const
   5.344 +HeapWord* ParallelCompactData::partial_obj_end(size_t region_idx) const
   5.345  {
   5.346 -  const ChunkData* cur_cp = chunk(chunk_idx);
   5.347 -  const ChunkData* const end_cp = chunk(chunk_count() - 1);
   5.348 -
   5.349 -  HeapWord* result = chunk_to_addr(chunk_idx);
   5.350 +  const RegionData* cur_cp = region(region_idx);
   5.351 +  const RegionData* const end_cp = region(region_count() - 1);
   5.352 +
   5.353 +  HeapWord* result = region_to_addr(region_idx);
   5.354    if (cur_cp < end_cp) {
   5.355      do {
   5.356        result += cur_cp->partial_obj_size();
   5.357 -    } while (cur_cp->partial_obj_size() == ChunkSize && ++cur_cp < end_cp);
   5.358 +    } while (cur_cp->partial_obj_size() == RegionSize && ++cur_cp < end_cp);
   5.359    }
   5.360    return result;
   5.361  }
   5.362 @@ -407,56 +409,56 @@
   5.363  void ParallelCompactData::add_obj(HeapWord* addr, size_t len)
   5.364  {
   5.365    const size_t obj_ofs = pointer_delta(addr, _region_start);
   5.366 -  const size_t beg_chunk = obj_ofs >> Log2ChunkSize;
   5.367 -  const size_t end_chunk = (obj_ofs + len - 1) >> Log2ChunkSize;
   5.368 +  const size_t beg_region = obj_ofs >> Log2RegionSize;
   5.369 +  const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
   5.370  
   5.371    DEBUG_ONLY(Atomic::inc_ptr(&add_obj_count);)
   5.372    DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
   5.373  
   5.374 -  if (beg_chunk == end_chunk) {
   5.375 -    // All in one chunk.
   5.376 -    _chunk_data[beg_chunk].add_live_obj(len);
   5.377 +  if (beg_region == end_region) {
   5.378 +    // All in one region.
   5.379 +    _region_data[beg_region].add_live_obj(len);
   5.380      return;
   5.381    }
   5.382  
   5.383 -  // First chunk.
   5.384 -  const size_t beg_ofs = chunk_offset(addr);
   5.385 -  _chunk_data[beg_chunk].add_live_obj(ChunkSize - beg_ofs);
   5.386 +  // First region.
   5.387 +  const size_t beg_ofs = region_offset(addr);
   5.388 +  _region_data[beg_region].add_live_obj(RegionSize - beg_ofs);
   5.389  
   5.390    klassOop klass = ((oop)addr)->klass();
   5.391 -  // Middle chunks--completely spanned by this object.
   5.392 -  for (size_t chunk = beg_chunk + 1; chunk < end_chunk; ++chunk) {
   5.393 -    _chunk_data[chunk].set_partial_obj_size(ChunkSize);
   5.394 -    _chunk_data[chunk].set_partial_obj_addr(addr);
   5.395 +  // Middle regions--completely spanned by this object.
   5.396 +  for (size_t region = beg_region + 1; region < end_region; ++region) {
   5.397 +    _region_data[region].set_partial_obj_size(RegionSize);
   5.398 +    _region_data[region].set_partial_obj_addr(addr);
   5.399    }
   5.400  
   5.401 -  // Last chunk.
   5.402 -  const size_t end_ofs = chunk_offset(addr + len - 1);
   5.403 -  _chunk_data[end_chunk].set_partial_obj_size(end_ofs + 1);
   5.404 -  _chunk_data[end_chunk].set_partial_obj_addr(addr);
   5.405 +  // Last region.
   5.406 +  const size_t end_ofs = region_offset(addr + len - 1);
   5.407 +  _region_data[end_region].set_partial_obj_size(end_ofs + 1);
   5.408 +  _region_data[end_region].set_partial_obj_addr(addr);
   5.409  }
   5.410  
   5.411  void
   5.412  ParallelCompactData::summarize_dense_prefix(HeapWord* beg, HeapWord* end)
   5.413  {
   5.414 -  assert(chunk_offset(beg) == 0, "not ChunkSize aligned");
   5.415 -  assert(chunk_offset(end) == 0, "not ChunkSize aligned");
   5.416 -
   5.417 -  size_t cur_chunk = addr_to_chunk_idx(beg);
   5.418 -  const size_t end_chunk = addr_to_chunk_idx(end);
   5.419 +  assert(region_offset(beg) == 0, "not RegionSize aligned");
   5.420 +  assert(region_offset(end) == 0, "not RegionSize aligned");
   5.421 +
   5.422 +  size_t cur_region = addr_to_region_idx(beg);
   5.423 +  const size_t end_region = addr_to_region_idx(end);
   5.424    HeapWord* addr = beg;
   5.425 -  while (cur_chunk < end_chunk) {
   5.426 -    _chunk_data[cur_chunk].set_destination(addr);
   5.427 -    _chunk_data[cur_chunk].set_destination_count(0);
   5.428 -    _chunk_data[cur_chunk].set_source_chunk(cur_chunk);
   5.429 -    _chunk_data[cur_chunk].set_data_location(addr);
   5.430 -
   5.431 -    // Update live_obj_size so the chunk appears completely full.
   5.432 -    size_t live_size = ChunkSize - _chunk_data[cur_chunk].partial_obj_size();
   5.433 -    _chunk_data[cur_chunk].set_live_obj_size(live_size);
   5.434 -
   5.435 -    ++cur_chunk;
   5.436 -    addr += ChunkSize;
   5.437 +  while (cur_region < end_region) {
   5.438 +    _region_data[cur_region].set_destination(addr);
   5.439 +    _region_data[cur_region].set_destination_count(0);
   5.440 +    _region_data[cur_region].set_source_region(cur_region);
   5.441 +    _region_data[cur_region].set_data_location(addr);
   5.442 +
   5.443 +    // Update live_obj_size so the region appears completely full.
   5.444 +    size_t live_size = RegionSize - _region_data[cur_region].partial_obj_size();
   5.445 +    _region_data[cur_region].set_live_obj_size(live_size);
   5.446 +
   5.447 +    ++cur_region;
   5.448 +    addr += RegionSize;
   5.449    }
   5.450  }
   5.451  
   5.452 @@ -465,7 +467,7 @@
   5.453                                      HeapWord** target_next,
   5.454                                      HeapWord** source_next) {
   5.455    // This is too strict.
   5.456 -  // assert(chunk_offset(source_beg) == 0, "not ChunkSize aligned");
   5.457 +  // assert(region_offset(source_beg) == 0, "not RegionSize aligned");
   5.458  
   5.459    if (TraceParallelOldGCSummaryPhase) {
   5.460      tty->print_cr("tb=" PTR_FORMAT " te=" PTR_FORMAT " "
   5.461 @@ -477,85 +479,86 @@
   5.462                    source_next != 0 ? *source_next : (HeapWord*) 0);
   5.463    }
   5.464  
   5.465 -  size_t cur_chunk = addr_to_chunk_idx(source_beg);
   5.466 -  const size_t end_chunk = addr_to_chunk_idx(chunk_align_up(source_end));
   5.467 +  size_t cur_region = addr_to_region_idx(source_beg);
   5.468 +  const size_t end_region = addr_to_region_idx(region_align_up(source_end));
   5.469  
   5.470    HeapWord *dest_addr = target_beg;
   5.471 -  while (cur_chunk < end_chunk) {
   5.472 -    size_t words = _chunk_data[cur_chunk].data_size();
   5.473 +  while (cur_region < end_region) {
   5.474 +    size_t words = _region_data[cur_region].data_size();
   5.475  
   5.476  #if     1
   5.477      assert(pointer_delta(target_end, dest_addr) >= words,
   5.478             "source region does not fit into target region");
   5.479  #else
   5.480 -    // XXX - need some work on the corner cases here.  If the chunk does not
   5.481 -    // fit, then must either make sure any partial_obj from the chunk fits, or
   5.482 -    // 'undo' the initial part of the partial_obj that is in the previous chunk.
   5.483 +    // XXX - need some work on the corner cases here.  If the region does not
   5.484 +    // fit, then must either make sure any partial_obj from the region fits, or
   5.485 +    // "undo" the initial part of the partial_obj that is in the previous
   5.486 +    // region.
   5.487      if (dest_addr + words >= target_end) {
   5.488        // Let the caller know where to continue.
   5.489        *target_next = dest_addr;
   5.490 -      *source_next = chunk_to_addr(cur_chunk);
   5.491 +      *source_next = region_to_addr(cur_region);
   5.492        return false;
   5.493      }
   5.494  #endif  // #if 1
   5.495  
   5.496 -    _chunk_data[cur_chunk].set_destination(dest_addr);
   5.497 -
   5.498 -    // Set the destination_count for cur_chunk, and if necessary, update
   5.499 -    // source_chunk for a destination chunk.  The source_chunk field is updated
   5.500 -    // if cur_chunk is the first (left-most) chunk to be copied to a destination
   5.501 -    // chunk.
   5.502 +    _region_data[cur_region].set_destination(dest_addr);
   5.503 +
   5.504 +    // Set the destination_count for cur_region, and if necessary, update
   5.505 +    // source_region for a destination region.  The source_region field is
   5.506 +    // updated if cur_region is the first (left-most) region to be copied to a
   5.507 +    // destination region.
   5.508      //
   5.509 -    // The destination_count calculation is a bit subtle.  A chunk that has data
   5.510 -    // that compacts into itself does not count itself as a destination.  This
   5.511 -    // maintains the invariant that a zero count means the chunk is available
   5.512 -    // and can be claimed and then filled.
   5.513 +    // The destination_count calculation is a bit subtle.  A region that has
   5.514 +    // data that compacts into itself does not count itself as a destination.
   5.515 +    // This maintains the invariant that a zero count means the region is
   5.516 +    // available and can be claimed and then filled.
   5.517      if (words > 0) {
   5.518        HeapWord* const last_addr = dest_addr + words - 1;
   5.519 -      const size_t dest_chunk_1 = addr_to_chunk_idx(dest_addr);
   5.520 -      const size_t dest_chunk_2 = addr_to_chunk_idx(last_addr);
   5.521 +      const size_t dest_region_1 = addr_to_region_idx(dest_addr);
   5.522 +      const size_t dest_region_2 = addr_to_region_idx(last_addr);
   5.523  #if     0
   5.524 -      // Initially assume that the destination chunks will be the same and
   5.525 +      // Initially assume that the destination regions will be the same and
   5.526        // adjust the value below if necessary.  Under this assumption, if
   5.527 -      // cur_chunk == dest_chunk_2, then cur_chunk will be compacted completely
   5.528 -      // into itself.
   5.529 -      uint destination_count = cur_chunk == dest_chunk_2 ? 0 : 1;
   5.530 -      if (dest_chunk_1 != dest_chunk_2) {
   5.531 -        // Destination chunks differ; adjust destination_count.
   5.532 +      // cur_region == dest_region_2, then cur_region will be compacted
   5.533 +      // completely into itself.
   5.534 +      uint destination_count = cur_region == dest_region_2 ? 0 : 1;
   5.535 +      if (dest_region_1 != dest_region_2) {
   5.536 +        // Destination regions differ; adjust destination_count.
   5.537          destination_count += 1;
   5.538 -        // Data from cur_chunk will be copied to the start of dest_chunk_2.
   5.539 -        _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
   5.540 -      } else if (chunk_offset(dest_addr) == 0) {
   5.541 -        // Data from cur_chunk will be copied to the start of the destination
   5.542 -        // chunk.
   5.543 -        _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
   5.544 +        // Data from cur_region will be copied to the start of dest_region_2.
   5.545 +        _region_data[dest_region_2].set_source_region(cur_region);
   5.546 +      } else if (region_offset(dest_addr) == 0) {
   5.547 +        // Data from cur_region will be copied to the start of the destination
   5.548 +        // region.
   5.549 +        _region_data[dest_region_1].set_source_region(cur_region);
   5.550        }
   5.551  #else
   5.552 -      // Initially assume that the destination chunks will be different and
   5.553 +      // Initially assume that the destination regions will be different and
   5.554        // adjust the value below if necessary.  Under this assumption, if
   5.555 -      // cur_chunk == dest_chunk2, then cur_chunk will be compacted partially
   5.556 -      // into dest_chunk_1 and partially into itself.
   5.557 -      uint destination_count = cur_chunk == dest_chunk_2 ? 1 : 2;
   5.558 -      if (dest_chunk_1 != dest_chunk_2) {
   5.559 -        // Data from cur_chunk will be copied to the start of dest_chunk_2.
   5.560 -        _chunk_data[dest_chunk_2].set_source_chunk(cur_chunk);
   5.561 +      // cur_region == dest_region2, then cur_region will be compacted partially
   5.562 +      // into dest_region_1 and partially into itself.
   5.563 +      uint destination_count = cur_region == dest_region_2 ? 1 : 2;
   5.564 +      if (dest_region_1 != dest_region_2) {
   5.565 +        // Data from cur_region will be copied to the start of dest_region_2.
   5.566 +        _region_data[dest_region_2].set_source_region(cur_region);
   5.567        } else {
   5.568 -        // Destination chunks are the same; adjust destination_count.
   5.569 +        // Destination regions are the same; adjust destination_count.
   5.570          destination_count -= 1;
   5.571 -        if (chunk_offset(dest_addr) == 0) {
   5.572 -          // Data from cur_chunk will be copied to the start of the destination
   5.573 -          // chunk.
   5.574 -          _chunk_data[dest_chunk_1].set_source_chunk(cur_chunk);
   5.575 +        if (region_offset(dest_addr) == 0) {
   5.576 +          // Data from cur_region will be copied to the start of the destination
   5.577 +          // region.
   5.578 +          _region_data[dest_region_1].set_source_region(cur_region);
   5.579          }
   5.580        }
   5.581  #endif  // #if 0
   5.582  
   5.583 -      _chunk_data[cur_chunk].set_destination_count(destination_count);
   5.584 -      _chunk_data[cur_chunk].set_data_location(chunk_to_addr(cur_chunk));
   5.585 +      _region_data[cur_region].set_destination_count(destination_count);
   5.586 +      _region_data[cur_region].set_data_location(region_to_addr(cur_region));
   5.587        dest_addr += words;
   5.588      }
   5.589  
   5.590 -    ++cur_chunk;
   5.591 +    ++cur_region;
   5.592    }
   5.593  
   5.594    *target_next = dest_addr;
   5.595 @@ -565,8 +568,8 @@
   5.596  bool ParallelCompactData::partial_obj_ends_in_block(size_t block_index) {
   5.597    HeapWord* block_addr = block_to_addr(block_index);
   5.598    HeapWord* block_end_addr = block_addr + BlockSize;
   5.599 -  size_t chunk_index = addr_to_chunk_idx(block_addr);
   5.600 -  HeapWord* partial_obj_end_addr = partial_obj_end(chunk_index);
   5.601 +  size_t region_index = addr_to_region_idx(block_addr);
   5.602 +  HeapWord* partial_obj_end_addr = partial_obj_end(region_index);
   5.603  
   5.604    // An object that ends at the end of the block, ends
   5.605    // in the block (the last word of the object is to
   5.606 @@ -581,8 +584,8 @@
   5.607  
   5.608  HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
   5.609    HeapWord* result = NULL;
   5.610 -  if (UseParallelOldGCChunkPointerCalc) {
   5.611 -    result = chunk_calc_new_pointer(addr);
   5.612 +  if (UseParallelOldGCRegionPointerCalc) {
   5.613 +    result = region_calc_new_pointer(addr);
   5.614    } else {
   5.615      result = block_calc_new_pointer(addr);
   5.616    }
   5.617 @@ -595,7 +598,7 @@
    5.618  // the object is dead.  But don't waste the cycles to explicitly check
   5.619  // that it is dead since only live objects should be passed in.
   5.620  
   5.621 -HeapWord* ParallelCompactData::chunk_calc_new_pointer(HeapWord* addr) {
   5.622 +HeapWord* ParallelCompactData::region_calc_new_pointer(HeapWord* addr) {
   5.623    assert(addr != NULL, "Should detect NULL oop earlier");
   5.624    assert(PSParallelCompact::gc_heap()->is_in(addr), "addr not in heap");
   5.625  #ifdef ASSERT
   5.626 @@ -605,30 +608,30 @@
   5.627  #endif
   5.628    assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
   5.629  
   5.630 -  // Chunk covering the object.
   5.631 -  size_t chunk_index = addr_to_chunk_idx(addr);
   5.632 -  const ChunkData* const chunk_ptr = chunk(chunk_index);
   5.633 -  HeapWord* const chunk_addr = chunk_align_down(addr);
   5.634 -
   5.635 -  assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
   5.636 -  assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");
   5.637 -
   5.638 -  HeapWord* result = chunk_ptr->destination();
   5.639 -
   5.640 -  // If all the data in the chunk is live, then the new location of the object
   5.641 -  // can be calculated from the destination of the chunk plus the offset of the
   5.642 -  // object in the chunk.
   5.643 -  if (chunk_ptr->data_size() == ChunkSize) {
   5.644 -    result += pointer_delta(addr, chunk_addr);
   5.645 +  // Region covering the object.
   5.646 +  size_t region_index = addr_to_region_idx(addr);
   5.647 +  const RegionData* const region_ptr = region(region_index);
   5.648 +  HeapWord* const region_addr = region_align_down(addr);
   5.649 +
   5.650 +  assert(addr < region_addr + RegionSize, "Region does not cover object");
   5.651 +  assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
   5.652 +
   5.653 +  HeapWord* result = region_ptr->destination();
   5.654 +
   5.655 +  // If all the data in the region is live, then the new location of the object
   5.656 +  // can be calculated from the destination of the region plus the offset of the
   5.657 +  // object in the region.
   5.658 +  if (region_ptr->data_size() == RegionSize) {
   5.659 +    result += pointer_delta(addr, region_addr);
   5.660      return result;
   5.661    }
   5.662  
   5.663    // The new location of the object is
   5.664 -  //    chunk destination +
   5.665 -  //    size of the partial object extending onto the chunk +
   5.666 -  //    sizes of the live objects in the Chunk that are to the left of addr
   5.667 -  const size_t partial_obj_size = chunk_ptr->partial_obj_size();
   5.668 -  HeapWord* const search_start = chunk_addr + partial_obj_size;
   5.669 +  //    region destination +
   5.670 +  //    size of the partial object extending onto the region +
   5.671 +  //    sizes of the live objects in the Region that are to the left of addr
   5.672 +  const size_t partial_obj_size = region_ptr->partial_obj_size();
   5.673 +  HeapWord* const search_start = region_addr + partial_obj_size;
   5.674  
   5.675    const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
   5.676    size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
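
A standalone sketch (not part of this changeset) of the forwarding arithmetic described in the comments above.  Quantities are plain word offsets rather than HeapWord pointers; RegionSize and the parameter names are illustrative:

    #include <cstddef>

    const size_t RegionSize = 4096;   // assumed region length in words

    size_t forward_address(size_t region_destination,  // where the region compacts to
                           size_t region_start,        // first word of the region
                           size_t addr,                // word being forwarded
                           size_t data_size,           // live words in the region
                           size_t partial_obj_size,    // words of an object spilling in
                           size_t live_words_to_left)  // live words in [spill end, addr)
    {
      if (data_size == RegionSize) {
        // Fully live region: every object keeps its offset within the region.
        return region_destination + (addr - region_start);
      }
      // Otherwise: destination + spill-over object + live data left of addr.
      return region_destination + partial_obj_size + live_words_to_left;
    }
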
   5.677 @@ -648,37 +651,37 @@
   5.678  #endif
   5.679    assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "obj not marked");
   5.680  
   5.681 -  // Chunk covering the object.
   5.682 -  size_t chunk_index = addr_to_chunk_idx(addr);
   5.683 -  const ChunkData* const chunk_ptr = chunk(chunk_index);
   5.684 -  HeapWord* const chunk_addr = chunk_align_down(addr);
   5.685 -
   5.686 -  assert(addr < chunk_addr + ChunkSize, "Chunk does not cover object");
   5.687 -  assert(addr_to_chunk_ptr(chunk_addr) == chunk_ptr, "sanity check");
   5.688 -
   5.689 -  HeapWord* result = chunk_ptr->destination();
   5.690 -
   5.691 -  // If all the data in the chunk is live, then the new location of the object
   5.692 -  // can be calculated from the destination of the chunk plus the offset of the
   5.693 -  // object in the chunk.
   5.694 -  if (chunk_ptr->data_size() == ChunkSize) {
   5.695 -    result += pointer_delta(addr, chunk_addr);
   5.696 +  // Region covering the object.
   5.697 +  size_t region_index = addr_to_region_idx(addr);
   5.698 +  const RegionData* const region_ptr = region(region_index);
   5.699 +  HeapWord* const region_addr = region_align_down(addr);
   5.700 +
   5.701 +  assert(addr < region_addr + RegionSize, "Region does not cover object");
   5.702 +  assert(addr_to_region_ptr(region_addr) == region_ptr, "sanity check");
   5.703 +
   5.704 +  HeapWord* result = region_ptr->destination();
   5.705 +
   5.706 +  // If all the data in the region is live, then the new location of the object
   5.707 +  // can be calculated from the destination of the region plus the offset of the
   5.708 +  // object in the region.
   5.709 +  if (region_ptr->data_size() == RegionSize) {
   5.710 +    result += pointer_delta(addr, region_addr);
   5.711      return result;
   5.712    }
   5.713  
   5.714    // The new location of the object is
   5.715 -  //    chunk destination +
   5.716 +  //    region destination +
   5.717    //    block offset +
   5.718    //    sizes of the live objects in the Block that are to the left of addr
   5.719    const size_t block_offset = addr_to_block_ptr(addr)->offset();
   5.720 -  HeapWord* const search_start = chunk_addr + block_offset;
   5.721 +  HeapWord* const search_start = region_addr + block_offset;
   5.722  
   5.723    const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
   5.724    size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
   5.725  
   5.726    result += block_offset + live_to_left;
   5.727    assert(result <= addr, "object cannot move to the right");
   5.728 -  assert(result == chunk_calc_new_pointer(addr), "Should match");
   5.729 +  assert(result == region_calc_new_pointer(addr), "Should match");
   5.730    return result;
   5.731  }
   5.732  
   5.733 @@ -705,15 +708,15 @@
   5.734  
   5.735  void ParallelCompactData::verify_clear()
   5.736  {
   5.737 -  verify_clear(_chunk_vspace);
   5.738 +  verify_clear(_region_vspace);
   5.739    verify_clear(_block_vspace);
   5.740  }
   5.741  #endif  // #ifdef ASSERT
   5.742  
   5.743  #ifdef NOT_PRODUCT
   5.744 -ParallelCompactData::ChunkData* debug_chunk(size_t chunk_index) {
   5.745 +ParallelCompactData::RegionData* debug_region(size_t region_index) {
   5.746    ParallelCompactData& sd = PSParallelCompact::summary_data();
   5.747 -  return sd.chunk(chunk_index);
   5.748 +  return sd.region(region_index);
   5.749  }
   5.750  #endif
   5.751  
   5.752 @@ -866,10 +869,10 @@
   5.753    const idx_t end_bit = BitMap::word_align_up(_mark_bitmap.addr_to_bit(top));
   5.754    _mark_bitmap.clear_range(beg_bit, end_bit);
   5.755  
   5.756 -  const size_t beg_chunk = _summary_data.addr_to_chunk_idx(bot);
   5.757 -  const size_t end_chunk =
   5.758 -    _summary_data.addr_to_chunk_idx(_summary_data.chunk_align_up(max_top));
   5.759 -  _summary_data.clear_range(beg_chunk, end_chunk);
   5.760 +  const size_t beg_region = _summary_data.addr_to_region_idx(bot);
   5.761 +  const size_t end_region =
   5.762 +    _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
   5.763 +  _summary_data.clear_range(beg_region, end_region);
   5.764  }
   5.765  
   5.766  void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
   5.767 @@ -985,19 +988,19 @@
   5.768  PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
   5.769                                                      bool maximum_compaction)
   5.770  {
   5.771 -  const size_t chunk_size = ParallelCompactData::ChunkSize;
   5.772 +  const size_t region_size = ParallelCompactData::RegionSize;
   5.773    const ParallelCompactData& sd = summary_data();
   5.774  
   5.775    const MutableSpace* const space = _space_info[id].space();
   5.776 -  HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
   5.777 -  const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(space->bottom());
   5.778 -  const ChunkData* const end_cp = sd.addr_to_chunk_ptr(top_aligned_up);
   5.779 -
   5.780 -  // Skip full chunks at the beginning of the space--they are necessarily part
   5.781 +  HeapWord* const top_aligned_up = sd.region_align_up(space->top());
   5.782 +  const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
   5.783 +  const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
   5.784 +
   5.785 +  // Skip full regions at the beginning of the space--they are necessarily part
   5.786    // of the dense prefix.
   5.787    size_t full_count = 0;
   5.788 -  const ChunkData* cp;
   5.789 -  for (cp = beg_cp; cp < end_cp && cp->data_size() == chunk_size; ++cp) {
   5.790 +  const RegionData* cp;
   5.791 +  for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
   5.792      ++full_count;
   5.793    }
   5.794  
   5.795 @@ -1006,7 +1009,7 @@
   5.796    const bool interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
   5.797    if (maximum_compaction || cp == end_cp || interval_ended) {
   5.798      _maximum_compaction_gc_num = total_invocations();
   5.799 -    return sd.chunk_to_addr(cp);
   5.800 +    return sd.region_to_addr(cp);
   5.801    }
   5.802  
   5.803    HeapWord* const new_top = _space_info[id].new_top();
   5.804 @@ -1029,52 +1032,53 @@
   5.805    }
   5.806  
   5.807    // XXX - Use binary search?
   5.808 -  HeapWord* dense_prefix = sd.chunk_to_addr(cp);
   5.809 -  const ChunkData* full_cp = cp;
   5.810 -  const ChunkData* const top_cp = sd.addr_to_chunk_ptr(space->top() - 1);
   5.811 +  HeapWord* dense_prefix = sd.region_to_addr(cp);
   5.812 +  const RegionData* full_cp = cp;
   5.813 +  const RegionData* const top_cp = sd.addr_to_region_ptr(space->top() - 1);
   5.814    while (cp < end_cp) {
   5.815 -    HeapWord* chunk_destination = cp->destination();
   5.816 -    const size_t cur_deadwood = pointer_delta(dense_prefix, chunk_destination);
   5.817 +    HeapWord* region_destination = cp->destination();
   5.818 +    const size_t cur_deadwood = pointer_delta(dense_prefix, region_destination);
   5.819      if (TraceParallelOldGCDensePrefix && Verbose) {
   5.820        tty->print_cr("c#=" SIZE_FORMAT_W(4) " dst=" PTR_FORMAT " "
   5.821                      "dp=" SIZE_FORMAT_W(8) " " "cdw=" SIZE_FORMAT_W(8),
   5.822 -                    sd.chunk(cp), chunk_destination,
   5.823 +                    sd.region(cp), region_destination,
   5.824                      dense_prefix, cur_deadwood);
   5.825      }
   5.826  
   5.827      if (cur_deadwood >= deadwood_goal) {
   5.828 -      // Found the chunk that has the correct amount of deadwood to the left.
   5.829 -      // This typically occurs after crossing a fairly sparse set of chunks, so
   5.830 -      // iterate backwards over those sparse chunks, looking for the chunk that
   5.831 -      // has the lowest density of live objects 'to the right.'
   5.832 -      size_t space_to_left = sd.chunk(cp) * chunk_size;
   5.833 +      // Found the region that has the correct amount of deadwood to the left.
   5.834 +      // This typically occurs after crossing a fairly sparse set of regions, so
   5.835 +      // iterate backwards over those sparse regions, looking for the region
   5.836 +      // that has the lowest density of live objects 'to the right.'
   5.837 +      size_t space_to_left = sd.region(cp) * region_size;
   5.838        size_t live_to_left = space_to_left - cur_deadwood;
   5.839        size_t space_to_right = space_capacity - space_to_left;
   5.840        size_t live_to_right = space_live - live_to_left;
   5.841        double density_to_right = double(live_to_right) / space_to_right;
   5.842        while (cp > full_cp) {
   5.843          --cp;
   5.844 -        const size_t prev_chunk_live_to_right = live_to_right - cp->data_size();
   5.845 -        const size_t prev_chunk_space_to_right = space_to_right + chunk_size;
   5.846 -        double prev_chunk_density_to_right =
   5.847 -          double(prev_chunk_live_to_right) / prev_chunk_space_to_right;
   5.848 -        if (density_to_right <= prev_chunk_density_to_right) {
   5.849 +        const size_t prev_region_live_to_right = live_to_right -
   5.850 +          cp->data_size();
   5.851 +        const size_t prev_region_space_to_right = space_to_right + region_size;
   5.852 +        double prev_region_density_to_right =
   5.853 +          double(prev_region_live_to_right) / prev_region_space_to_right;
   5.854 +        if (density_to_right <= prev_region_density_to_right) {
   5.855            return dense_prefix;
   5.856          }
   5.857          if (TraceParallelOldGCDensePrefix && Verbose) {
   5.858            tty->print_cr("backing up from c=" SIZE_FORMAT_W(4) " d2r=%10.8f "
   5.859 -                        "pc_d2r=%10.8f", sd.chunk(cp), density_to_right,
   5.860 -                        prev_chunk_density_to_right);
   5.861 +                        "pc_d2r=%10.8f", sd.region(cp), density_to_right,
   5.862 +                        prev_region_density_to_right);
   5.863          }
   5.864 -        dense_prefix -= chunk_size;
   5.865 -        live_to_right = prev_chunk_live_to_right;
   5.866 -        space_to_right = prev_chunk_space_to_right;
   5.867 -        density_to_right = prev_chunk_density_to_right;
   5.868 +        dense_prefix -= region_size;
   5.869 +        live_to_right = prev_region_live_to_right;
   5.870 +        space_to_right = prev_region_space_to_right;
   5.871 +        density_to_right = prev_region_density_to_right;
   5.872        }
   5.873        return dense_prefix;
   5.874      }
   5.875  
   5.876 -    dense_prefix += chunk_size;
   5.877 +    dense_prefix += region_size;
   5.878      ++cp;
   5.879    }
   5.880  
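
A standalone sketch (not part of this changeset) of the outer walk above: the dead wood to the left of a candidate boundary is simply the gap between the region's address and its compaction destination.  The Region struct and deadwood_goal are stand-ins:

    #include <cstddef>
    #include <vector>

    struct Region { size_t addr; size_t destination; };   // destination <= addr

    // Index of the first region whose dead wood to the left meets the goal,
    // or regions.size() if the goal is never reached.
    size_t find_deadwood_limit(const std::vector<Region>& regions,
                               size_t deadwood_goal) {
      for (size_t i = 0; i < regions.size(); ++i) {
        // Everything below regions[i].addr compacts down to regions[i].destination,
        // so the difference is exactly the dead space to the left.
        const size_t cur_deadwood = regions[i].addr - regions[i].destination;
        if (cur_deadwood >= deadwood_goal) return i;
      }
      return regions.size();
    }
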
   5.881 @@ -1087,8 +1091,8 @@
   5.882                                                   const bool maximum_compaction,
   5.883                                                   HeapWord* const addr)
   5.884  {
   5.885 -  const size_t chunk_idx = summary_data().addr_to_chunk_idx(addr);
   5.886 -  ChunkData* const cp = summary_data().chunk(chunk_idx);
   5.887 +  const size_t region_idx = summary_data().addr_to_region_idx(addr);
   5.888 +  RegionData* const cp = summary_data().region(region_idx);
   5.889    const MutableSpace* const space = _space_info[id].space();
   5.890    HeapWord* const new_top = _space_info[id].new_top();
   5.891  
   5.892 @@ -1104,7 +1108,7 @@
   5.893                  "d2l=" SIZE_FORMAT " d2l%%=%6.4f "
   5.894                  "d2r=" SIZE_FORMAT " l2r=" SIZE_FORMAT
   5.895                  " ratio=%10.8f",
   5.896 -                algorithm, addr, chunk_idx,
   5.897 +                algorithm, addr, region_idx,
   5.898                  space_live,
   5.899                  dead_to_left, dead_to_left_pct,
   5.900                  dead_to_right, live_to_right,
   5.901 @@ -1166,52 +1170,52 @@
   5.902    return MAX2(limit, 0.0);
   5.903  }
   5.904  
   5.905 -ParallelCompactData::ChunkData*
   5.906 -PSParallelCompact::first_dead_space_chunk(const ChunkData* beg,
   5.907 -                                          const ChunkData* end)
   5.908 +ParallelCompactData::RegionData*
   5.909 +PSParallelCompact::first_dead_space_region(const RegionData* beg,
   5.910 +                                           const RegionData* end)
   5.911  {
   5.912 -  const size_t chunk_size = ParallelCompactData::ChunkSize;
   5.913 +  const size_t region_size = ParallelCompactData::RegionSize;
   5.914    ParallelCompactData& sd = summary_data();
   5.915 -  size_t left = sd.chunk(beg);
   5.916 -  size_t right = end > beg ? sd.chunk(end) - 1 : left;
   5.917 +  size_t left = sd.region(beg);
   5.918 +  size_t right = end > beg ? sd.region(end) - 1 : left;
   5.919  
   5.920    // Binary search.
   5.921    while (left < right) {
   5.922      // Equivalent to (left + right) / 2, but does not overflow.
   5.923      const size_t middle = left + (right - left) / 2;
   5.924 -    ChunkData* const middle_ptr = sd.chunk(middle);
   5.925 +    RegionData* const middle_ptr = sd.region(middle);
   5.926      HeapWord* const dest = middle_ptr->destination();
   5.927 -    HeapWord* const addr = sd.chunk_to_addr(middle);
   5.928 +    HeapWord* const addr = sd.region_to_addr(middle);
   5.929      assert(dest != NULL, "sanity");
   5.930      assert(dest <= addr, "must move left");
   5.931  
   5.932      if (middle > left && dest < addr) {
   5.933        right = middle - 1;
   5.934 -    } else if (middle < right && middle_ptr->data_size() == chunk_size) {
   5.935 +    } else if (middle < right && middle_ptr->data_size() == region_size) {
   5.936        left = middle + 1;
   5.937      } else {
   5.938        return middle_ptr;
   5.939      }
   5.940    }
   5.941 -  return sd.chunk(left);
   5.942 +  return sd.region(left);
   5.943  }
   5.944  
   5.945 -ParallelCompactData::ChunkData*
   5.946 -PSParallelCompact::dead_wood_limit_chunk(const ChunkData* beg,
   5.947 -                                         const ChunkData* end,
   5.948 -                                         size_t dead_words)
   5.949 +ParallelCompactData::RegionData*
   5.950 +PSParallelCompact::dead_wood_limit_region(const RegionData* beg,
   5.951 +                                          const RegionData* end,
   5.952 +                                          size_t dead_words)
   5.953  {
   5.954    ParallelCompactData& sd = summary_data();
   5.955 -  size_t left = sd.chunk(beg);
   5.956 -  size_t right = end > beg ? sd.chunk(end) - 1 : left;
   5.957 +  size_t left = sd.region(beg);
   5.958 +  size_t right = end > beg ? sd.region(end) - 1 : left;
   5.959  
   5.960    // Binary search.
   5.961    while (left < right) {
   5.962      // Equivalent to (left + right) / 2, but does not overflow.
   5.963      const size_t middle = left + (right - left) / 2;
   5.964 -    ChunkData* const middle_ptr = sd.chunk(middle);
   5.965 +    RegionData* const middle_ptr = sd.region(middle);
   5.966      HeapWord* const dest = middle_ptr->destination();
   5.967 -    HeapWord* const addr = sd.chunk_to_addr(middle);
   5.968 +    HeapWord* const addr = sd.region_to_addr(middle);
   5.969      assert(dest != NULL, "sanity");
   5.970      assert(dest <= addr, "must move left");
   5.971  
   5.972 @@ -1224,13 +1228,13 @@
   5.973        return middle_ptr;
   5.974      }
   5.975    }
   5.976 -  return sd.chunk(left);
   5.977 +  return sd.region(left);
   5.978  }
   5.979  
   5.980  // The result is valid during the summary phase, after the initial summarization
   5.981  // of each space into itself, and before final summarization.
   5.982  inline double
   5.983 -PSParallelCompact::reclaimed_ratio(const ChunkData* const cp,
   5.984 +PSParallelCompact::reclaimed_ratio(const RegionData* const cp,
   5.985                                     HeapWord* const bottom,
   5.986                                     HeapWord* const top,
   5.987                                     HeapWord* const new_top)
   5.988 @@ -1244,12 +1248,13 @@
   5.989    assert(top >= new_top, "summary data problem?");
   5.990    assert(new_top > bottom, "space is empty; should not be here");
   5.991    assert(new_top >= cp->destination(), "sanity");
   5.992 -  assert(top >= sd.chunk_to_addr(cp), "sanity");
   5.993 +  assert(top >= sd.region_to_addr(cp), "sanity");
   5.994  
   5.995    HeapWord* const destination = cp->destination();
   5.996    const size_t dense_prefix_live  = pointer_delta(destination, bottom);
   5.997    const size_t compacted_region_live = pointer_delta(new_top, destination);
   5.998 -  const size_t compacted_region_used = pointer_delta(top, sd.chunk_to_addr(cp));
   5.999 +  const size_t compacted_region_used = pointer_delta(top,
  5.1000 +                                                     sd.region_to_addr(cp));
  5.1001    const size_t reclaimable = compacted_region_used - compacted_region_live;
  5.1002  
  5.1003    const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
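
A standalone sketch (not part of this changeset) of the ratio computed above, with made-up word counts; copying cost is weighted 1.25x against data already in the dense prefix:

    #include <cstddef>
    #include <cstdio>

    double reclaimed_ratio(size_t dense_prefix_live,      // live words left in place
                           size_t compacted_region_live,  // live words to be copied
                           size_t compacted_region_used)  // words currently used there
    {
      const size_t reclaimable = compacted_region_used - compacted_region_live;
      const double divisor = dense_prefix_live + 1.25 * compacted_region_live;
      return double(reclaimable) / divisor;
    }

    int main() {
      // e.g. 1M live words kept in place, 256K live out of 768K used words to copy
      std::printf("%f\n", reclaimed_ratio(1u << 20, 256u << 10, 768u << 10));
      return 0;
    }
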
  5.1004 @@ -1257,39 +1262,40 @@
  5.1005  }
  5.1006  
  5.1007  // Return the address of the end of the dense prefix, a.k.a. the start of the
  5.1008 -// compacted region.  The address is always on a chunk boundary.
  5.1009 +// compacted region.  The address is always on a region boundary.
  5.1010  //
  5.1011 -// Completely full chunks at the left are skipped, since no compaction can occur
  5.1012 -// in those chunks.  Then the maximum amount of dead wood to allow is computed,
  5.1013 -// based on the density (amount live / capacity) of the generation; the chunk
  5.1014 -// with approximately that amount of dead space to the left is identified as the
  5.1015 -// limit chunk.  Chunks between the last completely full chunk and the limit
  5.1016 -// chunk are scanned and the one that has the best (maximum) reclaimed_ratio()
  5.1017 -// is selected.
  5.1018 +// Completely full regions at the left are skipped, since no compaction can
  5.1019 +// occur in those regions.  Then the maximum amount of dead wood to allow is
  5.1020 +// computed, based on the density (amount live / capacity) of the generation;
  5.1021 +// the region with approximately that amount of dead space to the left is
  5.1022 +// identified as the limit region.  Regions between the last completely full
  5.1023 +// region and the limit region are scanned and the one that has the best
  5.1024 +// (maximum) reclaimed_ratio() is selected.
  5.1025  HeapWord*
  5.1026  PSParallelCompact::compute_dense_prefix(const SpaceId id,
  5.1027                                          bool maximum_compaction)
  5.1028  {
  5.1029 -  const size_t chunk_size = ParallelCompactData::ChunkSize;
  5.1030 +  const size_t region_size = ParallelCompactData::RegionSize;
  5.1031    const ParallelCompactData& sd = summary_data();
  5.1032  
  5.1033    const MutableSpace* const space = _space_info[id].space();
  5.1034    HeapWord* const top = space->top();
  5.1035 -  HeapWord* const top_aligned_up = sd.chunk_align_up(top);
  5.1036 +  HeapWord* const top_aligned_up = sd.region_align_up(top);
  5.1037    HeapWord* const new_top = _space_info[id].new_top();
  5.1038 -  HeapWord* const new_top_aligned_up = sd.chunk_align_up(new_top);
  5.1039 +  HeapWord* const new_top_aligned_up = sd.region_align_up(new_top);
  5.1040    HeapWord* const bottom = space->bottom();
  5.1041 -  const ChunkData* const beg_cp = sd.addr_to_chunk_ptr(bottom);
  5.1042 -  const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
  5.1043 -  const ChunkData* const new_top_cp = sd.addr_to_chunk_ptr(new_top_aligned_up);
  5.1044 -
  5.1045 -  // Skip full chunks at the beginning of the space--they are necessarily part
  5.1046 +  const RegionData* const beg_cp = sd.addr_to_region_ptr(bottom);
  5.1047 +  const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
  5.1048 +  const RegionData* const new_top_cp =
  5.1049 +    sd.addr_to_region_ptr(new_top_aligned_up);
  5.1050 +
  5.1051 +  // Skip full regions at the beginning of the space--they are necessarily part
  5.1052    // of the dense prefix.
  5.1053 -  const ChunkData* const full_cp = first_dead_space_chunk(beg_cp, new_top_cp);
  5.1054 -  assert(full_cp->destination() == sd.chunk_to_addr(full_cp) ||
  5.1055 +  const RegionData* const full_cp = first_dead_space_region(beg_cp, new_top_cp);
  5.1056 +  assert(full_cp->destination() == sd.region_to_addr(full_cp) ||
  5.1057           space->is_empty(), "no dead space allowed to the left");
  5.1058 -  assert(full_cp->data_size() < chunk_size || full_cp == new_top_cp - 1,
  5.1059 -         "chunk must have dead space");
  5.1060 +  assert(full_cp->data_size() < region_size || full_cp == new_top_cp - 1,
  5.1061 +         "region must have dead space");
  5.1062  
  5.1063    // The gc number is saved whenever a maximum compaction is done, and used to
  5.1064    // determine when the maximum compaction interval has expired.  This avoids
  5.1065 @@ -1300,7 +1306,7 @@
  5.1066      total_invocations() == HeapFirstMaximumCompactionCount;
  5.1067    if (maximum_compaction || full_cp == top_cp || interval_ended) {
  5.1068      _maximum_compaction_gc_num = total_invocations();
  5.1069 -    return sd.chunk_to_addr(full_cp);
  5.1070 +    return sd.region_to_addr(full_cp);
  5.1071    }
  5.1072  
  5.1073    const size_t space_live = pointer_delta(new_top, bottom);
  5.1074 @@ -1326,15 +1332,15 @@
  5.1075                    dead_wood_max, dead_wood_limit);
  5.1076    }
  5.1077  
  5.1078 -  // Locate the chunk with the desired amount of dead space to the left.
  5.1079 -  const ChunkData* const limit_cp =
  5.1080 -    dead_wood_limit_chunk(full_cp, top_cp, dead_wood_limit);
  5.1081 -
  5.1082 -  // Scan from the first chunk with dead space to the limit chunk and find the
  5.1083 +  // Locate the region with the desired amount of dead space to the left.
  5.1084 +  const RegionData* const limit_cp =
  5.1085 +    dead_wood_limit_region(full_cp, top_cp, dead_wood_limit);
  5.1086 +
  5.1087 +  // Scan from the first region with dead space to the limit region and find the
  5.1088    // one with the best (largest) reclaimed ratio.
  5.1089    double best_ratio = 0.0;
  5.1090 -  const ChunkData* best_cp = full_cp;
  5.1091 -  for (const ChunkData* cp = full_cp; cp < limit_cp; ++cp) {
  5.1092 +  const RegionData* best_cp = full_cp;
  5.1093 +  for (const RegionData* cp = full_cp; cp < limit_cp; ++cp) {
  5.1094      double tmp_ratio = reclaimed_ratio(cp, bottom, top, new_top);
  5.1095      if (tmp_ratio > best_ratio) {
  5.1096        best_cp = cp;
  5.1097 @@ -1343,18 +1349,18 @@
  5.1098    }
  5.1099  
  5.1100  #if     0
  5.1101 -  // Something to consider:  if the chunk with the best ratio is 'close to' the
  5.1102 -  // first chunk w/free space, choose the first chunk with free space
  5.1103 -  // ("first-free").  The first-free chunk is usually near the start of the
  5.1104 +  // Something to consider:  if the region with the best ratio is 'close to' the
  5.1105 +  // first region w/free space, choose the first region with free space
  5.1106 +  // ("first-free").  The first-free region is usually near the start of the
  5.1107    // heap, which means we are copying most of the heap already, so copy a bit
  5.1108    // more to get complete compaction.
  5.1109 -  if (pointer_delta(best_cp, full_cp, sizeof(ChunkData)) < 4) {
  5.1110 +  if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) {
  5.1111      _maximum_compaction_gc_num = total_invocations();
  5.1112      best_cp = full_cp;
  5.1113    }
  5.1114  #endif  // #if 0
  5.1115  
  5.1116 -  return sd.chunk_to_addr(best_cp);
  5.1117 +  return sd.region_to_addr(best_cp);
  5.1118  }
  5.1119  
  5.1120  void PSParallelCompact::summarize_spaces_quick()
  5.1121 @@ -1372,9 +1378,9 @@
  5.1122  void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
  5.1123  {
  5.1124    HeapWord* const dense_prefix_end = dense_prefix(id);
  5.1125 -  const ChunkData* chunk = _summary_data.addr_to_chunk_ptr(dense_prefix_end);
  5.1126 +  const RegionData* region = _summary_data.addr_to_region_ptr(dense_prefix_end);
  5.1127    const idx_t dense_prefix_bit = _mark_bitmap.addr_to_bit(dense_prefix_end);
  5.1128 -  if (dead_space_crosses_boundary(chunk, dense_prefix_bit)) {
  5.1129 +  if (dead_space_crosses_boundary(region, dense_prefix_bit)) {
  5.1130      // Only enough dead space is filled so that any remaining dead space to the
  5.1131      // left is larger than the minimum filler object.  (The remainder is filled
  5.1132      // during the copy/update phase.)
  5.1133 @@ -1465,7 +1471,7 @@
  5.1134        fill_dense_prefix_end(id);
  5.1135      }
  5.1136  
  5.1137 -    // Compute the destination of each Chunk, and thus each object.
  5.1138 +    // Compute the destination of each Region, and thus each object.
  5.1139      _summary_data.summarize_dense_prefix(space->bottom(), dense_prefix_end);
  5.1140      _summary_data.summarize(dense_prefix_end, space->end(),
  5.1141                              dense_prefix_end, space->top(),
  5.1142 @@ -1473,19 +1479,19 @@
  5.1143    }
  5.1144  
  5.1145    if (TraceParallelOldGCSummaryPhase) {
  5.1146 -    const size_t chunk_size = ParallelCompactData::ChunkSize;
  5.1147 +    const size_t region_size = ParallelCompactData::RegionSize;
  5.1148      HeapWord* const dense_prefix_end = _space_info[id].dense_prefix();
  5.1149 -    const size_t dp_chunk = _summary_data.addr_to_chunk_idx(dense_prefix_end);
  5.1150 +    const size_t dp_region = _summary_data.addr_to_region_idx(dense_prefix_end);
  5.1151      const size_t dp_words = pointer_delta(dense_prefix_end, space->bottom());
  5.1152      HeapWord* const new_top = _space_info[id].new_top();
  5.1153 -    const HeapWord* nt_aligned_up = _summary_data.chunk_align_up(new_top);
  5.1154 +    const HeapWord* nt_aligned_up = _summary_data.region_align_up(new_top);
  5.1155      const size_t cr_words = pointer_delta(nt_aligned_up, dense_prefix_end);
  5.1156      tty->print_cr("id=%d cap=" SIZE_FORMAT " dp=" PTR_FORMAT " "
  5.1157 -                  "dp_chunk=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
  5.1158 +                  "dp_region=" SIZE_FORMAT " " "dp_count=" SIZE_FORMAT " "
  5.1159                    "cr_count=" SIZE_FORMAT " " "nt=" PTR_FORMAT,
  5.1160                    id, space->capacity_in_words(), dense_prefix_end,
  5.1161 -                  dp_chunk, dp_words / chunk_size,
  5.1162 -                  cr_words / chunk_size, new_top);
  5.1163 +                  dp_region, dp_words / region_size,
  5.1164 +                  cr_words / region_size, new_top);
  5.1165    }
  5.1166  }
  5.1167  
  5.1168 @@ -1513,7 +1519,7 @@
  5.1169    if (TraceParallelOldGCSummaryPhase) {
  5.1170      tty->print_cr("summary_phase:  after summarizing each space to self");
  5.1171      Universe::print();
  5.1172 -    NOT_PRODUCT(print_chunk_ranges());
  5.1173 +    NOT_PRODUCT(print_region_ranges());
  5.1174      if (Verbose) {
  5.1175        NOT_PRODUCT(print_initial_summary_data(_summary_data, _space_info));
  5.1176      }
  5.1177 @@ -1559,14 +1565,15 @@
  5.1178                                space->bottom(), space->top(),
  5.1179                                new_top_addr);
  5.1180  
  5.1181 -      // Clear the source_chunk field for each chunk in the space.
  5.1182 +      // Clear the source_region field for each region in the space.
  5.1183        HeapWord* const new_top = _space_info[id].new_top();
  5.1184 -      HeapWord* const clear_end = _summary_data.chunk_align_up(new_top);
  5.1185 -      ChunkData* beg_chunk = _summary_data.addr_to_chunk_ptr(space->bottom());
  5.1186 -      ChunkData* end_chunk = _summary_data.addr_to_chunk_ptr(clear_end);
  5.1187 -      while (beg_chunk < end_chunk) {
  5.1188 -        beg_chunk->set_source_chunk(0);
  5.1189 -        ++beg_chunk;
  5.1190 +      HeapWord* const clear_end = _summary_data.region_align_up(new_top);
  5.1191 +      RegionData* beg_region =
  5.1192 +        _summary_data.addr_to_region_ptr(space->bottom());
  5.1193 +      RegionData* end_region = _summary_data.addr_to_region_ptr(clear_end);
  5.1194 +      while (beg_region < end_region) {
  5.1195 +        beg_region->set_source_region(0);
  5.1196 +        ++beg_region;
  5.1197        }
  5.1198  
  5.1199        // Reset the new_top value for the space.
  5.1200 @@ -1574,13 +1581,13 @@
  5.1201      }
  5.1202    }
  5.1203  
  5.1204 -  // Fill in the block data after any changes to the chunks have
  5.1205 +  // Fill in the block data after any changes to the regions have
  5.1206    // been made.
  5.1207  #ifdef  ASSERT
  5.1208    summarize_blocks(cm, perm_space_id);
  5.1209    summarize_blocks(cm, old_space_id);
  5.1210  #else
  5.1211 -  if (!UseParallelOldGCChunkPointerCalc) {
  5.1212 +  if (!UseParallelOldGCRegionPointerCalc) {
  5.1213      summarize_blocks(cm, perm_space_id);
  5.1214      summarize_blocks(cm, old_space_id);
  5.1215    }
  5.1216 @@ -1589,7 +1596,7 @@
  5.1217    if (TraceParallelOldGCSummaryPhase) {
  5.1218      tty->print_cr("summary_phase:  after final summarization");
  5.1219      Universe::print();
  5.1220 -    NOT_PRODUCT(print_chunk_ranges());
  5.1221 +    NOT_PRODUCT(print_region_ranges());
  5.1222      if (Verbose) {
  5.1223        NOT_PRODUCT(print_generic_summary_data(_summary_data, _space_info));
  5.1224      }
  5.1225 @@ -1598,7 +1605,7 @@
  5.1226  
  5.1227  // Fill in the BlockData.
  5.1228  // Iterate over the spaces and within each space iterate over
  5.1229 -// the chunks and fill in the BlockData for each chunk.
  5.1230 +// the regions and fill in the BlockData for each region.
  5.1231  
  5.1232  void PSParallelCompact::summarize_blocks(ParCompactionManager* cm,
  5.1233                                           SpaceId first_compaction_space_id) {
  5.1234 @@ -1607,40 +1614,41 @@
  5.1235    for (SpaceId cur_space_id = first_compaction_space_id;
  5.1236         cur_space_id != last_space_id;
  5.1237         cur_space_id = next_compaction_space_id(cur_space_id)) {
  5.1238 -    // Iterate over the chunks in the space
  5.1239 -    size_t start_chunk_index =
  5.1240 -      _summary_data.addr_to_chunk_idx(space(cur_space_id)->bottom());
  5.1241 +    // Iterate over the regions in the space
  5.1242 +    size_t start_region_index =
  5.1243 +      _summary_data.addr_to_region_idx(space(cur_space_id)->bottom());
  5.1244      BitBlockUpdateClosure bbu(mark_bitmap(),
  5.1245                                cm,
  5.1246 -                              start_chunk_index);
  5.1247 +                              start_region_index);
  5.1248      // Iterate over blocks.
  5.1249 -    for (size_t chunk_index =  start_chunk_index;
  5.1250 -         chunk_index < _summary_data.chunk_count() &&
  5.1251 -         _summary_data.chunk_to_addr(chunk_index) < space(cur_space_id)->top();
  5.1252 -         chunk_index++) {
  5.1253 -
  5.1254 -      // Reset the closure for the new chunk.  Note that the closure
  5.1255 -      // maintains some data that does not get reset for each chunk
  5.1256 +    for (size_t region_index =  start_region_index;
  5.1257 +         region_index < _summary_data.region_count() &&
  5.1258 +           _summary_data.region_to_addr(region_index) <
  5.1259 +           space(cur_space_id)->top();
  5.1260 +         region_index++) {
  5.1261 +
  5.1262 +      // Reset the closure for the new region.  Note that the closure
  5.1263 +      // maintains some data that does not get reset for each region
   5.1264       // so a new instance of the closure is not appropriate.
  5.1265 -      bbu.reset_chunk(chunk_index);
  5.1266 +      bbu.reset_region(region_index);
  5.1267  
  5.1268        // Start the iteration with the first live object.  This
  5.1269 -      // may return the end of the chunk.  That is acceptable since
  5.1270 +      // may return the end of the region.  That is acceptable since
  5.1271        // it will properly limit the iterations.
  5.1272        ParMarkBitMap::idx_t left_offset = mark_bitmap()->addr_to_bit(
  5.1273 -        _summary_data.first_live_or_end_in_chunk(chunk_index));
  5.1274 -
  5.1275 -      // End the iteration at the end of the chunk.
  5.1276 -      HeapWord* chunk_addr = _summary_data.chunk_to_addr(chunk_index);
  5.1277 -      HeapWord* chunk_end = chunk_addr + ParallelCompactData::ChunkSize;
  5.1278 +        _summary_data.first_live_or_end_in_region(region_index));
  5.1279 +
  5.1280 +      // End the iteration at the end of the region.
  5.1281 +      HeapWord* region_addr = _summary_data.region_to_addr(region_index);
  5.1282 +      HeapWord* region_end = region_addr + ParallelCompactData::RegionSize;
  5.1283        ParMarkBitMap::idx_t right_offset =
  5.1284 -        mark_bitmap()->addr_to_bit(chunk_end);
  5.1285 +        mark_bitmap()->addr_to_bit(region_end);
  5.1286  
   5.1287       // Blocks that have no objects starting in them can be
  5.1288        // skipped because their data will never be used.
  5.1289        if (left_offset < right_offset) {
  5.1290  
  5.1291 -        // Iterate through the objects in the chunk.
  5.1292 +        // Iterate through the objects in the region.
  5.1293          ParMarkBitMap::idx_t last_offset =
  5.1294            mark_bitmap()->pair_iterate(&bbu, left_offset, right_offset);
  5.1295  
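
A standalone sketch (not part of this changeset) of the left_offset/right_offset bounds above: the per-region iteration starts at the first object-start bit in the region (which may be the region end) and stops at the region end.  The bitmap is modelled as one start bit per word:

    #include <algorithm>
    #include <cstddef>
    #include <utility>
    #include <vector>

    const size_t RegionSize = 4096;   // words per region (assumption)

    // [first_live, region_end) as bit indices; an empty range means no object
    // starts in the region and its blocks can be skipped.
    std::pair<size_t, size_t>
    live_range_in_region(const std::vector<bool>& start_bits, size_t region_index) {
      const size_t beg = region_index * RegionSize;
      const size_t end = std::min(beg + RegionSize, start_bits.size());
      size_t first_live = beg;
      while (first_live < end && !start_bits[first_live]) ++first_live;
      return std::make_pair(first_live, end);    // first_live may equal end
    }
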
  5.1296 @@ -1649,7 +1657,7 @@
  5.1297          // is then the offset for the last start bit.  In this situation
  5.1298          // the "offset" field for the next block to the right (_cur_block + 1)
   5.1299         // will not have been updated although there may be live data
  5.1300 -        // to the left of the chunk.
  5.1301 +        // to the left of the region.
  5.1302  
  5.1303          size_t cur_block_plus_1 = bbu.cur_block() + 1;
  5.1304          HeapWord* cur_block_plus_1_addr =
  5.1305 @@ -1669,23 +1677,23 @@
  5.1306   #else
  5.1307          // The current block has already been updated.  The only block
  5.1308          // that remains to be updated is the block where the last
  5.1309 -        // object in the chunk starts.
  5.1310 +        // object in the region starts.
  5.1311          size_t last_block = _summary_data.addr_to_block_idx(last_offset_addr);
  5.1312   #endif
  5.1313          assert_bit_is_start(last_offset);
  5.1314          assert((last_block == _summary_data.block_count()) ||
  5.1315               (_summary_data.block(last_block)->raw_offset() == 0),
  5.1316            "Should not have been set");
  5.1317 -        // Is the last block still in the current chunk?  If still
  5.1318 -        // in this chunk, update the last block (the counting that
  5.1319 +        // Is the last block still in the current region?  If still
  5.1320 +        // in this region, update the last block (the counting that
  5.1321          // included the current block is meant for the offset of the last
  5.1322 -        // block).  If not in this chunk, do nothing.  Should not
  5.1323 -        // update a block in the next chunk.
  5.1324 -        if (ParallelCompactData::chunk_contains_block(bbu.chunk_index(),
  5.1325 -                                                      last_block)) {
  5.1326 +        // block).  If not in this region, do nothing.  Should not
  5.1327 +        // update a block in the next region.
  5.1328 +        if (ParallelCompactData::region_contains_block(bbu.region_index(),
  5.1329 +                                                       last_block)) {
  5.1330            if (last_offset < right_offset) {
  5.1331 -            // The last object started in this chunk but ends beyond
  5.1332 -            // this chunk.  Update the block for this last object.
  5.1333 +            // The last object started in this region but ends beyond
  5.1334 +            // this region.  Update the block for this last object.
  5.1335              assert(mark_bitmap()->is_marked(last_offset), "Should be marked");
  5.1336              // No end bit was found.  The closure takes care of
  5.1337              // the cases where
  5.1338 @@ -1693,7 +1701,7 @@
   5.1339             //   an object starts and ends in the next block
  5.1340              // It does not handle the case where an object is
  5.1341              // the first object in a later block and extends
  5.1342 -            // past the end of the chunk (i.e., the closure
  5.1343 +            // past the end of the region (i.e., the closure
  5.1344              // only handles complete objects that are in the range
  5.1345              // it is given).  That object is handed back here
  5.1346              // for any special consideration necessary.
  5.1347 @@ -1709,7 +1717,7 @@
  5.1348              // the AA+1 is the trigger that updates AA.  Objects are being
   5.1349             // counted in the current block for updating a following
   5.1350             // block.  An object may start in a later
  5.1351 -            // block but may extend beyond the last block in the chunk.
  5.1352 +            // block but may extend beyond the last block in the region.
  5.1353              // Updates are only done when the end of an object has been
  5.1354              // found. If the last object (covered by block L) starts
  5.1355              // beyond the current block, then no object ends in L (otherwise
  5.1356 @@ -1717,7 +1725,7 @@
  5.1357              // a start bit.
  5.1358              //
  5.1359              // Else the last objects start in the current block and ends
  5.1360 -            // beyond the chunk.  The current block has already been
  5.1361 +            // beyond the region.  The current block has already been
  5.1362              // updated and there is no later block (with an object
  5.1363              // starting in it) that needs to be updated.
  5.1364              //
  5.1365 @@ -1728,14 +1736,14 @@
  5.1366                //   The start of the object is on a later block
  5.1367                // (to the right of the current block and there are no
  5.1368                // complete live objects to the left of this last object
  5.1369 -              // within the chunk.
  5.1370 +              // within the region.
  5.1371                //   The first bit in the block is for the start of the
  5.1372                // last object.
  5.1373                _summary_data.block(last_block)->set_start_bit_offset(
  5.1374                  bbu.live_data_left());
  5.1375              } else {
  5.1376                //   The start of the last object was found in
  5.1377 -              // the current chunk (which has already
  5.1378 +              // the current region (which has already
  5.1379                // been updated).
  5.1380                assert(bbu.cur_block() ==
  5.1381                        _summary_data.addr_to_block_idx(last_offset_addr),
  5.1382 @@ -1743,15 +1751,15 @@
  5.1383              }
  5.1384  #ifdef ASSERT
  5.1385              // Is there enough block information to find this object?
  5.1386 -            // The destination of the chunk has not been set so the
  5.1387 +            // The destination of the region has not been set so the
  5.1388              // values returned by calc_new_pointer() and
  5.1389              // block_calc_new_pointer() will only be
  5.1390              // offsets.  But they should agree.
  5.1391 -            HeapWord* moved_obj_with_chunks =
  5.1392 -              _summary_data.chunk_calc_new_pointer(last_offset_addr);
  5.1393 +            HeapWord* moved_obj_with_regions =
  5.1394 +              _summary_data.region_calc_new_pointer(last_offset_addr);
  5.1395              HeapWord* moved_obj_with_blocks =
  5.1396                _summary_data.calc_new_pointer(last_offset_addr);
  5.1397 -            assert(moved_obj_with_chunks == moved_obj_with_blocks,
  5.1398 +            assert(moved_obj_with_regions == moved_obj_with_blocks,
  5.1399                "Block calculation is wrong");
  5.1400  #endif
  5.1401            } else if (last_block < _summary_data.block_count()) {
  5.1402 @@ -1764,38 +1772,38 @@
  5.1403  #ifdef ASSERT
  5.1404          // Is there enough block information to find this object?
  5.1405            HeapWord* left_offset_addr = mark_bitmap()->bit_to_addr(left_offset);
  5.1406 -        HeapWord* moved_obj_with_chunks =
  5.1407 +        HeapWord* moved_obj_with_regions =
  5.1408            _summary_data.calc_new_pointer(left_offset_addr);
  5.1409          HeapWord* moved_obj_with_blocks =
  5.1410            _summary_data.calc_new_pointer(left_offset_addr);
  5.1411 -          assert(moved_obj_with_chunks == moved_obj_with_blocks,
  5.1412 +          assert(moved_obj_with_regions == moved_obj_with_blocks,
  5.1413            "Block calculation is wrong");
  5.1414  #endif
  5.1415  
  5.1416 -        // Is there another block after the end of this chunk?
  5.1417 +        // Is there another block after the end of this region?
  5.1418  #ifdef ASSERT
  5.1419          if (last_block < _summary_data.block_count()) {
  5.1420          // No object may have been found in a block.  If that
  5.1421 -        // block is at the end of the chunk, the iteration will
  5.1422 +        // block is at the end of the region, the iteration will
  5.1423          // terminate without incrementing the current block so
  5.1424          // that the current block is not the last block in the
  5.1425 -        // chunk.  That situation precludes asserting that the
  5.1426 -        // current block is the last block in the chunk.  Assert
  5.1427 +        // region.  That situation precludes asserting that the
  5.1428 +        // current block is the last block in the region.  Assert
  5.1429          // the lesser condition that the current block does not
  5.1430 -        // exceed the chunk.
  5.1431 +        // exceed the region.
  5.1432            assert(_summary_data.block_to_addr(last_block) <=
  5.1433 -               (_summary_data.chunk_to_addr(chunk_index) +
  5.1434 -                 ParallelCompactData::ChunkSize),
  5.1435 -              "Chunk and block inconsistency");
  5.1436 +               (_summary_data.region_to_addr(region_index) +
  5.1437 +                 ParallelCompactData::RegionSize),
  5.1438 +              "Region and block inconsistency");
  5.1439            assert(last_offset <= right_offset, "Iteration over ran end");
  5.1440          }
  5.1441  #endif
  5.1442        }
  5.1443  #ifdef ASSERT
  5.1444        if (PrintGCDetails && Verbose) {
  5.1445 -        if (_summary_data.chunk(chunk_index)->partial_obj_size() == 1) {
  5.1446 +        if (_summary_data.region(region_index)->partial_obj_size() == 1) {
  5.1447            size_t first_block =
  5.1448 -            chunk_index / ParallelCompactData::BlocksPerChunk;
  5.1449 +            region_index / ParallelCompactData::BlocksPerRegion;
  5.1450            gclog_or_tty->print_cr("first_block " PTR_FORMAT
  5.1451              " _offset " PTR_FORMAT
  5.1452              "_first_is_start_bit %d",
  5.1453 @@ -1845,18 +1853,18 @@
  5.1454    }
  5.1455  }
  5.1456  
  5.1457 -bool ParallelCompactData::chunk_contains(size_t chunk_index, HeapWord* addr) {
  5.1458 -  size_t addr_chunk_index = addr_to_chunk_idx(addr);
  5.1459 -  return chunk_index == addr_chunk_index;
  5.1460 +bool ParallelCompactData::region_contains(size_t region_index, HeapWord* addr) {
  5.1461 +  size_t addr_region_index = addr_to_region_idx(addr);
  5.1462 +  return region_index == addr_region_index;
  5.1463  }
  5.1464  
  5.1465 -bool ParallelCompactData::chunk_contains_block(size_t chunk_index,
  5.1466 -                                               size_t block_index) {
  5.1467 -  size_t first_block_in_chunk = chunk_index * BlocksPerChunk;
  5.1468 -  size_t last_block_in_chunk = (chunk_index + 1) * BlocksPerChunk - 1;
  5.1469 -
  5.1470 -  return (first_block_in_chunk <= block_index) &&
  5.1471 -         (block_index <= last_block_in_chunk);
  5.1472 +bool ParallelCompactData::region_contains_block(size_t region_index,
  5.1473 +                                                size_t block_index) {
  5.1474 +  size_t first_block_in_region = region_index * BlocksPerRegion;
  5.1475 +  size_t last_block_in_region = (region_index + 1) * BlocksPerRegion - 1;
  5.1476 +
  5.1477 +  return (first_block_in_region <= block_index) &&
  5.1478 +         (block_index <= last_block_in_region);
  5.1479  }
  5.1480  
  5.1481  // This method contains no policy. You should probably
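
The rewritten predicate above is plain index arithmetic; a standalone sketch (not part of this changeset) with an assumed BlocksPerRegion of 32:

    #include <cassert>
    #include <cstddef>

    const size_t BlocksPerRegion = 32;   // assumption; really RegionSize / BlockSize

    bool region_contains_block(size_t region_index, size_t block_index) {
      const size_t first_block = region_index * BlocksPerRegion;
      const size_t last_block  = (region_index + 1) * BlocksPerRegion - 1;
      return first_block <= block_index && block_index <= last_block;
    }

    int main() {
      assert( region_contains_block(2, 64));   // first block of region 2
      assert( region_contains_block(2, 95));   // last block of region 2
      assert(!region_contains_block(2, 96));   // first block of region 3
      return 0;
    }
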
  5.1482 @@ -2205,7 +2213,7 @@
  5.1483  
  5.1484    ParallelScavengeHeap* heap = gc_heap();
  5.1485    uint parallel_gc_threads = heap->gc_task_manager()->workers();
  5.1486 -  TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
  5.1487 +  TaskQueueSetSuper* qset = ParCompactionManager::region_array();
  5.1488    ParallelTaskTerminator terminator(parallel_gc_threads, qset);
  5.1489  
  5.1490    PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
  5.1491 @@ -2343,8 +2351,9 @@
  5.1492    move_and_update(cm, perm_space_id);
  5.1493  }
  5.1494  
  5.1495 -void PSParallelCompact::enqueue_chunk_draining_tasks(GCTaskQueue* q,
  5.1496 -                                                     uint parallel_gc_threads) {
  5.1497 +void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
  5.1498 +                                                      uint parallel_gc_threads)
  5.1499 +{
  5.1500    TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
  5.1501  
  5.1502    const unsigned int task_count = MAX2(parallel_gc_threads, 1U);
  5.1503 @@ -2352,13 +2361,13 @@
  5.1504      q->enqueue(new DrainStacksCompactionTask());
  5.1505    }
  5.1506  
  5.1507 -  // Find all chunks that are available (can be filled immediately) and
  5.1508 +  // Find all regions that are available (can be filled immediately) and
  5.1509    // distribute them to the thread stacks.  The iteration is done in reverse
  5.1510 -  // order (high to low) so the chunks will be removed in ascending order.
  5.1511 +  // order (high to low) so the regions will be removed in ascending order.
  5.1512  
  5.1513    const ParallelCompactData& sd = PSParallelCompact::summary_data();
  5.1514  
  5.1515 -  size_t fillable_chunks = 0;   // A count for diagnostic purposes.
  5.1516 +  size_t fillable_regions = 0;   // A count for diagnostic purposes.
  5.1517    unsigned int which = 0;       // The worker thread number.
  5.1518  
  5.1519    for (unsigned int id = to_space_id; id > perm_space_id; --id) {
  5.1520 @@ -2366,25 +2375,26 @@
  5.1521      MutableSpace* const space = space_info->space();
  5.1522      HeapWord* const new_top = space_info->new_top();
  5.1523  
  5.1524 -    const size_t beg_chunk = sd.addr_to_chunk_idx(space_info->dense_prefix());
  5.1525 -    const size_t end_chunk = sd.addr_to_chunk_idx(sd.chunk_align_up(new_top));
  5.1526 -    assert(end_chunk > 0, "perm gen cannot be empty");
  5.1527 -
  5.1528 -    for (size_t cur = end_chunk - 1; cur >= beg_chunk; --cur) {
  5.1529 -      if (sd.chunk(cur)->claim_unsafe()) {
  5.1530 +    const size_t beg_region = sd.addr_to_region_idx(space_info->dense_prefix());
  5.1531 +    const size_t end_region =
  5.1532 +      sd.addr_to_region_idx(sd.region_align_up(new_top));
  5.1533 +    assert(end_region > 0, "perm gen cannot be empty");
  5.1534 +
  5.1535 +    for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
  5.1536 +      if (sd.region(cur)->claim_unsafe()) {
  5.1537          ParCompactionManager* cm = ParCompactionManager::manager_array(which);
  5.1538          cm->save_for_processing(cur);
  5.1539  
  5.1540          if (TraceParallelOldGCCompactionPhase && Verbose) {
  5.1541 -          const size_t count_mod_8 = fillable_chunks & 7;
  5.1542 +          const size_t count_mod_8 = fillable_regions & 7;
  5.1543            if (count_mod_8 == 0) gclog_or_tty->print("fillable: ");
  5.1544            gclog_or_tty->print(" " SIZE_FORMAT_W(7), cur);
  5.1545            if (count_mod_8 == 7) gclog_or_tty->cr();
  5.1546          }
  5.1547  
  5.1548 -        NOT_PRODUCT(++fillable_chunks;)
  5.1549 -
  5.1550 -        // Assign chunks to threads in round-robin fashion.
  5.1551 +        NOT_PRODUCT(++fillable_regions;)
  5.1552 +
  5.1553 +        // Assign regions to threads in round-robin fashion.
  5.1554          if (++which == task_count) {
  5.1555            which = 0;
  5.1556          }
  5.1557 @@ -2393,8 +2403,8 @@
  5.1558    }
  5.1559  
  5.1560    if (TraceParallelOldGCCompactionPhase) {
  5.1561 -    if (Verbose && (fillable_chunks & 7) != 0) gclog_or_tty->cr();
  5.1562 -    gclog_or_tty->print_cr("%u initially fillable chunks", fillable_chunks);
  5.1563 +    if (Verbose && (fillable_regions & 7) != 0) gclog_or_tty->cr();
  5.1564 +    gclog_or_tty->print_cr("%u initially fillable regions", fillable_regions);
  5.1565    }
  5.1566  }
  5.1567  
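
A standalone sketch (not part of this changeset) of the distribution policy in enqueue_region_draining_tasks(): walk region indices from high to low, hand each one to the next worker round-robin, so each worker later pops its stack in ascending region order:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    std::vector<std::vector<size_t> >
    distribute_regions(size_t beg_region, size_t end_region, size_t workers) {
      assert(workers > 0);
      std::vector<std::vector<size_t> > stacks(workers);
      size_t which = 0;
      for (size_t cur = end_region; cur-- > beg_region; ) {  // high to low
        stacks[which].push_back(cur);       // stand-in for save_for_processing()
        if (++which == workers) which = 0;  // round-robin to the next worker
      }
      return stacks;   // popping from the back yields ascending region indices
    }
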
  5.1568 @@ -2407,7 +2417,7 @@
  5.1569    ParallelCompactData& sd = PSParallelCompact::summary_data();
  5.1570  
  5.1571    // Iterate over all the spaces adding tasks for updating
  5.1572 -  // chunks in the dense prefix.  Assume that 1 gc thread
  5.1573 +  // regions in the dense prefix.  Assume that 1 gc thread
  5.1574    // will work on opening the gaps and the remaining gc threads
  5.1575    // will work on the dense prefix.
  5.1576    SpaceId space_id = old_space_id;
  5.1577 @@ -2421,30 +2431,31 @@
  5.1578        continue;
  5.1579      }
  5.1580  
  5.1581 -    // The dense prefix is before this chunk.
  5.1582 -    size_t chunk_index_end_dense_prefix =
  5.1583 -        sd.addr_to_chunk_idx(dense_prefix_end);
  5.1584 -    ChunkData* const dense_prefix_cp = sd.chunk(chunk_index_end_dense_prefix);
  5.1585 +    // The dense prefix is before this region.
  5.1586 +    size_t region_index_end_dense_prefix =
  5.1587 +        sd.addr_to_region_idx(dense_prefix_end);
  5.1588 +    RegionData* const dense_prefix_cp =
  5.1589 +      sd.region(region_index_end_dense_prefix);
  5.1590      assert(dense_prefix_end == space->end() ||
  5.1591             dense_prefix_cp->available() ||
  5.1592             dense_prefix_cp->claimed(),
  5.1593 -           "The chunk after the dense prefix should always be ready to fill");
  5.1594 -
  5.1595 -    size_t chunk_index_start = sd.addr_to_chunk_idx(space->bottom());
  5.1596 +           "The region after the dense prefix should always be ready to fill");
  5.1597 +
  5.1598 +    size_t region_index_start = sd.addr_to_region_idx(space->bottom());
  5.1599  
  5.1600      // Is there dense prefix work?
  5.1601 -    size_t total_dense_prefix_chunks =
  5.1602 -      chunk_index_end_dense_prefix - chunk_index_start;
  5.1603 -    // How many chunks of the dense prefix should be given to
  5.1604 +    size_t total_dense_prefix_regions =
  5.1605 +      region_index_end_dense_prefix - region_index_start;
  5.1606 +    // How many regions of the dense prefix should be given to
  5.1607      // each thread?
  5.1608 -    if (total_dense_prefix_chunks > 0) {
  5.1609 +    if (total_dense_prefix_regions > 0) {
  5.1610        uint tasks_for_dense_prefix = 1;
  5.1611        if (UseParallelDensePrefixUpdate) {
  5.1612 -        if (total_dense_prefix_chunks <=
  5.1613 +        if (total_dense_prefix_regions <=
  5.1614              (parallel_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)) {
  5.1615            // Don't over partition.  This assumes that
  5.1616            // PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING is a small integer value
  5.1617 -          // so there are not many chunks to process.
  5.1618 +          // so there are not many regions to process.
  5.1619            tasks_for_dense_prefix = parallel_gc_threads;
  5.1620          } else {
  5.1621            // Over partition
  5.1622 @@ -2452,50 +2463,50 @@
  5.1623              PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING;
  5.1624          }
  5.1625        }
  5.1626 -      size_t chunks_per_thread = total_dense_prefix_chunks /
  5.1627 +      size_t regions_per_thread = total_dense_prefix_regions /
  5.1628          tasks_for_dense_prefix;
  5.1629 -      // Give each thread at least 1 chunk.
  5.1630 -      if (chunks_per_thread == 0) {
  5.1631 -        chunks_per_thread = 1;
  5.1632 +      // Give each thread at least 1 region.
  5.1633 +      if (regions_per_thread == 0) {
  5.1634 +        regions_per_thread = 1;
  5.1635        }
  5.1636  
  5.1637        for (uint k = 0; k < tasks_for_dense_prefix; k++) {
  5.1638 -        if (chunk_index_start >= chunk_index_end_dense_prefix) {
  5.1639 +        if (region_index_start >= region_index_end_dense_prefix) {
  5.1640            break;
  5.1641          }
  5.1642 -        // chunk_index_end is not processed
  5.1643 -        size_t chunk_index_end = MIN2(chunk_index_start + chunks_per_thread,
  5.1644 -                                      chunk_index_end_dense_prefix);
  5.1645 +        // region_index_end is not processed
  5.1646 +        size_t region_index_end = MIN2(region_index_start + regions_per_thread,
  5.1647 +                                       region_index_end_dense_prefix);
  5.1648          q->enqueue(new UpdateDensePrefixTask(
  5.1649                                   space_id,
  5.1650 -                                 chunk_index_start,
  5.1651 -                                 chunk_index_end));
  5.1652 -        chunk_index_start = chunk_index_end;
  5.1653 +                                 region_index_start,
  5.1654 +                                 region_index_end));
  5.1655 +        region_index_start = region_index_end;
  5.1656        }
  5.1657      }
  5.1658      // This gets any part of the dense prefix that did not
  5.1659      // fit evenly.
  5.1660 -    if (chunk_index_start < chunk_index_end_dense_prefix) {
  5.1661 +    if (region_index_start < region_index_end_dense_prefix) {
  5.1662        q->enqueue(new UpdateDensePrefixTask(
  5.1663                                   space_id,
  5.1664 -                                 chunk_index_start,
  5.1665 -                                 chunk_index_end_dense_prefix));
  5.1666 +                                 region_index_start,
  5.1667 +                                 region_index_end_dense_prefix));
  5.1668      }
  5.1669      space_id = next_compaction_space_id(space_id);
  5.1670    }  // End tasks for dense prefix
  5.1671  }
  5.1672  
  5.1673 -void PSParallelCompact::enqueue_chunk_stealing_tasks(
  5.1674 +void PSParallelCompact::enqueue_region_stealing_tasks(
  5.1675                                       GCTaskQueue* q,
  5.1676                                       ParallelTaskTerminator* terminator_ptr,
  5.1677                                       uint parallel_gc_threads) {
  5.1678    TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
  5.1679  
  5.1680 -  // Once a thread has drained it's stack, it should try to steal chunks from
   5.1681 +  // Once a thread has drained its stack, it should try to steal regions from
  5.1682    // other threads.
  5.1683    if (parallel_gc_threads > 1) {
  5.1684      for (uint j = 0; j < parallel_gc_threads; j++) {
  5.1685 -      q->enqueue(new StealChunkCompactionTask(terminator_ptr));
  5.1686 +      q->enqueue(new StealRegionCompactionTask(terminator_ptr));
  5.1687      }
  5.1688    }
  5.1689  }
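
A short, self-contained C++ sketch of the dense prefix partitioning arithmetic
above, assuming UseParallelDensePrefixUpdate is enabled.  OverPartitioningFactor
and partition_dense_prefix() are illustrative stand-ins for
PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING and the UpdateDensePrefixTask enqueue
loop; this is not HotSpot code.

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING.
    static const size_t OverPartitioningFactor = 4;

    // Split the dense prefix regions [region_index_start, region_index_end)
    // into at most (threads * OverPartitioningFactor) slices, giving every
    // slice at least one region and letting a final slice absorb any regions
    // that did not fit evenly.
    std::vector<std::pair<size_t, size_t> >
    partition_dense_prefix(size_t region_index_start,
                           size_t region_index_end,
                           size_t threads) {
      std::vector<std::pair<size_t, size_t> > slices;
      const size_t total = region_index_end - region_index_start;
      if (total == 0) return slices;

      size_t tasks = threads;                        // don't over partition...
      if (total > threads * OverPartitioningFactor) {
        tasks = threads * OverPartitioningFactor;    // ...unless there is plenty
      }
      size_t per_task = std::max(total / tasks, (size_t)1);  // >= 1 region each

      size_t cur = region_index_start;
      for (size_t k = 0; k < tasks && cur < region_index_end; ++k) {
        const size_t end = std::min(cur + per_task, region_index_end);
        slices.push_back(std::make_pair(cur, end));  // [cur, end) for one task
        cur = end;
      }
      if (cur < region_index_end) {                  // leftover tail task
        slices.push_back(std::make_pair(cur, region_index_end));
      }
      return slices;
    }

    int main() {
      // e.g. 10 dense prefix regions spread over 3 GC threads
      std::vector<std::pair<size_t, size_t> > s = partition_dense_prefix(0, 10, 3);
      for (size_t i = 0; i < s.size(); ++i) {
        std::printf("task %zu: regions [%zu, %zu)\n", i, s[i].first, s[i].second);
      }
      return 0;
    }

With 10 regions and 3 threads this yields three 3-region tasks plus a one-region
tail task, the "did not fit evenly" case handled above.
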
  5.1690 @@ -2510,13 +2521,13 @@
  5.1691    PSOldGen* old_gen = heap->old_gen();
  5.1692    old_gen->start_array()->reset();
  5.1693    uint parallel_gc_threads = heap->gc_task_manager()->workers();
  5.1694 -  TaskQueueSetSuper* qset = ParCompactionManager::chunk_array();
  5.1695 +  TaskQueueSetSuper* qset = ParCompactionManager::region_array();
  5.1696    ParallelTaskTerminator terminator(parallel_gc_threads, qset);
  5.1697  
  5.1698    GCTaskQueue* q = GCTaskQueue::create();
  5.1699 -  enqueue_chunk_draining_tasks(q, parallel_gc_threads);
  5.1700 +  enqueue_region_draining_tasks(q, parallel_gc_threads);
  5.1701    enqueue_dense_prefix_tasks(q, parallel_gc_threads);
  5.1702 -  enqueue_chunk_stealing_tasks(q, &terminator, parallel_gc_threads);
  5.1703 +  enqueue_region_stealing_tasks(q, &terminator, parallel_gc_threads);
  5.1704  
  5.1705    {
  5.1706      TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
  5.1707 @@ -2532,9 +2543,9 @@
  5.1708      WaitForBarrierGCTask::destroy(fin);
  5.1709  
  5.1710  #ifdef  ASSERT
  5.1711 -    // Verify that all chunks have been processed before the deferred updates.
  5.1712 +    // Verify that all regions have been processed before the deferred updates.
  5.1713      // Note that perm_space_id is skipped; this type of verification is not
  5.1714 -    // valid until the perm gen is compacted by chunks.
  5.1715 +    // valid until the perm gen is compacted by regions.
  5.1716      for (unsigned int id = old_space_id; id < last_space_id; ++id) {
  5.1717        verify_complete(SpaceId(id));
  5.1718      }
  5.1719 @@ -2553,42 +2564,42 @@
  5.1720  
  5.1721  #ifdef  ASSERT
  5.1722  void PSParallelCompact::verify_complete(SpaceId space_id) {
  5.1723 -  // All Chunks between space bottom() to new_top() should be marked as filled
  5.1724 -  // and all Chunks between new_top() and top() should be available (i.e.,
   5.1725 +  // All Regions between space bottom() and new_top() should be marked as filled
  5.1726 +  // and all Regions between new_top() and top() should be available (i.e.,
  5.1727    // should have been emptied).
  5.1728    ParallelCompactData& sd = summary_data();
  5.1729    SpaceInfo si = _space_info[space_id];
  5.1730 -  HeapWord* new_top_addr = sd.chunk_align_up(si.new_top());
  5.1731 -  HeapWord* old_top_addr = sd.chunk_align_up(si.space()->top());
  5.1732 -  const size_t beg_chunk = sd.addr_to_chunk_idx(si.space()->bottom());
  5.1733 -  const size_t new_top_chunk = sd.addr_to_chunk_idx(new_top_addr);
  5.1734 -  const size_t old_top_chunk = sd.addr_to_chunk_idx(old_top_addr);
  5.1735 +  HeapWord* new_top_addr = sd.region_align_up(si.new_top());
  5.1736 +  HeapWord* old_top_addr = sd.region_align_up(si.space()->top());
  5.1737 +  const size_t beg_region = sd.addr_to_region_idx(si.space()->bottom());
  5.1738 +  const size_t new_top_region = sd.addr_to_region_idx(new_top_addr);
  5.1739 +  const size_t old_top_region = sd.addr_to_region_idx(old_top_addr);
  5.1740  
  5.1741    bool issued_a_warning = false;
  5.1742  
  5.1743 -  size_t cur_chunk;
  5.1744 -  for (cur_chunk = beg_chunk; cur_chunk < new_top_chunk; ++cur_chunk) {
  5.1745 -    const ChunkData* const c = sd.chunk(cur_chunk);
  5.1746 +  size_t cur_region;
  5.1747 +  for (cur_region = beg_region; cur_region < new_top_region; ++cur_region) {
  5.1748 +    const RegionData* const c = sd.region(cur_region);
  5.1749      if (!c->completed()) {
  5.1750 -      warning("chunk " SIZE_FORMAT " not filled:  "
  5.1751 +      warning("region " SIZE_FORMAT " not filled:  "
  5.1752                "destination_count=" SIZE_FORMAT,
  5.1753 -              cur_chunk, c->destination_count());
  5.1754 +              cur_region, c->destination_count());
  5.1755        issued_a_warning = true;
  5.1756      }
  5.1757    }
  5.1758  
  5.1759 -  for (cur_chunk = new_top_chunk; cur_chunk < old_top_chunk; ++cur_chunk) {
  5.1760 -    const ChunkData* const c = sd.chunk(cur_chunk);
  5.1761 +  for (cur_region = new_top_region; cur_region < old_top_region; ++cur_region) {
  5.1762 +    const RegionData* const c = sd.region(cur_region);
  5.1763      if (!c->available()) {
  5.1764 -      warning("chunk " SIZE_FORMAT " not empty:   "
  5.1765 +      warning("region " SIZE_FORMAT " not empty:   "
  5.1766                "destination_count=" SIZE_FORMAT,
  5.1767 -              cur_chunk, c->destination_count());
  5.1768 +              cur_region, c->destination_count());
  5.1769        issued_a_warning = true;
  5.1770      }
  5.1771    }
  5.1772  
  5.1773    if (issued_a_warning) {
  5.1774 -    print_chunk_ranges();
  5.1775 +    print_region_ranges();
  5.1776    }
  5.1777  }
  5.1778  #endif  // #ifdef ASSERT
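
The invariant checked by verify_complete() can be stated outside of HotSpot:
every region below new_top must have been filled, and every region from new_top
up to the old top must have been emptied.  A minimal sketch follows, with
MiniRegion as a hypothetical stand-in for ParallelCompactData::RegionData.

    #include <cstdio>
    #include <vector>

    struct MiniRegion {
      bool completed;   // region has been filled with its final contents
      bool available;   // region has been emptied (no data left to copy out)
    };

    // Regions [0, new_top_region) must be completed; regions
    // [new_top_region, old_top_region) must be available.
    bool verify_complete(const std::vector<MiniRegion>& regions,
                         size_t new_top_region, size_t old_top_region) {
      bool ok = true;
      for (size_t i = 0; i < new_top_region; ++i) {
        if (!regions[i].completed) {
          std::printf("region %zu not filled\n", i);
          ok = false;
        }
      }
      for (size_t i = new_top_region; i < old_top_region; ++i) {
        if (!regions[i].available) {
          std::printf("region %zu not empty\n", i);
          ok = false;
        }
      }
      return ok;
    }

    int main() {
      std::vector<MiniRegion> r(4);               // members zero-initialized
      r[0].completed = r[1].completed = true;     // compacted data lives here
      r[2].available = r[3].available = true;     // everything above new_top moved
      return verify_complete(r, 2, 4) ? 0 : 1;
    }
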
  5.1779 @@ -2789,46 +2800,47 @@
  5.1780  }
  5.1781  #endif //VALIDATE_MARK_SWEEP
  5.1782  
  5.1783 -// Update interior oops in the ranges of chunks [beg_chunk, end_chunk).
  5.1784 +// Update interior oops in the ranges of regions [beg_region, end_region).
  5.1785  void
  5.1786  PSParallelCompact::update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
  5.1787                                                         SpaceId space_id,
  5.1788 -                                                       size_t beg_chunk,
  5.1789 -                                                       size_t end_chunk) {
  5.1790 +                                                       size_t beg_region,
  5.1791 +                                                       size_t end_region) {
  5.1792    ParallelCompactData& sd = summary_data();
  5.1793    ParMarkBitMap* const mbm = mark_bitmap();
  5.1794  
  5.1795 -  HeapWord* beg_addr = sd.chunk_to_addr(beg_chunk);
  5.1796 -  HeapWord* const end_addr = sd.chunk_to_addr(end_chunk);
  5.1797 -  assert(beg_chunk <= end_chunk, "bad chunk range");
  5.1798 +  HeapWord* beg_addr = sd.region_to_addr(beg_region);
  5.1799 +  HeapWord* const end_addr = sd.region_to_addr(end_region);
  5.1800 +  assert(beg_region <= end_region, "bad region range");
  5.1801    assert(end_addr <= dense_prefix(space_id), "not in the dense prefix");
  5.1802  
  5.1803  #ifdef  ASSERT
  5.1804 -  // Claim the chunks to avoid triggering an assert when they are marked as
  5.1805 +  // Claim the regions to avoid triggering an assert when they are marked as
  5.1806    // filled.
  5.1807 -  for (size_t claim_chunk = beg_chunk; claim_chunk < end_chunk; ++claim_chunk) {
  5.1808 -    assert(sd.chunk(claim_chunk)->claim_unsafe(), "claim() failed");
  5.1809 +  for (size_t claim_region = beg_region; claim_region < end_region; ++claim_region) {
  5.1810 +    assert(sd.region(claim_region)->claim_unsafe(), "claim() failed");
  5.1811    }
  5.1812  #endif  // #ifdef ASSERT
  5.1813  
  5.1814    if (beg_addr != space(space_id)->bottom()) {
  5.1815      // Find the first live object or block of dead space that *starts* in this
  5.1816 -    // range of chunks.  If a partial object crosses onto the chunk, skip it; it
  5.1817 -    // will be marked for 'deferred update' when the object head is processed.
  5.1818 -    // If dead space crosses onto the chunk, it is also skipped; it will be
  5.1819 -    // filled when the prior chunk is processed.  If neither of those apply, the
  5.1820 -    // first word in the chunk is the start of a live object or dead space.
  5.1821 +    // range of regions.  If a partial object crosses onto the region, skip it;
  5.1822 +    // it will be marked for 'deferred update' when the object head is
  5.1823 +    // processed.  If dead space crosses onto the region, it is also skipped; it
  5.1824 +    // will be filled when the prior region is processed.  If neither of those
  5.1825 +    // apply, the first word in the region is the start of a live object or dead
  5.1826 +    // space.
  5.1827      assert(beg_addr > space(space_id)->bottom(), "sanity");
  5.1828 -    const ChunkData* const cp = sd.chunk(beg_chunk);
  5.1829 +    const RegionData* const cp = sd.region(beg_region);
  5.1830      if (cp->partial_obj_size() != 0) {
  5.1831 -      beg_addr = sd.partial_obj_end(beg_chunk);
  5.1832 +      beg_addr = sd.partial_obj_end(beg_region);
  5.1833      } else if (dead_space_crosses_boundary(cp, mbm->addr_to_bit(beg_addr))) {
  5.1834        beg_addr = mbm->find_obj_beg(beg_addr, end_addr);
  5.1835      }
  5.1836    }
  5.1837  
  5.1838    if (beg_addr < end_addr) {
  5.1839 -    // A live object or block of dead space starts in this range of Chunks.
  5.1840 +    // A live object or block of dead space starts in this range of Regions.
  5.1841       HeapWord* const dense_prefix_end = dense_prefix(space_id);
  5.1842  
  5.1843      // Create closures and iterate.
  5.1844 @@ -2842,10 +2854,10 @@
  5.1845      }
  5.1846    }
  5.1847  
  5.1848 -  // Mark the chunks as filled.
  5.1849 -  ChunkData* const beg_cp = sd.chunk(beg_chunk);
  5.1850 -  ChunkData* const end_cp = sd.chunk(end_chunk);
  5.1851 -  for (ChunkData* cp = beg_cp; cp < end_cp; ++cp) {
  5.1852 +  // Mark the regions as filled.
  5.1853 +  RegionData* const beg_cp = sd.region(beg_region);
  5.1854 +  RegionData* const end_cp = sd.region(end_region);
  5.1855 +  for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
  5.1856      cp->set_completed();
  5.1857    }
  5.1858  }
  5.1859 @@ -2877,13 +2889,13 @@
  5.1860    const MutableSpace* const space = space_info->space();
  5.1861    assert(space_info->dense_prefix() >= space->bottom(), "dense_prefix not set");
  5.1862    HeapWord* const beg_addr = space_info->dense_prefix();
  5.1863 -  HeapWord* const end_addr = sd.chunk_align_up(space_info->new_top());
  5.1864 -
  5.1865 -  const ChunkData* const beg_chunk = sd.addr_to_chunk_ptr(beg_addr);
  5.1866 -  const ChunkData* const end_chunk = sd.addr_to_chunk_ptr(end_addr);
  5.1867 -  const ChunkData* cur_chunk;
  5.1868 -  for (cur_chunk = beg_chunk; cur_chunk < end_chunk; ++cur_chunk) {
  5.1869 -    HeapWord* const addr = cur_chunk->deferred_obj_addr();
  5.1870 +  HeapWord* const end_addr = sd.region_align_up(space_info->new_top());
  5.1871 +
  5.1872 +  const RegionData* const beg_region = sd.addr_to_region_ptr(beg_addr);
  5.1873 +  const RegionData* const end_region = sd.addr_to_region_ptr(end_addr);
  5.1874 +  const RegionData* cur_region;
  5.1875 +  for (cur_region = beg_region; cur_region < end_region; ++cur_region) {
  5.1876 +    HeapWord* const addr = cur_region->deferred_obj_addr();
  5.1877      if (addr != NULL) {
  5.1878        if (start_array != NULL) {
  5.1879          start_array->allocate_block(addr);
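
The deferred-update pass above is a straight scan of the region table between
the dense prefix and new_top: each region records at most one object whose
interior oops could not be updated during the parallel copy, and this pass
fixes those up.  A toy version, where kNone and the word offsets stand in for
the NULL deferred_obj_addr() and real heap addresses:

    #include <cstdio>
    #include <vector>

    static const size_t kNone = (size_t)-1;   // plays the role of NULL above

    void update_deferred_objects(const std::vector<size_t>& deferred_obj_addr) {
      for (size_t region = 0; region < deferred_obj_addr.size(); ++region) {
        const size_t addr = deferred_obj_addr[region];
        if (addr != kNone) {
          // The real code re-registers the block in the start array
          // (start_array->allocate_block) and then updates the object's
          // interior pointers; here we only report the work.
          std::printf("region %zu: update deferred object at word %zu\n",
                      region, addr);
        }
      }
    }

    int main() {
      std::vector<size_t> deferred(5, kNone);
      deferred[1] = 96;    // an object straddling out of region 1
      deferred[4] = 1024;  // and another out of region 4
      update_deferred_objects(deferred);
      return 0;
    }
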
  5.1880 @@ -2929,45 +2941,45 @@
  5.1881  
  5.1882  HeapWord*
  5.1883  PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
  5.1884 -                                 size_t src_chunk_idx)
  5.1885 +                                 size_t src_region_idx)
  5.1886  {
  5.1887    ParMarkBitMap* const bitmap = mark_bitmap();
  5.1888    const ParallelCompactData& sd = summary_data();
  5.1889 -  const size_t ChunkSize = ParallelCompactData::ChunkSize;
  5.1890 -
  5.1891 -  assert(sd.is_chunk_aligned(dest_addr), "not aligned");
  5.1892 -
  5.1893 -  const ChunkData* const src_chunk_ptr = sd.chunk(src_chunk_idx);
  5.1894 -  const size_t partial_obj_size = src_chunk_ptr->partial_obj_size();
  5.1895 -  HeapWord* const src_chunk_destination = src_chunk_ptr->destination();
  5.1896 -
  5.1897 -  assert(dest_addr >= src_chunk_destination, "wrong src chunk");
  5.1898 -  assert(src_chunk_ptr->data_size() > 0, "src chunk cannot be empty");
  5.1899 -
  5.1900 -  HeapWord* const src_chunk_beg = sd.chunk_to_addr(src_chunk_idx);
  5.1901 -  HeapWord* const src_chunk_end = src_chunk_beg + ChunkSize;
  5.1902 -
  5.1903 -  HeapWord* addr = src_chunk_beg;
  5.1904 -  if (dest_addr == src_chunk_destination) {
  5.1905 -    // Return the first live word in the source chunk.
  5.1906 +  const size_t RegionSize = ParallelCompactData::RegionSize;
  5.1907 +
  5.1908 +  assert(sd.is_region_aligned(dest_addr), "not aligned");
  5.1909 +
  5.1910 +  const RegionData* const src_region_ptr = sd.region(src_region_idx);
  5.1911 +  const size_t partial_obj_size = src_region_ptr->partial_obj_size();
  5.1912 +  HeapWord* const src_region_destination = src_region_ptr->destination();
  5.1913 +
  5.1914 +  assert(dest_addr >= src_region_destination, "wrong src region");
  5.1915 +  assert(src_region_ptr->data_size() > 0, "src region cannot be empty");
  5.1916 +
  5.1917 +  HeapWord* const src_region_beg = sd.region_to_addr(src_region_idx);
  5.1918 +  HeapWord* const src_region_end = src_region_beg + RegionSize;
  5.1919 +
  5.1920 +  HeapWord* addr = src_region_beg;
  5.1921 +  if (dest_addr == src_region_destination) {
  5.1922 +    // Return the first live word in the source region.
  5.1923      if (partial_obj_size == 0) {
  5.1924 -      addr = bitmap->find_obj_beg(addr, src_chunk_end);
  5.1925 -      assert(addr < src_chunk_end, "no objects start in src chunk");
  5.1926 +      addr = bitmap->find_obj_beg(addr, src_region_end);
  5.1927 +      assert(addr < src_region_end, "no objects start in src region");
  5.1928      }
  5.1929      return addr;
  5.1930    }
  5.1931  
  5.1932    // Must skip some live data.
  5.1933 -  size_t words_to_skip = dest_addr - src_chunk_destination;
  5.1934 -  assert(src_chunk_ptr->data_size() > words_to_skip, "wrong src chunk");
  5.1935 +  size_t words_to_skip = dest_addr - src_region_destination;
  5.1936 +  assert(src_region_ptr->data_size() > words_to_skip, "wrong src region");
  5.1937  
  5.1938    if (partial_obj_size >= words_to_skip) {
  5.1939      // All the live words to skip are part of the partial object.
  5.1940      addr += words_to_skip;
  5.1941      if (partial_obj_size == words_to_skip) {
  5.1942        // Find the first live word past the partial object.
  5.1943 -      addr = bitmap->find_obj_beg(addr, src_chunk_end);
  5.1944 -      assert(addr < src_chunk_end, "wrong src chunk");
  5.1945 +      addr = bitmap->find_obj_beg(addr, src_region_end);
  5.1946 +      assert(addr < src_region_end, "wrong src region");
  5.1947      }
  5.1948      return addr;
  5.1949    }
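
first_src_addr() reduces to one observation: the number of live words already
consumed from the source region equals dest_addr - src_region_destination, and
those words are skipped first out of the partial object and then out of the
objects that start in the region.  A standalone sketch of that computation,
with an invented 16-word region and a vector<bool> standing in for the mark
bitmap:

    #include <cassert>
    #include <cstdio>
    #include <vector>

    static const size_t RegionSize = 16;   // words; illustrative only

    // Return the word offset of the next live word to copy, given how many
    // live words of the region were already copied (words_to_skip), the size
    // of the partial object at offset 0, and a per-word liveness map for the
    // words past the partial object.
    size_t first_src_offset(size_t words_to_skip,
                            size_t partial_obj_size,
                            const std::vector<bool>& live) {
      assert(live.size() == RegionSize);
      if (words_to_skip <= partial_obj_size) {
        // Everything skipped lies inside the partial object.  If the partial
        // object is consumed exactly, advance to the first live word after it.
        size_t offset = words_to_skip;
        if (words_to_skip == partial_obj_size) {
          while (offset < RegionSize && !live[offset]) ++offset;
        }
        return offset;
      }
      // Skip the whole partial object, then whole live words one at a time.
      size_t remaining = words_to_skip - partial_obj_size;
      size_t offset = partial_obj_size;
      while (offset < RegionSize && remaining > 0) {
        if (live[offset]) --remaining;
        ++offset;
      }
      while (offset < RegionSize && !live[offset]) ++offset;  // next live word
      return offset;
    }

    int main() {
      std::vector<bool> live(RegionSize, false);
      for (size_t i = 6; i < 10; ++i) live[i] = true;   // one 4-word object
      // A 4-word partial object with 5 words already copied: skip the partial
      // object plus one live word, landing on offset 7.
      std::printf("next src offset = %zu\n", first_src_offset(5, 4, live));
      return 0;
    }
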
  5.1950 @@ -2978,63 +2990,64 @@
  5.1951      addr += partial_obj_size;
  5.1952    }
  5.1953  
  5.1954 -  // Skip over live words due to objects that start in the chunk.
  5.1955 -  addr = skip_live_words(addr, src_chunk_end, words_to_skip);
  5.1956 -  assert(addr < src_chunk_end, "wrong src chunk");
  5.1957 +  // Skip over live words due to objects that start in the region.
  5.1958 +  addr = skip_live_words(addr, src_region_end, words_to_skip);
  5.1959 +  assert(addr < src_region_end, "wrong src region");
  5.1960    return addr;
  5.1961  }
  5.1962  
  5.1963  void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
  5.1964 -                                                     size_t beg_chunk,
  5.1965 +                                                     size_t beg_region,
  5.1966                                                       HeapWord* end_addr)
  5.1967  {
  5.1968    ParallelCompactData& sd = summary_data();
  5.1969 -  ChunkData* const beg = sd.chunk(beg_chunk);
  5.1970 -  HeapWord* const end_addr_aligned_up = sd.chunk_align_up(end_addr);
  5.1971 -  ChunkData* const end = sd.addr_to_chunk_ptr(end_addr_aligned_up);
  5.1972 -  size_t cur_idx = beg_chunk;
  5.1973 -  for (ChunkData* cur = beg; cur < end; ++cur, ++cur_idx) {
  5.1974 -    assert(cur->data_size() > 0, "chunk must have live data");
  5.1975 +  RegionData* const beg = sd.region(beg_region);
  5.1976 +  HeapWord* const end_addr_aligned_up = sd.region_align_up(end_addr);
  5.1977 +  RegionData* const end = sd.addr_to_region_ptr(end_addr_aligned_up);
  5.1978 +  size_t cur_idx = beg_region;
  5.1979 +  for (RegionData* cur = beg; cur < end; ++cur, ++cur_idx) {
  5.1980 +    assert(cur->data_size() > 0, "region must have live data");
  5.1981      cur->decrement_destination_count();
  5.1982 -    if (cur_idx <= cur->source_chunk() && cur->available() && cur->claim()) {
  5.1983 +    if (cur_idx <= cur->source_region() && cur->available() && cur->claim()) {
  5.1984        cm->save_for_processing(cur_idx);
  5.1985      }
  5.1986    }
  5.1987  }
  5.1988  
  5.1989 -size_t PSParallelCompact::next_src_chunk(MoveAndUpdateClosure& closure,
  5.1990 -                                         SpaceId& src_space_id,
  5.1991 -                                         HeapWord*& src_space_top,
  5.1992 -                                         HeapWord* end_addr)
  5.1993 +size_t PSParallelCompact::next_src_region(MoveAndUpdateClosure& closure,
  5.1994 +                                          SpaceId& src_space_id,
  5.1995 +                                          HeapWord*& src_space_top,
  5.1996 +                                          HeapWord* end_addr)
  5.1997  {
  5.1998 -  typedef ParallelCompactData::ChunkData ChunkData;
  5.1999 +  typedef ParallelCompactData::RegionData RegionData;
  5.2000  
  5.2001    ParallelCompactData& sd = PSParallelCompact::summary_data();
  5.2002 -  const size_t chunk_size = ParallelCompactData::ChunkSize;
  5.2003 -
  5.2004 -  size_t src_chunk_idx = 0;
  5.2005 -
  5.2006 -  // Skip empty chunks (if any) up to the top of the space.
  5.2007 -  HeapWord* const src_aligned_up = sd.chunk_align_up(end_addr);
  5.2008 -  ChunkData* src_chunk_ptr = sd.addr_to_chunk_ptr(src_aligned_up);
  5.2009 -  HeapWord* const top_aligned_up = sd.chunk_align_up(src_space_top);
  5.2010 -  const ChunkData* const top_chunk_ptr = sd.addr_to_chunk_ptr(top_aligned_up);
  5.2011 -  while (src_chunk_ptr < top_chunk_ptr && src_chunk_ptr->data_size() == 0) {
  5.2012 -    ++src_chunk_ptr;
  5.2013 +  const size_t region_size = ParallelCompactData::RegionSize;
  5.2014 +
  5.2015 +  size_t src_region_idx = 0;
  5.2016 +
  5.2017 +  // Skip empty regions (if any) up to the top of the space.
  5.2018 +  HeapWord* const src_aligned_up = sd.region_align_up(end_addr);
  5.2019 +  RegionData* src_region_ptr = sd.addr_to_region_ptr(src_aligned_up);
  5.2020 +  HeapWord* const top_aligned_up = sd.region_align_up(src_space_top);
  5.2021 +  const RegionData* const top_region_ptr =
  5.2022 +    sd.addr_to_region_ptr(top_aligned_up);
  5.2023 +  while (src_region_ptr < top_region_ptr && src_region_ptr->data_size() == 0) {
  5.2024 +    ++src_region_ptr;
  5.2025    }
  5.2026  
  5.2027 -  if (src_chunk_ptr < top_chunk_ptr) {
  5.2028 -    // The next source chunk is in the current space.  Update src_chunk_idx and
  5.2029 -    // the source address to match src_chunk_ptr.
  5.2030 -    src_chunk_idx = sd.chunk(src_chunk_ptr);
  5.2031 -    HeapWord* const src_chunk_addr = sd.chunk_to_addr(src_chunk_idx);
  5.2032 -    if (src_chunk_addr > closure.source()) {
  5.2033 -      closure.set_source(src_chunk_addr);
  5.2034 +  if (src_region_ptr < top_region_ptr) {
  5.2035 +    // The next source region is in the current space.  Update src_region_idx
  5.2036 +    // and the source address to match src_region_ptr.
  5.2037 +    src_region_idx = sd.region(src_region_ptr);
  5.2038 +    HeapWord* const src_region_addr = sd.region_to_addr(src_region_idx);
  5.2039 +    if (src_region_addr > closure.source()) {
  5.2040 +      closure.set_source(src_region_addr);
  5.2041      }
  5.2042 -    return src_chunk_idx;
  5.2043 +    return src_region_idx;
  5.2044    }
  5.2045  
  5.2046 -  // Switch to a new source space and find the first non-empty chunk.
  5.2047 +  // Switch to a new source space and find the first non-empty region.
  5.2048    unsigned int space_id = src_space_id + 1;
  5.2049    assert(space_id < last_space_id, "not enough spaces");
  5.2050  
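
decrement_destination_counts() relies on a simple ownership rule: the thread
whose decrement drops a region's destination count to zero may claim the region
and queue it for filling, so every region is filled exactly once.  The sketch
below models only that rule with std::atomic; the source_region() ordering
check and the packed _dc_and_los encoding of the real RegionData are omitted,
and ToyRegion/decrement_and_try_claim are invented names.

    #include <atomic>
    #include <cstdio>
    #include <vector>

    struct ToyRegion {
      std::atomic<unsigned> destination_count;  // regions still feeding this one
      std::atomic<bool>     claimed;            // true once a thread owns it
      ToyRegion() : destination_count(0), claimed(false) {}
    };

    // Returns true if the caller now owns the region and must fill it.
    bool decrement_and_try_claim(ToyRegion& r) {
      const unsigned prev = r.destination_count.fetch_sub(1);
      if (prev == 1) {                      // count just reached zero: available
        bool expected = false;
        return r.claimed.compare_exchange_strong(expected, true);
      }
      return false;
    }

    int main() {
      std::vector<ToyRegion> regions(3);
      regions[0].destination_count = 1;     // fed by one other region
      regions[1].destination_count = 2;     // fed by two other regions
      if (decrement_and_try_claim(regions[0]))
        std::printf("region 0 claimed for filling\n");
      if (!decrement_and_try_claim(regions[1]))
        std::printf("region 1 still has an outstanding source\n");
      return 0;
    }
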
  5.2051 @@ -3043,14 +3056,14 @@
  5.2052    do {
  5.2053      MutableSpace* space = _space_info[space_id].space();
  5.2054      HeapWord* const bottom = space->bottom();
  5.2055 -    const ChunkData* const bottom_cp = sd.addr_to_chunk_ptr(bottom);
  5.2056 +    const RegionData* const bottom_cp = sd.addr_to_region_ptr(bottom);
  5.2057  
  5.2058      // Iterate over the spaces that do not compact into themselves.
  5.2059      if (bottom_cp->destination() != bottom) {
  5.2060 -      HeapWord* const top_aligned_up = sd.chunk_align_up(space->top());
  5.2061 -      const ChunkData* const top_cp = sd.addr_to_chunk_ptr(top_aligned_up);
  5.2062 -
  5.2063 -      for (const ChunkData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
  5.2064 +      HeapWord* const top_aligned_up = sd.region_align_up(space->top());
  5.2065 +      const RegionData* const top_cp = sd.addr_to_region_ptr(top_aligned_up);
  5.2066 +
  5.2067 +      for (const RegionData* src_cp = bottom_cp; src_cp < top_cp; ++src_cp) {
  5.2068          if (src_cp->live_obj_size() > 0) {
  5.2069            // Found it.
  5.2070            assert(src_cp->destination() == destination,
  5.2071 @@ -3060,9 +3073,9 @@
  5.2072  
  5.2073            src_space_id = SpaceId(space_id);
  5.2074            src_space_top = space->top();
  5.2075 -          const size_t src_chunk_idx = sd.chunk(src_cp);
  5.2076 -          closure.set_source(sd.chunk_to_addr(src_chunk_idx));
  5.2077 -          return src_chunk_idx;
  5.2078 +          const size_t src_region_idx = sd.region(src_cp);
  5.2079 +          closure.set_source(sd.region_to_addr(src_region_idx));
  5.2080 +          return src_region_idx;
  5.2081          } else {
  5.2082            assert(src_cp->data_size() == 0, "sanity");
  5.2083          }
  5.2084 @@ -3070,38 +3083,38 @@
  5.2085      }
  5.2086    } while (++space_id < last_space_id);
  5.2087  
  5.2088 -  assert(false, "no source chunk was found");
  5.2089 +  assert(false, "no source region was found");
  5.2090    return 0;
  5.2091  }
  5.2092  
  5.2093 -void PSParallelCompact::fill_chunk(ParCompactionManager* cm, size_t chunk_idx)
  5.2094 +void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
  5.2095  {
  5.2096    typedef ParMarkBitMap::IterationStatus IterationStatus;
  5.2097 -  const size_t ChunkSize = ParallelCompactData::ChunkSize;
  5.2098 +  const size_t RegionSize = ParallelCompactData::RegionSize;
  5.2099    ParMarkBitMap* const bitmap = mark_bitmap();
  5.2100    ParallelCompactData& sd = summary_data();
  5.2101 -  ChunkData* const chunk_ptr = sd.chunk(chunk_idx);
  5.2102 +  RegionData* const region_ptr = sd.region(region_idx);
  5.2103  
  5.2104    // Get the items needed to construct the closure.
  5.2105 -  HeapWord* dest_addr = sd.chunk_to_addr(chunk_idx);
  5.2106 +  HeapWord* dest_addr = sd.region_to_addr(region_idx);
  5.2107    SpaceId dest_space_id = space_id(dest_addr);
  5.2108    ObjectStartArray* start_array = _space_info[dest_space_id].start_array();
  5.2109    HeapWord* new_top = _space_info[dest_space_id].new_top();
  5.2110    assert(dest_addr < new_top, "sanity");
  5.2111 -  const size_t words = MIN2(pointer_delta(new_top, dest_addr), ChunkSize);
  5.2112 -
  5.2113 -  // Get the source chunk and related info.
  5.2114 -  size_t src_chunk_idx = chunk_ptr->source_chunk();
  5.2115 -  SpaceId src_space_id = space_id(sd.chunk_to_addr(src_chunk_idx));
  5.2116 +  const size_t words = MIN2(pointer_delta(new_top, dest_addr), RegionSize);
  5.2117 +
  5.2118 +  // Get the source region and related info.
  5.2119 +  size_t src_region_idx = region_ptr->source_region();
  5.2120 +  SpaceId src_space_id = space_id(sd.region_to_addr(src_region_idx));
  5.2121    HeapWord* src_space_top = _space_info[src_space_id].space()->top();
  5.2122  
  5.2123    MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
  5.2124 -  closure.set_source(first_src_addr(dest_addr, src_chunk_idx));
  5.2125 -
  5.2126 -  // Adjust src_chunk_idx to prepare for decrementing destination counts (the
  5.2127 -  // destination count is not decremented when a chunk is copied to itself).
  5.2128 -  if (src_chunk_idx == chunk_idx) {
  5.2129 -    src_chunk_idx += 1;
  5.2130 +  closure.set_source(first_src_addr(dest_addr, src_region_idx));
  5.2131 +
  5.2132 +  // Adjust src_region_idx to prepare for decrementing destination counts (the
  5.2133 +  // destination count is not decremented when a region is copied to itself).
  5.2134 +  if (src_region_idx == region_idx) {
  5.2135 +    src_region_idx += 1;
  5.2136    }
  5.2137  
  5.2138    if (bitmap->is_unmarked(closure.source())) {
  5.2139 @@ -3111,32 +3124,33 @@
  5.2140      HeapWord* const old_src_addr = closure.source();
  5.2141      closure.copy_partial_obj();
  5.2142      if (closure.is_full()) {
  5.2143 -      decrement_destination_counts(cm, src_chunk_idx, closure.source());
  5.2144 -      chunk_ptr->set_deferred_obj_addr(NULL);
  5.2145 -      chunk_ptr->set_completed();
  5.2146 +      decrement_destination_counts(cm, src_region_idx, closure.source());
  5.2147 +      region_ptr->set_deferred_obj_addr(NULL);
  5.2148 +      region_ptr->set_completed();
  5.2149        return;
  5.2150      }
  5.2151  
  5.2152 -    HeapWord* const end_addr = sd.chunk_align_down(closure.source());
  5.2153 -    if (sd.chunk_align_down(old_src_addr) != end_addr) {
  5.2154 -      // The partial object was copied from more than one source chunk.
  5.2155 -      decrement_destination_counts(cm, src_chunk_idx, end_addr);
  5.2156 -
  5.2157 -      // Move to the next source chunk, possibly switching spaces as well.  All
  5.2158 +    HeapWord* const end_addr = sd.region_align_down(closure.source());
  5.2159 +    if (sd.region_align_down(old_src_addr) != end_addr) {
  5.2160 +      // The partial object was copied from more than one source region.
  5.2161 +      decrement_destination_counts(cm, src_region_idx, end_addr);
  5.2162 +
  5.2163 +      // Move to the next source region, possibly switching spaces as well.  All
  5.2164        // args except end_addr may be modified.
  5.2165 -      src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
  5.2166 -                                     end_addr);
  5.2167 +      src_region_idx = next_src_region(closure, src_space_id, src_space_top,
  5.2168 +                                       end_addr);
  5.2169      }
  5.2170    }
  5.2171  
  5.2172    do {
  5.2173      HeapWord* const cur_addr = closure.source();
  5.2174 -    HeapWord* const end_addr = MIN2(sd.chunk_align_up(cur_addr + 1),
  5.2175 +    HeapWord* const end_addr = MIN2(sd.region_align_up(cur_addr + 1),
  5.2176                                      src_space_top);
  5.2177      IterationStatus status = bitmap->iterate(&closure, cur_addr, end_addr);
  5.2178  
  5.2179      if (status == ParMarkBitMap::incomplete) {
  5.2180 -      // The last obj that starts in the source chunk does not end in the chunk.
  5.2181 +      // The last obj that starts in the source region does not end in the
  5.2182 +      // region.
  5.2183        assert(closure.source() < end_addr, "sanity")
  5.2184        HeapWord* const obj_beg = closure.source();
  5.2185        HeapWord* const range_end = MIN2(obj_beg + closure.words_remaining(),
  5.2186 @@ -3155,28 +3169,28 @@
  5.2187  
  5.2188      if (status == ParMarkBitMap::would_overflow) {
  5.2189        // The last object did not fit.  Note that interior oop updates were
  5.2190 -      // deferred, then copy enough of the object to fill the chunk.
  5.2191 -      chunk_ptr->set_deferred_obj_addr(closure.destination());
  5.2192 +      // deferred, then copy enough of the object to fill the region.
  5.2193 +      region_ptr->set_deferred_obj_addr(closure.destination());
  5.2194        status = closure.copy_until_full(); // copies from closure.source()
  5.2195  
  5.2196 -      decrement_destination_counts(cm, src_chunk_idx, closure.source());
  5.2197 -      chunk_ptr->set_completed();
  5.2198 +      decrement_destination_counts(cm, src_region_idx, closure.source());
  5.2199 +      region_ptr->set_completed();
  5.2200        return;
  5.2201      }
  5.2202  
  5.2203      if (status == ParMarkBitMap::full) {
  5.2204 -      decrement_destination_counts(cm, src_chunk_idx, closure.source());
  5.2205 -      chunk_ptr->set_deferred_obj_addr(NULL);
  5.2206 -      chunk_ptr->set_completed();
  5.2207 +      decrement_destination_counts(cm, src_region_idx, closure.source());
  5.2208 +      region_ptr->set_deferred_obj_addr(NULL);
  5.2209 +      region_ptr->set_completed();
  5.2210        return;
  5.2211      }
  5.2212  
  5.2213 -    decrement_destination_counts(cm, src_chunk_idx, end_addr);
  5.2214 -
  5.2215 -    // Move to the next source chunk, possibly switching spaces as well.  All
  5.2216 +    decrement_destination_counts(cm, src_region_idx, end_addr);
  5.2217 +
  5.2218 +    // Move to the next source region, possibly switching spaces as well.  All
  5.2219      // args except end_addr may be modified.
  5.2220 -    src_chunk_idx = next_src_chunk(closure, src_space_id, src_space_top,
  5.2221 -                                   end_addr);
  5.2222 +    src_region_idx = next_src_region(closure, src_space_id, src_space_top,
  5.2223 +                                     end_addr);
  5.2224    } while (true);
  5.2225  }
  5.2226  
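
Stripped of the bitmap iteration, deferred objects and status codes, the loop
in fill_region() keeps copying live words from consecutive source regions into
one destination region until the destination's word budget is exhausted,
advancing to a new source exactly where next_src_region() is called above.  The
toy loop below keeps only that budget/advance structure; RegionWords and the
sample data are made up for the example.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    static const size_t RegionWords = 8;   // destination budget, in words

    // Fill one destination region from live_words[src], live_words[src+1], ...
    // and return the index of the first source region that still has data
    // (the analogue of next_src_region()).
    size_t fill_one_region(std::vector<size_t>& live_words, size_t src) {
      size_t budget = RegionWords;
      while (budget > 0 && src < live_words.size()) {
        const size_t copied = std::min(budget, live_words[src]);
        budget          -= copied;
        live_words[src] -= copied;
        if (live_words[src] == 0) {
          ++src;             // source drained; move to the next source region
        }
      }
      return src;
    }

    int main() {
      std::vector<size_t> live_words;      // live words waiting in 3 sources
      live_words.push_back(5);
      live_words.push_back(6);
      live_words.push_back(2);
      size_t src = 0;
      for (size_t dest = 0; dest < 2 && src < live_words.size(); ++dest) {
        src = fill_one_region(live_words, src);
        std::printf("after filling dest region %zu, next source is %zu\n",
                    dest, src);
      }
      return 0;
    }
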
  5.2227 @@ -3208,15 +3222,15 @@
  5.2228    }
  5.2229  #endif
  5.2230  
  5.2231 -  const size_t beg_chunk = sd.addr_to_chunk_idx(beg_addr);
  5.2232 -  const size_t dp_chunk = sd.addr_to_chunk_idx(dp_addr);
  5.2233 -  if (beg_chunk < dp_chunk) {
  5.2234 -    update_and_deadwood_in_dense_prefix(cm, space_id, beg_chunk, dp_chunk);
  5.2235 +  const size_t beg_region = sd.addr_to_region_idx(beg_addr);
  5.2236 +  const size_t dp_region = sd.addr_to_region_idx(dp_addr);
  5.2237 +  if (beg_region < dp_region) {
  5.2238 +    update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
  5.2239    }
  5.2240  
  5.2241 -  // The destination of the first live object that starts in the chunk is one
  5.2242 -  // past the end of the partial object entering the chunk (if any).
  5.2243 -  HeapWord* const dest_addr = sd.partial_obj_end(dp_chunk);
  5.2244 +  // The destination of the first live object that starts in the region is one
  5.2245 +  // past the end of the partial object entering the region (if any).
  5.2246 +  HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
  5.2247    HeapWord* const new_top = _space_info[space_id].new_top();
  5.2248    assert(new_top >= dest_addr, "bad new_top value");
  5.2249    const size_t words = pointer_delta(new_top, dest_addr);
  5.2250 @@ -3327,41 +3341,41 @@
  5.2251  
  5.2252  BitBlockUpdateClosure::BitBlockUpdateClosure(ParMarkBitMap* mbm,
  5.2253                          ParCompactionManager* cm,
  5.2254 -                        size_t chunk_index) :
  5.2255 +                        size_t region_index) :
  5.2256                          ParMarkBitMapClosure(mbm, cm),
  5.2257                          _live_data_left(0),
  5.2258                          _cur_block(0) {
  5.2259 -  _chunk_start =
  5.2260 -    PSParallelCompact::summary_data().chunk_to_addr(chunk_index);
  5.2261 -  _chunk_end =
  5.2262 -    PSParallelCompact::summary_data().chunk_to_addr(chunk_index) +
  5.2263 -                 ParallelCompactData::ChunkSize;
  5.2264 -  _chunk_index = chunk_index;
  5.2265 +  _region_start =
  5.2266 +    PSParallelCompact::summary_data().region_to_addr(region_index);
  5.2267 +  _region_end =
  5.2268 +    PSParallelCompact::summary_data().region_to_addr(region_index) +
  5.2269 +                 ParallelCompactData::RegionSize;
  5.2270 +  _region_index = region_index;
  5.2271    _cur_block =
  5.2272 -    PSParallelCompact::summary_data().addr_to_block_idx(_chunk_start);
  5.2273 +    PSParallelCompact::summary_data().addr_to_block_idx(_region_start);
  5.2274  }
  5.2275  
  5.2276 -bool BitBlockUpdateClosure::chunk_contains_cur_block() {
  5.2277 -  return ParallelCompactData::chunk_contains_block(_chunk_index, _cur_block);
  5.2278 +bool BitBlockUpdateClosure::region_contains_cur_block() {
  5.2279 +  return ParallelCompactData::region_contains_block(_region_index, _cur_block);
  5.2280  }
  5.2281  
  5.2282 -void BitBlockUpdateClosure::reset_chunk(size_t chunk_index) {
  5.2283 +void BitBlockUpdateClosure::reset_region(size_t region_index) {
  5.2284    DEBUG_ONLY(ParallelCompactData::BlockData::set_cur_phase(7);)
  5.2285    ParallelCompactData& sd = PSParallelCompact::summary_data();
  5.2286 -  _chunk_index = chunk_index;
  5.2287 +  _region_index = region_index;
  5.2288    _live_data_left = 0;
  5.2289 -  _chunk_start = sd.chunk_to_addr(chunk_index);
  5.2290 -  _chunk_end = sd.chunk_to_addr(chunk_index) + ParallelCompactData::ChunkSize;
  5.2291 -
  5.2292 -  // The first block in this chunk
  5.2293 -  size_t first_block =  sd.addr_to_block_idx(_chunk_start);
  5.2294 -  size_t partial_live_size = sd.chunk(chunk_index)->partial_obj_size();
  5.2295 +  _region_start = sd.region_to_addr(region_index);
  5.2296 +  _region_end = sd.region_to_addr(region_index) + ParallelCompactData::RegionSize;
  5.2297 +
  5.2298 +  // The first block in this region
  5.2299 +  size_t first_block =  sd.addr_to_block_idx(_region_start);
  5.2300 +  size_t partial_live_size = sd.region(region_index)->partial_obj_size();
  5.2301  
  5.2302    // Set the offset to 0. By definition it should have that value
  5.2303 -  // but it may have been written while processing an earlier chunk.
  5.2304 +  // but it may have been written while processing an earlier region.
  5.2305    if (partial_live_size == 0) {
  5.2306 -    // No live object extends onto the chunk.  The first bit
  5.2307 -    // in the bit map for the first chunk must be a start bit.
  5.2308 +    // No live object extends onto the region.  The first bit
  5.2309 +    // in the bit map for the first region must be a start bit.
  5.2310      // Although there may not be any marked bits, it is safe
  5.2311      // to set it as a start bit.
  5.2312      sd.block(first_block)->set_start_bit_offset(0);
  5.2313 @@ -3413,8 +3427,8 @@
  5.2314    ParallelCompactData& sd = PSParallelCompact::summary_data();
  5.2315  
  5.2316    assert(bitmap()->obj_size(obj) == words, "bad size");
  5.2317 -  assert(_chunk_start <= obj, "object is not in chunk");
  5.2318 -  assert(obj + words <= _chunk_end, "object is not in chunk");
  5.2319 +  assert(_region_start <= obj, "object is not in region");
  5.2320 +  assert(obj + words <= _region_end, "object is not in region");
  5.2321  
  5.2322    // Update the live data to the left
  5.2323    size_t prev_live_data_left = _live_data_left;
  5.2324 @@ -3432,7 +3446,7 @@
  5.2325      // the new block with the data size that does not include this object.
  5.2326      //
  5.2327      // The first bit in block_of_obj is a start bit except in the
  5.2328 -    // case where the partial object for the chunk extends into
  5.2329 +    // case where the partial object for the region extends into
  5.2330      // this block.
  5.2331      if (sd.partial_obj_ends_in_block(block_of_obj)) {
  5.2332        sd.block(block_of_obj)->set_end_bit_offset(prev_live_data_left);
  5.2333 @@ -3449,9 +3463,9 @@
  5.2334        // The offset for blocks with no objects starting in them
  5.2335        // (e.g., blocks between _cur_block and  block_of_obj_last)
  5.2336        // should not be needed.
  5.2337 -      // Note that block_of_obj_last may be in another chunk.  If so,
  5.2338 +      // Note that block_of_obj_last may be in another region.  If so,
  5.2339        // it should be overwritten later.  This is a problem (writting
  5.2340 -      // into a block in a later chunk) for parallel execution.
  5.2341 +      // into a block in a later region) for parallel execution.
  5.2342        assert(obj < block_of_obj_last_addr,
  5.2343          "Object should start in previous block");
  5.2344  
  5.2345 @@ -3485,7 +3499,7 @@
  5.2346    }
  5.2347  
  5.2348    // Return incomplete if there are more blocks to be done.
  5.2349 -  if (chunk_contains_cur_block()) {
  5.2350 +  if (region_contains_cur_block()) {
  5.2351      return ParMarkBitMap::incomplete;
  5.2352    }
  5.2353    return ParMarkBitMap::complete;
     6.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Sep 30 11:49:31 2008 -0700
     6.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Sep 30 12:20:22 2008 -0700
     6.3 @@ -76,87 +76,87 @@
     6.4  {
     6.5  public:
     6.6    // Sizes are in HeapWords, unless indicated otherwise.
     6.7 -  static const size_t Log2ChunkSize;
     6.8 -  static const size_t ChunkSize;
     6.9 -  static const size_t ChunkSizeBytes;
    6.10 +  static const size_t Log2RegionSize;
    6.11 +  static const size_t RegionSize;
    6.12 +  static const size_t RegionSizeBytes;
    6.13  
    6.14 -  // Mask for the bits in a size_t to get an offset within a chunk.
    6.15 -  static const size_t ChunkSizeOffsetMask;
    6.16 -  // Mask for the bits in a pointer to get an offset within a chunk.
    6.17 -  static const size_t ChunkAddrOffsetMask;
    6.18 -  // Mask for the bits in a pointer to get the address of the start of a chunk.
    6.19 -  static const size_t ChunkAddrMask;
    6.20 +  // Mask for the bits in a size_t to get an offset within a region.
    6.21 +  static const size_t RegionSizeOffsetMask;
    6.22 +  // Mask for the bits in a pointer to get an offset within a region.
    6.23 +  static const size_t RegionAddrOffsetMask;
    6.24 +  // Mask for the bits in a pointer to get the address of the start of a region.
    6.25 +  static const size_t RegionAddrMask;
    6.26  
    6.27    static const size_t Log2BlockSize;
    6.28    static const size_t BlockSize;
    6.29    static const size_t BlockOffsetMask;
    6.30    static const size_t BlockMask;
    6.31  
    6.32 -  static const size_t BlocksPerChunk;
    6.33 +  static const size_t BlocksPerRegion;
    6.34  
    6.35 -  class ChunkData
    6.36 +  class RegionData
    6.37    {
    6.38    public:
    6.39 -    // Destination address of the chunk.
    6.40 +    // Destination address of the region.
    6.41      HeapWord* destination() const { return _destination; }
    6.42  
    6.43 -    // The first chunk containing data destined for this chunk.
    6.44 -    size_t source_chunk() const { return _source_chunk; }
    6.45 +    // The first region containing data destined for this region.
    6.46 +    size_t source_region() const { return _source_region; }
    6.47  
    6.48 -    // The object (if any) starting in this chunk and ending in a different
    6.49 -    // chunk that could not be updated during the main (parallel) compaction
    6.50 +    // The object (if any) starting in this region and ending in a different
    6.51 +    // region that could not be updated during the main (parallel) compaction
    6.52      // phase.  This is different from _partial_obj_addr, which is an object that
    6.53 -    // extends onto a source chunk.  However, the two uses do not overlap in
    6.54 +    // extends onto a source region.  However, the two uses do not overlap in
    6.55      // time, so the same field is used to save space.
    6.56      HeapWord* deferred_obj_addr() const { return _partial_obj_addr; }
    6.57  
    6.58 -    // The starting address of the partial object extending onto the chunk.
    6.59 +    // The starting address of the partial object extending onto the region.
    6.60      HeapWord* partial_obj_addr() const { return _partial_obj_addr; }
    6.61  
    6.62 -    // Size of the partial object extending onto the chunk (words).
    6.63 +    // Size of the partial object extending onto the region (words).
    6.64      size_t partial_obj_size() const { return _partial_obj_size; }
    6.65  
    6.66 -    // Size of live data that lies within this chunk due to objects that start
    6.67 -    // in this chunk (words).  This does not include the partial object
    6.68 -    // extending onto the chunk (if any), or the part of an object that extends
    6.69 -    // onto the next chunk (if any).
    6.70 +    // Size of live data that lies within this region due to objects that start
    6.71 +    // in this region (words).  This does not include the partial object
    6.72 +    // extending onto the region (if any), or the part of an object that extends
    6.73 +    // onto the next region (if any).
    6.74      size_t live_obj_size() const { return _dc_and_los & los_mask; }
    6.75  
    6.76 -    // Total live data that lies within the chunk (words).
    6.77 +    // Total live data that lies within the region (words).
    6.78      size_t data_size() const { return partial_obj_size() + live_obj_size(); }
    6.79  
    6.80 -    // The destination_count is the number of other chunks to which data from
    6.81 -    // this chunk will be copied.  At the end of the summary phase, the valid
    6.82 +    // The destination_count is the number of other regions to which data from
    6.83 +    // this region will be copied.  At the end of the summary phase, the valid
    6.84      // values of destination_count are
    6.85      //
    6.86 -    // 0 - data from the chunk will be compacted completely into itself, or the
    6.87 -    //     chunk is empty.  The chunk can be claimed and then filled.
    6.88 -    // 1 - data from the chunk will be compacted into 1 other chunk; some
    6.89 -    //     data from the chunk may also be compacted into the chunk itself.
    6.90 -    // 2 - data from the chunk will be copied to 2 other chunks.
    6.91 +    // 0 - data from the region will be compacted completely into itself, or the
    6.92 +    //     region is empty.  The region can be claimed and then filled.
    6.93 +    // 1 - data from the region will be compacted into 1 other region; some
    6.94 +    //     data from the region may also be compacted into the region itself.
    6.95 +    // 2 - data from the region will be copied to 2 other regions.
    6.96      //
    6.97 -    // During compaction as chunks are emptied, the destination_count is
    6.98 +    // During compaction as regions are emptied, the destination_count is
    6.99      // decremented (atomically) and when it reaches 0, it can be claimed and
   6.100      // then filled.
   6.101      //
   6.102 -    // A chunk is claimed for processing by atomically changing the
   6.103 -    // destination_count to the claimed value (dc_claimed).  After a chunk has
   6.104 +    // A region is claimed for processing by atomically changing the
   6.105 +    // destination_count to the claimed value (dc_claimed).  After a region has
   6.106      // been filled, the destination_count should be set to the completed value
   6.107      // (dc_completed).
   6.108      inline uint destination_count() const;
   6.109      inline uint destination_count_raw() const;
   6.110  
   6.111 -    // The location of the java heap data that corresponds to this chunk.
   6.112 +    // The location of the java heap data that corresponds to this region.
   6.113      inline HeapWord* data_location() const;
   6.114  
   6.115 -    // The highest address referenced by objects in this chunk.
   6.116 +    // The highest address referenced by objects in this region.
   6.117      inline HeapWord* highest_ref() const;
   6.118  
   6.119 -    // Whether this chunk is available to be claimed, has been claimed, or has
   6.120 +    // Whether this region is available to be claimed, has been claimed, or has
   6.121      // been completed.
   6.122      //
   6.123 -    // Minor subtlety:  claimed() returns true if the chunk is marked
   6.124 -    // completed(), which is desirable since a chunk must be claimed before it
   6.125 +    // Minor subtlety:  claimed() returns true if the region is marked
   6.126 +    // completed(), which is desirable since a region must be claimed before it
   6.127      // can be completed.
   6.128      bool available() const { return _dc_and_los < dc_one; }
   6.129      bool claimed() const   { return _dc_and_los >= dc_claimed; }
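
All of the accessors above read one packed word: the destination count sits in
the upper bits and the live-object size in the lower bits, so available() and
claimed() are plain threshold comparisons and a region is claimed by atomically
changing the count field to dc_claimed.  A self-contained sketch of that
encoding; the 8-bit count / 24-bit size split and the sentinel values are
assumptions made for the example, not HotSpot's actual constants.

    #include <atomic>
    #include <cstdio>

    typedef unsigned int region_sz_t;

    static const region_sz_t dc_shift     = 24;                    // count position
    static const region_sz_t los_mask     = (1u << dc_shift) - 1;  // size bits
    static const region_sz_t dc_mask      = ~los_mask;             // count bits
    static const region_sz_t dc_one       = 1u << dc_shift;
    static const region_sz_t dc_claimed   = 250u << dc_shift;      // sentinels
    static const region_sz_t dc_completed = 255u << dc_shift;

    struct ToyRegionData {
      std::atomic<region_sz_t> dc_and_los;
      ToyRegionData() : dc_and_los(0) {}

      unsigned destination_count() const {
        return (dc_and_los.load() & dc_mask) >> dc_shift;
      }
      size_t live_obj_size() const { return dc_and_los.load() & los_mask; }
      bool   available()     const { return dc_and_los.load() < dc_one; }
      bool   claimed()       const { return dc_and_los.load() >= dc_claimed; }

      // Atomically move an available region (count == 0) to the claimed state,
      // keeping the live-object size bits intact.
      bool claim() {
        region_sz_t old_val = dc_and_los.load();
        if (old_val >= dc_one) return false;        // not available
        return dc_and_los.compare_exchange_strong(old_val, dc_claimed | old_val);
      }
    };

    int main() {
      ToyRegionData r;
      r.dc_and_los = (2u << dc_shift) | 100u;  // 2 destinations, 100 live words
      std::printf("count=%u live=%zu available=%d\n",
                  r.destination_count(), r.live_obj_size(), (int)r.available());
      r.dc_and_los = 100u;                     // count has dropped to zero
      std::printf("claim -> %d, claimed() -> %d\n",
                  (int)r.claim(), (int)r.claimed());
      return 0;
    }
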
   6.130 @@ -164,11 +164,11 @@
   6.131  
   6.132      // These are not atomic.
   6.133      void set_destination(HeapWord* addr)       { _destination = addr; }
   6.134 -    void set_source_chunk(size_t chunk)        { _source_chunk = chunk; }
   6.135 +    void set_source_region(size_t region)      { _source_region = region; }
   6.136      void set_deferred_obj_addr(HeapWord* addr) { _partial_obj_addr = addr; }
   6.137      void set_partial_obj_addr(HeapWord* addr)  { _partial_obj_addr = addr; }
   6.138      void set_partial_obj_size(size_t words)    {
   6.139 -      _partial_obj_size = (chunk_sz_t) words;
   6.140 +      _partial_obj_size = (region_sz_t) words;
   6.141      }
   6.142  
   6.143      inline void set_destination_count(uint count);
   6.144 @@ -184,44 +184,44 @@
   6.145      inline bool claim();
   6.146  
   6.147    private:
   6.148 -    // The type used to represent object sizes within a chunk.
   6.149 -    typedef uint chunk_sz_t;
   6.150 +    // The type used to represent object sizes within a region.
   6.151 +    typedef uint region_sz_t;
   6.152  
   6.153      // Constants for manipulating the _dc_and_los field, which holds both the
   6.154      // destination count and live obj size.  The live obj size lives at the
   6.155      // least significant end so no masking is necessary when adding.
   6.156 -    static const chunk_sz_t dc_shift;           // Shift amount.
   6.157 -    static const chunk_sz_t dc_mask;            // Mask for destination count.
   6.158 -    static const chunk_sz_t dc_one;             // 1, shifted appropriately.
   6.159 -    static const chunk_sz_t dc_claimed;         // Chunk has been claimed.
   6.160 -    static const chunk_sz_t dc_completed;       // Chunk has been completed.
   6.161 -    static const chunk_sz_t los_mask;           // Mask for live obj size.
   6.162 +    static const region_sz_t dc_shift;           // Shift amount.
   6.163 +    static const region_sz_t dc_mask;            // Mask for destination count.
   6.164 +    static const region_sz_t dc_one;             // 1, shifted appropriately.
   6.165 +    static const region_sz_t dc_claimed;         // Region has been claimed.
   6.166 +    static const region_sz_t dc_completed;       // Region has been completed.
   6.167 +    static const region_sz_t los_mask;           // Mask for live obj size.
   6.168  
   6.169 -    HeapWord*           _destination;
   6.170 -    size_t              _source_chunk;
   6.171 -    HeapWord*           _partial_obj_addr;
   6.172 -    chunk_sz_t          _partial_obj_size;
   6.173 -    chunk_sz_t volatile _dc_and_los;
   6.174 +    HeapWord*            _destination;
   6.175 +    size_t               _source_region;
   6.176 +    HeapWord*            _partial_obj_addr;
   6.177 +    region_sz_t          _partial_obj_size;
   6.178 +    region_sz_t volatile _dc_and_los;
   6.179  #ifdef ASSERT
   6.180      // These enable optimizations that are only partially implemented.  Use
   6.181      // debug builds to prevent the code fragments from breaking.
   6.182 -    HeapWord*           _data_location;
   6.183 -    HeapWord*           _highest_ref;
   6.184 +    HeapWord*            _data_location;
   6.185 +    HeapWord*            _highest_ref;
   6.186  #endif  // #ifdef ASSERT
   6.187  
   6.188  #ifdef ASSERT
   6.189     public:
   6.190 -    uint            _pushed;    // 0 until chunk is pushed onto a worker's stack
   6.191 +    uint            _pushed;   // 0 until region is pushed onto a worker's stack
   6.192     private:
   6.193  #endif
   6.194    };
   6.195  
   6.196    // 'Blocks' allow shorter sections of the bitmap to be searched.  Each Block
   6.197 -  // holds an offset, which is the amount of live data in the Chunk to the left
   6.198 +  // holds an offset, which is the amount of live data in the Region to the left
   6.199    // of the first live object in the Block.  This amount of live data will
   6.200    // include any object extending into the block. The first block in
   6.201 -  // a chunk does not include any partial object extending into the
   6.202 -  // the chunk.
    6.203 +  // a region does not include any partial object extending into
    6.204 +  // the region.
   6.205    //
   6.206    // The offset also encodes the
   6.207    // 'parity' of the first 1 bit in the Block:  a positive offset means the
   6.208 @@ -286,27 +286,27 @@
   6.209    ParallelCompactData();
   6.210    bool initialize(MemRegion covered_region);
   6.211  
   6.212 -  size_t chunk_count() const { return _chunk_count; }
   6.213 +  size_t region_count() const { return _region_count; }
   6.214  
   6.215 -  // Convert chunk indices to/from ChunkData pointers.
   6.216 -  inline ChunkData* chunk(size_t chunk_idx) const;
   6.217 -  inline size_t     chunk(const ChunkData* const chunk_ptr) const;
   6.218 +  // Convert region indices to/from RegionData pointers.
   6.219 +  inline RegionData* region(size_t region_idx) const;
   6.220 +  inline size_t     region(const RegionData* const region_ptr) const;
   6.221  
   6.222 -  // Returns true if the given address is contained within the chunk
   6.223 -  bool chunk_contains(size_t chunk_index, HeapWord* addr);
   6.224 +  // Returns true if the given address is contained within the region
   6.225 +  bool region_contains(size_t region_index, HeapWord* addr);
   6.226  
   6.227    size_t block_count() const { return _block_count; }
   6.228    inline BlockData* block(size_t n) const;
   6.229  
   6.230 -  // Returns true if the given block is in the given chunk.
   6.231 -  static bool chunk_contains_block(size_t chunk_index, size_t block_index);
   6.232 +  // Returns true if the given block is in the given region.
   6.233 +  static bool region_contains_block(size_t region_index, size_t block_index);
   6.234  
   6.235    void add_obj(HeapWord* addr, size_t len);
   6.236    void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
   6.237  
   6.238 -  // Fill in the chunks covering [beg, end) so that no data moves; i.e., the
   6.239 -  // destination of chunk n is simply the start of chunk n.  The argument beg
   6.240 -  // must be chunk-aligned; end need not be.
   6.241 +  // Fill in the regions covering [beg, end) so that no data moves; i.e., the
   6.242 +  // destination of region n is simply the start of region n.  The argument beg
   6.243 +  // must be region-aligned; end need not be.
   6.244    void summarize_dense_prefix(HeapWord* beg, HeapWord* end);
   6.245  
   6.246    bool summarize(HeapWord* target_beg, HeapWord* target_end,
   6.247 @@ -314,27 +314,27 @@
   6.248                   HeapWord** target_next, HeapWord** source_next = 0);
   6.249  
   6.250    void clear();
   6.251 -  void clear_range(size_t beg_chunk, size_t end_chunk);
   6.252 +  void clear_range(size_t beg_region, size_t end_region);
   6.253    void clear_range(HeapWord* beg, HeapWord* end) {
   6.254 -    clear_range(addr_to_chunk_idx(beg), addr_to_chunk_idx(end));
   6.255 +    clear_range(addr_to_region_idx(beg), addr_to_region_idx(end));
   6.256    }
   6.257  
   6.258 -  // Return the number of words between addr and the start of the chunk
   6.259 +  // Return the number of words between addr and the start of the region
   6.260    // containing addr.
   6.261 -  inline size_t     chunk_offset(const HeapWord* addr) const;
   6.262 +  inline size_t     region_offset(const HeapWord* addr) const;
   6.263  
   6.264 -  // Convert addresses to/from a chunk index or chunk pointer.
   6.265 -  inline size_t     addr_to_chunk_idx(const HeapWord* addr) const;
   6.266 -  inline ChunkData* addr_to_chunk_ptr(const HeapWord* addr) const;
   6.267 -  inline HeapWord*  chunk_to_addr(size_t chunk) const;
   6.268 -  inline HeapWord*  chunk_to_addr(size_t chunk, size_t offset) const;
   6.269 -  inline HeapWord*  chunk_to_addr(const ChunkData* chunk) const;
   6.270 +  // Convert addresses to/from a region index or region pointer.
   6.271 +  inline size_t     addr_to_region_idx(const HeapWord* addr) const;
   6.272 +  inline RegionData* addr_to_region_ptr(const HeapWord* addr) const;
   6.273 +  inline HeapWord*  region_to_addr(size_t region) const;
   6.274 +  inline HeapWord*  region_to_addr(size_t region, size_t offset) const;
   6.275 +  inline HeapWord*  region_to_addr(const RegionData* region) const;
   6.276  
   6.277 -  inline HeapWord*  chunk_align_down(HeapWord* addr) const;
   6.278 -  inline HeapWord*  chunk_align_up(HeapWord* addr) const;
   6.279 -  inline bool       is_chunk_aligned(HeapWord* addr) const;
   6.280 +  inline HeapWord*  region_align_down(HeapWord* addr) const;
   6.281 +  inline HeapWord*  region_align_up(HeapWord* addr) const;
   6.282 +  inline bool       is_region_aligned(HeapWord* addr) const;
   6.283  
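
Because RegionSize is a power of two, every conversion declared here is a shift
or a mask.  The sketch below works on plain word indices rather than HeapWord*
pointers, and the 512-word region size is assumed purely for illustration.

    #include <cassert>
    #include <cstdio>

    static const size_t Log2RegionSize   = 9;
    static const size_t RegionSize       = (size_t)1 << Log2RegionSize; // words
    static const size_t RegionOffsetMask = RegionSize - 1;
    static const size_t RegionMask       = ~RegionOffsetMask;

    size_t addr_to_region_idx(size_t word) { return word >> Log2RegionSize; }
    size_t region_to_addr(size_t region)   { return region << Log2RegionSize; }
    size_t region_offset(size_t word)      { return word & RegionOffsetMask; }
    size_t region_align_down(size_t word)  { return word & RegionMask; }
    size_t region_align_up(size_t word) {
      return region_align_down(word + RegionOffsetMask);
    }
    bool is_region_aligned(size_t word)    { return region_offset(word) == 0; }

    int main() {
      const size_t word = 3 * RegionSize + 17;      // some address in region 3
      assert(addr_to_region_idx(word) == 3);
      assert(region_offset(word) == 17);
      assert(region_align_down(word) == region_to_addr(3));
      assert(region_align_up(word) == region_to_addr(4));
      assert(!is_region_aligned(word) && is_region_aligned(region_to_addr(4)));
      std::printf("region %zu, offset %zu\n",
                  addr_to_region_idx(word), region_offset(word));
      return 0;
    }
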
   6.284 -  // Analogous to chunk_offset() for blocks.
   6.285 +  // Analogous to region_offset() for blocks.
   6.286    size_t     block_offset(const HeapWord* addr) const;
   6.287    size_t     addr_to_block_idx(const HeapWord* addr) const;
   6.288    size_t     addr_to_block_idx(const oop obj) const {
   6.289 @@ -344,7 +344,7 @@
   6.290    inline HeapWord*  block_to_addr(size_t block) const;
   6.291  
   6.292    // Return the address one past the end of the partial object.
   6.293 -  HeapWord* partial_obj_end(size_t chunk_idx) const;
   6.294 +  HeapWord* partial_obj_end(size_t region_idx) const;
   6.295  
   6.296    // Return the new location of the object p after the
   6.297    // the compaction.
   6.298 @@ -353,8 +353,8 @@
   6.299    // Same as calc_new_pointer() using blocks.
   6.300    HeapWord* block_calc_new_pointer(HeapWord* addr);
   6.301  
   6.302 -  // Same as calc_new_pointer() using chunks.
   6.303 -  HeapWord* chunk_calc_new_pointer(HeapWord* addr);
   6.304 +  // Same as calc_new_pointer() using regions.
   6.305 +  HeapWord* region_calc_new_pointer(HeapWord* addr);
   6.306  
   6.307    HeapWord* calc_new_pointer(oop p) {
   6.308      return calc_new_pointer((HeapWord*) p);
   6.309 @@ -364,7 +364,7 @@
   6.310    klassOop calc_new_klass(klassOop);
   6.311  
   6.312    // Given a block returns true if the partial object for the
   6.313 -  // corresponding chunk ends in the block.  Returns false, otherwise
   6.314 +  // corresponding region ends in the block.  Returns false, otherwise
   6.315    // If there is no partial object, returns false.
   6.316    bool partial_obj_ends_in_block(size_t block_index);
   6.317  
   6.318 @@ -378,7 +378,7 @@
   6.319  
   6.320  private:
   6.321    bool initialize_block_data(size_t region_size);
   6.322 -  bool initialize_chunk_data(size_t region_size);
   6.323 +  bool initialize_region_data(size_t region_size);
   6.324    PSVirtualSpace* create_vspace(size_t count, size_t element_size);
   6.325  
   6.326  private:
   6.327 @@ -387,9 +387,9 @@
   6.328    HeapWord*       _region_end;
   6.329  #endif  // #ifdef ASSERT
   6.330  
   6.331 -  PSVirtualSpace* _chunk_vspace;
   6.332 -  ChunkData*      _chunk_data;
   6.333 -  size_t          _chunk_count;
   6.334 +  PSVirtualSpace* _region_vspace;
   6.335 +  RegionData*     _region_data;
   6.336 +  size_t          _region_count;
   6.337  
   6.338    PSVirtualSpace* _block_vspace;
   6.339    BlockData*      _block_data;
   6.340 @@ -397,64 +397,64 @@
   6.341  };
   6.342  
   6.343  inline uint
   6.344 -ParallelCompactData::ChunkData::destination_count_raw() const
   6.345 +ParallelCompactData::RegionData::destination_count_raw() const
   6.346  {
   6.347    return _dc_and_los & dc_mask;
   6.348  }
   6.349  
   6.350  inline uint
   6.351 -ParallelCompactData::ChunkData::destination_count() const
   6.352 +ParallelCompactData::RegionData::destination_count() const
   6.353  {
   6.354    return destination_count_raw() >> dc_shift;
   6.355  }
   6.356  
   6.357  inline void
   6.358 -ParallelCompactData::ChunkData::set_destination_count(uint count)
   6.359 +ParallelCompactData::RegionData::set_destination_count(uint count)
   6.360  {
   6.361    assert(count <= (dc_completed >> dc_shift), "count too large");
   6.362 -  const chunk_sz_t live_sz = (chunk_sz_t) live_obj_size();
   6.363 +  const region_sz_t live_sz = (region_sz_t) live_obj_size();
   6.364    _dc_and_los = (count << dc_shift) | live_sz;
   6.365  }
   6.366  
   6.367 -inline void ParallelCompactData::ChunkData::set_live_obj_size(size_t words)
   6.368 +inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
   6.369  {
   6.370    assert(words <= los_mask, "would overflow");
   6.371 -  _dc_and_los = destination_count_raw() | (chunk_sz_t)words;
   6.372 +  _dc_and_los = destination_count_raw() | (region_sz_t)words;
   6.373  }
   6.374  
   6.375 -inline void ParallelCompactData::ChunkData::decrement_destination_count()
   6.376 +inline void ParallelCompactData::RegionData::decrement_destination_count()
   6.377  {
   6.378    assert(_dc_and_los < dc_claimed, "already claimed");
   6.379    assert(_dc_and_los >= dc_one, "count would go negative");
   6.380    Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
   6.381  }
   6.382  
   6.383 -inline HeapWord* ParallelCompactData::ChunkData::data_location() const
   6.384 +inline HeapWord* ParallelCompactData::RegionData::data_location() const
   6.385  {
   6.386    DEBUG_ONLY(return _data_location;)
   6.387    NOT_DEBUG(return NULL;)
   6.388  }
   6.389  
   6.390 -inline HeapWord* ParallelCompactData::ChunkData::highest_ref() const
   6.391 +inline HeapWord* ParallelCompactData::RegionData::highest_ref() const
   6.392  {
   6.393    DEBUG_ONLY(return _highest_ref;)
   6.394    NOT_DEBUG(return NULL;)
   6.395  }
   6.396  
   6.397 -inline void ParallelCompactData::ChunkData::set_data_location(HeapWord* addr)
   6.398 +inline void ParallelCompactData::RegionData::set_data_location(HeapWord* addr)
   6.399  {
   6.400    DEBUG_ONLY(_data_location = addr;)
   6.401  }
   6.402  
   6.403 -inline void ParallelCompactData::ChunkData::set_completed()
   6.404 +inline void ParallelCompactData::RegionData::set_completed()
   6.405  {
   6.406    assert(claimed(), "must be claimed first");
   6.407 -  _dc_and_los = dc_completed | (chunk_sz_t) live_obj_size();
   6.408 +  _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
   6.409  }
   6.410  
   6.411 -// MT-unsafe claiming of a chunk.  Should only be used during single threaded
   6.412 +// MT-unsafe claiming of a region.  Should only be used during single threaded
   6.413  // execution.
   6.414 -inline bool ParallelCompactData::ChunkData::claim_unsafe()
   6.415 +inline bool ParallelCompactData::RegionData::claim_unsafe()
   6.416  {
   6.417    if (available()) {
   6.418      _dc_and_los |= dc_claimed;
   6.419 @@ -463,13 +463,13 @@
   6.420    return false;
   6.421  }
   6.422  
   6.423 -inline void ParallelCompactData::ChunkData::add_live_obj(size_t words)
   6.424 +inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
   6.425  {
   6.426    assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
   6.427    Atomic::add((int) words, (volatile int*) &_dc_and_los);
   6.428  }
   6.429  
   6.430 -inline void ParallelCompactData::ChunkData::set_highest_ref(HeapWord* addr)
   6.431 +inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
   6.432  {
   6.433  #ifdef ASSERT
   6.434    HeapWord* tmp = _highest_ref;
   6.435 @@ -479,7 +479,7 @@
   6.436  #endif  // #ifdef ASSERT
   6.437  }
   6.438  
   6.439 -inline bool ParallelCompactData::ChunkData::claim()
   6.440 +inline bool ParallelCompactData::RegionData::claim()
   6.441  {
   6.442    const int los = (int) live_obj_size();
   6.443    const int old = Atomic::cmpxchg(dc_claimed | los,
   6.444 @@ -487,19 +487,19 @@
   6.445    return old == los;
   6.446  }
   6.447  
   6.448 -inline ParallelCompactData::ChunkData*
   6.449 -ParallelCompactData::chunk(size_t chunk_idx) const
   6.450 +inline ParallelCompactData::RegionData*
   6.451 +ParallelCompactData::region(size_t region_idx) const
   6.452  {
   6.453 -  assert(chunk_idx <= chunk_count(), "bad arg");
   6.454 -  return _chunk_data + chunk_idx;
   6.455 +  assert(region_idx <= region_count(), "bad arg");
   6.456 +  return _region_data + region_idx;
   6.457  }
   6.458  
   6.459  inline size_t
   6.460 -ParallelCompactData::chunk(const ChunkData* const chunk_ptr) const
   6.461 +ParallelCompactData::region(const RegionData* const region_ptr) const
   6.462  {
   6.463 -  assert(chunk_ptr >= _chunk_data, "bad arg");
   6.464 -  assert(chunk_ptr <= _chunk_data + chunk_count(), "bad arg");
   6.465 -  return pointer_delta(chunk_ptr, _chunk_data, sizeof(ChunkData));
   6.466 +  assert(region_ptr >= _region_data, "bad arg");
   6.467 +  assert(region_ptr <= _region_data + region_count(), "bad arg");
   6.468 +  return pointer_delta(region_ptr, _region_data, sizeof(RegionData));
   6.469  }
   6.470  
   6.471  inline ParallelCompactData::BlockData*
   6.472 @@ -509,68 +509,69 @@
   6.473  }
   6.474  
   6.475  inline size_t
   6.476 -ParallelCompactData::chunk_offset(const HeapWord* addr) const
   6.477 +ParallelCompactData::region_offset(const HeapWord* addr) const
   6.478  {
   6.479    assert(addr >= _region_start, "bad addr");
   6.480    assert(addr <= _region_end, "bad addr");
   6.481 -  return (size_t(addr) & ChunkAddrOffsetMask) >> LogHeapWordSize;
   6.482 +  return (size_t(addr) & RegionAddrOffsetMask) >> LogHeapWordSize;
   6.483  }
   6.484  
   6.485  inline size_t
   6.486 -ParallelCompactData::addr_to_chunk_idx(const HeapWord* addr) const
   6.487 +ParallelCompactData::addr_to_region_idx(const HeapWord* addr) const
   6.488  {
   6.489    assert(addr >= _region_start, "bad addr");
   6.490    assert(addr <= _region_end, "bad addr");
   6.491 -  return pointer_delta(addr, _region_start) >> Log2ChunkSize;
   6.492 +  return pointer_delta(addr, _region_start) >> Log2RegionSize;
   6.493  }
   6.494  
   6.495 -inline ParallelCompactData::ChunkData*
   6.496 -ParallelCompactData::addr_to_chunk_ptr(const HeapWord* addr) const
   6.497 +inline ParallelCompactData::RegionData*
   6.498 +ParallelCompactData::addr_to_region_ptr(const HeapWord* addr) const
   6.499  {
   6.500 -  return chunk(addr_to_chunk_idx(addr));
   6.501 +  return region(addr_to_region_idx(addr));
   6.502  }
   6.503  
   6.504  inline HeapWord*
   6.505 -ParallelCompactData::chunk_to_addr(size_t chunk) const
   6.506 +ParallelCompactData::region_to_addr(size_t region) const
   6.507  {
   6.508 -  assert(chunk <= _chunk_count, "chunk out of range");
   6.509 -  return _region_start + (chunk << Log2ChunkSize);
   6.510 +  assert(region <= _region_count, "region out of range");
   6.511 +  return _region_start + (region << Log2RegionSize);
   6.512  }
   6.513  
   6.514  inline HeapWord*
   6.515 -ParallelCompactData::chunk_to_addr(const ChunkData* chunk) const
   6.516 +ParallelCompactData::region_to_addr(const RegionData* region) const
   6.517  {
   6.518 -  return chunk_to_addr(pointer_delta(chunk, _chunk_data, sizeof(ChunkData)));
   6.519 +  return region_to_addr(pointer_delta(region, _region_data,
   6.520 +                                      sizeof(RegionData)));
   6.521  }
   6.522  
   6.523  inline HeapWord*
   6.524 -ParallelCompactData::chunk_to_addr(size_t chunk, size_t offset) const
   6.525 +ParallelCompactData::region_to_addr(size_t region, size_t offset) const
   6.526  {
   6.527 -  assert(chunk <= _chunk_count, "chunk out of range");
   6.528 -  assert(offset < ChunkSize, "offset too big");  // This may be too strict.
   6.529 -  return chunk_to_addr(chunk) + offset;
   6.530 +  assert(region <= _region_count, "region out of range");
   6.531 +  assert(offset < RegionSize, "offset too big");  // This may be too strict.
   6.532 +  return region_to_addr(region) + offset;
   6.533  }
   6.534  
   6.535  inline HeapWord*
   6.536 -ParallelCompactData::chunk_align_down(HeapWord* addr) const
   6.537 +ParallelCompactData::region_align_down(HeapWord* addr) const
   6.538  {
   6.539    assert(addr >= _region_start, "bad addr");
   6.540 -  assert(addr < _region_end + ChunkSize, "bad addr");
   6.541 -  return (HeapWord*)(size_t(addr) & ChunkAddrMask);
   6.542 +  assert(addr < _region_end + RegionSize, "bad addr");
   6.543 +  return (HeapWord*)(size_t(addr) & RegionAddrMask);
   6.544  }
   6.545  
   6.546  inline HeapWord*
   6.547 -ParallelCompactData::chunk_align_up(HeapWord* addr) const
   6.548 +ParallelCompactData::region_align_up(HeapWord* addr) const
   6.549  {
   6.550    assert(addr >= _region_start, "bad addr");
   6.551    assert(addr <= _region_end, "bad addr");
   6.552 -  return chunk_align_down(addr + ChunkSizeOffsetMask);
   6.553 +  return region_align_down(addr + RegionSizeOffsetMask);
   6.554  }
   6.555  
   6.556  inline bool
   6.557 -ParallelCompactData::is_chunk_aligned(HeapWord* addr) const
   6.558 +ParallelCompactData::is_region_aligned(HeapWord* addr) const
   6.559  {
   6.560 -  return chunk_offset(addr) == 0;
   6.561 +  return region_offset(addr) == 0;
   6.562  }
   6.563  
   6.564  inline size_t
   6.565 @@ -692,40 +693,39 @@
   6.566    // ParallelCompactData::BlockData::blk_ofs_t _live_data_left;
   6.567    size_t    _live_data_left;
   6.568    size_t    _cur_block;
   6.569 -  HeapWord* _chunk_start;
   6.570 -  HeapWord* _chunk_end;
   6.571 -  size_t    _chunk_index;
   6.572 +  HeapWord* _region_start;
   6.573 +  HeapWord* _region_end;
   6.574 +  size_t    _region_index;
   6.575  
   6.576   public:
   6.577    BitBlockUpdateClosure(ParMarkBitMap* mbm,
   6.578                          ParCompactionManager* cm,
   6.579 -                        size_t chunk_index);
   6.580 +                        size_t region_index);
   6.581  
   6.582    size_t cur_block() { return _cur_block; }
   6.583 -  size_t chunk_index() { return _chunk_index; }
   6.584 +  size_t region_index() { return _region_index; }
   6.585    size_t live_data_left() { return _live_data_left; }
   6.586    // Returns true the first bit in the current block (cur_block) is
   6.587    // a start bit.
   6.588 -  // Returns true if the current block is within the chunk for the closure;
   6.589 -  bool chunk_contains_cur_block();
   6.590 +  // Returns true if the current block is within the region for the closure;
   6.591 +  bool region_contains_cur_block();
   6.592  
   6.593 -  // Set the chunk index and related chunk values for
   6.594 -  // a new chunk.
   6.595 -  void reset_chunk(size_t chunk_index);
   6.596 +  // Set the region index and related region values for
   6.597 +  // a new region.
   6.598 +  void reset_region(size_t region_index);
   6.599  
   6.600    virtual IterationStatus do_addr(HeapWord* addr, size_t words);
   6.601  };
   6.602  
   6.603 -// The UseParallelOldGC collector is a stop-the-world garbage
   6.604 -// collector that does parts of the collection using parallel threads.
   6.605 -// The collection includes the tenured generation and the young
   6.606 -// generation.  The permanent generation is collected at the same
   6.607 -// time as the other two generations but the permanent generation
   6.608 -// is collect by a single GC thread.  The permanent generation is
   6.609 -// collected serially because of the requirement that during the
   6.610 -// processing of a klass AAA, any objects reference by AAA must
   6.611 -// already have been processed.  This requirement is enforced by
   6.612 -// a left (lower address) to right (higher address) sliding compaction.
   6.613 +// The UseParallelOldGC collector is a stop-the-world garbage collector that
   6.614 +// does parts of the collection using parallel threads.  The collection includes
   6.615 +// the tenured generation and the young generation.  The permanent generation is
   6.616 +// collected at the same time as the other two generations but the permanent
   6.617 +// generation is collect by a single GC thread.  The permanent generation is
   6.618 +// collected serially because of the requirement that during the processing of a
   6.619 +// klass AAA, any objects reference by AAA must already have been processed.
   6.620 +// This requirement is enforced by a left (lower address) to right (higher
   6.621 +// address) sliding compaction.
   6.622  //
   6.623  // There are four phases of the collection.
   6.624  //
   6.625 @@ -740,80 +740,75 @@
   6.626  //      - move the objects to their destination
   6.627  //      - update some references and reinitialize some variables
   6.628  //
   6.629 -// These three phases are invoked in PSParallelCompact::invoke_no_policy().
   6.630 -// The marking phase is implemented in PSParallelCompact::marking_phase()
   6.631 -// and does a complete marking of the heap.
   6.632 -// The summary phase is implemented in PSParallelCompact::summary_phase().
   6.633 -// The move and update phase is implemented in PSParallelCompact::compact().
   6.634 +// These three phases are invoked in PSParallelCompact::invoke_no_policy().  The
   6.635 +// marking phase is implemented in PSParallelCompact::marking_phase() and does a
   6.636 +// complete marking of the heap.  The summary phase is implemented in
   6.637 +// PSParallelCompact::summary_phase().  The move and update phase is implemented
   6.638 +// in PSParallelCompact::compact().
   6.639  //
   6.640 -// A space that is being collected is divided into chunks and with
   6.641 -// each chunk is associated an object of type ParallelCompactData.
   6.642 -// Each chunk is of a fixed size and typically will contain more than
   6.643 -// 1 object and may have parts of objects at the front and back of the
   6.644 -// chunk.
   6.645 +// A space that is being collected is divided into regions and with each region
   6.646 +// is associated an object of type ParallelCompactData.  Each region is of a
   6.647 +// fixed size and typically will contain more than 1 object and may have parts
   6.648 +// of objects at the front and back of the region.
   6.649  //
   6.650 -// chunk            -----+---------------------+----------
   6.651 +// region            -----+---------------------+----------
   6.652  // objects covered   [ AAA  )[ BBB )[ CCC   )[ DDD     )
   6.653  //
   6.654 -// The marking phase does a complete marking of all live objects in the
   6.655 -// heap.  The marking also compiles the size of the data for
   6.656 -// all live objects covered by the chunk.  This size includes the
   6.657 -// part of any live object spanning onto the chunk (part of AAA
   6.658 -// if it is live) from the front, all live objects contained in the chunk
   6.659 -// (BBB and/or CCC if they are live), and the part of any live objects
   6.660 -// covered by the chunk that extends off the chunk (part of DDD if it is
   6.661 -// live).  The marking phase uses multiple GC threads and marking is
   6.662 -// done in a bit array of type ParMarkBitMap.  The marking of the
   6.663 -// bit map is done atomically as is the accumulation of the size of the
   6.664 -// live objects covered by a chunk.
   6.665 +// The marking phase does a complete marking of all live objects in the heap.
   6.666 +// The marking also compiles the size of the data for all live objects covered
   6.667 +// by the region.  This size includes the part of any live object spanning onto
   6.668 +// the region (part of AAA if it is live) from the front, all live objects
   6.669 +// contained in the region (BBB and/or CCC if they are live), and the part of
   6.670 +// any live objects covered by the region that extends off the region (part of
   6.671 +// DDD if it is live).  The marking phase uses multiple GC threads and marking
   6.672 +// is done in a bit array of type ParMarkBitMap.  The marking of the bit map is
   6.673 +// done atomically as is the accumulation of the size of the live objects
   6.674 +// covered by a region.
   6.675  //
   6.676 -// The summary phase calculates the total live data to the left of
   6.677 -// each chunk XXX.  Based on that total and the bottom of the space,
   6.678 -// it can calculate the starting location of the live data in XXX.
   6.679 -// The summary phase calculates for each chunk XXX quantites such as
   6.680 +// The summary phase calculates the total live data to the left of each region
   6.681 +// XXX.  Based on that total and the bottom of the space, it can calculate the
   6.682 +// starting location of the live data in XXX.  The summary phase calculates for
    6.683 +// each region XXX quantities such as
   6.684  //
   6.685 -//      - the amount of live data at the beginning of a chunk from an object
   6.686 -//      entering the chunk.
   6.687 -//      - the location of the first live data on the chunk
   6.688 -//      - a count of the number of chunks receiving live data from XXX.
   6.689 +//      - the amount of live data at the beginning of a region from an object
   6.690 +//        entering the region.
   6.691 +//      - the location of the first live data on the region
   6.692 +//      - a count of the number of regions receiving live data from XXX.
   6.693  //
   6.694  // See ParallelCompactData for precise details.  The summary phase also
   6.695 -// calculates the dense prefix for the compaction.  The dense prefix
   6.696 -// is a portion at the beginning of the space that is not moved.  The
   6.697 -// objects in the dense prefix do need to have their object references
   6.698 -// updated.  See method summarize_dense_prefix().
   6.699 +// calculates the dense prefix for the compaction.  The dense prefix is a
   6.700 +// portion at the beginning of the space that is not moved.  The objects in the
   6.701 +// dense prefix do need to have their object references updated.  See method
   6.702 +// summarize_dense_prefix().
   6.703  //
   6.704  // The summary phase is done using 1 GC thread.
   6.705  //
   6.706 -// The compaction phase moves objects to their new location and updates
   6.707 -// all references in the object.
   6.708 +// The compaction phase moves objects to their new location and updates all
   6.709 +// references in the object.
   6.710  //
   6.711 -// A current exception is that objects that cross a chunk boundary
   6.712 -// are moved but do not have their references updated.  References are
   6.713 -// not updated because it cannot easily be determined if the klass
   6.714 -// pointer KKK for the object AAA has been updated.  KKK likely resides
   6.715 -// in a chunk to the left of the chunk containing AAA.  These AAA's
   6.716 -// have there references updated at the end in a clean up phase.
   6.717 -// See the method PSParallelCompact::update_deferred_objects().  An
   6.718 -// alternate strategy is being investigated for this deferral of updating.
   6.719 +// A current exception is that objects that cross a region boundary are moved
   6.720 +// but do not have their references updated.  References are not updated because
   6.721 +// it cannot easily be determined if the klass pointer KKK for the object AAA
   6.722 +// has been updated.  KKK likely resides in a region to the left of the region
    6.723 +// containing AAA.  These AAA's have their references updated at the end in a
   6.724 +// clean up phase.  See the method PSParallelCompact::update_deferred_objects().
   6.725 +// An alternate strategy is being investigated for this deferral of updating.
   6.726  //
   6.727 -// Compaction is done on a chunk basis.  A chunk that is ready to be
   6.728 -// filled is put on a ready list and GC threads take chunk off the list
   6.729 -// and fill them.  A chunk is ready to be filled if it
   6.730 -// empty of live objects.  Such a chunk may have been initially
   6.731 -// empty (only contained
   6.732 -// dead objects) or may have had all its live objects copied out already.
   6.733 -// A chunk that compacts into itself is also ready for filling.  The
   6.734 -// ready list is initially filled with empty chunks and chunks compacting
   6.735 -// into themselves.  There is always at least 1 chunk that can be put on
   6.736 -// the ready list.  The chunks are atomically added and removed from
   6.737 -// the ready list.
   6.738 -//
   6.739 +// Compaction is done on a region basis.  A region that is ready to be filled is
    6.740 +// put on a ready list and GC threads take regions off the list and fill them.  A
    6.741 +// region is ready to be filled if it is empty of live objects.  Such a region may
   6.742 +// have been initially empty (only contained dead objects) or may have had all
   6.743 +// its live objects copied out already.  A region that compacts into itself is
   6.744 +// also ready for filling.  The ready list is initially filled with empty
   6.745 +// regions and regions compacting into themselves.  There is always at least 1
   6.746 +// region that can be put on the ready list.  The regions are atomically added
   6.747 +// and removed from the ready list.
   6.748 +
   6.749  class PSParallelCompact : AllStatic {
   6.750   public:
   6.751    // Convenient access to type names.
   6.752    typedef ParMarkBitMap::idx_t idx_t;
   6.753 -  typedef ParallelCompactData::ChunkData ChunkData;
   6.754 +  typedef ParallelCompactData::RegionData RegionData;
   6.755    typedef ParallelCompactData::BlockData BlockData;
   6.756  
   6.757    typedef enum {
   6.758 @@ -977,26 +972,26 @@
   6.759    // not reclaimed).
   6.760    static double dead_wood_limiter(double density, size_t min_percent);
   6.761  
   6.762 -  // Find the first (left-most) chunk in the range [beg, end) that has at least
   6.763 +  // Find the first (left-most) region in the range [beg, end) that has at least
   6.764    // dead_words of dead space to the left.  The argument beg must be the first
   6.765 -  // chunk in the space that is not completely live.
   6.766 -  static ChunkData* dead_wood_limit_chunk(const ChunkData* beg,
   6.767 -                                          const ChunkData* end,
   6.768 -                                          size_t dead_words);
   6.769 +  // region in the space that is not completely live.
   6.770 +  static RegionData* dead_wood_limit_region(const RegionData* beg,
   6.771 +                                            const RegionData* end,
   6.772 +                                            size_t dead_words);
   6.773  
   6.774 -  // Return a pointer to the first chunk in the range [beg, end) that is not
   6.775 +  // Return a pointer to the first region in the range [beg, end) that is not
   6.776    // completely full.
   6.777 -  static ChunkData* first_dead_space_chunk(const ChunkData* beg,
   6.778 -                                           const ChunkData* end);
   6.779 +  static RegionData* first_dead_space_region(const RegionData* beg,
   6.780 +                                             const RegionData* end);
   6.781  
   6.782    // Return a value indicating the benefit or 'yield' if the compacted region
   6.783    // were to start (or equivalently if the dense prefix were to end) at the
   6.784 -  // candidate chunk.  Higher values are better.
   6.785 +  // candidate region.  Higher values are better.
   6.786    //
   6.787    // The value is based on the amount of space reclaimed vs. the costs of (a)
   6.788    // updating references in the dense prefix plus (b) copying objects and
   6.789    // updating references in the compacted region.
   6.790 -  static inline double reclaimed_ratio(const ChunkData* const candidate,
   6.791 +  static inline double reclaimed_ratio(const RegionData* const candidate,
   6.792                                         HeapWord* const bottom,
   6.793                                         HeapWord* const top,
   6.794                                         HeapWord* const new_top);
   6.795 @@ -1005,9 +1000,9 @@
   6.796    static HeapWord* compute_dense_prefix(const SpaceId id,
   6.797                                          bool maximum_compaction);
   6.798  
   6.799 -  // Return true if dead space crosses onto the specified Chunk; bit must be the
   6.800 -  // bit index corresponding to the first word of the Chunk.
   6.801 -  static inline bool dead_space_crosses_boundary(const ChunkData* chunk,
   6.802 +  // Return true if dead space crosses onto the specified Region; bit must be
   6.803 +  // the bit index corresponding to the first word of the Region.
   6.804 +  static inline bool dead_space_crosses_boundary(const RegionData* region,
   6.805                                                   idx_t bit);
   6.806  
   6.807    // Summary phase utility routine to fill dead space (if any) at the dense
   6.808 @@ -1038,16 +1033,16 @@
   6.809    static void compact_perm(ParCompactionManager* cm);
   6.810    static void compact();
   6.811  
   6.812 -  // Add available chunks to the stack and draining tasks to the task queue.
   6.813 -  static void enqueue_chunk_draining_tasks(GCTaskQueue* q,
   6.814 -                                           uint parallel_gc_threads);
   6.815 +  // Add available regions to the stack and draining tasks to the task queue.
   6.816 +  static void enqueue_region_draining_tasks(GCTaskQueue* q,
   6.817 +                                            uint parallel_gc_threads);
   6.818  
   6.819    // Add dense prefix update tasks to the task queue.
   6.820    static void enqueue_dense_prefix_tasks(GCTaskQueue* q,
   6.821                                           uint parallel_gc_threads);
   6.822  
   6.823 -  // Add chunk stealing tasks to the task queue.
   6.824 -  static void enqueue_chunk_stealing_tasks(
   6.825 +  // Add region stealing tasks to the task queue.
   6.826 +  static void enqueue_region_stealing_tasks(
   6.827                                         GCTaskQueue* q,
   6.828                                         ParallelTaskTerminator* terminator_ptr,
   6.829                                         uint parallel_gc_threads);
   6.830 @@ -1154,56 +1149,56 @@
   6.831    // Move and update the live objects in the specified space.
   6.832    static void move_and_update(ParCompactionManager* cm, SpaceId space_id);
   6.833  
   6.834 -  // Process the end of the given chunk range in the dense prefix.
   6.835 +  // Process the end of the given region range in the dense prefix.
   6.836    // This includes saving any object not updated.
   6.837 -  static void dense_prefix_chunks_epilogue(ParCompactionManager* cm,
   6.838 -                                           size_t chunk_start_index,
   6.839 -                                           size_t chunk_end_index,
   6.840 -                                           idx_t exiting_object_offset,
   6.841 -                                           idx_t chunk_offset_start,
   6.842 -                                           idx_t chunk_offset_end);
   6.843 +  static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
   6.844 +                                            size_t region_start_index,
   6.845 +                                            size_t region_end_index,
   6.846 +                                            idx_t exiting_object_offset,
   6.847 +                                            idx_t region_offset_start,
   6.848 +                                            idx_t region_offset_end);
   6.849  
   6.850 -  // Update a chunk in the dense prefix.  For each live object
   6.851 -  // in the chunk, update it's interior references.  For each
   6.852 +  // Update a region in the dense prefix.  For each live object
    6.853 +  // in the region, update its interior references.  For each
   6.854    // dead object, fill it with deadwood. Dead space at the end
   6.855 -  // of a chunk range will be filled to the start of the next
   6.856 -  // live object regardless of the chunk_index_end.  None of the
   6.857 +  // of a region range will be filled to the start of the next
   6.858 +  // live object regardless of the region_index_end.  None of the
   6.859    // objects in the dense prefix move and dead space is dead
   6.860    // (holds only dead objects that don't need any processing), so
   6.861    // dead space can be filled in any order.
   6.862    static void update_and_deadwood_in_dense_prefix(ParCompactionManager* cm,
   6.863                                                    SpaceId space_id,
   6.864 -                                                  size_t chunk_index_start,
   6.865 -                                                  size_t chunk_index_end);
   6.866 +                                                  size_t region_index_start,
   6.867 +                                                  size_t region_index_end);
   6.868  
   6.869    // Return the address of the count + 1st live word in the range [beg, end).
   6.870    static HeapWord* skip_live_words(HeapWord* beg, HeapWord* end, size_t count);
   6.871  
   6.872    // Return the address of the word to be copied to dest_addr, which must be
   6.873 -  // aligned to a chunk boundary.
   6.874 +  // aligned to a region boundary.
   6.875    static HeapWord* first_src_addr(HeapWord* const dest_addr,
   6.876 -                                  size_t src_chunk_idx);
   6.877 +                                  size_t src_region_idx);
   6.878  
   6.879 -  // Determine the next source chunk, set closure.source() to the start of the
   6.880 -  // new chunk return the chunk index.  Parameter end_addr is the address one
   6.881 +  // Determine the next source region, set closure.source() to the start of the
    6.882 +  // new region and return the region index.  Parameter end_addr is the address one
   6.883    // beyond the end of source range just processed.  If necessary, switch to a
   6.884    // new source space and set src_space_id (in-out parameter) and src_space_top
   6.885    // (out parameter) accordingly.
   6.886 -  static size_t next_src_chunk(MoveAndUpdateClosure& closure,
   6.887 -                               SpaceId& src_space_id,
   6.888 -                               HeapWord*& src_space_top,
   6.889 -                               HeapWord* end_addr);
   6.890 +  static size_t next_src_region(MoveAndUpdateClosure& closure,
   6.891 +                                SpaceId& src_space_id,
   6.892 +                                HeapWord*& src_space_top,
   6.893 +                                HeapWord* end_addr);
   6.894  
   6.895 -  // Decrement the destination count for each non-empty source chunk in the
   6.896 -  // range [beg_chunk, chunk(chunk_align_up(end_addr))).
   6.897 +  // Decrement the destination count for each non-empty source region in the
   6.898 +  // range [beg_region, region(region_align_up(end_addr))).
   6.899    static void decrement_destination_counts(ParCompactionManager* cm,
   6.900 -                                           size_t beg_chunk,
   6.901 +                                           size_t beg_region,
   6.902                                             HeapWord* end_addr);
   6.903  
   6.904 -  // Fill a chunk, copying objects from one or more source chunks.
   6.905 -  static void fill_chunk(ParCompactionManager* cm, size_t chunk_idx);
   6.906 -  static void fill_and_update_chunk(ParCompactionManager* cm, size_t chunk) {
   6.907 -    fill_chunk(cm, chunk);
   6.908 +  // Fill a region, copying objects from one or more source regions.
   6.909 +  static void fill_region(ParCompactionManager* cm, size_t region_idx);
   6.910 +  static void fill_and_update_region(ParCompactionManager* cm, size_t region) {
   6.911 +    fill_region(cm, region);
   6.912    }
   6.913  
   6.914    // Update the deferred objects in the space.
   6.915 @@ -1259,7 +1254,7 @@
   6.916  #ifndef PRODUCT
   6.917    // Debugging support.
   6.918    static const char* space_names[last_space_id];
   6.919 -  static void print_chunk_ranges();
   6.920 +  static void print_region_ranges();
   6.921    static void print_dense_prefix_stats(const char* const algorithm,
   6.922                                         const SpaceId id,
   6.923                                         const bool maximum_compaction,
   6.924 @@ -1267,7 +1262,7 @@
   6.925  #endif  // #ifndef PRODUCT
   6.926  
   6.927  #ifdef  ASSERT
   6.928 -  // Verify that all the chunks have been emptied.
   6.929 +  // Verify that all the regions have been emptied.
   6.930    static void verify_complete(SpaceId space_id);
   6.931  #endif  // #ifdef ASSERT
   6.932  };
   6.933 @@ -1376,17 +1371,17 @@
   6.934  }
   6.935  
   6.936  inline bool
   6.937 -PSParallelCompact::dead_space_crosses_boundary(const ChunkData* chunk,
   6.938 +PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
   6.939                                                 idx_t bit)
   6.940  {
   6.941 -  assert(bit > 0, "cannot call this for the first bit/chunk");
   6.942 -  assert(_summary_data.chunk_to_addr(chunk) == _mark_bitmap.bit_to_addr(bit),
   6.943 +  assert(bit > 0, "cannot call this for the first bit/region");
   6.944 +  assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
   6.945           "sanity check");
   6.946  
   6.947    // Dead space crosses the boundary if (1) a partial object does not extend
   6.948 -  // onto the chunk, (2) an object does not start at the beginning of the chunk,
   6.949 -  // and (3) an object does not end at the end of the prior chunk.
   6.950 -  return chunk->partial_obj_size() == 0 &&
   6.951 +  // onto the region, (2) an object does not start at the beginning of the
   6.952 +  // region, and (3) an object does not end at the end of the prior region.
   6.953 +  return region->partial_obj_size() == 0 &&
   6.954      !_mark_bitmap.is_obj_beg(bit) &&
   6.955      !_mark_bitmap.is_obj_end(bit - 1);
   6.956  }
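
For reference, the renamed helpers above (addr_to_region_idx, region_to_addr, region_align_down/up) reduce to power-of-two shift-and-mask arithmetic over a fixed region size.  A minimal standalone sketch of that arithmetic follows; it is not part of this changeset, and the 512-word region size and every name ending in _sketch are assumptions made only for illustration, not the constants ParallelCompactData actually uses.

#include <cassert>
#include <cstddef>

// Assumed, for illustration only: 512-word regions.
const size_t Log2RegionSize_sketch  = 9;
const size_t RegionSizeWords_sketch = size_t(1) << Log2RegionSize_sketch;

// addr_to_region_idx: the word offset from the start of the covered space,
// shifted down by the log of the region size.
size_t addr_to_region_idx_sketch(size_t word_offset) {
  return word_offset >> Log2RegionSize_sketch;
}

// region_to_addr: the inverse mapping, back to the first word of the region.
size_t region_to_addr_sketch(size_t region_idx) {
  return region_idx << Log2RegionSize_sketch;
}

// region_align_down/up, expressed on word offsets; the real code applies an
// address mask (RegionAddrMask) to HeapWord* values instead.
size_t region_align_down_sketch(size_t word_offset) {
  return word_offset & ~(RegionSizeWords_sketch - 1);
}
size_t region_align_up_sketch(size_t word_offset) {
  return region_align_down_sketch(word_offset + RegionSizeWords_sketch - 1);
}

int main() {
  assert(addr_to_region_idx_sketch(1000) == 1);   // word 1000 lies in region 1
  assert(region_to_addr_sketch(1) == 512);        // region 1 starts at word 512
  assert(region_align_down_sketch(1000) == 512);
  assert(region_align_up_sketch(513) == 1024);    // rounds up to the next boundary
  return 0;
}
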
     7.1 --- a/src/share/vm/runtime/globals.hpp	Tue Sep 30 11:49:31 2008 -0700
     7.2 +++ b/src/share/vm/runtime/globals.hpp	Tue Sep 30 12:20:22 2008 -0700
     7.3 @@ -1157,9 +1157,9 @@
     7.4            "In the Parallel Old garbage collector use parallel dense"        \
     7.5            " prefix update")                                                 \
     7.6                                                                              \
     7.7 -  develop(bool, UseParallelOldGCChunkPointerCalc, true,                     \
     7.8 -          "In the Parallel Old garbage collector use chucks to calculate"   \
     7.9 -          " new object locations")                                          \
    7.10 +  develop(bool, UseParallelOldGCRegionPointerCalc, true,                    \
    7.11 +          "In the Parallel Old garbage collector use regions to calculate"  \
     7.12 +          " new object locations")                                          \
    7.13                                                                              \
    7.14    product(uintx, HeapMaximumCompactionInterval, 20,                         \
    7.15            "How often should we maximally compact the heap (not allowing "   \
    7.16 @@ -1195,8 +1195,8 @@
    7.17    develop(bool, ParallelOldMTUnsafeUpdateLiveData, false,                   \
    7.18            "Use the Parallel Old MT unsafe in update of live size")          \
    7.19                                                                              \
    7.20 -  develop(bool, TraceChunkTasksQueuing, false,                              \
    7.21 -          "Trace the queuing of the chunk tasks")                           \
    7.22 +  develop(bool, TraceRegionTasksQueuing, false,                             \
    7.23 +          "Trace the queuing of the region tasks")                          \
    7.24                                                                              \
    7.25    product(uintx, ParallelMarkingThreads, 0,                                 \
    7.26            "Number of marking threads concurrent gc will use")               \
     8.1 --- a/src/share/vm/utilities/taskqueue.cpp	Tue Sep 30 11:49:31 2008 -0700
     8.2 +++ b/src/share/vm/utilities/taskqueue.cpp	Tue Sep 30 12:20:22 2008 -0700
     8.3 @@ -109,72 +109,72 @@
     8.4    }
     8.5  }
     8.6  
     8.7 -bool ChunkTaskQueueWithOverflow::is_empty() {
     8.8 -  return (_chunk_queue.size() == 0) &&
     8.9 +bool RegionTaskQueueWithOverflow::is_empty() {
    8.10 +  return (_region_queue.size() == 0) &&
    8.11           (_overflow_stack->length() == 0);
    8.12  }
    8.13  
    8.14 -bool ChunkTaskQueueWithOverflow::stealable_is_empty() {
    8.15 -  return _chunk_queue.size() == 0;
    8.16 +bool RegionTaskQueueWithOverflow::stealable_is_empty() {
    8.17 +  return _region_queue.size() == 0;
    8.18  }
    8.19  
    8.20 -bool ChunkTaskQueueWithOverflow::overflow_is_empty() {
    8.21 +bool RegionTaskQueueWithOverflow::overflow_is_empty() {
    8.22    return _overflow_stack->length() == 0;
    8.23  }
    8.24  
    8.25 -void ChunkTaskQueueWithOverflow::initialize() {
    8.26 -  _chunk_queue.initialize();
    8.27 +void RegionTaskQueueWithOverflow::initialize() {
    8.28 +  _region_queue.initialize();
    8.29    assert(_overflow_stack == 0, "Creating memory leak");
    8.30    _overflow_stack =
    8.31 -    new (ResourceObj::C_HEAP) GrowableArray<ChunkTask>(10, true);
    8.32 +    new (ResourceObj::C_HEAP) GrowableArray<RegionTask>(10, true);
    8.33  }
    8.34  
    8.35 -void ChunkTaskQueueWithOverflow::save(ChunkTask t) {
    8.36 -  if (TraceChunkTasksQueuing && Verbose) {
    8.37 +void RegionTaskQueueWithOverflow::save(RegionTask t) {
    8.38 +  if (TraceRegionTasksQueuing && Verbose) {
    8.39      gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
    8.40    }
    8.41 -  if(!_chunk_queue.push(t)) {
    8.42 +  if(!_region_queue.push(t)) {
    8.43      _overflow_stack->push(t);
    8.44    }
    8.45  }
    8.46  
    8.47 -// Note that using this method will retrieve all chunks
    8.48 +// Note that using this method will retrieve all regions
    8.49  // that have been saved but that it will always check
    8.50  // the overflow stack.  It may be more efficient to
    8.51  // check the stealable queue and the overflow stack
    8.52  // separately.
    8.53 -bool ChunkTaskQueueWithOverflow::retrieve(ChunkTask& chunk_task) {
    8.54 -  bool result = retrieve_from_overflow(chunk_task);
    8.55 +bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) {
    8.56 +  bool result = retrieve_from_overflow(region_task);
    8.57    if (!result) {
    8.58 -    result = retrieve_from_stealable_queue(chunk_task);
    8.59 +    result = retrieve_from_stealable_queue(region_task);
    8.60    }
    8.61 -  if (TraceChunkTasksQueuing && Verbose && result) {
    8.62 +  if (TraceRegionTasksQueuing && Verbose && result) {
    8.63      gclog_or_tty->print_cr("  CTQ: retrieve " PTR_FORMAT, result);
    8.64    }
    8.65    return result;
    8.66  }
    8.67  
    8.68 -bool ChunkTaskQueueWithOverflow::retrieve_from_stealable_queue(
    8.69 -                                   ChunkTask& chunk_task) {
    8.70 -  bool result = _chunk_queue.pop_local(chunk_task);
    8.71 -  if (TraceChunkTasksQueuing && Verbose) {
    8.72 -    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, chunk_task);
    8.73 +bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue(
    8.74 +                                   RegionTask& region_task) {
    8.75 +  bool result = _region_queue.pop_local(region_task);
    8.76 +  if (TraceRegionTasksQueuing && Verbose) {
    8.77 +    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
    8.78    }
    8.79    return result;
    8.80  }
    8.81  
    8.82 -bool ChunkTaskQueueWithOverflow::retrieve_from_overflow(
    8.83 -                                        ChunkTask& chunk_task) {
    8.84 +bool
    8.85 +RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
    8.86    bool result;
    8.87    if (!_overflow_stack->is_empty()) {
    8.88 -    chunk_task = _overflow_stack->pop();
    8.89 +    region_task = _overflow_stack->pop();
    8.90      result = true;
    8.91    } else {
    8.92 -    chunk_task = (ChunkTask) NULL;
    8.93 +    region_task = (RegionTask) NULL;
    8.94      result = false;
    8.95    }
    8.96 -  if (TraceChunkTasksQueuing && Verbose) {
    8.97 -    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, chunk_task);
    8.98 +  if (TraceRegionTasksQueuing && Verbose) {
    8.99 +    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
   8.100    }
   8.101    return result;
   8.102  }
     9.1 --- a/src/share/vm/utilities/taskqueue.hpp	Tue Sep 30 11:49:31 2008 -0700
     9.2 +++ b/src/share/vm/utilities/taskqueue.hpp	Tue Sep 30 12:20:22 2008 -0700
     9.3 @@ -557,32 +557,32 @@
     9.4  typedef GenericTaskQueue<StarTask>     OopStarTaskQueue;
     9.5  typedef GenericTaskQueueSet<StarTask>  OopStarTaskQueueSet;
     9.6  
     9.7 -typedef size_t ChunkTask;  // index for chunk
     9.8 -typedef GenericTaskQueue<ChunkTask>    ChunkTaskQueue;
     9.9 -typedef GenericTaskQueueSet<ChunkTask> ChunkTaskQueueSet;
    9.10 +typedef size_t RegionTask;  // index for region
    9.11 +typedef GenericTaskQueue<RegionTask>    RegionTaskQueue;
    9.12 +typedef GenericTaskQueueSet<RegionTask> RegionTaskQueueSet;
    9.13  
    9.14 -class ChunkTaskQueueWithOverflow: public CHeapObj {
    9.15 +class RegionTaskQueueWithOverflow: public CHeapObj {
    9.16   protected:
    9.17 -  ChunkTaskQueue              _chunk_queue;
    9.18 -  GrowableArray<ChunkTask>*   _overflow_stack;
    9.19 +  RegionTaskQueue              _region_queue;
    9.20 +  GrowableArray<RegionTask>*   _overflow_stack;
    9.21  
    9.22   public:
    9.23 -  ChunkTaskQueueWithOverflow() : _overflow_stack(NULL) {}
    9.24 +  RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
    9.25    // Initialize both stealable queue and overflow
    9.26    void initialize();
    9.27    // Save first to stealable queue and then to overflow
    9.28 -  void save(ChunkTask t);
    9.29 +  void save(RegionTask t);
    9.30    // Retrieve first from overflow and then from stealable queue
    9.31 -  bool retrieve(ChunkTask& chunk_index);
    9.32 +  bool retrieve(RegionTask& region_index);
    9.33    // Retrieve from stealable queue
    9.34 -  bool retrieve_from_stealable_queue(ChunkTask& chunk_index);
    9.35 +  bool retrieve_from_stealable_queue(RegionTask& region_index);
    9.36    // Retrieve from overflow
    9.37 -  bool retrieve_from_overflow(ChunkTask& chunk_index);
    9.38 +  bool retrieve_from_overflow(RegionTask& region_index);
    9.39    bool is_empty();
    9.40    bool stealable_is_empty();
    9.41    bool overflow_is_empty();
    9.42 -  juint stealable_size() { return _chunk_queue.size(); }
    9.43 -  ChunkTaskQueue* task_queue() { return &_chunk_queue; }
    9.44 +  juint stealable_size() { return _region_queue.size(); }
    9.45 +  RegionTaskQueue* task_queue() { return &_region_queue; }
    9.46  };
    9.47  
    9.48 -#define USE_ChunkTaskQueueWithOverflow
    9.49 +#define USE_RegionTaskQueueWithOverflow
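
For reference, RegionTaskQueueWithOverflow pairs a bounded stealable queue with an unbounded overflow stack: save() prefers the stealable queue and spills to the overflow stack, while retrieve() drains the overflow stack before the stealable queue, as the comments in taskqueue.cpp describe.  A minimal toy stand-in follows; it is not HotSpot code, and every name and the capacity are assumptions made only to mirror that contract with ordinary STL containers.

#include <cassert>
#include <cstddef>
#include <deque>
#include <vector>

typedef size_t RegionTaskSketch;   // a region index, as RegionTask is above

class RegionTaskQueueWithOverflowSketch {
  static const size_t StealableCapacity = 4;    // assumed small bound for the demo
  std::deque<RegionTaskSketch>  _stealable;     // stands in for _region_queue
  std::vector<RegionTaskSketch> _overflow;      // stands in for _overflow_stack
 public:
  // Save first to the stealable queue, then to the overflow stack.
  void save(RegionTaskSketch t) {
    if (_stealable.size() < StealableCapacity) {
      _stealable.push_back(t);
    } else {
      _overflow.push_back(t);
    }
  }
  // Retrieve first from the overflow stack, then from the stealable queue.
  // (Work stealing by other threads is omitted from this toy.)
  bool retrieve(RegionTaskSketch& t) {
    if (!_overflow.empty()) {
      t = _overflow.back();
      _overflow.pop_back();
      return true;
    }
    if (!_stealable.empty()) {
      t = _stealable.back();
      _stealable.pop_back();
      return true;
    }
    return false;
  }
  bool is_empty() const { return _stealable.empty() && _overflow.empty(); }
};

int main() {
  RegionTaskQueueWithOverflowSketch q;
  for (size_t region = 0; region < 6; ++region) {
    q.save(region);                 // regions 4 and 5 spill to the overflow stack
  }
  RegionTaskSketch t;
  assert(q.retrieve(t) && t == 5);  // overflow entries come back first
  while (q.retrieve(t)) { }         // drain the rest
  assert(q.is_empty());
  return 0;
}
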
