src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp

changeset 435
a61af66fc99e
child 810
81cd571500b0
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,301 @@
     1.4 +/*
     1.5 + * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "incls/_precompiled.incl"
    1.29 +#include "incls/_psCompactionManager.cpp.incl"
    1.30 +
// Static state shared by all ParCompactionManager instances.
// _manager_array/_stack_array/_chunk_array/_mark_bitmap are set once in
// ParCompactionManager::initialize(); _old_gen/_start_array are (re)set in
// the constructor.
PSOldGen*            ParCompactionManager::_old_gen = NULL;
ParCompactionManager**  ParCompactionManager::_manager_array = NULL;
OopTaskQueueSet*     ParCompactionManager::_stack_array = NULL;
ObjectStartArray*    ParCompactionManager::_start_array = NULL;
ParMarkBitMap*       ParCompactionManager::_mark_bitmap = NULL;
ChunkTaskQueueSet*   ParCompactionManager::_chunk_array = NULL;
    1.37 +
    1.38 +ParCompactionManager::ParCompactionManager() :
    1.39 +    _action(CopyAndUpdate) {
    1.40 +
    1.41 +  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    1.42 +  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    1.43 +
    1.44 +  _old_gen = heap->old_gen();
    1.45 +  _start_array = old_gen()->start_array();
    1.46 +
    1.47 +
    1.48 +  marking_stack()->initialize();
    1.49 +
    1.50 +  // We want the overflow stack to be permanent
    1.51 +  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
    1.52 +#ifdef USE_ChunkTaskQueueWithOverflow
    1.53 +  chunk_stack()->initialize();
    1.54 +#else
    1.55 +  chunk_stack()->initialize();
    1.56 +
    1.57 +  // We want the overflow stack to be permanent
    1.58 +  _chunk_overflow_stack =
    1.59 +    new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
    1.60 +#endif
    1.61 +
    1.62 +  // Note that _revisit_klass_stack is allocated out of the
    1.63 +  // C heap (as opposed to out of ResourceArena).
    1.64 +  int size =
    1.65 +    (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
    1.66 +  _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
    1.67 +
    1.68 +}
    1.69 +
    1.70 +ParCompactionManager::~ParCompactionManager() {
    1.71 +  delete _overflow_stack;
    1.72 +  delete _revisit_klass_stack;
    1.73 +  // _manager_array and _stack_array are statics
    1.74 +  // shared with all instances of ParCompactionManager
    1.75 +  // should not be deallocated.
    1.76 +}
    1.77 +
    1.78 +void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
    1.79 +  assert(PSParallelCompact::gc_task_manager() != NULL,
    1.80 +    "Needed for initialization");
    1.81 +
    1.82 +  _mark_bitmap = mbm;
    1.83 +
    1.84 +  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();
    1.85 +
    1.86 +  assert(_manager_array == NULL, "Attempt to initialize twice");
    1.87 +  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
    1.88 +  guarantee(_manager_array != NULL, "Could not initialize promotion manager");
    1.89 +
    1.90 +  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
    1.91 +  guarantee(_stack_array != NULL, "Count not initialize promotion manager");
    1.92 +  _chunk_array = new ChunkTaskQueueSet(parallel_gc_threads);
    1.93 +  guarantee(_chunk_array != NULL, "Count not initialize promotion manager");
    1.94 +
    1.95 +  // Create and register the ParCompactionManager(s) for the worker threads.
    1.96 +  for(uint i=0; i<parallel_gc_threads; i++) {
    1.97 +    _manager_array[i] = new ParCompactionManager();
    1.98 +    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    1.99 +    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
   1.100 +#ifdef USE_ChunkTaskQueueWithOverflow
   1.101 +    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack()->task_queue());
   1.102 +#else
   1.103 +    chunk_array()->register_queue(i, _manager_array[i]->chunk_stack());
   1.104 +#endif
   1.105 +  }
   1.106 +
   1.107 +  // The VMThread gets its own ParCompactionManager, which is not available
   1.108 +  // for work stealing.
   1.109 +  _manager_array[parallel_gc_threads] = new ParCompactionManager();
   1.110 +  guarantee(_manager_array[parallel_gc_threads] != NULL,
   1.111 +    "Could not create ParCompactionManager");
   1.112 +  assert(PSParallelCompact::gc_task_manager()->workers() != 0,
   1.113 +    "Not initialized?");
   1.114 +}
   1.115 +
   1.116 +bool ParCompactionManager::should_update() {
   1.117 +  assert(action() != NotValid, "Action is not set");
   1.118 +  return (action() == ParCompactionManager::Update) ||
   1.119 +         (action() == ParCompactionManager::CopyAndUpdate) ||
   1.120 +         (action() == ParCompactionManager::UpdateAndCopy);
   1.121 +}
   1.122 +
   1.123 +bool ParCompactionManager::should_copy() {
   1.124 +  assert(action() != NotValid, "Action is not set");
   1.125 +  return (action() == ParCompactionManager::Copy) ||
   1.126 +         (action() == ParCompactionManager::CopyAndUpdate) ||
   1.127 +         (action() == ParCompactionManager::UpdateAndCopy);
   1.128 +}
   1.129 +
   1.130 +bool ParCompactionManager::should_verify_only() {
   1.131 +  assert(action() != NotValid, "Action is not set");
   1.132 +  return action() == ParCompactionManager::VerifyUpdate;
   1.133 +}
   1.134 +
   1.135 +bool ParCompactionManager::should_reset_only() {
   1.136 +  assert(action() != NotValid, "Action is not set");
   1.137 +  return action() == ParCompactionManager::ResetObjects;
   1.138 +}
   1.139 +
   1.140 +// For now save on a stack
   1.141 +void ParCompactionManager::save_for_scanning(oop m) {
   1.142 +  stack_push(m);
   1.143 +}
   1.144 +
   1.145 +void ParCompactionManager::stack_push(oop obj) {
   1.146 +
   1.147 +  if(!marking_stack()->push(obj)) {
   1.148 +    overflow_stack()->push(obj);
   1.149 +  }
   1.150 +}
   1.151 +
   1.152 +oop ParCompactionManager::retrieve_for_scanning() {
   1.153 +
   1.154 +  // Should not be used in the parallel case
   1.155 +  ShouldNotReachHere();
   1.156 +  return NULL;
   1.157 +}
   1.158 +
// Save chunk on a stack for later fill-and-update processing.
// Debug builds verify the chunk was claimed by this thread and is pushed
// at most once.
void ParCompactionManager::save_for_processing(size_t chunk_index) {
#ifdef ASSERT
  const ParallelCompactData& sd = PSParallelCompact::summary_data();
  ParallelCompactData::ChunkData* const chunk_ptr = sd.chunk(chunk_index);
  assert(chunk_ptr->claimed(), "must be claimed");
  // The post-increment side effect on the debug counter _pushed is
  // intentional; this whole statement exists only under ASSERT.
  assert(chunk_ptr->_pushed++ == 0, "should only be pushed once");
#endif
  chunk_stack_push(chunk_index);
}
   1.169 +
// Push a chunk index onto this manager's chunk stack.  With
// USE_ChunkTaskQueueWithOverflow the queue handles overflow internally via
// save(); otherwise a full push spills to the separate C-heap overflow stack.
void ParCompactionManager::chunk_stack_push(size_t chunk_index) {

#ifdef USE_ChunkTaskQueueWithOverflow
  chunk_stack()->save(chunk_index);
#else
  if(!chunk_stack()->push(chunk_index)) {
    chunk_overflow_stack()->push(chunk_index);
  }
#endif
}
   1.180 +
// Pop a chunk index into chunk_index; returns false when no work remains.
// Only supported with USE_ChunkTaskQueueWithOverflow -- the other
// configuration drains its stacks directly and must not call this.
bool ParCompactionManager::retrieve_for_processing(size_t& chunk_index) {
#ifdef USE_ChunkTaskQueueWithOverflow
  return chunk_stack()->retrieve(chunk_index);
#else
  // Should not be used in the parallel case
  ShouldNotReachHere();
  return false;
#endif
}
   1.190 +
   1.191 +ParCompactionManager*
   1.192 +ParCompactionManager::gc_thread_compaction_manager(int index) {
   1.193 +  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
   1.194 +  assert(_manager_array != NULL, "Sanity");
   1.195 +  return _manager_array[index];
   1.196 +}
   1.197 +
   1.198 +void ParCompactionManager::reset() {
   1.199 +  for(uint i=0; i<ParallelGCThreads+1; i++) {
   1.200 +    manager_array(i)->revisit_klass_stack()->clear();
   1.201 +  }
   1.202 +}
   1.203 +
// Drain both marking stacks until empty.  follow_contents() may push more
// work, so the outer do/while re-checks both stacks before terminating.
// NOTE(review): the 'blk' closure parameter is unused in this body, and the
// debug-only space locals below are computed but never referenced.
void ParCompactionManager::drain_marking_stacks(OopClosure* blk) {
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */


  do {

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while(!overflow_stack()->is_empty()) {
      oop obj = overflow_stack()->pop();
      obj->follow_contents(this);
    }

    oop obj;
    // obj is a reference!!!
    while (marking_stack()->pop_local(obj)) {
      // It would be nice to assert about the type of objects we might
      // pop, but they can come from anywhere, unfortunately.
      obj->follow_contents(this);
    }
  } while((marking_stack()->size() != 0) || (overflow_stack()->length() != 0));

  assert(marking_stack()->size() == 0, "Sanity");
  assert(overflow_stack()->length() == 0, "Sanity");
}
   1.235 +
   1.236 +void ParCompactionManager::drain_chunk_overflow_stack() {
   1.237 +  size_t chunk_index = (size_t) -1;
   1.238 +  while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
   1.239 +    PSParallelCompact::fill_and_update_chunk(this, chunk_index);
   1.240 +  }
   1.241 +}
   1.242 +
// Drain all chunk work owned by this manager, filling and updating each
// chunk.  Mirrors drain_marking_stacks(): the overflow area is drained
// first so other threads can steal from the bounded queue, and the outer
// do/while re-checks because fill_and_update_chunk() may enqueue more work.
// NOTE(review): the do/while is split across the preprocessor branches --
// 'do {' before the #ifdef, '} while' inside each arm -- so edit with care.
void ParCompactionManager::drain_chunk_stacks() {
#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  // NOTE(review): these debug-only locals are computed but never used.
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
  MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */

#if 1 // def DO_PARALLEL - the serial code hasn't been updated
  do {

#ifdef USE_ChunkTaskQueueWithOverflow
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    size_t chunk_index = (size_t) -1;
    while(chunk_stack()->retrieve_from_overflow(chunk_index)) {
      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
    }

    while (chunk_stack()->retrieve_from_stealable_queue(chunk_index)) {
      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
    }
  } while (!chunk_stack()->is_empty());
#else
    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while(!chunk_overflow_stack()->is_empty()) {
      size_t chunk_index = chunk_overflow_stack()->pop();
      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
    }

    size_t chunk_index = -1;
    // obj is a reference!!!
    while (chunk_stack()->pop_local(chunk_index)) {
      // It would be nice to assert about the type of objects we might
      // pop, but they can come from anywhere, unfortunately.
      PSParallelCompact::fill_and_update_chunk(this, chunk_index);
    }
  } while((chunk_stack()->size() != 0) ||
          (chunk_overflow_stack()->length() != 0));
#endif

#ifdef USE_ChunkTaskQueueWithOverflow
  assert(chunk_stack()->is_empty(), "Sanity");
#else
  assert(chunk_stack()->size() == 0, "Sanity");
  assert(chunk_overflow_stack()->length() == 0, "Sanity");
#endif
#else
  // Dead serial path: the enclosing "#if 1" above is always taken.
  oop obj;
  while (obj = retrieve_for_scanning()) {
    obj->follow_contents(this);
  }
#endif
}
   1.299 +
   1.300 +#ifdef ASSERT
   1.301 +bool ParCompactionManager::stacks_have_been_allocated() {
   1.302 +  return (revisit_klass_stack()->data_addr() != NULL);
   1.303 +}
   1.304 +#endif

mercurial