src/share/vm/utilities/workgroup.cpp

changeset 0:f90c822e73f8
child     6876:710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/utilities/workgroup.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,653 @@
     1.4 +/*
     1.5 + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "memory/allocation.hpp"
    1.30 +#include "memory/allocation.inline.hpp"
    1.31 +#include "runtime/os.hpp"
    1.32 +#include "utilities/workgroup.hpp"
    1.33 +
    1.34 +PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    1.35 +
     1.36 +// Definitions of WorkGang, GangWorker and related work-sharing helpers.
    1.37 +
    1.38 +AbstractWorkGang::AbstractWorkGang(const char* name,
    1.39 +                                   bool  are_GC_task_threads,
    1.40 +                                   bool  are_ConcurrentGC_threads) :
    1.41 +  _name(name),
    1.42 +  _are_GC_task_threads(are_GC_task_threads),
    1.43 +  _are_ConcurrentGC_threads(are_ConcurrentGC_threads) {
    1.44 +
    1.45 +  assert(!(are_GC_task_threads && are_ConcurrentGC_threads),
     1.46 +         "They cannot both be STW GC and Concurrent threads");
    1.47 +
    1.48 +  // Other initialization.
    1.49 +  _monitor = new Monitor(/* priority */       Mutex::leaf,
    1.50 +                         /* name */           "WorkGroup monitor",
    1.51 +                         /* allow_vm_block */ are_GC_task_threads);
    1.52 +  assert(monitor() != NULL, "Failed to allocate monitor");
    1.53 +  _terminate = false;
    1.54 +  _task = NULL;
    1.55 +  _sequence_number = 0;
    1.56 +  _started_workers = 0;
    1.57 +  _finished_workers = 0;
    1.58 +}
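          +
          +// The gang's shared state (_task, _sequence_number, _started_workers and
          +// _finished_workers) is guarded by _monitor: workers wait on it for new
          +// work, and the dispatching thread waits on it for their completion.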
    1.59 +
    1.60 +WorkGang::WorkGang(const char* name,
    1.61 +                   uint        workers,
    1.62 +                   bool        are_GC_task_threads,
    1.63 +                   bool        are_ConcurrentGC_threads) :
    1.64 +  AbstractWorkGang(name, are_GC_task_threads, are_ConcurrentGC_threads) {
    1.65 +  _total_workers = workers;
    1.66 +}
    1.67 +
    1.68 +GangWorker* WorkGang::allocate_worker(uint which) {
    1.69 +  GangWorker* new_worker = new GangWorker(this, which);
    1.70 +  return new_worker;
    1.71 +}
    1.72 +
    1.73 +// The current implementation will exit if the allocation
    1.74 +// of any worker fails.  Still, return a boolean so that
    1.75 +// a future implementation can possibly do a partial
    1.76 +// initialization of the workers and report such to the
    1.77 +// caller.
    1.78 +bool WorkGang::initialize_workers() {
    1.79 +
    1.80 +  if (TraceWorkGang) {
    1.81 +    tty->print_cr("Constructing work gang %s with %d threads",
    1.82 +                  name(),
    1.83 +                  total_workers());
    1.84 +  }
    1.85 +  _gang_workers = NEW_C_HEAP_ARRAY(GangWorker*, total_workers(), mtInternal);
    1.86 +  if (gang_workers() == NULL) {
    1.87 +    vm_exit_out_of_memory(0, OOM_MALLOC_ERROR, "Cannot create GangWorker array.");
    1.88 +    return false;
    1.89 +  }
    1.90 +  os::ThreadType worker_type;
    1.91 +  if (are_ConcurrentGC_threads()) {
    1.92 +    worker_type = os::cgc_thread;
    1.93 +  } else {
    1.94 +    worker_type = os::pgc_thread;
    1.95 +  }
    1.96 +  for (uint worker = 0; worker < total_workers(); worker += 1) {
    1.97 +    GangWorker* new_worker = allocate_worker(worker);
    1.98 +    assert(new_worker != NULL, "Failed to allocate GangWorker");
    1.99 +    _gang_workers[worker] = new_worker;
   1.100 +    if (new_worker == NULL || !os::create_thread(new_worker, worker_type)) {
   1.101 +      vm_exit_out_of_memory(0, OOM_MALLOC_ERROR,
   1.102 +              "Cannot create worker GC thread. Out of system resources.");
   1.103 +      return false;
   1.104 +    }
   1.105 +    if (!DisableStartThread) {
   1.106 +      os::start_thread(new_worker);
   1.107 +    }
   1.108 +  }
   1.109 +  return true;
   1.110 +}
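          +
          +// A gang is normally created and initialized once and then reused for many
          +// tasks.  A minimal sketch (the gang name, thread count and MyGangTask are
          +// illustrative, not part of this file):
          +//
          +//   WorkGang* gang = new WorkGang("Example gang", 4,
          +//                                 /* are_GC_task_threads */      true,
          +//                                 /* are_ConcurrentGC_threads */ false);
          +//   if (!gang->initialize_workers()) {
          +//     // thread creation failed (the current implementation exits instead)
          +//   }
          +//   MyGangTask task;          // a subclass of AbstractGangTask
          +//   gang->run_task(&task);    // task.work(i) runs on each participating worker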
   1.111 +
   1.112 +AbstractWorkGang::~AbstractWorkGang() {
   1.113 +  if (TraceWorkGang) {
   1.114 +    tty->print_cr("Destructing work gang %s", name());
   1.115 +  }
   1.116 +  stop();   // stop all the workers
   1.117 +  for (uint worker = 0; worker < total_workers(); worker += 1) {
   1.118 +    delete gang_worker(worker);
   1.119 +  }
   1.120 +  delete gang_workers();
   1.121 +  delete monitor();
   1.122 +}
   1.123 +
   1.124 +GangWorker* AbstractWorkGang::gang_worker(uint i) const {
   1.125 +  // Array index bounds checking.
   1.126 +  GangWorker* result = NULL;
   1.127 +  assert(gang_workers() != NULL, "No workers for indexing");
   1.128 +  assert(((i >= 0) && (i < total_workers())), "Worker index out of bounds");
   1.129 +  result = _gang_workers[i];
   1.130 +  assert(result != NULL, "Indexing to null worker");
   1.131 +  return result;
   1.132 +}
   1.133 +
   1.134 +void WorkGang::run_task(AbstractGangTask* task) {
   1.135 +  run_task(task, total_workers());
   1.136 +}
   1.137 +
   1.138 +void WorkGang::run_task(AbstractGangTask* task, uint no_of_parallel_workers) {
   1.139 +  task->set_for_termination(no_of_parallel_workers);
   1.140 +
    1.141 +  // This method is executed by the VM thread, which does not block
    1.142 +  // on ordinary MutexLocker's; hence the no-safepoint-check locking below.
   1.143 +  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
   1.144 +  if (TraceWorkGang) {
   1.145 +    tty->print_cr("Running work gang %s task %s", name(), task->name());
   1.146 +  }
   1.147 +  // Tell all the workers to run a task.
   1.148 +  assert(task != NULL, "Running a null task");
   1.149 +  // Initialize.
   1.150 +  _task = task;
   1.151 +  _sequence_number += 1;
   1.152 +  _started_workers = 0;
   1.153 +  _finished_workers = 0;
   1.154 +  // Tell the workers to get to work.
   1.155 +  monitor()->notify_all();
   1.156 +  // Wait for them to be finished
   1.157 +  while (finished_workers() < no_of_parallel_workers) {
   1.158 +    if (TraceWorkGang) {
   1.159 +      tty->print_cr("Waiting in work gang %s: %d/%d finished sequence %d",
   1.160 +                    name(), finished_workers(), no_of_parallel_workers,
   1.161 +                    _sequence_number);
   1.162 +    }
   1.163 +    monitor()->wait(/* no_safepoint_check */ true);
   1.164 +  }
   1.165 +  _task = NULL;
   1.166 +  if (TraceWorkGang) {
   1.167 +    tty->print_cr("\nFinished work gang %s: %d/%d sequence %d",
   1.168 +                  name(), finished_workers(), no_of_parallel_workers,
   1.169 +                  _sequence_number);
   1.170 +    Thread* me = Thread::current();
   1.171 +    tty->print_cr("  T: 0x%x  VM_thread: %d", me, me->is_VM_thread());
   1.172 +  }
   1.173 +}
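          +
          +// Dispatch protocol: run_task() publishes the task and a new sequence number
          +// under the monitor and notifies all workers; each worker that picks up the
          +// work bumps _started_workers, and bumps _finished_workers (notifying the
          +// monitor) when it is done, which lets the wait loop above return.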
   1.174 +
   1.175 +void FlexibleWorkGang::run_task(AbstractGangTask* task) {
   1.176 +  // If active_workers() is passed, _finished_workers
    1.177 +  // must only be incremented for workers that find non-null
   1.178 +  // work (as opposed to all those that just check that the
   1.179 +  // task is not null).
   1.180 +  WorkGang::run_task(task, (uint) active_workers());
   1.181 +}
   1.182 +
   1.183 +void AbstractWorkGang::stop() {
   1.184 +  // Tell all workers to terminate, then wait for them to become inactive.
   1.185 +  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
   1.186 +  if (TraceWorkGang) {
   1.187 +    tty->print_cr("Stopping work gang %s task %s", name(), task()->name());
   1.188 +  }
   1.189 +  _task = NULL;
   1.190 +  _terminate = true;
   1.191 +  monitor()->notify_all();
   1.192 +  while (finished_workers() < active_workers()) {
   1.193 +    if (TraceWorkGang) {
   1.194 +      tty->print_cr("Waiting in work gang %s: %d/%d finished",
   1.195 +                    name(), finished_workers(), active_workers());
   1.196 +    }
   1.197 +    monitor()->wait(/* no_safepoint_check */ true);
   1.198 +  }
   1.199 +}
   1.200 +
   1.201 +void AbstractWorkGang::internal_worker_poll(WorkData* data) const {
   1.202 +  assert(monitor()->owned_by_self(), "worker_poll is an internal method");
   1.203 +  assert(data != NULL, "worker data is null");
   1.204 +  data->set_terminate(terminate());
   1.205 +  data->set_task(task());
   1.206 +  data->set_sequence_number(sequence_number());
   1.207 +}
   1.208 +
   1.209 +void AbstractWorkGang::internal_note_start() {
    1.210 +  assert(monitor()->owned_by_self(), "note_start is an internal method");
   1.211 +  _started_workers += 1;
   1.212 +}
   1.213 +
   1.214 +void AbstractWorkGang::internal_note_finish() {
   1.215 +  assert(monitor()->owned_by_self(), "note_finish is an internal method");
   1.216 +  _finished_workers += 1;
   1.217 +}
   1.218 +
   1.219 +void AbstractWorkGang::print_worker_threads_on(outputStream* st) const {
   1.220 +  uint    num_thr = total_workers();
   1.221 +  for (uint i = 0; i < num_thr; i++) {
   1.222 +    gang_worker(i)->print_on(st);
   1.223 +    st->cr();
   1.224 +  }
   1.225 +}
   1.226 +
   1.227 +void AbstractWorkGang::threads_do(ThreadClosure* tc) const {
   1.228 +  assert(tc != NULL, "Null ThreadClosure");
   1.229 +  uint num_thr = total_workers();
   1.230 +  for (uint i = 0; i < num_thr; i++) {
   1.231 +    tc->do_thread(gang_worker(i));
   1.232 +  }
   1.233 +}
   1.234 +
   1.235 +// GangWorker methods.
   1.236 +
   1.237 +GangWorker::GangWorker(AbstractWorkGang* gang, uint id) {
   1.238 +  _gang = gang;
   1.239 +  set_id(id);
   1.240 +  set_name("Gang worker#%d (%s)", id, gang->name());
   1.241 +}
   1.242 +
   1.243 +void GangWorker::run() {
   1.244 +  initialize();
   1.245 +  loop();
   1.246 +}
   1.247 +
   1.248 +void GangWorker::initialize() {
   1.249 +  this->initialize_thread_local_storage();
   1.250 +  this->record_stack_base_and_size();
   1.251 +  assert(_gang != NULL, "No gang to run in");
   1.252 +  os::set_priority(this, NearMaxPriority);
   1.253 +  if (TraceWorkGang) {
   1.254 +    tty->print_cr("Running gang worker for gang %s id %d",
   1.255 +                  gang()->name(), id());
   1.256 +  }
    1.257 +  // The VM thread should not execute here because MutexLocker's are used
    1.258 +  // (as opposed to MutexLockerEx's).
   1.259 +  assert(!Thread::current()->is_VM_thread(), "VM thread should not be part"
   1.260 +         " of a work gang");
   1.261 +}
   1.262 +
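          +// Each worker repeatedly polls the gang state under the monitor, waits until
          +// either termination is requested or a task with a new sequence number shows
          +// up (and the gang still needs more workers), claims a "part" equal to its
          +// index among the started workers, runs task->work(part) outside the lock,
          +// and finally reports completion and remembers the sequence number.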
   1.263 +void GangWorker::loop() {
   1.264 +  int previous_sequence_number = 0;
   1.265 +  Monitor* gang_monitor = gang()->monitor();
   1.266 +  for ( ; /* !terminate() */; ) {
   1.267 +    WorkData data;
   1.268 +    int part;  // Initialized below.
   1.269 +    {
   1.270 +      // Grab the gang mutex.
   1.271 +      MutexLocker ml(gang_monitor);
   1.272 +      // Wait for something to do.
   1.273 +      // Polling outside the while { wait } avoids missed notifies
   1.274 +      // in the outer loop.
   1.275 +      gang()->internal_worker_poll(&data);
   1.276 +      if (TraceWorkGang) {
   1.277 +        tty->print("Polled outside for work in gang %s worker %d",
   1.278 +                   gang()->name(), id());
   1.279 +        tty->print("  terminate: %s",
   1.280 +                   data.terminate() ? "true" : "false");
   1.281 +        tty->print("  sequence: %d (prev: %d)",
   1.282 +                   data.sequence_number(), previous_sequence_number);
   1.283 +        if (data.task() != NULL) {
   1.284 +          tty->print("  task: %s", data.task()->name());
   1.285 +        } else {
   1.286 +          tty->print("  task: NULL");
   1.287 +        }
   1.288 +        tty->cr();
   1.289 +      }
   1.290 +      for ( ; /* break or return */; ) {
   1.291 +        // Terminate if requested.
   1.292 +        if (data.terminate()) {
   1.293 +          gang()->internal_note_finish();
   1.294 +          gang_monitor->notify_all();
   1.295 +          return;
   1.296 +        }
   1.297 +        // Check for new work.
   1.298 +        if ((data.task() != NULL) &&
   1.299 +            (data.sequence_number() != previous_sequence_number)) {
   1.300 +          if (gang()->needs_more_workers()) {
   1.301 +            gang()->internal_note_start();
   1.302 +            gang_monitor->notify_all();
   1.303 +            part = gang()->started_workers() - 1;
   1.304 +            break;
   1.305 +          }
   1.306 +        }
   1.307 +        // Nothing to do.
   1.308 +        gang_monitor->wait(/* no_safepoint_check */ true);
   1.309 +        gang()->internal_worker_poll(&data);
   1.310 +        if (TraceWorkGang) {
   1.311 +          tty->print("Polled inside for work in gang %s worker %d",
   1.312 +                     gang()->name(), id());
   1.313 +          tty->print("  terminate: %s",
   1.314 +                     data.terminate() ? "true" : "false");
   1.315 +          tty->print("  sequence: %d (prev: %d)",
   1.316 +                     data.sequence_number(), previous_sequence_number);
   1.317 +          if (data.task() != NULL) {
   1.318 +            tty->print("  task: %s", data.task()->name());
   1.319 +          } else {
   1.320 +            tty->print("  task: NULL");
   1.321 +          }
   1.322 +          tty->cr();
   1.323 +        }
   1.324 +      }
   1.325 +      // Drop gang mutex.
   1.326 +    }
   1.327 +    if (TraceWorkGang) {
   1.328 +      tty->print("Work for work gang %s id %d task %s part %d",
   1.329 +                 gang()->name(), id(), data.task()->name(), part);
   1.330 +    }
   1.331 +    assert(data.task() != NULL, "Got null task");
   1.332 +    data.task()->work(part);
   1.333 +    {
   1.334 +      if (TraceWorkGang) {
   1.335 +        tty->print("Finish for work gang %s id %d task %s part %d",
   1.336 +                   gang()->name(), id(), data.task()->name(), part);
   1.337 +      }
   1.338 +      // Grab the gang mutex.
   1.339 +      MutexLocker ml(gang_monitor);
   1.340 +      gang()->internal_note_finish();
   1.341 +      // Tell the gang you are done.
   1.342 +      gang_monitor->notify_all();
   1.343 +      // Drop the gang mutex.
   1.344 +    }
   1.345 +    previous_sequence_number = data.sequence_number();
   1.346 +  }
   1.347 +}
   1.348 +
   1.349 +bool GangWorker::is_GC_task_thread() const {
   1.350 +  return gang()->are_GC_task_threads();
   1.351 +}
   1.352 +
   1.353 +bool GangWorker::is_ConcurrentGC_thread() const {
   1.354 +  return gang()->are_ConcurrentGC_threads();
   1.355 +}
   1.356 +
   1.357 +void GangWorker::print_on(outputStream* st) const {
   1.358 +  st->print("\"%s\" ", name());
   1.359 +  Thread::print_on(st);
   1.360 +  st->cr();
   1.361 +}
   1.362 +
   1.363 +// Printing methods
   1.364 +
   1.365 +const char* AbstractWorkGang::name() const {
   1.366 +  return _name;
   1.367 +}
   1.368 +
   1.369 +#ifndef PRODUCT
   1.370 +
   1.371 +const char* AbstractGangTask::name() const {
   1.372 +  return _name;
   1.373 +}
   1.374 +
   1.375 +#endif /* PRODUCT */
   1.376 +
   1.377 +// FlexibleWorkGang
   1.378 +
   1.379 +
   1.380 +// *** WorkGangBarrierSync
   1.381 +
   1.382 +WorkGangBarrierSync::WorkGangBarrierSync()
   1.383 +  : _monitor(Mutex::safepoint, "work gang barrier sync", true),
   1.384 +    _n_workers(0), _n_completed(0), _should_reset(false), _aborted(false) {
   1.385 +}
   1.386 +
   1.387 +WorkGangBarrierSync::WorkGangBarrierSync(uint n_workers, const char* name)
   1.388 +  : _monitor(Mutex::safepoint, name, true),
   1.389 +    _n_workers(n_workers), _n_completed(0), _should_reset(false), _aborted(false) {
   1.390 +}
   1.391 +
   1.392 +void WorkGangBarrierSync::set_n_workers(uint n_workers) {
   1.393 +  _n_workers    = n_workers;
   1.394 +  _n_completed  = 0;
   1.395 +  _should_reset = false;
   1.396 +  _aborted      = false;
   1.397 +}
   1.398 +
   1.399 +bool WorkGangBarrierSync::enter() {
   1.400 +  MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
   1.401 +  if (should_reset()) {
   1.402 +    // The should_reset() was set and we are the first worker to enter
   1.403 +    // the sync barrier. We will zero the n_completed() count which
   1.404 +    // effectively resets the barrier.
   1.405 +    zero_completed();
   1.406 +    set_should_reset(false);
   1.407 +  }
   1.408 +  inc_completed();
   1.409 +  if (n_completed() == n_workers()) {
   1.410 +    // At this point we would like to reset the barrier to be ready in
   1.411 +    // case it is used again. However, we cannot set n_completed() to
   1.412 +    // 0, even after the notify_all(), given that some other workers
   1.413 +    // might still be waiting for n_completed() to become ==
   1.414 +    // n_workers(). So, if we set n_completed() to 0, those workers
   1.415 +    // will get stuck (as they will wake up, see that n_completed() !=
   1.416 +    // n_workers() and go back to sleep). Instead, we raise the
   1.417 +    // should_reset() flag and the barrier will be reset the first
   1.418 +    // time a worker enters it again.
   1.419 +    set_should_reset(true);
   1.420 +    monitor()->notify_all();
   1.421 +  } else {
   1.422 +    while (n_completed() != n_workers() && !aborted()) {
   1.423 +      monitor()->wait(/* no_safepoint_check */ true);
   1.424 +    }
   1.425 +  }
   1.426 +  return !aborted();
   1.427 +}
   1.428 +
   1.429 +void WorkGangBarrierSync::abort() {
   1.430 +  MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
   1.431 +  set_aborted();
   1.432 +  monitor()->notify_all();
   1.433 +}
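          +
          +// Typical use (a sketch; the callers are outside this file): the coordinating
          +// thread calls set_n_workers(n) before handing out work, and each of the n
          +// workers calls enter() at the synchronization point:
          +//
          +//   if (!barrier->enter()) {   // "barrier" is an illustrative name
          +//     // the barrier was aborted, e.g. because a shutdown was requested
          +//   }
          +//
          +// enter() returns true only after all n workers have arrived; once abort()
          +// has been called it returns false to every caller.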
   1.434 +
   1.435 +// SubTasksDone functions.
   1.436 +
   1.437 +SubTasksDone::SubTasksDone(uint n) :
   1.438 +  _n_tasks(n), _n_threads(1), _tasks(NULL) {
   1.439 +  _tasks = NEW_C_HEAP_ARRAY(uint, n, mtInternal);
   1.440 +  guarantee(_tasks != NULL, "alloc failure");
   1.441 +  clear();
   1.442 +}
   1.443 +
   1.444 +bool SubTasksDone::valid() {
   1.445 +  return _tasks != NULL;
   1.446 +}
   1.447 +
   1.448 +void SubTasksDone::set_n_threads(uint t) {
   1.449 +  assert(_claimed == 0 || _threads_completed == _n_threads,
   1.450 +         "should not be called while tasks are being processed!");
   1.451 +  _n_threads = (t == 0 ? 1 : t);
   1.452 +}
   1.453 +
   1.454 +void SubTasksDone::clear() {
   1.455 +  for (uint i = 0; i < _n_tasks; i++) {
   1.456 +    _tasks[i] = 0;
   1.457 +  }
   1.458 +  _threads_completed = 0;
   1.459 +#ifdef ASSERT
   1.460 +  _claimed = 0;
   1.461 +#endif
   1.462 +}
   1.463 +
   1.464 +bool SubTasksDone::is_task_claimed(uint t) {
   1.465 +  assert(0 <= t && t < _n_tasks, "bad task id.");
   1.466 +  uint old = _tasks[t];
   1.467 +  if (old == 0) {
   1.468 +    old = Atomic::cmpxchg(1, &_tasks[t], 0);
   1.469 +  }
   1.470 +  assert(_tasks[t] == 1, "What else?");
   1.471 +  bool res = old != 0;
   1.472 +#ifdef ASSERT
   1.473 +  if (!res) {
   1.474 +    assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
   1.475 +    Atomic::inc((volatile jint*) &_claimed);
   1.476 +  }
   1.477 +#endif
   1.478 +  return res;
   1.479 +}
   1.480 +
   1.481 +void SubTasksDone::all_tasks_completed() {
   1.482 +  jint observed = _threads_completed;
   1.483 +  jint old;
   1.484 +  do {
   1.485 +    old = observed;
   1.486 +    observed = Atomic::cmpxchg(old+1, &_threads_completed, old);
   1.487 +  } while (observed != old);
   1.488 +  // If this was the last thread checking in, clear the tasks.
   1.489 +  if (observed+1 == (jint)_n_threads) clear();
   1.490 +}
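          +
          +// Intended use (a sketch; names such as subtasks, n_tasks and do_subtask are
          +// illustrative): several threads walk the same fixed set of subtasks, each
          +// subtask runs on exactly the one thread that claims it, and the structure
          +// resets itself once every participating thread has checked in:
          +//
          +//   for (uint t = 0; t < n_tasks; t++) {
          +//     if (!subtasks->is_task_claimed(t)) {
          +//       do_subtask(t);
          +//     }
          +//   }
          +//   subtasks->all_tasks_completed();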
   1.491 +
   1.492 +
   1.493 +SubTasksDone::~SubTasksDone() {
    1.494 +  if (_tasks != NULL) FREE_C_HEAP_ARRAY(uint, _tasks, mtInternal);
   1.495 +}
   1.496 +
   1.497 +// *** SequentialSubTasksDone
   1.498 +
   1.499 +void SequentialSubTasksDone::clear() {
   1.500 +  _n_tasks   = _n_claimed   = 0;
   1.501 +  _n_threads = _n_completed = 0;
   1.502 +}
   1.503 +
   1.504 +bool SequentialSubTasksDone::valid() {
   1.505 +  return _n_threads > 0;
   1.506 +}
   1.507 +
   1.508 +bool SequentialSubTasksDone::is_task_claimed(uint& t) {
   1.509 +  uint* n_claimed_ptr = &_n_claimed;
   1.510 +  t = *n_claimed_ptr;
   1.511 +  while (t < _n_tasks) {
   1.512 +    jint res = Atomic::cmpxchg(t+1, n_claimed_ptr, t);
   1.513 +    if (res == (jint)t) {
   1.514 +      return false;
   1.515 +    }
   1.516 +    t = *n_claimed_ptr;
   1.517 +  }
   1.518 +  return true;
   1.519 +}
   1.520 +
   1.521 +bool SequentialSubTasksDone::all_tasks_completed() {
   1.522 +  uint* n_completed_ptr = &_n_completed;
   1.523 +  uint  complete        = *n_completed_ptr;
   1.524 +  while (true) {
   1.525 +    uint res = Atomic::cmpxchg(complete+1, n_completed_ptr, complete);
   1.526 +    if (res == complete) {
   1.527 +      break;
   1.528 +    }
   1.529 +    complete = res;
   1.530 +  }
   1.531 +  if (complete+1 == _n_threads) {
   1.532 +    clear();
   1.533 +    return true;
   1.534 +  }
   1.535 +  return false;
   1.536 +}
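          +
          +// Intended use (a sketch; seq_tasks and do_subtask are illustrative): unlike
          +// SubTasksDone, tasks are handed out in sequence rather than claimed by
          +// index, so a worker keeps claiming until the supply is exhausted:
          +//
          +//   uint t;
          +//   while (!seq_tasks->is_task_claimed(t)) {
          +//     do_subtask(t);
          +//   }
          +//   seq_tasks->all_tasks_completed();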
   1.537 +
   1.538 +bool FreeIdSet::_stat_init = false;
   1.539 +FreeIdSet* FreeIdSet::_sets[NSets];
   1.540 +bool FreeIdSet::_safepoint;
   1.541 +
   1.542 +FreeIdSet::FreeIdSet(int sz, Monitor* mon) :
   1.543 +  _sz(sz), _mon(mon), _hd(0), _waiters(0), _index(-1), _claimed(0)
   1.544 +{
   1.545 +  _ids = NEW_C_HEAP_ARRAY(int, sz, mtInternal);
   1.546 +  for (int i = 0; i < sz; i++) _ids[i] = i+1;
   1.547 +  _ids[sz-1] = end_of_list; // end of list.
    1.548 +  if (!_stat_init) {  // One-time initialization of the static set table.
   1.549 +    for (int j = 0; j < NSets; j++) _sets[j] = NULL;
   1.550 +    _stat_init = true;
   1.551 +  }
   1.552 +  // Add to sets.  (This should happen while the system is still single-threaded.)
   1.553 +  for (int j = 0; j < NSets; j++) {
   1.554 +    if (_sets[j] == NULL) {
   1.555 +      _sets[j] = this;
   1.556 +      _index = j;
   1.557 +      break;
   1.558 +    }
   1.559 +  }
   1.560 +  guarantee(_index != -1, "Too many FreeIdSets in use!");
   1.561 +}
   1.562 +
   1.563 +FreeIdSet::~FreeIdSet() {
   1.564 +  _sets[_index] = NULL;
   1.565 +  FREE_C_HEAP_ARRAY(int, _ids, mtInternal);
   1.566 +}
   1.567 +
   1.568 +void FreeIdSet::set_safepoint(bool b) {
   1.569 +  _safepoint = b;
   1.570 +  if (b) {
   1.571 +    for (int j = 0; j < NSets; j++) {
   1.572 +      if (_sets[j] != NULL && _sets[j]->_waiters > 0) {
   1.573 +        Monitor* mon = _sets[j]->_mon;
   1.574 +        mon->lock_without_safepoint_check();
   1.575 +        mon->notify_all();
   1.576 +        mon->unlock();
   1.577 +      }
   1.578 +    }
   1.579 +  }
   1.580 +}
   1.581 +
   1.582 +#define FID_STATS 0
   1.583 +
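          +// Ids are handed out from a simple free list: claim_par_id() blocks (without
          +// a safepoint check) until an id is available or a safepoint is signalled,
          +// in which case it returns -1; release_par_id() returns the id to the list
          +// and wakes any waiters.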
   1.584 +int FreeIdSet::claim_par_id() {
   1.585 +#if FID_STATS
   1.586 +  thread_t tslf = thr_self();
   1.587 +  tty->print("claim_par_id[%d]: sz = %d, claimed = %d\n", tslf, _sz, _claimed);
   1.588 +#endif
   1.589 +  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
   1.590 +  while (!_safepoint && _hd == end_of_list) {
   1.591 +    _waiters++;
   1.592 +#if FID_STATS
   1.593 +    if (_waiters > 5) {
   1.594 +      tty->print("claim_par_id waiting[%d]: %d waiters, %d claimed.\n",
   1.595 +                 tslf, _waiters, _claimed);
   1.596 +    }
   1.597 +#endif
   1.598 +    _mon->wait(Mutex::_no_safepoint_check_flag);
   1.599 +    _waiters--;
   1.600 +  }
   1.601 +  if (_hd == end_of_list) {
   1.602 +#if FID_STATS
   1.603 +    tty->print("claim_par_id[%d]: returning EOL.\n", tslf);
   1.604 +#endif
   1.605 +    return -1;
   1.606 +  } else {
   1.607 +    int res = _hd;
   1.608 +    _hd = _ids[res];
   1.609 +    _ids[res] = claimed;  // For debugging.
   1.610 +    _claimed++;
   1.611 +#if FID_STATS
   1.612 +    tty->print("claim_par_id[%d]: returning %d, claimed = %d.\n",
   1.613 +               tslf, res, _claimed);
   1.614 +#endif
   1.615 +    return res;
   1.616 +  }
   1.617 +}
   1.618 +
   1.619 +bool FreeIdSet::claim_perm_id(int i) {
   1.620 +  assert(0 <= i && i < _sz, "Out of range.");
   1.621 +  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
   1.622 +  int prev = end_of_list;
   1.623 +  int cur = _hd;
   1.624 +  while (cur != end_of_list) {
   1.625 +    if (cur == i) {
   1.626 +      if (prev == end_of_list) {
   1.627 +        _hd = _ids[cur];
   1.628 +      } else {
   1.629 +        _ids[prev] = _ids[cur];
   1.630 +      }
   1.631 +      _ids[cur] = claimed;
   1.632 +      _claimed++;
   1.633 +      return true;
   1.634 +    } else {
   1.635 +      prev = cur;
   1.636 +      cur = _ids[cur];
   1.637 +    }
   1.638 +  }
   1.639 +  return false;
    1.640 +}
   1.642 +
   1.643 +void FreeIdSet::release_par_id(int id) {
   1.644 +  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
   1.645 +  assert(_ids[id] == claimed, "Precondition.");
   1.646 +  _ids[id] = _hd;
   1.647 +  _hd = id;
   1.648 +  _claimed--;
   1.649 +#if FID_STATS
   1.650 +  tty->print("[%d] release_par_id(%d), waiters =%d,  claimed = %d.\n",
   1.651 +             thr_self(), id, _waiters, _claimed);
   1.652 +#endif
   1.653 +  if (_waiters > 0)
    1.654 +    // Wake every thread waiting in claim_par_id(); any of them may take the id.
   1.655 +    _mon->notify_all();
   1.656 +}
