src/share/vm/utilities/workgroup.cpp

changeset 777
37f87013dfd8
parent 435
a61af66fc99e
child 1907
c18cbe5936b8
     1.1 --- a/src/share/vm/utilities/workgroup.cpp	Wed Jun 04 13:51:09 2008 -0700
     1.2 +++ b/src/share/vm/utilities/workgroup.cpp	Thu Jun 05 15:57:56 2008 -0700
     1.3 @@ -28,13 +28,19 @@
     1.4  // Definitions of WorkGang methods.
     1.5  
     1.6  AbstractWorkGang::AbstractWorkGang(const char* name,
     1.7 -                                   bool  are_GC_threads) :
     1.8 +                                   bool  are_GC_task_threads,
     1.9 +                                   bool  are_ConcurrentGC_threads) :
    1.10    _name(name),
    1.11 -  _are_GC_threads(are_GC_threads) {
    1.12 +  _are_GC_task_threads(are_GC_task_threads),
    1.13 +  _are_ConcurrentGC_threads(are_ConcurrentGC_threads) {
    1.14 +
    1.15 +  assert(!(are_GC_task_threads && are_ConcurrentGC_threads),
     1.16 +         "They cannot both be STW GC and Concurrent threads");
    1.17 +
    1.18    // Other initialization.
    1.19    _monitor = new Monitor(/* priority */       Mutex::leaf,
    1.20                           /* name */           "WorkGroup monitor",
    1.21 -                         /* allow_vm_block */ are_GC_threads);
    1.22 +                         /* allow_vm_block */ are_GC_task_threads);
    1.23    assert(monitor() != NULL, "Failed to allocate monitor");
    1.24    _terminate = false;
    1.25    _task = NULL;
    1.26 @@ -44,16 +50,21 @@
    1.27  }
    1.28  
    1.29  WorkGang::WorkGang(const char* name,
    1.30 -                   int           workers,
    1.31 -                   bool          are_GC_threads) :
    1.32 -  AbstractWorkGang(name, are_GC_threads) {
    1.33 +                   int         workers,
    1.34 +                   bool        are_GC_task_threads,
    1.35 +                   bool        are_ConcurrentGC_threads) :
    1.36 +  AbstractWorkGang(name, are_GC_task_threads, are_ConcurrentGC_threads)
    1.37 +{
    1.38    // Save arguments.
    1.39    _total_workers = workers;
    1.40 +
    1.41    if (TraceWorkGang) {
    1.42      tty->print_cr("Constructing work gang %s with %d threads", name, workers);
    1.43    }
    1.44    _gang_workers = NEW_C_HEAP_ARRAY(GangWorker*, workers);
    1.45 -  assert(gang_workers() != NULL, "Failed to allocate gang workers");
    1.46 +  if (gang_workers() == NULL) {
    1.47 +    vm_exit_out_of_memory(0, "Cannot create GangWorker array.");
    1.48 +  }
    1.49    for (int worker = 0; worker < total_workers(); worker += 1) {
    1.50      GangWorker* new_worker = new GangWorker(this, worker);
    1.51      assert(new_worker != NULL, "Failed to allocate GangWorker");
    1.52 @@ -285,7 +296,11 @@
    1.53  }
    1.54  
    1.55  bool GangWorker::is_GC_task_thread() const {
    1.56 -  return gang()->are_GC_threads();
    1.57 +  return gang()->are_GC_task_threads();
    1.58 +}
    1.59 +
    1.60 +bool GangWorker::is_ConcurrentGC_thread() const {
    1.61 +  return gang()->are_ConcurrentGC_threads();
    1.62  }
    1.63  
    1.64  void GangWorker::print_on(outputStream* st) const {
    1.65 @@ -312,26 +327,43 @@
    1.66  
    1.67  WorkGangBarrierSync::WorkGangBarrierSync()
    1.68    : _monitor(Mutex::safepoint, "work gang barrier sync", true),
    1.69 -    _n_workers(0), _n_completed(0) {
    1.70 +    _n_workers(0), _n_completed(0), _should_reset(false) {
    1.71  }
    1.72  
    1.73  WorkGangBarrierSync::WorkGangBarrierSync(int n_workers, const char* name)
    1.74    : _monitor(Mutex::safepoint, name, true),
    1.75 -    _n_workers(n_workers), _n_completed(0) {
    1.76 +    _n_workers(n_workers), _n_completed(0), _should_reset(false) {
    1.77  }
    1.78  
    1.79  void WorkGangBarrierSync::set_n_workers(int n_workers) {
    1.80    _n_workers   = n_workers;
    1.81    _n_completed = 0;
    1.82 +  _should_reset = false;
    1.83  }
    1.84  
    1.85  void WorkGangBarrierSync::enter() {
    1.86    MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
    1.87 +  if (should_reset()) {
    1.88 +    // The should_reset() was set and we are the first worker to enter
    1.89 +    // the sync barrier. We will zero the n_completed() count which
    1.90 +    // effectively resets the barrier.
    1.91 +    zero_completed();
    1.92 +    set_should_reset(false);
    1.93 +  }
    1.94    inc_completed();
    1.95    if (n_completed() == n_workers()) {
    1.96 +    // At this point we would like to reset the barrier to be ready in
    1.97 +    // case it is used again. However, we cannot set n_completed() to
    1.98 +    // 0, even after the notify_all(), given that some other workers
    1.99 +    // might still be waiting for n_completed() to become ==
   1.100 +    // n_workers(). So, if we set n_completed() to 0, those workers
   1.101 +    // will get stuck (as they will wake up, see that n_completed() !=
   1.102 +    // n_workers() and go back to sleep). Instead, we raise the
   1.103 +    // should_reset() flag and the barrier will be reset the first
   1.104 +    // time a worker enters it again.
   1.105 +    set_should_reset(true);
   1.106      monitor()->notify_all();
   1.107 -  }
   1.108 -  else {
   1.109 +  } else {
   1.110      while (n_completed() != n_workers()) {
   1.111        monitor()->wait(/* no_safepoint_check */ true);
   1.112      }
   1.113 @@ -442,3 +474,122 @@
   1.114    }
   1.115    return false;
   1.116  }
   1.117 +
   1.118 +bool FreeIdSet::_stat_init = false;
   1.119 +FreeIdSet* FreeIdSet::_sets[NSets];
   1.120 +bool FreeIdSet::_safepoint;
   1.121 +
   1.122 +FreeIdSet::FreeIdSet(int sz, Monitor* mon) :
   1.123 +  _sz(sz), _mon(mon), _hd(0), _waiters(0), _index(-1), _claimed(0)
   1.124 +{
   1.125 +  _ids = new int[sz];
   1.126 +  for (int i = 0; i < sz; i++) _ids[i] = i+1;
   1.127 +  _ids[sz-1] = end_of_list; // end of list.
     1.128 +  if (!_stat_init) {
   1.129 +    for (int j = 0; j < NSets; j++) _sets[j] = NULL;
   1.130 +    _stat_init = true;
   1.131 +  }
   1.132 +  // Add to sets.  (This should happen while the system is still single-threaded.)
   1.133 +  for (int j = 0; j < NSets; j++) {
   1.134 +    if (_sets[j] == NULL) {
   1.135 +      _sets[j] = this;
   1.136 +      _index = j;
   1.137 +      break;
   1.138 +    }
   1.139 +  }
   1.140 +  guarantee(_index != -1, "Too many FreeIdSets in use!");
   1.141 +}
   1.142 +
   1.143 +FreeIdSet::~FreeIdSet() {
   1.144 +  _sets[_index] = NULL;
   1.145 +}
   1.146 +
   1.147 +void FreeIdSet::set_safepoint(bool b) {
   1.148 +  _safepoint = b;
   1.149 +  if (b) {
   1.150 +    for (int j = 0; j < NSets; j++) {
   1.151 +      if (_sets[j] != NULL && _sets[j]->_waiters > 0) {
   1.152 +        Monitor* mon = _sets[j]->_mon;
   1.153 +        mon->lock_without_safepoint_check();
   1.154 +        mon->notify_all();
   1.155 +        mon->unlock();
   1.156 +      }
   1.157 +    }
   1.158 +  }
   1.159 +}
   1.160 +
   1.161 +#define FID_STATS 0
   1.162 +
   1.163 +int FreeIdSet::claim_par_id() {
   1.164 +#if FID_STATS
   1.165 +  thread_t tslf = thr_self();
   1.166 +  tty->print("claim_par_id[%d]: sz = %d, claimed = %d\n", tslf, _sz, _claimed);
   1.167 +#endif
   1.168 +  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
   1.169 +  while (!_safepoint && _hd == end_of_list) {
   1.170 +    _waiters++;
   1.171 +#if FID_STATS
   1.172 +    if (_waiters > 5) {
   1.173 +      tty->print("claim_par_id waiting[%d]: %d waiters, %d claimed.\n",
   1.174 +                 tslf, _waiters, _claimed);
   1.175 +    }
   1.176 +#endif
   1.177 +    _mon->wait(Mutex::_no_safepoint_check_flag);
   1.178 +    _waiters--;
   1.179 +  }
   1.180 +  if (_hd == end_of_list) {
   1.181 +#if FID_STATS
   1.182 +    tty->print("claim_par_id[%d]: returning EOL.\n", tslf);
   1.183 +#endif
   1.184 +    return -1;
   1.185 +  } else {
   1.186 +    int res = _hd;
   1.187 +    _hd = _ids[res];
   1.188 +    _ids[res] = claimed;  // For debugging.
   1.189 +    _claimed++;
   1.190 +#if FID_STATS
   1.191 +    tty->print("claim_par_id[%d]: returning %d, claimed = %d.\n",
   1.192 +               tslf, res, _claimed);
   1.193 +#endif
   1.194 +    return res;
   1.195 +  }
   1.196 +}
   1.197 +
   1.198 +bool FreeIdSet::claim_perm_id(int i) {
   1.199 +  assert(0 <= i && i < _sz, "Out of range.");
   1.200 +  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
   1.201 +  int prev = end_of_list;
   1.202 +  int cur = _hd;
   1.203 +  while (cur != end_of_list) {
   1.204 +    if (cur == i) {
   1.205 +      if (prev == end_of_list) {
   1.206 +        _hd = _ids[cur];
   1.207 +      } else {
   1.208 +        _ids[prev] = _ids[cur];
   1.209 +      }
   1.210 +      _ids[cur] = claimed;
   1.211 +      _claimed++;
   1.212 +      return true;
   1.213 +    } else {
   1.214 +      prev = cur;
   1.215 +      cur = _ids[cur];
   1.216 +    }
   1.217 +  }
   1.218 +  return false;
   1.219 +
   1.220 +}
   1.221 +
   1.222 +void FreeIdSet::release_par_id(int id) {
   1.223 +  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
   1.224 +  assert(_ids[id] == claimed, "Precondition.");
   1.225 +  _ids[id] = _hd;
   1.226 +  _hd = id;
   1.227 +  _claimed--;
   1.228 +#if FID_STATS
   1.229 +  tty->print("[%d] release_par_id(%d), waiters =%d,  claimed = %d.\n",
   1.230 +             thr_self(), id, _waiters, _claimed);
   1.231 +#endif
   1.232 +  if (_waiters > 0)
   1.233 +    // Notify all would be safer, but this is OK, right?
   1.234 +    _mon->notify_all();
   1.235 +}

mercurial