src/share/vm/utilities/workgroup.cpp

changeset   777:37f87013dfd8
parent      435:a61af66fc99e
child       1907:c18cbe5936b8
comparison  624:0b27f3512f9e -> 777:37f87013dfd8
@@ -26,36 +26,47 @@
 # include "incls/_workgroup.cpp.incl"
 
 // Definitions of WorkGang methods.
 
 AbstractWorkGang::AbstractWorkGang(const char* name,
-                                   bool are_GC_threads) :
+                                   bool are_GC_task_threads,
+                                   bool are_ConcurrentGC_threads) :
   _name(name),
-  _are_GC_threads(are_GC_threads) {
+  _are_GC_task_threads(are_GC_task_threads),
+  _are_ConcurrentGC_threads(are_ConcurrentGC_threads) {
+
+  assert(!(are_GC_task_threads && are_ConcurrentGC_threads),
+         "They cannot both be STW GC and Concurrent threads");
+
   // Other initialization.
   _monitor = new Monitor(/* priority */ Mutex::leaf,
                          /* name */ "WorkGroup monitor",
-                         /* allow_vm_block */ are_GC_threads);
+                         /* allow_vm_block */ are_GC_task_threads);
   assert(monitor() != NULL, "Failed to allocate monitor");
   _terminate = false;
   _task = NULL;
   _sequence_number = 0;
   _started_workers = 0;
   _finished_workers = 0;
 }
 
 WorkGang::WorkGang(const char* name,
                    int workers,
-                   bool are_GC_threads) :
-  AbstractWorkGang(name, are_GC_threads) {
+                   bool are_GC_task_threads,
+                   bool are_ConcurrentGC_threads) :
+  AbstractWorkGang(name, are_GC_task_threads, are_ConcurrentGC_threads)
+{
   // Save arguments.
   _total_workers = workers;
+
   if (TraceWorkGang) {
     tty->print_cr("Constructing work gang %s with %d threads", name, workers);
   }
   _gang_workers = NEW_C_HEAP_ARRAY(GangWorker*, workers);
-  assert(gang_workers() != NULL, "Failed to allocate gang workers");
+  if (gang_workers() == NULL) {
+    vm_exit_out_of_memory(0, "Cannot create GangWorker array.");
+  }
   for (int worker = 0; worker < total_workers(); worker += 1) {
     GangWorker* new_worker = new GangWorker(this, worker);
     assert(new_worker != NULL, "Failed to allocate GangWorker");
     _gang_workers[worker] = new_worker;
     if (new_worker == NULL || !os::create_thread(new_worker, os::pgc_thread))
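The two constructors above replace the single are_GC_threads flag with are_GC_task_threads (stop-the-world GC workers) and are_ConcurrentGC_threads (concurrent GC workers); the new assert enforces that a gang cannot claim to be both. A hedged sketch of constructing a gang under the new signature (the gang name and worker count are hypothetical, for illustration only):

  WorkGang* gang = new WorkGang("Hypothetical workers",
                                /* workers */ 4,
                                /* are_GC_task_threads */ true,
                                /* are_ConcurrentGC_threads */ false);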
@@ -283,11 +294,15 @@
     previous_sequence_number = data.sequence_number();
   }
 }
 
 bool GangWorker::is_GC_task_thread() const {
-  return gang()->are_GC_threads();
+  return gang()->are_GC_task_threads();
+}
+
+bool GangWorker::is_ConcurrentGC_thread() const {
+  return gang()->are_ConcurrentGC_threads();
 }
 
 void GangWorker::print_on(outputStream* st) const {
   st->print("\"%s\" ", name());
   Thread::print_on(st);
@@ -310,30 +325,47 @@
 
 // *** WorkGangBarrierSync
 
 WorkGangBarrierSync::WorkGangBarrierSync()
   : _monitor(Mutex::safepoint, "work gang barrier sync", true),
-    _n_workers(0), _n_completed(0) {
+    _n_workers(0), _n_completed(0), _should_reset(false) {
 }
 
 WorkGangBarrierSync::WorkGangBarrierSync(int n_workers, const char* name)
   : _monitor(Mutex::safepoint, name, true),
-    _n_workers(n_workers), _n_completed(0) {
+    _n_workers(n_workers), _n_completed(0), _should_reset(false) {
 }
 
 void WorkGangBarrierSync::set_n_workers(int n_workers) {
   _n_workers = n_workers;
   _n_completed = 0;
+  _should_reset = false;
 }
 
 void WorkGangBarrierSync::enter() {
   MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
+  if (should_reset()) {
+    // The should_reset() was set and we are the first worker to enter
+    // the sync barrier. We will zero the n_completed() count which
+    // effectively resets the barrier.
+    zero_completed();
+    set_should_reset(false);
+  }
   inc_completed();
   if (n_completed() == n_workers()) {
+    // At this point we would like to reset the barrier to be ready in
+    // case it is used again. However, we cannot set n_completed() to
+    // 0, even after the notify_all(), given that some other workers
+    // might still be waiting for n_completed() to become ==
+    // n_workers(). So, if we set n_completed() to 0, those workers
+    // will get stuck (as they will wake up, see that n_completed() !=
+    // n_workers() and go back to sleep). Instead, we raise the
+    // should_reset() flag and the barrier will be reset the first
+    // time a worker enters it again.
+    set_should_reset(true);
     monitor()->notify_all();
-  }
-  else {
+  } else {
     while (n_completed() != n_workers()) {
       monitor()->wait(/* no_safepoint_check */ true);
     }
   }
 }
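The comment block in enter() explains why the barrier cannot be reset eagerly: the last arriving worker only raises should_reset(), and the counter is zeroed by the first worker of the next cycle. For illustration only, here is a minimal standalone sketch of the same deferred-reset pattern using std::mutex and std::condition_variable (class and member names are hypothetical; this is not HotSpot code, and like the original it assumes a new cycle starts only after every worker has left the previous one):

  #include <condition_variable>
  #include <mutex>

  class BarrierSyncSketch {
    std::mutex _m;
    std::condition_variable _cv;
    int _n_workers;
    int _n_completed = 0;
    bool _should_reset = false;
   public:
    explicit BarrierSyncSketch(int n_workers) : _n_workers(n_workers) {}
    void enter() {
      std::unique_lock<std::mutex> x(_m);
      if (_should_reset) {
        // First worker of the next cycle performs the deferred reset.
        _n_completed = 0;
        _should_reset = false;
      }
      _n_completed++;
      if (_n_completed == _n_workers) {
        // Defer zeroing _n_completed: the waiters below must still be
        // able to observe _n_completed == _n_workers when they wake up.
        _should_reset = true;
        _cv.notify_all();
      } else {
        _cv.wait(x, [this] { return _n_completed == _n_workers; });
      }
    }
  };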
@@ -440,5 +472,124 @@
     clear();
     return true;
   }
   return false;
 }
+
+bool FreeIdSet::_stat_init = false;
+FreeIdSet* FreeIdSet::_sets[NSets];
+bool FreeIdSet::_safepoint;
+
+FreeIdSet::FreeIdSet(int sz, Monitor* mon) :
+  _sz(sz), _mon(mon), _hd(0), _waiters(0), _index(-1), _claimed(0)
+{
+  _ids = new int[sz];
+  for (int i = 0; i < sz; i++) _ids[i] = i+1;
+  _ids[sz-1] = end_of_list; // Terminate the embedded free list.
+  if (!_stat_init) {
+    for (int j = 0; j < NSets; j++) _sets[j] = NULL;
+    _stat_init = true;
+  }
+  // Add to sets. (This should happen while the system is still single-threaded.)
+  for (int j = 0; j < NSets; j++) {
+    if (_sets[j] == NULL) {
+      _sets[j] = this;
+      _index = j;
+      break;
+    }
+  }
+  guarantee(_index != -1, "Too many FreeIdSets in use!");
+}
+
+FreeIdSet::~FreeIdSet() {
+  _sets[_index] = NULL;
+}
+
+void FreeIdSet::set_safepoint(bool b) {
+  _safepoint = b;
+  if (b) {
+    for (int j = 0; j < NSets; j++) {
+      if (_sets[j] != NULL && _sets[j]->_waiters > 0) {
+        Monitor* mon = _sets[j]->_mon;
+        mon->lock_without_safepoint_check();
+        mon->notify_all();
+        mon->unlock();
+      }
+    }
+  }
+}
+
+#define FID_STATS 0
+
+int FreeIdSet::claim_par_id() {
+#if FID_STATS
+  thread_t tslf = thr_self();
+  tty->print("claim_par_id[%d]: sz = %d, claimed = %d\n", tslf, _sz, _claimed);
+#endif
+  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
+  while (!_safepoint && _hd == end_of_list) {
+    _waiters++;
+#if FID_STATS
+    if (_waiters > 5) {
+      tty->print("claim_par_id waiting[%d]: %d waiters, %d claimed.\n",
+                 tslf, _waiters, _claimed);
+    }
+#endif
+    _mon->wait(Mutex::_no_safepoint_check_flag);
+    _waiters--;
+  }
+  if (_hd == end_of_list) {
+#if FID_STATS
+    tty->print("claim_par_id[%d]: returning EOL.\n", tslf);
+#endif
+    return -1;
+  } else {
+    int res = _hd;
+    _hd = _ids[res];
+    _ids[res] = claimed; // For debugging.
+    _claimed++;
+#if FID_STATS
+    tty->print("claim_par_id[%d]: returning %d, claimed = %d.\n",
+               tslf, res, _claimed);
+#endif
+    return res;
+  }
+}
+
+bool FreeIdSet::claim_perm_id(int i) {
+  assert(0 <= i && i < _sz, "Out of range.");
+  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
+  int prev = end_of_list;
+  int cur = _hd;
+  while (cur != end_of_list) {
+    if (cur == i) {
+      if (prev == end_of_list) {
+        _hd = _ids[cur];
+      } else {
+        _ids[prev] = _ids[cur];
+      }
+      _ids[cur] = claimed;
+      _claimed++;
+      return true;
+    } else {
+      prev = cur;
+      cur = _ids[cur];
+    }
+  }
+  return false;
+}
+
+void FreeIdSet::release_par_id(int id) {
+  MutexLockerEx x(_mon, Mutex::_no_safepoint_check_flag);
+  assert(_ids[id] == claimed, "Precondition.");
+  _ids[id] = _hd;
+  _hd = id;
+  _claimed--;
+#if FID_STATS
+  tty->print("[%d] release_par_id(%d), waiters = %d, claimed = %d.\n",
+             thr_self(), id, _waiters, _claimed);
+#endif
+  if (_waiters > 0) {
+    // A single notify() would suffice here, but notify_all() is safer.
+    _mon->notify_all();
+  }
+}
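FreeIdSet threads its free list through the _ids array itself: _hd holds the first free id, _ids[i] holds the id that follows i on the free list, end_of_list terminates the chain, and claimed slots are overwritten with the claimed sentinel for debugging. A hedged usage sketch, assuming only the API shown in this change (the monitor arguments and set size are hypothetical):

  Monitor* mon = new Monitor(/* priority */ Mutex::leaf,
                             /* name */ "FreeIdSet monitor",
                             /* allow_vm_block */ true);
  FreeIdSet* id_set = new FreeIdSet(/* sz */ 8, mon);

  int id = id_set->claim_par_id();  // blocks while every id is claimed; returns -1
                                    // once set_safepoint(true) has been signalled
  if (id != -1) {
    // ... index per-worker state with id ...
    id_set->release_par_id(id);     // push id back on the free list, wake waiters
  }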
