/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */
#ifndef SHARE_VM_UTILITIES_TASKQUEUE_HPP
#define SHARE_VM_UTILITIES_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "utilities/stack.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "orderAccess_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "orderAccess_linux_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_aix_ppc
# include "orderAccess_aix_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "orderAccess_bsd_zero.inline.hpp"
#endif
// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS
#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats()       { reset(); }

  inline void record_push()     { ++_stats[push]; }
  inline void record_pop()      { ++_stats[pop]; }
  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal(bool success);
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t                    _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
  ++_stats[steal_attempt];
  if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS
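
// Illustrative usage sketch (not part of the original header): code that
// touches a queue's statistics is wrapped in TASKQUEUE_STATS_ONLY so it
// compiles away entirely when TASKQUEUE_STATS is 0, e.g.
//
//   TASKQUEUE_STATS_ONLY(queue->stats.record_push());
//   TASKQUEUE_STATS_ONLY(queue->stats.print(tty));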
// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.

template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)      { _data = data; }
    Age(const Age& age)       { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

    Age  get()        const volatile { return _data; }
    void set(Age age) volatile       { _data = age._data; }

    idx_t top() const volatile { return _fields._top; }
    idx_t tag() const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };
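
  // Illustrative note (not in the original source): packing _top and _tag
  // into one word lets a single cmpxchg update both atomically.  The tag
  // guards against ABA: if top cycles through all N values back to the same
  // index between a thief's read and its CAS, the tag will have advanced,
  // so the stale CAS fails rather than claiming an element twice.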
  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }
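
  // Worked example (illustrative, assuming N == 8, so MOD_N_MASK == 7):
  //   increment_index(7) == (7 + 1) & 7 == 0    // wraps to the start
  //   decrement_index(0) == (0 - 1) & 7 == 7    // unsigned wrap, then mask
  //   dirty_size(2, 6)   == (2 - 6) & 7 == 4    // bottom has wrapped past top
  // The masking only works because N is a power of two.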
  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent threads, so the owner thread can reset the state
    // to _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
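
  // Worked example of the special case (illustrative, assuming N == 8):
  // start with bot == 5, top == 4 (one element).  pop_local decrements bot
  // to 4 while a racing pop_global increments top to 5; now
  // dirty_size(4, 5) == (4 - 5) & 7 == 7 == N - 1, which size() reports as
  // 0: the queue is correctly seen as empty.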
public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // peek() returns true if the TaskQueue contains any tasks;
  // is_empty() returns true if it contains none.
  bool peek()     const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
  // races.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less than
  // the actual queue size: one slot is given up to distinguish a full queue
  // from an empty one, and a second to the "N-1 means empty" convention used
  // by the pop_local/pop_global race described in size() above.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};
//
// GenericTaskQueue implements an ABP, Arora-Blumofe-Plaxton, double-
// ended-queue (deque), intended for use in work stealing.  Queue operations
// are non-blocking.
//
// A queue owner thread performs push() and pop_local() operations on one end
// of the queue, while other threads may steal work using the pop_global()
// method.
//
// The main difference to the original algorithm is that this
// implementation allows wrap-around at the end of its allocated
// storage, which is an array.
//
// The original paper is:
//
// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
// Thread scheduling for multiprogrammed multiprocessors.
// Theory of Computing Systems 34, 2 (2001), 115-144.
//
// The following paper provides a correctness proof and an
// implementation for weakly ordered memory models including (pseudo-)
// code containing memory barriers for a Chase-Lev deque.  Chase-Lev is
// similar to ABP, with the main difference that it allows resizing of the
// underlying storage:
//
// Le, N. M., Pop, A., Cohen, A., and Nardelli, F. Z.
// Correct and efficient work-stealing for weak memory models.
// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
// practice of parallel programming (PPoPP 2013), 69-80.
//
template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
  ArrayAllocator<E, F> _array_allocator;
protected:
  typedef typename TaskQueueSuper<N, F>::Age Age;
  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;

  using TaskQueueSuper<N, F>::_bottom;
  using TaskQueueSuper<N, F>::_age;
  using TaskQueueSuper<N, F>::increment_index;
  using TaskQueueSuper<N, F>::decrement_index;
  using TaskQueueSuper<N, F>::dirty_size;

public:
  using TaskQueueSuper<N, F>::max_elems;
  using TaskQueueSuper<N, F>::size;

#if TASKQUEUE_STATS
  using TaskQueueSuper<N, F>::stats;
#endif

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed).  If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(volatile E& t);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(volatile E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply the closure to all elements in the task queue.
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};
template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::initialize() {
  _elems = _array_allocator.allocate(N);
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //            index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];      // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void)const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}
// pop_local_slow() is done by the owning thread and is trying to
// get the last task in the queue.  It will compete with pop_global()
// that will be used by other threads.  The tag age is incremented
// whenever the queue goes empty which it will do here if this thread
// gets the last task or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds, see pop_global()).
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element.  But the queue is empty
  // and top is greater than bottom.  Fix this representation of the empty
  // queue to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
  Age oldAge = _age.get();
  // Architectures with a weak memory model require a barrier here
  // to guarantee that bottom is not older than age,
  // which is crucial for the correctness of the algorithm.
#if !(defined SPARC || defined IA32 || defined AMD64)
  OrderAccess::fence();
#endif
  uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}
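
// Illustrative note (not in the original source): every thief that observed
// the same oldAge attempts the identical transition of _age, advancing top
// by one (and bumping the tag on wrap-around), so at most one CAS succeeds
// and each element is claimed at most once.  The fence above keeps the read
// of _bottom at least as fresh as the read of _age; without it, a weakly
// ordered machine could pair a stale _bottom with a fresh _age and compute
// size() from an inconsistent snapshot of the queue.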
template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems, F);
}
// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
{
public:
  typedef Stack<E, F>               overflow_t;
  typedef GenericTaskQueue<E, F, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};
template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
  }
  return true;
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}
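
// Illustrative usage sketch (not part of the original header): a worker
// typically drains its own overflow stack before touching the shared deque,
// since pop_overflow() is uncontended.  Assuming a RegionTaskQueue* q and a
// hypothetical process() helper:
//
//   size_t region;
//   while (q->pop_overflow(region) || q->pop_local(region)) {
//     process(region);
//   }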
class TaskQueueSetSuper {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  bool steal_best_of_2(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
};
template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
  return _queues[i];
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
  return false;
}
template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    if (UseNUMASteal) {
      // Loongson NUMA-aware stealing: make up to 10 attempts to pick a
      // victim whose queue number is within a distance of 3 of our own
      // (queue numbers are assumed to correlate with NUMA locality).  The
      // distance needs an explicit ordering check: a naive "k - queue_num
      // > 3" on unsigned values would wrap around and reject every victim.
      uint i = 10;
      uint k = queue_num;
      while ((k == queue_num ||
              (k > queue_num ? k - queue_num : queue_num - k) > 3) && i > 0) {
        i--;
        k = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
      }
      if (i > 0) {
        return _queues[k]->pop_global(t);
      } else {
        // No nearby victim found; fall back to stealing from any other queue.
        while (k == queue_num) {
          k = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
        }
        return _queues[k]->pop_global(t);
      }
    } else {
      uint k1 = queue_num;
      while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
      uint k2 = queue_num;
      while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
      // Sample both and try the larger.
      uint sz1 = _queues[k1]->size();
      uint sz2 = _queues[k2]->size();
      if (sz2 > sz1) return _queues[k2]->pop_global(t);
      else return _queues[k1]->pop_global(t);
    }
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}
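
// Design note (illustrative, not in the original source): sampling two random
// victims and stealing from the longer queue is the classic "power of two
// choices" load-balancing heuristic; compared to a single random probe it
// sharply reduces the chance of repeatedly hitting near-empty queues.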
template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}
// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  int _n_threads;
  TaskQueueSetSuper* _queue_set;
  int _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is
  // the set of work queues of the other threads.
  ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If returns "true", all threads are terminated.  If returns
  // "false", available work has been observed in one of the task queues,
  // so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(int n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins()  { return _total_spins; }
  static uint total_peeks()  { return _total_peeks; }
  static void print_termination_counts();
#endif
};
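
// Illustrative worker-loop sketch (not part of the original header): how a
// worker typically combines pop_local(), steal(), and offer_termination().
// worker_loop and process() are hypothetical names for the example; the
// OopTaskQueue/OopTaskQueueSet typedefs appear later in this file.
//
//   void worker_loop(uint id, OopTaskQueueSet* qset, OopTaskQueue* my_queue,
//                    ParallelTaskTerminator* terminator, int* seed) {
//     do {
//       oop task;
//       // Drain local work first, then steal until both fail.
//       while (my_queue->pop_local(task) || qset->steal(id, seed, task)) {
//         process(task);  // may push more work onto my_queue
//       }
//     } while (!terminator->offer_termination());
//   }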
template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
  uint localBot = _bottom;
  assert(localBot < N, "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void) const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}
template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top();    // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}
typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
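
// Illustrative sketch (not part of the original header): minimal single-queue
// usage; obj is a hypothetical oop.  initialize() must be called before the
// first push.
//
//   OopTaskQueue* q = new OopTaskQueue();
//   q->initialize();
//   if (!q->push(obj)) {
//     // Queue full: only possible for a plain GenericTaskQueue;
//     // OverflowTaskQueue::push() always succeeds.
//   }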
#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif
// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void* _holder;        // either union oop* or narrowOop*

  enum { COMPRESSED_OOP_MASK = 1 };

public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask() { _holder = NULL; }
  operator oop*()       { return (oop*)_holder; }
  operator narrowOop*() {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
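
// Illustrative note (not in the original source): StarTask is a tagged
// pointer.  With COMPRESSED_OOP_MASK == 1, a narrowOop* whose address is,
// say, 0x1000 is stored as 0x1001; is_narrow() tests the low bit, and the
// narrowOop* conversion masks it back off.  This works because both pointer
// types are at least 2-byte aligned, so the low bit is always free (the
// asserts in the constructors check exactly this).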
class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    (void)const_cast<oop&>(_obj = t._obj);
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask, mtClass>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue, mtClass> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t, mtInternal>          RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue, mtClass>  RegionTaskQueueSet;

#endif // SHARE_VM_UTILITIES_TASKQUEUE_HPP