Tue, 04 Jul 2017 09:57:19 +0800
[GC] On weakly ordered memory models, the GC's lock-free task-queue algorithms need additional memory barriers.
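
For reference, here is a minimal sketch of the ordering discipline this change
enforces, written with C++11 atomics instead of HotSpot's OrderAccess (the
struct and names are illustrative only, not part of the patch): the queue owner
must write an element before release-storing _bottom, and a stealing thread
must acquire-load _bottom before reading elements, which is exactly the pairing
that get_bottom()/set_bottom() provide on MIPS64 below.

#include <atomic>

struct BottomPublicationDemo {
  std::atomic<unsigned> bottom{0};
  int slot[128];

  void owner_push(int task) {
    unsigned b = bottom.load(std::memory_order_relaxed);
    slot[b % 128] = task;                            // write the element first...
    bottom.store(b + 1, std::memory_order_release);  // ...then publish the index
  }

  bool thief_peek(int& task) {
    // The acquire pairs with the release above: once the new index is seen,
    // the element written before it is guaranteed to be visible too.
    unsigned b = bottom.load(std::memory_order_acquire);
    if (b == 0) return false;
    task = slot[(b - 1) % 128];
    return true;
  }
};
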
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_TASKQUEUE_HPP
#define SHARE_VM_UTILITIES_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "utilities/stack.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "orderAccess_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "orderAccess_linux_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_aix_ppc
# include "orderAccess_aix_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "orderAccess_bsd_zero.inline.hpp"
#endif

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats() { reset(); }

  inline void record_push()     { ++_stats[push]; }
  inline void record_pop()      { ++_stats[pop]; }
  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal(bool success);
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t                    _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
  ++_stats[steal_attempt];
  if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS

// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.

template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

#ifdef MIPS64
private:
#endif
  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

#ifdef MIPS64
protected:
  inline uint get_bottom() const {
    return OrderAccess::load_acquire((volatile juint*)&_bottom);
  }

  inline void set_bottom(uint new_bottom) {
    OrderAccess::release_store(&_bottom, new_bottom);
  }
#endif

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)      { _data = data; }
    Age(const Age& age)       { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

#ifndef MIPS64
    Age   get()        const volatile { return _data; }
    void  set(Age age) volatile       { _data = age._data; }
    idx_t top()        const volatile { return _fields._top; }
    idx_t tag()        const volatile { return _fields._tag; }
#else
    Age   get()        const volatile {
      size_t res = OrderAccess::load_ptr_acquire((volatile intptr_t*) &_data);
      return *(Age*)(&res);
    }
    void  set(Age age) volatile { OrderAccess::release_store_ptr((volatile intptr_t*) &_data, *(size_t*)(&age._data)); }
    idx_t top()        const volatile { return OrderAccess::load_acquire((volatile idx_t*) &(_fields._top)); }
    idx_t tag()        const volatile { return OrderAccess::load_acquire((volatile idx_t*) &(_fields._tag)); }
#endif

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent queues, so the owner thread can reset the state to
    // _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
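
  // A worked example (illustrative, with N == 8, MOD_N_MASK == 7):
  //   bot == 3, top == 3  ->  dirty_size == 0          (empty)
  //   bot == 4, top == 3  ->  dirty_size == 1          (one element)
  //   bot == 3, top == 4  ->  dirty_size == 7 == N - 1 (the post-race state
  //                           described above; size() reports 0)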

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // Return true if the TaskQueue contains any tasks.
  bool peek() const {
#ifdef MIPS64
    return get_bottom() != _age.top();
#else
    return _bottom != _age.top();
#endif
  }

  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
  // races.
  uint size() const {
#ifdef MIPS64
    return size(get_bottom(), _age.top());
#else
    return size(_bottom, _age.top());
#endif
  }

  uint dirty_size() const {
#ifdef MIPS64
    return dirty_size(get_bottom(), _age.top());
#else
    return dirty_size(_bottom, _age.top());
#endif
  }

  void set_empty() {
#ifdef MIPS64
    set_bottom(0);
#else
    _bottom = 0;
#endif
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less than
  // the actual queue size, N: one slot is kept free to distinguish a full
  // queue from an empty one, and a dirty size of N-1 is reserved for the
  // pop_local/pop_global race state described in size() above.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static const uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};

//
// GenericTaskQueue implements an ABP, Arora-Blumofe-Plaxton, double-
// ended-queue (deque), intended for use in work stealing.  Queue operations
// are non-blocking.
//
// A queue owner thread performs push() and pop_local() operations on one end
// of the queue, while other threads may steal work using the pop_global()
// method.
//
// The main difference to the original algorithm is that this
// implementation allows wrap-around at the end of its allocated
// storage, which is an array.
//
// The original paper is:
//
// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
// Thread scheduling for multiprogrammed multiprocessors.
// Theory of Computing Systems 34, 2 (2001), 115-144.
//
// The following paper provides a correctness proof and an
// implementation for weakly ordered memory models including (pseudo-)
// code containing memory barriers for a Chase-Lev deque.  Chase-Lev is
// similar to ABP, with the main difference that it allows resizing of the
// underlying storage:
//
// Le, N. M., Pop, A., Cohen, A., and Nardelli, F. Z.
// Correct and efficient work-stealing for weak memory models.
// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
// practice of parallel programming (PPoPP 2013), 69-80.
//
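// A minimal usage sketch (illustrative; "ScannerTask" stands in for any
// element type, and error handling is elided):
//
//   GenericTaskQueue<ScannerTask, mtGC>* q =
//     new GenericTaskQueue<ScannerTask, mtGC>();
//   q->initialize();
//   q->push(t);                     // owner thread only
//   ScannerTask task;
//   if (q->pop_local(task))  { /* run task; owner thread only */ }
//   if (q->pop_global(task)) { /* run stolen task; any thread */ }
//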

template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
  ArrayAllocator<E, F> _array_allocator;
protected:
  typedef typename TaskQueueSuper<N, F>::Age Age;
  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;

#ifndef MIPS64
  using TaskQueueSuper<N, F>::_bottom;
#endif
  using TaskQueueSuper<N, F>::_age;
  using TaskQueueSuper<N, F>::increment_index;
  using TaskQueueSuper<N, F>::decrement_index;
  using TaskQueueSuper<N, F>::dirty_size;

public:
  using TaskQueueSuper<N, F>::max_elems;
  using TaskQueueSuper<N, F>::size;

#if TASKQUEUE_STATS
  using TaskQueueSuper<N, F>::stats;
#endif

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed).  If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(volatile E& t);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(volatile E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply the closure to all elements in the task queue.
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::initialize() {
  _elems = _array_allocator.allocate(N);
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
#ifdef MIPS64
  uint index = this->get_bottom();
#else
  uint index = _bottom;
#endif
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //               index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];      // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
#ifdef MIPS64
    uint localBot = this->get_bottom();
#else
    uint localBot = _bottom;
#endif
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void)const_cast<E&>(_elems[localBot] = t);
#ifdef MIPS64
    this->set_bottom(increment_index(localBot));
#else
    OrderAccess::release_store(&_bottom, increment_index(localBot));
#endif
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}

// pop_local_slow() is done by the owning thread and is trying to
// get the last task in the queue.  It will compete with pop_global()
// that will be used by other threads.  The tag age is incremented
// whenever the queue goes empty which it will do here if this thread
// gets the last task or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds, see pop_global()).
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
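  // A concrete instance of that hazard (illustrative): with bottom == 1 and
  // Age{top == 0, tag == 7}, a thief reads the element and stalls before its
  // CAS; the owner pops (arriving here) and then pushes again, restoring
  // bottom == 1, top == 0.  Because we install tag == 8 below, the stalled
  // thief's cmpxchg against Age{0, 7} now fails instead of claiming the
  // stale element it read earlier.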
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element.  But the queue is empty
  // and top is greater than bottom.  Fix this representation of the empty
  // queue to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
  Age oldAge = _age.get();
  // Architectures with a weak memory model require a barrier here
  // to guarantee that bottom is not older than age,
  // which is crucial for the correctness of the algorithm.
#if !(defined SPARC || defined IA32 || defined AMD64)
  OrderAccess::fence();
#endif
#ifdef MIPS64
  uint localBot = this->get_bottom();
#else
  uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
#endif
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems, F);
}

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
{
public:
  typedef Stack<E, F>               overflow_t;
  typedef GenericTaskQueue<E, F, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
  }
  return true;
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}
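
// A typical draining pattern for an OverflowTaskQueue (illustrative sketch;
// "process" is a hypothetical consumer):
//
//   OverflowTaskQueue<StarTask, mtGC> q;
//   q.initialize();
//   q.push(t);                     // cannot fail; spills to the overflow stack
//   StarTask task;
//   while (q.pop_overflow(task) || q.pop_local(task)) {
//     process(task);
//   }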

class TaskQueueSetSuper {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  bool steal_best_of_2(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
};

template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
  return _queues[i];
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
  return false;
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k1 = queue_num;
    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    uint k2 = queue_num;
    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    // Sample both and try the larger.
    uint sz1 = _queues[k1]->size();
    uint sz2 = _queues[k2]->size();
    if (sz2 > sz1) return _queues[k2]->pop_global(t);
    else return _queues[k1]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  int _n_threads;
  TaskQueueSetSuper* _queue_set;
  int _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:
711 // "n_threads" is the number of threads to be terminated. "queue_set" is a
712 // queue sets of work queues of other threads.
713 ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);
715 // The current thread has no work, and is ready to terminate if everyone
716 // else is. If returns "true", all threads are terminated. If returns
717 // "false", available work has been observed in one of the task queues,
718 // so the global task is not complete.
719 bool offer_termination() {
720 return offer_termination(NULL);
721 }
723 // As above, but it also terminates if the should_exit_termination()
724 // method of the terminator parameter returns true. If terminator is
725 // NULL, then it is ignored.
726 bool offer_termination(TerminatorTerminator* terminator);
728 // Reset the terminator, so that it may be reused again.
729 // The caller is responsible for ensuring that this is done
730 // in an MT-safe manner, once the previous round of use of
731 // the terminator is finished.
732 void reset_for_reuse();
733 // Same as above but the number of parallel threads is set to the
734 // given number.
735 void reset_for_reuse(int n_threads);
737 #ifdef TRACESPINNING
738 static uint total_yields() { return _total_yields; }
739 static uint total_spins() { return _total_spins; }
740 static uint total_peeks() { return _total_peeks; }
741 static void print_termination_counts();
742 #endif
743 };
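
// A typical work-stealing loop built from these pieces (illustrative sketch;
// "queues", "my_queue", "id", "seed" and "process" are hypothetical):
//
//   ParallelTaskTerminator terminator(n_threads, queues);
//   do {
//     StarTask t;
//     while (my_queue->pop_local(t) || queues->steal(id, &seed, t)) {
//       process(t);
//     }
//   } while (!terminator.offer_termination());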

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
#ifdef MIPS64
  uint localBot = this->get_bottom();
#else
  uint localBot = _bottom;
#endif
  assert(localBot < N, "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void) const_cast<E&>(_elems[localBot] = t);
#ifdef MIPS64
    this->set_bottom(increment_index(localBot));
#else
    OrderAccess::release_store(&_bottom, increment_index(localBot));
#endif
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
#ifdef MIPS64
  uint localBot = this->get_bottom();
#else
  uint localBot = _bottom;
#endif
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
#ifdef MIPS64
  this->set_bottom(localBot);
#else
  _bottom = localBot;
#endif
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top();    // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}

typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void* _holder;  // either union oop* or narrowOop*

  enum { COMPRESSED_OOP_MASK = 1 };

public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask()             { _holder = NULL; }
  operator oop*()        { return (oop*)_holder; }
  operator narrowOop*()  {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
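
// Tagging example (illustrative): oop* and narrowOop* are at least 2-byte
// aligned, so the low bit of _holder is free to serve as the type tag:
//
//   narrowOop* np = ...;
//   StarTask t(np);        // _holder == ((uintptr_t)np | 1)
//   t.is_narrow();         // true: the tag bit is set
//   narrowOop* back = t;   // conversion masks the tag bit off again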

class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    (void)const_cast<oop&>(_obj = t._obj);
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask, mtClass>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue, mtClass> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t, mtInternal>          RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue, mtClass>  RegionTaskQueueSet;

#endif // SHARE_VM_UTILITIES_TASKQUEUE_HPP