Sun, 25 Sep 2011 16:03:29 -0700
7089790: integrate bsd-port changes
Reviewed-by: kvn, twisti, jrose
Contributed-by: Kurt Miller <kurt@intricatesoftware.com>, Greg Lewis <glewis@eyesbeyond.com>, Jung-uk Kim <jkim@freebsd.org>, Christos Zoulas <christos@zoulas.com>, Landon Fuller <landonf@plausible.coop>, The FreeBSD Foundation <board@freebsdfoundation.org>, Michael Franz <mvfranz@gmail.com>, Roger Hoover <rhoover@apple.com>, Alexander Strange <astrange@apple.com>
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_TASKQUEUE_HPP
#define SHARE_VM_UTILITIES_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "utilities/stack.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "orderAccess_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "orderAccess_linux_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "orderAccess_bsd_zero.inline.hpp"
#endif

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS
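
// Usage sketch (illustrative, not part of the original change): wrapping a
// stats update in TASKQUEUE_STATS_ONLY compiles it away entirely in product
// builds, e.g.
//
//   TASKQUEUE_STATS_ONLY(stats.record_push());  // no-op when TASKQUEUE_STATS == 0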

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats()       { reset(); }

  inline void record_push()     { ++_stats[push]; }
  inline void record_pop()      { ++_stats[pop]; }
  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal(bool success);
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t                    _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
  ++_stats[steal_attempt];
  if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS
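
// Illustrative sketch of dumping the collected stats (assumes a two-line
// header, as suggested by print_header()'s "line" parameter; "q" is a
// placeholder queue):
//
//   TaskQueueStats::print_header(1, tty);  // first header line
//   TaskQueueStats::print_header(2, tty);  // second header line
//   q->stats.print(tty);                   // one row of counters
//   tty->cr();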

template <unsigned int N>
class TaskQueueSuper: public CHeapObj {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)      { _data = data; }
    Age(const Age& age)       { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

    Age   get()        const volatile { return _data; }
    void  set(Age age) volatile       { _data = age._data; }

    idx_t top()        const volatile { return _fields._top; }
    idx_t tag()        const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };
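
  // Worked example of the Age encoding (illustrative): on LP64, idx_t is
  // uint32_t, so _top and _tag each occupy one half of the size_t _data, and
  // a single CAS on _data updates both fields atomically:
  //
  //   Age a(/* top */ 5, /* tag */ 2);  // _data packs {top = 5, tag = 2}
  //   a.increment();                    // top -> 6, tag unchanged
  //   // When top wraps to 0 the tag is bumped, which defeats ABA on the CAS.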

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent queues, so the owner thread can reset the state
    // to _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
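
  // Worked example (illustrative, with N = 8, so MOD_N_MASK = 7): if
  // _bottom == 2 and top == 3 after the race above, dirty_size() returns
  // (2 - 3) & 7 == 7 == N-1, so size() reports 0 (empty) rather than seven
  // phantom elements.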

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // peek() returns true if the TaskQueue contains any tasks;
  // is_empty() returns true if it contains none.
  bool peek()     const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
  // races.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less
  // than the actual queue size, for somewhat complicated reasons.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};

template<class E, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N> {
protected:
  typedef typename TaskQueueSuper<N>::Age Age;
  typedef typename TaskQueueSuper<N>::idx_t idx_t;

  using TaskQueueSuper<N>::_bottom;
  using TaskQueueSuper<N>::_age;
  using TaskQueueSuper<N>::increment_index;
  using TaskQueueSuper<N>::decrement_index;
  using TaskQueueSuper<N>::dirty_size;

public:
  using TaskQueueSuper<N>::max_elems;
  using TaskQueueSuper<N>::size;
  TASKQUEUE_STATS_ONLY(using TaskQueueSuper<N>::stats;)

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed).  If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(E& t);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply the closure to all elements in the task queue.
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};

template<class E, unsigned int N>
GenericTaskQueue<E, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

template<class E, unsigned int N>
void GenericTaskQueue<E, N>::initialize() {
  _elems = NEW_C_HEAP_ARRAY(E, N);
}

template<class E, unsigned int N>
void GenericTaskQueue<E, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //            index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];      // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}

template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is unused.
    const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}

// pop_local_slow() is done by the owning thread and is trying to
// get the last task in the queue.  It will compete with pop_global()
// that will be used by other threads.  The tag age is incremented
// whenever the queue goes empty which it will do here if this thread
// gets the last task or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds, see pop_global()).
template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element.  But the queue is empty
  // and top is greater than bottom.  Fix this representation of the empty
  // queue to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}
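
// Illustrative trace of the ABA case that the tag increment above prevents
// (a sketch; E0 and E1 are placeholder tasks):
//
//   start:  _bottom == 1, _age == {top = 0, tag = T}, one element E0 in slot 0
//   thief:  pop_global reads E0 and oldAge {0, T}, then stalls before its CAS
//   owner:  pop_local takes this slow path, installs {0, T+1}, and wins E0
//   owner:  pushes a new element E1 into slot 0; _bottom becomes 1 again
//   thief:  resumes; its CAS expects {0, T} but finds {0, T+1}, so it fails.
//           Without the tag bump the CAS would succeed and the thief would
//           return the stale E0 while logically consuming E1.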

template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::pop_global(E& t) {
  Age oldAge = _age.get();
  uint localBot = _bottom;
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}

template<class E, unsigned int N>
GenericTaskQueue<E, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems);
}

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, N>
{
public:
  typedef Stack<E>               overflow_t;
  typedef GenericTaskQueue<E, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};

template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
  }
  return true;
}

template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}
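
// Typical drain pattern for an OverflowTaskQueue (an illustrative sketch;
// process() is a placeholder for real GC work):
//
//   OopStarTaskQueue* q = ...;
//   StarTask task;
//   do {
//     while (q->pop_overflow(task)) process(task);  // drain overflow first
//     while (q->pop_local(task))    process(task);  // then the fixed queue
//   } while (!q->is_empty());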

class TaskQueueSetSuper: public CHeapObj {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template<class T>
class GenericTaskQueueSet: public TaskQueueSetSuper {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  bool steal_1_random(uint queue_num, int* seed, E& t);
  bool steal_best_of_2(uint queue_num, int* seed, E& t);
  bool steal_best_of_all(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
};

template<class T> void
GenericTaskQueueSet<T>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T> T*
GenericTaskQueueSet<T>::queue(uint i) {
  return _queues[i];
}

template<class T> bool
GenericTaskQueueSet<T>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
  return false;
}

template<class T> bool
GenericTaskQueueSet<T>::steal_best_of_all(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    int best_k = 0;   // only used when best_sz > 0 below
    uint best_sz = 0;
    for (uint k = 0; k < _n; k++) {
      if (k == queue_num) continue;
      uint sz = _queues[k]->size();
      if (sz > best_sz) {
        best_sz = sz;
        best_k = k;
      }
    }
    return best_sz > 0 && _queues[best_k]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    int k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T> bool
GenericTaskQueueSet<T>::steal_1_random(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k = queue_num;
    while (k == queue_num) k = randomParkAndMiller(seed) % _n;
    return _queues[k]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    int k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T> bool
GenericTaskQueueSet<T>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k1 = queue_num;
    while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
    uint k2 = queue_num;
    while (k2 == queue_num || k2 == k1) k2 = randomParkAndMiller(seed) % _n;
    // Sample both and try the larger.
    uint sz1 = _queues[k1]->size();
    uint sz2 = _queues[k2]->size();
    if (sz2 > sz1) return _queues[k2]->pop_global(t);
    else return _queues[k1]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}
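
// Illustrative note: sampling two victims and popping from the longer one is
// the classic "power of two choices" heuristic.  For example, with four
// queues of sizes {8, 1, 1, 1} and the size-8 queue not our own, one random
// probe finds it with probability 1/3, while probing two distinct victims
// raises that to 2/3.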

template<class T>
bool GenericTaskQueueSet<T>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  int _n_threads;
  TaskQueueSetSuper* _queue_set;
  int _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is a
  // set of work queues of other threads.
  ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If it returns "true", all threads are terminated.  If it
  // returns "false", available work has been observed in one of the task
  // queues, so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(int n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins()  { return _total_spins; }
  static uint total_peeks()  { return _total_peeks; }
  static void print_termination_counts();
#endif
};
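
// Canonical shape of a work-steal-terminate loop built from these pieces (an
// illustrative sketch; worker_id, seed, work_queue, queue_set, and process()
// are placeholders):
//
//   ParallelTaskTerminator terminator(n_workers, queue_set);
//   E task;
//   while (true) {
//     while (work_queue->pop_local(task) ||
//            queue_set->steal(worker_id, &seed, task)) {
//       process(task);
//     }
//     if (terminator.offer_termination()) break;  // every worker is idle
//     // otherwise another queue was observed non-empty; try stealing again
//   }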

template<class E, unsigned int N> inline bool
GenericTaskQueue<E, N>::push(E t) {
  uint localBot = _bottom;
  assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is unused.
    const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}

template<class E, unsigned int N> inline bool
GenericTaskQueue<E, N>::pop_local(E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top();    // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}

typedef GenericTaskQueue<oop>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void* _holder;        // either union oop* or narrowOop*

  enum { COMPRESSED_OOP_MASK = 1 };

public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask()            { _holder = NULL; }
  operator oop*()       { return (oop*)_holder; }
  operator narrowOop*() {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
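
// Illustrative example of the low-bit tagging (a sketch): the pointers are at
// least word-aligned, so bit 0 is free to mark "this is a narrowOop*":
//
//   narrowOop* np = ...;
//   oop*       op = ...;
//   StarTask t1(np), t2(op);
//   assert(t1.is_narrow() && !t2.is_narrow(), "tag must round-trip");
//   narrowOop* back = (narrowOop*)t1;  // the conversion masks the tag off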

class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t>             RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue>  RegionTaskQueueSet;
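
// Illustrative setup sketch for one of these queue sets (n_workers is a
// placeholder):
//
//   RegionTaskQueueSet* qset = new RegionTaskQueueSet(n_workers);
//   for (uint i = 0; i < n_workers; i++) {
//     RegionTaskQueue* q = new RegionTaskQueue();
//     q->initialize();             // allocates the backing element array
//     qset->register_queue(i, q);
//   }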

#endif // SHARE_VM_UTILITIES_TASKQUEUE_HPP