Fri, 16 Jul 2010 21:33:21 -0700
6962947: shared TaskQueue statistics
Reviewed-by: tonyp, ysr
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS
39 #if TASKQUEUE_STATS
40 class TaskQueueStats {
41 public:
42 enum StatId {
43 push, // number of taskqueue pushes
44 pop, // number of taskqueue pops
45 pop_slow, // subset of taskqueue pops that were done slow-path
46 steal_attempt, // number of taskqueue steal attempts
47 steal, // number of taskqueue steals
48 overflow, // number of overflow pushes
49 overflow_max_len, // max length of overflow stack
50 last_stat_id
51 };
53 public:
54 inline TaskQueueStats() { reset(); }
56 inline void record_push() { ++_stats[push]; }
57 inline void record_pop() { ++_stats[pop]; }
58 inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
59 inline void record_steal(bool success);
60 inline void record_overflow(size_t new_length);
62 inline size_t get(StatId id) const { return _stats[id]; }
63 inline const size_t* get() const { return _stats; }
65 inline void reset();
67 static void print_header(unsigned int line, outputStream* const stream = tty,
68 unsigned int width = 10);
69 void print(outputStream* const stream = tty, unsigned int width = 10) const;
71 private:
72 size_t _stats[last_stat_id];
73 static const char * const _names[last_stat_id];
74 };
76 void TaskQueueStats::record_steal(bool success) {
77 ++_stats[steal_attempt];
78 if (success) ++_stats[steal];
79 }
81 void TaskQueueStats::record_overflow(size_t new_len) {
82 ++_stats[overflow];
83 if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
84 }
86 void TaskQueueStats::reset() {
87 memset(_stats, 0, sizeof(_stats));
88 }
89 #endif // TASKQUEUE_STATS
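
// Editor's illustration (not part of the original change): a minimal sketch
// of how the stats object and the TASKQUEUE_STATS_ONLY macro fit together.
// The function name and the meaning of print_header's "line" argument are
// assumptions for the example; in debug builds the wrapped calls compile,
// in product builds they disappear entirely.
#if 0
void example_stats_usage(outputStream* st) {
  TaskQueueStats s;
  s.record_push();          // count a push
  s.record_pop_slow();      // counts both a pop and a slow-path pop
  s.record_steal(false);    // a failed steal attempt
  s.record_overflow(3);     // overflow push; also tracks max overflow length

  // In queue code the calls are wrapped so product builds pay nothing:
  // TASKQUEUE_STATS_ONLY(stats.record_push());

  TaskQueueStats::print_header(1, st);  // assumed: which header row to emit
  s.print(st);                          // one row of counters
  s.reset();                            // back to all zeros
}
#endif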

template <unsigned int N>
class TaskQueueSuper: public CHeapObj {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)      { _data = data; }
    Age(const Age& age)       { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

    Age  get()        const volatile { return _data; }
    void set(Age age) volatile       { _data = age._data; }

    idx_t top() const volatile { return _fields._top; }
    idx_t tag() const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent queues, so the owner thread can reset the state to
    // _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
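
  // Worked example (editor's illustration, assuming N == 8): start with
  // _bottom == 3, top == 2, so dirty_size == (3 - 2) & 7 == 1, one element.
  // A racing pop_local decrements _bottom to 2 while a pop_global increments
  // top to 3; a reader now computes dirty_size == (2 - 3) & 7 == 7 == N-1,
  // which size() above maps back to 0: the queue is empty, the contested
  // element having been awarded to the pop_global.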

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // peek() returns true if the TaskQueue contains any tasks;
  // is_empty() returns true if it does not.
  bool peek() const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version (dirty_size below) admits the possibility of
  // pop_local/pop_global races and may therefore report N-1 for an
  // empty queue.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less
  // than the actual queue size: one slot keeps full and empty
  // distinguishable, and the N-1 state is reserved for the transient
  // pop_local/pop_global race described above.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};

template<class E, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N> {
protected:
  typedef typename TaskQueueSuper<N>::Age Age;
  typedef typename TaskQueueSuper<N>::idx_t idx_t;

  using TaskQueueSuper<N>::_bottom;
  using TaskQueueSuper<N>::_age;
  using TaskQueueSuper<N>::increment_index;
  using TaskQueueSuper<N>::decrement_index;
  using TaskQueueSuper<N>::dirty_size;

public:
  using TaskQueueSuper<N>::max_elems;
  using TaskQueueSuper<N>::size;
  TASKQUEUE_STATS_ONLY(using TaskQueueSuper<N>::stats;)

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed).  If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(E& t);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply the closure to all elements in the task queue.
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};
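
// Editor's illustration (not part of the original change): the intended
// usage pattern, as a minimal sketch.  The element type and function name
// are assumptions for the example; only the owning thread may call push()
// and pop_local(), while any thread may call pop_global().
#if 0
void example_local_use(GenericTaskQueue<oop>* q) {
  q->initialize();               // allocates the backing array
  oop task = NULL;
  if (!q->push(task)) {
    // Queue full; callers need their own overflow policy
    // (see OverflowTaskQueue below).
  }
  while (q->pop_local(task)) {
    // process task ...
  }
}
#endif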

template<class E, unsigned int N>
GenericTaskQueue<E, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

template<class E, unsigned int N>
void GenericTaskQueue<E, N>::initialize() {
  _elems = NEW_C_HEAP_ARRAY(E, N);
}

template<class E, unsigned int N>
void GenericTaskQueue<E, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //            index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];      // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}

template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is unused.
    const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}

template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element.  But the queue is
  // empty and top is greater than bottom.  Fix this representation of the
  // empty queue to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}

template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::pop_global(E& t) {
  Age oldAge = _age.get();
  uint localBot = _bottom;
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}

template<class E, unsigned int N>
GenericTaskQueue<E, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems);
}

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// Three methods from super classes are overridden:
//
// initialize() - initialize the super classes and create the overflow stack
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not overridden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, N>
{
public:
  typedef GrowableArray<E>       overflow_t;
  typedef GenericTaskQueue<E, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  OverflowTaskQueue();
  ~OverflowTaskQueue();
  void initialize();

  inline overflow_t* overflow_stack() const { return _overflow_stack; }

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return overflow_stack()->is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t* _overflow_stack;
};

template <class E, unsigned int N>
OverflowTaskQueue<E, N>::OverflowTaskQueue()
{
  _overflow_stack = NULL;
}

template <class E, unsigned int N>
OverflowTaskQueue<E, N>::~OverflowTaskQueue()
{
  if (_overflow_stack != NULL) {
    delete _overflow_stack;
    _overflow_stack = NULL;
  }
}

template <class E, unsigned int N>
void OverflowTaskQueue<E, N>::initialize()
{
  taskqueue_t::initialize();
  assert(_overflow_stack == NULL, "memory leak");
  _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<E>(10, true);
}

template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->length()));
  }
  return true;
}

template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}
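
// Editor's illustration (not part of the original change): a typical drain
// loop over an OverflowTaskQueue, sketched under the assumption of an oop
// element type.  Draining the unbounded overflow stack first frees the
// fixed-size queue before new pushes arrive.
#if 0
void example_drain(OverflowTaskQueue<oop>* q) {
  oop task;
  do {
    while (q->pop_overflow(task)) {
      // process task from the unbounded overflow stack ...
    }
    while (q->pop_local(task)) {
      // process task from the bounded taskqueue ...
    }
  } while (!q->is_empty());
}
#endif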

class TaskQueueSetSuper: public CHeapObj {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template<class T>
class GenericTaskQueueSet: public TaskQueueSetSuper {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  bool steal_1_random(uint queue_num, int* seed, E& t);
  bool steal_best_of_2(uint queue_num, int* seed, E& t);
  bool steal_best_of_all(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
};
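
// Editor's illustration (not part of the original change): wiring worker
// queues into a set.  The worker count and seed handling are assumptions
// for the example; OopTaskQueue/OopTaskQueueSet are the typedefs defined
// near the end of this file.  Each worker owns one registered queue and
// steals from the others via the set.
#if 0
void example_set_setup() {
  const uint n_workers = 4;
  OopTaskQueueSet* qset = new OopTaskQueueSet(n_workers);
  for (uint i = 0; i < n_workers; i++) {
    OopTaskQueue* q = new OopTaskQueue();
    q->initialize();
    qset->register_queue(i, q);
  }
  // Later, in worker i with a per-thread seed:
  // oop task;
  // if (qset->steal(i, &seed, task)) { /* process stolen task */ }
}
#endif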

template<class T> void
GenericTaskQueueSet<T>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T> T*
GenericTaskQueueSet<T>::queue(uint i) {
  return _queues[i];
}

template<class T> bool
GenericTaskQueueSet<T>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
  return false;
}

template<class T> bool
GenericTaskQueueSet<T>::steal_best_of_all(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint best_k = 0;
    uint best_sz = 0;
    for (uint k = 0; k < _n; k++) {
      if (k == queue_num) continue;
      uint sz = _queues[k]->size();
      if (sz > best_sz) {
        best_sz = sz;
        best_k = k;
      }
    }
    return best_sz > 0 && _queues[best_k]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    int k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T> bool
GenericTaskQueueSet<T>::steal_1_random(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k = queue_num;
    while (k == queue_num) k = randomParkAndMiller(seed) % _n;
    return _queues[k]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    int k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T> bool
GenericTaskQueueSet<T>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k1 = queue_num;
    while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
    uint k2 = queue_num;
    while (k2 == queue_num || k2 == k1) k2 = randomParkAndMiller(seed) % _n;
    // Sample both and try the larger.
    uint sz1 = _queues[k1]->size();
    uint sz2 = _queues[k2]->size();
    if (sz2 > sz1) return _queues[k2]->pop_global(t);
    else return _queues[k1]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}

template<class T>
bool GenericTaskQueueSet<T>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  int _n_threads;
  TaskQueueSetSuper* _queue_set;
  int _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is
  // the set of work queues belonging to the other threads.
  ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If it returns "true", all threads are terminated.  If it
  // returns "false", available work has been observed in one of the task
  // queues, so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins()  { return _total_spins; }
  static uint total_peeks()  { return _total_peeks; }
  static void print_termination_counts();
#endif
};
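
// Editor's illustration (not part of the original change): the canonical
// worker loop these pieces compose into, sketched with assumed queue and
// parameter names.  Each worker drains its own queue, steals when empty,
// and enters the termination protocol only when stealing fails.
#if 0
void example_worker(uint worker_id, OopTaskQueue* my_q,
                    OopTaskQueueSet* qset, ParallelTaskTerminator* term,
                    int* seed) {
  oop task;
  do {
    while (my_q->pop_local(task)) {
      // process task ...
    }
    while (qset->steal(worker_id, seed, task)) {
      // process stolen task ...
    }
    // No local or stealable work; offer to terminate.  offer_termination()
    // returns true only once all workers have offered and no work remains.
  } while (!term->offer_termination());
}
#endif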

template<class E, unsigned int N> inline bool
GenericTaskQueue<E, N>::push(E t) {
  uint localBot = _bottom;
  assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is unused.
    const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}

template<class E, unsigned int N> inline bool
GenericTaskQueue<E, N>::pop_local(E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top();    // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}

typedef GenericTaskQueue<oop>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void*  _holder;        // either an oop* or a tagged narrowOop*

  enum { COMPRESSED_OOP_MASK = 1 };

public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask()             { _holder = NULL; }
  operator oop*()        { return (oop*)_holder; }
  operator narrowOop*()  {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
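
// Editor's illustration (not part of the original change): how a consumer
// uses the low tag bit to distinguish the two pointer kinds at pop time.
// Pointer alignment guarantees the low bit is free for the tag; the
// function name is an assumption for the example.
#if 0
void example_star_task(StarTask task, OopClosure* cl) {
  if (task.is_narrow()) {
    narrowOop* p = (narrowOop*)task;  // conversion strips COMPRESSED_OOP_MASK
    cl->do_oop(p);
  } else {
    oop* p = (oop*)task;              // stored untagged
    cl->do_oop(p);
  }
}
#endif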

class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }

  inline oop obj() const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t>             RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue>  RegionTaskQueueSet;