Tue, 19 Aug 2014 08:34:25 -0400
8055007: NMT2: emptyStack missing in minimal build
Summary: Refactored emptyStack to a static member of NativeCallStack, which is accessible in minimal build.
Reviewed-by: coleenp, dholmes
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_TASKQUEUE_HPP
#define SHARE_VM_UTILITIES_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/stack.hpp"

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats() { reset(); }

  inline void record_push()     { ++_stats[push]; }
  inline void record_pop()      { ++_stats[pop]; }
  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal(bool success);
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t                    _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
  ++_stats[steal_attempt];
  if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS
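
// Illustrative sketch, not part of the original header: how these counters
// are typically exercised. TASKQUEUE_STATS_ONLY compiles its argument only
// when TASKQUEUE_STATS is 1, so product builds pay no cost. "my_queue" is a
// hypothetical queue exposing the public "stats" member declared in
// TaskQueueSuper below.
//
//   TASKQUEUE_STATS_ONLY(my_queue.stats.record_push());
//   TASKQUEUE_STATS_ONLY(my_queue.stats.record_steal(true));
//   TASKQUEUE_STATS_ONLY(my_queue.stats.print(tty));
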
// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.

template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)      { _data = data; }
    Age(const Age& age)       { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

    Age  get()        const volatile { return _data; }
    void set(Age age) volatile       { _data = age._data; }

    idx_t top() const volatile { return _fields._top; }
    idx_t tag() const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };
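
  // Illustrative note, not part of the original header: because _top and
  // _tag share a single size_t through the union, one cmpxchg on _data
  // updates both fields atomically. For example, with 32-bit idx_t on a
  // little-endian LP64 machine, an Age with top == 5 and tag == 2 is the
  // single word ((size_t)2 << 32) | 5, so a competing thread observes
  // either the whole old (top, tag) pair or the whole new one, never a mix.
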
  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N). If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top? There's a
    // complicated special case here. A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1. The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.) The resulting state must
    // be interpreted as an empty queue. (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.) Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors. The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent queues, so the owner thread can reset the state to
    // _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
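
  // Worked example (illustrative, not part of the original header), with
  // N = 8 and thus MOD_N_MASK = 7:
  //
  //   bot = 5, top = 2  ->  dirty_size = (5 - 2) & 7 = 3  (3 elements)
  //   bot = 2, top = 5  ->  dirty_size = (2 - 5) & 7 = 5  (wrapped; 5 elements)
  //   bot = 1, top = 2  ->  dirty_size = (1 - 2) & 7 = 7 = N - 1, which
  //                         size() reports as 0: the pop_local/pop_global
  //                         race described above left an empty queue.
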
public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // peek() returns true if the TaskQueue contains any tasks;
  // is_empty() returns true if it contains none.
  bool peek()     const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
  // races.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue. This is two less
  // than the actual queue size, for somewhat complicated reasons.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static const uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};

//
// GenericTaskQueue implements an ABP, Arora-Blumofe-Plaxton, double-
// ended-queue (deque), intended for use in work stealing. Queue operations
// are non-blocking.
//
// A queue owner thread performs push() and pop_local() operations on one end
// of the queue, while other threads may steal work using the pop_global()
// method.
//
// The main difference to the original algorithm is that this
// implementation allows wrap-around at the end of its allocated
// storage, which is an array.
//
// The original paper is:
//
// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
// Thread scheduling for multiprogrammed multiprocessors.
// Theory of Computing Systems 34, 2 (2001), 115-144.
//
// The following paper provides a correctness proof and an
// implementation for weakly ordered memory models including (pseudo-)
// code containing memory barriers for a Chase-Lev deque. Chase-Lev is
// similar to ABP, with the main difference that it allows resizing of the
// underlying storage:
//
// Le, N. M., Pop, A., Cohen, A., and Nardelli, F. Z.
// Correct and efficient work-stealing for weak memory models.
// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
// practice of parallel programming (PPoPP 2013), 69-80.
//
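
// Usage sketch (illustrative only, not part of the original header;
// "process" is a hypothetical function, and OopTaskQueue is the typedef
// declared near the end of this file). The owner thread pushes and pops at
// the bottom; any other thread may steal from the top via pop_global():
//
//   OopTaskQueue* q = new OopTaskQueue();
//   q->initialize();
//
//   // Owner thread:
//   oop task = ...;
//   if (q->push(task)) { /* queued, or spilled via push_slow() */ }
//   while (q->pop_local(task)) process(task);
//
//   // Any stealing thread:
//   oop stolen;
//   if (q->pop_global(stolen)) process(stolen);
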
template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
  ArrayAllocator<E, F> _array_allocator;
protected:
  typedef typename TaskQueueSuper<N, F>::Age Age;
  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;

  using TaskQueueSuper<N, F>::_bottom;
  using TaskQueueSuper<N, F>::_age;
  using TaskQueueSuper<N, F>::increment_index;
  using TaskQueueSuper<N, F>::decrement_index;
  using TaskQueueSuper<N, F>::dirty_size;

public:
  using TaskQueueSuper<N, F>::max_elems;
  using TaskQueueSuper<N, F>::size;

#if TASKQUEUE_STATS
  using TaskQueueSuper<N, F>::stats;
#endif

private:
  // Slow paths for push, pop_local. (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue. Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed). If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(volatile E& t);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(volatile E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply the closure to all elements in the task queue.
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::initialize() {
  _elems = _array_allocator.allocate(N);
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //               index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index]; // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away. We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment. However, casting to E& means that we trigger an
    // unused-value warning. So, we cast the E& to void.
    (void)const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}

// pop_local_slow() is done by the owning thread and is trying to
// get the last task in the queue. It will compete with pop_global()
// that will be used by other threads. The tag age is incremented
// whenever the queue goes empty which it will do here if this thread
// gets the last task or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds, see pop_global()).
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global". In either case,
  // the queue will be logically empty afterwards. Create a new Age value
  // that represents the empty queue for the given value of "_bottom". (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0". A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push. Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element. But the queue is empty
  // and top is greater than bottom. Fix this representation of the empty queue
  // to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
  Age oldAge = _age.get();
  // Architectures with a weak memory model require a barrier here
  // to guarantee that bottom is not older than age,
  // which is crucial for the correctness of the algorithm.
#if !(defined SPARC || defined IA32 || defined AMD64)
  OrderAccess::fence();
#endif
  uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away. We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment. However, casting to E& means that we trigger an
  // unused-value warning. So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems, F);
}

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack. This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
{
public:
  typedef Stack<E, F>               overflow_t;
  typedef GenericTaskQueue<E, F, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack. Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
  }
  return true;
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}
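
// Usage sketch (illustrative only, not part of the original header;
// "task" and "process" are hypothetical). push() never fails, so a worker
// simply drains both the fixed-size queue and the overflow stack:
//
//   OverflowTaskQueue<oop, mtGC> q;
//   q.initialize();
//   q.push(task);  // spills to the overflow stack if the queue is full
//
//   oop t;
//   while (q.pop_overflow(t) || q.pop_local(t)) {
//     process(t);
//   }
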
class TaskQueueSetSuper {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  bool steal_best_of_2(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue. (It may try
  // several queues, according to some configuration parameter.) If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
};

template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
  return _queues[i];
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
  return false;
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k1 = queue_num;
    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    uint k2 = queue_num;
    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    // Sample both and try the larger.
    uint sz1 = _queues[k1]->size();
    uint sz2 = _queues[k2]->size();
    if (sz2 > sz1) return _queues[k2]->pop_global(t);
    else           return _queues[k1]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}
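
// Usage sketch (illustrative only, not part of the original header):
// a set of per-worker queues supporting work stealing. "n_workers",
// "worker_queue" and "my_id" are hypothetical; each stealing thread keeps
// its own seed, which feeds randomParkAndMiller():
//
//   typedef GenericTaskQueue<oop, mtGC> Q;
//   GenericTaskQueueSet<Q, mtGC>* qset =
//       new GenericTaskQueueSet<Q, mtGC>(n_workers);
//   for (uint i = 0; i < n_workers; i++) {
//     qset->register_queue(i, worker_queue[i]);
//   }
//
//   oop t;
//   int seed = 17;  // any nonzero per-thread value
//   if (qset->steal(my_id, &seed, t)) process(t);
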
template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSets for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  int _n_threads;
  TaskQueueSetSuper* _queue_set;
  int _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated. "queue_set" is a
  // queue set of the work queues of other threads.
  ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is. If returns "true", all threads are terminated. If returns
  // "false", available work has been observed in one of the task queues,
  // so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true. If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(int n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins()  { return _total_spins; }
  static uint total_peeks()  { return _total_peeks; }
  static void print_termination_counts();
#endif
};
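
// Usage sketch (illustrative only, not part of the original header;
// "q", "qset", "my_id", "seed" and "process" are hypothetical, as in the
// earlier sketches). Each worker alternates between draining its own queue,
// stealing, and offering termination; offer_termination() returns true only
// once all n_threads have offered and no work remains:
//
//   ParallelTaskTerminator terminator(n_workers, qset);
//   oop t;
//   do {
//     while (q->pop_local(t) || qset->steal(my_id, &seed, t)) {
//       process(t);
//     }
//   } while (!terminator.offer_termination());
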
template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
  uint localBot = _bottom;
  assert(localBot < N, "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away. We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment. However, casting to E& means that we trigger an
    // unused-value warning. So, we cast the E& to void.
    (void) const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1. That can only occur as a result of
  // the assignment to bottom in this method. If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away. We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment. However, casting to E& means that we trigger an
  // unused-value warning. So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top(); // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}

typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void* _holder; // either union oop* or narrowOop*

  enum { COMPRESSED_OOP_MASK = 1 };

public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask() { _holder = NULL; }
  operator oop*() { return (oop*)_holder; }
  operator narrowOop*() {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
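
// Usage sketch (illustrative only, not part of the original header; "queue"
// is a hypothetical OopStarTaskQueue* and "closure" an OopClosure*). The
// consumer dispatches on is_narrow(); the implicit conversion operators
// recover the original pointer, stripping the tag bit in the narrow case:
//
//   StarTask ref;
//   if (queue->pop_local(ref)) {
//     if (ref.is_narrow()) {
//       narrowOop* p = ref;   // conversion clears COMPRESSED_OOP_MASK
//       closure->do_oop(p);
//     } else {
//       oop* p = ref;
//       closure->do_oop(p);
//     }
//   }
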
class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    (void)const_cast<oop&>(_obj = t._obj);
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask, mtClass>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue, mtClass> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t, mtInternal>          RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue, mtClass>  RegionTaskQueueSet;

#endif // SHARE_VM_UTILITIES_TASKQUEUE_HPP