Tue, 06 Aug 2013 20:01:40 -0400
8012144: multiple SIGSEGVs fails on staxf
Summary: Forward port of 7u change to add additional fence() on RMO platforms, with a load_acquire on all platforms
Reviewed-by: dholmes, kvn
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_TASKQUEUE_HPP
#define SHARE_VM_UTILITIES_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "utilities/stack.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "orderAccess_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "orderAccess_linux_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "orderAccess_bsd_zero.inline.hpp"
#endif

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats() { reset(); }

  inline void record_push()     { ++_stats[push]; }
  inline void record_pop()      { ++_stats[pop]; }
  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal(bool success);
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
  ++_stats[steal_attempt];
  if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS

template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)      { _data = data; }
    Age(const Age& age)       { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

    Age  get()        const volatile { return _data; }
    void set(Age age) volatile       { _data = age._data; }

    idx_t top() const volatile { return _fields._top; }
    idx_t tag() const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };
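
  // Illustrative sketch: because idx_t is uint16_t on 32-bit builds and
  // uint32_t on 64-bit builds, _top and _tag always pack into a single
  // size_t, so cmpxchg() above compares and swaps both fields atomically
  // (the constructor of TaskQueueSuper asserts this size relationship).
  // For example, with N == 4:
  //
  //   Age a((idx_t)3, (idx_t)7);  // top == N-1 == 3, tag == 7
  //   a.increment();              // top wraps to 0, so tag becomes 8
  //
  // An Age with the same top but a different tag compares unequal, which
  // is what lets the tag defeat ABA races on the cmpxchg.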

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent threads, so the owner thread can reset the state
    // to _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
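
  // Worked example (illustrative), with N == 8 (MOD_N_MASK == 7):
  //   bot == 5, top == 2  ->  dirty_size == 3, size == 3
  //   bot == 2, top == 5  ->  dirty_size == (2 - 5) & 7 == 5 (queue wrapped)
  //   bot == 4, top == 5  ->  dirty_size == 7 == N-1, size == 0 (the
  //     transient "contested last element" state described above)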

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // peek() returns true if the TaskQueue contains any tasks;
  // is_empty() returns true if it contains none.
  bool peek()     const { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
  // races.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less than
  // the actual queue size: one slot is given up so that a full queue
  // (_bottom == _age.top() mod N) remains distinguishable from an empty one,
  // and a second because dirty_size() == N-1 is reserved to denote the
  // transient "contested last element" state described in size() above.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};

template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
  ArrayAllocator<E, F> _array_allocator;
protected:
  typedef typename TaskQueueSuper<N, F>::Age Age;
  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;

  using TaskQueueSuper<N, F>::_bottom;
  using TaskQueueSuper<N, F>::_age;
  using TaskQueueSuper<N, F>::increment_index;
  using TaskQueueSuper<N, F>::decrement_index;
  using TaskQueueSuper<N, F>::dirty_size;

public:
  using TaskQueueSuper<N, F>::max_elems;
  using TaskQueueSuper<N, F>::size;

#if TASKQUEUE_STATS
  using TaskQueueSuper<N, F>::stats;
#endif

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed).  If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(E& t);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply the closure to all elements in the task queue.
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};
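
// Usage sketch (illustrative; initialization and task values are schematic):
// each queue has a single owner thread, which pushes and pops at the "bottom"
// end; any other thread may take work from the "top" end via pop_global().
//
//   GenericTaskQueue<oop, mtGC> q;
//   q.initialize();
//   oop t = ...;
//   q.push(t);                     // owner thread only
//   if (q.pop_local(t))  { ... }   // owner thread only
//   if (q.pop_global(t)) { ... }   // any thread (typically a stealer)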

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::initialize() {
  _elems = _array_allocator.allocate(N);
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //            index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];      // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void)const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  }
  return false;
}

// pop_local_slow() is done by the owning thread and is trying to
// get the last task in the queue.  It will compete with pop_global()
// that will be used by other threads.  The tag age is incremented
// whenever the queue goes empty which it will do here if this thread
// gets the last task or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds, see pop_global()).
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element.  But the queue is empty
  // and top is greater than bottom.  Fix this representation of the empty
  // queue to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}
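
// Timeline sketch (illustrative) of the ABA race that the tag increment in
// pop_local_slow() prevents.  Start with one element at slot 0, so
// _bottom == 1 and _age == (top 0, tag t):
//   1. A stealer in pop_global() reads _age == (0, t) and the slot-0
//      element, then stalls before its CAS.
//   2. The owner pops the element via pop_local_slow(), installing
//      _age == (0, t+1), then pushes a new element into slot 0 and
//      restores _bottom == 1.
//   3. The stalled CAS expects (0, t) but _age is now (0, t+1), so it
//      fails.  Without the tag bump it would have succeeded and claimed
//      the stale element.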

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_global(E& t) {
  Age oldAge = _age.get();
  // Architectures with a weak memory model require a barrier here to
  // guarantee that _bottom is not read as older than _age, which is
  // crucial for the correctness of the algorithm.
#if !(defined SPARC || defined IA32 || defined AMD64)
  OrderAccess::fence();
#endif
  uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}
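
// Reordering sketch (illustrative) of the failure the fence and load_acquire
// in pop_global() guard against on weakly-ordered (RMO) machines.  Program
// order reads _age first and _bottom second; without the barrier the load of
// _bottom may be satisfied early, pairing a stale _bottom with a fresh
// _age.top().  For example, if a stealer binds _bottom == 1 and then several
// push/steal cycles advance top to 3 before its _age read, it computes
// size(1, 3) == (1 - 3) & MOD_N_MASK == N - 2 for a queue that is actually
// empty, reads stale data from _elems[3], and its CAS on _age can still
// succeed, "claiming" the garbage element.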

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems, F);
}

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
{
public:
  typedef Stack<E, F>               overflow_t;
  typedef GenericTaskQueue<E, F, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
  }
  return true;
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}
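
// Usage sketch (illustrative): a typical owner-thread drain loop empties the
// overflow stack first, then the queue proper.
//
//   OverflowTaskQueue<size_t, mtInternal> q;  // cf. RegionTaskQueue below
//   q.initialize();
//   size_t t = 0;
//   q.push(t);                                  // never fails; spills to stack
//   while (q.pop_overflow(t)) { /* process t */ }
//   while (q.pop_local(t))    { /* process t */ }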

class TaskQueueSetSuper {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  bool steal_best_of_2(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
};

template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
  return _queues[i];
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
  return false;
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k1 = queue_num;
    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    uint k2 = queue_num;
    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    // Sample both and try the larger.
    uint sz1 = _queues[k1]->size();
    uint sz2 = _queues[k2]->size();
    if (sz2 > sz1) return _queues[k2]->pop_global(t);
    else return _queues[k1]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}
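
// Background note: steal() makes up to 2 * _n best-of-2 attempts before
// giving up, and steal_best_of_2() is an instance of the classic "power of
// two choices" heuristic: sampling two random victims and stealing from the
// longer queue tends to balance load much better than sampling one, at the
// cost of a single extra size() read.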

template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks that use
// TaskQueueSets for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  int _n_threads;
  TaskQueueSetSuper* _queue_set;
  int _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:
616 // "n_threads" is the number of threads to be terminated. "queue_set" is a
617 // queue sets of work queues of other threads.
618 ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);
620 // The current thread has no work, and is ready to terminate if everyone
621 // else is. If returns "true", all threads are terminated. If returns
622 // "false", available work has been observed in one of the task queues,
623 // so the global task is not complete.
624 bool offer_termination() {
625 return offer_termination(NULL);
626 }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(int n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins()  { return _total_spins; }
  static uint total_peeks()  { return _total_peeks; }
  static void print_termination_counts();
#endif
};
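
// Usage sketch (illustrative; q, queue_set, worker_id and process() are
// hypothetical): each worker alternates between draining work and offering
// termination, exiting only once every worker has offered termination.
//
//   ParallelTaskTerminator term(n_workers, &queue_set);
//   size_t t;
//   int seed = 17;
//   do {
//     while (q->pop_local(t) || queue_set.steal(worker_id, &seed, t)) {
//       process(t);
//     }
//   } while (!term.offer_termination());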

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
  uint localBot = _bottom;
  assert(localBot < N, "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void) const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::pop_local(E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top();    // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}
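
// Reordering sketch (illustrative) for the fence in pop_local() above: the
// owner's read of _age must not be satisfied before its store to _bottom is
// visible to other threads, or the owner can act on a stale top.  For
// example, with _bottom == 2 and top == 0, two stealers can advance top to 2
// while the owner, having stored _bottom == 1 but read the stale top == 0,
// computes size(1, 0) == 1 and returns _elems[1] on the fast path, the same
// element the second stealer just claimed.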

typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void* _holder;        // either an oop* or a narrowOop*, tagged via the low bit

  enum { COMPRESSED_OOP_MASK = 1 };

public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask()            { _holder = NULL; }
  operator oop*()       { return (oop*)_holder; }
  operator narrowOop*() {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
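
// Usage sketch (illustrative; do_oop() stands in for whatever the consumer
// does with the pointer): pop a StarTask and dispatch on the tag bit.
//
//   StarTask task;
//   if (q->pop_local(task)) {
//     if (task.is_narrow()) {
//       narrowOop* p = task;   // conversion strips the tag bit
//       do_oop(p);
//     } else {
//       oop* p = task;
//       do_oop(p);
//     }
//   }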

class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask, mtClass>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue, mtClass> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t, mtInternal>         RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue, mtClass> RegionTaskQueueSet;

#endif // SHARE_VM_UTILITIES_TASKQUEUE_HPP