Wed, 12 Oct 2016 02:29:05 -0400
Sync during TaskSteal for 3A2000.
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_UTILITIES_TASKQUEUE_HPP
#define SHARE_VM_UTILITIES_TASKQUEUE_HPP

#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "utilities/stack.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "orderAccess_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "orderAccess_linux_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_aix_ppc
# include "orderAccess_aix_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "orderAccess_bsd_zero.inline.hpp"
#endif

// Simple TaskQueue stats that are collected by default in debug builds.

#if !defined(TASKQUEUE_STATS) && defined(ASSERT)
#define TASKQUEUE_STATS 1
#elif !defined(TASKQUEUE_STATS)
#define TASKQUEUE_STATS 0
#endif

#if TASKQUEUE_STATS
#define TASKQUEUE_STATS_ONLY(code) code
#else
#define TASKQUEUE_STATS_ONLY(code)
#endif // TASKQUEUE_STATS

#if TASKQUEUE_STATS
class TaskQueueStats {
public:
  enum StatId {
    push,             // number of taskqueue pushes
    pop,              // number of taskqueue pops
    pop_slow,         // subset of taskqueue pops that were done slow-path
    steal_attempt,    // number of taskqueue steal attempts
    steal,            // number of taskqueue steals
    overflow,         // number of overflow pushes
    overflow_max_len, // max length of overflow stack
    last_stat_id
  };

public:
  inline TaskQueueStats() { reset(); }

  inline void record_push()     { ++_stats[push]; }
  inline void record_pop()      { ++_stats[pop]; }
  inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; }
  inline void record_steal(bool success);
  inline void record_overflow(size_t new_length);

  TaskQueueStats & operator +=(const TaskQueueStats & addend);

  inline size_t get(StatId id) const { return _stats[id]; }
  inline const size_t* get() const   { return _stats; }

  inline void reset();

  // Print the specified line of the header (does not include a line separator).
  static void print_header(unsigned int line, outputStream* const stream = tty,
                           unsigned int width = 10);
  // Print the statistics (does not include a line separator).
  void print(outputStream* const stream = tty, unsigned int width = 10) const;

  DEBUG_ONLY(void verify() const;)

private:
  size_t _stats[last_stat_id];
  static const char * const _names[last_stat_id];
};

void TaskQueueStats::record_steal(bool success) {
  ++_stats[steal_attempt];
  if (success) ++_stats[steal];
}

void TaskQueueStats::record_overflow(size_t new_len) {
  ++_stats[overflow];
  if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len;
}

void TaskQueueStats::reset() {
  memset(_stats, 0, sizeof(_stats));
}
#endif // TASKQUEUE_STATS
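
// Illustrative sketch (compiled out; not part of the original header): one
// way a client could dump the counters collected above. It assumes the
// header spans two lines, as print_header(line, ...) suggests; `Q` stands
// for any queue type that embeds a public TaskQueueStats member named
// `stats` (see TaskQueueSuper below).
#if 0
template <class Q>
static void dump_taskqueue_stats(Q* q) {
  TaskQueueStats::print_header(1, tty); tty->cr();
  TaskQueueStats::print_header(2, tty); tty->cr();
  q->stats.print(tty); tty->cr();
}
#endif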

// TaskQueueSuper collects functionality common to all GenericTaskQueue instances.

template <unsigned int N, MEMFLAGS F>
class TaskQueueSuper: public CHeapObj<F> {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  enum { MOD_N_MASK = N - 1 };

  class Age {
  public:
    Age(size_t data = 0)      { _data = data; }
    Age(const Age& age)       { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

    Age  get()        const volatile { return _data; }
    void set(Age age)       volatile { _data = age._data; }

    idx_t top()       const volatile { return _fields._top; }
    idx_t tag()       const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N).  If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?  There's a
    // complicated special case here.  A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1.  The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.)  The resulting state must
    // be interpreted as an empty queue.  (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.)  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors.  The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent threads, so the owner thread can reset the state
    // to _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }
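
  // Worked example of the index arithmetic above, assuming N = 8
  // (so MOD_N_MASK = 7):
  //   bot = 5, top = 2  ->  dirty_size = (5 - 2) & 7 = 3; size() = 3.
  //   bot = 2, top = 5  ->  dirty_size = (2 - 5) & 7 = 5 (wrapped queue).
  //   bot = 4, top = 5  ->  dirty_size = (4 - 5) & 7 = 7 = N - 1, which
  //     size() reports as 0: exactly the transient "empty" state produced
  //     by the pop_local/pop_global race described above.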

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // peek() returns true if the TaskQueue contains any tasks;
  // is_empty() returns true if it contains none.
  bool peek() const     { return _bottom != _age.top(); }
  bool is_empty() const { return size() == 0; }

  // Return an estimate of the number of elements in the queue.
  // dirty_size() is the "careful" version: it admits the possibility of
  // pop_local/pop_global races and may report N-1 for an empty queue
  // (see size(uint, uint) above).
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue.  This is two less
  // than the actual queue size, for somewhat complicated reasons.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static uint total_size() { return N; }

  TASKQUEUE_STATS_ONLY(TaskQueueStats stats;)
};

//
// GenericTaskQueue implements an ABP, Arora-Blumofe-Plaxton, double-
// ended-queue (deque), intended for use in work stealing.  Queue operations
// are non-blocking.
//
// A queue owner thread performs push() and pop_local() operations on one end
// of the queue, while other threads may steal work using the pop_global()
// method.
//
// The main difference to the original algorithm is that this
// implementation allows wrap-around at the end of its allocated
// storage, which is an array.
//
// The original paper is:
//
// Arora, N. S., Blumofe, R. D., and Plaxton, C. G.
// Thread scheduling for multiprogrammed multiprocessors.
// Theory of Computing Systems 34, 2 (2001), 115-144.
//
// The following paper provides a correctness proof and an
// implementation for weakly ordered memory models, including (pseudo-)
// code containing memory barriers, for a Chase-Lev deque.  Chase-Lev is
// similar to ABP, with the main difference that it allows resizing of the
// underlying storage:
//
// Le, N. M., Pop, A., Cohen, A., and Nardelli, F. Z.
// Correct and efficient work-stealing for weak memory models.
// Proceedings of the 18th ACM SIGPLAN symposium on Principles and
// practice of parallel programming (PPoPP 2013), 69-80.
//

template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N, F> {
  ArrayAllocator<E, F> _array_allocator;
protected:
  typedef typename TaskQueueSuper<N, F>::Age Age;
  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;

  using TaskQueueSuper<N, F>::_bottom;
  using TaskQueueSuper<N, F>::_age;
  using TaskQueueSuper<N, F>::increment_index;
  using TaskQueueSuper<N, F>::decrement_index;
  using TaskQueueSuper<N, F>::dirty_size;

public:
  using TaskQueueSuper<N, F>::max_elems;
  using TaskQueueSuper<N, F>::size;

#if TASKQUEUE_STATS
  using TaskQueueSuper<N, F>::stats;
#endif

private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is full.
  inline bool push(E t);

  // Attempts to claim a task from the "local" end of the queue (the most
  // recently pushed).  If successful, returns true and sets t to the task;
  // otherwise, returns false (the queue is empty).
  inline bool pop_local(volatile E& t);

  // Like pop_local(), but uses the "global" end of the queue (the least
  // recently pushed).
  bool pop_global(volatile E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply the closure to all elements in the task queue.
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};
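
// Illustrative usage sketch (compiled out; not part of the original header):
// the intended division of labor for the ABP deque above.  Only the owning
// thread calls push()/pop_local(); any other thread may call pop_global().
// OopTaskQueue is the typedef near the end of this file; `process` is
// hypothetical.
#if 0
static void owner_thread(OopTaskQueue* q, oop task) {
  q->initialize();             // allocate the backing array once, up front
  if (!q->push(task)) {
    // Queue full; a real client would fall back to an overflow list
    // (see OverflowTaskQueue below).
  }
  oop t;
  while (q->pop_local(t)) {    // LIFO end: most recently pushed task
    // process(t);
  }
}

static void stealing_thread(OopTaskQueue* victim) {
  oop t;
  if (victim->pop_global(t)) { // FIFO end: least recently pushed task
    // process(t);
  }
}
#endif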

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::GenericTaskQueue() {
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::initialize() {
  _elems = _array_allocator.allocate(N);
}

template<class E, MEMFLAGS F, unsigned int N>
void GenericTaskQueue<E, F, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //               index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];      // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void)const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
#ifdef MIPS64
    // Loongson-specific (this changeset): presumably a full fence is needed
    // on the 3A2000 so that stealing threads observe the new _bottom.
    if (Use3A2000) OrderAccess::fence();
#endif
    return true;
  }
  return false;
}

// pop_local_slow() is done by the owning thread and is trying to
// get the last task in the queue.  It will compete with pop_global()
// that will be used by other threads.  The tag age is incremented
// whenever the queue goes empty which it will do here if this thread
// gets the last task or in pop_global() if the queue wraps (top == 0
// and pop_global() succeeds, see pop_global()).
template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
      return true;
    }
  }
  // We lose; a competing pop_global gets the element.  But the queue is empty
  // and top is greater than bottom.  Fix this representation of the empty
  // queue to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}
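
// Worked interleaving for the tag logic above.  Suppose N = 8,
// _bottom == 1 and _age == (top == 0, tag == T), i.e. one element queued:
//   1. The owner's pop_local() decrements _bottom to 0, then calls
//      pop_local_slow() with oldAge == (0, T) and newAge == (0, T+1).
//   2. A thief's pop_global() has read the same element and prepares a
//      CAS of _age from (0, T) to (1, T).
//   3. The two CASes race; exactly one succeeds, so the element is claimed
//      exactly once, and either outcome leaves size() == 0.
// Without the tag increment, an owner pop-then-push could return _age to
// (0, T) and let a stale pop_global CAS succeed: the classic ABA problem.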

template<class E, MEMFLAGS F, unsigned int N>
bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
  Age oldAge = _age.get();
  // Architectures with a weak memory model require a barrier here
  // to guarantee that bottom is not older than age,
  // which is crucial for the correctness of the algorithm.
#if !(defined SPARC || defined IA32 || defined AMD64)
  OrderAccess::fence();
#endif
  uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}

template<class E, MEMFLAGS F, unsigned int N>
GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems, F);
}

// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// This class hides two methods from super classes:
//
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not hidden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
{
public:
  typedef Stack<E, F>               overflow_t;
  typedef GenericTaskQueue<E, F, N> taskqueue_t;

  TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  inline overflow_t* overflow_stack() { return &_overflow_stack; }

  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return _overflow_stack.is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t _overflow_stack;
};

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::push(E t)
{
  if (!taskqueue_t::push(t)) {
    overflow_stack()->push(t);
    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
#ifdef MIPS64
    if (Use3A2000) OrderAccess::fence();
#endif
  }
  return true;
}

template <class E, MEMFLAGS F, unsigned int N>
bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
{
  if (overflow_empty()) return false;
  t = overflow_stack()->pop();
  return true;
}
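
// Illustrative drain pattern (compiled out; not part of the original
// header): with an OverflowTaskQueue, push() always returns true, so a
// worker typically empties the overflow stack first and then the fixed-size
// deque.  OopStarTaskQueue is the typedef near the end of this file;
// `process` is hypothetical.
#if 0
static void drain(OopStarTaskQueue* q) {
  StarTask t;
  while (q->pop_overflow(t)) {
    // process(t);
  }
  while (q->pop_local(t)) {
    // process(t);
  }
}
#endif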

class TaskQueueSetSuper {
protected:
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};

template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
};

template<class T, MEMFLAGS F>
class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
private:
  uint _n;
  T** _queues;

public:
  typedef typename T::element_type E;

  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  bool steal_best_of_2(uint queue_num, int* seed, E& t);

  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed is
  // at "seed") is trying to steal a task from some other queue.  (It may try
  // several queues, according to some configuration parameter.)  If some steal
  // succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
  // false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
};

template<class T, MEMFLAGS F> void
GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}

template<class T, MEMFLAGS F> T*
GenericTaskQueueSet<T, F>::queue(uint i) {
  return _queues[i];
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) {
  for (uint i = 0; i < 2 * _n; i++) {
    if (steal_best_of_2(queue_num, seed, t)) {
      TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true));
#ifdef MIPS64
      if (Use3A2000) OrderAccess::fence();
#endif
      return true;
    }
  }
  TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false));
#ifdef MIPS64
  if (Use3A2000) OrderAccess::fence();
#endif
  return false;
}

template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
  if (_n > 2) {
    uint k1 = queue_num;
    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    uint k2 = queue_num;
    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
    // Sample both and try the larger.
    uint sz1 = _queues[k1]->size();
    uint sz2 = _queues[k2]->size();
    if (sz2 > sz1) return _queues[k2]->pop_global(t);
    else           return _queues[k1]->pop_global(t);
  } else if (_n == 2) {
    // Just try the other one.
    uint k = (queue_num + 1) % 2;
    return _queues[k]->pop_global(t);
  } else {
    assert(_n == 1, "can't be zero.");
    return false;
  }
}
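
// Note: sampling two random victims and stealing from the longer queue is
// the "power of two random choices" heuristic; it balances load markedly
// better than a single uniformly random probe, for the cost of one extra
// size() read.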

template<class T, MEMFLAGS F>
bool GenericTaskQueueSet<T, F>::peek() {
  // Try all the queues.
  for (uint j = 0; j < _n; j++) {
    if (_queues[j]->peek())
      return true;
  }
  return false;
}

// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj<mtInternal> {
public:
  virtual bool should_exit_termination() = 0;
};

// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  int _n_threads;
  TaskQueueSetSuper* _queue_set;
  int _offered_termination;

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is a
  // set of work queues of other threads.
  ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If it returns "true", all threads are terminated.  If it
  // returns "false", available work has been observed in one of the task
  // queues, so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();
  // Same as above but the number of parallel threads is set to the
  // given number.
  void reset_for_reuse(int n_threads);

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins()  { return _total_spins; }
  static uint total_peeks()  { return _total_peeks; }
  static void print_termination_counts();
#endif
};
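
// Illustrative sketch (compiled out; not part of the original header) of
// the canonical steal/terminate worker loop these classes support.
// OopTaskQueueSet is the typedef below; `process` and the locals are
// hypothetical.
#if 0
static void worker_loop(OopTaskQueueSet* task_set,
                        ParallelTaskTerminator* terminator,
                        uint queue_num, int* seed) {
  oop t;
  do {
    while (task_set->steal(queue_num, seed, t)) {
      // process(t); this may push follow-up work onto our own queue,
      // which other workers can in turn steal.
    }
    // Found no work anywhere: offer to terminate.  offer_termination()
    // returns true only once every worker has offered and all queues
    // appear empty.
  } while (!terminator->offer_termination());
}
#endif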

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::push(E t) {
  uint localBot = _bottom;
  assert(localBot < N, "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is
    // unused, so we cast the volatile away.  We cannot cast directly
    // to void, because gcc treats that as not using the result of the
    // assignment.  However, casting to E& means that we trigger an
    // unused-value warning.  So, we cast the E& to void.
    (void) const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    TASKQUEUE_STATS_ONLY(stats.record_push());
#ifdef MIPS64
    if (Use3A2000) OrderAccess::fence();
#endif
    return true;
  } else {
    return push_slow(t, dirty_n_elems);
  }
}

template<class E, MEMFLAGS F, unsigned int N> inline bool
GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  // g++ complains if the volatile result of the assignment is
  // unused, so we cast the volatile away.  We cannot cast directly
  // to void, because gcc treats that as not using the result of the
  // assignment.  However, casting to E& means that we trigger an
  // unused-value warning.  So, we cast the E& to void.
  (void) const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top();    // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    TASKQUEUE_STATS_ONLY(stats.record_pop());
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}

typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;

#ifdef _MSC_VER
#pragma warning(push)
// warning C4522: multiple assignment operators specified
#pragma warning(disable:4522)
#endif

// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void* _holder;        // either oop* or narrowOop*

  enum { COMPRESSED_OOP_MASK = 1 };

public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask() { _holder = NULL; }
  operator oop*()       { return (oop*)_holder; }
  operator narrowOop*() {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
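
// Illustrative sketch (compiled out; not part of the original header): how
// a consumer is expected to dispatch on a popped StarTask, per the class
// comment above.  `do_oop_work` is hypothetical.
#if 0
static void handle(StarTask& task) {
  if (task.is_narrow()) {
    narrowOop* p = (narrowOop*)task;   // conversion strips the tag bit
    // do_oop_work(p);
  } else {
    oop* p = (oop*)task;
    // do_oop_work(p);
  }
}
#endif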

class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    (void)const_cast<oop&>(_obj = t._obj);
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;
  int _index;
};

#ifdef _MSC_VER
#pragma warning(pop)
#endif

typedef OverflowTaskQueue<StarTask, mtClass>           OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue, mtClass> OopStarTaskQueueSet;

typedef OverflowTaskQueue<size_t, mtInternal>          RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue, mtClass>  RegionTaskQueueSet;


#endif // SHARE_VM_UTILITIES_TASKQUEUE_HPP