Wed, 03 Mar 2010 14:48:26 -0800
4396719: Mark Sweep stack overflow on deeply nested Object arrays
Summary: Use an explicit stack for object arrays and process them in chunks.
Reviewed-by: iveresov, apetrusenko
1 /*
2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
// Fixed-capacity work-stealing queue support. The owning thread pushes and
// pops at "_bottom"; stealing threads remove elements at "top" (kept inside
// _age) using an atomic compare-and-exchange. Because indices are reduced
// with MOD_N_MASK = N - 1, N must be a power of two.
template <unsigned int N>
class TaskQueueSuper: public CHeapObj {
protected:
  // Internal type for indexing the queue; also used for the tag.
  typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;

  // The first free element after the last one pushed (mod N).
  volatile uint _bottom;

  // Mask for reducing indices mod N; relies on N being a power of two.
  enum { MOD_N_MASK = N - 1 };

  // "Age" packs the steal-side index ("top") together with a "tag" into a
  // single word so both can be updated by one cmpxchg. The tag is bumped
  // whenever top wraps (see increment()), which guards against an ABA
  // problem in pop_global/pop_local_slow.
  class Age {
  public:
    Age(size_t data = 0) { _data = data; }
    Age(const Age& age) { _data = age._data; }
    Age(idx_t top, idx_t tag) { _fields._top = top; _fields._tag = tag; }

    // Whole-word read/write. NOTE(review): these are plain (non-atomic-op)
    // accesses of _data; they rely on word-sized loads/stores being atomic.
    Age get() const volatile { return _data; }
    void set(Age age) volatile { _data = age._data; }

    idx_t top() const volatile { return _fields._top; }
    idx_t tag() const volatile { return _fields._tag; }

    // Increment top; if it wraps, increment tag also.
    void increment() {
      _fields._top = increment_index(_fields._top);
      if (_fields._top == 0) ++_fields._tag;
    }

    // Atomically install new_age if the current value equals old_age.
    // Returns the value observed, which equals old_age iff we succeeded.
    Age cmpxchg(const Age new_age, const Age old_age) volatile {
      return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
                                          (volatile intptr_t *)&_data,
                                          (intptr_t)old_age._data);
    }

    bool operator ==(const Age& other) const { return _data == other._data; }

  private:
    // Overlay giving field access to the halves of the single atomic word.
    struct fields {
      idx_t _top;
      idx_t _tag;
    };
    union {
      size_t _data;
      fields _fields;
    };
  };

  volatile Age _age;

  // These both operate mod N.
  static uint increment_index(uint ind) {
    return (ind + 1) & MOD_N_MASK;
  }
  static uint decrement_index(uint ind) {
    return (ind - 1) & MOD_N_MASK;
  }

  // Returns a number in the range [0..N). If the result is "N-1", it should be
  // interpreted as 0.
  uint dirty_size(uint bot, uint top) const {
    return (bot - top) & MOD_N_MASK;
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) const {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top? There's a
    // complicated special case here. A pair of threads could perform pop_local
    // and pop_global operations concurrently, starting from a state in which
    // _bottom == _top+1. The pop_local could succeed in decrementing _bottom,
    // and the pop_global in incrementing _top (in which case the pop_global
    // will be awarded the contested queue element.) The resulting state must
    // be interpreted as an empty queue. (We only need to worry about one such
    // event: only the queue owner performs pop_local's, and several concurrent
    // threads attempting to perform the pop_global will all perform the same
    // CAS, and only one can succeed.) Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will not
    // join the competitors. The "sz == -1 || sz == N-1" state will not be
    // modified by concurrent queues, so the owner thread can reset the state to
    // _bottom == top so subsequent pushes will be performed normally.
    return (sz == N - 1) ? 0 : sz;
  }

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // Return true if the TaskQueue contains any tasks.
  bool peek() { return _bottom != _age.top(); }

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
  // races.
  uint size() const {
    return size(_bottom, _age.top());
  }

  uint dirty_size() const {
    return dirty_size(_bottom, _age.top());
  }

  // Reset to the canonical empty state. Uses plain stores, so the caller
  // must ensure no pushes/pops are concurrently in progress.
  void set_empty() {
    _bottom = 0;
    _age.set(0);
  }

  // Maximum number of elements allowed in the queue. This is two less
  // than the actual queue size, for somewhat complicated reasons.
  uint max_elems() const { return N - 2; }

  // Total size of queue.
  static const uint total_size() { return N; }
};
// A work-stealing queue of elements of type E with capacity N, built on
// TaskQueueSuper. The owner uses push()/pop_local(); other threads use
// pop_global() to steal. Element storage is a C-heap array allocated by
// initialize().
template<class E, unsigned int N = TASKQUEUE_SIZE>
class GenericTaskQueue: public TaskQueueSuper<N> {
protected:
  typedef typename TaskQueueSuper<N>::Age Age;
  typedef typename TaskQueueSuper<N>::idx_t idx_t;

  using TaskQueueSuper<N>::_bottom;
  using TaskQueueSuper<N>::_age;
  using TaskQueueSuper<N>::increment_index;
  using TaskQueueSuper<N>::decrement_index;
  using TaskQueueSuper<N>::dirty_size;

public:
  using TaskQueueSuper<N>::max_elems;
  using TaskQueueSuper<N>::size;

private:
  // Slow paths for push, pop_local. (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  typedef E element_type;

  // Initializes the queue to empty.
  GenericTaskQueue();

  // Allocates the element array; must be called before first use.
  void initialize();

  // Push the task "t" on the queue. Returns "false" iff the queue is
  // full.
  inline bool push(E t);

  // If succeeds in claiming a task (from the 'local' end, that is, the
  // most recently pushed task), returns "true" and sets "t" to that task.
  // Otherwise, the queue is empty and returns false.
  inline bool pop_local(E& t);

  // If succeeds in claiming a task (from the 'global' end, that is, the
  // least recently pushed task), returns "true" and sets "t" to that task.
  // Otherwise, the queue is empty and returns false.
  bool pop_global(E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // apply the closure to all elements in the task queue
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};
// Constructor does no allocation (see initialize()); it only checks the
// layout invariant the lock-free code depends on.
template<class E, unsigned int N>
GenericTaskQueue<E, N>::GenericTaskQueue() {
  // Age is cmpxchg'ed as a single size_t word (see TaskQueueSuper::Age).
  assert(sizeof(Age) == sizeof(size_t), "Depends on this.");
}
// Allocate the backing array of N elements on the C heap.
template<class E, unsigned int N>
void GenericTaskQueue<E, N>::initialize() {
  _elems = NEW_C_HEAP_ARRAY(E, N);
  guarantee(_elems != NULL, "Allocation failed.");
}
// Apply closure "f" to every element currently in the queue, walking from
// the bottom (most recently pushed) toward the top. NOTE(review): iterates
// over a size() snapshot with plain reads, so it presumably runs only when
// no concurrent pushes/steals occur — confirm against callers.
template<class E, unsigned int N>
void GenericTaskQueue<E, N>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //               index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index]; // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}
// Slow path for push, taken when the fast path saw dirty_n_elems >=
// max_elems(). A dirty size of N - 1 actually denotes an empty queue (see
// dirty_size()), so in that case the push proceeds; otherwise the queue is
// genuinely full and we return false.
template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::push_slow(E t, uint dirty_n_elems) {
  if (dirty_n_elems == N - 1) {
    // Actually means 0, so do the push.
    uint localBot = _bottom;
    // g++ complains if the volatile result of the assignment is unused.
    const_cast<E&>(_elems[localBot] = t);
    // Release store: the element write above must be visible to a thief
    // before the new _bottom is.
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    return true;
  }
  return false;
}
// Slow path for pop_local, taken when the queue appeared to hold exactly
// one element: race with any concurrent pop_global for it via a CAS on
// _age. Returns true iff this thread won the element.
template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::
pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global". In either case,
  // the queue will be logically empty afterwards. Create a new Age value
  // that represents the empty queue for the given value of "_bottom". (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0". A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push. Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge((idx_t)localBot, oldAge.tag() + 1);
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    Age tempAge = _age.cmpxchg(newAge, oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
      return true;
    }
  }
  // We lose; a competing pop_global gets the element. But the queue is empty
  // and top is greater than bottom. Fix this representation of the empty queue
  // to become the canonical one.
  _age.set(newAge);
  assert(dirty_size(localBot, _age.top()) != N - 1, "sanity");
  return false;
}
// Attempt to remove the least-recently-pushed element (at "top") on behalf
// of a stealing thread. Returns true and sets "t" on success; returns false
// if the queue appears empty or another thread's CAS on _age wins the race.
template<class E, unsigned int N>
bool GenericTaskQueue<E, N>::pop_global(E& t) {
  Age oldAge = _age.get();
  uint localBot = _bottom;
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }

  // Speculatively read the element; whether "t" may actually be used is
  // decided by the CAS below.
  const_cast<E&>(t = _elems[oldAge.top()]);
  Age newAge(oldAge);
  newAge.increment();
  Age resAge = _age.cmpxchg(newAge, oldAge);

  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge.top()) != N - 1, "sanity");
  return resAge == oldAge;
}
// Release the element array allocated by initialize().
template<class E, unsigned int N>
GenericTaskQueue<E, N>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems);
}
// Abstract superclass of queue sets; provides the random-number generator
// shared by the stealing heuristics and the polymorphic peek() interface.
class TaskQueueSetSuper: public CHeapObj {
protected:
  // Park-Miller pseudo-random generator; advances the seed at *seed0.
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};
// A set of task queues (one per worker thread, by convention) supporting
// work stealing among them. T is the queue type (e.g. GenericTaskQueue<E>).
template<class T>
class GenericTaskQueueSet: public TaskQueueSetSuper {
private:
  uint _n;      // number of queues in the set
  T** _queues;  // C-heap array of _n queue pointers; slots start out NULL

public:
  typedef typename T::element_type E;

  // Allocates the pointer array; the queues themselves are installed later
  // via register_queue(). NOTE(review): "n" is taken as int but stored in
  // the unsigned _n; the steal code asserts _n >= 1.
  GenericTaskQueueSet(int n) : _n(n) {
    typedef T* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n);
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  // Stealing strategies; they differ in how the victim queue is chosen
  // (see the definitions below).
  bool steal_1_random(uint queue_num, int* seed, E& t);
  bool steal_best_of_2(uint queue_num, int* seed, E& t);
  bool steal_best_of_all(uint queue_num, int* seed, E& t);

  // Install queue "q" at index "i"; "i" must be less than _n.
  void register_queue(uint i, T* q);

  T* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed
  // is at "seed") is trying to steal a task from some other queue. (It
  // may try several queues, according to some configuration parameter.)
  // If some steal succeeds, returns "true" and sets "t" the stolen task,
  // otherwise returns false.
  bool steal(uint queue_num, int* seed, E& t);

  bool peek();
};
336 template<class T> void
337 GenericTaskQueueSet<T>::register_queue(uint i, T* q) {
338 assert(i < _n, "index out of range.");
339 _queues[i] = q;
340 }
342 template<class T> T*
343 GenericTaskQueueSet<T>::queue(uint i) {
344 return _queues[i];
345 }
347 template<class T> bool
348 GenericTaskQueueSet<T>::steal(uint queue_num, int* seed, E& t) {
349 for (uint i = 0; i < 2 * _n; i++)
350 if (steal_best_of_2(queue_num, seed, t))
351 return true;
352 return false;
353 }
355 template<class T> bool
356 GenericTaskQueueSet<T>::steal_best_of_all(uint queue_num, int* seed, E& t) {
357 if (_n > 2) {
358 int best_k;
359 uint best_sz = 0;
360 for (uint k = 0; k < _n; k++) {
361 if (k == queue_num) continue;
362 uint sz = _queues[k]->size();
363 if (sz > best_sz) {
364 best_sz = sz;
365 best_k = k;
366 }
367 }
368 return best_sz > 0 && _queues[best_k]->pop_global(t);
369 } else if (_n == 2) {
370 // Just try the other one.
371 int k = (queue_num + 1) % 2;
372 return _queues[k]->pop_global(t);
373 } else {
374 assert(_n == 1, "can't be zero.");
375 return false;
376 }
377 }
379 template<class T> bool
380 GenericTaskQueueSet<T>::steal_1_random(uint queue_num, int* seed, E& t) {
381 if (_n > 2) {
382 uint k = queue_num;
383 while (k == queue_num) k = randomParkAndMiller(seed) % _n;
384 return _queues[2]->pop_global(t);
385 } else if (_n == 2) {
386 // Just try the other one.
387 int k = (queue_num + 1) % 2;
388 return _queues[k]->pop_global(t);
389 } else {
390 assert(_n == 1, "can't be zero.");
391 return false;
392 }
393 }
395 template<class T> bool
396 GenericTaskQueueSet<T>::steal_best_of_2(uint queue_num, int* seed, E& t) {
397 if (_n > 2) {
398 uint k1 = queue_num;
399 while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
400 uint k2 = queue_num;
401 while (k2 == queue_num || k2 == k1) k2 = randomParkAndMiller(seed) % _n;
402 // Sample both and try the larger.
403 uint sz1 = _queues[k1]->size();
404 uint sz2 = _queues[k2]->size();
405 if (sz2 > sz1) return _queues[k2]->pop_global(t);
406 else return _queues[k1]->pop_global(t);
407 } else if (_n == 2) {
408 // Just try the other one.
409 uint k = (queue_num + 1) % 2;
410 return _queues[k]->pop_global(t);
411 } else {
412 assert(_n == 1, "can't be zero.");
413 return false;
414 }
415 }
417 template<class T>
418 bool GenericTaskQueueSet<T>::peek() {
419 // Try all the queues.
420 for (uint j = 0; j < _n; j++) {
421 if (_queues[j]->peek())
422 return true;
423 }
424 return false;
425 }
// When to terminate from the termination protocol: a pluggable predicate
// consulted by ParallelTaskTerminator::offer_termination(terminator).
class TerminatorTerminator: public CHeapObj {
public:
  // Return true to force a thread out of the termination protocol early.
  virtual bool should_exit_termination() = 0;
};
// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  int _n_threads;                  // number of threads participating
  TaskQueueSetSuper* _queue_set;   // queues polled for remaining work
  int _offered_termination;        // count of threads currently offering

#ifdef TRACESPINNING
  // Diagnostic counters, compiled in only when TRACESPINNING is defined.
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  // Returns true if any queue in _queue_set still holds a task.
  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated. "queue_set" is a
  // queue sets of work queues of other threads.
  ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is. If returns "true", all threads are terminated. If returns
  // "false", available work has been observed in one of the task queues,
  // so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true. If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins() { return _total_spins; }
  static uint total_peeks() { return _total_peeks; }
  static void print_termination_counts();
#endif
};
// Fast path for push: store "t" at _bottom and publish the new _bottom
// with a releasing store, so a thief that observes the new _bottom also
// observes the element. Near the capacity limit, defer to push_slow().
template<class E, unsigned int N> inline bool
GenericTaskQueue<E, N>::push(E t) {
  uint localBot = _bottom;
  assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
  idx_t top = _age.top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert(dirty_n_elems < N, "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    // g++ complains if the volatile result of the assignment is unused.
    const_cast<E&>(_elems[localBot] = t);
    OrderAccess::release_store(&_bottom, increment_index(localBot));
    return true;
  } else {
    // dirty_n_elems may still denote an empty queue; push_slow decides.
    return push_slow(t, dirty_n_elems);
  }
}
// Fast path for the owner's pop: claim the most recently pushed element by
// decrementing _bottom. Only when the queue held exactly one element can a
// concurrent pop_global compete, in which case pop_local_slow arbitrates.
template<class E, unsigned int N> inline bool
GenericTaskQueue<E, N>::pop_local(E& t) {
  uint localBot = _bottom;
  // This value cannot be N-1. That can only occur as a result of
  // the assignment to bottom in this method. If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, _age.top());
  assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  const_cast<E&>(t = _elems[localBot]);
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  idx_t tp = _age.top(); // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != N - 1, "sanity");
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, _age.get());
  }
}
536 typedef oop Task;
537 typedef GenericTaskQueue<Task> OopTaskQueue;
538 typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
540 #ifdef _MSC_VER
541 #pragma warning(push)
542 // warning C4522: multiple assignment operators specified
543 #pragma warning(disable:4522)
544 #endif
// This is a container class for either an oop* or a narrowOop*.
// Both are pushed onto a task queue and the consumer will test is_narrow()
// to determine which should be processed.
class StarTask {
  void* _holder;        // either union oop* or narrowOop*

  // Low bit of _holder tags the pointer as a narrowOop*. This relies on
  // both pointer kinds being at least 2-byte aligned (asserted below).
  enum { COMPRESSED_OOP_MASK = 1 };

 public:
  StarTask(narrowOop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
  }
  StarTask(oop* p) {
    assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
    _holder = (void*)p;
  }
  StarTask() { _holder = NULL; }
  // NOTE(review): the oop* conversion does not strip the tag bit; it is
  // presumably only used after is_narrow() returned false — confirm at
  // call sites.
  operator oop*() { return (oop*)_holder; }
  operator narrowOop*() {
    return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
  }

  // Both assignment operators are needed because queue slots are volatile
  // (hence MSVC warning C4522 is suppressed around this class).
  StarTask& operator=(const StarTask& t) {
    _holder = t._holder;
    return *this;
  }
  volatile StarTask& operator=(const volatile StarTask& t) volatile {
    _holder = t._holder;
    return *this;
  }

  // True iff this task wraps a narrowOop*.
  bool is_narrow() const {
    return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
  }
};
// A task denoting a slice of an object array to be processed: the array
// oop plus the index at which processing should resume. Used to process
// large object arrays in chunks instead of recursing (avoiding mark-stack
// overflow on deeply nested arrays).
class ObjArrayTask
{
public:
  ObjArrayTask(oop o = NULL, int idx = 0): _obj(o), _index(idx) { }
  // size_t convenience constructor; the index must fit in an int.
  ObjArrayTask(oop o, size_t idx): _obj(o), _index(int(idx)) {
    assert(idx <= size_t(max_jint), "too big");
  }
  ObjArrayTask(const ObjArrayTask& t): _obj(t._obj), _index(t._index) { }

  // Both assignment operators are needed because queue slots are volatile
  // (hence MSVC warning C4522 is suppressed around this class).
  ObjArrayTask& operator =(const ObjArrayTask& t) {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }
  volatile ObjArrayTask&
  operator =(const volatile ObjArrayTask& t) volatile {
    _obj = t._obj;
    _index = t._index;
    return *this;
  }

  inline oop obj()   const { return _obj; }
  inline int index() const { return _index; }

  DEBUG_ONLY(bool is_valid() const); // Tasks to be pushed/popped must be valid.

private:
  oop _obj;     // the array being processed
  int _index;   // next element index to process
};
614 #ifdef _MSC_VER
615 #pragma warning(pop)
616 #endif
618 typedef GenericTaskQueue<StarTask> OopStarTaskQueue;
619 typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;
621 typedef size_t RegionTask; // index for region
622 typedef GenericTaskQueue<RegionTask> RegionTaskQueue;
623 typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;
// A region task queue backed by a fixed-size stealable queue plus an
// unbounded (GrowableArray) overflow stack for tasks that do not fit.
// Only the stealable part is visible to other threads.
class RegionTaskQueueWithOverflow: public CHeapObj {
protected:
  RegionTaskQueue _region_queue;              // fixed-size stealable queue
  GrowableArray<RegionTask>* _overflow_stack; // spill area; allocated lazily

public:
  RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
  // Initialize both stealable queue and overflow
  void initialize();
  // Save first to stealable queue and then to overflow
  void save(RegionTask t);
  // Retrieve first from overflow and then from stealable queue
  bool retrieve(RegionTask& region_index);
  // Retrieve from stealable queue
  bool retrieve_from_stealable_queue(RegionTask& region_index);
  // Retrieve from overflow
  bool retrieve_from_overflow(RegionTask& region_index);
  // Emptiness checks for the combined structure and each part.
  bool is_empty();
  bool stealable_is_empty();
  bool overflow_is_empty();
  uint stealable_size() { return _region_queue.size(); }
  RegionTaskQueue* task_queue() { return &_region_queue; }
};
649 #define USE_RegionTaskQueueWithOverflow