Tue, 14 Jul 2009 15:40:39 -0700
6700789: G1: Enable use of compressed oops with G1 heaps
Summary: Modifications to G1 so as to allow the use of compressed oops.
Reviewed-by: apetrusenko, coleenp, jmasa, kvn, never, phh, tonyp
1 /*
2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
// TAG_TYPE is the type of the "_top" and "_tag" halves of the Age word in
// TaskQueueSuper below; LOG_TASKQ_SIZE (consumed and #undef'd there) is
// log2 of the queue capacity.
// NOTE(review): on LP64 two juint fields make sizeof(Age) == 8, yet the
// Age accessors and cmpxchg calls below go through (volatile uint*) and
// assert sizeof(Age) == sizeof(int) -- confirm this configuration actually
// builds and is atomic on 64-bit targets.
#ifdef LP64
typedef juint TAG_TYPE;
// for a taskqueue size of 4M
#define LOG_TASKQ_SIZE 22
#else
typedef jushort TAG_TYPE;
// for a taskqueue size of 16K
#define LOG_TASKQ_SIZE 14
#endif
// Base class for the work-stealing task queues: index bookkeeping for a
// fixed-size circular buffer.  The owner thread pushes and pops at
// "_bottom"; stealing threads claim elements at "_age._top" by CAS-ing
// the packed Age word (see GenericTaskQueue below).
class TaskQueueSuper: public CHeapObj {
protected:
  // The first free element after the last one pushed (mod _n).
  volatile uint _bottom;

  // log2 of the size of the queue.
  enum SomeProtectedConstants {
    Log_n = LOG_TASKQ_SIZE
  };
#undef LOG_TASKQ_SIZE

  // Size of the queue.
  uint n() { return (1 << Log_n); }
  // For computing "x mod n" efficiently.
  uint n_mod_mask() { return n() - 1; }

  // "Age" packs the steal index ("_top") and an ABA-avoidance counter
  // ("_tag", see pop_local_slow) into a single word so both can be read,
  // written, and CAS-ed atomically.
  // NOTE(review): with the LP64 TAG_TYPE of juint, sizeof(Age) == 8, but
  // get_age()/set_age() below access it as a 4-byte uint -- confirm.
  struct Age {
    TAG_TYPE _top;   // Index of the next element to be stolen (mod _n).
    TAG_TYPE _tag;   // Bumped to invalidate stale CAS attempts.

    TAG_TYPE tag() const { return _tag; }
    TAG_TYPE top() const { return _top; }

    Age() { _tag = 0; _top = 0; }

    friend bool operator ==(const Age& a1, const Age& a2) {
      return a1.tag() == a2.tag() && a1.top() == a2.top();
    }
  };
  Age _age;
  // These make sure we do single atomic reads and writes.
  Age get_age() {
    uint res = *(volatile uint*)(&_age);
    return *(Age*)(&res);
  }
  void set_age(Age a) {
    *(volatile uint*)(&_age) = *(uint*)(&a);
  }

  // Convenience: atomically read the age and extract the steal index.
  TAG_TYPE get_top() {
    return get_age().top();
  }

  // These both operate mod _n.
  uint increment_index(uint ind) {
    return (ind + 1) & n_mod_mask();
  }
  uint decrement_index(uint ind) {
    return (ind - 1) & n_mod_mask();
  }

  // Returns a number in the range [0.._n).  If the result is "n-1", it
  // should be interpreted as 0.
  uint dirty_size(uint bot, uint top) {
    return ((int)bot - (int)top) & n_mod_mask();
  }

  // Returns the size corresponding to the given "bot" and "top".
  uint size(uint bot, uint top) {
    uint sz = dirty_size(bot, top);
    // Has the queue "wrapped", so that bottom is less than top?
    // There's a complicated special case here.  A pair of threads could
    // perform pop_local and pop_global operations concurrently, starting
    // from a state in which _bottom == _top+1.  The pop_local could
    // succeed in decrementing _bottom, and the pop_global in incrementing
    // _top (in which case the pop_global will be awarded the contested
    // queue element.)  The resulting state must be interpreted as an empty
    // queue.  (We only need to worry about one such event: only the queue
    // owner performs pop_local's, and several concurrent threads
    // attempting to perform the pop_global will all perform the same CAS,
    // and only one can succeed.  Any stealing thread that reads after
    // either the increment or decrement will see an empty queue, and will
    // not join the competitors.  The "sz == -1 || sz == _n-1" state will
    // not be modified by concurrent queues, so the owner thread can reset
    // the state to _bottom == top so subsequent pushes will be performed
    // normally.
    if (sz == (n()-1)) return 0;
    else return sz;
  }

public:
  TaskQueueSuper() : _bottom(0), _age() {}

  // Return "true" if the TaskQueue contains any tasks.
  bool peek();

  // Return an estimate of the number of elements in the queue.
  // The "careful" version admits the possibility of pop_local/pop_global
  // races.
  uint size() {
    return size(_bottom, get_top());
  }

  uint dirty_size() {
    return dirty_size(_bottom, get_top());
  }

  // Reset to the canonical empty state.  Presumably only safe when no
  // other thread is operating on the queue -- confirm with callers.
  void set_empty() {
    _bottom = 0;
    _age = Age();
  }

  // Maximum number of elements allowed in the queue.  This is two less
  // than the actual queue size, for somewhat complicated reasons.
  uint max_elems() { return n() - 2; }

};
// Work-stealing double-ended queue of elements of type E.  The owner
// thread uses push()/pop_local(); other threads steal with pop_global().
template<class E> class GenericTaskQueue: public TaskQueueSuper {
private:
  // Slow paths for push, pop_local.  (pop_global has no fast path.)
  bool push_slow(E t, uint dirty_n_elems);
  bool pop_local_slow(uint localBot, Age oldAge);

public:
  // Initializes the queue to empty.
  GenericTaskQueue();

  // Allocates the element array; must be called before first use.
  void initialize();

  // Push the task "t" on the queue.  Returns "false" iff the queue is
  // full.
  inline bool push(E t);

  // If succeeds in claiming a task (from the 'local' end, that is, the
  // most recently pushed task), returns "true" and sets "t" to that task.
  // Otherwise, the queue is empty and returns false.
  inline bool pop_local(E& t);

  // If succeeds in claiming a task (from the 'global' end, that is, the
  // least recently pushed task), returns "true" and sets "t" to that task.
  // Otherwise, the queue is empty and returns false.
  bool pop_global(E& t);

  // Delete any resource associated with the queue.
  ~GenericTaskQueue();

  // Apply the closure to all elements in the task queue.
  void oops_do(OopClosure* f);

private:
  // Element array.
  volatile E* _elems;
};
template<class E>
GenericTaskQueue<E>::GenericTaskQueue():TaskQueueSuper() {
  // Age is read, written, and CAS-ed as a single 32-bit word (see
  // get_age/set_age and the Atomic::cmpxchg calls), so its size must
  // match.  NOTE(review): with the LP64 TAG_TYPE of juint this assert
  // looks like it would fire on 64-bit builds -- confirm.
  assert(sizeof(Age) == sizeof(int), "Depends on this.");
}
// Allocate the backing array of n() elements on the C heap.
template<class E>
void GenericTaskQueue<E>::initialize() {
  _elems = NEW_C_HEAP_ARRAY(E, n());
  guarantee(_elems != NULL, "Allocation failed.");
}
// Apply "f" to every element currently in the queue, walking backwards
// from _bottom for size() entries.  Reads _bottom and the age without
// synchronization, so presumably this is only called while the queue is
// quiescent (e.g. at a safepoint) -- TODO confirm with callers.
template<class E>
void GenericTaskQueue<E>::oops_do(OopClosure* f) {
  // tty->print_cr("START OopTaskQueue::oops_do");
  uint iters = size();
  uint index = _bottom;
  for (uint i = 0; i < iters; ++i) {
    index = decrement_index(index);
    // tty->print_cr("  doing entry %d," INTPTR_T " -> " INTPTR_T,
    //            index, &_elems[index], _elems[index]);
    E* t = (E*)&_elems[index];  // cast away volatility
    oop* p = (oop*)t;
    assert((*t)->is_oop_or_null(), "Not an oop or null");
    f->do_oop(p);
  }
  // tty->print_cr("END OopTaskQueue::oops_do");
}
209 template<class E>
210 bool GenericTaskQueue<E>::push_slow(E t, uint dirty_n_elems) {
211 if (dirty_n_elems == n() - 1) {
212 // Actually means 0, so do the push.
213 uint localBot = _bottom;
214 _elems[localBot] = t;
215 _bottom = increment_index(localBot);
216 return true;
217 } else
218 return false;
219 }
// Slow path of pop_local, taken when the queue was observed to contain
// exactly one element.  Returns "true" iff this (owner) thread wins the
// race against any concurrent pop_global for that last element.
template<class E>
bool GenericTaskQueue<E>::
pop_local_slow(uint localBot, Age oldAge) {
  // This queue was observed to contain exactly one element; either this
  // thread will claim it, or a competing "pop_global".  In either case,
  // the queue will be logically empty afterwards.  Create a new Age value
  // that represents the empty queue for the given value of "_bottom".  (We
  // must also increment "tag" because of the case where "bottom == 1",
  // "top == 0".  A pop_global could read the queue element in that case,
  // then have the owner thread do a pop followed by another push.  Without
  // the incrementing of "tag", the pop_global's CAS could succeed,
  // allowing it to believe it has claimed the stale element.)
  Age newAge;
  newAge._top = localBot;
  newAge._tag = oldAge.tag() + 1;
  // Perhaps a competing pop_global has already incremented "top", in which
  // case it wins the element.
  if (localBot == oldAge.top()) {
    Age tempAge;
    // No competing pop_global has yet incremented "top"; we'll try to
    // install new_age, thus claiming the element.
    assert(sizeof(Age) == sizeof(int), "Assumption about CAS unit.");
    *(uint*)&tempAge = Atomic::cmpxchg(*(uint*)&newAge, (volatile uint*)&_age, *(uint*)&oldAge);
    if (tempAge == oldAge) {
      // We win.
      assert(dirty_size(localBot, get_top()) != n() - 1,
             "Shouldn't be possible...");
      return true;
    }
  }
  // We fail; a competing pop_global gets the element.  But the queue is
  // empty (and top is greater than bottom.)  Fix this representation of
  // the empty queue to become the canonical one.
  set_age(newAge);
  assert(dirty_size(localBot, get_top()) != n() - 1,
         "Shouldn't be possible...");
  return false;
}
// Steal the least-recently-pushed task.  Callable from any thread: the
// element at "top" is claimed by CAS-ing a new Age (top advanced, tag
// bumped on index wrap-around) over the Age that was observed.  Returns
// "false" if the queue appeared empty or the CAS lost a race.
template<class E>
bool GenericTaskQueue<E>::pop_global(E& t) {
  Age newAge;
  Age oldAge = get_age();
  uint localBot = _bottom;
  uint n_elems = size(localBot, oldAge.top());
  if (n_elems == 0) {
    return false;
  }
  // Read the element before the CAS; if the CAS succeeds, the read value
  // is the claimed task.
  t = _elems[oldAge.top()];
  newAge = oldAge;
  newAge._top = increment_index(newAge.top());
  // Bump the tag on wrap-around so stale Ages from a previous lap of the
  // circular buffer cannot match.
  if ( newAge._top == 0 ) newAge._tag++;
  Age resAge;
  *(uint*)&resAge = Atomic::cmpxchg(*(uint*)&newAge, (volatile uint*)&_age, *(uint*)&oldAge);
  // Note that using "_bottom" here might fail, since a pop_local might
  // have decremented it.
  assert(dirty_size(localBot, newAge._top) != n() - 1,
         "Shouldn't be possible...");
  return (resAge == oldAge);
}
// Release the element array allocated by initialize().
template<class E>
GenericTaskQueue<E>::~GenericTaskQueue() {
  FREE_C_HEAP_ARRAY(E, _elems);
}
// Inherits the typedef of "Task" from above.
class TaskQueueSetSuper: public CHeapObj {
protected:
  // Pseudo-random number generator used by the steal_* policies to pick
  // victim queues; seed state lives in *seed0 (presumably the Park-Miller
  // "minimal standard" generator, per the name -- see the .cpp).
  static int randomParkAndMiller(int* seed0);
public:
  // Returns "true" if some TaskQueue in the set contains a task.
  virtual bool peek() = 0;
};
// A set of task queues (typically one per worker thread) supporting
// work stealing between them.
template<class E> class GenericTaskQueueSet: public TaskQueueSetSuper {
private:
  uint _n;                        // Number of slots in the set.
  GenericTaskQueue<E>** _queues;  // _n queue pointers; NULL until
                                  // register_queue() installs each one.

public:
  // Allocates the pointer array; individual queues are installed later
  // via register_queue().
  GenericTaskQueueSet(int n) : _n(n) {
    typedef GenericTaskQueue<E>* GenericTaskQueuePtr;
    _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n);
    guarantee(_queues != NULL, "Allocation failure.");
    for (int i = 0; i < n; i++) {
      _queues[i] = NULL;
    }
  }

  // Victim-selection policies; each tries pop_global on one or more
  // queues other than "queue_num".
  bool steal_1_random(uint queue_num, int* seed, E& t);
  bool steal_best_of_2(uint queue_num, int* seed, E& t);
  bool steal_best_of_all(uint queue_num, int* seed, E& t);

  // Install queue "q" at slot "i" (i < _n).
  void register_queue(uint i, GenericTaskQueue<E>* q);

  GenericTaskQueue<E>* queue(uint n);

  // The thread with queue number "queue_num" (and whose random number seed
  // is at "seed") is trying to steal a task from some other queue.  (It
  // may try several queues, according to some configuration parameter.)
  // If some steal succeeds, returns "true" and sets "t" the stolen task,
  // otherwise returns false.
  bool steal(uint queue_num, int* seed, E& t);

  // Returns "true" if any queue in the set contains a task.
  bool peek();
};
// Install queue "q" at slot "i"; "i" must be less than the size this set
// was constructed with.
template<class E>
void GenericTaskQueueSet<E>::register_queue(uint i, GenericTaskQueue<E>* q) {
  assert(i < _n, "index out of range.");
  _queues[i] = q;
}
// Accessor for the queue registered at slot "i" (declared as "queue(uint n)"
// above; the parameter names differ but refer to the same index).
template<class E>
GenericTaskQueue<E>* GenericTaskQueueSet<E>::queue(uint i) {
  return _queues[i];
}
340 template<class E>
341 bool GenericTaskQueueSet<E>::steal(uint queue_num, int* seed, E& t) {
342 for (uint i = 0; i < 2 * _n; i++)
343 if (steal_best_of_2(queue_num, seed, t))
344 return true;
345 return false;
346 }
348 template<class E>
349 bool GenericTaskQueueSet<E>::steal_best_of_all(uint queue_num, int* seed, E& t) {
350 if (_n > 2) {
351 int best_k;
352 uint best_sz = 0;
353 for (uint k = 0; k < _n; k++) {
354 if (k == queue_num) continue;
355 uint sz = _queues[k]->size();
356 if (sz > best_sz) {
357 best_sz = sz;
358 best_k = k;
359 }
360 }
361 return best_sz > 0 && _queues[best_k]->pop_global(t);
362 } else if (_n == 2) {
363 // Just try the other one.
364 int k = (queue_num + 1) % 2;
365 return _queues[k]->pop_global(t);
366 } else {
367 assert(_n == 1, "can't be zero.");
368 return false;
369 }
370 }
372 template<class E>
373 bool GenericTaskQueueSet<E>::steal_1_random(uint queue_num, int* seed, E& t) {
374 if (_n > 2) {
375 uint k = queue_num;
376 while (k == queue_num) k = randomParkAndMiller(seed) % _n;
377 return _queues[2]->pop_global(t);
378 } else if (_n == 2) {
379 // Just try the other one.
380 int k = (queue_num + 1) % 2;
381 return _queues[k]->pop_global(t);
382 } else {
383 assert(_n == 1, "can't be zero.");
384 return false;
385 }
386 }
388 template<class E>
389 bool GenericTaskQueueSet<E>::steal_best_of_2(uint queue_num, int* seed, E& t) {
390 if (_n > 2) {
391 uint k1 = queue_num;
392 while (k1 == queue_num) k1 = randomParkAndMiller(seed) % _n;
393 uint k2 = queue_num;
394 while (k2 == queue_num || k2 == k1) k2 = randomParkAndMiller(seed) % _n;
395 // Sample both and try the larger.
396 uint sz1 = _queues[k1]->size();
397 uint sz2 = _queues[k2]->size();
398 if (sz2 > sz1) return _queues[k2]->pop_global(t);
399 else return _queues[k1]->pop_global(t);
400 } else if (_n == 2) {
401 // Just try the other one.
402 uint k = (queue_num + 1) % 2;
403 return _queues[k]->pop_global(t);
404 } else {
405 assert(_n == 1, "can't be zero.");
406 return false;
407 }
408 }
410 template<class E>
411 bool GenericTaskQueueSet<E>::peek() {
412 // Try all the queues.
413 for (uint j = 0; j < _n; j++) {
414 if (_queues[j]->peek())
415 return true;
416 }
417 return false;
418 }
// When to terminate from the termination protocol.
class TerminatorTerminator: public CHeapObj {
public:
  // Returns "true" when the caller should abandon the termination
  // protocol early (passed to ParallelTaskTerminator::offer_termination).
  virtual bool should_exit_termination() = 0;
};
// A class to aid in the termination of a set of parallel tasks using
// TaskQueueSet's for work stealing.

#undef TRACESPINNING

class ParallelTaskTerminator: public StackObj {
private:
  int _n_threads;                // Number of threads participating in
                                 // the termination protocol.
  TaskQueueSetSuper* _queue_set; // Queues polled for remaining work.
  int _offered_termination;      // Presumably the count of threads that
                                 // have offered termination -- confirm
                                 // against the .cpp implementation.

#ifdef TRACESPINNING
  static uint _total_yields;
  static uint _total_spins;
  static uint _total_peeks;
#endif

  // Returns "true" if any queue in _queue_set has work.
  bool peek_in_queue_set();
protected:
  virtual void yield();
  void sleep(uint millis);

public:

  // "n_threads" is the number of threads to be terminated.  "queue_set" is
  // the set of work queues of the other threads.
  ParallelTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set);

  // The current thread has no work, and is ready to terminate if everyone
  // else is.  If returns "true", all threads are terminated.  If returns
  // "false", available work has been observed in one of the task queues,
  // so the global task is not complete.
  bool offer_termination() {
    return offer_termination(NULL);
  }

  // As above, but it also terminates if the should_exit_termination()
  // method of the terminator parameter returns true.  If terminator is
  // NULL, then it is ignored.
  bool offer_termination(TerminatorTerminator* terminator);

  // Reset the terminator, so that it may be reused again.
  // The caller is responsible for ensuring that this is done
  // in an MT-safe manner, once the previous round of use of
  // the terminator is finished.
  void reset_for_reuse();

#ifdef TRACESPINNING
  static uint total_yields() { return _total_yields; }
  static uint total_spins() { return _total_spins; }
  static uint total_peeks() { return _total_peeks; }
  static void print_termination_counts();
#endif
};
#define SIMPLE_STACK 0

// Fast-path push.  Owner-thread only.  Returns "false" iff the queue is
// full (i.e. push_slow also fails).
template<class E> inline bool GenericTaskQueue<E>::push(E t) {
#if SIMPLE_STACK
  // Trivial non-stealing stack variant, compiled out by default.
  // NOTE(review): the test reads "_bottom" while the store uses the
  // "localBot" snapshot taken just above -- harmless only while this is
  // single-threaded dead code.
  uint localBot = _bottom;
  if (_bottom < max_elems()) {
    _elems[localBot] = t;
    _bottom = localBot + 1;
    return true;
  } else {
    return false;
  }
#else
  uint localBot = _bottom;
  // NOTE(review): "localBot >= 0" is vacuously true for an unsigned type.
  assert((localBot >= 0) && (localBot < n()), "_bottom out of range.");
  TAG_TYPE top = get_top();
  uint dirty_n_elems = dirty_size(localBot, top);
  assert((dirty_n_elems >= 0) && (dirty_n_elems < n()),
         "n_elems out of range.");
  if (dirty_n_elems < max_elems()) {
    _elems[localBot] = t;
    _bottom = increment_index(localBot);
    return true;
  } else {
    // The queue looks full; push_slow() re-checks for the "dirty empty"
    // encoding (dirty_n_elems == n()-1, which really means empty).
    return push_slow(t, dirty_n_elems);
  }
#endif
}
// Fast-path pop from the owner end (most-recently-pushed element).
// Owner-thread only.  Decrements _bottom first, then re-reads the age to
// detect a race with pop_global on the last element; that one-element
// case is resolved in pop_local_slow().
template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) {
#if SIMPLE_STACK
  // Trivial non-stealing stack variant, compiled out by default.
  uint localBot = _bottom;
  assert(localBot > 0, "precondition.");
  localBot--;
  t = _elems[localBot];
  _bottom = localBot;
  return true;
#else
  uint localBot = _bottom;
  // This value cannot be n-1.  That can only occur as a result of
  // the assignment to bottom in this method.  If it does, this method
  // resets the size to 0 before the next call (which is sequential,
  // since this is pop_local.)
  uint dirty_n_elems = dirty_size(localBot, get_top());
  assert(dirty_n_elems != n() - 1, "Shouldn't be possible...");
  if (dirty_n_elems == 0) return false;
  localBot = decrement_index(localBot);
  _bottom = localBot;
  // This is necessary to prevent any read below from being reordered
  // before the store just above.
  OrderAccess::fence();
  t = _elems[localBot];
  // This is a second read of "age"; the "size()" above is the first.
  // If there's still at least one element in the queue, based on the
  // "_bottom" and "age" we've read, then there can be no interference with
  // a "pop_global" operation, and we're done.
  TAG_TYPE tp = get_top();  // XXX
  if (size(localBot, tp) > 0) {
    assert(dirty_size(localBot, tp) != n() - 1,
           "Shouldn't be possible...");
    return true;
  } else {
    // Otherwise, the queue contained exactly one element; we take the slow
    // path.
    return pop_local_slow(localBot, get_age());
  }
#endif
}
// Task queues carrying full-width oops.
typedef oop Task;
typedef GenericTaskQueue<Task> OopTaskQueue;
typedef GenericTaskQueueSet<Task> OopTaskQueueSet;

// Low-order pointer bit used by StarTask (below) to tag narrowOop*
// entries; the StarTask constructors assert the bit is free, i.e. that
// the pointers are at least 2-byte aligned.
#define COMPRESSED_OOP_MASK  1
557 // This is a container class for either an oop* or a narrowOop*.
558 // Both are pushed onto a task queue and the consumer will test is_narrow()
559 // to determine which should be processed.
560 class StarTask {
561 void* _holder; // either union oop* or narrowOop*
562 public:
563 StarTask(narrowOop* p) {
564 assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
565 _holder = (void *)((uintptr_t)p | COMPRESSED_OOP_MASK);
566 }
567 StarTask(oop* p) {
568 assert(((uintptr_t)p & COMPRESSED_OOP_MASK) == 0, "Information loss!");
569 _holder = (void*)p;
570 }
571 StarTask() { _holder = NULL; }
572 operator oop*() { return (oop*)_holder; }
573 operator narrowOop*() {
574 return (narrowOop*)((uintptr_t)_holder & ~COMPRESSED_OOP_MASK);
575 }
577 // Operators to preserve const/volatile in assignments required by gcc
578 void operator=(const volatile StarTask& t) volatile { _holder = t._holder; }
580 bool is_narrow() const {
581 return (((uintptr_t)_holder & COMPRESSED_OOP_MASK) != 0);
582 }
583 };
// Task queues carrying tagged oop*/narrowOop* entries (see StarTask).
typedef GenericTaskQueue<StarTask> OopStarTaskQueue;
typedef GenericTaskQueueSet<StarTask> OopStarTaskQueueSet;

// Task queues carrying heap-region indices.
typedef size_t RegionTask;  // index for region
typedef GenericTaskQueue<RegionTask> RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTask> RegionTaskQueueSet;
// A RegionTaskQueue paired with an unbounded overflow stack: save() puts
// tasks that do not fit in the fixed-size stealable queue onto the
// overflow stack instead.
class RegionTaskQueueWithOverflow: public CHeapObj {
protected:
  RegionTaskQueue _region_queue;               // Fixed-size stealable queue.
  GrowableArray<RegionTask>* _overflow_stack;  // Unbounded overflow;
                                               // presumably allocated by
                                               // initialize() -- confirm.

public:
  RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
  // Initialize both stealable queue and overflow
  void initialize();
  // Save first to stealable queue and then to overflow
  void save(RegionTask t);
  // Retrieve first from overflow and then from stealable queue
  bool retrieve(RegionTask& region_index);
  // Retrieve from stealable queue
  bool retrieve_from_stealable_queue(RegionTask& region_index);
  // Retrieve from overflow
  bool retrieve_from_overflow(RegionTask& region_index);
  bool is_empty();
  bool stealable_is_empty();
  bool overflow_is_empty();
  // Number of elements in the stealable queue only (excludes overflow).
  uint stealable_size() { return _region_queue.size(); }
  RegionTaskQueue* task_queue() { return &_region_queue; }
};
// Compile-time switch: presumably clients select RegionTaskQueueWithOverflow
// (rather than a bare RegionTaskQueue) when this is defined -- confirm usage.
#define USE_RegionTaskQueueWithOverflow