Tue, 30 Mar 2010 15:43:03 -0400
6937142: G1: improvements to debugging output (S-M)
Summary: Various fixes to the G1 debugging output.
Reviewed-by: johnc, iveresov
/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class G1CollectedHeap;
class CMTask;
typedef GenericTaskQueue<oop> CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;

// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1 << _shifter) HeapWords.

class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord* _bmStartWord;      // base address of range covered by map
  size_t    _bmWordSize;       // map size (in #HeapWords covered)
  const int _shifter;          // map to char or bit
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap    _bm;               // the bit map itself

 public:
  // constructor
  CMBitMapRO(ReservedSpace rs, int shifter);

  enum { do_yield = true };

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  bool iterate(BitMapClosure* cl) { return _bm.iterate(cl); }
  bool iterate(BitMapClosure* cl, MemRegion mr);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
                                     HeapWord* limit = NULL) const;
  // Return the address corresponding to the next unmarked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
                                       HeapWord* limit = NULL) const;

  // conversion utilities
  // XXX Fix these so that offsets are size_t's...
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }
  int heapWordDiffToOffsetDiff(size_t diff) const;
  HeapWord* nextWord(HeapWord* addr) {
    return offsetToHeapWord(heapWordToOffset(addr) + 1);
  }

  void mostly_disjoint_range_union(BitMap*   from_bitmap,
                                   size_t    from_start_index,
                                   HeapWord* to_start_word,
                                   size_t    word_num);

  // debugging
  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
};
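
// Illustrative sketch (not part of this interface): the conversion
// utilities above map an address down to the bit covering its
// (1 << _shifter)-HeapWord chunk, so the round trip below holds exactly
// when shifter == 0 (one bit per HeapWord):
//
//   CMBitMapRO bm(rs, 0);
//   HeapWord* addr   = bm.startWord() + 5;
//   size_t    offset = bm.heapWordToOffset(addr);            // == 5
//   assert(bm.offsetToHeapWord(offset) == addr, "round trip");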

class CMBitMap : public CMBitMapRO {

 public:
  // constructor
  CMBitMap(ReservedSpace rs, int shifter) :
    CMBitMapRO(rs, shifter) {}

  // write marks
  void mark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.at_put(heapWordToOffset(addr), true);
  }
  void clear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.at_put(heapWordToOffset(addr), false);
  }
  bool parMark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_at_put(heapWordToOffset(addr), true);
  }
  bool parClear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_at_put(heapWordToOffset(addr), false);
  }
  void markRange(MemRegion mr);
  void clearAll();
  void clearRange(MemRegion mr);

  // Starting at the bit corresponding to "addr" (inclusive), find the next
  // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
  // the end of this run (stopping at "end_addr"). Return the MemRegion
  // covering from the start of the region corresponding to the first bit
  // of the run to the end of the region corresponding to the last bit of
  // the run. If there is no "1" bit at or after "addr", return an empty
  // MemRegion.
  MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
};
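
// Illustrative sketch only: given the contract above, a consumer can walk
// the map run by run until an empty MemRegion comes back (process_run()
// is a hypothetical per-run action):
//
//   HeapWord* cur = bm->startWord();
//   while (cur < bm->endWord()) {
//     MemRegion mr = bm->getAndClearMarkedRegion(cur, bm->endWord());
//     if (mr.is_empty()) break;   // no "1" bits left at or after cur
//     process_run(mr);
//     cur = mr.end();
//   }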

// Represents a marking stack used by the CM collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
  ConcurrentMark* _cm;
  oop*  _base;           // bottom of stack
  jint  _index;          // one more than last occupied index
  jint  _capacity;       // max #elements
  jint  _oops_do_bound;  // Number of elements to include in next iteration.
  NOT_PRODUCT(jint _max_depth;)  // max depth plumbed during run

  bool  _overflow;
  DEBUG_ONLY(bool _drain_in_progress;)
  DEBUG_ONLY(bool _drain_in_progress_yields;)

 public:
  CMMarkStack(ConcurrentMark* cm);
  ~CMMarkStack();

  void allocate(size_t size);

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  // If overflow happens, don't do the push, and record the overflow.
  // *Requires* that "ptr" is already marked.
  void push(oop ptr) {
    if (isFull()) {
      // Record overflow.
      _overflow = true;
      return;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
    }
  }
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_push" operations, not with "pop" or "drain". We would need
  // parallel versions of them if such concurrency was desired.
  void par_push(oop ptr);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
  void par_adjoin_arr(oop* ptr_arr, int n);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking impl: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // If it returns false, the stack was empty. Otherwise, removes up to
  // "max" elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order. The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.) Locking impl:
  // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
  // operations, which use the same locking strategy.
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  // Drain the mark stack, applying the given closure to all fields of
  // objects on the stack. (That is, continue until the stack is empty,
  // even if closure applications add entries to the stack.) The "bm"
  // argument, if non-null, may be used to verify that only marked objects
  // are on the mark stack. If "yield_after" is "true", then the
  // concurrent marker performing the drain offers to yield after
  // processing each object. If a yield occurs, stops the drain operation
  // and returns false. Otherwise, returns true.
  template<class OopClosureClass>
  bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);
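
  // Illustrative sketch only: the drain() contract amounts to the loop
  // below (should_yield() stands in for the marker's yield check, which
  // is not part of this class):
  //
  //   while (!isEmpty()) {
  //     oop obj = pop();
  //     assert(bm == NULL || bm->isMarked((HeapWord*) obj),
  //            "only marked oops on the stack");
  //     obj->oop_iterate(cl);               // may push() further entries
  //     if (yield_after && should_yield()) return false;  // drain stopped
  //   }
  //   return true;                          // stack fully drained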

  bool isEmpty()  { return _index == 0; }
  bool isFull()   { return _index == _capacity; }
  int  maxElems() { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  int size() { return _index; }

  void setEmpty() { _index = 0; clear_overflow(); }

  // Record the current size; a subsequent "oops_do" will iterate only over
  // indices valid at the time of this call.
  void set_oops_do_bound(jint bound = -1) {
    if (bound == -1) {
      _oops_do_bound = _index;
    } else {
      _oops_do_bound = bound;
    }
  }
  jint oops_do_bound() { return _oops_do_bound; }
  // iterate over the oops in the mark stack, up to the bound recorded via
  // the call above.
  void oops_do(OopClosure* f);
};
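
// Illustrative sketch only: the array-based operations are intended for
// batched transfers between this stack and a task-local buffer (the
// buffer size of 16 mirrors global_stack_transfer_size further below;
// task_queue is a hypothetical local queue):
//
//   oop buffer[16];
//   int n;
//   if (stack->par_pop_arr(buffer, 16, &n)) {
//     for (int i = 0; i < n; ++i) {
//       task_queue->push(buffer[i]);
//     }
//   }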

class CMRegionStack VALUE_OBJ_CLASS_SPEC {
  MemRegion* _base;
  jint _capacity;
  jint _index;
  jint _oops_do_bound;
  bool _overflow;
 public:
  CMRegionStack();
  ~CMRegionStack();
  void allocate(size_t size);

  // This is lock-free; assumes that it will only be called in parallel
  // with other "push" operations (no pops).
  void push(MemRegion mr);

#if 0
  // This is currently not used. See the comment in the .cpp file.

  // Lock-free; assumes that it will only be called in parallel
  // with other "pop" operations (no pushes).
  MemRegion pop();
#endif // 0

  // These two are the implementations that use a lock. They can be
  // called concurrently with each other but they should not be called
  // concurrently with the lock-free versions (push() / pop()).
  void push_with_lock(MemRegion mr);
  MemRegion pop_with_lock();

  bool isEmpty() { return _index == 0; }
  bool isFull()  { return _index == _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  int size() { return _index; }

  // It iterates over the entries in the region stack and invalidates
  // (i.e. assigns MemRegion() to) the ones that point to regions in
  // the collection set.
  bool invalidate_entries_into_cset();

  // This records an upper bound up to which the iteration in
  // invalidate_entries_into_cset() will reach. It prevents
  // newly-added entries from being unnecessarily scanned.
  void set_oops_do_bound() {
    _oops_do_bound = _index;
  }

  void setEmpty() { _index = 0; clear_overflow(); }
};

// this will enable a variety of different statistics per GC task
#define _MARKING_STATS_ 0
// this will enable the higher verbose levels
#define _MARKING_VERBOSE_ 0

#if _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
  statement ;                 \
} while (0)
#else // _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
} while (0)
#endif // _MARKING_STATS_
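
// For example, a statistics update that should compile away entirely when
// _MARKING_STATS_ is 0 is written as (see scan_object() below):
//
//   statsOnly( ++_objs_scanned );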

typedef enum {
  no_verbose  = 0,  // verbose turned off
  stats_verbose,    // only prints stats at the end of marking
  low_verbose,      // low verbose, mostly per region and per major event
  medium_verbose,   // a bit more detailed than low
  high_verbose      // per object verbose
} CMVerboseLevel;

class ConcurrentMarkThread;

class ConcurrentMark: public CHeapObj {
  friend class ConcurrentMarkThread;
  friend class CMTask;
  friend class CMBitMapClosure;
  friend class CSMarkOopClosure;
  friend class CMGlobalObjectClosure;
  friend class CMRemarkTask;
  friend class CMConcurrentMarkingTask;
  friend class G1ParNoteEndTask;
  friend class CalcLiveObjectsClosure;

protected:
  ConcurrentMarkThread* _cmThread;  // the thread doing the work
  G1CollectedHeap*      _g1h;      // the heap.
  size_t                _parallel_marking_threads; // the number of marking
                                                   // threads we'll use
  double                _sleep_factor; // how much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // marking target overhead for
                                                // a single task

  // same as the two above, but for the cleanup task
  double                _cleanup_sleep_factor;
  double                _cleanup_task_overhead;

  // Stuff related to age cohort processing.
  struct ParCleanupThreadState {
    char _pre[64];
    UncleanRegionList list;
    char _post[64];
  };
  ParCleanupThreadState** _par_cleanup_thread_state;

  // CMS marking support structures
  CMBitMap                _markBitMap1;
  CMBitMap                _markBitMap2;
  CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
  CMBitMap*               _nextMarkBitMap; // under-construction mark bitmap
  bool                    _at_least_one_mark_complete;

  BitMap                  _region_bm;
  BitMap                  _card_bm;

  // Heap bounds
  HeapWord*               _heap_start;
  HeapWord*               _heap_end;

  // For gray objects
  CMMarkStack             _markStack;   // Grey objects behind global finger.
  CMRegionStack           _regionStack; // Grey regions behind global finger.
  HeapWord* volatile      _finger;      // the global finger, region aligned,
                                        // always points to the end of the
                                        // last claimed region

  // marking tasks
  size_t                  _max_task_num; // maximum task number
  size_t                  _active_tasks; // task num currently active
  CMTask**                _tasks;        // task queue array (max_task_num len)
  CMTaskQueueSet*         _task_queues;  // task queue set
  ParallelTaskTerminator  _terminator;   // for termination

  // Two sync barriers that are used to synchronise tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialise
  // their data structures and task 0 re-initialises the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialised. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync     _first_overflow_barrier_sync;
  WorkGangBarrierSync     _second_overflow_barrier_sync;
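
  // Illustrative sketch only: from a single task's point of view the
  // protocol described above looks roughly like this
  // (reset_local_structures() is a hypothetical stand-in for the
  // per-task re-initialisation):
  //
  //   _first_overflow_barrier_sync.enter();   // all tasks quiesce
  //   reset_local_structures();
  //   if (task_num == 0) {
  //     clear_marking_state();                // global re-initialisation
  //   }
  //   _second_overflow_barrier_sync.enter();  // nobody restarts early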

  // this is set by any task, when an overflow on the global data
  // structures is detected.
  volatile bool           _has_overflown;
  // true: marking is concurrent, false: we're in remark
  volatile bool           _concurrent;
  // set at the end of a Full GC so that marking aborts
  volatile bool           _has_aborted;
  // used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool           _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the interval between the end of concurrent marking
  // and the time of remark.
  volatile bool           _concurrent_marking_in_progress;

  // verbose level
  CMVerboseLevel          _verbose_level;

  // These two fields are used to implement the optimisation that
  // avoids pushing objects on the global/region stack if there are
  // no collection set regions above the lowest finger.

  // This is the lowest finger (among the global and local fingers),
  // which is calculated before a new collection set is chosen.
  HeapWord*               _min_finger;
  // If this flag is true, objects/regions that are marked below the
  // finger should be pushed on the stack(s). If this flag is
  // false, it is safe not to push them on the stack(s).
  bool                    _should_gray_objects;

  // All of these times are in ms.
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;   // accumulated task vtime

  WorkGang* _parallel_workers;

  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();
  // It resets all the marking data structures.
  void clear_marking_state();

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_phase(size_t active_tasks, bool concurrent);
  // We do this after we're done with marking so that the marking data
  // structures are initialised to a sensible and predictable state.
  void set_non_marking_state();

  // prints all gathered CM-related statistics
  void print_stats();

  // accessor methods
  size_t parallel_marking_threads() { return _parallel_marking_threads; }
  double sleep_factor()             { return _sleep_factor; }
  double marking_task_overhead()    { return _marking_task_overhead; }
  double cleanup_sleep_factor()     { return _cleanup_sleep_factor; }
  double cleanup_task_overhead()    { return _cleanup_task_overhead; }

  HeapWord*               finger()       { return _finger; }
  bool                    concurrent()   { return _concurrent; }
  size_t                  active_tasks() { return _active_tasks; }
  ParallelTaskTerminator* terminator()   { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task. It might return NULL if the next region is empty or we have
  // run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently (see the sketch below).
  HeapRegion* claim_region(int task);

  // It determines whether we've run out of regions to scan.
  bool        out_of_regions() { return _finger == _heap_end; }
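
  // Illustrative sketch only: the claiming loop a marking task is
  // expected to run given the contract above (regular_clock_call() and
  // setup_for_region() live on CMTask; task_num is the caller's id):
  //
  //   HeapRegion* hr = NULL;
  //   while (hr == NULL && !_cm->out_of_regions()) {
  //     hr = _cm->claim_region(task_num);
  //     regular_clock_call();   // stay responsive between attempts
  //   }
  //   if (hr != NULL) setup_for_region(hr);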

  // Returns the task with the given id
  CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack or region stack has overflown
  bool has_overflown()       { return _has_overflown; }
  void set_has_overflown()   { _has_overflown = true; }
  void clear_has_overflown() { _has_overflown = false; }

  bool has_aborted()          { return _has_aborted; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(int task_num);
  void enter_second_sync_barrier(int task_num);

public:
  // Manipulation of the global mark stack.
  // Notice that the first mark_stack_push is CAS-based, whereas the
  // two below are Mutex-based. This is OK since the first one is only
  // called during evacuation pauses and doesn't compete with the
  // other two (which are called by the marking tasks during
  // concurrent marking or remark).
  bool mark_stack_push(oop p) {
    _markStack.par_push(p);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }

  // Manipulation of the region stack
  bool region_stack_push(MemRegion mr) {
    // Currently we only call the lock-free version during evacuation
    // pauses.
    assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");

    _regionStack.push(mr);
    if (_regionStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
#if 0
  // Currently this is not used. See the comment in the .cpp file.
  MemRegion region_stack_pop() { return _regionStack.pop(); }
#endif // 0

  bool region_stack_push_with_lock(MemRegion mr) {
    // Currently we only call the lock-based version during either
    // concurrent marking or remark.
    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
           "if we are at a safepoint it should be the remark safepoint");

    _regionStack.push_with_lock(mr);
    if (_regionStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  MemRegion region_stack_pop_with_lock() {
    // Currently we only call the lock-based version during either
    // concurrent marking or remark.
    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
           "if we are at a safepoint it should be the remark safepoint");

    return _regionStack.pop_with_lock();
  }

  int  region_stack_size()     { return _regionStack.size(); }
  bool region_stack_overflow() { return _regionStack.overflow(); }
  bool region_stack_empty()    { return _regionStack.isEmpty(); }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (int i = 0; i < (int)_max_task_num; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(int task_num, int* hash_seed, oop& obj) {
    return _task_queues->steal(task_num, hash_seed, obj);
  }

  // It grays an object by first marking it. Then, if it's behind the
  // global finger, it also pushes it on the global stack.
  void deal_with_reference(oop obj);

  ConcurrentMark(ReservedSpace rs, int max_regions);
  ~ConcurrentMark();
  ConcurrentMarkThread* cmThread() { return _cmThread; }

  CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // The following three are interaction between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed and it's MT-safe. Currently, we just mark it. But, in the
  // future, we can experiment with pushing it on the stack and we can
  // do this without changing G1CollectedHeap.
  void grayRoot(oop p);
  // It's used during evacuation pauses to gray a region, if
  // necessary, and it's MT-safe. It assumes that the caller has
  // marked any objects on that region. If _should_gray_objects is
  // true and we're still doing concurrent marking, the region is
  // pushed on the region stack, if it is located below the global
  // finger, otherwise we do nothing.
  void grayRegionIfNecessary(MemRegion mr);
  // It's used during evacuation pauses to mark and, if necessary,
  // gray a single object and it's MT-safe. It assumes the caller did
  // not mark the object. If _should_gray_objects is true and we're
  // still doing concurrent marking, the object is pushed on the
  // global stack, if it is located below the global finger, otherwise
  // we do nothing.
  void markAndGrayObjectIfNecessary(oop p);

  // It iterates over the heap and for each object it comes across it
  // will dump the contents of its reference fields, as well as
  // liveness information for the object and its referents. The dump
  // will be written to a file with the following name:
  // G1PrintReachableBaseFile + "." + str. use_prev_marking decides
  // whether the prev (use_prev_marking == true) or next
  // (use_prev_marking == false) marking information will be used to
  // determine the liveness of each object / referent. If all is true,
  // all objects in the heap will be dumped, otherwise only the live
  // ones. In the dump the following symbols / abbreviations are used:
  //   M : an explicitly live object (its bitmap bit is set)
  //   > : an implicitly live object (over tams)
  //   O : an object outside the G1 heap (typically: in the perm gen)
  //   NOT : a reference field whose referent is not live
  //   AND MARKED : indicates that an object is both explicitly and
  //   implicitly live (it should be one or the other, not both)
  void print_reachable(const char* str,
                       bool use_prev_marking, bool all) PRODUCT_RETURN;

  // Clear the next marking bitmap (will be called concurrently).
  void clearNextBitmap();

  // main CMS steps and related support
  void checkpointRootsInitial();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), then it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Do concurrent phase of marking, to a tentative transitive closure.
  void markFromRoots();

  // Process all unprocessed SATB buffers. It is called at the
  // beginning of an evacuation pause.
  void drainAllSATBBuffers();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void calcDesiredRegions();
  void cleanup();
  void completeCleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  void markPrev(oop p);
  void clear(oop p);
  // Clears marks for all objects in the given range, for both prev and
  // next bitmaps. NB: the previous bitmap is usually read-only, so use
  // this carefully!
  void clearRangeBothMaps(MemRegion mr);

  // Record the current top of the mark and region stacks; a
  // subsequent oops_do() on the mark stack and
  // invalidate_entries_into_cset() on the region stack will iterate
  // only over indices valid at the time of this call.
  void set_oops_do_bound() {
    _markStack.set_oops_do_bound();
    _regionStack.set_oops_do_bound();
  }
  // Iterate over the oops in the mark stack and all local queues. It
  // also calls invalidate_entries_into_cset() on the region stack.
  void oops_do(OopClosure* f);
  // It is called at the end of an evacuation pause during marking so
  // that CM is notified of where the new end of the heap is. It
  // doesn't do anything if concurrent_marking_in_progress() is false,
  // unless the force parameter is true.
  void update_g1_committed(bool force = false);

  void complete_marking_in_collection_set();

  // It indicates that a new collection set is being chosen.
  void newCSet();
  // It registers a collection set heap region with CM. This is used
  // to determine whether any heap regions are located above the finger.
  void registerCSetRegion(HeapRegion* hr);

  // Returns "true" if at least one mark has been completed.
  bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }

  bool isMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _nextMarkBitMap->startWord() &&
           addr < _nextMarkBitMap->endWord(), "in a region");

    return _nextMarkBitMap->isMarked(addr);
  }

  inline bool not_yet_marked(oop p) const;

  // XXX Debug code
  bool containing_card_is_marked(void* p);
  bool containing_cards_are_marked(void* start, void* last);

  bool isPrevMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _prevMarkBitMap->startWord() &&
           addr < _prevMarkBitMap->endWord(), "in a region");

    return _prevMarkBitMap->isMarked(addr);
  }

  inline bool do_yield_check(int worker_i = 0);
  inline bool should_yield();

  // Called to abort the marking cycle after a Full GC takes place.
  void abort();

  // This prints the global/local fingers. It is used for debugging.
  NOT_PRODUCT(void print_finger();)

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  // The following indicate whether a given verbose level has been
  // set. Notice that anything above stats is conditional on
  // _MARKING_VERBOSE_ having been set to 1
  bool verbose_stats()
    { return _verbose_level >= stats_verbose; }
  bool verbose_low()
    { return _MARKING_VERBOSE_ && _verbose_level >= low_verbose; }
  bool verbose_medium()
    { return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose; }
  bool verbose_high()
    { return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; }
};

// A class representing a marking task.
class CMTask : public TerminatorTerminator {
private:
  enum PrivateConstants {
    // the regular clock call is called once the number of scanned
    // words reaches this limit
    words_scanned_period          = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period           = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed                = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size    = 16
  };

  int                         _task_id;
  G1CollectedHeap*            _g1h;
  ConcurrentMark*             _cm;
  CMBitMap*                   _nextMarkBitMap;
  // the task queue of this task
  CMTaskQueue*                _task_queue;
private:
  // the task queue set---needed for stealing
  CMTaskQueueSet*             _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  OopClosure*                 _oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // This is used only when we scan regions popped from the region
  // stack. It records the last object we scanned on such a
  // region. It is used to ensure that, if we abort region
  // iteration, we do not rescan the first part of the region. This
  // should be NULL when we're not scanning a region from the region
  // stack.
  HeapWord*                   _region_finger;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing stuff
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_aborted_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is in a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // LOTS of statistics related to this task
#if _MARKING_STATS_
  NumberSeq                   _all_clock_intervals_ms;
  double                      _interval_start_time_ms;

  int                         _aborted;
  int                         _aborted_overflow;
  int                         _aborted_cm_aborted;
  int                         _aborted_yield;
  int                         _aborted_timed_out;
  int                         _aborted_satb;
  int                         _aborted_termination;

  int                         _steal_attempts;
  int                         _steals;

  int                         _clock_due_to_marking;
  int                         _clock_due_to_scanning;

  int                         _local_pushes;
  int                         _local_pops;
  int                         _local_max_size;
  int                         _objs_scanned;

  int                         _global_pushes;
  int                         _global_pops;
  int                         _global_max_size;

  int                         _global_transfers_to;
  int                         _global_transfers_from;

  int                         _region_stack_pops;

  int                         _regions_claimed;
  int                         _objs_found_on_bitmap;

  int                         _satb_buffers_processed;
#endif // _MARKING_STATS_

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings up-to-date the limit of the region
  void update_region_limit();
  // it resets the local fields after a task has finished scanning a
  // region
  void giveup_current_region();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit)
      reached_limit();
  }
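
  // Illustrative sketch only (the actual policy lives in
  // recalculate_limits() in the .cpp file): each time the regular clock
  // fires, the limits are assumed to be pushed out by one period each,
  // roughly:
  //
  //   _words_scanned_limit = _words_scanned + words_scanned_period;
  //   _refs_reached_limit  = _refs_reached  + refs_reached_period;
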
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (e.g. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms);

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the task ID
  int task_id() { return _task_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  HeapWord* finger() { return _finger; }

  bool has_aborted()       { return _has_aborted; }
  void set_has_aborted()   { _has_aborted = true; }
  void clear_has_aborted() { _has_aborted = false; }
  bool claimed() { return _claimed; }

  void set_oop_closure(OopClosure* oop_closure) {
    _oop_closure = oop_closure;
  }

  // It grays the object by marking it and, if necessary, pushing it
  // on the local queue
  void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  void scan_object(oop obj) {
    assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");

    if (_cm->verbose_high())
      gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
                             _task_id, (void*) obj);

    size_t obj_size = obj->size();
    _words_scanned += obj_size;

    obj->oop_iterate(_oop_closure);
    statsOnly( ++_objs_scanned );
    check_limits();
  }

  // It pushes an object on the local queue.
  void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops once the queue size has dropped to a given
  // limit. If partially is false, then it stops when the queue is
  // empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();
  // It keeps popping regions from the region stack and processing
  // them until the region stack is empty.
  void drain_region_stack(BitMapClosure* closure);

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  // moves the region finger to a new location
  inline void move_region_finger_to(HeapWord* new_finger) {
    assert(new_finger < _cm->finger(), "invariant");
    _region_finger = new_finger;
  }

  CMTask(int task_num, ConcurrentMark *cm,
         CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();

#if _MARKING_STATS_
  void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
#endif // _MARKING_STATS_
};