Tue, 15 Jan 2013 12:32:26 -0800
8001425: G1: Change the default values for certain G1 specific flags
Summary: Changes to default and ergonomic flag values recommended by performance team. Changes were also reviewed by Monica Beckwith <monica.beckwith@oracle.com>.
Reviewed-by: brutisso, huntch
1 /*
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
28 #include "gc_implementation/g1/heapRegionSets.hpp"
29 #include "utilities/taskqueue.hpp"
31 class G1CollectedHeap;
32 class CMTask;
33 typedef GenericTaskQueue<oop, mtGC> CMTaskQueue;
34 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
36 // Closure used by CM during concurrent reference discovery
37 // and reference processing (during remarking) to determine
38 // if a particular object is alive. It is primarily used
39 // to determine if referents of discovered reference objects
40 // are alive. An instance is also embedded into the
41 // reference processor as the _is_alive_non_header field
42 class G1CMIsAliveClosure: public BoolObjectClosure {
43 G1CollectedHeap* _g1;
44 public:
45 G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
47 void do_object(oop obj) {
48 ShouldNotCallThis();
49 }
50 bool do_object_b(oop obj);
51 };
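// Illustrative sketch (not part of the original file): how an is-alive
// closure of this shape is typically queried. The helper below and its name
// are assumptions for illustration; the real caller is the reference
// processor, which holds the closure in its _is_alive_non_header field and
// asks it whether each discovered referent is still reachable.
static bool example_referent_is_alive(G1CMIsAliveClosure* is_alive, oop referent) {
  return referent != NULL && is_alive->do_object_b(referent);
}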
53 // A generic CM bit map. This is essentially a wrapper around the BitMap
54 // class, with one bit per (1<<_shifter) HeapWords.
56 class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
57 protected:
58 HeapWord* _bmStartWord; // base address of range covered by map
59 size_t _bmWordSize; // map size (in #HeapWords covered)
60 const int _shifter; // map to char or bit
61 VirtualSpace _virtual_space; // underlying storage for the bit map
62 BitMap _bm; // the bit map itself
64 public:
65 // constructor
66 CMBitMapRO(int shifter);
68 enum { do_yield = true };
70 // inquiries
71 HeapWord* startWord() const { return _bmStartWord; }
72 size_t sizeInWords() const { return _bmWordSize; }
73 // the following is one past the last word in space
74 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }
76 // read marks
78 bool isMarked(HeapWord* addr) const {
79 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
80 "outside underlying space?");
81 return _bm.at(heapWordToOffset(addr));
82 }
84 // iteration
85 inline bool iterate(BitMapClosure* cl, MemRegion mr);
86 inline bool iterate(BitMapClosure* cl);
88 // Return the address corresponding to the next marked bit at or after
89 // "addr", and before "limit", if "limit" is non-NULL. If there is no
90 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
91 HeapWord* getNextMarkedWordAddress(HeapWord* addr,
92 HeapWord* limit = NULL) const;
93 // Return the address corresponding to the next unmarked bit at or after
94 // "addr", and before "limit", if "limit" is non-NULL. If there is no
95 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
96 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
97 HeapWord* limit = NULL) const;
99 // conversion utilities
100 // XXX Fix these so that offsets are size_t's...
101 HeapWord* offsetToHeapWord(size_t offset) const {
102 return _bmStartWord + (offset << _shifter);
103 }
104 size_t heapWordToOffset(HeapWord* addr) const {
105 return pointer_delta(addr, _bmStartWord) >> _shifter;
106 }
107 int heapWordDiffToOffsetDiff(size_t diff) const;
108 HeapWord* nextWord(HeapWord* addr) {
109 return offsetToHeapWord(heapWordToOffset(addr) + 1);
110 }
112 // debugging
113 NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
114 };
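// Illustrative sketch (not part of the original file): the address <-> bit
// offset arithmetic used above, written out as a standalone helper. With
// _shifter == 0 every HeapWord gets its own bit; with _shifter == k one bit
// covers 2^k HeapWords. The helper name and parameters are assumptions.
static size_t example_heap_word_to_offset(HeapWord* bm_start,
                                          HeapWord* addr,
                                          int shifter) {
  // Same arithmetic as CMBitMapRO::heapWordToOffset(): distance from the
  // base of the covered range, in HeapWords, scaled down by the shifter.
  return pointer_delta(addr, bm_start) >> shifter;
}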
116 class CMBitMap : public CMBitMapRO {
118 public:
119 // constructor
120 CMBitMap(int shifter) :
121 CMBitMapRO(shifter) {}
123 // Allocates the backing store for the marking bitmap
124 bool allocate(ReservedSpace heap_rs);
126 // write marks
127 void mark(HeapWord* addr) {
128 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
129 "outside underlying space?");
130 _bm.set_bit(heapWordToOffset(addr));
131 }
132 void clear(HeapWord* addr) {
133 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
134 "outside underlying space?");
135 _bm.clear_bit(heapWordToOffset(addr));
136 }
137 bool parMark(HeapWord* addr) {
138 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
139 "outside underlying space?");
140 return _bm.par_set_bit(heapWordToOffset(addr));
141 }
142 bool parClear(HeapWord* addr) {
143 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
144 "outside underlying space?");
145 return _bm.par_clear_bit(heapWordToOffset(addr));
146 }
147 void markRange(MemRegion mr);
148 void clearAll();
149 void clearRange(MemRegion mr);
151 // Starting at the bit corresponding to "addr" (inclusive), find the next
152 // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
153 // the end of this run (stopping at "end_addr"). Return the MemRegion
154 // covering from the start of the region corresponding to the first bit
155 // of the run to the end of the region corresponding to the last bit of
156 // the run. If there is no "1" bit at or after "addr", return an empty
157 // MemRegion.
158 MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
159 };
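// Illustrative sketch (not part of the original file): walking the marked
// runs in a range via getAndClearMarkedRegion(). The function name and the
// processing step are assumptions; each returned MemRegion covers one run of
// consecutive set bits, and an empty region signals that no marked bit
// remains at or after the current position.
static void example_walk_marked_runs(CMBitMap* bm, HeapWord* start, HeapWord* end) {
  MemRegion run = bm->getAndClearMarkedRegion(start, end);
  while (!run.is_empty()) {
    // ... process the heap words covered by 'run' ...
    run = bm->getAndClearMarkedRegion(run.end(), end);
  }
}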
161 // Represents a marking stack used by ConcurrentMarking in the G1 collector.
162 class CMMarkStack VALUE_OBJ_CLASS_SPEC {
163 VirtualSpace _virtual_space; // Underlying backing store for actual stack
164 ConcurrentMark* _cm;
165 oop* _base; // bottom of stack
166 jint _index; // one more than last occupied index
167 jint _capacity; // max #elements
168 jint _saved_index; // value of _index saved at start of GC
169 NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
171 bool _overflow;
172 bool _should_expand;
173 DEBUG_ONLY(bool _drain_in_progress;)
174 DEBUG_ONLY(bool _drain_in_progress_yields;)
176 public:
177 CMMarkStack(ConcurrentMark* cm);
178 ~CMMarkStack();
180 #ifndef PRODUCT
181 jint max_depth() const {
182 return _max_depth;
183 }
184 #endif
186 bool allocate(size_t capacity);
188 oop pop() {
189 if (!isEmpty()) {
190 return _base[--_index];
191 }
192 return NULL;
193 }
195 // If overflow happens, don't do the push, and record the overflow.
196 // *Requires* that "ptr" is already marked.
197 void push(oop ptr) {
198 if (isFull()) {
199 // Record overflow.
200 _overflow = true;
201 return;
202 } else {
203 _base[_index++] = ptr;
204 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
205 }
206 }
207 // Non-block impl. Note: concurrency is allowed only with other
208 // "par_push" operations, not with "pop" or "drain". We would need
209 // parallel versions of them if such concurrency was desired.
210 void par_push(oop ptr);
212 // Pushes the first "n" elements of "ptr_arr" on the stack.
213 // Non-block impl. Note: concurrency is allowed only with other
214 // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
215 void par_adjoin_arr(oop* ptr_arr, int n);
217 // Pushes the first "n" elements of "ptr_arr" on the stack.
218 // Locking impl: concurrency is allowed only with
219 // "par_push_arr" and/or "par_pop_arr" operations, which use the same
220 // locking strategy.
221 void par_push_arr(oop* ptr_arr, int n);
223 // If returns false, the array was empty. Otherwise, removes up to "max"
224 // elements from the stack, and transfers them to "ptr_arr" in an
225 // unspecified order. The actual number transferred is given in "n" ("n
226 // == 0" is deliberately redundant with the return value.) Locking impl:
227 // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
228 // operations, which use the same locking strategy.
229 bool par_pop_arr(oop* ptr_arr, int max, int* n);
231 // Drain the mark stack, applying the given closure to all fields of
232 // objects on the stack. (That is, continue until the stack is empty,
233 // even if closure applications add entries to the stack.) The "bm"
234 // argument, if non-null, may be used to verify that only marked objects
235 // are on the mark stack. If "yield_after" is "true", then the
236 // concurrent marker performing the drain offers to yield after
237 // processing each object. If a yield occurs, stops the drain operation
238 // and returns false. Otherwise, returns true.
239 template<class OopClosureClass>
240 bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);
242 bool isEmpty() { return _index == 0; }
243 bool isFull() { return _index == _capacity; }
244 int maxElems() { return _capacity; }
246 bool overflow() { return _overflow; }
247 void clear_overflow() { _overflow = false; }
249 bool should_expand() const { return _should_expand; }
250 void set_should_expand();
252 // Expand the stack, typically in response to an overflow condition
253 void expand();
255 int size() { return _index; }
257 void setEmpty() { _index = 0; clear_overflow(); }
259 // Record the current index.
260 void note_start_of_gc();
262 // Make sure that we have not added any entries to the stack during GC.
263 void note_end_of_gc();
265 // iterate over the oops in the mark stack, up to the bound recorded via
266 // the call above.
267 void oops_do(OopClosure* f);
268 };
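// Illustrative sketch (not part of the original file): the push-with-overflow
// pattern this stack is designed for (compare ConcurrentMark::mark_stack_push()
// later in this file). On overflow the entry is dropped, the condition is
// recorded, and the caller arranges for marking to restart rather than
// continuing with an incomplete stack. The helper name is an assumption.
static bool example_global_push(CMMarkStack* stack, oop obj) {
  stack->par_push(obj);       // non-blocking push; records overflow if full
  if (stack->overflow()) {
    return false;             // caller reacts, e.g. by flagging a restart
  }
  return true;
}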
270 class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
271 private:
272 #ifndef PRODUCT
273 uintx _num_remaining;
274 bool _force;
275 #endif // !defined(PRODUCT)
277 public:
278 void init() PRODUCT_RETURN;
279 void update() PRODUCT_RETURN;
280 bool should_force() PRODUCT_RETURN_( return false; );
281 };
283 // this will enable a variety of different statistics per GC task
284 #define _MARKING_STATS_ 0
285 // this will enable the higher verbose levels
286 #define _MARKING_VERBOSE_ 0
288 #if _MARKING_STATS_
289 #define statsOnly(statement) \
290 do { \
291 statement ; \
292 } while (0)
293 #else // _MARKING_STATS_
294 #define statsOnly(statement) \
295 do { \
296 } while (0)
297 #endif // _MARKING_STATS_
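// Illustrative note (not part of the original file): statsOnly() wraps
// per-task bookkeeping so that it costs nothing unless _MARKING_STATS_ is 1.
// A typical use inside CMTask would look like
//
//   statsOnly( _local_pushes += 1 );
//
// With stats disabled the do/while(0) body above is empty, so the statement
// and any cost of evaluating it disappear entirely.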
299 typedef enum {
300 no_verbose = 0, // verbose turned off
301 stats_verbose, // only prints stats at the end of marking
302 low_verbose, // low verbose, mostly per region and per major event
303 medium_verbose, // a bit more detailed than low
304 high_verbose // per object verbose
305 } CMVerboseLevel;
307 class YoungList;
309 // Root Regions are regions that are not empty at the beginning of a
310 // marking cycle and which we might collect during an evacuation pause
311 // while the cycle is active. Given that, during evacuation pauses, we
312 // do not copy objects that are explicitly marked, what we have to do
313 // for the root regions is to scan them and mark all objects reachable
314 // from them. According to the SATB assumptions, we only need to visit
315 // each object once during marking. So, as long as we finish this scan
316 // before the next evacuation pause, we can copy the objects from the
317 // root regions without having to mark them or do anything else to them.
318 //
319 // Currently, we only support root region scanning once (at the start
320 // of the marking cycle) and the root regions are all the survivor
321 // regions populated during the initial-mark pause.
322 class CMRootRegions VALUE_OBJ_CLASS_SPEC {
323 private:
324 YoungList* _young_list;
325 ConcurrentMark* _cm;
327 volatile bool _scan_in_progress;
328 volatile bool _should_abort;
329 HeapRegion* volatile _next_survivor;
331 public:
332 CMRootRegions();
333 // We actually do most of the initialization in this method.
334 void init(G1CollectedHeap* g1h, ConcurrentMark* cm);
336 // Reset the claiming / scanning of the root regions.
337 void prepare_for_scan();
339 // Forces get_next() to return NULL so that the iteration aborts early.
340 void abort() { _should_abort = true; }
343 // Return true if the CM thread is actively scanning root regions,
343 // false otherwise.
344 bool scan_in_progress() { return _scan_in_progress; }
346 // Claim the next root region to scan atomically, or return NULL if
347 // all have been claimed.
348 HeapRegion* claim_next();
350 // Flag that we're done with root region scanning and notify anyone
351 // who's waiting on it. If aborted is false, assume that all regions
352 // have been claimed.
353 void scan_finished();
355 // If CM threads are still scanning root regions, wait until they
356 // are done. Return true if we had to wait, false otherwise.
357 bool wait_until_scan_finished();
358 };
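// Illustrative sketch (not part of the original file): the claim/scan loop
// that the class above supports. Each marking worker claims root regions one
// at a time until none remain; the function name and the elided scanning step
// are assumptions for illustration.
static void example_scan_all_root_regions(CMRootRegions* root_regions) {
  HeapRegion* hr = root_regions->claim_next();   // atomic claim, NULL when done
  while (hr != NULL) {
    // ... mark everything reachable from the objects in 'hr' ...
    hr = root_regions->claim_next();
  }
  // Once every worker is done the driver calls scan_finished(), which wakes
  // any evacuation pause blocked in wait_until_scan_finished().
}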
360 class ConcurrentMarkThread;
362 class ConcurrentMark: public CHeapObj<mtGC> {
363 friend class CMMarkStack;
364 friend class ConcurrentMarkThread;
365 friend class CMTask;
366 friend class CMBitMapClosure;
367 friend class CMGlobalObjectClosure;
368 friend class CMRemarkTask;
369 friend class CMConcurrentMarkingTask;
370 friend class G1ParNoteEndTask;
371 friend class CalcLiveObjectsClosure;
372 friend class G1CMRefProcTaskProxy;
373 friend class G1CMRefProcTaskExecutor;
374 friend class G1CMParKeepAliveAndDrainClosure;
375 friend class G1CMParDrainMarkingStackClosure;
377 protected:
378 ConcurrentMarkThread* _cmThread; // the thread doing the work
379 G1CollectedHeap* _g1h; // the heap.
380 uint _parallel_marking_threads; // the number of marking
381 // threads we're using
382 uint _max_parallel_marking_threads; // max number of marking
383 // threads we'll ever use
384 double _sleep_factor; // how much we have to sleep, with
385 // respect to the work we just did, to
386 // meet the marking overhead goal
387 double _marking_task_overhead; // marking target overhead for
388 // a single task
390 // same as the two above, but for the cleanup task
391 double _cleanup_sleep_factor;
392 double _cleanup_task_overhead;
394 FreeRegionList _cleanup_list;
396 // Concurrent marking support structures
397 CMBitMap _markBitMap1;
398 CMBitMap _markBitMap2;
399 CMBitMapRO* _prevMarkBitMap; // completed mark bitmap
400 CMBitMap* _nextMarkBitMap; // under-construction mark bitmap
402 BitMap _region_bm;
403 BitMap _card_bm;
405 // Heap bounds
406 HeapWord* _heap_start;
407 HeapWord* _heap_end;
409 // Root region tracking and claiming.
410 CMRootRegions _root_regions;
412 // For gray objects
413 CMMarkStack _markStack; // Grey objects behind global finger.
414 HeapWord* volatile _finger; // the global finger, region aligned,
415 // always points to the end of the
416 // last claimed region
418 // marking tasks
419 uint _max_worker_id; // maximum worker id
420 uint _active_tasks; // task num currently active
421 CMTask** _tasks; // task queue array (max_worker_id len)
422 CMTaskQueueSet* _task_queues; // task queue set
423 ParallelTaskTerminator _terminator; // for termination
425 // Two sync barriers that are used to synchronise tasks when an
426 // overflow occurs. The algorithm is the following. All tasks enter
427 // the first one to ensure that they have all stopped manipulating
428 // the global data structures. After they exit it, they re-initialise
429 // their data structures and task 0 re-initialises the global data
430 // structures. Then, they enter the second sync barrier. This
431 // ensures that no task starts doing work before all data
432 // structures (local and global) have been re-initialised. When they
433 // exit it, they are free to start working again.
434 WorkGangBarrierSync _first_overflow_barrier_sync;
435 WorkGangBarrierSync _second_overflow_barrier_sync;
437 // this is set by any task, when an overflow on the global data
438 // structures is detected.
439 volatile bool _has_overflown;
440 // true: marking is concurrent, false: we're in remark
441 volatile bool _concurrent;
442 // set at the end of a Full GC so that marking aborts
443 volatile bool _has_aborted;
445 // used when remark aborts due to an overflow to indicate that
446 // another concurrent marking phase should start
447 volatile bool _restart_for_overflow;
449 // This is true from the very start of concurrent marking until the
450 // point when all the tasks complete their work. It is really used
451 // to determine the points between the end of concurrent marking and
452 // time of remark.
453 volatile bool _concurrent_marking_in_progress;
455 // verbose level
456 CMVerboseLevel _verbose_level;
458 // All of these times are in ms.
459 NumberSeq _init_times;
460 NumberSeq _remark_times;
461 NumberSeq _remark_mark_times;
462 NumberSeq _remark_weak_ref_times;
463 NumberSeq _cleanup_times;
464 double _total_counting_time;
465 double _total_rs_scrub_time;
467 double* _accum_task_vtime; // accumulated task vtime
469 FlexibleWorkGang* _parallel_workers;
471 ForceOverflowSettings _force_overflow_conc;
472 ForceOverflowSettings _force_overflow_stw;
474 void weakRefsWork(bool clear_all_soft_refs);
476 void swapMarkBitMaps();
478 // It resets the global marking data structures, as well as the
479 // task local ones; should be called during initial mark.
480 void reset();
482 // Resets all the marking data structures. Called when we have to restart
483 // marking or when marking completes (via set_non_marking_state below).
484 void reset_marking_state(bool clear_overflow = true);
486 // We do this after we're done with marking so that the marking data
487 // structures are initialised to a sensible and predictable state.
488 void set_non_marking_state();
490 // It should be called to indicate which phase we're in (concurrent
491 // mark or remark) and how many threads are currently active.
492 void set_phase(uint active_tasks, bool concurrent);
494 // prints all gathered CM-related statistics
495 void print_stats();
497 bool cleanup_list_is_empty() {
498 return _cleanup_list.is_empty();
499 }
501 // accessor methods
502 uint parallel_marking_threads() { return _parallel_marking_threads; }
503 uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
504 double sleep_factor() { return _sleep_factor; }
505 double marking_task_overhead() { return _marking_task_overhead;}
506 double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
507 double cleanup_task_overhead() { return _cleanup_task_overhead;}
509 HeapWord* finger() { return _finger; }
510 bool concurrent() { return _concurrent; }
511 uint active_tasks() { return _active_tasks; }
512 ParallelTaskTerminator* terminator() { return &_terminator; }
514 // It claims the next available region to be scanned by a marking
515 // task/thread. It might return NULL if the next region is empty or
516 // we have run out of regions. In the latter case, out_of_regions()
517 // determines whether we've really run out of regions or the task
518 // should call claim_region() again. This might seem a bit
519 // awkward. Originally, the code was written so that claim_region()
520 // either successfully returned with a non-empty region or there
521 // were no more regions to be claimed. The problem with this was
522 // that, in certain circumstances, it iterated over large chunks of
523 // the heap finding only empty regions and, while it was working, it
524 // was preventing the calling task from calling its regular clock
525 // method. So, this way, each task will spend very little time in
526 // claim_region() and is allowed to call the regular clock method
527 // frequently.
528 HeapRegion* claim_region(uint worker_id);
530 // It determines whether we've run out of regions to scan.
531 bool out_of_regions() { return _finger == _heap_end; }
533 // Returns the task with the given id
534 CMTask* task(int id) {
535 assert(0 <= id && id < (int) _active_tasks,
536 "task id not within active bounds");
537 return _tasks[id];
538 }
540 // Returns the task queue with the given id
541 CMTaskQueue* task_queue(int id) {
542 assert(0 <= id && id < (int) _active_tasks,
543 "task queue id not within active bounds");
544 return (CMTaskQueue*) _task_queues->queue(id);
545 }
547 // Returns the task queue set
548 CMTaskQueueSet* task_queues() { return _task_queues; }
550 // Access / manipulation of the overflow flag which is set to
551 // indicate that the global stack has overflown
552 bool has_overflown() { return _has_overflown; }
553 void set_has_overflown() { _has_overflown = true; }
554 void clear_has_overflown() { _has_overflown = false; }
555 bool restart_for_overflow() { return _restart_for_overflow; }
557 bool has_aborted() { return _has_aborted; }
559 // Methods to enter the two overflow sync barriers
560 void enter_first_sync_barrier(uint worker_id);
561 void enter_second_sync_barrier(uint worker_id);
563 ForceOverflowSettings* force_overflow_conc() {
564 return &_force_overflow_conc;
565 }
567 ForceOverflowSettings* force_overflow_stw() {
568 return &_force_overflow_stw;
569 }
571 ForceOverflowSettings* force_overflow() {
572 if (concurrent()) {
573 return force_overflow_conc();
574 } else {
575 return force_overflow_stw();
576 }
577 }
579 // Live Data Counting data structures...
580 // These data structures are initialized at the start of
581 // marking. They are written to while marking is active.
582 // They are aggregated during remark; the aggregated values
583 // are then used to populate the _region_bm, _card_bm, and
584 // the total live bytes, which are then subsequently updated
585 // during cleanup.
587 // An array of bitmaps (one bit map per task). Each bitmap
588 // is used to record the cards spanned by the live objects
589 // marked by that task/worker.
590 BitMap* _count_card_bitmaps;
592 // Used to record the number of marked live bytes
593 // (for each region, by worker thread).
594 size_t** _count_marked_bytes;
596 // Card index of the bottom of the G1 heap. Used for biasing indices into
597 // the card bitmaps.
598 intptr_t _heap_bottom_card_num;
600 // Set to true when initialization is complete
601 bool _completed_initialization;
603 public:
604 // Manipulation of the global mark stack.
605 // Notice that the first mark_stack_push is CAS-based, whereas the
606 // two below are Mutex-based. This is OK since the first one is only
607 // called during evacuation pauses and doesn't compete with the
608 // other two (which are called by the marking tasks during
609 // concurrent marking or remark).
610 bool mark_stack_push(oop p) {
611 _markStack.par_push(p);
612 if (_markStack.overflow()) {
613 set_has_overflown();
614 return false;
615 }
616 return true;
617 }
618 bool mark_stack_push(oop* arr, int n) {
619 _markStack.par_push_arr(arr, n);
620 if (_markStack.overflow()) {
621 set_has_overflown();
622 return false;
623 }
624 return true;
625 }
626 void mark_stack_pop(oop* arr, int max, int* n) {
627 _markStack.par_pop_arr(arr, max, n);
628 }
629 size_t mark_stack_size() { return _markStack.size(); }
630 size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
631 bool mark_stack_overflow() { return _markStack.overflow(); }
632 bool mark_stack_empty() { return _markStack.isEmpty(); }
634 CMRootRegions* root_regions() { return &_root_regions; }
636 bool concurrent_marking_in_progress() {
637 return _concurrent_marking_in_progress;
638 }
639 void set_concurrent_marking_in_progress() {
640 _concurrent_marking_in_progress = true;
641 }
642 void clear_concurrent_marking_in_progress() {
643 _concurrent_marking_in_progress = false;
644 }
646 void update_accum_task_vtime(int i, double vtime) {
647 _accum_task_vtime[i] += vtime;
648 }
650 double all_task_accum_vtime() {
651 double ret = 0.0;
652 for (uint i = 0; i < _max_worker_id; ++i)
653 ret += _accum_task_vtime[i];
654 return ret;
655 }
657 // Attempts to steal an object from the task queues of other tasks
658 bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
659 return _task_queues->steal(worker_id, hash_seed, obj);
660 }
662 ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
663 ~ConcurrentMark();
665 ConcurrentMarkThread* cmThread() { return _cmThread; }
667 CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
668 CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
670 // Returns the number of GC threads to be used in a concurrent
671 // phase based on the number of GC threads being used in a STW
672 // phase.
673 uint scale_parallel_threads(uint n_par_threads);
675 // Calculates the number of GC threads to be used in a concurrent phase.
676 uint calc_parallel_marking_threads();
678 // The following three are interaction between CM and
679 // G1CollectedHeap
681 // This notifies CM that a root during initial-mark needs to be
682 // grayed. It is MT-safe. word_size is the size of the object in
683 // words. It is passed explicitly as sometimes we cannot calculate
684 // it from the given object because it might be in an inconsistent
685 // state (e.g., in to-space and being copied). So the caller is
686 // responsible for dealing with this issue (e.g., get the size from
687 // the from-space image when the to-space image might be
688 // inconsistent) and always passing the size. hr is the region that
689 // contains the object and it's passed optionally from callers who
690 // might already have it (no point in recalculating it).
691 inline void grayRoot(oop obj, size_t word_size,
692 uint worker_id, HeapRegion* hr = NULL);
694 // It iterates over the heap and for each object it comes across it
695 // will dump the contents of its reference fields, as well as
696 // liveness information for the object and its referents. The dump
697 // will be written to a file with the following name:
698 // G1PrintReachableBaseFile + "." + str.
699 // vo decides whether the prev (vo == UsePrevMarking), the next
700 // (vo == UseNextMarking) marking information, or the mark word
701 // (vo == UseMarkWord) will be used to determine the liveness of
702 // each object / referent.
703 // If all is true, all objects in the heap will be dumped, otherwise
704 // only the live ones. In the dump the following symbols / abbreviations
705 // are used:
706 // M : an explicitly live object (its bitmap bit is set)
707 // > : an implicitly live object (over tams)
708 // O : an object outside the G1 heap (typically: in the perm gen)
709 // NOT : a reference field whose referent is not live
710 // AND MARKED : indicates that an object is both explicitly and
711 // implicitly live (it should be one or the other, not both)
712 void print_reachable(const char* str,
713 VerifyOption vo, bool all) PRODUCT_RETURN;
715 // Clear the next marking bitmap (will be called concurrently).
716 void clearNextBitmap();
718 // These two do the work that needs to be done before and after the
719 // initial root checkpoint. Since this checkpoint can be done at two
720 // different points (i.e. an explicit pause or piggy-backed on a
721 // young collection), then it's nice to be able to easily share the
722 // pre/post code. It might be the case that we can put everything in
723 // the post method. TP
724 void checkpointRootsInitialPre();
725 void checkpointRootsInitialPost();
727 // Scan all the root regions and mark everything reachable from
728 // them.
729 void scanRootRegions();
731 // Scan a single root region and mark everything reachable from it.
732 void scanRootRegion(HeapRegion* hr, uint worker_id);
734 // Do concurrent phase of marking, to a tentative transitive closure.
735 void markFromRoots();
737 void checkpointRootsFinal(bool clear_all_soft_refs);
738 void checkpointRootsFinalWork();
739 void cleanup();
740 void completeCleanup();
742 // Mark in the previous bitmap. NB: this is usually read-only, so use
743 // this carefully!
744 inline void markPrev(oop p);
746 // Clears marks for all objects in the given range, for the prev,
747 // next, or both bitmaps. NB: the previous bitmap is usually
748 // read-only, so use this carefully!
749 void clearRangePrevBitmap(MemRegion mr);
750 void clearRangeNextBitmap(MemRegion mr);
751 void clearRangeBothBitmaps(MemRegion mr);
753 // Notify data structures that a GC has started.
754 void note_start_of_gc() {
755 _markStack.note_start_of_gc();
756 }
758 // Notify data structures that a GC is finished.
759 void note_end_of_gc() {
760 _markStack.note_end_of_gc();
761 }
763 // Verify that there are no CSet oops on the stacks (taskqueues /
764 // global mark stack), enqueued SATB buffers, per-thread SATB
765 // buffers, and fingers (global / per-task). The boolean parameters
766 // decide which of the above data structures to verify. If marking
767 // is not in progress, it's a no-op.
768 void verify_no_cset_oops(bool verify_stacks,
769 bool verify_enqueued_buffers,
770 bool verify_thread_buffers,
771 bool verify_fingers) PRODUCT_RETURN;
773 // It is called at the end of an evacuation pause during marking so
774 // that CM is notified of where the new end of the heap is. It
775 // doesn't do anything if concurrent_marking_in_progress() is false,
776 // unless the force parameter is true.
777 void update_g1_committed(bool force = false);
779 bool isMarked(oop p) const {
780 assert(p != NULL && p->is_oop(), "expected an oop");
781 HeapWord* addr = (HeapWord*)p;
782 assert(addr >= _nextMarkBitMap->startWord() &&
783 addr < _nextMarkBitMap->endWord(), "in a region");
785 return _nextMarkBitMap->isMarked(addr);
786 }
788 inline bool not_yet_marked(oop p) const;
790 // XXX Debug code
791 bool containing_card_is_marked(void* p);
792 bool containing_cards_are_marked(void* start, void* last);
794 bool isPrevMarked(oop p) const {
795 assert(p != NULL && p->is_oop(), "expected an oop");
796 HeapWord* addr = (HeapWord*)p;
797 assert(addr >= _prevMarkBitMap->startWord() &&
798 addr < _prevMarkBitMap->endWord(), "in a region");
800 return _prevMarkBitMap->isMarked(addr);
801 }
803 inline bool do_yield_check(uint worker_i = 0);
804 inline bool should_yield();
806 // Called to abort the marking cycle after a Full GC takes place.
807 void abort();
809 // This prints the global/local fingers. It is used for debugging.
810 NOT_PRODUCT(void print_finger();)
812 void print_summary_info();
814 void print_worker_threads_on(outputStream* st) const;
816 // The following indicate whether a given verbose level has been
817 // set. Notice that anything above stats is conditional on
818 // _MARKING_VERBOSE_ having been set to 1
819 bool verbose_stats() {
820 return _verbose_level >= stats_verbose;
821 }
822 bool verbose_low() {
823 return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
824 }
825 bool verbose_medium() {
826 return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
827 }
828 bool verbose_high() {
829 return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
830 }
832 // Liveness counting
834 // Utility routine to set an exclusive range of cards on the given
835 // card liveness bitmap
836 inline void set_card_bitmap_range(BitMap* card_bm,
837 BitMap::idx_t start_idx,
838 BitMap::idx_t end_idx,
839 bool is_par);
841 // Returns the card number of the bottom of the G1 heap.
842 // Used in biasing indices into accounting card bitmaps.
843 intptr_t heap_bottom_card_num() const {
844 return _heap_bottom_card_num;
845 }
847 // Returns the card bitmap for a given task or worker id.
848 BitMap* count_card_bitmap_for(uint worker_id) {
849 assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
850 assert(_count_card_bitmaps != NULL, "uninitialized");
851 BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
852 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
853 return task_card_bm;
854 }
856 // Returns the array containing the marked bytes for each region,
857 // for the given worker or task id.
858 size_t* count_marked_bytes_array_for(uint worker_id) {
859 assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
860 assert(_count_marked_bytes != NULL, "uninitialized");
861 size_t* marked_bytes_array = _count_marked_bytes[worker_id];
862 assert(marked_bytes_array != NULL, "uninitialized");
863 return marked_bytes_array;
864 }
866 // Returns the index in the liveness accounting card table bitmap
867 // for the given address
868 inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
870 // Counts the size of the given memory region in the given
871 // marked_bytes array slot for the given HeapRegion.
872 // Sets the bits in the given card bitmap that are associated with the
873 // cards that are spanned by the memory region.
874 inline void count_region(MemRegion mr, HeapRegion* hr,
875 size_t* marked_bytes_array,
876 BitMap* task_card_bm);
878 // Counts the given memory region in the task/worker counting
879 // data structures for the given worker id.
880 inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
882 // Counts the given memory region in the task/worker counting
883 // data structures for the given worker id.
884 inline void count_region(MemRegion mr, uint worker_id);
886 // Counts the given object in the given task/worker counting
887 // data structures.
888 inline void count_object(oop obj, HeapRegion* hr,
889 size_t* marked_bytes_array,
890 BitMap* task_card_bm);
892 // Counts the given object in the task/worker counting data
893 // structures for the given worker id.
894 inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
896 // Attempts to mark the given object and, if successful, counts
897 // the object in the given task/worker counting structures.
898 inline bool par_mark_and_count(oop obj, HeapRegion* hr,
899 size_t* marked_bytes_array,
900 BitMap* task_card_bm);
902 // Attempts to mark the given object and, if successful, counts
903 // the object in the task/worker counting structures for the
904 // given worker id.
905 inline bool par_mark_and_count(oop obj, size_t word_size,
906 HeapRegion* hr, uint worker_id);
908 // Attempts to mark the given object and, if successful, counts
909 // the object in the task/worker counting structures for the
910 // given worker id.
911 inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
913 // Similar to the above routine but we don't know the heap region that
914 // contains the object to be marked/counted, which this routine looks up.
915 inline bool par_mark_and_count(oop obj, uint worker_id);
917 // Similar to the above routine but there are times when we cannot
918 // safely calculate the size of obj due to races and we, therefore,
919 // pass the size in as a parameter. It is the caller's responsibility
920 // to ensure that the size passed in for obj is valid.
921 inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
923 // Unconditionally mark the given object, and unconditionally count
924 // the object in the counting structures for worker id 0.
925 // Should *not* be called from parallel code.
926 inline bool mark_and_count(oop obj, HeapRegion* hr);
928 // Similar to the above routine but we don't know the heap region that
929 // contains the object to be marked/counted, which this routine looks up.
930 // Should *not* be called from parallel code.
931 inline bool mark_and_count(oop obj);
933 // Returns true if initialization was successfully completed.
934 bool completed_initialization() const {
935 return _completed_initialization;
936 }
938 protected:
939 // Clear all the per-task bitmaps and arrays used to store the
940 // counting data.
941 void clear_all_count_data();
943 // Aggregates the counting data for each worker/task
944 // that was constructed while marking. Also sets
945 // the amount of marked bytes for each region and
946 // the top at concurrent mark count.
947 void aggregate_count_data();
949 // Verification routine
950 void verify_count_data();
951 };
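// Illustrative sketch (not part of the original file): how the overflow
// machinery above fits together. Once the global mark stack overflows, every
// task eventually observes has_overflown(), abandons its current work, and
// the tasks rendezvous at the two barriers before marking restarts.
// Schematically, inside CMTask::do_marking_step() (CMTask is a friend, so it
// may call the protected members):
//
//   if (_cm->has_overflown()) {
//     _cm->enter_first_sync_barrier(_worker_id);   // stop touching global state
//     // task 0 resets the global data structures, each task resets its own
//     _cm->enter_second_sync_barrier(_worker_id);  // nobody restarts early
//   }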
953 // A class representing a marking task.
954 class CMTask : public TerminatorTerminator {
955 private:
956 enum PrivateConstants {
957 // the regular clock call is called once the scanned words reaches
958 // this limit
959 words_scanned_period = 12*1024,
960 // the regular clock call is called once the number of visited
961 // references reaches this limit
962 refs_reached_period = 384,
963 // initial value for the hash seed, used in the work stealing code
964 init_hash_seed = 17,
965 // how many entries will be transferred between global stack and
966 // local queues
967 global_stack_transfer_size = 16
968 };
970 uint _worker_id;
971 G1CollectedHeap* _g1h;
972 ConcurrentMark* _cm;
973 CMBitMap* _nextMarkBitMap;
974 // the task queue of this task
975 CMTaskQueue* _task_queue;
976 private:
977 // the task queue set---needed for stealing
978 CMTaskQueueSet* _task_queues;
979 // indicates whether the task has been claimed---this is only for
980 // debugging purposes
981 bool _claimed;
983 // number of calls to this task
984 int _calls;
986 // when the virtual timer reaches this time, the marking step should
987 // exit
988 double _time_target_ms;
989 // the start time of the current marking step
990 double _start_time_ms;
992 // the oop closure used for iterations over oops
993 G1CMOopClosure* _cm_oop_closure;
995 // the region this task is scanning, NULL if we're not scanning any
996 HeapRegion* _curr_region;
997 // the local finger of this task, NULL if we're not scanning a region
998 HeapWord* _finger;
999 // limit of the region this task is scanning, NULL if we're not scanning one
1000 HeapWord* _region_limit;
1002 // the number of words this task has scanned
1003 size_t _words_scanned;
1004 // When _words_scanned reaches this limit, the regular clock is
1005 // called. Notice that this might be decreased under certain
1006 // circumstances (i.e. when we believe that we did an expensive
1007 // operation).
1008 size_t _words_scanned_limit;
1009 // the initial value of _words_scanned_limit (i.e. what it was
1010 // before it was decreased).
1011 size_t _real_words_scanned_limit;
1013 // the number of references this task has visited
1014 size_t _refs_reached;
1015 // When _refs_reached reaches this limit, the regular clock is
1016 // called. Notice that this might be decreased under certain
1017 // circumstances (i.e. when we believe that we did an expensive
1018 // operation).
1019 size_t _refs_reached_limit;
1020 // the initial value of _refs_reached_limit (i.e. what it was before
1021 // it was decreased).
1022 size_t _real_refs_reached_limit;
1024 // used by the work stealing stuff
1025 int _hash_seed;
1026 // if this is true, then the task has aborted for some reason
1027 bool _has_aborted;
1028 // set when the task aborts because it has met its time quota
1029 bool _has_timed_out;
1030 // true when we're draining SATB buffers; this avoids the task
1031 // aborting due to SATB buffers being available (as we're already
1032 // dealing with them)
1033 bool _draining_satb_buffers;
1035 // number sequence of past step times
1036 NumberSeq _step_times_ms;
1037 // elapsed time of this task
1038 double _elapsed_time_ms;
1039 // termination time of this task
1040 double _termination_time_ms;
1041 // when this task got into the termination protocol
1042 double _termination_start_time_ms;
1044 // true when the task is during a concurrent phase, false when it is
1045 // in the remark phase (so, in the latter case, we do not have to
1046 // check all the things that we have to check during the concurrent
1047 // phase, i.e. SATB buffer availability...)
1048 bool _concurrent;
1050 TruncatedSeq _marking_step_diffs_ms;
1052 // Counting data structures. Embedding the task's marked_bytes_array
1053 // and card bitmap into the actual task saves having to go through
1054 // the ConcurrentMark object.
1055 size_t* _marked_bytes_array;
1056 BitMap* _card_bm;
1058 // LOTS of statistics related with this task
1059 #if _MARKING_STATS_
1060 NumberSeq _all_clock_intervals_ms;
1061 double _interval_start_time_ms;
1063 int _aborted;
1064 int _aborted_overflow;
1065 int _aborted_cm_aborted;
1066 int _aborted_yield;
1067 int _aborted_timed_out;
1068 int _aborted_satb;
1069 int _aborted_termination;
1071 int _steal_attempts;
1072 int _steals;
1074 int _clock_due_to_marking;
1075 int _clock_due_to_scanning;
1077 int _local_pushes;
1078 int _local_pops;
1079 int _local_max_size;
1080 int _objs_scanned;
1082 int _global_pushes;
1083 int _global_pops;
1084 int _global_max_size;
1086 int _global_transfers_to;
1087 int _global_transfers_from;
1089 int _regions_claimed;
1090 int _objs_found_on_bitmap;
1092 int _satb_buffers_processed;
1093 #endif // _MARKING_STATS_
1095 // it updates the local fields after this task has claimed
1096 // a new region to scan
1097 void setup_for_region(HeapRegion* hr);
1098 // it brings up-to-date the limit of the region
1099 void update_region_limit();
1101 // called when either the words scanned or the refs visited limit
1102 // has been reached
1103 void reached_limit();
1104 // recalculates the words scanned and refs visited limits
1105 void recalculate_limits();
1106 // decreases the words scanned and refs visited limits when we reach
1107 // an expensive operation
1108 void decrease_limits();
1109 // it checks whether the words scanned or refs visited reached their
1110 // respective limit and calls reached_limit() if they have
1111 void check_limits() {
1112 if (_words_scanned >= _words_scanned_limit ||
1113 _refs_reached >= _refs_reached_limit) {
1114 reached_limit();
1115 }
1116 }
1117 // this is supposed to be called regularly during a marking step as
1118 // it checks a bunch of conditions that might cause the marking step
1119 // to abort
1120 void regular_clock_call();
1121 bool concurrent() { return _concurrent; }
1123 public:
1124 // It resets the task; it should be called right at the beginning of
1125 // a marking phase.
1126 void reset(CMBitMap* _nextMarkBitMap);
1127 // it clears all the fields that correspond to a claimed region.
1128 void clear_region_fields();
1130 void set_concurrent(bool concurrent) { _concurrent = concurrent; }
1132 // The main method of this class which performs a marking step
1133 // trying not to exceed the given duration. However, it might exit
1134 // prematurely, according to some conditions (i.e. SATB buffers are
1135 // available for processing).
1136 void do_marking_step(double target_ms, bool do_stealing, bool do_termination);
1138 // These two calls start and stop the timer
1139 void record_start_time() {
1140 _elapsed_time_ms = os::elapsedTime() * 1000.0;
1141 }
1142 void record_end_time() {
1143 _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
1144 }
1146 // returns the worker ID associated with this task.
1147 uint worker_id() { return _worker_id; }
1149 // From TerminatorTerminator. It determines whether this task should
1150 // exit the termination protocol after it's entered it.
1151 virtual bool should_exit_termination();
1153 // Resets the local region fields after a task has finished scanning a
1154 // region; or when they have become stale as a result of the region
1155 // being evacuated.
1156 void giveup_current_region();
1158 HeapWord* finger() { return _finger; }
1160 bool has_aborted() { return _has_aborted; }
1161 void set_has_aborted() { _has_aborted = true; }
1162 void clear_has_aborted() { _has_aborted = false; }
1163 bool has_timed_out() { return _has_timed_out; }
1164 bool claimed() { return _claimed; }
1166 void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
1168 // It grays the object by marking it and, if necessary, pushing it
1169 // on the local queue
1170 inline void deal_with_reference(oop obj);
1172 // It scans an object and visits its children.
1173 void scan_object(oop obj);
1175 // It pushes an object on the local queue.
1176 inline void push(oop obj);
1178 // These two move entries to/from the global stack.
1179 void move_entries_to_global_stack();
1180 void get_entries_from_global_stack();
1182 // It pops and scans objects from the local queue. If partially is
1183 // true, then it stops when the queue size is of a given limit. If
1184 // partially is false, then it stops when the queue is empty.
1185 void drain_local_queue(bool partially);
1186 // It moves entries from the global stack to the local queue and
1187 // drains the local queue. If partially is true, then it stops when
1188 // both the global stack and the local queue reach a given size. If
1189 // partially if false, it tries to empty them totally.
1190 void drain_global_stack(bool partially);
1191 // It keeps picking SATB buffers and processing them until no SATB
1192 // buffers are available.
1193 void drain_satb_buffers();
1195 // moves the local finger to a new location
1196 inline void move_finger_to(HeapWord* new_finger) {
1197 assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
1198 _finger = new_finger;
1199 }
1201 CMTask(uint worker_id, ConcurrentMark *cm,
1202 size_t* marked_bytes, BitMap* card_bm,
1203 CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
1205 // it prints statistics associated with this task
1206 void print_stats();
1208 #if _MARKING_STATS_
1209 void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
1210 #endif // _MARKING_STATS_
1211 };
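// Illustrative sketch (not part of the original file): the periodic "clock"
// pattern the counters above implement. While scanning, a task bumps
// _words_scanned and _refs_reached; check_limits() then routes through
// reached_limit() to regular_clock_call() once either counter crosses its
// limit, and that call is where the task notices an expired time quota, an
// aborted marking cycle, a pending yield, or available SATB buffers and sets
// _has_aborted. Schematically, inside the scanning loop of do_marking_step():
//
//   scan_object(obj);           // bumps _words_scanned / _refs_reached
//   check_limits();             // -> reached_limit() -> regular_clock_call()
//   if (has_aborted()) return;  // give up the region and exit the step early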
1213 // Class that's used to print out per-region liveness
1214 // information. It's currently used at the end of marking and also
1215 // after we sort the old regions at the end of the cleanup operation.
1216 class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
1217 private:
1218 outputStream* _out;
1220 // Accumulators for these values.
1221 size_t _total_used_bytes;
1222 size_t _total_capacity_bytes;
1223 size_t _total_prev_live_bytes;
1224 size_t _total_next_live_bytes;
1226 // These are set up when we come across a "starts humongous" region
1227 // (as this is where most of this information is stored, not in the
1228 // subsequent "continues humongous" regions). After that, for every
1229 // region in a given humongous region series we deduce the right
1230 // values for it by simply subtracting the appropriate amount from
1231 // these fields. All these values should reach 0 after we've visited
1232 // the last region in the series.
1233 size_t _hum_used_bytes;
1234 size_t _hum_capacity_bytes;
1235 size_t _hum_prev_live_bytes;
1236 size_t _hum_next_live_bytes;
1238 static double perc(size_t val, size_t total) {
1239 if (total == 0) {
1240 return 0.0;
1241 } else {
1242 return 100.0 * ((double) val / (double) total);
1243 }
1244 }
1246 static double bytes_to_mb(size_t val) {
1247 return (double) val / (double) M;
1248 }
1250 // See the .cpp file.
1251 size_t get_hum_bytes(size_t* hum_bytes);
1252 void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
1253 size_t* prev_live_bytes, size_t* next_live_bytes);
1255 public:
1256 // The header and footer are printed in the constructor and
1257 // destructor respectively.
1258 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
1259 virtual bool doHeapRegion(HeapRegion* r);
1260 ~G1PrintRegionLivenessInfoClosure();
1261 };
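// Illustrative sketch (not part of the original file): how a HeapRegionClosure
// like the one above is typically driven. The call site, stream, and phase
// name below are assumptions for illustration, and the full G1CollectedHeap
// declaration (g1CollectedHeap.hpp) is assumed to be available.
static void example_print_region_liveness(G1CollectedHeap* g1h, outputStream* out) {
  G1PrintRegionLivenessInfoClosure cl(out, "Post-Marking");  // header printed here
  g1h->heap_region_iterate(&cl);          // doHeapRegion() called once per region
}                                         // footer printed by the destructor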
1263 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP