Thu, 12 Jan 2012 00:06:47 -0800
6484965: G1: piggy-back liveness accounting phase on marking
Summary: Remove the separate counting phase of concurrent marking by tracking the number of marked bytes and the cards spanned by marked objects in marking task/worker thread-local data structures, which are updated as individual objects are marked.
Reviewed-by: brutisso, tonyp
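In outline: each marking task/worker owns a marked-bytes array (one slot per
heap region) and a card bitmap, and updates them as it marks objects; remark
then only has to aggregate these per-worker structures rather than perform a
separate counting pass. Below is a minimal sketch of the counting step, assuming
hypothetical helpers region_index_for() and card_index_for() (the latter assumed
to return card indices already biased by the heap-bottom card number); the real
code lives in concurrentMark.{cpp,inline.hpp}:

  // Sketch only: account for one newly-marked object in the per-worker
  // counting structures.
  void count_object(oop obj, HeapRegion* hr,
                    size_t* marked_bytes_array,  // one slot per heap region
                    BitMap* task_card_bm) {      // one bit per card
    size_t region_index  = region_index_for(hr);  // hypothetical helper
    size_t size_in_words = obj->size();
    marked_bytes_array[region_index] += size_in_words * HeapWordSize;
    // Set the bits for every card spanned by [obj, obj + size).
    HeapWord* start = (HeapWord*) obj;
    HeapWord* last  = start + size_in_words - 1;
    for (size_t c = card_index_for(start); c <= card_index_for(last); c++) {
      task_card_bm->set_bit(c);
    }
  }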
1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
28 #include "gc_implementation/g1/heapRegionSets.hpp"
29 #include "utilities/taskqueue.hpp"
31 class G1CollectedHeap;
32 class CMTask;
33 typedef GenericTaskQueue<oop> CMTaskQueue;
34 typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;
36 // Closure used by CM during concurrent reference discovery
37 // and reference processing (during remarking) to determine
38 // if a particular object is alive. It is primarily used
39 // to determine if referents of discovered reference objects
40 // are alive. An instance is also embedded into the
41 // reference processor as the _is_alive_non_header field
42 class G1CMIsAliveClosure: public BoolObjectClosure {
43 G1CollectedHeap* _g1;
44 public:
45 G1CMIsAliveClosure(G1CollectedHeap* g1) :
46 _g1(g1)
47 {}
49 void do_object(oop obj) {
50 ShouldNotCallThis();
51 }
52 bool do_object_b(oop obj);
53 };
55 // A generic CM bit map. This is essentially a wrapper around the BitMap
56 // class, with one bit per (1<<_shifter) HeapWords.
58 class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
59 protected:
60 HeapWord* _bmStartWord; // base address of range covered by map
61 size_t _bmWordSize; // map size (in #HeapWords covered)
62 const int _shifter; // map to char or bit
63 VirtualSpace _virtual_space; // virtual space underlying the bit map
64 BitMap _bm; // the bit map itself
66 public:
67 // constructor
68 CMBitMapRO(ReservedSpace rs, int shifter);
70 enum { do_yield = true };
72 // inquiries
73 HeapWord* startWord() const { return _bmStartWord; }
74 size_t sizeInWords() const { return _bmWordSize; }
75 // the following is one past the last word in space
76 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }
78 // read marks
80 bool isMarked(HeapWord* addr) const {
81 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
82 "outside underlying space?");
83 return _bm.at(heapWordToOffset(addr));
84 }
86 // iteration
87 inline bool iterate(BitMapClosure* cl, MemRegion mr);
88 inline bool iterate(BitMapClosure* cl);
90 // Return the address corresponding to the next marked bit at or after
91 // "addr", and before "limit", if "limit" is non-NULL. If there is no
92 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
93 HeapWord* getNextMarkedWordAddress(HeapWord* addr,
94 HeapWord* limit = NULL) const;
95 // Return the address corresponding to the next unmarked bit at or after
96 // "addr", and before "limit", if "limit" is non-NULL. If there is no
97 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
98 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
99 HeapWord* limit = NULL) const;
101 // conversion utilities
102 // XXX Fix these so that offsets are size_t's...
103 HeapWord* offsetToHeapWord(size_t offset) const {
104 return _bmStartWord + (offset << _shifter);
105 }
106 size_t heapWordToOffset(HeapWord* addr) const {
107 return pointer_delta(addr, _bmStartWord) >> _shifter;
108 }
109 int heapWordDiffToOffsetDiff(size_t diff) const;
110 HeapWord* nextWord(HeapWord* addr) {
111 return offsetToHeapWord(heapWordToOffset(addr) + 1);
112 }
114 void mostly_disjoint_range_union(BitMap* from_bitmap,
115 size_t from_start_index,
116 HeapWord* to_start_word,
117 size_t word_num);
119 // debugging
120 NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
121 };
123 class CMBitMap : public CMBitMapRO {
125 public:
126 // constructor
127 CMBitMap(ReservedSpace rs, int shifter) :
128 CMBitMapRO(rs, shifter) {}
130 // write marks
131 void mark(HeapWord* addr) {
132 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
133 "outside underlying space?");
134 _bm.set_bit(heapWordToOffset(addr));
135 }
136 void clear(HeapWord* addr) {
137 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
138 "outside underlying space?");
139 _bm.clear_bit(heapWordToOffset(addr));
140 }
141 bool parMark(HeapWord* addr) {
142 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
143 "outside underlying space?");
144 return _bm.par_set_bit(heapWordToOffset(addr));
145 }
146 bool parClear(HeapWord* addr) {
147 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
148 "outside underlying space?");
149 return _bm.par_clear_bit(heapWordToOffset(addr));
150 }
151 void markRange(MemRegion mr);
152 void clearAll();
153 void clearRange(MemRegion mr);
155 // Starting at the bit corresponding to "addr" (inclusive), find the next
156 // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
157 // the end of this run (stopping at "end_addr"). Return the MemRegion
158 // covering from the start of the region corresponding to the first bit
159 // of the run to the end of the region corresponding to the last bit of
160 // the run. If there is no "1" bit at or after "addr", return an empty
161 // MemRegion.
162 MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
163 };
165 // Represents a marking stack used by the CM collector.
166 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
167 class CMMarkStack VALUE_OBJ_CLASS_SPEC {
168 ConcurrentMark* _cm;
169 oop* _base; // bottom of stack
170 jint _index; // one more than last occupied index
171 jint _capacity; // max #elements
172 jint _saved_index; // value of _index saved at start of GC
173 NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
175 bool _overflow;
176 DEBUG_ONLY(bool _drain_in_progress;)
177 DEBUG_ONLY(bool _drain_in_progress_yields;)
179 public:
180 CMMarkStack(ConcurrentMark* cm);
181 ~CMMarkStack();
183 void allocate(size_t size);
185 oop pop() {
186 if (!isEmpty()) {
187 return _base[--_index];
188 }
189 return NULL;
190 }
192 // If overflow happens, don't do the push, and record the overflow.
193 // *Requires* that "ptr" is already marked.
194 void push(oop ptr) {
195 if (isFull()) {
196 // Record overflow.
197 _overflow = true;
198 return;
199 } else {
200 _base[_index++] = ptr;
201 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
202 }
203 }
204 // Non-block impl. Note: concurrency is allowed only with other
205 // "par_push" operations, not with "pop" or "drain". We would need
206 // parallel versions of them if such concurrency was desired.
207 void par_push(oop ptr);
209 // Pushes the first "n" elements of "ptr_arr" on the stack.
210 // Non-block impl. Note: concurrency is allowed only with other
211 // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
212 void par_adjoin_arr(oop* ptr_arr, int n);
214 // Pushes the first "n" elements of "ptr_arr" on the stack.
215 // Locking impl: concurrency is allowed only with
216 // "par_push_arr" and/or "par_pop_arr" operations, which use the same
217 // locking strategy.
218 void par_push_arr(oop* ptr_arr, int n);
220 // If returns false, the array was empty. Otherwise, removes up to "max"
221 // elements from the stack, and transfers them to "ptr_arr" in an
222 // unspecified order. The actual number transferred is given in "n" ("n
223 // == 0" is deliberately redundant with the return value.) Locking impl:
224 // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
225 // operations, which use the same locking strategy.
226 bool par_pop_arr(oop* ptr_arr, int max, int* n);
228 // Drain the mark stack, applying the given closure to all fields of
229 // objects on the stack. (That is, continue until the stack is empty,
230 // even if closure applications add entries to the stack.) The "bm"
231 // argument, if non-null, may be used to verify that only marked objects
232 // are on the mark stack. If "yield_after" is "true", then the
233 // concurrent marker performing the drain offers to yield after
234 // processing each object. If a yield occurs, stops the drain operation
235 // and returns false. Otherwise, returns true.
236 template<class OopClosureClass>
237 bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);
239 bool isEmpty() { return _index == 0; }
240 bool isFull() { return _index == _capacity; }
241 int maxElems() { return _capacity; }
243 bool overflow() { return _overflow; }
244 void clear_overflow() { _overflow = false; }
246 int size() { return _index; }
248 void setEmpty() { _index = 0; clear_overflow(); }
250 // Record the current index.
251 void note_start_of_gc();
253 // Make sure that we have not added any entries to the stack during GC.
254 void note_end_of_gc();
256 // iterate over the oops in the mark stack, up to the bound recorded via
257 // the call above.
258 void oops_do(OopClosure* f);
259 };
261 class CMRegionStack VALUE_OBJ_CLASS_SPEC {
262 MemRegion* _base;
263 jint _capacity;
264 jint _index;
265 jint _oops_do_bound;
266 bool _overflow;
267 public:
268 CMRegionStack();
269 ~CMRegionStack();
270 void allocate(size_t size);
272 // This is lock-free; assumes that it will only be called in parallel
273 // with other "push" operations (no pops).
274 void push_lock_free(MemRegion mr);
276 // Lock-free; assumes that it will only be called in parallel
277 // with other "pop" operations (no pushes).
278 MemRegion pop_lock_free();
280 #if 0
281 // The routines that manipulate the region stack with a lock are
282 // not currently used. They should be retained, however, as a
283 // diagnostic aid.
285 // These two are the implementations that use a lock. They can be
286 // called concurrently with each other but they should not be called
287 // concurrently with the lock-free versions (push() / pop()).
288 void push_with_lock(MemRegion mr);
289 MemRegion pop_with_lock();
290 #endif
292 bool isEmpty() { return _index == 0; }
293 bool isFull() { return _index == _capacity; }
295 bool overflow() { return _overflow; }
296 void clear_overflow() { _overflow = false; }
298 int size() { return _index; }
300 // Iterates over the entries in the region stack and invalidates
301 // (i.e. assigns MemRegion() to) the ones that point to regions in
302 // the collection set.
303 bool invalidate_entries_into_cset();
305 // This gives an upper bound on how far the iteration in
306 // invalidate_entries_into_cset() will reach. This prevents
307 // newly-added entries from being unnecessarily scanned.
308 void set_oops_do_bound() {
309 _oops_do_bound = _index;
310 }
312 void setEmpty() { _index = 0; clear_overflow(); }
313 };
315 class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
316 private:
317 #ifndef PRODUCT
318 uintx _num_remaining;
319 bool _force;
320 #endif // !defined(PRODUCT)
322 public:
323 void init() PRODUCT_RETURN;
324 void update() PRODUCT_RETURN;
325 bool should_force() PRODUCT_RETURN_( return false; );
326 };
328 // this will enable a variety of different statistics per GC task
329 #define _MARKING_STATS_ 0
330 // this will enable the higher verbose levels
331 #define _MARKING_VERBOSE_ 0
333 #if _MARKING_STATS_
334 #define statsOnly(statement) \
335 do { \
336 statement ; \
337 } while (0)
338 #else // _MARKING_STATS_
339 #define statsOnly(statement) \
340 do { \
341 } while (0)
342 #endif // _MARKING_STATS_
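// Usage example (illustrative): statsOnly( ++_local_pushes ); expands to the
// statement when _MARKING_STATS_ is 1 and to an empty statement otherwise.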
344 typedef enum {
345 no_verbose = 0, // verbose turned off
346 stats_verbose, // only prints stats at the end of marking
347 low_verbose, // low verbose, mostly per region and per major event
348 medium_verbose, // a bit more detailed than low
349 high_verbose // per object verbose
350 } CMVerboseLevel;
353 class ConcurrentMarkThread;
355 class ConcurrentMark: public CHeapObj {
356 friend class ConcurrentMarkThread;
357 friend class CMTask;
358 friend class CMBitMapClosure;
359 friend class CSetMarkOopClosure;
360 friend class CMGlobalObjectClosure;
361 friend class CMRemarkTask;
362 friend class CMConcurrentMarkingTask;
363 friend class G1ParNoteEndTask;
364 friend class CalcLiveObjectsClosure;
365 friend class G1CMRefProcTaskProxy;
366 friend class G1CMRefProcTaskExecutor;
367 friend class G1CMParKeepAliveAndDrainClosure;
368 friend class G1CMParDrainMarkingStackClosure;
370 protected:
371 ConcurrentMarkThread* _cmThread; // the thread doing the work
372 G1CollectedHeap* _g1h; // the heap.
373 uint _parallel_marking_threads; // the number of marking
374 // threads we're using
375 uint _max_parallel_marking_threads; // max number of marking
376 // threads we'll ever use
377 double _sleep_factor; // how much we have to sleep, with
378 // respect to the work we just did, to
379 // meet the marking overhead goal
380 double _marking_task_overhead; // marking target overhead for
381 // a single task
383 // same as the two above, but for the cleanup task
384 double _cleanup_sleep_factor;
385 double _cleanup_task_overhead;
387 FreeRegionList _cleanup_list;
389 // Concurrent marking support structures
390 CMBitMap _markBitMap1;
391 CMBitMap _markBitMap2;
392 CMBitMapRO* _prevMarkBitMap; // completed mark bitmap
393 CMBitMap* _nextMarkBitMap; // under-construction mark bitmap
394 bool _at_least_one_mark_complete;
396 BitMap _region_bm;
397 BitMap _card_bm;
399 // Heap bounds
400 HeapWord* _heap_start;
401 HeapWord* _heap_end;
403 // For gray objects
404 CMMarkStack _markStack; // Grey objects behind global finger.
405 CMRegionStack _regionStack; // Grey regions behind global finger.
406 HeapWord* volatile _finger; // the global finger, region aligned,
407 // always points to the end of the
408 // last claimed region
410 // marking tasks
411 uint _max_task_num; // maximum task number
412 uint _active_tasks; // task num currently active
413 CMTask** _tasks; // task queue array (max_task_num len)
414 CMTaskQueueSet* _task_queues; // task queue set
415 ParallelTaskTerminator _terminator; // for termination
417 // Two sync barriers that are used to synchronise tasks when an
418 // overflow occurs. The algorithm is the following. All tasks enter
419 // the first one to ensure that they have all stopped manipulating
420 // the global data structures. After they exit it, they re-initialise
421 // their data structures and task 0 re-initialises the global data
422 // structures. Then, they enter the second sync barrier. This
423 // ensures that no task starts doing work before all data
424 // structures (local and global) have been re-initialised. When they
425 // exit it, they are free to start working again.
426 WorkGangBarrierSync _first_overflow_barrier_sync;
427 WorkGangBarrierSync _second_overflow_barrier_sync;
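// For example, a task that detects an overflow would, in outline (a sketch,
// not the actual code in concurrentMark.cpp):
//   enter_first_sync_barrier(task_num);   // wait until all tasks have stopped
//   // re-initialise the task-local structures; task 0 also resets the
//   // global ones
//   enter_second_sync_barrier(task_num);  // no task resumes before that is done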
429 // this is set by any task, when an overflow on the global data
430 // structures is detected.
431 volatile bool _has_overflown;
432 // true: marking is concurrent, false: we're in remark
433 volatile bool _concurrent;
434 // set at the end of a Full GC so that marking aborts
435 volatile bool _has_aborted;
437 // used when remark aborts due to an overflow to indicate that
438 // another concurrent marking phase should start
439 volatile bool _restart_for_overflow;
441 // This is true from the very start of concurrent marking until the
442 // point when all the tasks complete their work. It is really used
443 // to identify the window between the end of concurrent marking and
444 // the remark pause.
445 volatile bool _concurrent_marking_in_progress;
447 // verbose level
448 CMVerboseLevel _verbose_level;
450 // These two fields are used to implement the optimisation that
451 // avoids pushing objects on the global/region stack if there are
452 // no collection set regions above the lowest finger.
454 // This is the lowest finger (among the global and local fingers),
455 // which is calculated before a new collection set is chosen.
456 HeapWord* _min_finger;
457 // If this flag is true, objects/regions that are marked below the
458 // finger should be pushed on the stack(s). If this flag is
459 // false, it is safe not to push them on the stack(s).
460 bool _should_gray_objects;
462 // All of these times are in ms.
463 NumberSeq _init_times;
464 NumberSeq _remark_times;
465 NumberSeq _remark_mark_times;
466 NumberSeq _remark_weak_ref_times;
467 NumberSeq _cleanup_times;
468 double _total_counting_time;
469 double _total_rs_scrub_time;
471 double* _accum_task_vtime; // accumulated task vtime
473 FlexibleWorkGang* _parallel_workers;
475 ForceOverflowSettings _force_overflow_conc;
476 ForceOverflowSettings _force_overflow_stw;
478 void weakRefsWork(bool clear_all_soft_refs);
480 void swapMarkBitMaps();
482 // It resets the global marking data structures, as well as the
483 // task local ones; should be called during initial mark.
484 void reset();
485 // It resets all the marking data structures.
486 void clear_marking_state(bool clear_overflow = true);
488 // It should be called to indicate which phase we're in (concurrent
489 // mark or remark) and how many threads are currently active.
490 void set_phase(uint active_tasks, bool concurrent);
491 // We do this after we're done with marking so that the marking data
492 // structures are initialised to a sensible and predictable state.
493 void set_non_marking_state();
495 // prints all gathered CM-related statistics
496 void print_stats();
498 bool cleanup_list_is_empty() {
499 return _cleanup_list.is_empty();
500 }
502 // accessor methods
503 uint parallel_marking_threads() { return _parallel_marking_threads; }
504 uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
505 double sleep_factor() { return _sleep_factor; }
506 double marking_task_overhead() { return _marking_task_overhead;}
507 double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
508 double cleanup_task_overhead() { return _cleanup_task_overhead;}
510 HeapWord* finger() { return _finger; }
511 bool concurrent() { return _concurrent; }
512 uint active_tasks() { return _active_tasks; }
513 ParallelTaskTerminator* terminator() { return &_terminator; }
515 // It claims the next available region to be scanned by a marking
516 // task. It might return NULL if the next region is empty or we have
517 // run out of regions. In the latter case, out_of_regions()
518 // determines whether we've really run out of regions or the task
519 // should call claim_region() again. This might seem a bit
520 // awkward. Originally, the code was written so that claim_region()
521 // either successfully returned with a non-empty region or there
522 // were no more regions to be claimed. The problem with this was
523 // that, in certain circumstances, it iterated over large chunks of
524 // the heap finding only empty regions and, while it was working, it
525 // was preventing the calling task from calling its regular clock
526 // method. So, this way, each task will spend very little time in
527 // claim_region() and is allowed to call the regular clock method
528 // frequently.
529 HeapRegion* claim_region(int task);
531 // It determines whether we've run out of regions to scan.
532 bool out_of_regions() { return _finger == _heap_end; }
534 // Returns the task with the given id
535 CMTask* task(int id) {
536 assert(0 <= id && id < (int) _active_tasks,
537 "task id not within active bounds");
538 return _tasks[id];
539 }
541 // Returns the task queue with the given id
542 CMTaskQueue* task_queue(int id) {
543 assert(0 <= id && id < (int) _active_tasks,
544 "task queue id not within active bounds");
545 return (CMTaskQueue*) _task_queues->queue(id);
546 }
548 // Returns the task queue set
549 CMTaskQueueSet* task_queues() { return _task_queues; }
551 // Access / manipulation of the overflow flag which is set to
552 // indicate that the global stack or region stack has overflown
553 bool has_overflown() { return _has_overflown; }
554 void set_has_overflown() { _has_overflown = true; }
555 void clear_has_overflown() { _has_overflown = false; }
557 bool has_aborted() { return _has_aborted; }
558 bool restart_for_overflow() { return _restart_for_overflow; }
560 // Methods to enter the two overflow sync barriers
561 void enter_first_sync_barrier(int task_num);
562 void enter_second_sync_barrier(int task_num);
564 ForceOverflowSettings* force_overflow_conc() {
565 return &_force_overflow_conc;
566 }
568 ForceOverflowSettings* force_overflow_stw() {
569 return &_force_overflow_stw;
570 }
572 ForceOverflowSettings* force_overflow() {
573 if (concurrent()) {
574 return force_overflow_conc();
575 } else {
576 return force_overflow_stw();
577 }
578 }
580 // Live Data Counting data structures...
581 // These data structures are initialized at the start of
582 // marking. They are written to while marking is active.
583 // They are aggregated during remark; the aggregated values
584 // are then used to populate the _region_bm, _card_bm, and
585 // the total live bytes, which are subsequently updated
586 // during cleanup.
588 // An array of bitmaps (one bit map per task). Each bitmap
589 // is used to record the cards spanned by the live objects
590 // marked by that task/worker.
591 BitMap* _count_card_bitmaps;
593 // Used to record the number of marked live bytes
594 // (for each region, by worker thread).
595 size_t** _count_marked_bytes;
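// Illustratively: when worker w marks an object of size_in_bytes that lives in
// the heap region with index i, it bumps _count_marked_bytes[w][i] by
// size_in_bytes and sets the bits in _count_card_bitmaps[w] that correspond to
// the cards spanned by the object.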
597 // Card index of the bottom of the G1 heap. Used for biasing indices into
598 // the card bitmaps.
599 intptr_t _heap_bottom_card_num;
601 public:
602 // Manipulation of the global mark stack.
603 // Notice that the first mark_stack_push is CAS-based, whereas the
604 // two below are Mutex-based. This is OK since the first one is only
605 // called during evacuation pauses and doesn't compete with the
606 // other two (which are called by the marking tasks during
607 // concurrent marking or remark).
608 bool mark_stack_push(oop p) {
609 _markStack.par_push(p);
610 if (_markStack.overflow()) {
611 set_has_overflown();
612 return false;
613 }
614 return true;
615 }
616 bool mark_stack_push(oop* arr, int n) {
617 _markStack.par_push_arr(arr, n);
618 if (_markStack.overflow()) {
619 set_has_overflown();
620 return false;
621 }
622 return true;
623 }
624 void mark_stack_pop(oop* arr, int max, int* n) {
625 _markStack.par_pop_arr(arr, max, n);
626 }
627 size_t mark_stack_size() { return _markStack.size(); }
628 size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
629 bool mark_stack_overflow() { return _markStack.overflow(); }
630 bool mark_stack_empty() { return _markStack.isEmpty(); }
632 // (Lock-free) Manipulation of the region stack
633 bool region_stack_push_lock_free(MemRegion mr) {
634 // Currently we only call the lock-free version during evacuation
635 // pauses.
636 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
638 _regionStack.push_lock_free(mr);
639 if (_regionStack.overflow()) {
640 set_has_overflown();
641 return false;
642 }
643 return true;
644 }
646 // Lock-free version of region-stack pop. Should only be
647 // called in tandem with other lock-free pops.
648 MemRegion region_stack_pop_lock_free() {
649 return _regionStack.pop_lock_free();
650 }
652 #if 0
653 // The routines that manipulate the region stack with a lock are
654 // not currently used. They should be retained, however, as a
655 // diagnostic aid.
657 bool region_stack_push_with_lock(MemRegion mr) {
658 // Currently we only call the lock-based version during either
659 // concurrent marking or remark.
660 assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
661 "if we are at a safepoint it should be the remark safepoint");
663 _regionStack.push_with_lock(mr);
664 if (_regionStack.overflow()) {
665 set_has_overflown();
666 return false;
667 }
668 return true;
669 }
671 MemRegion region_stack_pop_with_lock() {
672 // Currently we only call the lock-based version during either
673 // concurrent marking or remark.
674 assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
675 "if we are at a safepoint it should be the remark safepoint");
677 return _regionStack.pop_with_lock();
678 }
679 #endif
681 int region_stack_size() { return _regionStack.size(); }
682 bool region_stack_overflow() { return _regionStack.overflow(); }
683 bool region_stack_empty() { return _regionStack.isEmpty(); }
685 // Iterate over any regions that were aborted while draining the
686 // region stack (any such regions are saved in the corresponding
687 // CMTask) and invalidate (i.e. assign to the empty MemRegion())
688 // any regions that point into the collection set.
689 bool invalidate_aborted_regions_in_cset();
691 // Returns true if there are any aborted memory regions.
692 bool has_aborted_regions();
694 bool concurrent_marking_in_progress() {
695 return _concurrent_marking_in_progress;
696 }
697 void set_concurrent_marking_in_progress() {
698 _concurrent_marking_in_progress = true;
699 }
700 void clear_concurrent_marking_in_progress() {
701 _concurrent_marking_in_progress = false;
702 }
704 void update_accum_task_vtime(int i, double vtime) {
705 _accum_task_vtime[i] += vtime;
706 }
708 double all_task_accum_vtime() {
709 double ret = 0.0;
710 for (int i = 0; i < (int)_max_task_num; ++i)
711 ret += _accum_task_vtime[i];
712 return ret;
713 }
715 // Attempts to steal an object from the task queues of other tasks
716 bool try_stealing(int task_num, int* hash_seed, oop& obj) {
717 return _task_queues->steal(task_num, hash_seed, obj);
718 }
720 // It grays an object by first marking it. Then, if it's behind the
721 // global finger, it also pushes it on the global stack.
722 void deal_with_reference(oop obj);
724 ConcurrentMark(ReservedSpace rs, int max_regions);
725 ~ConcurrentMark();
727 ConcurrentMarkThread* cmThread() { return _cmThread; }
729 CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
730 CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
732 // Returns the number of GC threads to be used in a concurrent
733 // phase based on the number of GC threads being used in a STW
734 // phase.
735 uint scale_parallel_threads(uint n_par_threads);
737 // Calculates the number of GC threads to be used in a concurrent phase.
738 uint calc_parallel_marking_threads();
740 // The following three are interactions between CM and
741 // G1CollectedHeap
743 // This notifies CM that a root during initial-mark needs to be
744 // grayed. It is MT-safe.
745 inline void grayRoot(oop obj, size_t word_size, uint worker_id);
747 // It's used during evacuation pauses to gray a region, if
748 // necessary, and it's MT-safe. It assumes that the caller has
749 // marked any objects on that region. If _should_gray_objects is
750 // true and we're still doing concurrent marking, the region is
751 // pushed on the region stack, if it is located below the global
752 // finger, otherwise we do nothing.
753 void grayRegionIfNecessary(MemRegion mr);
755 // It's used during evacuation pauses to mark and, if necessary,
756 // gray a single object and it's MT-safe. It assumes the caller did
757 // not mark the object. If _should_gray_objects is true and we're
758 // still doing concurrent marking, the object is pushed on the
759 // global stack, if it is located below the global finger, otherwise
760 // we do nothing.
761 void markAndGrayObjectIfNecessary(oop p);
763 // It iterates over the heap and for each object it comes across it
764 // will dump the contents of its reference fields, as well as
765 // liveness information for the object and its referents. The dump
766 // will be written to a file with the following name:
767 // G1PrintReachableBaseFile + "." + str.
768 // vo decides whether the prev (vo == UsePrevMarking), the next
769 // (vo == UseNextMarking) marking information, or the mark word
770 // (vo == UseMarkWord) will be used to determine the liveness of
771 // each object / referent.
772 // If all is true, all objects in the heap will be dumped, otherwise
773 // only the live ones. In the dump the following symbols / abbreviations
774 // are used:
775 // M : an explicitly live object (its bitmap bit is set)
776 // > : an implicitly live object (over tams)
777 // O : an object outside the G1 heap (typically: in the perm gen)
778 // NOT : a reference field whose referent is not live
779 // AND MARKED : indicates that an object is both explicitly and
780 // implicitly live (it should be one or the other, not both)
781 void print_reachable(const char* str,
782 VerifyOption vo, bool all) PRODUCT_RETURN;
784 // Clear the next marking bitmap (will be called concurrently).
785 void clearNextBitmap();
787 // These two do the work that needs to be done before and after the
788 // initial root checkpoint. Since this checkpoint can be done at two
789 // different points (i.e. an explicit pause or piggy-backed on a
790 // young collection), it's nice to be able to easily share the
791 // pre/post code. It might be the case that we can put everything in
792 // the post method. TP
793 void checkpointRootsInitialPre();
794 void checkpointRootsInitialPost();
796 // Do concurrent phase of marking, to a tentative transitive closure.
797 void markFromRoots();
799 // Process all unprocessed SATB buffers. It is called at the
800 // beginning of an evacuation pause.
801 void drainAllSATBBuffers();
803 void checkpointRootsFinal(bool clear_all_soft_refs);
804 void checkpointRootsFinalWork();
805 void cleanup();
806 void completeCleanup();
808 // Mark in the previous bitmap. NB: this is usually read-only, so use
809 // this carefully!
810 inline void markPrev(oop p);
812 // Clears marks for all objects in the given range, for the prev,
813 // next, or both bitmaps. NB: the previous bitmap is usually
814 // read-only, so use this carefully!
815 void clearRangePrevBitmap(MemRegion mr);
816 void clearRangeNextBitmap(MemRegion mr);
817 void clearRangeBothBitmaps(MemRegion mr);
819 // Notify data structures that a GC has started.
820 void note_start_of_gc() {
821 _markStack.note_start_of_gc();
822 }
824 // Notify data structures that a GC is finished.
825 void note_end_of_gc() {
826 _markStack.note_end_of_gc();
827 }
829 // Iterate over the oops in the mark stack and all local queues. It
830 // also calls invalidate_entries_into_cset() on the region stack.
831 void oops_do(OopClosure* f);
833 // Verify that there are no CSet oops on the stacks (taskqueues /
834 // global mark stack), enqueued SATB buffers, per-thread SATB
835 // buffers, and fingers (global / per-task). The boolean parameters
836 // decide which of the above data structures to verify. If marking
837 // is not in progress, it's a no-op.
838 void verify_no_cset_oops(bool verify_stacks,
839 bool verify_enqueued_buffers,
840 bool verify_thread_buffers,
841 bool verify_fingers) PRODUCT_RETURN;
843 // It is called at the end of an evacuation pause during marking so
844 // that CM is notified of where the new end of the heap is. It
845 // doesn't do anything if concurrent_marking_in_progress() is false,
846 // unless the force parameter is true.
847 void update_g1_committed(bool force = false);
849 void complete_marking_in_collection_set();
851 // It indicates that a new collection set is being chosen.
852 void newCSet();
854 // It registers a collection set heap region with CM. This is used
855 // to determine whether any heap regions are located above the finger.
856 void registerCSetRegion(HeapRegion* hr);
858 // Resets the region fields of any active CMTask whose region fields
859 // are in the collection set (i.e. the region currently claimed by
860 // the CMTask will be evacuated and may be used, subsequently, as
861 // an alloc region). When this happens the region fields in the CMTask
862 // are stale and, hence, should be cleared causing the worker thread
863 // to claim a new region.
864 void reset_active_task_region_fields_in_cset();
866 // Registers the maximum region-end associated with a set of
867 // regions with CM. Again this is used to determine whether any
868 // heap regions are located above the finger.
869 void register_collection_set_finger(HeapWord* max_finger) {
870 // max_finger is the highest heap region end of the regions currently
871 // contained in the collection set. If this value is larger than
872 // _min_finger then we need to gray objects.
873 // This routine is like registerCSetRegion but for an entire
874 // collection of regions.
875 if (max_finger > _min_finger) {
876 _should_gray_objects = true;
877 }
878 }
880 // Returns "true" if at least one mark has been completed.
881 bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }
883 bool isMarked(oop p) const {
884 assert(p != NULL && p->is_oop(), "expected an oop");
885 HeapWord* addr = (HeapWord*)p;
886 assert(addr >= _nextMarkBitMap->startWord() &&
887 addr < _nextMarkBitMap->endWord(), "in a region");
889 return _nextMarkBitMap->isMarked(addr);
890 }
892 inline bool not_yet_marked(oop p) const;
894 // XXX Debug code
895 bool containing_card_is_marked(void* p);
896 bool containing_cards_are_marked(void* start, void* last);
898 bool isPrevMarked(oop p) const {
899 assert(p != NULL && p->is_oop(), "expected an oop");
900 HeapWord* addr = (HeapWord*)p;
901 assert(addr >= _prevMarkBitMap->startWord() &&
902 addr < _prevMarkBitMap->endWord(), "in a region");
904 return _prevMarkBitMap->isMarked(addr);
905 }
907 inline bool do_yield_check(uint worker_i = 0);
908 inline bool should_yield();
910 // Called to abort the marking cycle after a Full GC takes place.
911 void abort();
913 // This prints the global/local fingers. It is used for debugging.
914 NOT_PRODUCT(void print_finger();)
916 void print_summary_info();
918 void print_worker_threads_on(outputStream* st) const;
920 // The following indicate whether a given verbose level has been
921 // set. Notice that anything above stats is conditional on
922 // _MARKING_VERBOSE_ having been set to 1
923 bool verbose_stats() {
924 return _verbose_level >= stats_verbose;
925 }
926 bool verbose_low() {
927 return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
928 }
929 bool verbose_medium() {
930 return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
931 }
932 bool verbose_high() {
933 return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
934 }
936 // Counting data structure accessors
938 // Returns the card number of the bottom of the G1 heap.
939 // Used in biasing indices into accounting card bitmaps.
940 intptr_t heap_bottom_card_num() const {
941 return _heap_bottom_card_num;
942 }
944 // Returns the card bitmap for a given task or worker id.
945 BitMap* count_card_bitmap_for(uint worker_id) {
946 assert(0 <= worker_id && worker_id < _max_task_num, "oob");
947 assert(_count_card_bitmaps != NULL, "uninitialized");
948 BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
949 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
950 return task_card_bm;
951 }
953 // Returns the array containing the marked bytes for each region,
954 // for the given worker or task id.
955 size_t* count_marked_bytes_array_for(uint worker_id) {
956 assert(0 <= worker_id && worker_id < _max_task_num, "oob");
957 assert(_count_marked_bytes != NULL, "uninitialized");
958 size_t* marked_bytes_array = _count_marked_bytes[worker_id];
959 assert(marked_bytes_array != NULL, "uninitialized");
960 return marked_bytes_array;
961 }
963 // Returns the index in the liveness accounting card table bitmap
964 // for the given address
965 inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
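// Likely shape of the mapping (a sketch; the real implementation is in the
// .inline.hpp file): take the card number of addr, e.g.
//   uintptr_t(addr) >> CardTableModRefBS::card_shift
// and subtract heap_bottom_card_num(), so that the card containing the bottom
// of the heap maps to bit 0 of the per-worker card bitmaps.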
967 // Counts the size of the given memory region in the given
968 // marked_bytes array slot for the given HeapRegion.
969 // Sets the bits in the given card bitmap that are associated with the
970 // cards that are spanned by the memory region.
971 inline void count_region(MemRegion mr, HeapRegion* hr,
972 size_t* marked_bytes_array,
973 BitMap* task_card_bm);
975 // Counts the given memory region in the task/worker counting
976 // data structures for the given worker id.
977 inline void count_region(MemRegion mr, uint worker_id);
979 // Counts the given object in the given task/worker counting
980 // data structures.
981 inline void count_object(oop obj, HeapRegion* hr,
982 size_t* marked_bytes_array,
983 BitMap* task_card_bm);
985 // Counts the given object in the task/worker counting data
986 // structures for the given worker id.
987 inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
989 // Attempts to mark the given object and, if successful, counts
990 // the object in the given task/worker counting structures.
991 inline bool par_mark_and_count(oop obj, HeapRegion* hr,
992 size_t* marked_bytes_array,
993 BitMap* task_card_bm);
995 // Attempts to mark the given object and, if successful, counts
996 // the object in the task/worker counting structures for the
997 // given worker id.
998 inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
1000 // Similar to the above routine but we don't know the heap region that
1001 // contains the object to be marked/counted, which this routine looks up.
1002 inline bool par_mark_and_count(oop obj, uint worker_id);
1004 // Similar to the above routine but there are times when we cannot
1005 // safely calculate the size of obj due to races and we, therefore,
1006 // pass the size in as a parameter. It is the caller's responsibility
1007 // to ensure that the size passed in for obj is valid.
1008 inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
1010 // Unconditionally mark the given object, and unconditionally count
1011 // the object in the counting structures for worker id 0.
1012 // Should *not* be called from parallel code.
1013 inline bool mark_and_count(oop obj, HeapRegion* hr);
1015 // Similar to the above routine but we don't know the heap region that
1016 // contains the object to be marked/counted, which this routine looks up.
1017 // Should *not* be called from parallel code.
1018 inline bool mark_and_count(oop obj);
1020 protected:
1021 // Clear all the per-task bitmaps and arrays used to store the
1022 // counting data.
1023 void clear_all_count_data();
1025 // Aggregates the counting data for each worker/task
1026 // that was constructed while marking. Also sets
1027 // the amount of marked bytes for each region and
1028 // the top at concurrent mark count.
1029 void aggregate_count_data();
1031 // Verification routine
1032 void verify_count_data();
1033 };
1035 // A class representing a marking task.
1036 class CMTask : public TerminatorTerminator {
1037 private:
1038 enum PrivateConstants {
1039 // the regular clock call is called once the number of scanned words reaches
1040 // this limit
1041 words_scanned_period = 12*1024,
1042 // the regular clock call is called once the number of visited
1043 // references reaches this limit
1044 refs_reached_period = 384,
1045 // initial value for the hash seed, used in the work stealing code
1046 init_hash_seed = 17,
1047 // how many entries will be transferred between global stack and
1048 // local queues
1049 global_stack_transfer_size = 16
1050 };
1052 int _task_id;
1053 G1CollectedHeap* _g1h;
1054 ConcurrentMark* _cm;
1055 CMBitMap* _nextMarkBitMap;
1056 // the task queue of this task
1057 CMTaskQueue* _task_queue;
1058 private:
1059 // the task queue set---needed for stealing
1060 CMTaskQueueSet* _task_queues;
1061 // indicates whether the task has been claimed---this is only for
1062 // debugging purposes
1063 bool _claimed;
1065 // number of calls to this task
1066 int _calls;
1068 // when the virtual timer reaches this time, the marking step should
1069 // exit
1070 double _time_target_ms;
1071 // the start time of the current marking step
1072 double _start_time_ms;
1074 // the oop closure used for iterations over oops
1075 G1CMOopClosure* _cm_oop_closure;
1077 // the region this task is scanning, NULL if we're not scanning any
1078 HeapRegion* _curr_region;
1079 // the local finger of this task, NULL if we're not scanning a region
1080 HeapWord* _finger;
1081 // limit of the region this task is scanning, NULL if we're not scanning one
1082 HeapWord* _region_limit;
1084 // This is used only when we scan regions popped from the region
1085 // stack. It records the last object we scanned on such a
1086 // region. It is used to ensure that, if we abort region
1087 // iteration, we do not rescan the first part of the region. This
1088 // should be NULL when we're not scanning a region from the region
1089 // stack.
1090 HeapWord* _region_finger;
1092 // If we abort while scanning a region we record the remaining
1093 // unscanned portion and check this field when marking restarts.
1094 // This avoids having to push on the region stack while other
1095 // marking threads may still be popping regions.
1096 // If we were to push the unscanned portion directly to the
1097 // region stack then we would need to use locking versions
1098 // of the push and pop operations.
1099 MemRegion _aborted_region;
1101 // the number of words this task has scanned
1102 size_t _words_scanned;
1103 // When _words_scanned reaches this limit, the regular clock is
1104 // called. Notice that this might be decreased under certain
1105 // circumstances (i.e. when we believe that we did an expensive
1106 // operation).
1107 size_t _words_scanned_limit;
1108 // the initial value of _words_scanned_limit (i.e. what it was
1109 // before it was decreased).
1110 size_t _real_words_scanned_limit;
1112 // the number of references this task has visited
1113 size_t _refs_reached;
1114 // When _refs_reached reaches this limit, the regular clock is
1115 // called. Notice that this might be decreased under certain
1116 // circumstances (i.e. when we believe that we did an expensive
1117 // operation).
1118 size_t _refs_reached_limit;
1119 // the initial value of _refs_reached_limit (i.e. what it was before
1120 // it was decreased).
1121 size_t _real_refs_reached_limit;
1123 // used by the work stealing stuff
1124 int _hash_seed;
1125 // if this is true, then the task has aborted for some reason
1126 bool _has_aborted;
1127 // set when the task aborts because it has met its time quota
1128 bool _has_timed_out;
1129 // true when we're draining SATB buffers; this avoids the task
1130 // aborting due to SATB buffers being available (as we're already
1131 // dealing with them)
1132 bool _draining_satb_buffers;
1134 // number sequence of past step times
1135 NumberSeq _step_times_ms;
1136 // elapsed time of this task
1137 double _elapsed_time_ms;
1138 // termination time of this task
1139 double _termination_time_ms;
1140 // when this task got into the termination protocol
1141 double _termination_start_time_ms;
1143 // true when the task is in a concurrent phase, false when it is
1144 // in the remark phase (so, in the latter case, we do not have to
1145 // check all the things that we have to check during the concurrent
1146 // phase, i.e. SATB buffer availability...)
1147 bool _concurrent;
1149 TruncatedSeq _marking_step_diffs_ms;
1151 // Counting data structures. Embedding the task's marked_bytes_array
1152 // and card bitmap into the actual task saves having to go through
1153 // the ConcurrentMark object.
1154 size_t* _marked_bytes_array;
1155 BitMap* _card_bm;
1157 // LOTS of statistics related with this task
1158 #if _MARKING_STATS_
1159 NumberSeq _all_clock_intervals_ms;
1160 double _interval_start_time_ms;
1162 int _aborted;
1163 int _aborted_overflow;
1164 int _aborted_cm_aborted;
1165 int _aborted_yield;
1166 int _aborted_timed_out;
1167 int _aborted_satb;
1168 int _aborted_termination;
1170 int _steal_attempts;
1171 int _steals;
1173 int _clock_due_to_marking;
1174 int _clock_due_to_scanning;
1176 int _local_pushes;
1177 int _local_pops;
1178 int _local_max_size;
1179 int _objs_scanned;
1181 int _global_pushes;
1182 int _global_pops;
1183 int _global_max_size;
1185 int _global_transfers_to;
1186 int _global_transfers_from;
1188 int _region_stack_pops;
1190 int _regions_claimed;
1191 int _objs_found_on_bitmap;
1193 int _satb_buffers_processed;
1194 #endif // _MARKING_STATS_
1196 // it updates the local fields after this task has claimed
1197 // a new region to scan
1198 void setup_for_region(HeapRegion* hr);
1199 // it brings the limit of the region up to date
1200 void update_region_limit();
1202 // called when either the words scanned or the refs visited limit
1203 // has been reached
1204 void reached_limit();
1205 // recalculates the words scanned and refs visited limits
1206 void recalculate_limits();
1207 // decreases the words scanned and refs visited limits when we reach
1208 // an expensive operation
1209 void decrease_limits();
1210 // it checks whether the words scanned or refs visited reached their
1211 // respective limit and calls reached_limit() if they have
1212 void check_limits() {
1213 if (_words_scanned >= _words_scanned_limit ||
1214 _refs_reached >= _refs_reached_limit) {
1215 reached_limit();
1216 }
1217 }
1218 // this is supposed to be called regularly during a marking step as
1219 // it checks a bunch of conditions that might cause the marking step
1220 // to abort
1221 void regular_clock_call();
1222 bool concurrent() { return _concurrent; }
1224 public:
1225 // It resets the task; it should be called right at the beginning of
1226 // a marking phase.
1227 void reset(CMBitMap* _nextMarkBitMap);
1228 // it clears all the fields that correspond to a claimed region.
1229 void clear_region_fields();
1231 void set_concurrent(bool concurrent) { _concurrent = concurrent; }
1233 // The main method of this class which performs a marking step
1234 // trying not to exceed the given duration. However, it might exit
1235 // prematurely, under certain conditions (e.g. SATB buffers are
1236 // available for processing).
1237 void do_marking_step(double target_ms, bool do_stealing, bool do_termination);
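// Illustrative only: a caller picks a time budget and repeatedly invokes, e.g.,
//   do_marking_step(10.0 /* ms */, true /* do_stealing */, true /* do_termination */);
// checking has_aborted() between calls.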
1239 // These two calls start and stop the timer
1240 void record_start_time() {
1241 _elapsed_time_ms = os::elapsedTime() * 1000.0;
1242 }
1243 void record_end_time() {
1244 _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
1245 }
1247 // returns the task ID
1248 int task_id() { return _task_id; }
1250 // From TerminatorTerminator. It determines whether this task should
1251 // exit the termination protocol after it's entered it.
1252 virtual bool should_exit_termination();
1254 // Resets the local region fields after a task has finished scanning a
1255 // region; or when they have become stale as a result of the region
1256 // being evacuated.
1257 void giveup_current_region();
1259 HeapWord* finger() { return _finger; }
1261 bool has_aborted() { return _has_aborted; }
1262 void set_has_aborted() { _has_aborted = true; }
1263 void clear_has_aborted() { _has_aborted = false; }
1264 bool has_timed_out() { return _has_timed_out; }
1265 bool claimed() { return _claimed; }
1267 // Support routines for the partially scanned region that may be
1268 // recorded as a result of aborting while draining the CMRegionStack
1269 MemRegion aborted_region() { return _aborted_region; }
1270 void set_aborted_region(MemRegion mr)
1271 { _aborted_region = mr; }
1273 // Clears any recorded partially scanned region
1274 void clear_aborted_region() { set_aborted_region(MemRegion()); }
1276 void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
1278 // It grays the object by marking it and, if necessary, pushing it
1279 // on the local queue
1280 inline void deal_with_reference(oop obj);
1282 // It scans an object and visits its children.
1283 void scan_object(oop obj);
1285 // It pushes an object on the local queue.
1286 inline void push(oop obj);
1288 // These two move entries to/from the global stack.
1289 void move_entries_to_global_stack();
1290 void get_entries_from_global_stack();
1292 // It pops and scans objects from the local queue. If partially is
1293 // true, then it stops when the queue size drops below a given limit. If
1294 // partially is false, then it stops when the queue is empty.
1295 void drain_local_queue(bool partially);
1296 // It moves entries from the global stack to the local queue and
1297 // drains the local queue. If partially is true, then it stops when
1298 // both the global stack and the local queue reach a given size. If
1299 // partially is false, it tries to empty them totally.
1300 void drain_global_stack(bool partially);
1301 // It keeps picking SATB buffers and processing them until no SATB
1302 // buffers are available.
1303 void drain_satb_buffers();
1305 // It keeps popping regions from the region stack and processing
1306 // them until the region stack is empty.
1307 void drain_region_stack(BitMapClosure* closure);
1309 // moves the local finger to a new location
1310 inline void move_finger_to(HeapWord* new_finger) {
1311 assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
1312 _finger = new_finger;
1313 }
1315 // moves the region finger to a new location
1316 inline void move_region_finger_to(HeapWord* new_finger) {
1317 assert(new_finger < _cm->finger(), "invariant");
1318 _region_finger = new_finger;
1319 }
1321 CMTask(int task_num, ConcurrentMark *cm,
1322 size_t* marked_bytes, BitMap* card_bm,
1323 CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
1325 // it prints statistics associated with this task
1326 void print_stats();
1328 #if _MARKING_STATS_
1329 void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
1330 #endif // _MARKING_STATS_
1331 };
1333 // Class that's used to print out per-region liveness
1334 // information. It's currently used at the end of marking and also
1335 // after we sort the old regions at the end of the cleanup operation.
1336 class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
1337 private:
1338 outputStream* _out;
1340 // Accumulators for these values.
1341 size_t _total_used_bytes;
1342 size_t _total_capacity_bytes;
1343 size_t _total_prev_live_bytes;
1344 size_t _total_next_live_bytes;
1346 // These are set up when we come across a "starts humongous" region
1347 // (as this is where most of this information is stored, not in the
1348 // subsequent "continues humongous" regions). After that, for every
1349 // region in a given humongous region series we deduce the right
1350 // values for it by simply subtracting the appropriate amount from
1351 // these fields. All these values should reach 0 after we've visited
1352 // the last region in the series.
1353 size_t _hum_used_bytes;
1354 size_t _hum_capacity_bytes;
1355 size_t _hum_prev_live_bytes;
1356 size_t _hum_next_live_bytes;
1358 static double perc(size_t val, size_t total) {
1359 if (total == 0) {
1360 return 0.0;
1361 } else {
1362 return 100.0 * ((double) val / (double) total);
1363 }
1364 }
1366 static double bytes_to_mb(size_t val) {
1367 return (double) val / (double) M;
1368 }
1370 // See the .cpp file.
1371 size_t get_hum_bytes(size_t* hum_bytes);
1372 void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
1373 size_t* prev_live_bytes, size_t* next_live_bytes);
1375 public:
1376 // The header and footer are printed in the constructor and
1377 // destructor respectively.
1378 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
1379 virtual bool doHeapRegion(HeapRegion* r);
1380 ~G1PrintRegionLivenessInfoClosure();
1381 };
1383 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP