Wed, 25 Jan 2012 12:58:23 -0500
7127706: G1: re-enable survivors during the initial-mark pause
Summary: Re-enable survivors during the initial-mark pause. Afterwards, the concurrent marking threads have to scan them and mark everything reachable from them. The next GC will have to wait for the survivors to be scanned.
Reviewed-by: brutisso, johnc
1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
28 #include "gc_implementation/g1/heapRegionSets.hpp"
29 #include "utilities/taskqueue.hpp"
31 class G1CollectedHeap;
32 class CMTask;
33 typedef GenericTaskQueue<oop> CMTaskQueue;
34 typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;
36 // Closure used by CM during concurrent reference discovery
37 // and reference processing (during remarking) to determine
38 // if a particular object is alive. It is primarily used
39 // to determine if referents of discovered reference objects
40 // are alive. An instance is also embedded into the
41 // reference processor as the _is_alive_non_header field
42 class G1CMIsAliveClosure: public BoolObjectClosure {
43 G1CollectedHeap* _g1;
44 public:
45 G1CMIsAliveClosure(G1CollectedHeap* g1) :
46 _g1(g1)
47 {}
49 void do_object(oop obj) {
50 ShouldNotCallThis();
51 }
52 bool do_object_b(oop obj);
53 };
55 // A generic CM bit map. This is essentially a wrapper around the BitMap
56 // class, with one bit per (1<<_shifter) HeapWords.
58 class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
59 protected:
60 HeapWord* _bmStartWord; // base address of range covered by map
61 size_t _bmWordSize; // map size (in #HeapWords covered)
62 const int _shifter; // map to char or bit
63 VirtualSpace _virtual_space; // underlying memory for the bit map
64 BitMap _bm; // the bit map itself
66 public:
67 // constructor
68 CMBitMapRO(ReservedSpace rs, int shifter);
70 enum { do_yield = true };
72 // inquiries
73 HeapWord* startWord() const { return _bmStartWord; }
74 size_t sizeInWords() const { return _bmWordSize; }
75 // the following is one past the last word in space
76 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }
78 // read marks
80 bool isMarked(HeapWord* addr) const {
81 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
82 "outside underlying space?");
83 return _bm.at(heapWordToOffset(addr));
84 }
86 // iteration
87 inline bool iterate(BitMapClosure* cl, MemRegion mr);
88 inline bool iterate(BitMapClosure* cl);
90 // Return the address corresponding to the next marked bit at or after
91 // "addr", and before "limit", if "limit" is non-NULL. If there is no
92 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
93 HeapWord* getNextMarkedWordAddress(HeapWord* addr,
94 HeapWord* limit = NULL) const;
95 // Return the address corresponding to the next unmarked bit at or after
96 // "addr", and before "limit", if "limit" is non-NULL. If there is no
97 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
98 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
99 HeapWord* limit = NULL) const;
101 // conversion utilities
102 // XXX Fix these so that offsets are size_t's...
103 HeapWord* offsetToHeapWord(size_t offset) const {
104 return _bmStartWord + (offset << _shifter);
105 }
106 size_t heapWordToOffset(HeapWord* addr) const {
107 return pointer_delta(addr, _bmStartWord) >> _shifter;
108 }
109 int heapWordDiffToOffsetDiff(size_t diff) const;
110 HeapWord* nextWord(HeapWord* addr) {
111 return offsetToHeapWord(heapWordToOffset(addr) + 1);
112 }
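  // Worked example (illustrative): with _shifter == 0 each bit covers
  // exactly one HeapWord, so for an address five words into the map
  //   heapWordToOffset(_bmStartWord + 5) == 5
  //   offsetToHeapWord(5)                == _bmStartWord + 5
  //   nextWord(_bmStartWord + 5)         == _bmStartWord + 6
  // More generally, one bit covers (1 << _shifter) HeapWords, so offsets
  // are heap-word deltas shifted right by _shifter.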
114 void mostly_disjoint_range_union(BitMap* from_bitmap,
115 size_t from_start_index,
116 HeapWord* to_start_word,
117 size_t word_num);
119 // debugging
120 NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
121 };
123 class CMBitMap : public CMBitMapRO {
125 public:
126 // constructor
127 CMBitMap(ReservedSpace rs, int shifter) :
128 CMBitMapRO(rs, shifter) {}
130 // write marks
131 void mark(HeapWord* addr) {
132 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
133 "outside underlying space?");
134 _bm.set_bit(heapWordToOffset(addr));
135 }
136 void clear(HeapWord* addr) {
137 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
138 "outside underlying space?");
139 _bm.clear_bit(heapWordToOffset(addr));
140 }
141 bool parMark(HeapWord* addr) {
142 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
143 "outside underlying space?");
144 return _bm.par_set_bit(heapWordToOffset(addr));
145 }
146 bool parClear(HeapWord* addr) {
147 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
148 "outside underlying space?");
149 return _bm.par_clear_bit(heapWordToOffset(addr));
150 }
151 void markRange(MemRegion mr);
152 void clearAll();
153 void clearRange(MemRegion mr);
155 // Starting at the bit corresponding to "addr" (inclusive), find the next
156 // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
157 // the end of this run (stopping at "end_addr"). Return the MemRegion
158 // covering from the start of the region corresponding to the first bit
159 // of the run to the end of the region corresponding to the last bit of
160 // the run. If there is no "1" bit at or after "addr", return an empty
161 // MemRegion.
162 MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
163 };
165 // Represents a marking stack used by the CM collector.
166 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
167 class CMMarkStack VALUE_OBJ_CLASS_SPEC {
168 ConcurrentMark* _cm;
169 oop* _base; // bottom of stack
170 jint _index; // one more than last occupied index
171 jint _capacity; // max #elements
172 jint _saved_index; // value of _index saved at start of GC
173 NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
175 bool _overflow;
176 DEBUG_ONLY(bool _drain_in_progress;)
177 DEBUG_ONLY(bool _drain_in_progress_yields;)
179 public:
180 CMMarkStack(ConcurrentMark* cm);
181 ~CMMarkStack();
183 void allocate(size_t size);
185 oop pop() {
186 if (!isEmpty()) {
187 return _base[--_index] ;
188 }
189 return NULL;
190 }
192 // If overflow happens, don't do the push, and record the overflow.
193 // *Requires* that "ptr" is already marked.
194 void push(oop ptr) {
195 if (isFull()) {
196 // Record overflow.
197 _overflow = true;
198 return;
199 } else {
200 _base[_index++] = ptr;
201 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
202 }
203 }
204 // Non-block impl. Note: concurrency is allowed only with other
205 // "par_push" operations, not with "pop" or "drain". We would need
206 // parallel versions of them if such concurrency was desired.
207 void par_push(oop ptr);
209 // Pushes the first "n" elements of "ptr_arr" on the stack.
210 // Non-block impl. Note: concurrency is allowed only with other
211 // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
212 void par_adjoin_arr(oop* ptr_arr, int n);
214 // Pushes the first "n" elements of "ptr_arr" on the stack.
215 // Locking impl: concurrency is allowed only with
216 // "par_push_arr" and/or "par_pop_arr" operations, which use the same
217 // locking strategy.
218 void par_push_arr(oop* ptr_arr, int n);
220 // If this returns false, the stack was empty. Otherwise, removes up to "max"
221 // elements from the stack, and transfers them to "ptr_arr" in an
222 // unspecified order. The actual number transferred is given in "n" ("n
223 // == 0" is deliberately redundant with the return value.) Locking impl:
224 // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
225 // operations, which use the same locking strategy.
226 bool par_pop_arr(oop* ptr_arr, int max, int* n);
228 // Drain the mark stack, applying the given closure to all fields of
229 // objects on the stack. (That is, continue until the stack is empty,
230 // even if closure applications add entries to the stack.) The "bm"
231 // argument, if non-null, may be used to verify that only marked objects
232 // are on the mark stack. If "yield_after" is "true", then the
233 // concurrent marker performing the drain offers to yield after
234 // processing each object. If a yield occurs, stops the drain operation
235 // and returns false. Otherwise, returns true.
236 template<class OopClosureClass>
237 bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);
239 bool isEmpty() { return _index == 0; }
240 bool isFull() { return _index == _capacity; }
241 int maxElems() { return _capacity; }
243 bool overflow() { return _overflow; }
244 void clear_overflow() { _overflow = false; }
246 int size() { return _index; }
248 void setEmpty() { _index = 0; clear_overflow(); }
250 // Record the current index.
251 void note_start_of_gc();
253 // Make sure that we have not added any entries to the stack during GC.
254 void note_end_of_gc();
256 // iterate over the oops in the mark stack, up to the bound recorded via
257 // the call above.
258 void oops_do(OopClosure* f);
259 };
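// Illustrative sketch (not part of the original file): one way the
// templated drain() above could be invoked. The wrapper name and the
// closure type parameter are assumptions; any closure with the interface
// drain() expects would do.
#if 0
template <class OopClosureClass>
static bool example_drain_to_empty(CMMarkStack* stack,
                                   OopClosureClass* cl,
                                   CMBitMap* bm) {
  // Keeps draining even as the closure pushes new entries; returns false
  // if the concurrent marker yielded before the stack became empty.
  return stack->drain(cl, bm, true /* yield_after */);
}
#endif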
261 class CMRegionStack VALUE_OBJ_CLASS_SPEC {
262 MemRegion* _base;
263 jint _capacity;
264 jint _index;
265 jint _oops_do_bound;
266 bool _overflow;
267 public:
268 CMRegionStack();
269 ~CMRegionStack();
270 void allocate(size_t size);
272 // This is lock-free; assumes that it will only be called in parallel
273 // with other "push" operations (no pops).
274 void push_lock_free(MemRegion mr);
276 // Lock-free; assumes that it will only be called in parallel
277 // with other "pop" operations (no pushes).
278 MemRegion pop_lock_free();
280 #if 0
281 // The routines that manipulate the region stack with a lock are
282 // not currently used. They should be retained, however, as a
283 // diagnostic aid.
285 // These two are the implementations that use a lock. They can be
286 // called concurrently with each other but they should not be called
287 // concurrently with the lock-free versions (push() / pop()).
288 void push_with_lock(MemRegion mr);
289 MemRegion pop_with_lock();
290 #endif
292 bool isEmpty() { return _index == 0; }
293 bool isFull() { return _index == _capacity; }
295 bool overflow() { return _overflow; }
296 void clear_overflow() { _overflow = false; }
298 int size() { return _index; }
300 // It iterates over the entries in the region stack and it
301 // invalidates (i.e. assigns MemRegion()) the ones that point to
302 // regions in the collection set.
303 bool invalidate_entries_into_cset();
305 // This sets an upper bound beyond which the iteration in
306 // invalidate_entries_into_cset() will not go. This prevents
307 // newly-added entries from being unnecessarily scanned.
308 void set_oops_do_bound() {
309 _oops_do_bound = _index;
310 }
312 void setEmpty() { _index = 0; clear_overflow(); }
313 };
315 class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
316 private:
317 #ifndef PRODUCT
318 uintx _num_remaining;
319 bool _force;
320 #endif // !defined(PRODUCT)
322 public:
323 void init() PRODUCT_RETURN;
324 void update() PRODUCT_RETURN;
325 bool should_force() PRODUCT_RETURN_( return false; );
326 };
328 // this will enable a variety of different statistics per GC task
329 #define _MARKING_STATS_ 0
330 // this will enable the higher verbose levels
331 #define _MARKING_VERBOSE_ 0
333 #if _MARKING_STATS_
334 #define statsOnly(statement) \
335 do { \
336 statement ; \
337 } while (0)
338 #else // _MARKING_STATS_
339 #define statsOnly(statement) \
340 do { \
341 } while (0)
342 #endif // _MARKING_STATS_
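// Example use (illustrative): wrap per-task counter updates so that they
// compile away entirely when _MARKING_STATS_ is 0, e.g.
//   statsOnly( _local_pushes += 1 );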
344 typedef enum {
345 no_verbose = 0, // verbose turned off
346 stats_verbose, // only prints stats at the end of marking
347 low_verbose, // low verbose, mostly per region and per major event
348 medium_verbose, // a bit more detailed than low
349 high_verbose // per object verbose
350 } CMVerboseLevel;
352 class YoungList;
354 // Root Regions are regions that are not empty at the beginning of a
355 // marking cycle and which we might collect during an evacuation pause
356 // while the cycle is active. Given that, during evacuation pauses, we
357 // do not copy objects that are explicitly marked, what we have to do
358 // for the root regions is to scan them and mark all objects reachable
359 // from them. According to the SATB assumptions, we only need to visit
360 // each object once during marking. So, as long as we finish this scan
361 // before the next evacuation pause, we can copy the objects from the
362 // root regions without having to mark them or do anything else to them.
363 //
364 // Currently, we only support root region scanning once (at the start
365 // of the marking cycle) and the root regions are all the survivor
366 // regions populated during the initial-mark pause.
367 class CMRootRegions VALUE_OBJ_CLASS_SPEC {
368 private:
369 YoungList* _young_list;
370 ConcurrentMark* _cm;
372 volatile bool _scan_in_progress;
373 volatile bool _should_abort;
374 HeapRegion* volatile _next_survivor;
376 public:
377 CMRootRegions();
378 // We actually do most of the initialization in this method.
379 void init(G1CollectedHeap* g1h, ConcurrentMark* cm);
381 // Reset the claiming / scanning of the root regions.
382 void prepare_for_scan();
384 // Forces get_next() to return NULL so that the iteration aborts early.
385 void abort() { _should_abort = true; }
387 // Return true if the CM threads are actively scanning root regions,
388 // false otherwise.
389 bool scan_in_progress() { return _scan_in_progress; }
391 // Claim the next root region to scan atomically, or return NULL if
392 // all have been claimed.
393 HeapRegion* claim_next();
395 // Flag that we're done with root region scanning and notify anyone
396 // who's waiting on it. If aborted is false, assume that all regions
397 // have been claimed.
398 void scan_finished();
400 // If CM threads are still scanning root regions, wait until they
401 // are done. Return true if we had to wait, false otherwise.
402 bool wait_until_scan_finished();
403 };
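// Illustrative sketch (not part of the original file): how concurrent
// marking workers would drive a scan with the claiming protocol above.
// The function name and the per-region scanning placeholder are assumed.
#if 0
static void example_scan_root_regions(CMRootRegions* root_regions,
                                      uint worker_id) {
  if (!root_regions->scan_in_progress()) {
    return;  // no root regions to scan in this cycle
  }
  // Each worker claims survivor regions until none are left; claim_next()
  // hands out every region exactly once across all workers.
  for (HeapRegion* hr = root_regions->claim_next();
       hr != NULL;
       hr = root_regions->claim_next()) {
    // ... mark everything reachable from the objects in hr, counting
    //     them under worker_id ...
  }
  // Whoever finishes last calls scan_finished(), waking up any evacuation
  // pause blocked in wait_until_scan_finished().
}
#endif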
405 class ConcurrentMarkThread;
407 class ConcurrentMark : public CHeapObj {
408 friend class ConcurrentMarkThread;
409 friend class CMTask;
410 friend class CMBitMapClosure;
411 friend class CSetMarkOopClosure;
412 friend class CMGlobalObjectClosure;
413 friend class CMRemarkTask;
414 friend class CMConcurrentMarkingTask;
415 friend class G1ParNoteEndTask;
416 friend class CalcLiveObjectsClosure;
417 friend class G1CMRefProcTaskProxy;
418 friend class G1CMRefProcTaskExecutor;
419 friend class G1CMParKeepAliveAndDrainClosure;
420 friend class G1CMParDrainMarkingStackClosure;
422 protected:
423 ConcurrentMarkThread* _cmThread; // the thread doing the work
424 G1CollectedHeap* _g1h; // the heap.
425 uint _parallel_marking_threads; // the number of marking
426 // threads we're using
427 uint _max_parallel_marking_threads; // max number of marking
428 // threads we'll ever use
429 double _sleep_factor; // how much we have to sleep, with
430 // respect to the work we just did, to
431 // meet the marking overhead goal
432 double _marking_task_overhead; // marking target overhead for
433 // a single task
435 // same as the two above, but for the cleanup task
436 double _cleanup_sleep_factor;
437 double _cleanup_task_overhead;
439 FreeRegionList _cleanup_list;
441 // Concurrent marking support structures
442 CMBitMap _markBitMap1;
443 CMBitMap _markBitMap2;
444 CMBitMapRO* _prevMarkBitMap; // completed mark bitmap
445 CMBitMap* _nextMarkBitMap; // under-construction mark bitmap
446 bool _at_least_one_mark_complete;
448 BitMap _region_bm;
449 BitMap _card_bm;
451 // Heap bounds
452 HeapWord* _heap_start;
453 HeapWord* _heap_end;
455 // Root region tracking and claiming.
456 CMRootRegions _root_regions;
458 // For gray objects
459 CMMarkStack _markStack; // Grey objects behind global finger.
460 CMRegionStack _regionStack; // Grey regions behind global finger.
461 HeapWord* volatile _finger; // the global finger, region aligned,
462 // always points to the end of the
463 // last claimed region
465 // marking tasks
466 uint _max_task_num; // maximum task number
467 uint _active_tasks; // task num currently active
468 CMTask** _tasks; // task queue array (max_task_num len)
469 CMTaskQueueSet* _task_queues; // task queue set
470 ParallelTaskTerminator _terminator; // for termination
472 // Two sync barriers that are used to synchronise tasks when an
473 // overflow occurs. The algorithm is the following. All tasks enter
474 // the first one to ensure that they have all stopped manipulating
475 // the global data structures. After they exit it, they re-initialise
476 // their data structures and task 0 re-initialises the global data
477 // structures. Then, they enter the second sync barrier. This
478 // ensures that no task starts doing work before all data
479 // structures (local and global) have been re-initialised. When they
480 // exit it, they are free to start working again.
481 WorkGangBarrierSync _first_overflow_barrier_sync;
482 WorkGangBarrierSync _second_overflow_barrier_sync;
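  // Illustrative sketch of the rendezvous described above, as each task
  // would execute it (CMTask is a friend of this class, which is how the
  // real code reaches these protected barriers); surrounding code omitted:
  #if 0
    _cm->enter_first_sync_barrier(_task_id);   // 1. quiesce
    // 2. between the barriers each task re-initialises its local data
    //    structures and task 0 re-initialises the global ones
    _cm->enter_second_sync_barrier(_task_id);  // 3. resume together
  #endif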
484 // this is set by any task, when an overflow on the global data
485 // structures is detected.
486 volatile bool _has_overflown;
487 // true: marking is concurrent, false: we're in remark
488 volatile bool _concurrent;
489 // set at the end of a Full GC so that marking aborts
490 volatile bool _has_aborted;
492 // used when remark aborts due to an overflow to indicate that
493 // another concurrent marking phase should start
494 volatile bool _restart_for_overflow;
496 // This is true from the very start of concurrent marking until the
497 // point when all the tasks complete their work. It is really used
498 // to determine the points between the end of concurrent marking and
499 // time of remark.
500 volatile bool _concurrent_marking_in_progress;
502 // verbose level
503 CMVerboseLevel _verbose_level;
505 // These two fields are used to implement the optimisation that
506 // avoids pushing objects on the global/region stack if there are
507 // no collection set regions above the lowest finger.
509 // This is the lowest finger (among the global and local fingers),
510 // which is calculated before a new collection set is chosen.
511 HeapWord* _min_finger;
512 // If this flag is true, objects/regions that are marked below the
513 // finger should be pushed on the stack(s). If this flag is
514 // false, it is safe not to push them on the stack(s).
515 bool _should_gray_objects;
517 // All of these times are in ms.
518 NumberSeq _init_times;
519 NumberSeq _remark_times;
520 NumberSeq _remark_mark_times;
521 NumberSeq _remark_weak_ref_times;
522 NumberSeq _cleanup_times;
523 double _total_counting_time;
524 double _total_rs_scrub_time;
526 double* _accum_task_vtime; // accumulated task vtime
528 FlexibleWorkGang* _parallel_workers;
530 ForceOverflowSettings _force_overflow_conc;
531 ForceOverflowSettings _force_overflow_stw;
533 void weakRefsWork(bool clear_all_soft_refs);
535 void swapMarkBitMaps();
537 // It resets the global marking data structures, as well as the
538 // task local ones; should be called during initial mark.
539 void reset();
540 // It resets all the marking data structures.
541 void clear_marking_state(bool clear_overflow = true);
543 // It should be called to indicate which phase we're in (concurrent
544 // mark or remark) and how many threads are currently active.
545 void set_phase(uint active_tasks, bool concurrent);
546 // We do this after we're done with marking so that the marking data
547 // structures are initialised to a sensible and predictable state.
548 void set_non_marking_state();
550 // prints all gathered CM-related statistics
551 void print_stats();
553 bool cleanup_list_is_empty() {
554 return _cleanup_list.is_empty();
555 }
557 // accessor methods
558 uint parallel_marking_threads() { return _parallel_marking_threads; }
559 uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
560 double sleep_factor() { return _sleep_factor; }
561 double marking_task_overhead() { return _marking_task_overhead;}
562 double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
563 double cleanup_task_overhead() { return _cleanup_task_overhead;}
565 HeapWord* finger() { return _finger; }
566 bool concurrent() { return _concurrent; }
567 uint active_tasks() { return _active_tasks; }
568 ParallelTaskTerminator* terminator() { return &_terminator; }
570 // It claims the next available region to be scanned by a marking
571 // task. It might return NULL if the next region is empty or we have
572 // run out of regions. In the latter case, out_of_regions()
573 // determines whether we've really run out of regions or the task
574 // should call claim_region() again. This might seem a bit
575 // awkward. Originally, the code was written so that claim_region()
576 // either successfully returned with a non-empty region or there
577 // were no more regions to be claimed. The problem with this was
578 // that, in certain circumstances, it iterated over large chunks of
579 // the heap finding only empty regions and, while it was working, it
580 // was preventing the calling task from calling its regular clock
581 // method. So, this way, each task will spend very little time in
582 // claim_region() and is allowed to call the regular clock method
583 // frequently.
584 HeapRegion* claim_region(int task);
586 // It determines whether we've run out of regions to scan.
587 bool out_of_regions() { return _finger == _heap_end; }
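  // Illustrative sketch of the claiming loop described above, as a marking
  // task (a friend of this class) would run it; surrounding code omitted:
  #if 0
    while (!_cm->out_of_regions() && !has_aborted()) {
      HeapRegion* hr = _cm->claim_region(_task_id);
      if (hr != NULL) {
        setup_for_region(hr);
        // ... scan and mark the objects in the claimed region ...
      }
      // A NULL result just means "ask again"; returning quickly keeps the
      // task free to run its regular clock method between attempts.
      regular_clock_call();
    }
  #endif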
589 // Returns the task with the given id
590 CMTask* task(int id) {
591 assert(0 <= id && id < (int) _active_tasks,
592 "task id not within active bounds");
593 return _tasks[id];
594 }
596 // Returns the task queue with the given id
597 CMTaskQueue* task_queue(int id) {
598 assert(0 <= id && id < (int) _active_tasks,
599 "task queue id not within active bounds");
600 return (CMTaskQueue*) _task_queues->queue(id);
601 }
603 // Returns the task queue set
604 CMTaskQueueSet* task_queues() { return _task_queues; }
606 // Access / manipulation of the overflow flag which is set to
607 // indicate that the global stack or region stack has overflown
608 bool has_overflown() { return _has_overflown; }
609 void set_has_overflown() { _has_overflown = true; }
610 void clear_has_overflown() { _has_overflown = false; }
611 bool restart_for_overflow() { return _restart_for_overflow; }
613 bool has_aborted() { return _has_aborted; }
615 // Methods to enter the two overflow sync barriers
616 void enter_first_sync_barrier(int task_num);
617 void enter_second_sync_barrier(int task_num);
619 ForceOverflowSettings* force_overflow_conc() {
620 return &_force_overflow_conc;
621 }
623 ForceOverflowSettings* force_overflow_stw() {
624 return &_force_overflow_stw;
625 }
627 ForceOverflowSettings* force_overflow() {
628 if (concurrent()) {
629 return force_overflow_conc();
630 } else {
631 return force_overflow_stw();
632 }
633 }
635 // Live Data Counting data structures...
636 // These data structures are initialized at the start of
637 // marking. They are written to while marking is active.
638 // They are aggregated during remark; the aggregated values
639 // are then used to populate the _region_bm, _card_bm, and
640 // the total live bytes, which are then subsequently updated
641 // during cleanup.
643 // An array of bitmaps (one bit map per task). Each bitmap
644 // is used to record the cards spanned by the live objects
645 // marked by that task/worker.
646 BitMap* _count_card_bitmaps;
648 // Used to record the number of marked live bytes
649 // (for each region, by worker thread).
650 size_t** _count_marked_bytes;
652 // Card index of the bottom of the G1 heap. Used for biasing indices into
653 // the card bitmaps.
654 intptr_t _heap_bottom_card_num;
656 public:
657 // Manipulation of the global mark stack.
658 // Notice that the first mark_stack_push is CAS-based, whereas the
659 // two below are Mutex-based. This is OK since the first one is only
660 // called during evacuation pauses and doesn't compete with the
661 // other two (which are called by the marking tasks during
662 // concurrent marking or remark).
663 bool mark_stack_push(oop p) {
664 _markStack.par_push(p);
665 if (_markStack.overflow()) {
666 set_has_overflown();
667 return false;
668 }
669 return true;
670 }
671 bool mark_stack_push(oop* arr, int n) {
672 _markStack.par_push_arr(arr, n);
673 if (_markStack.overflow()) {
674 set_has_overflown();
675 return false;
676 }
677 return true;
678 }
679 void mark_stack_pop(oop* arr, int max, int* n) {
680 _markStack.par_pop_arr(arr, max, n);
681 }
682 size_t mark_stack_size() { return _markStack.size(); }
683 size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
684 bool mark_stack_overflow() { return _markStack.overflow(); }
685 bool mark_stack_empty() { return _markStack.isEmpty(); }
687 // (Lock-free) Manipulation of the region stack
688 bool region_stack_push_lock_free(MemRegion mr) {
689 // Currently we only call the lock-free version during evacuation
690 // pauses.
691 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
693 _regionStack.push_lock_free(mr);
694 if (_regionStack.overflow()) {
695 set_has_overflown();
696 return false;
697 }
698 return true;
699 }
701 // Lock-free version of region-stack pop. Should only be
702 // called in tandem with other lock-free pops.
703 MemRegion region_stack_pop_lock_free() {
704 return _regionStack.pop_lock_free();
705 }
707 #if 0
708 // The routines that manipulate the region stack with a lock are
709 // not currently used. They should be retained, however, as a
710 // diagnostic aid.
712 bool region_stack_push_with_lock(MemRegion mr) {
713 // Currently we only call the lock-based version during either
714 // concurrent marking or remark.
715 assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
716 "if we are at a safepoint it should be the remark safepoint");
718 _regionStack.push_with_lock(mr);
719 if (_regionStack.overflow()) {
720 set_has_overflown();
721 return false;
722 }
723 return true;
724 }
726 MemRegion region_stack_pop_with_lock() {
727 // Currently we only call the lock-based version during either
728 // concurrent marking or remark.
729 assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
730 "if we are at a safepoint it should be the remark safepoint");
732 return _regionStack.pop_with_lock();
733 }
734 #endif
736 int region_stack_size() { return _regionStack.size(); }
737 bool region_stack_overflow() { return _regionStack.overflow(); }
738 bool region_stack_empty() { return _regionStack.isEmpty(); }
740 // Iterate over any regions that were aborted while draining the
741 // region stack (any such regions are saved in the corresponding
742 // CMTask) and invalidate (i.e. assign to the empty MemRegion())
743 // any regions that point into the collection set.
744 bool invalidate_aborted_regions_in_cset();
746 // Returns true if there are any aborted memory regions.
747 bool has_aborted_regions();
749 CMRootRegions* root_regions() { return &_root_regions; }
751 bool concurrent_marking_in_progress() {
752 return _concurrent_marking_in_progress;
753 }
754 void set_concurrent_marking_in_progress() {
755 _concurrent_marking_in_progress = true;
756 }
757 void clear_concurrent_marking_in_progress() {
758 _concurrent_marking_in_progress = false;
759 }
761 void update_accum_task_vtime(int i, double vtime) {
762 _accum_task_vtime[i] += vtime;
763 }
765 double all_task_accum_vtime() {
766 double ret = 0.0;
767 for (int i = 0; i < (int)_max_task_num; ++i)
768 ret += _accum_task_vtime[i];
769 return ret;
770 }
772 // Attempts to steal an object from the task queues of other tasks
773 bool try_stealing(int task_num, int* hash_seed, oop& obj) {
774 return _task_queues->steal(task_num, hash_seed, obj);
775 }
777 // It grays an object by first marking it. Then, if it's behind the
778 // global finger, it also pushes it on the global stack.
779 void deal_with_reference(oop obj);
781 ConcurrentMark(ReservedSpace rs, int max_regions);
782 ~ConcurrentMark();
784 ConcurrentMarkThread* cmThread() { return _cmThread; }
786 CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
787 CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
789 // Returns the number of GC threads to be used in a concurrent
790 // phase based on the number of GC threads being used in a STW
791 // phase.
792 uint scale_parallel_threads(uint n_par_threads);
794 // Calculates the number of GC threads to be used in a concurrent phase.
795 uint calc_parallel_marking_threads();
797 // The following three are interactions between CM and
798 // G1CollectedHeap
800 // This notifies CM that a root during initial-mark needs to be
801 // grayed. It is MT-safe. word_size is the size of the object in
802 // words. It is passed explicitly as sometimes we cannot calculate
803 // it from the given object because it might be in an inconsistent
804 // state (e.g., in to-space and being copied). So the caller is
805 // responsible for dealing with this issue (e.g., get the size from
806 // the from-space image when the to-space image might be
807 // inconsistent) and always passing the size. hr is the region that
808 // contains the object and it's passed optionally from callers who
809 // might already have it (no point in recalculating it).
810 inline void grayRoot(oop obj, size_t word_size,
811 uint worker_id, HeapRegion* hr = NULL);
813 // It's used during evacuation pauses to gray a region, if
814 // necessary, and it's MT-safe. It assumes that the caller has
815 // marked any objects on that region. If _should_gray_objects is
816 // true and we're still doing concurrent marking, the region is
817 // pushed on the region stack, if it is located below the global
818 // finger, otherwise we do nothing.
819 void grayRegionIfNecessary(MemRegion mr);
821 // It's used during evacuation pauses to mark and, if necessary,
822 // gray a single object and it's MT-safe. It assumes the caller did
823 // not mark the object. If _should_gray_objects is true and we're
824 // still doing concurrent marking, the object is pushed on the
825 // global stack, if it is located below the global finger, otherwise
826 // we do nothing.
827 void markAndGrayObjectIfNecessary(oop p);
829 // It iterates over the heap and for each object it comes across it
830 // will dump the contents of its reference fields, as well as
831 // liveness information for the object and its referents. The dump
832 // will be written to a file with the following name:
833 // G1PrintReachableBaseFile + "." + str.
834 // vo decides whether the prev (vo == UsePrevMarking), the next
835 // (vo == UseNextMarking) marking information, or the mark word
836 // (vo == UseMarkWord) will be used to determine the liveness of
837 // each object / referent.
838 // If all is true, all objects in the heap will be dumped, otherwise
839 // only the live ones. In the dump the following symbols / abbreviations
840 // are used:
841 // M : an explicitly live object (its bitmap bit is set)
842 // > : an implicitly live object (over tams)
843 // O : an object outside the G1 heap (typically: in the perm gen)
844 // NOT : a reference field whose referent is not live
845 // AND MARKED : indicates that an object is both explicitly and
846 // implicitly live (it should be one or the other, not both)
847 void print_reachable(const char* str,
848 VerifyOption vo, bool all) PRODUCT_RETURN;
850 // Clear the next marking bitmap (will be called concurrently).
851 void clearNextBitmap();
853 // These two do the work that needs to be done before and after the
854 // initial root checkpoint. Since this checkpoint can be done at two
855 // different points (i.e. an explicit pause or piggy-backed on a
856 // young collection), it's nice to be able to easily share the
857 // pre/post code. It might be the case that we can put everything in
858 // the post method. TP
859 void checkpointRootsInitialPre();
860 void checkpointRootsInitialPost();
862 // Scan all the root regions and mark everything reachable from
863 // them.
864 void scanRootRegions();
866 // Scan a single root region and mark everything reachable from it.
867 void scanRootRegion(HeapRegion* hr, uint worker_id);
869 // Do concurrent phase of marking, to a tentative transitive closure.
870 void markFromRoots();
872 // Process all unprocessed SATB buffers. It is called at the
873 // beginning of an evacuation pause.
874 void drainAllSATBBuffers();
876 void checkpointRootsFinal(bool clear_all_soft_refs);
877 void checkpointRootsFinalWork();
878 void cleanup();
879 void completeCleanup();
881 // Mark in the previous bitmap. NB: this is usually read-only, so use
882 // this carefully!
883 inline void markPrev(oop p);
885 // Clears marks for all objects in the given range, for the prev,
886 // next, or both bitmaps. NB: the previous bitmap is usually
887 // read-only, so use this carefully!
888 void clearRangePrevBitmap(MemRegion mr);
889 void clearRangeNextBitmap(MemRegion mr);
890 void clearRangeBothBitmaps(MemRegion mr);
892 // Notify data structures that a GC has started.
893 void note_start_of_gc() {
894 _markStack.note_start_of_gc();
895 }
897 // Notify data structures that a GC is finished.
898 void note_end_of_gc() {
899 _markStack.note_end_of_gc();
900 }
902 // Iterate over the oops in the mark stack and all local queues. It
903 // also calls invalidate_entries_into_cset() on the region stack.
904 void oops_do(OopClosure* f);
906 // Verify that there are no CSet oops on the stacks (taskqueues /
907 // global mark stack), enqueued SATB buffers, per-thread SATB
908 // buffers, and fingers (global / per-task). The boolean parameters
909 // decide which of the above data structures to verify. If marking
910 // is not in progress, it's a no-op.
911 void verify_no_cset_oops(bool verify_stacks,
912 bool verify_enqueued_buffers,
913 bool verify_thread_buffers,
914 bool verify_fingers) PRODUCT_RETURN;
916 // It is called at the end of an evacuation pause during marking so
917 // that CM is notified of where the new end of the heap is. It
918 // doesn't do anything if concurrent_marking_in_progress() is false,
919 // unless the force parameter is true.
920 void update_g1_committed(bool force = false);
922 void complete_marking_in_collection_set();
924 // It indicates that a new collection set is being chosen.
925 void newCSet();
927 // It registers a collection set heap region with CM. This is used
928 // to determine whether any heap regions are located above the finger.
929 void registerCSetRegion(HeapRegion* hr);
931 // Resets the region fields of any active CMTask whose region fields
932 // are in the collection set (i.e. the region currently claimed by
933 // the CMTask will be evacuated and may be used, subsequently, as
934 // an alloc region). When this happens the region fields in the CMTask
935 // are stale and, hence, should be cleared causing the worker thread
936 // to claim a new region.
937 void reset_active_task_region_fields_in_cset();
939 // Registers the maximum region-end associated with a set of
940 // regions with CM. Again this is used to determine whether any
941 // heap regions are located above the finger.
942 void register_collection_set_finger(HeapWord* max_finger) {
943 // max_finger is the highest heap region end of the regions currently
944 // contained in the collection set. If this value is larger than
945 // _min_finger then we need to gray objects.
946 // This routine is like registerCSetRegion but for an entire
947 // collection of regions.
948 if (max_finger > _min_finger) {
949 _should_gray_objects = true;
950 }
951 }
953 // Returns "true" if at least one mark has been completed.
954 bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }
956 bool isMarked(oop p) const {
957 assert(p != NULL && p->is_oop(), "expected an oop");
958 HeapWord* addr = (HeapWord*)p;
959 assert(addr >= _nextMarkBitMap->startWord() &&
960 addr < _nextMarkBitMap->endWord(), "in a region");
962 return _nextMarkBitMap->isMarked(addr);
963 }
965 inline bool not_yet_marked(oop p) const;
967 // XXX Debug code
968 bool containing_card_is_marked(void* p);
969 bool containing_cards_are_marked(void* start, void* last);
971 bool isPrevMarked(oop p) const {
972 assert(p != NULL && p->is_oop(), "expected an oop");
973 HeapWord* addr = (HeapWord*)p;
974 assert(addr >= _prevMarkBitMap->startWord() &&
975 addr < _prevMarkBitMap->endWord(), "in a region");
977 return _prevMarkBitMap->isMarked(addr);
978 }
980 inline bool do_yield_check(uint worker_i = 0);
981 inline bool should_yield();
983 // Called to abort the marking cycle after a Full GC takes place.
984 void abort();
986 // This prints the global/local fingers. It is used for debugging.
987 NOT_PRODUCT(void print_finger();)
989 void print_summary_info();
991 void print_worker_threads_on(outputStream* st) const;
993 // The following indicate whether a given verbose level has been
994 // set. Notice that anything above stats is conditional on
995 // _MARKING_VERBOSE_ having been set to 1
996 bool verbose_stats() {
997 return _verbose_level >= stats_verbose;
998 }
999 bool verbose_low() {
1000 return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
1001 }
1002 bool verbose_medium() {
1003 return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
1004 }
1005 bool verbose_high() {
1006 return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
1007 }
1009 // Counting data structure accessors
1011 // Returns the card number of the bottom of the G1 heap.
1012 // Used in biasing indices into accounting card bitmaps.
1013 intptr_t heap_bottom_card_num() const {
1014 return _heap_bottom_card_num;
1015 }
1017 // Returns the card bitmap for a given task or worker id.
1018 BitMap* count_card_bitmap_for(uint worker_id) {
1019 assert(0 <= worker_id && worker_id < _max_task_num, "oob");
1020 assert(_count_card_bitmaps != NULL, "uninitialized");
1021 BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
1022 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
1023 return task_card_bm;
1024 }
1026 // Returns the array containing the marked bytes for each region,
1027 // for the given worker or task id.
1028 size_t* count_marked_bytes_array_for(uint worker_id) {
1029 assert(0 <= worker_id && worker_id < _max_task_num, "oob");
1030 assert(_count_marked_bytes != NULL, "uninitialized");
1031 size_t* marked_bytes_array = _count_marked_bytes[worker_id];
1032 assert(marked_bytes_array != NULL, "uninitialized");
1033 return marked_bytes_array;
1034 }
1036 // Returns the index in the liveness accounting card table bitmap
1037 // for the given address
1038 inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
1040 // Counts the size of the given memory region in the given
1041 // marked_bytes array slot for the given HeapRegion.
1042 // Sets the bits in the given card bitmap that are associated with the
1043 // cards that are spanned by the memory region.
1044 inline void count_region(MemRegion mr, HeapRegion* hr,
1045 size_t* marked_bytes_array,
1046 BitMap* task_card_bm);
1048 // Counts the given memory region in the task/worker counting
1049 // data structures for the given worker id.
1050 inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
1052 // Counts the given memory region in the task/worker counting
1053 // data structures for the given worker id.
1054 inline void count_region(MemRegion mr, uint worker_id);
1056 // Counts the given object in the given task/worker counting
1057 // data structures.
1058 inline void count_object(oop obj, HeapRegion* hr,
1059 size_t* marked_bytes_array,
1060 BitMap* task_card_bm);
1062 // Counts the given object in the task/worker counting data
1063 // structures for the given worker id.
1064 inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
1066 // Attempts to mark the given object and, if successful, counts
1067 // the object in the given task/worker counting structures.
1068 inline bool par_mark_and_count(oop obj, HeapRegion* hr,
1069 size_t* marked_bytes_array,
1070 BitMap* task_card_bm);
1072 // Attempts to mark the given object and, if successful, counts
1073 // the object in the task/worker counting structures for the
1074 // given worker id.
1075 inline bool par_mark_and_count(oop obj, size_t word_size,
1076 HeapRegion* hr, uint worker_id);
1078 // Attempts to mark the given object and, if successful, counts
1079 // the object in the task/worker counting structures for the
1080 // given worker id.
1081 inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
1083 // Similar to the above routine but we don't know the heap region that
1084 // contains the object to be marked/counted, which this routine looks up.
1085 inline bool par_mark_and_count(oop obj, uint worker_id);
1087 // Similar to the above routine but there are times when we cannot
1088 // safely calculate the size of obj due to races and we, therefore,
1089 // pass the size in as a parameter. It is the caller's responsibility
1090 // to ensure that the size passed in for obj is valid.
1091 inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
1093 // Unconditionally mark the given object, and unconditionally count
1094 // the object in the counting structures for worker id 0.
1095 // Should *not* be called from parallel code.
1096 inline bool mark_and_count(oop obj, HeapRegion* hr);
1098 // Similar to the above routine but we don't know the heap region that
1099 // contains the object to be marked/counted, which this routine looks up.
1100 // Should *not* be called from parallel code.
1101 inline bool mark_and_count(oop obj);
1103 protected:
1104 // Clear all the per-task bitmaps and arrays used to store the
1105 // counting data.
1106 void clear_all_count_data();
1108 // Aggregates the counting data for each worker/task
1109 // that was constructed while marking. Also sets
1110 // the amount of marked bytes for each region and
1111 // the top at concurrent mark count.
1112 void aggregate_count_data();
1114 // Verification routine
1115 void verify_count_data();
1116 };
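// Illustrative sketch (not part of the original file): reaching the
// per-worker liveness counting data through the accessors above. The
// function name is an assumption.
#if 0
static void example_count_live_object(ConcurrentMark* cm, oop obj,
                                      HeapRegion* hr, uint worker_id) {
  size_t* marked_bytes = cm->count_marked_bytes_array_for(worker_id);
  BitMap* card_bm      = cm->count_card_bitmap_for(worker_id);
  // Marks obj in the next bitmap and, if the mark succeeded, adds its
  // size to the marked-bytes slot of hr and sets the spanned card bits.
  cm->par_mark_and_count(obj, hr, marked_bytes, card_bm);
}
#endif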
1118 // A class representing a marking task.
1119 class CMTask : public TerminatorTerminator {
1120 private:
1121 enum PrivateConstants {
1122 // the regular clock call is called once the number of scanned words reaches
1123 // this limit
1124 words_scanned_period = 12*1024,
1125 // the regular clock call is called once the number of visited
1126 // references reaches this limit
1127 refs_reached_period = 384,
1128 // initial value for the hash seed, used in the work stealing code
1129 init_hash_seed = 17,
1130 // how many entries will be transferred between global stack and
1131 // local queues
1132 global_stack_transfer_size = 16
1133 };
1135 int _task_id;
1136 G1CollectedHeap* _g1h;
1137 ConcurrentMark* _cm;
1138 CMBitMap* _nextMarkBitMap;
1139 // the task queue of this task
1140 CMTaskQueue* _task_queue;
1141 private:
1142 // the task queue set---needed for stealing
1143 CMTaskQueueSet* _task_queues;
1144 // indicates whether the task has been claimed---this is only for
1145 // debugging purposes
1146 bool _claimed;
1148 // number of calls to this task
1149 int _calls;
1151 // when the virtual timer reaches this time, the marking step should
1152 // exit
1153 double _time_target_ms;
1154 // the start time of the current marking step
1155 double _start_time_ms;
1157 // the oop closure used for iterations over oops
1158 G1CMOopClosure* _cm_oop_closure;
1160 // the region this task is scanning, NULL if we're not scanning any
1161 HeapRegion* _curr_region;
1162 // the local finger of this task, NULL if we're not scanning a region
1163 HeapWord* _finger;
1164 // limit of the region this task is scanning, NULL if we're not scanning one
1165 HeapWord* _region_limit;
1167 // This is used only when we scan regions popped from the region
1168 // stack. It records the last object we scanned on such a region.
1169 // It is used to ensure that, if we abort region
1170 // iteration, we do not rescan the first part of the region. This
1171 // should be NULL when we're not scanning a region from the region
1172 // stack.
1173 HeapWord* _region_finger;
1175 // If we abort while scanning a region we record the remaining
1176 // unscanned portion and check this field when marking restarts.
1177 // This avoids having to push on the region stack while other
1178 // marking threads may still be popping regions.
1179 // If we were to push the unscanned portion directly to the
1180 // region stack then we would need to use the locking versions
1181 // of the push and pop operations.
1182 MemRegion _aborted_region;
1184 // the number of words this task has scanned
1185 size_t _words_scanned;
1186 // When _words_scanned reaches this limit, the regular clock is
1187 // called. Notice that this might be decreased under certain
1188 // circumstances (i.e. when we believe that we did an expensive
1189 // operation).
1190 size_t _words_scanned_limit;
1191 // the initial value of _words_scanned_limit (i.e. what it was
1192 // before it was decreased).
1193 size_t _real_words_scanned_limit;
1195 // the number of references this task has visited
1196 size_t _refs_reached;
1197 // When _refs_reached reaches this limit, the regular clock is
1198 // called. Notice that this might be decreased under certain
1199 // circumstances (i.e. when we believe that we did an expensive
1200 // operation).
1201 size_t _refs_reached_limit;
1202 // the initial value of _refs_reached_limit (i.e. what it was before
1203 // it was decreased).
1204 size_t _real_refs_reached_limit;
1206 // used by the work stealing stuff
1207 int _hash_seed;
1208 // if this is true, then the task has aborted for some reason
1209 bool _has_aborted;
1210 // set when the task aborts because it has met its time quota
1211 bool _has_timed_out;
1212 // true when we're draining SATB buffers; this avoids the task
1213 // aborting due to SATB buffers being available (as we're already
1214 // dealing with them)
1215 bool _draining_satb_buffers;
1217 // number sequence of past step times
1218 NumberSeq _step_times_ms;
1219 // elapsed time of this task
1220 double _elapsed_time_ms;
1221 // termination time of this task
1222 double _termination_time_ms;
1223 // when this task got into the termination protocol
1224 double _termination_start_time_ms;
1226 // true when the task is in a concurrent phase, false when it is
1227 // in the remark phase (so, in the latter case, we do not have to
1228 // check all the things that we have to check during the concurrent
1229 // phase, i.e. SATB buffer availability...)
1230 bool _concurrent;
1232 TruncatedSeq _marking_step_diffs_ms;
1234 // Counting data structures. Embedding the task's marked_bytes_array
1235 // and card bitmap into the actual task saves having to go through
1236 // the ConcurrentMark object.
1237 size_t* _marked_bytes_array;
1238 BitMap* _card_bm;
1240 // LOTS of statistics related to this task
1241 #if _MARKING_STATS_
1242 NumberSeq _all_clock_intervals_ms;
1243 double _interval_start_time_ms;
1245 int _aborted;
1246 int _aborted_overflow;
1247 int _aborted_cm_aborted;
1248 int _aborted_yield;
1249 int _aborted_timed_out;
1250 int _aborted_satb;
1251 int _aborted_termination;
1253 int _steal_attempts;
1254 int _steals;
1256 int _clock_due_to_marking;
1257 int _clock_due_to_scanning;
1259 int _local_pushes;
1260 int _local_pops;
1261 int _local_max_size;
1262 int _objs_scanned;
1264 int _global_pushes;
1265 int _global_pops;
1266 int _global_max_size;
1268 int _global_transfers_to;
1269 int _global_transfers_from;
1271 int _region_stack_pops;
1273 int _regions_claimed;
1274 int _objs_found_on_bitmap;
1276 int _satb_buffers_processed;
1277 #endif // _MARKING_STATS_
1279 // it updates the local fields after this task has claimed
1280 // a new region to scan
1281 void setup_for_region(HeapRegion* hr);
1282 // it brings up-to-date the limit of the region
1283 void update_region_limit();
1285 // called when either the words scanned or the refs visited limit
1286 // has been reached
1287 void reached_limit();
1288 // recalculates the words scanned and refs visited limits
1289 void recalculate_limits();
1290 // decreases the words scanned and refs visited limits when we reach
1291 // an expensive operation
1292 void decrease_limits();
1293 // it checks whether the words scanned or refs visited reached their
1294 // respective limit and calls reached_limit() if they have
1295 void check_limits() {
1296 if (_words_scanned >= _words_scanned_limit ||
1297 _refs_reached >= _refs_reached_limit) {
1298 reached_limit();
1299 }
1300 }
1301 // this is supposed to be called regularly during a marking step as
1302 // it checks a bunch of conditions that might cause the marking step
1303 // to abort
1304 void regular_clock_call();
1305 bool concurrent() { return _concurrent; }
1307 public:
1308 // It resets the task; it should be called right at the beginning of
1309 // a marking phase.
1310 void reset(CMBitMap* _nextMarkBitMap);
1311 // it clears all the fields that correspond to a claimed region.
1312 void clear_region_fields();
1314 void set_concurrent(bool concurrent) { _concurrent = concurrent; }
1316 // The main method of this class which performs a marking step
1317 // trying not to exceed the given duration. However, it might exit
1318 // prematurely, under certain conditions (e.g. SATB buffers are
1319 // available for processing).
1320 void do_marking_step(double target_ms, bool do_stealing, bool do_termination);
1322 // These two calls start and stop the timer
1323 void record_start_time() {
1324 _elapsed_time_ms = os::elapsedTime() * 1000.0;
1325 }
1326 void record_end_time() {
1327 _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
1328 }
1330 // returns the task ID
1331 int task_id() { return _task_id; }
1333 // From TerminatorTerminator. It determines whether this task should
1334 // exit the termination protocol after it's entered it.
1335 virtual bool should_exit_termination();
1337 // Resets the local region fields after a task has finished scanning a
1338 // region; or when they have become stale as a result of the region
1339 // being evacuated.
1340 void giveup_current_region();
1342 HeapWord* finger() { return _finger; }
1344 bool has_aborted() { return _has_aborted; }
1345 void set_has_aborted() { _has_aborted = true; }
1346 void clear_has_aborted() { _has_aborted = false; }
1347 bool has_timed_out() { return _has_timed_out; }
1348 bool claimed() { return _claimed; }
1350 // Support routines for the partially scanned region that may be
1351 // recorded as a result of aborting while draining the CMRegionStack
1352 MemRegion aborted_region() { return _aborted_region; }
1353 void set_aborted_region(MemRegion mr)
1354 { _aborted_region = mr; }
1356 // Clears any recorded partially scanned region
1357 void clear_aborted_region() { set_aborted_region(MemRegion()); }
1359 void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
1361 // It grays the object by marking it and, if necessary, pushing it
1362 // on the local queue
1363 inline void deal_with_reference(oop obj);
1365 // It scans an object and visits its children.
1366 void scan_object(oop obj);
1368 // It pushes an object on the local queue.
1369 inline void push(oop obj);
1371 // These two move entries to/from the global stack.
1372 void move_entries_to_global_stack();
1373 void get_entries_from_global_stack();
1375 // It pops and scans objects from the local queue. If partially is
1376 // true, then it stops when the queue size drops to a given limit. If
1377 // partially is false, then it stops when the queue is empty.
1378 void drain_local_queue(bool partially);
1379 // It moves entries from the global stack to the local queue and
1380 // drains the local queue. If partially is true, then it stops when
1381 // both the global stack and the local queue reach a given size. If
1382 // partially is false, it tries to empty them totally.
1383 void drain_global_stack(bool partially);
1384 // It keeps picking SATB buffers and processing them until no SATB
1385 // buffers are available.
1386 void drain_satb_buffers();
1388 // It keeps popping regions from the region stack and processing
1389 // them until the region stack is empty.
1390 void drain_region_stack(BitMapClosure* closure);
1392 // moves the local finger to a new location
1393 inline void move_finger_to(HeapWord* new_finger) {
1394 assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
1395 _finger = new_finger;
1396 }
1398 // moves the region finger to a new location
1399 inline void move_region_finger_to(HeapWord* new_finger) {
1400 assert(new_finger < _cm->finger(), "invariant");
1401 _region_finger = new_finger;
1402 }
1404 CMTask(int task_num, ConcurrentMark *cm,
1405 size_t* marked_bytes, BitMap* card_bm,
1406 CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
1408 // it prints statistics associated with this task
1409 void print_stats();
1411 #if _MARKING_STATS_
1412 void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
1413 #endif // _MARKING_STATS_
1414 };
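// Illustrative sketch (not part of the original file): a driver that
// repeatedly invokes do_marking_step() while the task keeps aborting for
// transient (time quota) reasons. The 10ms target and the function name
// are assumptions, not the values the collector actually uses.
#if 0
static void example_drive_task(CMTask* task) {
  task->record_start_time();
  do {
    // A step may end early with has_aborted() set, e.g. on a timeout,
    // an overflow, or because marking as a whole was aborted.
    task->do_marking_step(10.0 /* assumed target ms */,
                          true /* do_stealing */,
                          true /* do_termination */);
  } while (task->has_aborted() && task->has_timed_out());
  task->record_end_time();
}
#endif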
1416 // Class that's used to print out per-region liveness
1417 // information. It's currently used at the end of marking and also
1418 // after we sort the old regions at the end of the cleanup operation.
1419 class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
1420 private:
1421 outputStream* _out;
1423 // Accumulators for these values.
1424 size_t _total_used_bytes;
1425 size_t _total_capacity_bytes;
1426 size_t _total_prev_live_bytes;
1427 size_t _total_next_live_bytes;
1429 // These are set up when we come across a "starts humongous" region
1430 // (as this is where most of this information is stored, not in the
1431 // subsequent "continues humongous" regions). After that, for every
1432 // region in a given humongous region series we deduce the right
1433 // values for it by simply subtracting the appropriate amount from
1434 // these fields. All these values should reach 0 after we've visited
1435 // the last region in the series.
1436 size_t _hum_used_bytes;
1437 size_t _hum_capacity_bytes;
1438 size_t _hum_prev_live_bytes;
1439 size_t _hum_next_live_bytes;
1441 static double perc(size_t val, size_t total) {
1442 if (total == 0) {
1443 return 0.0;
1444 } else {
1445 return 100.0 * ((double) val / (double) total);
1446 }
1447 }
1449 static double bytes_to_mb(size_t val) {
1450 return (double) val / (double) M;
1451 }
1453 // See the .cpp file.
1454 size_t get_hum_bytes(size_t* hum_bytes);
1455 void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
1456 size_t* prev_live_bytes, size_t* next_live_bytes);
1458 public:
1459 // The header and footer are printed in the constructor and
1460 // destructor respectively.
1461 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
1462 virtual bool doHeapRegion(HeapRegion* r);
1463 ~G1PrintRegionLivenessInfoClosure();
1464 };
1466 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP