Wed, 19 Jan 2011 19:30:42 -0500
6977804: G1: remove the zero-filling thread
Summary: This changeset removes the zero-filling thread from G1 and collapses the two free region lists we had before (the "free" and "unclean" lists) into one. The new free list uses the new heap region set / list abstractions that we will ultimately use to keep track of all regions in the heap (as sketched below). A heap region set was also introduced for the humongous regions. Finally, this change increases the concurrency between the thread that completes freeing regions (after a cleanup pause) and the rest of the system (previously, allocating a new region had to wait for that thread to complete). The changeset also includes a lot of refactoring and code simplification.
Reviewed-by: jcoomes, johnc
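
Illustrative sketch (not part of the changeset): with the "free" and "unclean" lists collapsed into a single free list, allocating a fresh region reduces to taking the head of that list, and there is no longer a zero-filling thread to wait for. The helper below is hypothetical and remove_head() is an assumed name, not necessarily the exact FreeRegionList API.

    // Hypothetical allocation path drawing from the single free list;
    // regions are no longer pre-zeroed by a background thread.
    HeapRegion* allocate_free_region(FreeRegionList* free_list) {
      if (free_list->is_empty()) {
        return NULL;                    // caller may decide to expand the heap instead
      }
      return free_list->remove_head();  // assumed API name
    }
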
1 /*
2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
28 #include "gc_implementation/g1/heapRegionSets.hpp"
29 #include "utilities/taskqueue.hpp"
31 class G1CollectedHeap;
32 class CMTask;
33 typedef GenericTaskQueue<oop> CMTaskQueue;
34 typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;
36 // Closure used by CM during concurrent reference discovery
37 // and reference processing (during remarking) to determine
38 // if a particular object is alive. It is primarily used
39 // to determine if referents of discovered reference objects
40 // are alive. An instance is also embedded into the
41 // reference processor as the _is_alive_non_header field
42 class G1CMIsAliveClosure: public BoolObjectClosure {
43 G1CollectedHeap* _g1;
44 public:
45 G1CMIsAliveClosure(G1CollectedHeap* g1) :
46 _g1(g1)
47 {}
49 void do_object(oop obj) {
50 ShouldNotCallThis();
51 }
52 bool do_object_b(oop obj);
53 };
55 // A generic CM bit map. This is essentially a wrapper around the BitMap
56 // class, with one bit per (1<<_shifter) HeapWords.
58 class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
59 protected:
60 HeapWord* _bmStartWord; // base address of range covered by map
61 size_t _bmWordSize; // map size (in #HeapWords covered)
62 const int _shifter; // map to char or bit
63 VirtualSpace _virtual_space; // memory underlying the bit map
64 BitMap _bm; // the bit map itself
66 public:
67 // constructor
68 CMBitMapRO(ReservedSpace rs, int shifter);
70 enum { do_yield = true };
72 // inquiries
73 HeapWord* startWord() const { return _bmStartWord; }
74 size_t sizeInWords() const { return _bmWordSize; }
75 // the following is one past the last word in space
76 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }
78 // read marks
80 bool isMarked(HeapWord* addr) const {
81 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
82 "outside underlying space?");
83 return _bm.at(heapWordToOffset(addr));
84 }
86 // iteration
87 bool iterate(BitMapClosure* cl) { return _bm.iterate(cl); }
88 bool iterate(BitMapClosure* cl, MemRegion mr);
90 // Return the address corresponding to the next marked bit at or after
91 // "addr", and before "limit", if "limit" is non-NULL. If there is no
92 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
93 HeapWord* getNextMarkedWordAddress(HeapWord* addr,
94 HeapWord* limit = NULL) const;
95 // Return the address corresponding to the next unmarked bit at or after
96 // "addr", and before "limit", if "limit" is non-NULL. If there is no
97 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
98 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
99 HeapWord* limit = NULL) const;
101 // conversion utilities
102 // XXX Fix these so that offsets are size_t's...
103 HeapWord* offsetToHeapWord(size_t offset) const {
104 return _bmStartWord + (offset << _shifter);
105 }
106 size_t heapWordToOffset(HeapWord* addr) const {
107 return pointer_delta(addr, _bmStartWord) >> _shifter;
108 }
109 int heapWordDiffToOffsetDiff(size_t diff) const;
110 HeapWord* nextWord(HeapWord* addr) {
111 return offsetToHeapWord(heapWordToOffset(addr) + 1);
112 }
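// Illustrative example (not in the original file): with _shifter == 0 each bit
// covers one HeapWord, so heapWordToOffset(_bmStartWord + 5) == 5 and
// offsetToHeapWord(5) == _bmStartWord + 5; with _shifter == 3 each bit covers
// 8 HeapWords, so heapWordToOffset(_bmStartWord + 16) == 2.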
114 void mostly_disjoint_range_union(BitMap* from_bitmap,
115 size_t from_start_index,
116 HeapWord* to_start_word,
117 size_t word_num);
119 // debugging
120 NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
121 };
123 class CMBitMap : public CMBitMapRO {
125 public:
126 // constructor
127 CMBitMap(ReservedSpace rs, int shifter) :
128 CMBitMapRO(rs, shifter) {}
130 // write marks
131 void mark(HeapWord* addr) {
132 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
133 "outside underlying space?");
134 _bm.at_put(heapWordToOffset(addr), true);
135 }
136 void clear(HeapWord* addr) {
137 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
138 "outside underlying space?");
139 _bm.at_put(heapWordToOffset(addr), false);
140 }
141 bool parMark(HeapWord* addr) {
142 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
143 "outside underlying space?");
144 return _bm.par_at_put(heapWordToOffset(addr), true);
145 }
146 bool parClear(HeapWord* addr) {
147 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
148 "outside underlying space?");
149 return _bm.par_at_put(heapWordToOffset(addr), false);
150 }
151 void markRange(MemRegion mr);
152 void clearAll();
153 void clearRange(MemRegion mr);
155 // Starting at the bit corresponding to "addr" (inclusive), find the next
156 // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
157 // the end of this run (stopping at "end_addr"). Return the MemRegion
158 // covering from the start of the region corresponding to the first bit
159 // of the run to the end of the region corresponding to the last bit of
160 // the run. If there is no "1" bit at or after "addr", return an empty
161 // MemRegion.
162 MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
163 };
165 // Represents a marking stack used by the CM collector.
166 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
167 class CMMarkStack VALUE_OBJ_CLASS_SPEC {
168 ConcurrentMark* _cm;
169 oop* _base; // bottom of stack
170 jint _index; // one more than last occupied index
171 jint _capacity; // max #elements
172 jint _oops_do_bound; // Number of elements to include in next iteration.
173 NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
175 bool _overflow;
176 DEBUG_ONLY(bool _drain_in_progress;)
177 DEBUG_ONLY(bool _drain_in_progress_yields;)
179 public:
180 CMMarkStack(ConcurrentMark* cm);
181 ~CMMarkStack();
183 void allocate(size_t size);
185 oop pop() {
186 if (!isEmpty()) {
187 return _base[--_index];
188 }
189 return NULL;
190 }
192 // If overflow happens, don't do the push, and record the overflow.
193 // *Requires* that "ptr" is already marked.
194 void push(oop ptr) {
195 if (isFull()) {
196 // Record overflow.
197 _overflow = true;
198 return;
199 } else {
200 _base[_index++] = ptr;
201 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
202 }
203 }
204 // Non-block impl. Note: concurrency is allowed only with other
205 // "par_push" operations, not with "pop" or "drain". We would need
206 // parallel versions of them if such concurrency was desired.
207 void par_push(oop ptr);
209 // Pushes the first "n" elements of "ptr_arr" on the stack.
210 // Non-block impl. Note: concurrency is allowed only with other
211 // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
212 void par_adjoin_arr(oop* ptr_arr, int n);
214 // Pushes the first "n" elements of "ptr_arr" on the stack.
215 // Locking impl: concurrency is allowed only with
216 // "par_push_arr" and/or "par_pop_arr" operations, which use the same
217 // locking strategy.
218 void par_push_arr(oop* ptr_arr, int n);
220 // If returns false, the array was empty. Otherwise, removes up to "max"
221 // elements from the stack, and transfers them to "ptr_arr" in an
222 // unspecified order. The actual number transferred is given in "n" ("n
223 // == 0" is deliberately redundant with the return value.) Locking impl:
224 // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
225 // operations, which use the same locking strategy.
226 bool par_pop_arr(oop* ptr_arr, int max, int* n);
228 // Drain the mark stack, applying the given closure to all fields of
229 // objects on the stack. (That is, continue until the stack is empty,
230 // even if closure applications add entries to the stack.) The "bm"
231 // argument, if non-null, may be used to verify that only marked objects
232 // are on the mark stack. If "yield_after" is "true", then the
233 // concurrent marker performing the drain offers to yield after
234 // processing each object. If a yield occurs, stops the drain operation
235 // and returns false. Otherwise, returns true.
236 template<class OopClosureClass>
237 bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);
239 bool isEmpty() { return _index == 0; }
240 bool isFull() { return _index == _capacity; }
241 int maxElems() { return _capacity; }
243 bool overflow() { return _overflow; }
244 void clear_overflow() { _overflow = false; }
246 int size() { return _index; }
248 void setEmpty() { _index = 0; clear_overflow(); }
250 // Record the current size; a subsequent "oops_do" will iterate only over
251 // indices valid at the time of this call.
252 void set_oops_do_bound(jint bound = -1) {
253 if (bound == -1) {
254 _oops_do_bound = _index;
255 } else {
256 _oops_do_bound = bound;
257 }
258 }
259 jint oops_do_bound() { return _oops_do_bound; }
260 // iterate over the oops in the mark stack, up to the bound recorded via
261 // the call above.
262 void oops_do(OopClosure* f);
263 };
265 class CMRegionStack VALUE_OBJ_CLASS_SPEC {
266 MemRegion* _base;
267 jint _capacity;
268 jint _index;
269 jint _oops_do_bound;
270 bool _overflow;
271 public:
272 CMRegionStack();
273 ~CMRegionStack();
274 void allocate(size_t size);
276 // This is lock-free; assumes that it will only be called in parallel
277 // with other "push" operations (no pops).
278 void push_lock_free(MemRegion mr);
280 // Lock-free; assumes that it will only be called in parallel
281 // with other "pop" operations (no pushes).
282 MemRegion pop_lock_free();
284 #if 0
285 // The routines that manipulate the region stack with a lock are
286 // not currently used. They should be retained, however, as a
287 // diagnostic aid.
289 // These two are the implementations that use a lock. They can be
290 // called concurrently with each other but they should not be called
291 // concurrently with the lock-free versions (push() / pop()).
292 void push_with_lock(MemRegion mr);
293 MemRegion pop_with_lock();
294 #endif
296 bool isEmpty() { return _index == 0; }
297 bool isFull() { return _index == _capacity; }
299 bool overflow() { return _overflow; }
300 void clear_overflow() { _overflow = false; }
302 int size() { return _index; }
304 // It iterates over the entries in the region stack and it
305 // invalidates (i.e. assigns MemRegion()) the ones that point to
306 // regions in the collection set.
307 bool invalidate_entries_into_cset();
309 // This gives an upper bound up to which the iteration in
310 // invalidate_entries_into_cset() will reach. This prevents newly-added
311 // entries from being unnecessarily scanned.
312 void set_oops_do_bound() {
313 _oops_do_bound = _index;
314 }
316 void setEmpty() { _index = 0; clear_overflow(); }
317 };
319 // this will enable a variety of different statistics per GC task
320 #define _MARKING_STATS_ 0
321 // this will enable the higher verbose levels
322 #define _MARKING_VERBOSE_ 0
324 #if _MARKING_STATS_
325 #define statsOnly(statement) \
326 do { \
327 statement ; \
328 } while (0)
329 #else // _MARKING_STATS_
330 #define statsOnly(statement) \
331 do { \
332 } while (0)
333 #endif // _MARKING_STATS_
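// Example use (illustrative): statsOnly( ++_local_pushes ); expands to the
// given statement when _MARKING_STATS_ is 1 and to an empty do/while otherwise.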
335 typedef enum {
336 no_verbose = 0, // verbose turned off
337 stats_verbose, // only prints stats at the end of marking
338 low_verbose, // low verbose, mostly per region and per major event
339 medium_verbose, // a bit more detailed than low
340 high_verbose // per object verbose
341 } CMVerboseLevel;
344 class ConcurrentMarkThread;
346 class ConcurrentMark: public CHeapObj {
347 friend class ConcurrentMarkThread;
348 friend class CMTask;
349 friend class CMBitMapClosure;
350 friend class CSMarkOopClosure;
351 friend class CMGlobalObjectClosure;
352 friend class CMRemarkTask;
353 friend class CMConcurrentMarkingTask;
354 friend class G1ParNoteEndTask;
355 friend class CalcLiveObjectsClosure;
357 protected:
358 ConcurrentMarkThread* _cmThread; // the thread doing the work
359 G1CollectedHeap* _g1h; // the heap.
360 size_t _parallel_marking_threads; // the number of marking
361 // threads we'll use
362 double _sleep_factor; // how much we have to sleep, with
363 // respect to the work we just did, to
364 // meet the marking overhead goal
365 double _marking_task_overhead; // marking target overhead for
366 // a single task
368 // same as the two above, but for the cleanup task
369 double _cleanup_sleep_factor;
370 double _cleanup_task_overhead;
372 FreeRegionList _cleanup_list;
374 // Concurrent marking support structures
375 CMBitMap _markBitMap1;
376 CMBitMap _markBitMap2;
377 CMBitMapRO* _prevMarkBitMap; // completed mark bitmap
378 CMBitMap* _nextMarkBitMap; // under-construction mark bitmap
379 bool _at_least_one_mark_complete;
381 BitMap _region_bm;
382 BitMap _card_bm;
384 // Heap bounds
385 HeapWord* _heap_start;
386 HeapWord* _heap_end;
388 // For gray objects
389 CMMarkStack _markStack; // Grey objects behind global finger.
390 CMRegionStack _regionStack; // Grey regions behind global finger.
391 HeapWord* volatile _finger; // the global finger, region aligned,
392 // always points to the end of the
393 // last claimed region
395 // marking tasks
396 size_t _max_task_num; // maximum task number
397 size_t _active_tasks; // task num currently active
398 CMTask** _tasks; // task queue array (max_task_num len)
399 CMTaskQueueSet* _task_queues; // task queue set
400 ParallelTaskTerminator _terminator; // for termination
402 // Two sync barriers that are used to synchronise tasks when an
403 // overflow occurs. The algorithm is the following. All tasks enter
404 // the first one to ensure that they have all stopped manipulating
405 // the global data structures. After they exit it, they re-initialise
406 // their data structures and task 0 re-initialises the global data
407 // structures. Then, they enter the second sync barrier. This
408 // ensures that no task starts doing work before all data
409 // structures (local and global) have been re-initialised. When they
410 // exit it, they are free to start working again.
411 WorkGangBarrierSync _first_overflow_barrier_sync;
412 WorkGangBarrierSync _second_overflow_barrier_sync;
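// Illustrative outline (sketch only) of the protocol described above, as seen
// from one marking task; the two barrier methods are declared further below:
//
//   enter_first_sync_barrier(task_num);    // stop touching global structures
//   // each task resets its local structures; task 0 resets the global ones
//   enter_second_sync_barrier(task_num);   // no task restarts until the reset is done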
415 // this is set by any task, when an overflow on the global data
416 // structures is detected.
417 volatile bool _has_overflown;
418 // true: marking is concurrent, false: we're in remark
419 volatile bool _concurrent;
420 // set at the end of a Full GC so that marking aborts
421 volatile bool _has_aborted;
423 // used when remark aborts due to an overflow to indicate that
424 // another concurrent marking phase should start
425 volatile bool _restart_for_overflow;
427 // This is true from the very start of concurrent marking until the
428 // point when all the tasks complete their work. It is really used
429 // to determine whether we are between the end of concurrent marking
430 // and the remark pause.
431 volatile bool _concurrent_marking_in_progress;
433 // verbose level
434 CMVerboseLevel _verbose_level;
436 // These two fields are used to implement the optimisation that
437 // avoids pushing objects on the global/region stack if there are
438 // no collection set regions above the lowest finger.
440 // This is the lowest finger (among the global and local fingers),
441 // which is calculated before a new collection set is chosen.
442 HeapWord* _min_finger;
443 // If this flag is true, objects/regions that are marked below the
444 // finger should be pushed on the stack(s). If this flag is
445 // false, it is safe not to push them on the stack(s).
446 bool _should_gray_objects;
448 // All of these times are in ms.
449 NumberSeq _init_times;
450 NumberSeq _remark_times;
451 NumberSeq _remark_mark_times;
452 NumberSeq _remark_weak_ref_times;
453 NumberSeq _cleanup_times;
454 double _total_counting_time;
455 double _total_rs_scrub_time;
457 double* _accum_task_vtime; // accumulated task vtime
459 WorkGang* _parallel_workers;
461 void weakRefsWork(bool clear_all_soft_refs);
463 void swapMarkBitMaps();
465 // It resets the global marking data structures, as well as the
466 // task local ones; should be called during initial mark.
467 void reset();
468 // It resets all the marking data structures.
469 void clear_marking_state();
471 // It should be called to indicate which phase we're in (concurrent
472 // mark or remark) and how many threads are currently active.
473 void set_phase(size_t active_tasks, bool concurrent);
474 // We do this after we're done with marking so that the marking data
475 // structures are initialised to a sensible and predictable state.
476 void set_non_marking_state();
478 // prints all gathered CM-related statistics
479 void print_stats();
481 bool cleanup_list_is_empty() {
482 return _cleanup_list.is_empty();
483 }
485 // accessor methods
486 size_t parallel_marking_threads() { return _parallel_marking_threads; }
487 double sleep_factor() { return _sleep_factor; }
488 double marking_task_overhead() { return _marking_task_overhead;}
489 double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
490 double cleanup_task_overhead() { return _cleanup_task_overhead;}
492 HeapWord* finger() { return _finger; }
493 bool concurrent() { return _concurrent; }
494 size_t active_tasks() { return _active_tasks; }
495 ParallelTaskTerminator* terminator() { return &_terminator; }
497 // It claims the next available region to be scanned by a marking
498 // task. It might return NULL if the next region is empty or we have
499 // run out of regions. In the latter case, out_of_regions()
500 // determines whether we've really run out of regions or the task
501 // should call claim_region() again. This might seem a bit
502 // awkward. Originally, the code was written so that claim_region()
503 // either successfully returned with a non-empty region or there
504 // were no more regions to be claimed. The problem with this was
505 // that, in certain circumstances, it iterated over large chunks of
506 // the heap finding only empty regions and, while it was working, it
507 // was preventing the calling task to call its regular clock
508 // method. So, this way, each task will spend very little time in
509 // claim_region() and is allowed to call the regular clock method
510 // frequently.
511 HeapRegion* claim_region(int task);
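// Illustrative caller pattern (sketch only, not code from this file): a task
// keeps retrying until it either claims a region or has truly run out,
// letting its regular clock method run between attempts:
//
//   HeapRegion* hr = claim_region(task_num);
//   while (hr == NULL && !out_of_regions()) {
//     // the task's regular clock method gets a chance to run here
//     hr = claim_region(task_num);
//   }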
513 // It determines whether we've run out of regions to scan.
514 bool out_of_regions() { return _finger == _heap_end; }
516 // Returns the task with the given id
517 CMTask* task(int id) {
518 assert(0 <= id && id < (int) _active_tasks,
519 "task id not within active bounds");
520 return _tasks[id];
521 }
523 // Returns the task queue with the given id
524 CMTaskQueue* task_queue(int id) {
525 assert(0 <= id && id < (int) _active_tasks,
526 "task queue id not within active bounds");
527 return (CMTaskQueue*) _task_queues->queue(id);
528 }
530 // Returns the task queue set
531 CMTaskQueueSet* task_queues() { return _task_queues; }
533 // Access / manipulation of the overflow flag which is set to
534 // indicate that the global stack or region stack has overflown
535 bool has_overflown() { return _has_overflown; }
536 void set_has_overflown() { _has_overflown = true; }
537 void clear_has_overflown() { _has_overflown = false; }
539 bool has_aborted() { return _has_aborted; }
540 bool restart_for_overflow() { return _restart_for_overflow; }
542 // Methods to enter the two overflow sync barriers
543 void enter_first_sync_barrier(int task_num);
544 void enter_second_sync_barrier(int task_num);
546 public:
547 // Manipulation of the global mark stack.
548 // Notice that the first mark_stack_push is CAS-based, whereas the
549 // two below are Mutex-based. This is OK since the first one is only
550 // called during evacuation pauses and doesn't compete with the
551 // other two (which are called by the marking tasks during
552 // concurrent marking or remark).
553 bool mark_stack_push(oop p) {
554 _markStack.par_push(p);
555 if (_markStack.overflow()) {
556 set_has_overflown();
557 return false;
558 }
559 return true;
560 }
561 bool mark_stack_push(oop* arr, int n) {
562 _markStack.par_push_arr(arr, n);
563 if (_markStack.overflow()) {
564 set_has_overflown();
565 return false;
566 }
567 return true;
568 }
569 void mark_stack_pop(oop* arr, int max, int* n) {
570 _markStack.par_pop_arr(arr, max, n);
571 }
572 size_t mark_stack_size() { return _markStack.size(); }
573 size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
574 bool mark_stack_overflow() { return _markStack.overflow(); }
575 bool mark_stack_empty() { return _markStack.isEmpty(); }
577 // (Lock-free) Manipulation of the region stack
578 bool region_stack_push_lock_free(MemRegion mr) {
579 // Currently we only call the lock-free version during evacuation
580 // pauses.
581 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
583 _regionStack.push_lock_free(mr);
584 if (_regionStack.overflow()) {
585 set_has_overflown();
586 return false;
587 }
588 return true;
589 }
591 // Lock-free version of region-stack pop. Should only be
592 // called in tandem with other lock-free pops.
593 MemRegion region_stack_pop_lock_free() {
594 return _regionStack.pop_lock_free();
595 }
597 #if 0
598 // The routines that manipulate the region stack with a lock are
599 // not currently used. They should be retained, however, as a
600 // diagnostic aid.
602 bool region_stack_push_with_lock(MemRegion mr) {
603 // Currently we only call the lock-based version during either
604 // concurrent marking or remark.
605 assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
606 "if we are at a safepoint it should be the remark safepoint");
608 _regionStack.push_with_lock(mr);
609 if (_regionStack.overflow()) {
610 set_has_overflown();
611 return false;
612 }
613 return true;
614 }
616 MemRegion region_stack_pop_with_lock() {
617 // Currently we only call the lock-based version during either
618 // concurrent marking or remark.
619 assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
620 "if we are at a safepoint it should be the remark safepoint");
622 return _regionStack.pop_with_lock();
623 }
624 #endif
626 int region_stack_size() { return _regionStack.size(); }
627 bool region_stack_overflow() { return _regionStack.overflow(); }
628 bool region_stack_empty() { return _regionStack.isEmpty(); }
630 // Iterate over any regions that were aborted while draining the
631 // region stack (any such regions are saved in the corresponding
632 // CMTask) and invalidate (i.e. assign to the empty MemRegion())
633 // any regions that point into the collection set.
634 bool invalidate_aborted_regions_in_cset();
636 // Returns true if there are any aborted memory regions.
637 bool has_aborted_regions();
639 bool concurrent_marking_in_progress() {
640 return _concurrent_marking_in_progress;
641 }
642 void set_concurrent_marking_in_progress() {
643 _concurrent_marking_in_progress = true;
644 }
645 void clear_concurrent_marking_in_progress() {
646 _concurrent_marking_in_progress = false;
647 }
649 void update_accum_task_vtime(int i, double vtime) {
650 _accum_task_vtime[i] += vtime;
651 }
653 double all_task_accum_vtime() {
654 double ret = 0.0;
655 for (int i = 0; i < (int)_max_task_num; ++i)
656 ret += _accum_task_vtime[i];
657 return ret;
658 }
660 // Attempts to steal an object from the task queues of other tasks
661 bool try_stealing(int task_num, int* hash_seed, oop& obj) {
662 return _task_queues->steal(task_num, hash_seed, obj);
663 }
665 // It grays an object by first marking it. Then, if it's behind the
666 // global finger, it also pushes it on the global stack.
667 void deal_with_reference(oop obj);
669 ConcurrentMark(ReservedSpace rs, int max_regions);
670 ~ConcurrentMark();
671 ConcurrentMarkThread* cmThread() { return _cmThread; }
673 CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
674 CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
676 // The following three concern the interaction between CM and
677 // G1CollectedHeap
679 // This notifies CM that a root during initial-mark needs to be
680 // grayed and it's MT-safe. Currently, we just mark it. But, in the
681 // future, we can experiment with pushing it on the stack and we can
682 // do this without changing G1CollectedHeap.
683 void grayRoot(oop p);
684 // It's used during evacuation pauses to gray a region, if
685 // necessary, and it's MT-safe. It assumes that the caller has
686 // marked any objects on that region. If _should_gray_objects is
687 // true and we're still doing concurrent marking, the region is
688 // pushed on the region stack, if it is located below the global
689 // finger, otherwise we do nothing.
690 void grayRegionIfNecessary(MemRegion mr);
691 // It's used during evacuation pauses to mark and, if necessary,
692 // gray a single object and it's MT-safe. It assumes the caller did
693 // not mark the object. If _should_gray_objects is true and we're
694 // still doing concurrent marking, the object is pushed on the
695 // global stack, if it is located below the global finger, otherwise
696 // we do nothing.
697 void markAndGrayObjectIfNecessary(oop p);
699 // It iterates over the heap and for each object it comes across it
700 // will dump the contents of its reference fields, as well as
701 // liveness information for the object and its referents. The dump
702 // will be written to a file with the following name:
703 // G1PrintReachableBaseFile + "." + str. use_prev_marking decides
704 // whether the prev (use_prev_marking == true) or next
705 // (use_prev_marking == false) marking information will be used to
706 // determine the liveness of each object / referent. If all is true,
707 // all objects in the heap will be dumped, otherwise only the live
708 // ones. In the dump the following symbols / abbreviations are used:
709 // M : an explicitly live object (its bitmap bit is set)
710 // > : an implicitly live object (over tams)
711 // O : an object outside the G1 heap (typically: in the perm gen)
712 // NOT : a reference field whose referent is not live
713 // AND MARKED : indicates that an object is both explicitly and
714 // implicitly live (it should be one or the other, not both)
715 void print_reachable(const char* str,
716 bool use_prev_marking, bool all) PRODUCT_RETURN;
718 // Clear the next marking bitmap (will be called concurrently).
719 void clearNextBitmap();
721 // main CM steps and related support
722 void checkpointRootsInitial();
724 // These two do the work that needs to be done before and after the
725 // initial root checkpoint. Since this checkpoint can be done at two
726 // different points (i.e. an explicit pause or piggy-backed on a
727 // young collection), it's nice to be able to easily share the
728 // pre/post code. It might be the case that we can put everything in
729 // the post method. TP
730 void checkpointRootsInitialPre();
731 void checkpointRootsInitialPost();
733 // Do concurrent phase of marking, to a tentative transitive closure.
734 void markFromRoots();
736 // Process all unprocessed SATB buffers. It is called at the
737 // beginning of an evacuation pause.
738 void drainAllSATBBuffers();
740 void checkpointRootsFinal(bool clear_all_soft_refs);
741 void checkpointRootsFinalWork();
742 void calcDesiredRegions();
743 void cleanup();
744 void completeCleanup();
746 // Mark in the previous bitmap. NB: this is usually read-only, so use
747 // this carefully!
748 void markPrev(oop p);
749 void clear(oop p);
750 // Clears marks for all objects in the given range, for both prev and
751 // next bitmaps. NB: the previous bitmap is usually read-only, so use
752 // this carefully!
753 void clearRangeBothMaps(MemRegion mr);
755 // Record the current top of the mark and region stacks; a
756 // subsequent oops_do() on the mark stack and
757 // invalidate_entries_into_cset() on the region stack will iterate
758 // only over indices valid at the time of this call.
759 void set_oops_do_bound() {
760 _markStack.set_oops_do_bound();
761 _regionStack.set_oops_do_bound();
762 }
763 // Iterate over the oops in the mark stack and all local queues. It
764 // also calls invalidate_entries_into_cset() on the region stack.
765 void oops_do(OopClosure* f);
766 // It is called at the end of an evacuation pause during marking so
767 // that CM is notified of where the new end of the heap is. It
768 // doesn't do anything if concurrent_marking_in_progress() is false,
769 // unless the force parameter is true.
770 void update_g1_committed(bool force = false);
772 void complete_marking_in_collection_set();
774 // It indicates that a new collection set is being chosen.
775 void newCSet();
776 // It registers a collection set heap region with CM. This is used
777 // to determine whether any heap regions are located above the finger.
778 void registerCSetRegion(HeapRegion* hr);
780 // Registers the maximum region-end associated with a set of
781 // regions with CM. Again this is used to determine whether any
782 // heap regions are located above the finger.
783 void register_collection_set_finger(HeapWord* max_finger) {
784 // max_finger is the highest heap region end of the regions currently
785 // contained in the collection set. If this value is larger than
786 // _min_finger then we need to gray objects.
787 // This routine is like registerCSetRegion but for an entire
788 // collection of regions.
789 if (max_finger > _min_finger)
790 _should_gray_objects = true;
791 }
793 // Returns "true" if at least one mark has been completed.
794 bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }
796 bool isMarked(oop p) const {
797 assert(p != NULL && p->is_oop(), "expected an oop");
798 HeapWord* addr = (HeapWord*)p;
799 assert(addr >= _nextMarkBitMap->startWord() &&
800 addr < _nextMarkBitMap->endWord(), "in a region");
802 return _nextMarkBitMap->isMarked(addr);
803 }
805 inline bool not_yet_marked(oop p) const;
807 // XXX Debug code
808 bool containing_card_is_marked(void* p);
809 bool containing_cards_are_marked(void* start, void* last);
811 bool isPrevMarked(oop p) const {
812 assert(p != NULL && p->is_oop(), "expected an oop");
813 HeapWord* addr = (HeapWord*)p;
814 assert(addr >= _prevMarkBitMap->startWord() &&
815 addr < _prevMarkBitMap->endWord(), "in a region");
817 return _prevMarkBitMap->isMarked(addr);
818 }
820 inline bool do_yield_check(int worker_i = 0);
821 inline bool should_yield();
823 // Called to abort the marking cycle after a Full GC takes place.
824 void abort();
826 // This prints the global/local fingers. It is used for debugging.
827 NOT_PRODUCT(void print_finger();)
829 void print_summary_info();
831 void print_worker_threads_on(outputStream* st) const;
833 // The following indicate whether a given verbose level has been
834 // set. Notice that anything above stats is conditional on
835 // _MARKING_VERBOSE_ having been set to 1
836 bool verbose_stats()
837 { return _verbose_level >= stats_verbose; }
838 bool verbose_low()
839 { return _MARKING_VERBOSE_ && _verbose_level >= low_verbose; }
840 bool verbose_medium()
841 { return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose; }
842 bool verbose_high()
843 { return _MARKING_VERBOSE_ && _verbose_level >= high_verbose; }
844 };
846 // A class representing a marking task.
847 class CMTask : public TerminatorTerminator {
848 private:
849 enum PrivateConstants {
850 // the regular clock call is called once the number of scanned words reaches
851 // this limit
852 words_scanned_period = 12*1024,
853 // the regular clock call is called once the number of visited
854 // references reaches this limit
855 refs_reached_period = 384,
856 // initial value for the hash seed, used in the work stealing code
857 init_hash_seed = 17,
858 // how many entries will be transferred between global stack and
859 // local queues
860 global_stack_transfer_size = 16
861 };
863 int _task_id;
864 G1CollectedHeap* _g1h;
865 ConcurrentMark* _cm;
866 CMBitMap* _nextMarkBitMap;
867 // the task queue of this task
868 CMTaskQueue* _task_queue;
869 private:
870 // the task queue set---needed for stealing
871 CMTaskQueueSet* _task_queues;
872 // indicates whether the task has been claimed---this is only for
873 // debugging purposes
874 bool _claimed;
876 // number of calls to this task
877 int _calls;
879 // when the virtual timer reaches this time, the marking step should
880 // exit
881 double _time_target_ms;
882 // the start time of the current marking step
883 double _start_time_ms;
885 // the oop closure used for iterations over oops
886 OopClosure* _oop_closure;
888 // the region this task is scanning, NULL if we're not scanning any
889 HeapRegion* _curr_region;
890 // the local finger of this task, NULL if we're not scanning a region
891 HeapWord* _finger;
892 // limit of the region this task is scanning, NULL if we're not scanning one
893 HeapWord* _region_limit;
895 // This is used only when we scan regions popped from the region
896 // stack. It records the last object we scanned on such a
897 // region. It is used to ensure that, if we abort region
898 // iteration, we do not rescan the first part of the region. This
899 // should be NULL when we're not scanning a region from the region
900 // stack.
901 HeapWord* _region_finger;
903 // If we abort while scanning a region we record the remaining
904 // unscanned portion and check this field when marking restarts.
905 // This avoids having to push on the region stack while other
906 // marking threads may still be popping regions.
907 // If we were to push the unscanned portion directly to the
908 // region stack then we would need to use locking versions
909 // of the push and pop operations.
910 MemRegion _aborted_region;
912 // the number of words this task has scanned
913 size_t _words_scanned;
914 // When _words_scanned reaches this limit, the regular clock is
915 // called. Notice that this might be decreased under certain
916 // circumstances (i.e. when we believe that we did an expensive
917 // operation).
918 size_t _words_scanned_limit;
919 // the initial value of _words_scanned_limit (i.e. what it was
920 // before it was decreased).
921 size_t _real_words_scanned_limit;
923 // the number of references this task has visited
924 size_t _refs_reached;
925 // When _refs_reached reaches this limit, the regular clock is
926 // called. Notice that this might be decreased under certain
927 // circumstances (i.e. when we believe that we did an expensive
928 // operation).
929 size_t _refs_reached_limit;
930 // the initial value of _refs_reached_limit (i.e. what it was before
931 // it was decreased).
932 size_t _real_refs_reached_limit;
934 // used by the work stealing stuff
935 int _hash_seed;
936 // if this is true, then the task has aborted for some reason
937 bool _has_aborted;
938 // set when the task aborts because it has met its time quota
939 bool _has_aborted_timed_out;
940 // true when we're draining SATB buffers; this avoids the task
941 // aborting due to SATB buffers being available (as we're already
942 // dealing with them)
943 bool _draining_satb_buffers;
945 // number sequence of past step times
946 NumberSeq _step_times_ms;
947 // elapsed time of this task
948 double _elapsed_time_ms;
949 // termination time of this task
950 double _termination_time_ms;
951 // when this task got into the termination protocol
952 double _termination_start_time_ms;
954 // true when the task is in a concurrent phase, false when it is
955 // in the remark phase (so, in the latter case, we do not have to
956 // check all the things that we have to check during the concurrent
957 // phase, i.e. SATB buffer availability...)
958 bool _concurrent;
960 TruncatedSeq _marking_step_diffs_ms;
962 // LOTS of statistics related to this task
963 #if _MARKING_STATS_
964 NumberSeq _all_clock_intervals_ms;
965 double _interval_start_time_ms;
967 int _aborted;
968 int _aborted_overflow;
969 int _aborted_cm_aborted;
970 int _aborted_yield;
971 int _aborted_timed_out;
972 int _aborted_satb;
973 int _aborted_termination;
975 int _steal_attempts;
976 int _steals;
978 int _clock_due_to_marking;
979 int _clock_due_to_scanning;
981 int _local_pushes;
982 int _local_pops;
983 int _local_max_size;
984 int _objs_scanned;
986 int _global_pushes;
987 int _global_pops;
988 int _global_max_size;
990 int _global_transfers_to;
991 int _global_transfers_from;
993 int _region_stack_pops;
995 int _regions_claimed;
996 int _objs_found_on_bitmap;
998 int _satb_buffers_processed;
999 #endif // _MARKING_STATS_
1001 // it updates the local fields after this task has claimed
1002 // a new region to scan
1003 void setup_for_region(HeapRegion* hr);
1004 // it brings the limit of the region up to date
1005 void update_region_limit();
1006 // it resets the local fields after a task has finished scanning a
1007 // region
1008 void giveup_current_region();
1010 // called when either the words scanned or the refs visited limit
1011 // has been reached
1012 void reached_limit();
1013 // recalculates the words scanned and refs visited limits
1014 void recalculate_limits();
1015 // decreases the words scanned and refs visited limits when we reach
1016 // an expensive operation
1017 void decrease_limits();
1018 // it checks whether the words scanned or refs visited reached their
1019 // respective limit and calls reached_limit() if they have
1020 void check_limits() {
1021 if (_words_scanned >= _words_scanned_limit ||
1022 _refs_reached >= _refs_reached_limit)
1023 reached_limit();
1024 }
1025 // this is supposed to be called regularly during a marking step as
1026 // it checks a bunch of conditions that might cause the marking step
1027 // to abort
1028 void regular_clock_call();
1029 bool concurrent() { return _concurrent; }
1031 public:
1032 // It resets the task; it should be called right at the beginning of
1033 // a marking phase.
1034 void reset(CMBitMap* _nextMarkBitMap);
1035 // it clears all the fields that correspond to a claimed region.
1036 void clear_region_fields();
1038 void set_concurrent(bool concurrent) { _concurrent = concurrent; }
1040 // The main method of this class which performs a marking step
1041 // trying not to exceed the given duration. However, it might exit
1042 // prematurely, according to some conditions (i.e. SATB buffers are
1043 // available for processing).
1044 void do_marking_step(double target_ms);
1046 // These two calls start and stop the timer
1047 void record_start_time() {
1048 _elapsed_time_ms = os::elapsedTime() * 1000.0;
1049 }
1050 void record_end_time() {
1051 _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
1052 }
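// Note (added comment): record_start_time() stores a start timestamp in
// _elapsed_time_ms; record_end_time() overwrites it with the elapsed
// duration, so the field only holds a duration after the step finishes.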
1054 // returns the task ID
1055 int task_id() { return _task_id; }
1057 // From TerminatorTerminator. It determines whether this task should
1058 // exit the termination protocol after it's entered it.
1059 virtual bool should_exit_termination();
1061 HeapWord* finger() { return _finger; }
1063 bool has_aborted() { return _has_aborted; }
1064 void set_has_aborted() { _has_aborted = true; }
1065 void clear_has_aborted() { _has_aborted = false; }
1066 bool claimed() { return _claimed; }
1068 // Support routines for the partially scanned region that may be
1069 // recorded as a result of aborting while draining the CMRegionStack
1070 MemRegion aborted_region() { return _aborted_region; }
1071 void set_aborted_region(MemRegion mr)
1072 { _aborted_region = mr; }
1074 // Clears any recorded partially scanned region
1075 void clear_aborted_region() { set_aborted_region(MemRegion()); }
1077 void set_oop_closure(OopClosure* oop_closure) {
1078 _oop_closure = oop_closure;
1079 }
1081 // It grays the object by marking it and, if necessary, pushing it
1082 // on the local queue
1083 void deal_with_reference(oop obj);
1085 // It scans an object and visits its children.
1086 void scan_object(oop obj) {
1087 assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
1089 if (_cm->verbose_high())
1090 gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
1091 _task_id, (void*) obj);
1093 size_t obj_size = obj->size();
1094 _words_scanned += obj_size;
1096 obj->oop_iterate(_oop_closure);
1097 statsOnly( ++_objs_scanned );
1098 check_limits();
1099 }
1101 // It pushes an object on the local queue.
1102 void push(oop obj);
1104 // These two move entries to/from the global stack.
1105 void move_entries_to_global_stack();
1106 void get_entries_from_global_stack();
1108 // It pops and scans objects from the local queue. If partially is
1109 // true, then it stops when the queue size reaches a given limit. If
1110 // partially is false, then it stops when the queue is empty.
1111 void drain_local_queue(bool partially);
1112 // It moves entries from the global stack to the local queue and
1113 // drains the local queue. If partially is true, then it stops when
1114 // both the global stack and the local queue reach a given size. If
1115 // partially is false, it tries to empty them totally.
1116 void drain_global_stack(bool partially);
1117 // It keeps picking SATB buffers and processing them until no SATB
1118 // buffers are available.
1119 void drain_satb_buffers();
1120 // It keeps popping regions from the region stack and processing
1121 // them until the region stack is empty.
1122 void drain_region_stack(BitMapClosure* closure);
1124 // moves the local finger to a new location
1125 inline void move_finger_to(HeapWord* new_finger) {
1126 assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
1127 _finger = new_finger;
1128 }
1130 // moves the region finger to a new location
1131 inline void move_region_finger_to(HeapWord* new_finger) {
1132 assert(new_finger < _cm->finger(), "invariant");
1133 _region_finger = new_finger;
1134 }
1136 CMTask(int task_num, ConcurrentMark *cm,
1137 CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
1139 // it prints statistics associated with this task
1140 void print_stats();
1142 #if _MARKING_STATS_
1143 void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
1144 #endif // _MARKING_STATS_
1145 };
1147 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP