Thu, 22 Sep 2011 10:57:37 -0700
6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/survRateGroup.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/space.inline.hpp"
#include "memory/watermark.hpp"

#ifndef SERIALGC

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.

class CompactibleSpace;
class ContiguousSpace;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;
class HeapRegionSetBase;

#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \
                (_hr_)->hrs_index(), \
                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
                (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
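
// For illustration, a call site might format a region for logging like this
// (hypothetical example; any outputStream works the same way):
//
//   gclog_or_tty->print_cr("region "HR_FORMAT, HR_FORMAT_PARAMS(hr));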

// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
// sets.

class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
public:
  // Specification of possible DirtyCardToOopClosure filtering.
  enum FilterKind {
    NoFilterKind,
    IntoCSFilterKind,
    OutOfRegionFilterKind
  };

protected:
  HeapRegion* _hr;
  FilterKind _fk;
  G1CollectedHeap* _g1;

  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               OopClosure* cl);

  // We don't specialize this for FilteringClosure; filtering is handled by
  // the "FilterKind" mechanism. But we provide this to avoid a compiler
  // warning.
  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               FilteringClosure* cl) {
    HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
                                             (OopClosure*)cl);
  }

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
  }

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    Filtering_DCTOC::walk_mem_region(mr, bottom, top);
  }

public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
                  HeapRegion* hr, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  FilterKind fk);
};

// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.

// The idea behind time stamps is the following. Doing a save_marks on
// all regions at every GC pause is time consuming (if I remember
// well, 10ms or so). So, we would like to do that only for regions
// that are GC alloc regions. To achieve this, we use time
// stamps. For every evacuation pause, G1CollectedHeap generates a
// unique time stamp (essentially a counter that gets
// incremented). Every time we want to call save_marks on a region,
// we set the saved_mark_word to top and also copy the current GC
// time stamp to the time stamp field of the space. Reading the
// saved_mark_word involves checking the time stamp of the
// region. If it is the same as the current GC time stamp, then we
// can safely read the saved_mark_word field, as it is valid. If the
// time stamp of the region is not the same as the current GC time
// stamp, then we instead read top, as the saved_mark_word field is
// invalid. Time stamps (on the regions and also on the
// G1CollectedHeap) are reset at every cleanup (we iterate over
// the regions anyway) and at the end of a Full GC. The current scheme
// that uses sequential unsigned ints will fail only if we have 4 billion
// evacuation pauses between two cleanups, which is _highly_ unlikely.
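
// As an illustrative sketch only (the real logic lives in the .cpp file),
// reading the saved mark under this scheme amounts to:
//
//   if (_gc_time_stamp < G1CollectedHeap::heap()->get_gc_time_stamp()) {
//     return top();            // stamp is stale: saved_mark_word is invalid
//   } else {
//     return ContiguousSpace::saved_mark_word();  // stamp current: valid
//   }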

class G1OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  G1BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  volatile unsigned _gc_time_stamp;
  // When we need to retire an allocation region, while other threads
  // are also concurrently trying to allocate into it, we typically
  // allocate a dummy object at the end of the region to ensure that
  // no more allocations can take place in it. However, sometimes we
  // want to know where the end of the last "real" object we allocated
  // into the region was, and that is what this field keeps track of.
  HeapWord* _pre_dummy_top;

 public:
  // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
  // assumed to contain zeros.
  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr, bool is_zeroed = false);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  virtual HeapWord* saved_mark_word() const;
  virtual void set_saved_mark();
  void reset_gc_time_stamp() { _gc_time_stamp = 0; }

  // See the comment above in the declaration of _pre_dummy_top for an
  // explanation of what it is.
  void set_pre_dummy_top(HeapWord* pre_dummy_top) {
    assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
    _pre_dummy_top = pre_dummy_top;
  }
  HeapWord* pre_dummy_top() {
    return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
  }
  void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
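
  // Illustrative retire sequence (hypothetical caller; the real code lives
  // in the allocation-region logic): record where "real" allocation ended,
  // then plug the remaining space with a dummy object so that concurrent
  // par_allocate() calls can no longer succeed:
  //
  //   hr->set_pre_dummy_top(real_obj_end);
  //   // ... allocate a filler object covering [real_obj_end, top()) ...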

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print() const;

  void reset_bot() {
    _offsets.zero_bottom_entry();
    _offsets.initialize_threshold();
  }

  void update_bot_for_object(HeapWord* start, size_t word_size) {
    _offsets.alloc_block(start, word_size);
  }

  void print_bot_on(outputStream* out) {
    _offsets.print_on(out);
  }
};

class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

  enum HumongousType {
    NotHumongous = 0,
    StartsHumongous,
    ContinuesHumongous
  };

  // Requires that the region "mr" be dense with objects, and begin and end
  // with an object.
  void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
  // The index of this region in the heap region sequence.
  size_t _hrs_index;

  HumongousType _humongous_type;
  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;

  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets,
  // each represented as a linked list through the field below. Currently,
  // these sets include:
  //   The collection set.
  //   The set of allocation regions used in a collection pause.
  //   Spaces that may contain gray objects.
  HeapRegion* _next_in_special_set;

  // next region in the young "generation" region set
  HeapRegion* _next_young_region;

  // Next region whose cards need cleaning
  HeapRegion* _next_dirty_cards_region;

  // Fields used by the HeapRegionSetBase class and subclasses.
  HeapRegion* _next;
#ifdef ASSERT
  HeapRegionSetBase* _containing_set;
#endif // ASSERT
  bool _pending_removal;

  // For parallel heapRegion traversal.
  jint _claimed;

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.

  // See "sort_index" method. -1 means is not in the array.
  int _sort_index;

  // <PREDICTION>
  double _gc_efficiency;
  // </PREDICTION>

  enum YoungType {
    NotYoung,                   // a region is not young
    Young,                      // a region is young
    Survivor                    // a region is young and it contains survivors
  };

  volatile YoungType _young_type;
  int _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
  // "next" is the top at the start of the in-progress marking (if any.)
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
  // If a collection pause is in progress, this is the top at the start
  // of that pause.

  // We've counted the marked bytes of objects below here.
  HeapWord* _top_at_conc_mark_count;

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
    _top_at_conc_mark_count = bot;
  }

  void set_young_type(YoungType new_type) {
    //assert(_young_type != new_type, "setting the same type" );
    // TODO: add more assertions here
    _young_type = new_type;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;

 public:
  // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
  HeapRegion(size_t hrs_index,
             G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr, bool is_zeroed);

  static int LogOfHRGrainBytes;
  static int LogOfHRGrainWords;
  // The normal type of these should be size_t. However, they used to
  // be members of an enum before and they are assumed by the
  // compilers to be ints. To avoid going and fixing all their uses,
  // I'm declaring them as ints. I'm not anticipating heap region
  // sizes to reach anywhere near 2g, so using an int here is safe.
  static int GrainBytes;
  static int GrainWords;
  static int CardsPerRegion;

  // It sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(uintx min_heap_size);

  enum ClaimValues {
    InitialClaimValue     = 0,
    FinalCountClaimValue  = 1,
    NoteEndClaimValue     = 2,
    ScrubRemSetClaimValue = 3,
    ParVerifyClaimValue   = 4,
    RebuildRSClaimValue   = 5
  };

  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::par_allocate(word_size);
  }
  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::allocate(word_size);
  }

  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  size_t hrs_index() const { return _hrs_index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes()    { return _prev_marked_bytes; }
  size_t live_bytes() {
    return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
  }

  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return
      (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
  }

  // A lower bound on the amount of garbage bytes in the region.
  size_t garbage_bytes() {
    size_t used_at_mark_start_bytes =
      (prev_top_at_mark_start() - bottom()) * HeapWordSize;
    assert(used_at_mark_start_bytes >= marked_bytes(),
           "Can't mark more than we have.");
    return used_at_mark_start_bytes - marked_bytes();
  }

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }

  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
    guarantee( _next_marked_bytes <= used(), "invariant" );
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  bool isHumongous() const { return _humongous_type != NotHumongous; }
  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
  }

  // Makes the current region be a "starts humongous" region, i.e.,
  // the first region in a series of one or more contiguous regions
  // that will contain a single "humongous" object. The two parameters
  // are as follows:
  //
  // new_top : The new value of the top field of this region which
  // points to the end of the humongous object that's being
  // allocated. If there is more than one region in the series, top
  // will lie beyond this region's original end field and on the last
  // region in the series.
  //
  // new_end : The new value of the end field of this region which
  // points to the end of the last region in the series. If there is
  // one region in the series (namely: this one) end will be the same
  // as the original end of this region.
  //
  // Updating top and end as described above makes this region look as
  // if it spans the entire space taken up by all the regions in the
  // series and a single allocation moved its top to new_top. This
  // ensures that the space (capacity / allocated) taken up by all
  // humongous regions can be calculated by just looking at the
  // "starts humongous" regions and by ignoring the "continues
  // humongous" regions.
  void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
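
  // Worked example (illustrative): a humongous object spanning three
  // regions R0, R1, R2 makes R0 the "starts humongous" region. R0->end()
  // is moved to R2's end and R0->top() to the object's end address inside
  // R2, while R1 and R2 become "continues humongous" regions. Summing
  // capacity / usage over the "starts humongous" regions alone then
  // accounts for the whole series.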

  // Makes the current region be a "continues humongous"
  // region. first_hr is the "starts humongous" region of the series
  // which this region will be part of.
  void set_continuesHumongous(HeapRegion* first_hr);

  // Unsets the humongous-related fields on the region.
  void set_notHumongous();

  // If the region has a remembered set, return a pointer to it.
  HeapRegionRemSet* rem_set() const {
    return _rem_set;
  }

  // True iff the region is in current collection_set.
  bool in_collection_set() const {
    return _in_collection_set;
  }
  void set_in_collection_set(bool b) {
    _in_collection_set = b;
  }
  HeapRegion* next_in_collection_set() {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->in_collection_set(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_in_collection_set(HeapRegion* r) {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(r == NULL || r->in_collection_set(), "Malformed CS.");
    _next_in_special_set = r;
  }

  // Methods used by the HeapRegionSetBase class and subclasses.

  // Getter and setter for the next field used to link regions into
  // linked lists.
  HeapRegion* next()              { return _next; }

  void set_next(HeapRegion* next) { _next = next; }

  // Every region added to a set is tagged with a reference to that
  // set. This is used for doing consistency checking to make sure that
  // the contents of a set are as they should be and it's only
  // available in non-product builds.
#ifdef ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) {
    assert((containing_set == NULL && _containing_set != NULL) ||
           (containing_set != NULL && _containing_set == NULL),
           err_msg("containing_set: "PTR_FORMAT" "
                   "_containing_set: "PTR_FORMAT,
                   containing_set, _containing_set));

    _containing_set = containing_set;
  }

  HeapRegionSetBase* containing_set() { return _containing_set; }
#else // ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) { }

  // containing_set() is only used in asserts so there's no reason
  // to provide a dummy version of it.
#endif // ASSERT

  // If we want to remove regions from a list in bulk we can simply tag
  // them with the pending_removal tag and call the
  // remove_all_pending() method on the list.

  bool pending_removal() { return _pending_removal; }

  void set_pending_removal(bool pending_removal) {
    if (pending_removal) {
      assert(!_pending_removal && containing_set() != NULL,
             "can only set pending removal to true if it's false and "
             "the region belongs to a region set");
    } else {
      assert( _pending_removal && containing_set() == NULL,
              "can only set pending removal to false if it's true and "
              "the region does not belong to a region set");
    }

    _pending_removal = pending_removal;
  }

  HeapRegion* get_next_young_region() { return _next_young_region; }
  void set_next_young_region(HeapRegion* hr) {
    _next_young_region = hr;
  }

  HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
  HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
  void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
  bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

  HeapWord* orig_end() { return _orig_end; }

  // Allows logical separation between objects allocated before and after.
  void save_marks();

  // Reset HR stuff to default values.
  void hr_clear(bool par, bool clear_space);
  void par_clear();

  void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // Get the start of the unmarked area in this region.
  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }

  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current region before the last call to "save_mark".
  void oop_before_save_marks_iterate(OopClosure* cl);

  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapRegionDCTOC::FilterKind fk);

  // Note the start or end of marking. This tells the heap region
  // that the collector is about to start or has finished (concurrently)
  // marking the heap.

  // Note the start of a marking phase. Record the
  // start of the unmarked area of the region here.
  void note_start_of_marking(bool during_initial_mark) {
    init_top_at_conc_mark_count();
    _next_marked_bytes = 0;
    if (during_initial_mark && is_young() && !is_survivor())
      _next_top_at_mark_start = bottom();
    else
      _next_top_at_mark_start = top();
  }

  // Note the end of a marking phase. Install the start of
  // the unmarked area that was captured at start of marking.
  void note_end_of_marking() {
    _prev_top_at_mark_start = _next_top_at_mark_start;
    _prev_marked_bytes = _next_marked_bytes;
    _next_marked_bytes = 0;

    guarantee(_prev_marked_bytes <=
              (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
              "invariant");
  }

  // After an evacuation, we need to update _next_top_at_mark_start
  // to be the current top. Note this is only valid if we have only
  // ever evacuated into this region. If we evacuate, allocate, and
  // then evacuate we are in deep doodoo.
  void note_end_of_copying() {
    assert(top() >= _next_top_at_mark_start, "Increase only");
    _next_top_at_mark_start = top();
  }

  // Returns "false" iff no object in the region was allocated when the
  // last mark phase ended.
  bool is_marked() { return _prev_top_at_mark_start != bottom(); }

  // If "is_marked()" is true, then this is the index of the region in
  // an array constructed at the end of marking of the regions in a
  // "desirability" order.
  int sort_index() {
    return _sort_index;
  }
  void set_sort_index(int i) {
    _sort_index = i;
  }

  void init_top_at_conc_mark_count() {
    _top_at_conc_mark_count = bottom();
  }

  void set_top_at_conc_mark_count(HeapWord *cur) {
    assert(bottom() <= cur && cur <= end(), "Sanity.");
    _top_at_conc_mark_count = cur;
  }

  HeapWord* top_at_conc_mark_count() {
    return _top_at_conc_mark_count;
  }

  void reset_during_compaction() {
    guarantee( isHumongous() && startsHumongous(),
               "should only be called for humongous regions");

    zero_marked_bytes();
    init_top_at_mark_start();
  }

  // <PREDICTION>
  void calc_gc_efficiency(void);
  double gc_efficiency() { return _gc_efficiency;}
  // </PREDICTION>

  bool is_young() const     { return _young_type != NotYoung; }
  bool is_survivor() const  { return _young_type == Survivor; }

  int  young_index_in_cset() const { return _young_index_in_cset; }
  void set_young_index_in_cset(int index) {
    assert( (index == -1) || is_young(), "pre-condition" );
    _young_index_in_cset = index;
  }

  int age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    return _surv_rate_group->age_in_group(_age_index);
  }

  void record_surv_words_in_group(size_t words_survived) {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    int age_in_group = age_in_surv_rate_group();
    _surv_rate_group->record_surviving_words(age_in_group, words_survived);
  }

  int age_in_surv_rate_group_cond() {
    if (_surv_rate_group != NULL)
      return age_in_surv_rate_group();
    else
      return -1;
  }

  SurvRateGroup* surv_rate_group() {
    return _surv_rate_group;
  }

  void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
    assert( surv_rate_group != NULL, "pre-condition" );
    assert( _surv_rate_group == NULL, "pre-condition" );
    assert( is_young(), "pre-condition" );

    _surv_rate_group = surv_rate_group;
    _age_index = surv_rate_group->next_age_index();
  }

  void uninstall_surv_rate_group() {
    if (_surv_rate_group != NULL) {
      assert( _age_index > -1, "pre-condition" );
      assert( is_young(), "pre-condition" );

      _surv_rate_group = NULL;
      _age_index = -1;
    } else {
      assert( _age_index == -1, "pre-condition" );
    }
  }

  void set_young() { set_young_type(Young); }

  void set_survivor() { set_young_type(Survivor); }

  void set_not_young() { set_young_type(NotYoung); }

  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord *) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord *) obj >= next_top_at_mark_start();
  }

  // For parallel heapRegion traversal.
  bool claimHeapRegion(int claimValue);
  jint claim_value() { return _claimed; }
  // Use this carefully: only when you're sure no one is claiming...
  void set_claim_value(int claimValue) { _claimed = claimValue; }
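
  // Illustrative use during a parallel phase (hypothetical worker loop):
  //
  //   if (r->claimHeapRegion(HeapRegion::FinalCountClaimValue)) {
  //     // exactly one worker succeeds for r; it alone processes the region
  //   }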

  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      init_top_at_conc_mark_count();
      _next_marked_bytes = 0;
    }
  }

  // Requires that "mr" be entirely within the region.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // or if "cl->abort()" is true after a closure application,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. (The two can be distinguished by querying
  // "cl->abort()".) Return of "NULL" indicates that the iteration
  // completed.
  HeapWord*
  object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);

  // filter_young: if true and the region is a young region then we
  // skip the iteration.
  // card_ptr: if not NULL, and we decide that the card is not young
  // and we iterate over it, we'll clean the card before we start the
  // iteration.
  HeapWord*
  oops_on_card_seq_iterate_careful(MemRegion mr,
                                   FilterOutOfRegionClosure* cl,
                                   bool filter_young,
                                   jbyte* card_ptr);

  // A version of block start that is guaranteed to find *some* block
  // boundary at or before "p", but does no object iteration, and may
  // therefore be used safely when the heap is unparseable.
  HeapWord* block_start_careful(const void* p) const {
    return _offsets.block_start_careful(p);
  }

  // Requires that "addr" is within the region. Returns the start of the
  // first ("careful") block that starts at or after "addr", or else the
  // "end" of the region if there is no such block.
  HeapWord* next_block_start_careful(HeapWord* addr);

  size_t recorded_rs_length() const        { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

  CompactibleSpace* next_compaction_space() const;

  virtual void reset_after_compaction();

  void print() const;
  void print_on(outputStream* st) const;

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information
  // vo == UseMarkWord    -> use the mark word in the object header
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseMarkWord, which is to verify the marking during a
  // full GC.
  void verify(bool allow_dirty, VerifyOption vo, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify(bool allow_dirty) const;
};

// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionSeq;
  friend class G1CollectedHeap;

  bool _complete;
  void incomplete() { _complete = false; }

 public:
  HeapRegionClosure(): _complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool doHeapRegion(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool complete() { return _complete; }
};

#endif // SERIALGC

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP