Wed, 15 Feb 2012 13:06:53 -0500
7132029: G1: mixed GC phase lasts for longer than it should
Summary: Revamp of the mechanism that chooses old regions for inclusion in the CSet. It simplifies the code and introduces min and max bounds on the number of old regions added to the CSet at each mixed GC to avoid pathological cases. It also ensures that when we do a mixed GC we'll always find old regions to add to the CSet (i.e., it eliminates the case, which can happen today, where a mixed GC collects no old regions).
Reviewed-by: johnc, brutisso
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/survRateGroup.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/space.inline.hpp"
#include "memory/watermark.hpp"
#ifndef SERIALGC

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.
class CompactibleSpace;
class ContiguousSpace;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;
class HeapRegionSetBase;
#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \
                (_hr_)->hrs_index(), \
                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
                (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
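
// Illustrative usage (a sketch, not part of the original header; print_region
// is a hypothetical helper): the two macros above form a format-string /
// argument-list pair, e.g. when logging a region to an outputStream:
//
//   void print_region(outputStream* st, HeapRegion* hr) {
//     // prints something like: 123:(E)[0x...,0x...,0x...]
//     st->print_cr("region "HR_FORMAT, HR_FORMAT_PARAMS(hr));
//   }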
// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
// sets.
class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
 public:
  // Specification of possible DirtyCardToOopClosure filtering.
  enum FilterKind {
    NoFilterKind,
    IntoCSFilterKind,
    OutOfRegionFilterKind
  };

 protected:
  HeapRegion* _hr;
  FilterKind _fk;
  G1CollectedHeap* _g1;

  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               OopClosure* cl);

  // We don't specialize this for FilteringClosure; filtering is handled by
  // the "FilterKind" mechanism. But we provide this to avoid a compiler
  // warning.
  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               FilteringClosure* cl) {
    HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
                                             (OopClosure*)cl);
  }

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
  }

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    Filtering_DCTOC::walk_mem_region(mr, bottom, top);
  }

 public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
                  HeapRegion* hr, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  FilterKind fk);
};
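
// Illustrative sketch (not from the original sources): a caller typically
// constructs one of these per region and hands it the MemRegion covered by a
// dirty card; the closure then applies the wrapped OopClosure to the oops on
// that card, subject to the chosen FilterKind:
//
//   HeapRegionDCTOC dcto_cl(g1h, hr, oop_cl, CardTableModRefBS::Precise,
//                           HeapRegionDCTOC::IntoCSFilterKind);
//   dcto_cl.do_MemRegion(dirty_card_mr);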
// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.
// The idea behind time stamps is the following. Doing a save_marks on
// all regions at every GC pause is time consuming (if I remember
// correctly, 10ms or so). So, we would like to do that only for regions
// that are GC alloc regions. To achieve this, we use time
// stamps. For every evacuation pause, G1CollectedHeap generates a
// unique time stamp (essentially a counter that gets
// incremented). Every time we want to call save_marks on a region,
// we set the saved_mark_word to top and also copy the current GC
// time stamp to the time stamp field of the space. Reading the
// saved_mark_word involves checking the time stamp of the
// region. If it is the same as the current GC time stamp, then we
// can safely read the saved_mark_word field, as it is valid. If the
// time stamp of the region is not the same as the current GC time
// stamp, then we instead read top, as the saved_mark_word field is
// invalid. Time stamps (on the regions and also on the
// G1CollectedHeap) are reset at every cleanup (we iterate over
// the regions anyway) and at the end of a Full GC. The current scheme
// that uses sequential unsigned ints will fail only if we have 4b
// evacuation pauses between two cleanups, which is _highly_ unlikely.
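
// The reading side of that protocol looks roughly like this (illustrative
// sketch; the actual implementation lives in heapRegion.cpp):
//
//   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
//     G1CollectedHeap* g1h = G1CollectedHeap::heap();
//     if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
//       return top();                              // stamp is stale: field invalid
//     } else {
//       return ContiguousSpace::saved_mark_word(); // stamp is current: field valid
//     }
//   }
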
class G1OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  G1BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  volatile unsigned _gc_time_stamp;
  // When we need to retire an allocation region, while other threads
  // are also concurrently trying to allocate into it, we typically
  // allocate a dummy object at the end of the region to ensure that
  // no more allocations can take place in it. However, sometimes we
  // want to know where the end of the last "real" object we allocated
  // into the region was, and this is what this field keeps track of.
  HeapWord* _pre_dummy_top;

 public:
  // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
  // assumed to contain zeros.
  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr, bool is_zeroed = false);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  virtual HeapWord* saved_mark_word() const;
  virtual void set_saved_mark();
  void reset_gc_time_stamp() { _gc_time_stamp = 0; }

  // See the comment above in the declaration of _pre_dummy_top for an
  // explanation of what it is.
  void set_pre_dummy_top(HeapWord* pre_dummy_top) {
    assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
    _pre_dummy_top = pre_dummy_top;
  }
  HeapWord* pre_dummy_top() {
    return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
  }
  void reset_pre_dummy_top() { _pre_dummy_top = NULL; }

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print() const;

  void reset_bot() {
    _offsets.zero_bottom_entry();
    _offsets.initialize_threshold();
  }

  void update_bot_for_object(HeapWord* start, size_t word_size) {
    _offsets.alloc_block(start, word_size);
  }

  void print_bot_on(outputStream* out) {
    _offsets.print_on(out);
  }
};
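
// Illustrative sketch of how _pre_dummy_top is meant to be used when a region
// is retired (hypothetical helper, not from the original sources): the
// remaining gap is plugged with a filler object so that racing allocators can
// no longer succeed, while pre_dummy_top() remembers where real allocation
// ended:
//
//   void retire_region(G1OffsetTableContigSpace* sp) {
//     HeapWord* real_top = sp->top();
//     size_t gap = pointer_delta(sp->end(), real_top);  // in words
//     if (gap >= CollectedHeap::min_fill_size()) {
//       sp->set_pre_dummy_top(real_top);          // end of last "real" object
//       HeapWord* dummy = sp->par_allocate(gap);  // claim the rest of the region
//       CollectedHeap::fill_with_object(dummy, gap);
//       // (the real code retries if par_allocate() loses a race and fails)
//     }
//   }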
class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

  enum HumongousType {
    NotHumongous = 0,
    StartsHumongous,
    ContinuesHumongous
  };

  // Requires that the region "mr" be dense with objects, and begin and end
  // with an object.
  void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
  // The index of this region in the heap region sequence.
  size_t _hrs_index;

  HumongousType _humongous_type;
  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;

  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;
  // A heap region may be a member of one of a number of special subsets, each
  // represented as linked lists through the field below. Currently, these
  // sets include:
  //   The collection set.
  //   The set of allocation regions used in a collection pause.
  //   Spaces that may contain gray objects.
  HeapRegion* _next_in_special_set;
  // next region in the young "generation" region set
  HeapRegion* _next_young_region;

  // Next region whose cards need cleaning
  HeapRegion* _next_dirty_cards_region;

  // Fields used by the HeapRegionSetBase class and subclasses.
  HeapRegion* _next;
#ifdef ASSERT
  HeapRegionSetBase* _containing_set;
#endif // ASSERT
  bool _pending_removal;

  // For parallel heapRegion traversal.
  jint _claimed;

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.
  // See "sort_index" method. -1 means it is not in the array.
  int _sort_index;

  // <PREDICTION>
  double _gc_efficiency;
  // </PREDICTION>

  enum YoungType {
    NotYoung,                   // a region is not young
    Young,                      // a region is young
    Survivor                    // a region is young and it contains survivors
  };

  volatile YoungType _young_type;
  int _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
304 // "next" is the top at the start of the in-progress marking (if any.)
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
  // If a collection pause is in progress, this is the top at the start
  // of that pause.

  // We've counted the marked bytes of objects below here.
  HeapWord* _top_at_conc_mark_count;

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
    _top_at_conc_mark_count = bot;
  }
  void set_young_type(YoungType new_type) {
    //assert(_young_type != new_type, "setting the same type" );
    // TODO: add more assertions here
    _young_type = new_type;
  }
  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to the total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;
 public:
  // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
  HeapRegion(size_t hrs_index,
             G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr, bool is_zeroed);

  static int    LogOfHRGrainBytes;
  static int    LogOfHRGrainWords;

  static size_t GrainBytes;
  static size_t GrainWords;
  static size_t CardsPerRegion;

  static size_t align_up_to_region_byte_size(size_t sz) {
    return (sz + (size_t) GrainBytes - 1) &
                             ~((1 << (size_t) LogOfHRGrainBytes) - 1);
  }
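
  // Worked example (illustrative, assuming GrainBytes == 1M, so
  // LogOfHRGrainBytes == 20):
  //   align_up_to_region_byte_size(1)      == 1M
  //   align_up_to_region_byte_size(1M)     == 1M
  //   align_up_to_region_byte_size(1M + 1) == 2M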
  // It sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(uintx min_heap_size);
  enum ClaimValues {
    InitialClaimValue          = 0,
    FinalCountClaimValue       = 1,
    NoteEndClaimValue          = 2,
    ScrubRemSetClaimValue      = 3,
    ParVerifyClaimValue        = 4,
    RebuildRSClaimValue        = 5,
    CompleteMarkCSetClaimValue = 6,
    ParEvacFailureClaimValue   = 7,
    AggregateCountClaimValue   = 8,
    VerifyCountClaimValue      = 9
  };
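
  // Illustrative sketch (hypothetical closure, not from the original
  // sources): parallel region iteration guards its work with the claim
  // mechanism so that each region is processed by exactly one worker,
  // using one of the claim values above:
  //
  //   class CountLiveClosure : public HeapRegionClosure {
  //     bool doHeapRegion(HeapRegion* r) {
  //       if (r->claimHeapRegion(HeapRegion::FinalCountClaimValue)) {
  //         // only the worker that successfully claimed r processes it
  //       }
  //       return false;  // keep iterating
  //     }
  //   };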
  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::par_allocate(word_size);
  }
  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::allocate(word_size);
  }
  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  size_t hrs_index() const { return _hrs_index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes()    { return _prev_marked_bytes; }
  size_t live_bytes() {
    return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
  }

  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return
      (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
  }
  // A lower bound on the amount of garbage bytes in the region.
  size_t garbage_bytes() {
    size_t used_at_mark_start_bytes =
      (prev_top_at_mark_start() - bottom()) * HeapWordSize;
    assert(used_at_mark_start_bytes >= marked_bytes(),
           "Can't mark more than we have.");
    return used_at_mark_start_bytes - marked_bytes();
  }

  // Return the amount of bytes we'll reclaim if we collect this
  // region. This includes not only the known garbage bytes in the
  // region but also any unallocated space in it, i.e., [top, end),
  // since it will also be reclaimed if we collect the region.
  size_t reclaimable_bytes() {
    size_t known_live_bytes = live_bytes();
    assert(known_live_bytes <= capacity(), "sanity");
    return capacity() - known_live_bytes;
  }

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }
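
  // Worked example (illustrative numbers only): for a 1M region where
  // bottom..prev_top_at_mark_start() covers 800K, of which 300K were marked
  // live, and 100K have been allocated since the mark started (used() == 900K):
  //   marked_bytes()      == 300K
  //   live_bytes()        == 100K + 300K       == 400K
  //   garbage_bytes()     == 800K - 300K       == 500K
  //   max_live_bytes()    == 900K - 500K       == 400K
  //   reclaimable_bytes() == capacity() - 400K == 600K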
  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
    assert(_next_marked_bytes <= used(), "invariant" );
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  bool isHumongous() const { return _humongous_type != NotHumongous; }
  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
  }
  // Same as Space::is_in_reserved, but will use the original size of the region.
  // The original size is different only for "starts humongous" regions: they get
  // their _end set up to be the end of the last "continues humongous" region of
  // the corresponding humongous object.
  bool is_in_reserved_raw(const void* p) const {
    return _bottom <= p && p < _orig_end;
  }
  // Makes the current region be a "starts humongous" region, i.e.,
  // the first region in a series of one or more contiguous regions
  // that will contain a single "humongous" object. The two parameters
  // are as follows:
  //
  // new_top : The new value of the top field of this region which
  // points to the end of the humongous object that's being
  // allocated. If there is more than one region in the series, top
  // will lie beyond this region's original end field and on the last
  // region in the series.
  //
  // new_end : The new value of the end field of this region which
  // points to the end of the last region in the series. If there is
  // one region in the series (namely: this one) end will be the same
  // as the original end of this region.
  //
  // Updating top and end as described above makes this region look as
  // if it spans the entire space taken up by all the regions in the
  // series and a single allocation moved its top to new_top. This
  // ensures that the space (capacity / allocated) taken up by all
  // humongous regions can be calculated by just looking at the
  // "starts humongous" regions and by ignoring the "continues
  // humongous" regions.
  void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
  // Makes the current region be a "continues humongous"
  // region. first_hr is the "starts humongous" region of the series
  // which this region will be part of.
  void set_continuesHumongous(HeapRegion* first_hr);

  // Unsets the humongous-related fields on the region.
  void set_notHumongous();
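
  // Illustrative sketch (not the actual allocation path): for a humongous
  // object spanning three regions, the series would be set up roughly as:
  //
  //   first->set_startsHumongous(obj_top, last->end());
  //   second->set_continuesHumongous(first);
  //   last->set_continuesHumongous(first);
  //
  // so that the "starts humongous" region alone accounts for the whole
  // object, and the "continues humongous" regions can be ignored for
  // capacity / used accounting.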
  // If the region has a remembered set, return a pointer to it.
  HeapRegionRemSet* rem_set() const {
    return _rem_set;
  }

  // True iff the region is in current collection_set.
  bool in_collection_set() const {
    return _in_collection_set;
  }
  void set_in_collection_set(bool b) {
    _in_collection_set = b;
  }
  HeapRegion* next_in_collection_set() {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->in_collection_set(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_in_collection_set(HeapRegion* r) {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(r == NULL || r->in_collection_set(), "Malformed CS.");
    _next_in_special_set = r;
  }
  // Methods used by the HeapRegionSetBase class and subclasses.

  // Getter and setter for the next field used to link regions into
  // linked lists.
  HeapRegion* next()              { return _next; }

  void set_next(HeapRegion* next) { _next = next; }

  // Every region added to a set is tagged with a reference to that
  // set. This is used for doing consistency checking to make sure that
  // the contents of a set are as they should be and it's only
  // available in non-product builds.
#ifdef ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) {
    assert((containing_set == NULL && _containing_set != NULL) ||
           (containing_set != NULL && _containing_set == NULL),
           err_msg("containing_set: "PTR_FORMAT" "
                   "_containing_set: "PTR_FORMAT,
                   containing_set, _containing_set));

    _containing_set = containing_set;
  }

  HeapRegionSetBase* containing_set() { return _containing_set; }
#else // ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) { }

  // containing_set() is only used in asserts so there's no reason
  // to provide a dummy version of it.
#endif // ASSERT
  // If we want to remove regions from a list in bulk we can simply tag
  // them with the pending_removal tag and call the
  // remove_all_pending() method on the list.

  bool pending_removal() { return _pending_removal; }

  void set_pending_removal(bool pending_removal) {
    if (pending_removal) {
      assert(!_pending_removal && containing_set() != NULL,
             "can only set pending removal to true if it's false and "
             "the region belongs to a region set");
    } else {
      assert( _pending_removal && containing_set() == NULL,
             "can only set pending removal to false if it's true and "
             "the region does not belong to a region set");
    }

    _pending_removal = pending_removal;
  }
  HeapRegion* get_next_young_region() { return _next_young_region; }
  void set_next_young_region(HeapRegion* hr) {
    _next_young_region = hr;
  }

  HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
  HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
  void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
  bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

  HeapWord* orig_end() { return _orig_end; }
  // Allows logical separation between objects allocated before and after.
  void save_marks();

  // Reset HR stuff to default values.
  void hr_clear(bool par, bool clear_space);
  void par_clear();

  void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // Get the start of the unmarked area in this region.
  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current region before the last call to "save_marks".
  void oop_before_save_marks_iterate(OopClosure* cl);
  // Note the start or end of marking. This tells the heap region
  // that the collector is about to start or has finished (concurrently)
  // marking the heap.

  // Notify the region that concurrent marking is starting. Initialize
  // all fields related to the next marking info.
  inline void note_start_of_marking();

  // Notify the region that concurrent marking has finished. Copy the
  // (now finalized) next marking info fields into the prev marking
  // info fields.
  inline void note_end_of_marking();

  // Notify the region that it will be used as to-space during a GC
  // and we are about to start copying objects into it.
  inline void note_start_of_copying(bool during_initial_mark);

  // Notify the region that it ceases being to-space during a GC and
  // we will not copy objects into it any more.
  inline void note_end_of_copying(bool during_initial_mark);

  // Notify the region that we are about to start processing
  // self-forwarded objects during evac failure handling.
  void note_self_forwarding_removal_start(bool during_initial_mark,
                                          bool during_conc_mark);

  // Notify the region that we have finished processing self-forwarded
  // objects during evac failure handling.
  void note_self_forwarding_removal_end(bool during_initial_mark,
                                        bool during_conc_mark,
                                        size_t marked_bytes);
  // Returns "false" iff no object in the region was allocated when the
  // last mark phase ended.
  bool is_marked() { return _prev_top_at_mark_start != bottom(); }

  // If "is_marked()" is true, then this is the index of the region in
  // an array constructed at the end of marking of the regions in a
  // "desirability" order.
  int sort_index() {
    return _sort_index;
  }
  void set_sort_index(int i) {
    _sort_index = i;
  }

  void init_top_at_conc_mark_count() {
    _top_at_conc_mark_count = bottom();
  }

  void set_top_at_conc_mark_count(HeapWord *cur) {
    assert(bottom() <= cur && cur <= end(), "Sanity.");
    _top_at_conc_mark_count = cur;
  }

  HeapWord* top_at_conc_mark_count() {
    return _top_at_conc_mark_count;
  }
  void reset_during_compaction() {
    guarantee( isHumongous() && startsHumongous(),
               "should only be called for humongous regions");

    zero_marked_bytes();
    init_top_at_mark_start();
  }

  void calc_gc_efficiency(void);
  double gc_efficiency() { return _gc_efficiency; }

  bool is_young() const     { return _young_type != NotYoung; }
  bool is_survivor() const  { return _young_type == Survivor; }

  int  young_index_in_cset() const { return _young_index_in_cset; }
  void set_young_index_in_cset(int index) {
    assert( (index == -1) || is_young(), "pre-condition" );
    _young_index_in_cset = index;
  }
  int age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    return _surv_rate_group->age_in_group(_age_index);
  }

  void record_surv_words_in_group(size_t words_survived) {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    int age_in_group = age_in_surv_rate_group();
    _surv_rate_group->record_surviving_words(age_in_group, words_survived);
  }

  int age_in_surv_rate_group_cond() {
    if (_surv_rate_group != NULL)
      return age_in_surv_rate_group();
    else
      return -1;
  }
  SurvRateGroup* surv_rate_group() {
    return _surv_rate_group;
  }

  void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
    assert( surv_rate_group != NULL, "pre-condition" );
    assert( _surv_rate_group == NULL, "pre-condition" );
    assert( is_young(), "pre-condition" );

    _surv_rate_group = surv_rate_group;
    _age_index = surv_rate_group->next_age_index();
  }

  void uninstall_surv_rate_group() {
    if (_surv_rate_group != NULL) {
      assert( _age_index > -1, "pre-condition" );
      assert( is_young(), "pre-condition" );

      _surv_rate_group = NULL;
      _age_index = -1;
    } else {
      assert( _age_index == -1, "pre-condition" );
    }
  }
  void set_young() { set_young_type(Young); }

  void set_survivor() { set_young_type(Survivor); }

  void set_not_young() { set_young_type(NotYoung); }
  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord *) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord *) obj >= next_top_at_mark_start();
  }
  // For parallel heapRegion traversal.
  bool claimHeapRegion(int claimValue);
  jint claim_value() { return _claimed; }
  // Use this carefully: only when you're sure no one is claiming...
  void set_claim_value(int claimValue) { _claimed = claimValue; }
  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      init_top_at_conc_mark_count();
      _next_marked_bytes = 0;
    }
  }
  // Requires that "mr" be entirely within the region.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // or if "cl->abort()" is true after a closure application,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. (The two can be distinguished by querying
  // "cl->abort()".) Return of "NULL" indicates that the iteration
  // completed.
  HeapWord*
  object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
  // filter_young: if true and the region is a young region then we
  // skip the iteration.
  // card_ptr: if not NULL, and we decide that the card is not young
  // and we iterate over it, we'll clean the card before we start the
  // iteration.
  HeapWord*
  oops_on_card_seq_iterate_careful(MemRegion mr,
                                   FilterOutOfRegionClosure* cl,
                                   bool filter_young,
                                   jbyte* card_ptr);
  // A version of block start that is guaranteed to find *some* block
  // boundary at or before "p", but does no object iteration, and may
  // therefore be used safely when the heap is unparseable.
  HeapWord* block_start_careful(const void* p) const {
    return _offsets.block_start_careful(p);
  }
  // Requires that "addr" is within the region. Returns the start of the
  // first ("careful") block that starts at or after "addr", or else the
  // "end" of the region if there is no such block.
  HeapWord* next_block_start_careful(HeapWord* addr);

  size_t recorded_rs_length() const        { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }
#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

  CompactibleSpace* next_compaction_space() const;

  virtual void reset_after_compaction();

  void print() const;
  void print_on(outputStream* st) const;

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information
  // vo == UseMarkWord    -> use the mark word in the object header
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseMarkWord, which is to verify the marking during a
  // full GC.
  void verify(bool allow_dirty, VerifyOption vo, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify(bool allow_dirty) const;
};
// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionSeq;
  friend class G1CollectedHeap;

  bool _complete;
  void incomplete() { _complete = false; }

 public:
  HeapRegionClosure(): _complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool doHeapRegion(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool complete() { return _complete; }
};
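
// Illustrative sketch (hypothetical closure, not from the original sources):
// a typical use is to subclass HeapRegionClosure and hand it to
// G1CollectedHeap::heap_region_iterate(), which applies it to every region
// until doHeapRegion() returns true; complete() is false if the iteration
// stopped early:
//
//   class FindFirstEmptyClosure : public HeapRegionClosure {
//    public:
//     HeapRegion* _result;
//     FindFirstEmptyClosure() : _result(NULL) {}
//     bool doHeapRegion(HeapRegion* r) {
//       if (r->used() == 0) { _result = r; return true; }  // stop iterating
//       return false;                                      // keep going
//     }
//   };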
#endif // SERIALGC

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP