Wed, 11 Sep 2013 16:25:02 +0200
8010722: assert: failed: heap size is too big for compressed oops
Summary: Use conservative assumptions of required alignment for the various garbage collector components into account when determining the maximum heap size that supports compressed oops. Using this conservative value avoids several circular dependencies in the calculation.
Reviewed-by: stefank, dholmes
1 /*
2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
28 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
29 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
30 #include "gc_implementation/g1/survRateGroup.hpp"
31 #include "gc_implementation/shared/ageTable.hpp"
32 #include "gc_implementation/shared/spaceDecorator.hpp"
33 #include "memory/space.inline.hpp"
34 #include "memory/watermark.hpp"
35 #include "utilities/macros.hpp"
37 #if INCLUDE_ALL_GCS
39 // A HeapRegion is the smallest piece of a G1CollectedHeap that
40 // can be collected independently.
42 // NOTE: Although a HeapRegion is a Space, its
43 // Space::initDirtyCardClosure method must not be called.
44 // The problem is that the existence of this method breaks
45 // the independence of barrier sets from remembered sets.
46 // The solution is to remove this method from the definition
47 // of a Space.
49 class CompactibleSpace;
50 class ContiguousSpace;
51 class HeapRegionRemSet;
52 class HeapRegionRemSetIterator;
53 class HeapRegion;
54 class HeapRegionSetBase;
55 class nmethod;
57 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
58 #define HR_FORMAT_PARAMS(_hr_) \
59 (_hr_)->hrs_index(), \
60 (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
61 (_hr_)->startsHumongous() ? "HS" : \
62 (_hr_)->continuesHumongous() ? "HC" : \
63 !(_hr_)->is_empty() ? "O" : "F", \
64 (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
66 // sentinel value for hrs_index
67 #define G1_NULL_HRS_INDEX ((uint) -1)
69 // A dirty card to oop closure for heap regions. It
70 // knows how to get the G1 heap and how to use the bitmap
71 // in the concurrent marker used by G1 to filter remembered
72 // sets.
74 class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
75 public:
76 // Specification of possible DirtyCardToOopClosure filtering.
77 enum FilterKind {
78 NoFilterKind,
79 IntoCSFilterKind,
80 OutOfRegionFilterKind
81 };
83 protected:
84 HeapRegion* _hr;
85 FilterKind _fk;
86 G1CollectedHeap* _g1;
88 void walk_mem_region_with_cl(MemRegion mr,
89 HeapWord* bottom, HeapWord* top,
90 ExtendedOopClosure* cl);
92 // We don't specialize this for FilteringClosure; filtering is handled by
93 // the "FilterKind" mechanism. But we provide this to avoid a compiler
94 // warning.
95 void walk_mem_region_with_cl(MemRegion mr,
96 HeapWord* bottom, HeapWord* top,
97 FilteringClosure* cl) {
98 HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
99 (ExtendedOopClosure*)cl);
100 }
102 // Get the actual top of the area on which the closure will
103 // operate, given where the top is assumed to be (the end of the
104 // memory region passed to do_MemRegion) and where the object
105 // at the top is assumed to start. For example, an object may
106 // start at the top but actually extend past the assumed top,
107 // in which case the top becomes the end of the object.
108 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
109 return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
110 }
112 // Walk the given memory region from bottom to (actual) top
113 // looking for objects and applying the oop closure (_cl) to
114 // them. The base implementation of this treats the area as
115 // blocks, where a block may or may not be an object. Sub-
116 // classes should override this to provide more accurate
117 // or possibly more efficient walking.
118 void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
119 Filtering_DCTOC::walk_mem_region(mr, bottom, top);
120 }
122 public:
123 HeapRegionDCTOC(G1CollectedHeap* g1,
124 HeapRegion* hr, ExtendedOopClosure* cl,
125 CardTableModRefBS::PrecisionStyle precision,
126 FilterKind fk);
127 };
129 // The complicating factor is that BlockOffsetTable diverged
130 // significantly, and we need functionality that is only in the G1 version.
131 // So I copied that code, which led to an alternate G1 version of
132 // OffsetTableContigSpace. If the two versions of BlockOffsetTable could
133 // be reconciled, then G1OffsetTableContigSpace could go away.
135 // The idea behind time stamps is the following. Doing a save_marks on
136 // all regions at every GC pause is time consuming (if I remember
137 // well, 10ms or so). So, we would like to do that only for regions
138 // that are GC alloc regions. To achieve this, we use time
139 // stamps. For every evacuation pause, G1CollectedHeap generates a
140 // unique time stamp (essentially a counter that gets
141 // incremented). Every time we want to call save_marks on a region,
142 // we set the saved_mark_word to top and also copy the current GC
143 // time stamp to the time stamp field of the space. Reading the
144 // saved_mark_word involves checking the time stamp of the
145 // region. If it is the same as the current GC time stamp, then we
146 // can safely read the saved_mark_word field, as it is valid. If the
147 // time stamp of the region is not the same as the current GC time
148 // stamp, then we instead read top, as the saved_mark_word field is
149 // invalid. Time stamps (on the regions and also on the
150 // G1CollectedHeap) are reset at every cleanup (we iterate over
151 // the regions anyway) and at the end of a Full GC. The current scheme
152 // that uses sequential unsigned ints will fail only if we have 4b
153 // evacuation pauses between two cleanups, which is _highly_ unlikely.
155 class G1OffsetTableContigSpace: public ContiguousSpace {
156 friend class VMStructs;
157 protected:
158 G1BlockOffsetArrayContigSpace _offsets;
159 Mutex _par_alloc_lock;
160 volatile unsigned _gc_time_stamp;
161 // When we need to retire an allocation region, while other threads
162 // are also concurrently trying to allocate into it, we typically
163 // allocate a dummy object at the end of the region to ensure that
164 // no more allocations can take place in it. However, sometimes we
165 // want to know where the end of the last "real" object we allocated
166 // into the region was and this is what this keeps track.
167 HeapWord* _pre_dummy_top;
169 public:
170 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
171 MemRegion mr);
173 void set_bottom(HeapWord* value);
174 void set_end(HeapWord* value);
176 virtual HeapWord* saved_mark_word() const;
177 virtual void set_saved_mark();
178 void reset_gc_time_stamp() { _gc_time_stamp = 0; }
179 unsigned get_gc_time_stamp() { return _gc_time_stamp; }
181 // See the comment above in the declaration of _pre_dummy_top for an
182 // explanation of what it is.
183 void set_pre_dummy_top(HeapWord* pre_dummy_top) {
184 assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
185 _pre_dummy_top = pre_dummy_top;
186 }
187 HeapWord* pre_dummy_top() {
188 return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
189 }
190 void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
192 virtual void clear(bool mangle_space);
194 HeapWord* block_start(const void* p);
195 HeapWord* block_start_const(const void* p) const;
197 // Add offset table update.
198 virtual HeapWord* allocate(size_t word_size);
199 HeapWord* par_allocate(size_t word_size);
201 // MarkSweep support phase3
202 virtual HeapWord* initialize_threshold();
203 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
205 virtual void print() const;
207 void reset_bot() {
208 _offsets.zero_bottom_entry();
209 _offsets.initialize_threshold();
210 }
212 void update_bot_for_object(HeapWord* start, size_t word_size) {
213 _offsets.alloc_block(start, word_size);
214 }
216 void print_bot_on(outputStream* out) {
217 _offsets.print_on(out);
218 }
219 };
221 class HeapRegion: public G1OffsetTableContigSpace {
222 friend class VMStructs;
223 private:
225 enum HumongousType {
226 NotHumongous = 0,
227 StartsHumongous,
228 ContinuesHumongous
229 };
231 // Requires that the region "mr" be dense with objects, and begin and end
232 // with an object.
233 void oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl);
235 // The remembered set for this region.
236 // (Might want to make this "inline" later, to avoid some alloc failure
237 // issues.)
238 HeapRegionRemSet* _rem_set;
240 G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
242 protected:
243 // The index of this region in the heap region sequence.
244 uint _hrs_index;
246 HumongousType _humongous_type;
247 // For a humongous region, region in which it starts.
248 HeapRegion* _humongous_start_region;
249 // For the start region of a humongous sequence, it's original end().
250 HeapWord* _orig_end;
252 // True iff the region is in current collection_set.
253 bool _in_collection_set;
255 // True iff an attempt to evacuate an object in the region failed.
256 bool _evacuation_failed;
258 // A heap region may be a member one of a number of special subsets, each
259 // represented as linked lists through the field below. Currently, these
260 // sets include:
261 // The collection set.
262 // The set of allocation regions used in a collection pause.
263 // Spaces that may contain gray objects.
264 HeapRegion* _next_in_special_set;
266 // next region in the young "generation" region set
267 HeapRegion* _next_young_region;
269 // Next region whose cards need cleaning
270 HeapRegion* _next_dirty_cards_region;
272 // Fields used by the HeapRegionSetBase class and subclasses.
273 HeapRegion* _next;
274 #ifdef ASSERT
275 HeapRegionSetBase* _containing_set;
276 #endif // ASSERT
277 bool _pending_removal;
279 // For parallel heapRegion traversal.
280 jint _claimed;
282 // We use concurrent marking to determine the amount of live data
283 // in each heap region.
284 size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
285 size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
287 // The calculated GC efficiency of the region.
288 double _gc_efficiency;
290 enum YoungType {
291 NotYoung, // a region is not young
292 Young, // a region is young
293 Survivor // a region is young and it contains survivors
294 };
296 volatile YoungType _young_type;
297 int _young_index_in_cset;
298 SurvRateGroup* _surv_rate_group;
299 int _age_index;
301 // The start of the unmarked area. The unmarked area extends from this
302 // word until the top and/or end of the region, and is the part
303 // of the region for which no marking was done, i.e. objects may
304 // have been allocated in this part since the last mark phase.
305 // "prev" is the top at the start of the last completed marking.
306 // "next" is the top at the start of the in-progress marking (if any.)
307 HeapWord* _prev_top_at_mark_start;
308 HeapWord* _next_top_at_mark_start;
309 // If a collection pause is in progress, this is the top at the start
310 // of that pause.
312 void init_top_at_mark_start() {
313 assert(_prev_marked_bytes == 0 &&
314 _next_marked_bytes == 0,
315 "Must be called after zero_marked_bytes.");
316 HeapWord* bot = bottom();
317 _prev_top_at_mark_start = bot;
318 _next_top_at_mark_start = bot;
319 }
321 void set_young_type(YoungType new_type) {
322 //assert(_young_type != new_type, "setting the same type" );
323 // TODO: add more assertions here
324 _young_type = new_type;
325 }
327 // Cached attributes used in the collection set policy information
329 // The RSet length that was added to the total value
330 // for the collection set.
331 size_t _recorded_rs_length;
333 // The predicted elapsed time that was added to total value
334 // for the collection set.
335 double _predicted_elapsed_time_ms;
337 // The predicted number of bytes to copy that was added to
338 // the total value for the collection set.
339 size_t _predicted_bytes_to_copy;
341 public:
342 HeapRegion(uint hrs_index,
343 G1BlockOffsetSharedArray* sharedOffsetArray,
344 MemRegion mr);
346 static int LogOfHRGrainBytes;
347 static int LogOfHRGrainWords;
349 static size_t GrainBytes;
350 static size_t GrainWords;
351 static size_t CardsPerRegion;
353 static size_t align_up_to_region_byte_size(size_t sz) {
354 return (sz + (size_t) GrainBytes - 1) &
355 ~((1 << (size_t) LogOfHRGrainBytes) - 1);
356 }
358 static size_t max_region_size();
360 // It sets up the heap region size (GrainBytes / GrainWords), as
361 // well as other related fields that are based on the heap region
362 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
363 // CardsPerRegion). All those fields are considered constant
364 // throughout the JVM's execution, therefore they should only be set
365 // up once during initialization time.
366 static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
368 enum ClaimValues {
369 InitialClaimValue = 0,
370 FinalCountClaimValue = 1,
371 NoteEndClaimValue = 2,
372 ScrubRemSetClaimValue = 3,
373 ParVerifyClaimValue = 4,
374 RebuildRSClaimValue = 5,
375 ParEvacFailureClaimValue = 6,
376 AggregateCountClaimValue = 7,
377 VerifyCountClaimValue = 8,
378 ParMarkRootClaimValue = 9
379 };
381 inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
382 assert(is_young(), "we can only skip BOT updates on young regions");
383 return ContiguousSpace::par_allocate(word_size);
384 }
385 inline HeapWord* allocate_no_bot_updates(size_t word_size) {
386 assert(is_young(), "we can only skip BOT updates on young regions");
387 return ContiguousSpace::allocate(word_size);
388 }
390 // If this region is a member of a HeapRegionSeq, the index in that
391 // sequence, otherwise -1.
392 uint hrs_index() const { return _hrs_index; }
394 // The number of bytes marked live in the region in the last marking phase.
395 size_t marked_bytes() { return _prev_marked_bytes; }
396 size_t live_bytes() {
397 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
398 }
400 // The number of bytes counted in the next marking.
401 size_t next_marked_bytes() { return _next_marked_bytes; }
402 // The number of bytes live wrt the next marking.
403 size_t next_live_bytes() {
404 return
405 (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
406 }
408 // A lower bound on the amount of garbage bytes in the region.
409 size_t garbage_bytes() {
410 size_t used_at_mark_start_bytes =
411 (prev_top_at_mark_start() - bottom()) * HeapWordSize;
412 assert(used_at_mark_start_bytes >= marked_bytes(),
413 "Can't mark more than we have.");
414 return used_at_mark_start_bytes - marked_bytes();
415 }
417 // Return the amount of bytes we'll reclaim if we collect this
418 // region. This includes not only the known garbage bytes in the
419 // region but also any unallocated space in it, i.e., [top, end),
420 // since it will also be reclaimed if we collect the region.
421 size_t reclaimable_bytes() {
422 size_t known_live_bytes = live_bytes();
423 assert(known_live_bytes <= capacity(), "sanity");
424 return capacity() - known_live_bytes;
425 }
427 // An upper bound on the number of live bytes in the region.
428 size_t max_live_bytes() { return used() - garbage_bytes(); }
430 void add_to_marked_bytes(size_t incr_bytes) {
431 _next_marked_bytes = _next_marked_bytes + incr_bytes;
432 assert(_next_marked_bytes <= used(), "invariant" );
433 }
435 void zero_marked_bytes() {
436 _prev_marked_bytes = _next_marked_bytes = 0;
437 }
439 bool isHumongous() const { return _humongous_type != NotHumongous; }
440 bool startsHumongous() const { return _humongous_type == StartsHumongous; }
441 bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
442 // For a humongous region, region in which it starts.
443 HeapRegion* humongous_start_region() const {
444 return _humongous_start_region;
445 }
447 // Return the number of distinct regions that are covered by this region:
448 // 1 if the region is not humongous, >= 1 if the region is humongous.
449 uint region_num() const {
450 if (!isHumongous()) {
451 return 1U;
452 } else {
453 assert(startsHumongous(), "doesn't make sense on HC regions");
454 assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
455 return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
456 }
457 }
459 // Return the index + 1 of the last HC regions that's associated
460 // with this HS region.
461 uint last_hc_index() const {
462 assert(startsHumongous(), "don't call this otherwise");
463 return hrs_index() + region_num();
464 }
466 // Same as Space::is_in_reserved, but will use the original size of the region.
467 // The original size is different only for start humongous regions. They get
468 // their _end set up to be the end of the last continues region of the
469 // corresponding humongous object.
470 bool is_in_reserved_raw(const void* p) const {
471 return _bottom <= p && p < _orig_end;
472 }
474 // Makes the current region be a "starts humongous" region, i.e.,
475 // the first region in a series of one or more contiguous regions
476 // that will contain a single "humongous" object. The two parameters
477 // are as follows:
478 //
479 // new_top : The new value of the top field of this region which
480 // points to the end of the humongous object that's being
481 // allocated. If there is more than one region in the series, top
482 // will lie beyond this region's original end field and on the last
483 // region in the series.
484 //
485 // new_end : The new value of the end field of this region which
486 // points to the end of the last region in the series. If there is
487 // one region in the series (namely: this one) end will be the same
488 // as the original end of this region.
489 //
490 // Updating top and end as described above makes this region look as
491 // if it spans the entire space taken up by all the regions in the
492 // series and an single allocation moved its top to new_top. This
493 // ensures that the space (capacity / allocated) taken up by all
494 // humongous regions can be calculated by just looking at the
495 // "starts humongous" regions and by ignoring the "continues
496 // humongous" regions.
497 void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
499 // Makes the current region be a "continues humongous'
500 // region. first_hr is the "start humongous" region of the series
501 // which this region will be part of.
502 void set_continuesHumongous(HeapRegion* first_hr);
504 // Unsets the humongous-related fields on the region.
505 void set_notHumongous();
507 // If the region has a remembered set, return a pointer to it.
508 HeapRegionRemSet* rem_set() const {
509 return _rem_set;
510 }
512 // True iff the region is in current collection_set.
513 bool in_collection_set() const {
514 return _in_collection_set;
515 }
516 void set_in_collection_set(bool b) {
517 _in_collection_set = b;
518 }
519 HeapRegion* next_in_collection_set() {
520 assert(in_collection_set(), "should only invoke on member of CS.");
521 assert(_next_in_special_set == NULL ||
522 _next_in_special_set->in_collection_set(),
523 "Malformed CS.");
524 return _next_in_special_set;
525 }
526 void set_next_in_collection_set(HeapRegion* r) {
527 assert(in_collection_set(), "should only invoke on member of CS.");
528 assert(r == NULL || r->in_collection_set(), "Malformed CS.");
529 _next_in_special_set = r;
530 }
532 // Methods used by the HeapRegionSetBase class and subclasses.
534 // Getter and setter for the next field used to link regions into
535 // linked lists.
536 HeapRegion* next() { return _next; }
538 void set_next(HeapRegion* next) { _next = next; }
540 // Every region added to a set is tagged with a reference to that
541 // set. This is used for doing consistency checking to make sure that
542 // the contents of a set are as they should be and it's only
543 // available in non-product builds.
544 #ifdef ASSERT
545 void set_containing_set(HeapRegionSetBase* containing_set) {
546 assert((containing_set == NULL && _containing_set != NULL) ||
547 (containing_set != NULL && _containing_set == NULL),
548 err_msg("containing_set: "PTR_FORMAT" "
549 "_containing_set: "PTR_FORMAT,
550 containing_set, _containing_set));
552 _containing_set = containing_set;
553 }
555 HeapRegionSetBase* containing_set() { return _containing_set; }
556 #else // ASSERT
557 void set_containing_set(HeapRegionSetBase* containing_set) { }
559 // containing_set() is only used in asserts so there's no reason
560 // to provide a dummy version of it.
561 #endif // ASSERT
563 // If we want to remove regions from a list in bulk we can simply tag
564 // them with the pending_removal tag and call the
565 // remove_all_pending() method on the list.
567 bool pending_removal() { return _pending_removal; }
569 void set_pending_removal(bool pending_removal) {
570 if (pending_removal) {
571 assert(!_pending_removal && containing_set() != NULL,
572 "can only set pending removal to true if it's false and "
573 "the region belongs to a region set");
574 } else {
575 assert( _pending_removal && containing_set() == NULL,
576 "can only set pending removal to false if it's true and "
577 "the region does not belong to a region set");
578 }
580 _pending_removal = pending_removal;
581 }
583 HeapRegion* get_next_young_region() { return _next_young_region; }
584 void set_next_young_region(HeapRegion* hr) {
585 _next_young_region = hr;
586 }
588 HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
589 HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
590 void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
591 bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
593 HeapWord* orig_end() { return _orig_end; }
595 // Allows logical separation between objects allocated before and after.
596 void save_marks();
598 // Reset HR stuff to default values.
599 void hr_clear(bool par, bool clear_space);
600 void par_clear();
602 // Get the start of the unmarked area in this region.
603 HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
604 HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
606 // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
607 // allocated in the current region before the last call to "save_mark".
608 void oop_before_save_marks_iterate(ExtendedOopClosure* cl);
610 // Note the start or end of marking. This tells the heap region
611 // that the collector is about to start or has finished (concurrently)
612 // marking the heap.
614 // Notify the region that concurrent marking is starting. Initialize
615 // all fields related to the next marking info.
616 inline void note_start_of_marking();
618 // Notify the region that concurrent marking has finished. Copy the
619 // (now finalized) next marking info fields into the prev marking
620 // info fields.
621 inline void note_end_of_marking();
623 // Notify the region that it will be used as to-space during a GC
624 // and we are about to start copying objects into it.
625 inline void note_start_of_copying(bool during_initial_mark);
627 // Notify the region that it ceases being to-space during a GC and
628 // we will not copy objects into it any more.
629 inline void note_end_of_copying(bool during_initial_mark);
631 // Notify the region that we are about to start processing
632 // self-forwarded objects during evac failure handling.
633 void note_self_forwarding_removal_start(bool during_initial_mark,
634 bool during_conc_mark);
636 // Notify the region that we have finished processing self-forwarded
637 // objects during evac failure handling.
638 void note_self_forwarding_removal_end(bool during_initial_mark,
639 bool during_conc_mark,
640 size_t marked_bytes);
642 // Returns "false" iff no object in the region was allocated when the
643 // last mark phase ended.
644 bool is_marked() { return _prev_top_at_mark_start != bottom(); }
646 void reset_during_compaction() {
647 assert(isHumongous() && startsHumongous(),
648 "should only be called for starts humongous regions");
650 zero_marked_bytes();
651 init_top_at_mark_start();
652 }
654 void calc_gc_efficiency(void);
655 double gc_efficiency() { return _gc_efficiency;}
657 bool is_young() const { return _young_type != NotYoung; }
658 bool is_survivor() const { return _young_type == Survivor; }
660 int young_index_in_cset() const { return _young_index_in_cset; }
661 void set_young_index_in_cset(int index) {
662 assert( (index == -1) || is_young(), "pre-condition" );
663 _young_index_in_cset = index;
664 }
666 int age_in_surv_rate_group() {
667 assert( _surv_rate_group != NULL, "pre-condition" );
668 assert( _age_index > -1, "pre-condition" );
669 return _surv_rate_group->age_in_group(_age_index);
670 }
672 void record_surv_words_in_group(size_t words_survived) {
673 assert( _surv_rate_group != NULL, "pre-condition" );
674 assert( _age_index > -1, "pre-condition" );
675 int age_in_group = age_in_surv_rate_group();
676 _surv_rate_group->record_surviving_words(age_in_group, words_survived);
677 }
679 int age_in_surv_rate_group_cond() {
680 if (_surv_rate_group != NULL)
681 return age_in_surv_rate_group();
682 else
683 return -1;
684 }
686 SurvRateGroup* surv_rate_group() {
687 return _surv_rate_group;
688 }
690 void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
691 assert( surv_rate_group != NULL, "pre-condition" );
692 assert( _surv_rate_group == NULL, "pre-condition" );
693 assert( is_young(), "pre-condition" );
695 _surv_rate_group = surv_rate_group;
696 _age_index = surv_rate_group->next_age_index();
697 }
699 void uninstall_surv_rate_group() {
700 if (_surv_rate_group != NULL) {
701 assert( _age_index > -1, "pre-condition" );
702 assert( is_young(), "pre-condition" );
704 _surv_rate_group = NULL;
705 _age_index = -1;
706 } else {
707 assert( _age_index == -1, "pre-condition" );
708 }
709 }
711 void set_young() { set_young_type(Young); }
713 void set_survivor() { set_young_type(Survivor); }
715 void set_not_young() { set_young_type(NotYoung); }
717 // Determine if an object has been allocated since the last
718 // mark performed by the collector. This returns true iff the object
719 // is within the unmarked area of the region.
720 bool obj_allocated_since_prev_marking(oop obj) const {
721 return (HeapWord *) obj >= prev_top_at_mark_start();
722 }
723 bool obj_allocated_since_next_marking(oop obj) const {
724 return (HeapWord *) obj >= next_top_at_mark_start();
725 }
727 // For parallel heapRegion traversal.
728 bool claimHeapRegion(int claimValue);
729 jint claim_value() { return _claimed; }
730 // Use this carefully: only when you're sure no one is claiming...
731 void set_claim_value(int claimValue) { _claimed = claimValue; }
733 // Returns the "evacuation_failed" property of the region.
734 bool evacuation_failed() { return _evacuation_failed; }
736 // Sets the "evacuation_failed" property of the region.
737 void set_evacuation_failed(bool b) {
738 _evacuation_failed = b;
740 if (b) {
741 _next_marked_bytes = 0;
742 }
743 }
745 // Requires that "mr" be entirely within the region.
746 // Apply "cl->do_object" to all objects that intersect with "mr".
747 // If the iteration encounters an unparseable portion of the region,
748 // or if "cl->abort()" is true after a closure application,
749 // terminate the iteration and return the address of the start of the
750 // subregion that isn't done. (The two can be distinguished by querying
751 // "cl->abort()".) Return of "NULL" indicates that the iteration
752 // completed.
753 HeapWord*
754 object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
756 // filter_young: if true and the region is a young region then we
757 // skip the iteration.
758 // card_ptr: if not NULL, and we decide that the card is not young
759 // and we iterate over it, we'll clean the card before we start the
760 // iteration.
761 HeapWord*
762 oops_on_card_seq_iterate_careful(MemRegion mr,
763 FilterOutOfRegionClosure* cl,
764 bool filter_young,
765 jbyte* card_ptr);
767 // A version of block start that is guaranteed to find *some* block
768 // boundary at or before "p", but does not object iteration, and may
769 // therefore be used safely when the heap is unparseable.
770 HeapWord* block_start_careful(const void* p) const {
771 return _offsets.block_start_careful(p);
772 }
774 // Requires that "addr" is within the region. Returns the start of the
775 // first ("careful") block that starts at or after "addr", or else the
776 // "end" of the region if there is no such block.
777 HeapWord* next_block_start_careful(HeapWord* addr);
779 size_t recorded_rs_length() const { return _recorded_rs_length; }
780 double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
781 size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
783 void set_recorded_rs_length(size_t rs_length) {
784 _recorded_rs_length = rs_length;
785 }
787 void set_predicted_elapsed_time_ms(double ms) {
788 _predicted_elapsed_time_ms = ms;
789 }
791 void set_predicted_bytes_to_copy(size_t bytes) {
792 _predicted_bytes_to_copy = bytes;
793 }
795 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
796 virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
797 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
799 virtual CompactibleSpace* next_compaction_space() const;
801 virtual void reset_after_compaction();
803 // Routines for managing a list of code roots (attached to the
804 // this region's RSet) that point into this heap region.
805 void add_strong_code_root(nmethod* nm);
806 void remove_strong_code_root(nmethod* nm);
808 // During a collection, migrate the successfully evacuated
809 // strong code roots that referenced into this region to the
810 // new regions that they now point into. Unsuccessfully
811 // evacuated code roots are not migrated.
812 void migrate_strong_code_roots();
814 // Applies blk->do_code_blob() to each of the entries in
815 // the strong code roots list for this region
816 void strong_code_roots_do(CodeBlobClosure* blk) const;
818 // Verify that the entries on the strong code root list for this
819 // region are live and include at least one pointer into this region.
820 void verify_strong_code_roots(VerifyOption vo, bool* failures) const;
822 void print() const;
823 void print_on(outputStream* st) const;
825 // vo == UsePrevMarking -> use "prev" marking information,
826 // vo == UseNextMarking -> use "next" marking information
827 // vo == UseMarkWord -> use the mark word in the object header
828 //
829 // NOTE: Only the "prev" marking information is guaranteed to be
830 // consistent most of the time, so most calls to this should use
831 // vo == UsePrevMarking.
832 // Currently, there is only one case where this is called with
833 // vo == UseNextMarking, which is to verify the "next" marking
834 // information at the end of remark.
835 // Currently there is only one place where this is called with
836 // vo == UseMarkWord, which is to verify the marking during a
837 // full GC.
838 void verify(VerifyOption vo, bool *failures) const;
840 // Override; it uses the "prev" marking information
841 virtual void verify() const;
842 };
844 // HeapRegionClosure is used for iterating over regions.
845 // Terminates the iteration when the "doHeapRegion" method returns "true".
846 class HeapRegionClosure : public StackObj {
847 friend class HeapRegionSeq;
848 friend class G1CollectedHeap;
850 bool _complete;
851 void incomplete() { _complete = false; }
853 public:
854 HeapRegionClosure(): _complete(true) {}
856 // Typically called on each region until it returns true.
857 virtual bool doHeapRegion(HeapRegion* r) = 0;
859 // True after iteration if the closure was applied to all heap regions
860 // and returned "false" in all cases.
861 bool complete() { return _complete; }
862 };
864 #endif // INCLUDE_ALL_GCS
866 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP