Fri, 10 Oct 2014 15:51:58 +0200
8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks: these benchmarks never touch that memory themselves, so without the eager zeroing the operating system never actually commits those pages. The fix is to skip the initialization entirely whenever the requested initialization value matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
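
The idea, as a minimal sketch (hypothetical helper name; the actual change
is in the code that commits G1's auxiliary data structures):

    static void initialize_aux_data(char* base, size_t size_in_bytes, char init_value) {
      if (init_value == 0) {
        // Freshly committed pages already read as zero, and are not
        // physically backed until touched; writing zeroes here would
        // needlessly force the OS to commit them.
        return;
      }
      memset(base, init_value, size_in_bytes);
    }
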
/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP

#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1BlockOffsetTable.hpp"
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/heapRegionType.hpp"
#include "gc_implementation/g1/survRateGroup.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/space.inline.hpp"
#include "memory/watermark.hpp"
#include "utilities/macros.hpp"

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.

class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;
class HeapRegionSetBase;
class nmethod;

#define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \
                (_hr_)->hrm_index(), \
                (_hr_)->get_short_type_str(), \
                p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
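
// Example use (illustrative; "hr" stands for some HeapRegion*):
//   gclog_or_tty->print_cr("region: " HR_FORMAT, HR_FORMAT_PARAMS(hr));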

// sentinel value for hrm_index
#define G1_NO_HRM_INDEX ((uint) -1)

// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
// sets.

class HeapRegionDCTOC : public DirtyCardToOopClosure {
public:
  // Specification of possible DirtyCardToOopClosure filtering.
  enum FilterKind {
    NoFilterKind,
    IntoCSFilterKind,
    OutOfRegionFilterKind
  };

protected:
  HeapRegion* _hr;
  FilterKind _fk;
  G1CollectedHeap* _g1;

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
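
  // The base walking scheme is conceptually (illustrative sketch only):
  //
  //   while (bottom < top) {
  //     if (_hr->block_is_obj(bottom)) {
  //       oop(bottom)->oop_iterate(_cl, mr);  // apply closure to oops in the object
  //     }
  //     bottom += _hr->block_size(bottom);    // advance to the next block start
  //   }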

public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
                  HeapRegion* hr, ExtendedOopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  FilterKind fk);
};

// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.

// The idea behind time stamps is the following. Doing a save_marks on
// all regions at every GC pause is time consuming (if I remember
// correctly, 10ms or so). So, we would like to do that only for regions
// that are GC alloc regions. To achieve this, we use time
// stamps. For every evacuation pause, G1CollectedHeap generates a
// unique time stamp (essentially a counter that gets
// incremented). Every time we want to call save_marks on a region,
// we set the saved_mark_word to top and also copy the current GC
// time stamp to the time stamp field of the space. Reading the
// saved_mark_word involves checking the time stamp of the
// region. If it is the same as the current GC time stamp, then we
// can safely read the saved_mark_word field, as it is valid. If the
// time stamp of the region is not the same as the current GC time
// stamp, then we instead read top, as the saved_mark_word field is
// invalid. Time stamps (on the regions and also on the
// G1CollectedHeap) are reset at every cleanup (we iterate over
// the regions anyway) and at the end of a Full GC. The current scheme
// that uses sequential unsigned ints will fail only if we have close to
// 4 billion evacuation pauses between two cleanups, which is _highly_
// unlikely.
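
// A minimal sketch of the read path described above (illustrative; the
// real implementation lives in heapRegion.cpp):
//
//   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
//     G1CollectedHeap* g1h = G1CollectedHeap::heap();
//     if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
//       return top();                    // stale stamp: saved mark is invalid
//     } else {
//       return Space::saved_mark_word(); // stamp matches: saved mark is valid
//     }
//   }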
class G1OffsetTableContigSpace: public CompactibleSpace {
  friend class VMStructs;
  HeapWord* _top;
protected:
  G1BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  volatile unsigned _gc_time_stamp;
  // When we need to retire an allocation region, while other threads
  // are also concurrently trying to allocate into it, we typically
  // allocate a dummy object at the end of the region to ensure that
  // no more allocations can take place in it. However, sometimes we
  // want to know where the end of the last "real" object we allocated
  // into the region was and this is what this field keeps track of.
  HeapWord* _pre_dummy_top;
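
  // Retiring a region might then look like this (illustrative sketch;
  // the actual logic lives in the allocation-region code):
  //
  //   size_t remaining = pointer_delta(end(), top());
  //   HeapWord* dummy = par_allocate(remaining);    // bump top to end
  //   if (dummy != NULL) {
  //     set_pre_dummy_top(dummy);                   // end of last "real" object
  //     CollectedHeap::fill_with_object(dummy, remaining);
  //   }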

public:
  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr);

  void set_top(HeapWord* value) { _top = value; }
  HeapWord* top() const { return _top; }

protected:
  // Reset the G1OffsetTableContigSpace.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  HeapWord** top_addr() { return &_top; }
  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
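
  // par_allocate_impl is conceptually a lock-free bump-the-pointer loop
  // (illustrative sketch of the technique, not the exact implementation):
  //
  //   do {
  //     HeapWord* obj = top();
  //     if (pointer_delta(end_value, obj) < word_size) return NULL;
  //     HeapWord* res =
  //       (HeapWord*) Atomic::cmpxchg_ptr(obj + word_size, top_addr(), obj);
  //     if (res == obj) return obj;  // CAS won: we own [obj, obj + word_size)
  //   } while (true);                // CAS lost: reload top and retry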

public:
  void reset_after_compaction() { set_top(compaction_top()); }

  size_t used() const { return byte_size(bottom(), top()); }
  size_t free() const { return byte_size(top(), end()); }
  bool is_free_block(const HeapWord* p) const { return p >= top(); }

  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  void object_iterate(ObjectClosure* blk);
  void safe_object_iterate(ObjectClosure* blk);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  virtual HeapWord* saved_mark_word() const;
  void record_top_and_timestamp();
  void reset_gc_time_stamp() { _gc_time_stamp = 0; }
  unsigned get_gc_time_stamp() { return _gc_time_stamp; }

  // See the comment above in the declaration of _pre_dummy_top for an
  // explanation of what it is.
  void set_pre_dummy_top(HeapWord* pre_dummy_top) {
    assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
    _pre_dummy_top = pre_dummy_top;
  }
  HeapWord* pre_dummy_top() {
    return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
  }
  void reset_pre_dummy_top() { _pre_dummy_top = NULL; }

  virtual void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  void prepare_for_compaction(CompactPoint* cp);

  // Add offset table update.
  virtual HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print() const;

  void reset_bot() {
    _offsets.reset_bot();
  }

  void print_bot_on(outputStream* out) {
    _offsets.print_on(out);
  }
};

class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
private:

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

protected:
  // The index of this region in the heap region sequence.
  uint _hrm_index;

  AllocationContext_t _allocation_context;

  HeapRegionType _type;

  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;

  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets,
  // each represented as a linked list through the field below. Currently,
  // there is only one set:
  //   The collection set.
  HeapRegion* _next_in_special_set;

  // next region in the young "generation" region set
  HeapRegion* _next_young_region;

  // Next region whose cards need cleaning
  HeapRegion* _next_dirty_cards_region;

  // Fields used by the HeapRegionSetBase class and subclasses.
  HeapRegion* _next;
  HeapRegion* _prev;
#ifdef ASSERT
  HeapRegionSetBase* _containing_set;
#endif // ASSERT

  // For parallel heapRegion traversal.
  jint _claimed;

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.

  // The calculated GC efficiency of the region.
  double _gc_efficiency;

  int  _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int  _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
  // "next" is the top at the start of the in-progress marking (if any.)
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
  // If a collection pause is in progress, this is the top at the start
  // of that pause.

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to the total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;

public:
  HeapRegion(uint hrm_index,
             G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr);

  // Initializing the HeapRegion not only resets the data structure, but also
  // resets the BOT for that heap region.
  // The default value for clear_space means that we will do the clearing
  // ourselves if there is clearing to be done. We also always mangle the space.
  virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);

  static int    LogOfHRGrainBytes;
  static int    LogOfHRGrainWords;

  static size_t GrainBytes;
  static size_t GrainWords;
  static size_t CardsPerRegion;

  static size_t align_up_to_region_byte_size(size_t sz) {
    return (sz + (size_t) GrainBytes - 1) &
                                 ~((1 << (size_t) LogOfHRGrainBytes) - 1);
  }
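
  // For example (illustrative): with a 1M region size (GrainBytes == 1M,
  // LogOfHRGrainBytes == 20), align_up_to_region_byte_size(1M + 1)
  // returns 2M, while an exact multiple such as 2M is returned unchanged.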

  static size_t max_region_size();

  // It sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);

  enum ClaimValues {
    InitialClaimValue          = 0,
    FinalCountClaimValue       = 1,
    NoteEndClaimValue          = 2,
    ScrubRemSetClaimValue      = 3,
    ParVerifyClaimValue        = 4,
    RebuildRSClaimValue        = 5,
    ParEvacFailureClaimValue   = 6,
    AggregateCountClaimValue   = 7,
    VerifyCountClaimValue      = 8,
    ParMarkRootClaimValue      = 9
  };

  // All allocated blocks are occupied by objects in a HeapRegion
  bool block_is_obj(const HeapWord* p) const;

  // Returns the object size for all valid block starts
  // and the amount of unallocated words if called on top()
  size_t block_size(const HeapWord* p) const;

  inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
  inline HeapWord* allocate_no_bot_updates(size_t word_size);

  // If this region is a member of a HeapRegionManager, the index in that
  // sequence, otherwise -1.
  uint hrm_index() const { return _hrm_index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes()    { return _prev_marked_bytes; }
  size_t live_bytes() {
    return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
  }

  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return
      (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
  }

  // A lower bound on the amount of garbage bytes in the region.
  size_t garbage_bytes() {
    size_t used_at_mark_start_bytes =
      (prev_top_at_mark_start() - bottom()) * HeapWordSize;
    assert(used_at_mark_start_bytes >= marked_bytes(),
           "Can't mark more than we have.");
    return used_at_mark_start_bytes - marked_bytes();
  }

  // Return the amount of bytes we'll reclaim if we collect this
  // region. This includes not only the known garbage bytes in the
  // region but also any unallocated space in it, i.e., [top, end),
  // since it will also be reclaimed if we collect the region.
  size_t reclaimable_bytes() {
    size_t known_live_bytes = live_bytes();
    assert(known_live_bytes <= capacity(), "sanity");
    return capacity() - known_live_bytes;
  }

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }

  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
    assert(_next_marked_bytes <= used(), "invariant" );
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  const char* get_type_str() const { return _type.get_str(); }
  const char* get_short_type_str() const { return _type.get_short_str(); }

  bool is_free() const { return _type.is_free(); }

  bool is_young()    const { return _type.is_young(); }
  bool is_eden()     const { return _type.is_eden(); }
  bool is_survivor() const { return _type.is_survivor(); }

  bool isHumongous() const { return _type.is_humongous(); }
  bool startsHumongous() const { return _type.is_starts_humongous(); }
  bool continuesHumongous() const { return _type.is_continues_humongous(); }

  bool is_old() const { return _type.is_old(); }

  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
  }

  // Return the number of distinct regions that are covered by this region:
  // 1 if the region is not humongous, >= 1 if the region is humongous.
  uint region_num() const {
    if (!isHumongous()) {
      return 1U;
    } else {
      assert(startsHumongous(), "doesn't make sense on HC regions");
      assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
      return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
    }
  }

  // Return the index + 1 of the last HC region that's associated
  // with this HS region.
  uint last_hc_index() const {
    assert(startsHumongous(), "don't call this otherwise");
    return hrm_index() + region_num();
  }

  // Same as Space::is_in_reserved, but will use the original size of the region.
  // The original size is different only for start humongous regions. They get
  // their _end set up to be the end of the last continues region of the
  // corresponding humongous object.
  bool is_in_reserved_raw(const void* p) const {
    return _bottom <= p && p < _orig_end;
  }

  // Makes the current region be a "starts humongous" region, i.e.,
  // the first region in a series of one or more contiguous regions
  // that will contain a single "humongous" object. The two parameters
  // are as follows:
  //
  // new_top : The new value of the top field of this region which
  // points to the end of the humongous object that's being
  // allocated. If there is more than one region in the series, top
  // will lie beyond this region's original end field and on the last
  // region in the series.
  //
  // new_end : The new value of the end field of this region which
  // points to the end of the last region in the series. If there is
  // one region in the series (namely: this one) end will be the same
  // as the original end of this region.
  //
  // Updating top and end as described above makes this region look as
  // if it spans the entire space taken up by all the regions in the
  // series and a single allocation moved its top to new_top. This
  // ensures that the space (capacity / allocated) taken up by all
  // humongous regions can be calculated by just looking at the
  // "starts humongous" regions and by ignoring the "continues
  // humongous" regions.
  void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
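
  // For example (illustrative): a humongous object spanning two and a
  // half regions starts in region R. R becomes "starts humongous" with
  // top set inside the third region (at the object's end) and end set to
  // the end of the third region; the two regions following R become
  // "continues humongous".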

  // Makes the current region be a "continues humongous"
  // region. first_hr is the "start humongous" region of the series
  // which this region will be part of.
  void set_continuesHumongous(HeapRegion* first_hr);

  // Unsets the humongous-related fields on the region.
  void clear_humongous();

  // If the region has a remembered set, return a pointer to it.
  HeapRegionRemSet* rem_set() const {
    return _rem_set;
  }

  // True iff the region is in current collection_set.
  bool in_collection_set() const {
    return _in_collection_set;
  }
  void set_in_collection_set(bool b) {
    _in_collection_set = b;
  }
  HeapRegion* next_in_collection_set() {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->in_collection_set(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_in_collection_set(HeapRegion* r) {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(r == NULL || r->in_collection_set(), "Malformed CS.");
    _next_in_special_set = r;
  }

  void set_allocation_context(AllocationContext_t context) {
    _allocation_context = context;
  }

  AllocationContext_t allocation_context() const {
    return _allocation_context;
  }

  // Methods used by the HeapRegionSetBase class and subclasses.

  // Getter and setter for the next and prev fields used to link regions into
  // linked lists.
  HeapRegion* next()              { return _next; }
  HeapRegion* prev()              { return _prev; }

  void set_next(HeapRegion* next) { _next = next; }
  void set_prev(HeapRegion* prev) { _prev = prev; }

  // Every region added to a set is tagged with a reference to that
  // set. This is used for doing consistency checking to make sure that
  // the contents of a set are as they should be and it's only
  // available in non-product builds.
#ifdef ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) {
    assert((containing_set == NULL && _containing_set != NULL) ||
           (containing_set != NULL && _containing_set == NULL),
           err_msg("containing_set: "PTR_FORMAT" "
                   "_containing_set: "PTR_FORMAT,
                   p2i(containing_set), p2i(_containing_set)));

    _containing_set = containing_set;
  }

  HeapRegionSetBase* containing_set() { return _containing_set; }
#else // ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) { }

  // containing_set() is only used in asserts so there's no reason
  // to provide a dummy version of it.
#endif // ASSERT

  HeapRegion* get_next_young_region() { return _next_young_region; }
  void set_next_young_region(HeapRegion* hr) {
    _next_young_region = hr;
  }

  HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
  HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
  void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
  bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

  HeapWord* orig_end() const { return _orig_end; }

  // Reset HR stuff to default values.
  void hr_clear(bool par, bool clear_space, bool locked = false);
  void par_clear();

  // Get the start of the unmarked area in this region.
  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }

  // Note the start or end of marking. This tells the heap region
  // that the collector is about to start or has finished (concurrently)
  // marking the heap.

  // Notify the region that concurrent marking is starting. Initialize
  // all fields related to the next marking info.
  inline void note_start_of_marking();

  // Notify the region that concurrent marking has finished. Copy the
  // (now finalized) next marking info fields into the prev marking
  // info fields.
  inline void note_end_of_marking();

  // Notify the region that it will be used as to-space during a GC
  // and we are about to start copying objects into it.
  inline void note_start_of_copying(bool during_initial_mark);

  // Notify the region that it ceases being to-space during a GC and
  // we will not copy objects into it any more.
  inline void note_end_of_copying(bool during_initial_mark);

  // Notify the region that we are about to start processing
  // self-forwarded objects during evac failure handling.
  void note_self_forwarding_removal_start(bool during_initial_mark,
                                          bool during_conc_mark);

  // Notify the region that we have finished processing self-forwarded
  // objects during evac failure handling.
  void note_self_forwarding_removal_end(bool during_initial_mark,
                                        bool during_conc_mark,
                                        size_t marked_bytes);

  // Returns "false" iff no object in the region was allocated when the
  // last mark phase ended.
  bool is_marked() { return _prev_top_at_mark_start != bottom(); }

  void reset_during_compaction() {
    assert(isHumongous() && startsHumongous(),
           "should only be called for starts humongous regions");

    zero_marked_bytes();
    init_top_at_mark_start();
  }

  void calc_gc_efficiency(void);
  double gc_efficiency() { return _gc_efficiency; }

  int  young_index_in_cset() const { return _young_index_in_cset; }
  void set_young_index_in_cset(int index) {
    assert( (index == -1) || is_young(), "pre-condition" );
    _young_index_in_cset = index;
  }

  int age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    return _surv_rate_group->age_in_group(_age_index);
  }

  void record_surv_words_in_group(size_t words_survived) {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    int age_in_group = age_in_surv_rate_group();
    _surv_rate_group->record_surviving_words(age_in_group, words_survived);
  }

  int age_in_surv_rate_group_cond() {
    if (_surv_rate_group != NULL)
      return age_in_surv_rate_group();
    else
      return -1;
  }

  SurvRateGroup* surv_rate_group() {
    return _surv_rate_group;
  }

  void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
    assert( surv_rate_group != NULL, "pre-condition" );
    assert( _surv_rate_group == NULL, "pre-condition" );
    assert( is_young(), "pre-condition" );

    _surv_rate_group = surv_rate_group;
    _age_index = surv_rate_group->next_age_index();
  }

  void uninstall_surv_rate_group() {
    if (_surv_rate_group != NULL) {
      assert( _age_index > -1, "pre-condition" );
      assert( is_young(), "pre-condition" );

      _surv_rate_group = NULL;
      _age_index = -1;
    } else {
      assert( _age_index == -1, "pre-condition" );
    }
  }

  void set_free() { _type.set_free(); }

  void set_eden()        { _type.set_eden(); }
  void set_eden_pre_gc() { _type.set_eden_pre_gc(); }
  void set_survivor()    { _type.set_survivor(); }

  void set_old() { _type.set_old(); }

  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord*) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord*) obj >= next_top_at_mark_start();
  }

  // For parallel heapRegion traversal.
  bool claimHeapRegion(int claimValue);
  jint claim_value() { return _claimed; }
  // Use this carefully: only when you're sure no one is claiming...
  void set_claim_value(int claimValue) { _claimed = claimValue; }
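
  // Claiming is conceptually a single compare-and-swap on _claimed
  // (illustrative sketch; the actual code lives in heapRegion.cpp):
  //
  //   bool claimHeapRegion(int claimValue) {
  //     jint current = _claimed;
  //     if (current != claimValue) {
  //       jint res = Atomic::cmpxchg((jint) claimValue, &_claimed, current);
  //       if (res == current) return true;  // this thread claimed the region
  //     }
  //     return false;                       // another thread claimed it first
  //   }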

  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      _next_marked_bytes = 0;
    }
  }

  // Requires that "mr" be entirely within the region.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // or if "cl->abort()" is true after a closure application,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. (The two can be distinguished by querying
  // "cl->abort()".) Return of "NULL" indicates that the iteration
  // completed.
  HeapWord*
  object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);

  // filter_young: if true and the region is a young region then we
  // skip the iteration.
  // card_ptr: if not NULL, and we decide that the card is not young
  // and we iterate over it, we'll clean the card before we start the
  // iteration.
  HeapWord*
  oops_on_card_seq_iterate_careful(MemRegion mr,
                                   FilterOutOfRegionClosure* cl,
                                   bool filter_young,
                                   jbyte* card_ptr);

  size_t recorded_rs_length() const        { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }

  virtual CompactibleSpace* next_compaction_space() const;

  virtual void reset_after_compaction();

  // Routines for managing a list of code roots (attached to this
  // region's RSet) that point into this heap region.
  void add_strong_code_root(nmethod* nm);
  void add_strong_code_root_locked(nmethod* nm);
  void remove_strong_code_root(nmethod* nm);

  // Applies blk->do_code_blob() to each of the entries in
  // the strong code roots list for this region
  void strong_code_roots_do(CodeBlobClosure* blk) const;

  // Verify that the entries on the strong code root list for this
  // region are live and include at least one pointer into this region.
  void verify_strong_code_roots(VerifyOption vo, bool* failures) const;

  void print() const;
  void print_on(outputStream* st) const;

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information
  // vo == UseMarkWord    -> use the mark word in the object header
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseMarkWord, which is to verify the marking during a
  // full GC.
  void verify(VerifyOption vo, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify() const;
};
790 };
792 // HeapRegionClosure is used for iterating over regions.
793 // Terminates the iteration when the "doHeapRegion" method returns "true".
794 class HeapRegionClosure : public StackObj {
795 friend class HeapRegionManager;
796 friend class G1CollectedHeap;
798 bool _complete;
799 void incomplete() { _complete = false; }
801 public:
802 HeapRegionClosure(): _complete(true) {}
804 // Typically called on each region until it returns true.
805 virtual bool doHeapRegion(HeapRegion* r) = 0;
807 // True after iteration if the closure was applied to all heap regions
808 // and returned "false" in all cases.
809 bool complete() { return _complete; }
810 };
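
// A typical closure might look like this (illustrative example only,
// not part of this file's API):
//
//   class CountLiveBytesClosure : public HeapRegionClosure {
//     size_t _live_bytes;
//   public:
//     CountLiveBytesClosure() : _live_bytes(0) {}
//     bool doHeapRegion(HeapRegion* r) {
//       _live_bytes += r->live_bytes();
//       return false;  // never abort: visit every region
//     }
//     size_t live_bytes() const { return _live_bytes; }
//   };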

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP