Fri, 12 Aug 2011 11:31:06 -0400
7039627: G1: avoid BOT updates for survivor allocations and dirty survivor regions incrementally
Summary: Refactor the allocation code during GC to use the G1AllocRegion abstraction. Use separate subclasses of G1AllocRegion for survivor and old regions. Avoid BOT updates and dirty survivor cards incrementally for the former.
Reviewed-by: brutisso, johnc, ysr
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/survRateGroup.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/space.inline.hpp"
#include "memory/watermark.hpp"

#ifndef SERIALGC

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.

class CompactibleSpace;
class ContiguousSpace;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;
class HeapRegionSetBase;

#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(_hr_) \
                (_hr_)->hrs_index(), \
                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
                (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
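
// Usage sketch (illustrative; gclog_or_tty is the usual HotSpot logging
// stream, and "hr" stands for any HeapRegion*):
//
//   gclog_or_tty->print_cr("retiring region "HR_FORMAT,
//                          HR_FORMAT_PARAMS(hr));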

// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
// sets.

class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
 public:
  // Specification of possible DirtyCardToOopClosure filtering.
  enum FilterKind {
    NoFilterKind,
    IntoCSFilterKind,
    OutOfRegionFilterKind
  };

 protected:
  HeapRegion* _hr;
  FilterKind _fk;
  G1CollectedHeap* _g1;

  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               OopClosure* cl);

  // We don't specialize this for FilteringClosure; filtering is handled by
  // the "FilterKind" mechanism. But we provide this to avoid a compiler
  // warning.
  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               FilteringClosure* cl) {
    HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
                                             (OopClosure*)cl);
  }

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
  }

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    Filtering_DCTOC::walk_mem_region(mr, bottom, top);
  }

 public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
                  HeapRegion* hr, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  FilterKind fk);
};

// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.

// The idea behind time stamps is the following. Doing a save_marks on
// all regions at every GC pause is time consuming (if I remember
// correctly, 10ms or so). So, we would like to do that only for regions
// that are GC alloc regions. To achieve this, we use time
// stamps. For every evacuation pause, G1CollectedHeap generates a
// unique time stamp (essentially a counter that gets
// incremented). Every time we want to call save_marks on a region,
// we set the saved_mark_word to top and also copy the current GC
// time stamp to the time stamp field of the space. Reading the
// saved_mark_word involves checking the time stamp of the
// region. If it is the same as the current GC time stamp, then we
// can safely read the saved_mark_word field, as it is valid. If the
// time stamp of the region is not the same as the current GC time
// stamp, then we instead read top, as the saved_mark_word field is
// invalid. Time stamps (on the regions and also on the
// G1CollectedHeap) are reset at every cleanup (we iterate over
// the regions anyway) and at the end of a Full GC. The current scheme
// that uses sequential unsigned ints will fail only if we have 4b
// evacuation pauses between two cleanups, which is _highly_ unlikely.
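
// A minimal sketch of the read protocol described above (illustrative
// only; the real accessor is G1OffsetTableContigSpace::saved_mark_word()
// below, and the free function and field access here are assumptions):
//
//   HeapWord* read_saved_mark(G1OffsetTableContigSpace* sp,
//                             unsigned curr_gc_time_stamp) {
//     if (sp->_gc_time_stamp == curr_gc_time_stamp) {
//       return sp->_saved_mark_word;  // stamped during this pause: valid
//     } else {
//       return sp->top();             // stale stamp: field is invalid
//     }
//   }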

class G1OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  G1BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  volatile unsigned _gc_time_stamp;
  // When we need to retire an allocation region, while other threads
  // are also concurrently trying to allocate into it, we typically
  // allocate a dummy object at the end of the region to ensure that
  // no more allocations can take place in it. However, sometimes we
  // want to know where the end of the last "real" object we allocated
  // into the region was, and this is what this field keeps track of.
  HeapWord* _pre_dummy_top;

 public:
  // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
  // assumed to contain zeros.
  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr, bool is_zeroed = false);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  virtual HeapWord* saved_mark_word() const;
  virtual void set_saved_mark();
  void reset_gc_time_stamp() { _gc_time_stamp = 0; }

  // See the comment above in the declaration of _pre_dummy_top for an
  // explanation of what it is.
  void set_pre_dummy_top(HeapWord* pre_dummy_top) {
    assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
    _pre_dummy_top = pre_dummy_top;
  }
  HeapWord* pre_dummy_top() {
    return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
  }
  void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
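
  // Sketch of the retirement dance described above (illustrative only;
  // the helper name and flow are assumptions, not the actual retire
  // path):
  //
  //   void retire_sketch(G1OffsetTableContigSpace* sp) {
  //     HeapWord* real_top = sp->top();
  //     size_t remaining = pointer_delta(sp->end(), real_top);
  //     if (remaining > 0) {
  //       sp->set_pre_dummy_top(real_top);  // end of the last real object
  //       HeapWord* dummy = sp->allocate(remaining);
  //       CollectedHeap::fill_with_object(dummy, remaining);  // plug it
  //     }
  //   }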

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print() const;

  void reset_bot() {
    _offsets.zero_bottom_entry();
    _offsets.initialize_threshold();
  }

  void update_bot_for_object(HeapWord* start, size_t word_size) {
    _offsets.alloc_block(start, word_size);
  }

  void print_bot_on(outputStream* out) {
    _offsets.print_on(out);
  }
};

class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

  enum HumongousType {
    NotHumongous = 0,
    StartsHumongous,
    ContinuesHumongous
  };

  // The next filter kind that should be used for a "new_dcto_cl" call with
  // the "traditional" signature.
  HeapRegionDCTOC::FilterKind _next_fk;

  // Requires that the region "mr" be dense with objects, and begin and end
  // with an object.
  void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
  // The index of this region in the heap region sequence.
  size_t _hrs_index;

  HumongousType _humongous_type;
  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;

  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets,
  // each represented as a linked list through the field below. Currently,
  // these sets include:
  //   The collection set.
  //   The set of allocation regions used in a collection pause.
  //   Spaces that may contain gray objects.
  HeapRegion* _next_in_special_set;

  // next region in the young "generation" region set
  HeapRegion* _next_young_region;

  // Next region whose cards need cleaning
  HeapRegion* _next_dirty_cards_region;

  // Fields used by the HeapRegionSetBase class and subclasses.
  HeapRegion* _next;
#ifdef ASSERT
  HeapRegionSetBase* _containing_set;
#endif // ASSERT
  bool _pending_removal;

  // For parallel heapRegion traversal.
  jint _claimed;

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.

  // See the "sort_index" method. -1 means the region is not in the array.
  int _sort_index;

  // <PREDICTION>
  double _gc_efficiency;
  // </PREDICTION>

  enum YoungType {
    NotYoung,                   // a region is not young
    Young,                      // a region is young
    Survivor                    // a region is young and it contains survivors
  };

  volatile YoungType _young_type;
  int _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
  // "next" is the top at the start of the in-progress marking (if any.)
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
  // If a collection pause is in progress, this is the top at the start
  // of that pause.

  // We've counted the marked bytes of objects below here.
  HeapWord* _top_at_conc_mark_count;

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
    _top_at_conc_mark_count = bot;
  }

  void set_young_type(YoungType new_type) {
    //assert(_young_type != new_type, "setting the same type" );
    // TODO: add more assertions here
    _young_type = new_type;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;

 public:
  // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
  HeapRegion(size_t hrs_index,
             G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr, bool is_zeroed);

  static int LogOfHRGrainBytes;
  static int LogOfHRGrainWords;
  // The normal type of these should be size_t. However, they used to
  // be members of an enum before and they are assumed by the
  // compilers to be ints. To avoid going and fixing all their uses,
  // I'm declaring them as ints. I'm not anticipating heap region
  // sizes to reach anywhere near 2g, so using an int here is safe.
  static int GrainBytes;
  static int GrainWords;
  static int CardsPerRegion;

  // It sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(uintx min_heap_size);

  enum ClaimValues {
    InitialClaimValue     = 0,
    FinalCountClaimValue  = 1,
    NoteEndClaimValue     = 2,
    ScrubRemSetClaimValue = 3,
    ParVerifyClaimValue   = 4,
    RebuildRSClaimValue   = 5
  };

  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::par_allocate(word_size);
  }
  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::allocate(word_size);
  }
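
  // Why skipping is safe, in sketch form (illustrative; the helper below
  // is an assumption, not part of this header): only non-young regions
  // are ever scanned card-by-card, so only they need the block offset
  // table kept up to date at allocation time; young (eden/survivor)
  // regions can take the cheaper paths above.
  //
  //   HeapWord* gc_alloc_sketch(HeapRegion* hr, size_t word_size) {
  //     if (hr->is_young()) {
  //       return hr->par_allocate_no_bot_updates(word_size); // no BOT write
  //     } else {
  //       return hr->par_allocate(word_size);  // updates the offset table
  //     }
  //   }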

  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  size_t hrs_index() const { return _hrs_index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes()    { return _prev_marked_bytes; }
  size_t live_bytes() {
    return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
  }

  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return
      (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
  }
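
  // Worked example for the two "live bytes" queries above (numbers are
  // invented for illustration): if the last completed marking found
  // 1000 bytes live below prev TAMS, and 512 bytes have been allocated
  // between prev TAMS and top since then, live_bytes() returns
  // 512 + 1000 = 1512, since everything allocated above TAMS is treated
  // as implicitly live.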

  // A lower bound on the amount of garbage bytes in the region.
  size_t garbage_bytes() {
    size_t used_at_mark_start_bytes =
      (prev_top_at_mark_start() - bottom()) * HeapWordSize;
    assert(used_at_mark_start_bytes >= marked_bytes(),
           "Can't mark more than we have.");
    return used_at_mark_start_bytes - marked_bytes();
  }

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }

  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
    guarantee( _next_marked_bytes <= used(), "invariant" );
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  bool isHumongous() const { return _humongous_type != NotHumongous; }
  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
  }

  // Makes the current region be a "starts humongous" region, i.e.,
  // the first region in a series of one or more contiguous regions
  // that will contain a single "humongous" object. The two parameters
  // are as follows:
  //
  // new_top : The new value of the top field of this region which
  // points to the end of the humongous object that's being
  // allocated. If there is more than one region in the series, top
  // will lie beyond this region's original end field and on the last
  // region in the series.
  //
  // new_end : The new value of the end field of this region which
  // points to the end of the last region in the series. If there is
  // one region in the series (namely: this one) end will be the same
  // as the original end of this region.
  //
  // Updating top and end as described above makes this region look as
  // if it spans the entire space taken up by all the regions in the
  // series and a single allocation moved its top to new_top. This
  // ensures that the space (capacity / allocated) taken up by all
  // humongous regions can be calculated by just looking at the
  // "starts humongous" regions and by ignoring the "continues
  // humongous" regions.
  void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);

  // Makes the current region be a "continues humongous"
  // region. first_hr is the "starts humongous" region of the series
  // which this region will be part of.
  void set_continuesHumongous(HeapRegion* first_hr);

  // Unsets the humongous-related fields on the region.
  void set_notHumongous();

  // If the region has a remembered set, return a pointer to it.
  HeapRegionRemSet* rem_set() const {
    return _rem_set;
  }

  // True iff the region is in current collection_set.
  bool in_collection_set() const {
    return _in_collection_set;
  }
  void set_in_collection_set(bool b) {
    _in_collection_set = b;
  }
  HeapRegion* next_in_collection_set() {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->in_collection_set(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_in_collection_set(HeapRegion* r) {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(r == NULL || r->in_collection_set(), "Malformed CS.");
    _next_in_special_set = r;
  }
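
  // The collection set thus forms a singly-linked list threaded through
  // its regions. A walk over it looks like this (sketch; "cs_head" is an
  // assumed head pointer, not part of this header):
  //
  //   for (HeapRegion* cur = cs_head; cur != NULL;
  //        cur = cur->next_in_collection_set()) {
  //     // ... process cur ...
  //   }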

  // Methods used by the HeapRegionSetBase class and subclasses.

  // Getter and setter for the next field used to link regions into
  // linked lists.
  HeapRegion* next()              { return _next; }

  void set_next(HeapRegion* next) { _next = next; }

  // Every region added to a set is tagged with a reference to that
  // set. This is used for doing consistency checking to make sure that
  // the contents of a set are as they should be and it's only
  // available in non-product builds.
#ifdef ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) {
    assert((containing_set == NULL && _containing_set != NULL) ||
           (containing_set != NULL && _containing_set == NULL),
           err_msg("containing_set: "PTR_FORMAT" "
                   "_containing_set: "PTR_FORMAT,
                   containing_set, _containing_set));

    _containing_set = containing_set;
  }

  HeapRegionSetBase* containing_set() { return _containing_set; }
#else // ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) { }

  // containing_set() is only used in asserts so there's no reason
  // to provide a dummy version of it.
#endif // ASSERT

  // If we want to remove regions from a list in bulk we can simply tag
  // them with the pending_removal tag and call the
  // remove_all_pending() method on the list.

  bool pending_removal() { return _pending_removal; }

  void set_pending_removal(bool pending_removal) {
    if (pending_removal) {
      assert(!_pending_removal && containing_set() != NULL,
             "can only set pending removal to true if it's false and "
             "the region belongs to a region set");
    } else {
      assert( _pending_removal && containing_set() == NULL,
              "can only set pending removal to false if it's true and "
              "the region does not belong to a region set");
    }

    _pending_removal = pending_removal;
  }
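
  // Bulk removal in sketch form (illustrative; the list type and the
  // exact remove_all_pending() signature are assumptions based on the
  // comment above):
  //
  //   for (size_t i = 0; i < num_victims; i += 1) {
  //     victims[i]->set_pending_removal(true);   // tag each victim
  //   }
  //   list->remove_all_pending(num_victims);     // unlink them in one pass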

  HeapRegion* get_next_young_region() { return _next_young_region; }
  void set_next_young_region(HeapRegion* hr) {
    _next_young_region = hr;
  }

  HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
  HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
  void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
  bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

  HeapWord* orig_end() { return _orig_end; }

  // Allows logical separation between objects allocated before and after.
  void save_marks();

  // Reset HR stuff to default values.
  void hr_clear(bool par, bool clear_space);
  void par_clear();

  void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // Get the start of the unmarked area in this region.
  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }

  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current region before the last call to "save_marks".
  void oop_before_save_marks_iterate(OopClosure* cl);

  // This call determines the "filter kind" argument that will be used for
  // the next call to "new_dcto_cl" on this region with the "traditional"
  // signature (i.e., the call below.) The default, in the absence of a
  // preceding call to this method, is "NoFilterKind", and a call to this
  // method is necessary for each such call, or else it reverts to the
  // default.
  // (This is really ugly, but all other methods I could think of changed a
  // lot of main-line code for G1.)
  void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
    _next_fk = nfk;
  }

  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapRegionDCTOC::FilterKind fk);

#if WHASSUP
  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapWord* boundary) {
    assert(boundary == NULL, "This arg doesn't make sense here.");
    DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
    _next_fk = HeapRegionDCTOC::NoFilterKind;
    return res;
  }
#endif

  //
  // Note the start or end of marking. This tells the heap region
  // that the collector is about to start or has finished (concurrently)
  // marking the heap.
  //

  // Note the start of a marking phase. Record the
  // start of the unmarked area of the region here.
  void note_start_of_marking(bool during_initial_mark) {
    init_top_at_conc_mark_count();
    _next_marked_bytes = 0;
    if (during_initial_mark && is_young() && !is_survivor())
      _next_top_at_mark_start = bottom();
    else
      _next_top_at_mark_start = top();
  }

  // Note the end of a marking phase. Install the start of
  // the unmarked area that was captured at start of marking.
  void note_end_of_marking() {
    _prev_top_at_mark_start = _next_top_at_mark_start;
    _prev_marked_bytes = _next_marked_bytes;
    _next_marked_bytes = 0;

    guarantee(_prev_marked_bytes <=
              (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
              "invariant");
  }

  // After an evacuation, we need to update _next_top_at_mark_start
  // to be the current top. Note this is only valid if we have only
  // ever evacuated into this region. If we evacuate, allocate, and
  // then evacuate we are in deep doodoo.
  void note_end_of_copying() {
    assert(top() >= _next_top_at_mark_start, "Increase only");
    _next_top_at_mark_start = top();
  }

  // Returns "false" iff no object in the region was allocated when the
  // last mark phase ended.
  bool is_marked() { return _prev_top_at_mark_start != bottom(); }

  // If "is_marked()" is true, then this is the index of the region in
  // an array constructed at the end of marking of the regions in a
  // "desirability" order.
  int sort_index() {
    return _sort_index;
  }
  void set_sort_index(int i) {
    _sort_index = i;
  }

  void init_top_at_conc_mark_count() {
    _top_at_conc_mark_count = bottom();
  }

  void set_top_at_conc_mark_count(HeapWord *cur) {
    assert(bottom() <= cur && cur <= end(), "Sanity.");
    _top_at_conc_mark_count = cur;
  }

  HeapWord* top_at_conc_mark_count() {
    return _top_at_conc_mark_count;
  }

  void reset_during_compaction() {
    guarantee( isHumongous() && startsHumongous(),
               "should only be called for humongous regions");

    zero_marked_bytes();
    init_top_at_mark_start();
  }

  // <PREDICTION>
  void calc_gc_efficiency(void);
  double gc_efficiency() { return _gc_efficiency;}
  // </PREDICTION>

  bool is_young() const     { return _young_type != NotYoung; }
  bool is_survivor() const  { return _young_type == Survivor; }

  int  young_index_in_cset() const { return _young_index_in_cset; }
  void set_young_index_in_cset(int index) {
    assert( (index == -1) || is_young(), "pre-condition" );
    _young_index_in_cset = index;
  }

  int age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    return _surv_rate_group->age_in_group(_age_index);
  }

  void record_surv_words_in_group(size_t words_survived) {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    int age_in_group = age_in_surv_rate_group();
    _surv_rate_group->record_surviving_words(age_in_group, words_survived);
  }

  int age_in_surv_rate_group_cond() {
    if (_surv_rate_group != NULL)
      return age_in_surv_rate_group();
    else
      return -1;
  }

  SurvRateGroup* surv_rate_group() {
    return _surv_rate_group;
  }

  void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
    assert( surv_rate_group != NULL, "pre-condition" );
    assert( _surv_rate_group == NULL, "pre-condition" );
    assert( is_young(), "pre-condition" );

    _surv_rate_group = surv_rate_group;
    _age_index = surv_rate_group->next_age_index();
  }

  void uninstall_surv_rate_group() {
    if (_surv_rate_group != NULL) {
      assert( _age_index > -1, "pre-condition" );
      assert( is_young(), "pre-condition" );

      _surv_rate_group = NULL;
      _age_index = -1;
    } else {
      assert( _age_index == -1, "pre-condition" );
    }
  }

  void set_young() { set_young_type(Young); }

  void set_survivor() { set_young_type(Survivor); }

  void set_not_young() { set_young_type(NotYoung); }

  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord *) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord *) obj >= next_top_at_mark_start();
  }

  // For parallel heapRegion traversal.
  bool claimHeapRegion(int claimValue);
  jint claim_value() { return _claimed; }
  // Use this carefully: only when you're sure no one is claiming...
  void set_claim_value(int claimValue) { _claimed = claimValue; }
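
  // Sketch of how a parallel phase typically uses claiming (illustrative;
  // the process() helper is an assumption): each worker walks the regions
  // and only handles the ones it claims first, so every region is
  // processed exactly once.
  //
  //   void maybe_process(HeapRegion* hr) {
  //     if (hr->claimHeapRegion(HeapRegion::FinalCountClaimValue)) {
  //       process(hr);  // this thread won the race for hr
  //     }
  //   }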

  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      init_top_at_conc_mark_count();
      _next_marked_bytes = 0;
    }
  }

  // Requires that "mr" be entirely within the region.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // or if "cl->abort()" is true after a closure application,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. (The two can be distinguished by querying
  // "cl->abort()".) Return of "NULL" indicates that the iteration
  // completed.
  HeapWord*
  object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);

  // filter_young: if true and the region is a young region then we
  // skip the iteration.
  // card_ptr: if not NULL, and we decide that the card is not young
  // and we iterate over it, we'll clean the card before we start the
  // iteration.
  HeapWord*
  oops_on_card_seq_iterate_careful(MemRegion mr,
                                   FilterOutOfRegionClosure* cl,
                                   bool filter_young,
                                   jbyte* card_ptr);

  // A version of block start that is guaranteed to find *some* block
  // boundary at or before "p", but does no object iteration, and may
  // therefore be used safely when the heap is unparseable.
  HeapWord* block_start_careful(const void* p) const {
    return _offsets.block_start_careful(p);
  }

  // Requires that "addr" is within the region. Returns the start of the
  // first ("careful") block that starts at or after "addr", or else the
  // "end" of the region if there is no such block.
  HeapWord* next_block_start_careful(HeapWord* addr);

  size_t recorded_rs_length() const        { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

  CompactibleSpace* next_compaction_space() const;

  virtual void reset_after_compaction();

  void print() const;
  void print_on(outputStream* st) const;

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information
  // vo == UseMarkWord    -> use the mark word in the object header
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseMarkWord, which is to verify the marking during a
  // full GC.
  void verify(bool allow_dirty, VerifyOption vo, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify(bool allow_dirty) const;
};

// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionSeq;
  friend class G1CollectedHeap;

  bool _complete;
  void incomplete() { _complete = false; }

 public:
  HeapRegionClosure(): _complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool doHeapRegion(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool complete() { return _complete; }
};
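
// Usage sketch (illustrative; heap_region_iterate() as the entry point is
// an assumption): a closure that counts regions, applied to each region
// in the heap in turn.
//
//   class CountRegionsClosure : public HeapRegionClosure {
//     size_t _count;
//   public:
//     CountRegionsClosure() : _count(0) { }
//     bool doHeapRegion(HeapRegion* r) {
//       _count += 1;
//       return false;  // false means "keep iterating"
//     }
//     size_t count() const { return _count; }
//   };
//
//   // g1h->heap_region_iterate(&cl); afterwards cl.count() is the total.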

#endif // SERIALGC

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP