Tue, 23 Nov 2010 13:22:55 -0800
6989984: Use standard include model for HotSpot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/survRateGroup.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/space.inline.hpp"
#include "memory/watermark.hpp"

#ifndef SERIALGC

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.

class CompactibleSpace;
class ContiguousSpace;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;

// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
// sets.

class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
public:
  // Specification of possible DirtyCardToOopClosure filtering.
  enum FilterKind {
    NoFilterKind,
    IntoCSFilterKind,
    OutOfRegionFilterKind
  };

protected:
  HeapRegion* _hr;
  FilterKind _fk;
  G1CollectedHeap* _g1;

  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               OopClosure* cl);

  // We don't specialize this for FilteringClosure; filtering is handled by
  // the "FilterKind" mechanism.  But we provide this to avoid a compiler
  // warning.
  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               FilteringClosure* cl) {
    HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
                                             (OopClosure*)cl);
  }

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
  }

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    Filtering_DCTOC::walk_mem_region(mr, bottom, top);
  }

public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
                  HeapRegion* hr, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  FilterKind fk);
};
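
// For illustration only: the dispatch on "_fk" happens in the OopClosure
// variant of walk_mem_region_with_cl, which is defined in heapRegion.cpp,
// not in this header. A minimal sketch of that dispatch, assuming the
// filtering wrapper closures declared in g1_specialized_oop_closures.hpp,
// might look like the following (disabled code, kept for exposition):
#if 0
void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
                                              HeapWord* bottom,
                                              HeapWord* top,
                                              OopClosure* cl) {
  // Select the closure to apply; the filter kinds wrap "cl" so that only
  // the interesting oops (those pointing into the collection set, or out
  // of this region) are passed through to it.
  OopClosure* cl2 = cl;
  FilterIntoCSClosure intoCSFilt(this, _g1, cl);
  FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
  switch (_fk) {
  case IntoCSFilterKind:      cl2 = &intoCSFilt;      break;
  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
  default:                    break;  // NoFilterKind: apply cl unfiltered
  }
  // ... then walk the objects in [bottom, top) applying cl2 ...
}
#endif
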
// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
// OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.

// The idea behind time stamps is the following. Doing a save_marks on
// all regions at every GC pause is time consuming (if I remember
// correctly, 10ms or so). So, we would like to do that only for regions
// that are GC alloc regions. To achieve this, we use time
// stamps. For every evacuation pause, G1CollectedHeap generates a
// unique time stamp (essentially a counter that gets
// incremented). Every time we want to call save_marks on a region,
// we set the saved_mark_word to top and also copy the current GC
// time stamp to the time stamp field of the space. Reading the
// saved_mark_word involves checking the time stamp of the
// region. If it is the same as the current GC time stamp, then we
// can safely read the saved_mark_word field, as it is valid. If the
// time stamp of the region is not the same as the current GC time
// stamp, then we instead read top, as the saved_mark_word field is
// invalid. Time stamps (on the regions and also on the
// G1CollectedHeap) are reset at every cleanup (we iterate over
// the regions anyway) and at the end of a Full GC. The current scheme
// that uses sequential unsigned ints will fail only if we have 4 billion
// evacuation pauses between two cleanups, which is _highly_ unlikely.
// (A sketch of the read side of this protocol follows the class below.)

class G1OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  G1BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  volatile unsigned _gc_time_stamp;

 public:
  // Constructor.  If "is_zeroed" is true, the MemRegion "mr" may be
  // assumed to contain zeros.
  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr, bool is_zeroed = false);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  virtual HeapWord* saved_mark_word() const;
  virtual void set_saved_mark();
  void reset_gc_time_stamp() { _gc_time_stamp = 0; }

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print() const;
};
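
// For illustration only: a minimal sketch of the read side of the
// time-stamp protocol described above. The real implementation lives in
// heapRegion.cpp; the get_gc_time_stamp() accessor on G1CollectedHeap is
// an assumption here (disabled code, kept for exposition):
#if 0
HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(_gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (_gc_time_stamp < g1h->get_gc_time_stamp())
    // No save_marks call since the pause that bumped the global time
    // stamp, so the saved mark is stale: everything below top counts
    // as allocated "before".
    return top();
  else
    // save_marks was called during the current pause, so the inherited
    // saved_mark_word field is valid.
    return ContiguousSpace::saved_mark_word();
}
#endif
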
class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

  enum HumongousType {
    NotHumongous = 0,
    StartsHumongous,
    ContinuesHumongous
  };

  // The next filter kind that should be used for a "new_dcto_cl" call with
  // the "traditional" signature.
  HeapRegionDCTOC::FilterKind _next_fk;

  // Requires that the region "mr" be dense with objects, and begin and end
  // with an object.
  void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int _hrs_index;

  HumongousType _humongous_type;
  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;

  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // True iff the region is on the unclean list, waiting to be zero filled.
  bool _is_on_unclean_list;

  // True iff the region is on the free list, ready for allocation.
  bool _is_on_free_list;

  // Is this or has it been an allocation region in the current collection
  // pause.
  bool _is_gc_alloc_region;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of a number of special subsets, each
  // represented as linked lists through the field below.  Currently, these
  // sets include:
  //   The collection set.
  //   The set of allocation regions used in a collection pause.
  //   Spaces that may contain gray objects.
  HeapRegion* _next_in_special_set;

  // Next region in the young "generation" region set.
  HeapRegion* _next_young_region;

  // Next region whose cards need cleaning.
  HeapRegion* _next_dirty_cards_region;

  // For parallel heapRegion traversal.
  jint _claimed;

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.

  // See the "sort_index" method.  -1 means the region is not in the array.
  int _sort_index;

  // <PREDICTION>
  double _gc_efficiency;
  // </PREDICTION>

  enum YoungType {
    NotYoung,                   // a region is not young
    Young,                      // a region is young
    Survivor                    // a region is young and it contains
                                // survivor objects
  };

  volatile YoungType _young_type;
  int  _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int  _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
  // "next" is the top at the start of the in-progress marking (if any.)
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
  // If a collection pause is in progress, this is the top at the start
  // of that pause.

  // We've counted the marked bytes of objects below here.
  HeapWord* _top_at_conc_mark_count;

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
    _top_at_conc_mark_count = bot;
  }

  jint _zfs;  // A member of ZeroFillState.  Protected by ZF_lock.
  Thread* _zero_filler;  // If _zfs is ZeroFilling, the thread that (last)
                         // made it so.

  void set_young_type(YoungType new_type) {
    //assert(_young_type != new_type, "setting the same type" );
    // TODO: add more assertions here
    _young_type = new_type;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to the total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;

320 // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
321 HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
322 MemRegion mr, bool is_zeroed);
324 static int LogOfHRGrainBytes;
325 static int LogOfHRGrainWords;
326 // The normal type of these should be size_t. However, they used to
327 // be members of an enum before and they are assumed by the
328 // compilers to be ints. To avoid going and fixing all their uses,
329 // I'm declaring them as ints. I'm not anticipating heap region
330 // sizes to reach anywhere near 2g, so using an int here is safe.
331 static int GrainBytes;
332 static int GrainWords;
333 static int CardsPerRegion;
335 // It sets up the heap region size (GrainBytes / GrainWords), as
336 // well as other related fields that are based on the heap region
337 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
338 // CardsPerRegion). All those fields are considered constant
339 // throughout the JVM's execution, therefore they should only be set
340 // up once during initialization time.
341 static void setup_heap_region_size(uintx min_heap_size);
343 enum ClaimValues {
344 InitialClaimValue = 0,
345 FinalCountClaimValue = 1,
346 NoteEndClaimValue = 2,
347 ScrubRemSetClaimValue = 3,
348 ParVerifyClaimValue = 4,
349 RebuildRSClaimValue = 5
350 };
352 // Concurrent refinement requires contiguous heap regions (in which TLABs
353 // might be allocated) to be zero-filled. Each region therefore has a
354 // zero-fill-state.
355 enum ZeroFillState {
356 NotZeroFilled,
357 ZeroFilling,
358 ZeroFilled,
359 Allocated
360 };
  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int hrs_index() const { return _hrs_index; }
  void set_hrs_index(int index) { _hrs_index = index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes()      { return _prev_marked_bytes; }
  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return (top() - next_top_at_mark_start())
      * HeapWordSize
      + next_marked_bytes();
  }

  // A lower bound on the amount of garbage bytes in the region.
  size_t garbage_bytes() {
    size_t used_at_mark_start_bytes =
      (prev_top_at_mark_start() - bottom()) * HeapWordSize;
    assert(used_at_mark_start_bytes >= marked_bytes(),
           "Can't mark more than we have.");
    return used_at_mark_start_bytes - marked_bytes();
  }

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }

  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
    guarantee( _next_marked_bytes <= used(), "invariant" );
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  bool isHumongous() const { return _humongous_type != NotHumongous; }
  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
  }

  // Causes the current region to represent a humongous object spanning "n"
  // regions.
  void set_startsHumongous(HeapWord* new_end);

  // The regions that continue a humongous sequence should be added using
  // this method, in increasing address order.
  void set_continuesHumongous(HeapRegion* start);

  // If the region has a remembered set, return a pointer to it.
  HeapRegionRemSet* rem_set() const {
    return _rem_set;
  }

  // True iff the region is in current collection_set.
  bool in_collection_set() const {
    return _in_collection_set;
  }
  void set_in_collection_set(bool b) {
    _in_collection_set = b;
  }
  HeapRegion* next_in_collection_set() {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->in_collection_set(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_in_collection_set(HeapRegion* r) {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(r == NULL || r->in_collection_set(), "Malformed CS.");
    _next_in_special_set = r;
  }

  // True iff it is or has been an allocation region in the current
  // collection pause.
  bool is_gc_alloc_region() const {
    return _is_gc_alloc_region;
  }
  void set_is_gc_alloc_region(bool b) {
    _is_gc_alloc_region = b;
  }
  HeapRegion* next_gc_alloc_region() {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_gc_alloc_region(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_gc_alloc_region(HeapRegion* r) {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
    _next_in_special_set = r;
  }

  bool is_on_free_list() {
    return _is_on_free_list;
  }

  void set_on_free_list(bool b) {
    _is_on_free_list = b;
  }

  HeapRegion* next_from_free_list() {
    assert(is_on_free_list(),
           "Should only invoke on free space.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_on_free_list(),
           "Malformed Free List.");
    return _next_in_special_set;
  }
  void set_next_on_free_list(HeapRegion* r) {
    assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
    _next_in_special_set = r;
  }

  bool is_on_unclean_list() {
    return _is_on_unclean_list;
  }

  void set_on_unclean_list(bool b);

  HeapRegion* next_from_unclean_list() {
    assert(is_on_unclean_list(),
           "Should only invoke on unclean space.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_on_unclean_list(),
           "Malformed unclean List.");
    return _next_in_special_set;
  }
  void set_next_on_unclean_list(HeapRegion* r);

  HeapRegion* get_next_young_region() { return _next_young_region; }
  void set_next_young_region(HeapRegion* hr) {
    _next_young_region = hr;
  }

  HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
  HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
  void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
  bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

  // Allows logical separation between objects allocated before and after.
  void save_marks();

  // Reset HR stuff to default values.
  void hr_clear(bool par, bool clear_space);

  void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // Ensure that "this" is zero-filled.
  void ensure_zero_filled();
  // This one requires that the calling thread holds ZF_mon.
  void ensure_zero_filled_locked();

  // Get the start of the unmarked area in this region.
  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }

  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current region before the last call to "save_marks".
  void oop_before_save_marks_iterate(OopClosure* cl);

  // This call determines the "filter kind" argument that will be used for
  // the next call to "new_dcto_cl" on this region with the "traditional"
  // signature (i.e., the call below.)  The default, in the absence of a
  // preceding call to this method, is "NoFilterKind", and a call to this
  // method is necessary for each such call, or else it reverts to the
  // default.
  // (This is really ugly, but all other methods I could think of changed a
  // lot of main-line code for G1.)
  void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
    _next_fk = nfk;
  }

  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapRegionDCTOC::FilterKind fk);

#if WHASSUP
  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapWord* boundary) {
    assert(boundary == NULL, "This arg doesn't make sense here.");
    DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
    _next_fk = HeapRegionDCTOC::NoFilterKind;
    return res;
  }
#endif

  //
  // Note the start or end of marking. This tells the heap region
  // that the collector is about to start or has finished (concurrently)
  // marking the heap.
  //

  // Note the start of a marking phase. Record the
  // start of the unmarked area of the region here.
  void note_start_of_marking(bool during_initial_mark) {
    init_top_at_conc_mark_count();
    _next_marked_bytes = 0;
    if (during_initial_mark && is_young() && !is_survivor())
      _next_top_at_mark_start = bottom();
    else
      _next_top_at_mark_start = top();
  }

  // Note the end of a marking phase. Install the start of
  // the unmarked area that was captured at start of marking.
  void note_end_of_marking() {
    _prev_top_at_mark_start = _next_top_at_mark_start;
    _prev_marked_bytes = _next_marked_bytes;
    _next_marked_bytes = 0;

    guarantee(_prev_marked_bytes <=
              (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
              "invariant");
  }

  // After an evacuation, we need to update _next_top_at_mark_start
  // to be the current top.  Note this is only valid if we have only
  // ever evacuated into this region.  If we evacuate, allocate, and
  // then evacuate we are in deep doodoo.
  void note_end_of_copying() {
    assert(top() >= _next_top_at_mark_start, "Increase only");
    _next_top_at_mark_start = top();
  }

  // Returns "false" iff no object in the region was allocated when the
  // last mark phase ended.
  bool is_marked() { return _prev_top_at_mark_start != bottom(); }

  // If "is_marked()" is true, then this is the index of the region in
  // an array constructed at the end of marking of the regions in a
  // "desirability" order.
  int sort_index() {
    return _sort_index;
  }
  void set_sort_index(int i) {
    _sort_index = i;
  }

  void init_top_at_conc_mark_count() {
    _top_at_conc_mark_count = bottom();
  }

  void set_top_at_conc_mark_count(HeapWord *cur) {
    assert(bottom() <= cur && cur <= end(), "Sanity.");
    _top_at_conc_mark_count = cur;
  }

  HeapWord* top_at_conc_mark_count() {
    return _top_at_conc_mark_count;
  }

  void reset_during_compaction() {
    guarantee( isHumongous() && startsHumongous(),
               "should only be called for humongous regions");

    zero_marked_bytes();
    init_top_at_mark_start();
  }

  // <PREDICTION>
  void calc_gc_efficiency(void);
  double gc_efficiency() { return _gc_efficiency; }
  // </PREDICTION>

  bool is_young() const    { return _young_type != NotYoung; }
  bool is_survivor() const { return _young_type == Survivor; }

  int  young_index_in_cset() const { return _young_index_in_cset; }
  void set_young_index_in_cset(int index) {
    assert( (index == -1) || is_young(), "pre-condition" );
    _young_index_in_cset = index;
  }

  int age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    return _surv_rate_group->age_in_group(_age_index);
  }

  void record_surv_words_in_group(size_t words_survived) {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    int age_in_group = age_in_surv_rate_group();
    _surv_rate_group->record_surviving_words(age_in_group, words_survived);
  }

  int age_in_surv_rate_group_cond() {
    if (_surv_rate_group != NULL)
      return age_in_surv_rate_group();
    else
      return -1;
  }

  SurvRateGroup* surv_rate_group() {
    return _surv_rate_group;
  }

  void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
    assert( surv_rate_group != NULL, "pre-condition" );
    assert( _surv_rate_group == NULL, "pre-condition" );
    assert( is_young(), "pre-condition" );

    _surv_rate_group = surv_rate_group;
    _age_index = surv_rate_group->next_age_index();
  }

  void uninstall_surv_rate_group() {
    if (_surv_rate_group != NULL) {
      assert( _age_index > -1, "pre-condition" );
      assert( is_young(), "pre-condition" );

      _surv_rate_group = NULL;
      _age_index = -1;
    } else {
      assert( _age_index == -1, "pre-condition" );
    }
  }

  void set_young() { set_young_type(Young); }

  void set_survivor() { set_young_type(Survivor); }

  void set_not_young() { set_young_type(NotYoung); }

  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord *) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord *) obj >= next_top_at_mark_start();
  }

  // For parallel heapRegion traversal.
  bool claimHeapRegion(int claimValue);
  jint claim_value() { return _claimed; }
  // Use this carefully: only when you're sure no one is claiming...
  void set_claim_value(int claimValue) { _claimed = claimValue; }

  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      init_top_at_conc_mark_count();
      _next_marked_bytes = 0;
    }
  }

727 // Requires that "mr" be entirely within the region.
728 // Apply "cl->do_object" to all objects that intersect with "mr".
729 // If the iteration encounters an unparseable portion of the region,
730 // or if "cl->abort()" is true after a closure application,
731 // terminate the iteration and return the address of the start of the
732 // subregion that isn't done. (The two can be distinguished by querying
733 // "cl->abort()".) Return of "NULL" indicates that the iteration
734 // completed.
735 HeapWord*
736 object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
738 // In this version - if filter_young is true and the region
739 // is a young region then we skip the iteration.
740 HeapWord*
741 oops_on_card_seq_iterate_careful(MemRegion mr,
742 FilterOutOfRegionClosure* cl,
743 bool filter_young);
745 // A version of block start that is guaranteed to find *some* block
746 // boundary at or before "p", but does not object iteration, and may
747 // therefore be used safely when the heap is unparseable.
748 HeapWord* block_start_careful(const void* p) const {
749 return _offsets.block_start_careful(p);
750 }
752 // Requires that "addr" is within the region. Returns the start of the
753 // first ("careful") block that starts at or after "addr", or else the
754 // "end" of the region if there is no such block.
755 HeapWord* next_block_start_careful(HeapWord* addr);
  // Returns the zero-fill-state of the current region.
  ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
  bool zero_fill_is_allocated() { return _zfs == Allocated; }
  Thread* zero_filler() { return _zero_filler; }

  // Indicate that the contents of the region are unknown, and therefore
  // might require zero-filling.
  void set_zero_fill_needed() {
    set_zero_fill_state_work(NotZeroFilled);
  }
  void set_zero_fill_in_progress(Thread* t) {
    set_zero_fill_state_work(ZeroFilling);
    _zero_filler = t;
  }
  void set_zero_fill_complete();
  void set_zero_fill_allocated() {
    set_zero_fill_state_work(Allocated);
  }

  void set_zero_fill_state_work(ZeroFillState zfs);

  // This is called when a full collection shrinks the heap.
  // We want to set the heap region to a value which says
  // it is no longer part of the heap.  For now, we'll let "NotZF" fill
  // that role.
  void reset_zero_fill() {
    set_zero_fill_state_work(NotZeroFilled);
    _zero_filler = NULL;
  }

  size_t recorded_rs_length() const        { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

  CompactibleSpace* next_compaction_space() const;

  virtual void reset_after_compaction();

  void print() const;
  void print_on(outputStream* st) const;

  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // use_prev_marking == true. Currently, there is only one case where
  // this is called with use_prev_marking == false, which is to verify
  // the "next" marking information at the end of remark.
  void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify(bool allow_dirty) const;

#ifdef DEBUG
  HeapWord* allocate(size_t size);
#endif
};
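
// For illustration only: claimHeapRegion is what makes the ClaimValues
// protocol above safe for parallel traversal. A minimal sketch, assuming
// HotSpot's Atomic::cmpxchg on jint (the real implementation lives in
// heapRegion.cpp; disabled code, kept for exposition):
#if 0
bool HeapRegion::claimHeapRegion(jint claimValue) {
  jint current = _claimed;
  if (current != claimValue) {
    // Try to install our claim value; of the racing GC worker threads,
    // exactly one succeeds and gets to process this region.
    jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
    if (res == current) return true;
  }
  return false;
}
#endif
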
// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionSeq;
  friend class G1CollectedHeap;

  bool _complete;
  void incomplete() { _complete = false; }

 public:
  HeapRegionClosure(): _complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool doHeapRegion(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool complete() { return _complete; }
};
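
// For illustration only: a typical HeapRegionClosure inspects each region
// and returns "false" to keep the iteration going. A minimal sketch; the
// CountUsedRegionsClosure name is hypothetical, and the
// heap_region_iterate call on G1CollectedHeap is an assumption here
// (disabled code, kept for exposition):
#if 0
class CountUsedRegionsClosure : public HeapRegionClosure {
  size_t _count;
 public:
  CountUsedRegionsClosure() : _count(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (r->used() > 0) _count += 1;
    return false;  // "false" means: continue with the remaining regions
  }
  size_t count() const { return _count; }
};
// Typical use:
//   CountUsedRegionsClosure cl;
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//   assert(cl.complete(), "should have visited all regions");
#endif
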
// A linked list of heap regions.  It leaves the "next" field
// unspecified; that's up to subtypes.
class RegionList VALUE_OBJ_CLASS_SPEC {
 protected:
  virtual HeapRegion* get_next(HeapRegion* chr) = 0;
  virtual void set_next(HeapRegion* chr,
                        HeapRegion* new_next) = 0;

  HeapRegion* _hd;
  HeapRegion* _tl;
  size_t _sz;

  // Protected constructor because this type is only meaningful
  // when the _get/_set next functions are defined.
  RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
 public:
  void reset() {
    _hd = NULL;
    _tl = NULL;
    _sz = 0;
  }
  HeapRegion* hd() { return _hd; }
  HeapRegion* tl() { return _tl; }
  size_t sz() { return _sz; }
  size_t length();

  bool well_formed() {
    return
      ((hd() == NULL && tl() == NULL && sz() == 0)
       || (hd() != NULL && tl() != NULL && sz() > 0))
      && (sz() == length());
  }
  virtual void insert_before_head(HeapRegion* r);
  void prepend_list(RegionList* new_list);
  virtual HeapRegion* pop();
  void dec_sz() { _sz--; }
  // Requires that "r" is an element of the list, and is not the tail.
  void delete_after(HeapRegion* r);
};
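
// For illustration only: a minimal sketch of how insert_before_head might
// maintain the head/tail/size invariants checked by well_formed(); the
// real implementation lives in heapRegion.cpp (disabled code, kept for
// exposition):
#if 0
void RegionList::insert_before_head(HeapRegion* r) {
  set_next(r, hd());          // link r in front of the current head
  _hd = r;
  _sz++;
  if (tl() == NULL) _tl = r;  // a sole element is both head and tail
}
#endif
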
class EmptyNonHRegionList: public RegionList {
 protected:
  // Protected constructor because this type is only meaningful
  // when the _get/_set next functions are defined.
  EmptyNonHRegionList() : RegionList() {}

 public:
  void insert_before_head(HeapRegion* r) {
    //    assert(r->is_empty(), "Better be empty");
    assert(!r->isHumongous(), "Better not be humongous.");
    RegionList::insert_before_head(r);
  }
  void prepend_list(EmptyNonHRegionList* new_list) {
    //    assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
    //           "Better be empty");
    assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
           "Better not be humongous.");
    //    assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
    //           "Better be empty");
    assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
           "Better not be humongous.");
    RegionList::prepend_list(new_list);
  }
};

class UncleanRegionList: public EmptyNonHRegionList {
 public:
  HeapRegion* get_next(HeapRegion* hr) {
    return hr->next_from_unclean_list();
  }
  void set_next(HeapRegion* hr, HeapRegion* new_next) {
    hr->set_next_on_unclean_list(new_next);
  }

  UncleanRegionList() : EmptyNonHRegionList() {}

  void insert_before_head(HeapRegion* r) {
    assert(!r->is_on_free_list(),
           "Better not already be on free list");
    assert(!r->is_on_unclean_list(),
           "Better not already be on unclean list");
    r->set_zero_fill_needed();
    r->set_on_unclean_list(true);
    EmptyNonHRegionList::insert_before_head(r);
  }
  void prepend_list(UncleanRegionList* new_list) {
    assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
           "Better not already be on free list");
    assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
           "Better already be marked as on unclean list");
    assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
           "Better not already be on free list");
    assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
           "Better already be marked as on unclean list");
    EmptyNonHRegionList::prepend_list(new_list);
  }
  HeapRegion* pop() {
    HeapRegion* res = RegionList::pop();
    if (res != NULL) res->set_on_unclean_list(false);
    return res;
  }
};

// Local Variables: ***
// c-indentation-style: gnu ***
// End: ***

#endif // SERIALGC

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP