ysr@777: /*
ysr@777: * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
ysr@777: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777: *
ysr@777: * This code is free software; you can redistribute it and/or modify it
ysr@777: * under the terms of the GNU General Public License version 2 only, as
ysr@777: * published by the Free Software Foundation.
ysr@777: *
ysr@777: * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777: * version 2 for more details (a copy is included in the LICENSE file that
ysr@777: * accompanied this code).
ysr@777: *
ysr@777: * You should have received a copy of the GNU General Public License version
ysr@777: * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777: *
ysr@777: * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
ysr@777: * CA 95054 USA or visit www.sun.com if you need additional information or
ysr@777: * have any questions.
ysr@777: *
ysr@777: */
ysr@777:
ysr@777: #ifndef SERIALGC
ysr@777:
ysr@777: // A HeapRegion is the smallest piece of a G1CollectedHeap that
ysr@777: // can be collected independently.
ysr@777:
ysr@777: // NOTE: Although a HeapRegion is a Space, its
ysr@777: // Space::initDirtyCardClosure method must not be called.
ysr@777: // The problem is that the existence of this method breaks
ysr@777: // the independence of barrier sets from remembered sets.
ysr@777: // The solution is to remove this method from the definition
ysr@777: // of a Space.
ysr@777:
ysr@777: class CompactibleSpace;
ysr@777: class ContiguousSpace;
ysr@777: class HeapRegionRemSet;
ysr@777: class HeapRegionRemSetIterator;
ysr@777: class HeapRegion;
ysr@777:
ysr@777: // A dirty card to oop closure for heap regions. It
ysr@777: // knows how to get the G1 heap and how to use the bitmap
ysr@777: // in the concurrent marker used by G1 to filter remembered
ysr@777: // sets.
ysr@777:
ysr@777: class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
ysr@777: public:
ysr@777: // Specification of possible DirtyCardToOopClosure filtering.
ysr@777: enum FilterKind {
ysr@777: NoFilterKind,
ysr@777: IntoCSFilterKind,
ysr@777: OutOfRegionFilterKind
ysr@777: };
ysr@777:
ysr@777: protected:
ysr@777: HeapRegion* _hr;
ysr@777: FilterKind _fk;
ysr@777: G1CollectedHeap* _g1;
ysr@777:
ysr@777: void walk_mem_region_with_cl(MemRegion mr,
ysr@777: HeapWord* bottom, HeapWord* top,
ysr@777: OopClosure* cl);
ysr@777:
ysr@777: // We don't specialize this for FilteringClosure; filtering is handled by
ysr@777: // the "FilterKind" mechanism. But we provide this to avoid a compiler
ysr@777: // warning.
ysr@777: void walk_mem_region_with_cl(MemRegion mr,
ysr@777: HeapWord* bottom, HeapWord* top,
ysr@777: FilteringClosure* cl) {
ysr@777: HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
ysr@777: (OopClosure*)cl);
ysr@777: }
ysr@777:
ysr@777: // Get the actual top of the area on which the closure will
ysr@777: // operate, given where the top is assumed to be (the end of the
ysr@777: // memory region passed to do_MemRegion) and where the object
ysr@777: // at the top is assumed to start. For example, an object may
ysr@777: // start at the top but actually extend past the assumed top,
ysr@777: // in which case the top becomes the end of the object.
ysr@777: HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
ysr@777: return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
ysr@777: }
ysr@777:
ysr@777: // Walk the given memory region from bottom to (actual) top
ysr@777: // looking for objects and applying the oop closure (_cl) to
ysr@777: // them. The base implementation of this treats the area as
ysr@777: // blocks, where a block may or may not be an object. Sub-
ysr@777: // classes should override this to provide more accurate
ysr@777: // or possibly more efficient walking.
ysr@777: void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
ysr@777: Filtering_DCTOC::walk_mem_region(mr, bottom, top);
ysr@777: }
ysr@777:
ysr@777: public:
ysr@777: HeapRegionDCTOC(G1CollectedHeap* g1,
ysr@777: HeapRegion* hr, OopClosure* cl,
ysr@777: CardTableModRefBS::PrecisionStyle precision,
ysr@777: FilterKind fk);
ysr@777: };
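ysr@777:
ysr@777: // Illustrative only (not part of the original interface): a caller that
ysr@777: // wants the oops on a dirty card scanned, filtering out references that
ysr@777: // point outside the region, might build and apply the closure roughly
ysr@777: // like this ("hr", "my_oop_cl" and "dirty_card_mr" are placeholders):
ysr@777: //
ysr@777: //   HeapRegionDCTOC dcto_cl(G1CollectedHeap::heap(), hr, &my_oop_cl,
ysr@777: //                           CardTableModRefBS::Precise,
ysr@777: //                           HeapRegionDCTOC::OutOfRegionFilterKind);
ysr@777: //   dcto_cl.do_MemRegion(dirty_card_mr);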
ysr@777:
ysr@777:
ysr@777: // The complicating factor is that BlockOffsetTable diverged
ysr@777: // significantly, and we need functionality that is only in the G1 version.
ysr@777: // So I copied that code, which led to an alternate G1 version of
ysr@777: // OffsetTableContigSpace. If the two versions of BlockOffsetTable could
ysr@777: // be reconciled, then G1OffsetTableContigSpace could go away.
ysr@777:
ysr@777: // The idea behind time stamps is the following. Doing a save_marks on
ysr@777: // all regions at every GC pause is time consuming (if I remember
ysr@777: // well, 10ms or so). So, we would like to do that only for regions
ysr@777: // that are GC alloc regions. To achieve this, we use time
ysr@777: // stamps. For every evacuation pause, G1CollectedHeap generates a
ysr@777: // unique time stamp (essentially a counter that gets
ysr@777: // incremented). Every time we want to call save_marks on a region,
ysr@777: // we set the saved_mark_word to top and also copy the current GC
ysr@777: // time stamp to the time stamp field of the space. Reading the
ysr@777: // saved_mark_word involves checking the time stamp of the
ysr@777: // region. If it is the same as the current GC time stamp, then we
ysr@777: // can safely read the saved_mark_word field, as it is valid. If the
ysr@777: // time stamp of the region is not the same as the current GC time
ysr@777: // stamp, then we instead read top, as the saved_mark_word field is
ysr@777: // invalid. Time stamps (on the regions and also on the
ysr@777: // G1CollectedHeap) are reset at every cleanup (we iterate over
ysr@777: // the regions anyway) and at the end of a Full GC. The current scheme
ysr@777: // that uses sequential unsigned ints will fail only if we have about
ysr@777: // 4 billion (2^32) evacuation pauses between two cleanups, which is
ysr@777: // _highly_ unlikely.
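ysr@777:
ysr@777: // A minimal sketch of the reader side of the scheme just described
ysr@777: // (illustrative only; it assumes the _gc_time_stamp field below and a
ysr@777: // global time stamp kept by G1CollectedHeap, and the real code in the
ysr@777: // .cpp file may differ in detail):
ysr@777: //
ysr@777: //   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
ysr@777: //     G1CollectedHeap* g1h = G1CollectedHeap::heap();
ysr@777: //     if (_gc_time_stamp < g1h->get_gc_time_stamp())
ysr@777: //       return top();                      // stale stamp: field is invalid
ysr@777: //     else
ysr@777: //       return ContiguousSpace::saved_mark_word();  // stamp current: valid
ysr@777: //   }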
ysr@777:
ysr@777: class G1OffsetTableContigSpace: public ContiguousSpace {
ysr@777: friend class VMStructs;
ysr@777: protected:
ysr@777: G1BlockOffsetArrayContigSpace _offsets;
ysr@777: Mutex _par_alloc_lock;
ysr@777: volatile unsigned _gc_time_stamp;
ysr@777:
ysr@777: public:
ysr@777: // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
ysr@777: // assumed to contain zeros.
ysr@777: G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777: MemRegion mr, bool is_zeroed = false);
ysr@777:
ysr@777: void set_bottom(HeapWord* value);
ysr@777: void set_end(HeapWord* value);
ysr@777:
ysr@777: virtual HeapWord* saved_mark_word() const;
ysr@777: virtual void set_saved_mark();
ysr@777: void reset_gc_time_stamp() { _gc_time_stamp = 0; }
ysr@777:
ysr@777: virtual void initialize(MemRegion mr, bool clear_space);
ysr@777: virtual void clear();
ysr@777:
ysr@777: HeapWord* block_start(const void* p);
ysr@777: HeapWord* block_start_const(const void* p) const;
ysr@777:
ysr@777: // Add offset table update.
ysr@777: virtual HeapWord* allocate(size_t word_size);
ysr@777: HeapWord* par_allocate(size_t word_size);
ysr@777:
ysr@777: // MarkSweep support phase3
ysr@777: virtual HeapWord* initialize_threshold();
ysr@777: virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
ysr@777:
ysr@777: virtual void print() const;
ysr@777: };
ysr@777:
ysr@777: class HeapRegion: public G1OffsetTableContigSpace {
ysr@777: friend class VMStructs;
ysr@777: private:
ysr@777:
ysr@777: // The next filter kind that should be used for a "new_dcto_cl" call with
ysr@777: // the "traditional" signature.
ysr@777: HeapRegionDCTOC::FilterKind _next_fk;
ysr@777:
ysr@777: // Requires that the region "mr" be dense with objects, and begin and end
ysr@777: // with an object.
ysr@777: void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
ysr@777:
ysr@777: // The remembered set for this region.
ysr@777: // (Might want to make this "inline" later, to avoid some alloc failure
ysr@777: // issues.)
ysr@777: HeapRegionRemSet* _rem_set;
ysr@777:
ysr@777: G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
ysr@777:
ysr@777: protected:
ysr@777: // If this region is a member of a HeapRegionSeq, the index in that
ysr@777: // sequence, otherwise -1.
ysr@777: int _hrs_index;
ysr@777:
ysr@777: bool _humongous; // starts or continues a humongous object
ysr@777: bool _humongous_start; // starts a humongous object
ysr@777: // For a humongous region, the region in which it starts.
ysr@777: HeapRegion* _humongous_start_region;
ysr@777: // For the start region of a humongous sequence, its original end().
ysr@777: HeapWord* _orig_end;
ysr@777:
ysr@777: // True iff the region is in current collection_set.
ysr@777: bool _in_collection_set;
ysr@777:
ysr@777: // True iff the region is on the unclean list, waiting to be zero filled.
ysr@777: bool _is_on_unclean_list;
ysr@777:
ysr@777: // True iff the region is on the free list, ready for allocation.
ysr@777: bool _is_on_free_list;
ysr@777:
ysr@777: // Is this or has it been an allocation region in the current collection
ysr@777: // pause.
ysr@777: bool _is_gc_alloc_region;
ysr@777:
ysr@777: // True iff an attempt to evacuate an object in the region failed.
ysr@777: bool _evacuation_failed;
ysr@777:
ysr@777: // A heap region may be a member of one of a number of special subsets, each
ysr@777: // represented as a linked list through the field below. Currently, these
ysr@777: // sets include:
ysr@777: // The collection set.
ysr@777: // The set of allocation regions used in a collection pause.
ysr@777: // Spaces that may contain gray objects.
ysr@777: HeapRegion* _next_in_special_set;
ysr@777:
ysr@777: // next region in the young "generation" region set
ysr@777: HeapRegion* _next_young_region;
ysr@777:
ysr@777: // For parallel heapRegion traversal.
ysr@777: jint _claimed;
ysr@777:
ysr@777: // We use concurrent marking to determine the amount of live data
ysr@777: // in each heap region.
ysr@777: size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
ysr@777: size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
ysr@777:
ysr@777: // See the "sort_index" method. -1 means the region is not in the array.
ysr@777: int _sort_index;
ysr@777:
ysr@777: // A popular region has (or at least had) a very large RS, and should not be
ysr@777: // considered for membership in a collection set.
ysr@777: enum PopularityState {
ysr@777: NotPopular,
ysr@777: PopularPending,
ysr@777: Popular
ysr@777: };
ysr@777: PopularityState _popularity;
ysr@777:
ysr@777: //
ysr@777: double _gc_efficiency;
ysr@777: //
ysr@777:
ysr@777: enum YoungType {
ysr@777: NotYoung, // a region is not young
ysr@777: ScanOnly, // a region is young and scan-only
ysr@777: Young, // a region is young
ysr@777: Survivor // a region is young and it contains
ysr@777: // survivor objects
ysr@777: };
ysr@777:
ysr@777: YoungType _young_type;
ysr@777: int _young_index_in_cset;
ysr@777: SurvRateGroup* _surv_rate_group;
ysr@777: int _age_index;
ysr@777:
ysr@777: // The start of the unmarked area. The unmarked area extends from this
ysr@777: // word until the top and/or end of the region, and is the part
ysr@777: // of the region for which no marking was done, i.e. objects may
ysr@777: // have been allocated in this part since the last mark phase.
ysr@777: // "prev" is the top at the start of the last completed marking.
ysr@777: // "next" is the top at the start of the in-progress marking (if any.)
ysr@777: HeapWord* _prev_top_at_mark_start;
ysr@777: HeapWord* _next_top_at_mark_start;
ysr@777: // If a collection pause is in progress, this is the top at the start
ysr@777: // of that pause.
ysr@777:
ysr@777: // We've counted the marked bytes of objects below here.
ysr@777: HeapWord* _top_at_conc_mark_count;
ysr@777:
ysr@777: void init_top_at_mark_start() {
ysr@777: assert(_prev_marked_bytes == 0 &&
ysr@777: _next_marked_bytes == 0,
ysr@777: "Must be called after zero_marked_bytes.");
ysr@777: HeapWord* bot = bottom();
ysr@777: _prev_top_at_mark_start = bot;
ysr@777: _next_top_at_mark_start = bot;
ysr@777: _top_at_conc_mark_count = bot;
ysr@777: }
ysr@777:
ysr@777: jint _zfs; // A member of ZeroFillState. Protected by ZF_lock.
ysr@777: Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
ysr@777: // made it so.
ysr@777:
ysr@777: void set_young_type(YoungType new_type) {
ysr@777: //assert(_young_type != new_type, "setting the same type" );
ysr@777: // TODO: add more assertions here
ysr@777: _young_type = new_type;
ysr@777: }
ysr@777:
ysr@777: public:
ysr@777: // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
ysr@777: HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777: MemRegion mr, bool is_zeroed);
ysr@777:
ysr@777: enum SomePublicConstants {
ysr@777: // HeapRegions are GrainBytes-aligned
ysr@777: // and have sizes that are multiples of GrainBytes.
ysr@777: LogOfHRGrainBytes = 20,
ysr@777: LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize,
ysr@777: GrainBytes = 1 << LogOfHRGrainBytes,
ysr@777: GrainWords = 1 << LogOfHRGrainWords
ysr@777: };
ysr@777:
ysr@777: // Concurrent refinement requires contiguous heap regions (in which TLABs
ysr@777: // might be allocated) to be zero-filled. Each region therefore has a
ysr@777: // zero-fill state.
ysr@777: enum ZeroFillState {
ysr@777: NotZeroFilled,
ysr@777: ZeroFilling,
ysr@777: ZeroFilled,
ysr@777: Allocated
ysr@777: };
ysr@777:
ysr@777: // The number of bytes marked live in the region during the last
ysr@777: // completed marking.
ysr@777: size_t marked_bytes() { return _prev_marked_bytes; }
ysr@777:
ysr@777: // A lower bound on the number of garbage bytes in the region.
ysr@777: size_t garbage_bytes() {
ysr@777: size_t used_at_mark_start_bytes =
ysr@777: (prev_top_at_mark_start() - bottom()) * HeapWordSize;
ysr@777: assert(used_at_mark_start_bytes >= marked_bytes(),
ysr@777: "Can't mark more than we have.");
ysr@777: return used_at_mark_start_bytes - marked_bytes();
ysr@777: }
ysr@777:
ysr@777: // An upper bound on the number of live bytes in the region.
ysr@777: size_t max_live_bytes() { return used() - garbage_bytes(); }
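ysr@777:
ysr@777: // Worked example (illustrative numbers only): if prev_top_at_mark_start()
ysr@777: // is 1024 KB above bottom(), marked_bytes() is 600 KB, and 100 KB have
ysr@777: // been allocated in the region since marking started, then
ysr@777: //   garbage_bytes()  = 1024 KB - 600 KB            = 424 KB (lower bound)
ysr@777: //   max_live_bytes() = used() - garbage_bytes()
ysr@777: //                    = (1024 KB + 100 KB) - 424 KB = 700 KB
ysr@777: // i.e. the 100 KB allocated since the mark started is assumed live.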
ysr@777:
ysr@777: void add_to_marked_bytes(size_t incr_bytes) {
ysr@777: _next_marked_bytes = _next_marked_bytes + incr_bytes;
ysr@777: guarantee( _next_marked_bytes <= used(), "invariant" );
ysr@777: }
ysr@777:
ysr@777: void zero_marked_bytes() {
ysr@777: _prev_marked_bytes = _next_marked_bytes = 0;
ysr@777: }
ysr@777:
ysr@777: bool isHumongous() const { return _humongous; }
ysr@777: bool startsHumongous() const { return _humongous_start; }
ysr@777: bool continuesHumongous() const { return _humongous && ! _humongous_start; }
ysr@777: // For a humongous region, region in which it starts.
ysr@777: HeapRegion* humongous_start_region() const {
ysr@777: return _humongous_start_region;
ysr@777: }
ysr@777:
ysr@777: // Causes the current region to represent the start of a humongous object
ysr@777: // (one that may span several regions).
ysr@777: virtual void set_startsHumongous();
ysr@777:
ysr@777: // The regions that continue a humongous sequence should be added using
ysr@777: // this method, in increasing address order.
ysr@777: void set_continuesHumongous(HeapRegion* start);
ysr@777:
ysr@777: void add_continuingHumongousRegion(HeapRegion* cont);
ysr@777:
ysr@777: // If the region has a remembered set, return a pointer to it.
ysr@777: HeapRegionRemSet* rem_set() const {
ysr@777: return _rem_set;
ysr@777: }
ysr@777:
ysr@777: // True iff the region is in current collection_set.
ysr@777: bool in_collection_set() const {
ysr@777: return _in_collection_set;
ysr@777: }
ysr@777: void set_in_collection_set(bool b) {
ysr@777: _in_collection_set = b;
ysr@777: }
ysr@777: HeapRegion* next_in_collection_set() {
ysr@777: assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777: assert(_next_in_special_set == NULL ||
ysr@777: _next_in_special_set->in_collection_set(),
ysr@777: "Malformed CS.");
ysr@777: return _next_in_special_set;
ysr@777: }
ysr@777: void set_next_in_collection_set(HeapRegion* r) {
ysr@777: assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777: assert(r == NULL || r->in_collection_set(), "Malformed CS.");
ysr@777: _next_in_special_set = r;
ysr@777: }
ysr@777:
ysr@777: // True iff it is or has been an allocation region in the current
ysr@777: // collection pause.
ysr@777: bool is_gc_alloc_region() const {
ysr@777: return _is_gc_alloc_region;
ysr@777: }
ysr@777: void set_is_gc_alloc_region(bool b) {
ysr@777: _is_gc_alloc_region = b;
ysr@777: }
ysr@777: HeapRegion* next_gc_alloc_region() {
ysr@777: assert(is_gc_alloc_region(), "should only invoke on member of CS.");
ysr@777: assert(_next_in_special_set == NULL ||
ysr@777: _next_in_special_set->is_gc_alloc_region(),
ysr@777: "Malformed CS.");
ysr@777: return _next_in_special_set;
ysr@777: }
ysr@777: void set_next_gc_alloc_region(HeapRegion* r) {
ysr@777: assert(is_gc_alloc_region(), "should only invoke on member of CS.");
ysr@777: assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
ysr@777: _next_in_special_set = r;
ysr@777: }
ysr@777:
ysr@777: bool is_reserved() {
ysr@777: return popular();
ysr@777: }
ysr@777:
ysr@777: bool is_on_free_list() {
ysr@777: return _is_on_free_list;
ysr@777: }
ysr@777:
ysr@777: void set_on_free_list(bool b) {
ysr@777: _is_on_free_list = b;
ysr@777: }
ysr@777:
ysr@777: HeapRegion* next_from_free_list() {
ysr@777: assert(is_on_free_list(),
ysr@777: "Should only invoke on free space.");
ysr@777: assert(_next_in_special_set == NULL ||
ysr@777: _next_in_special_set->is_on_free_list(),
ysr@777: "Malformed Free List.");
ysr@777: return _next_in_special_set;
ysr@777: }
ysr@777:
ysr@777: void set_next_on_free_list(HeapRegion* r) {
ysr@777: assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
ysr@777: _next_in_special_set = r;
ysr@777: }
ysr@777:
ysr@777: bool is_on_unclean_list() {
ysr@777: return _is_on_unclean_list;
ysr@777: }
ysr@777:
ysr@777: void set_on_unclean_list(bool b);
ysr@777:
ysr@777: HeapRegion* next_from_unclean_list() {
ysr@777: assert(is_on_unclean_list(),
ysr@777: "Should only invoke on unclean space.");
ysr@777: assert(_next_in_special_set == NULL ||
ysr@777: _next_in_special_set->is_on_unclean_list(),
ysr@777: "Malformed unclean List.");
ysr@777: return _next_in_special_set;
ysr@777: }
ysr@777:
ysr@777: void set_next_on_unclean_list(HeapRegion* r);
ysr@777:
ysr@777: HeapRegion* get_next_young_region() { return _next_young_region; }
ysr@777: void set_next_young_region(HeapRegion* hr) {
ysr@777: _next_young_region = hr;
ysr@777: }
ysr@777:
ysr@777: // Allows logical separation between objects allocated before and after a
ysr@777: // call to save_marks().
ysr@777: void save_marks();
ysr@777:
ysr@777: // Reset HR stuff to default values.
ysr@777: void hr_clear(bool par, bool clear_space);
ysr@777:
ysr@777: void initialize(MemRegion mr, bool clear_space);
ysr@777:
ysr@777: // Ensure that "this" is zero-filled.
ysr@777: void ensure_zero_filled();
ysr@777: // This one requires that the calling thread holds ZF_mon.
ysr@777: void ensure_zero_filled_locked();
ysr@777:
ysr@777: // Get the start of the unmarked area in this region.
ysr@777: HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
ysr@777: HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
ysr@777:
ysr@777: // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
ysr@777: // allocated in the current region before the last call to "save_marks".
ysr@777: void oop_before_save_marks_iterate(OopClosure* cl);
ysr@777:
ysr@777: // This call determines the "filter kind" argument that will be used for
ysr@777: // the next call to "new_dcto_cl" on this region with the "traditional"
ysr@777: // signature (i.e., the call below.) The default, in the absence of a
ysr@777: // preceding call to this method, is "NoFilterKind", and a call to this
ysr@777: // method is necessary for each such call, or else it reverts to the
ysr@777: // default.
ysr@777: // (This is really ugly, but all other methods I could think of changed a
ysr@777: // lot of main-line code for G1.)
ysr@777: void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
ysr@777: _next_fk = nfk;
ysr@777: }
ysr@777:
ysr@777: DirtyCardToOopClosure*
ysr@777: new_dcto_closure(OopClosure* cl,
ysr@777: CardTableModRefBS::PrecisionStyle precision,
ysr@777: HeapRegionDCTOC::FilterKind fk);
ysr@777:
ysr@777: #if WHASSUP
ysr@777: DirtyCardToOopClosure*
ysr@777: new_dcto_closure(OopClosure* cl,
ysr@777: CardTableModRefBS::PrecisionStyle precision,
ysr@777: HeapWord* boundary) {
ysr@777: assert(boundary == NULL, "This arg doesn't make sense here.");
ysr@777: DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
ysr@777: _next_fk = HeapRegionDCTOC::NoFilterKind;
ysr@777: return res;
ysr@777: }
ysr@777: #endif
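ysr@777:
ysr@777: // Illustrative usage of the filter-kind protocol described above (the
ysr@777: // "hr" pointer is a placeholder): a caller that wants the next
ysr@777: // "traditional" new_dcto_cl call on this region to filter out-of-region
ysr@777: // pointers would first do
ysr@777: //
ysr@777: //   hr->set_next_filter_kind(HeapRegionDCTOC::OutOfRegionFilterKind);
ysr@777: //
ysr@777: // main-line code then creates the closure through the traditional
ysr@777: // signature, and the filter kind reverts to NoFilterKind for subsequent
ysr@777: // calls.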
ysr@777:
ysr@777: //
ysr@777: // Note the start or end of marking. This tells the heap region
ysr@777: // that the collector is about to start or has finished (concurrently)
ysr@777: // marking the heap.
ysr@777: //
ysr@777:
ysr@777: // Note the start of a marking phase. Record the
ysr@777: // start of the unmarked area of the region here.
ysr@777: void note_start_of_marking(bool during_initial_mark) {
ysr@777: init_top_at_conc_mark_count();
ysr@777: _next_marked_bytes = 0;
ysr@777: if (during_initial_mark && is_young() && !is_survivor())
ysr@777: _next_top_at_mark_start = bottom();
ysr@777: else
ysr@777: _next_top_at_mark_start = top();
ysr@777: }
ysr@777:
ysr@777: // Note the end of a marking phase. Install the start of
ysr@777: // the unmarked area that was captured at start of marking.
ysr@777: void note_end_of_marking() {
ysr@777: _prev_top_at_mark_start = _next_top_at_mark_start;
ysr@777: _prev_marked_bytes = _next_marked_bytes;
ysr@777: _next_marked_bytes = 0;
ysr@777:
ysr@777: guarantee(_prev_marked_bytes <=
ysr@777: (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
ysr@777: "invariant");
ysr@777: }
ysr@777:
ysr@777: // After an evacuation, we need to update _next_top_at_mark_start
ysr@777: // to be the current top. Note this is only valid if we have only
ysr@777: // ever evacuated into this region. If we evacuate, allocate, and
ysr@777: // then evacuate we are in deep doodoo.
ysr@777: void note_end_of_copying() {
ysr@777: assert(top() >= _next_top_at_mark_start,
ysr@777: "Increase only");
ysr@777: _next_top_at_mark_start = top();
ysr@777: }
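ysr@777:
ysr@777: // A sketch (informational only, no additional API) of how the two
ysr@777: // "top at mark start" values move across one marking cycle:
ysr@777: //
ysr@777: //   note_start_of_marking(during_initial_mark)
ysr@777: //       // _next_top_at_mark_start := top() (or bottom() for
ysr@777: //       // non-survivor young regions during initial mark)
ysr@777: //   ... marking runs concurrently; pauses that evacuate into this region
ysr@777: //       may call note_end_of_copying() to raise _next_top_at_mark_start ...
ysr@777: //   note_end_of_marking()
ysr@777: //       // _prev_* := _next_*; from now on objects at or above
ysr@777: //       // prev_top_at_mark_start() count as allocated since the last mark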
ysr@777:
ysr@777: // Returns "false" iff no object in the region was allocated when the
ysr@777: // last mark phase ended.
ysr@777: bool is_marked() { return _prev_top_at_mark_start != bottom(); }
ysr@777:
ysr@777: // If "is_marked()" is true, then this is the index of the region in
ysr@777: // an array constructed at the end of marking of the regions in a
ysr@777: // "desirability" order.
ysr@777: int sort_index() {
ysr@777: return _sort_index;
ysr@777: }
ysr@777: void set_sort_index(int i) {
ysr@777: _sort_index = i;
ysr@777: }
ysr@777:
ysr@777: void init_top_at_conc_mark_count() {
ysr@777: _top_at_conc_mark_count = bottom();
ysr@777: }
ysr@777:
ysr@777: void set_top_at_conc_mark_count(HeapWord *cur) {
ysr@777: assert(bottom() <= cur && cur <= end(), "Sanity.");
ysr@777: _top_at_conc_mark_count = cur;
ysr@777: }
ysr@777:
ysr@777: HeapWord* top_at_conc_mark_count() {
ysr@777: return _top_at_conc_mark_count;
ysr@777: }
ysr@777:
ysr@777: void reset_during_compaction() {
ysr@777: guarantee( isHumongous() && startsHumongous(),
ysr@777: "should only be called for humongous regions");
ysr@777:
ysr@777: zero_marked_bytes();
ysr@777: init_top_at_mark_start();
ysr@777: }
ysr@777:
ysr@777: bool popular() { return _popularity == Popular; }
ysr@777: void set_popular(bool b) {
ysr@777: if (b) {
ysr@777: _popularity = Popular;
ysr@777: } else {
ysr@777: _popularity = NotPopular;
ysr@777: }
ysr@777: }
ysr@777: bool popular_pending() { return _popularity == PopularPending; }
ysr@777: void set_popular_pending(bool b) {
ysr@777: if (b) {
ysr@777: _popularity = PopularPending;
ysr@777: } else {
ysr@777: _popularity = NotPopular;
ysr@777: }
ysr@777: }
ysr@777:
ysr@777: //
ysr@777: void calc_gc_efficiency(void);
ysr@777: double gc_efficiency() { return _gc_efficiency;}
ysr@777: //
ysr@777:
ysr@777: bool is_young() const { return _young_type != NotYoung; }
ysr@777: bool is_scan_only() const { return _young_type == ScanOnly; }
ysr@777: bool is_survivor() const { return _young_type == Survivor; }
ysr@777:
ysr@777: int young_index_in_cset() const { return _young_index_in_cset; }
ysr@777: void set_young_index_in_cset(int index) {
ysr@777: assert( (index == -1) || is_young(), "pre-condition" );
ysr@777: _young_index_in_cset = index;
ysr@777: }
ysr@777:
ysr@777: int age_in_surv_rate_group() {
ysr@777: assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777: assert( _age_index > -1, "pre-condition" );
ysr@777: return _surv_rate_group->age_in_group(_age_index);
ysr@777: }
ysr@777:
ysr@777: void recalculate_age_in_surv_rate_group() {
ysr@777: assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777: assert( _age_index > -1, "pre-condition" );
ysr@777: _age_index = _surv_rate_group->recalculate_age_index(_age_index);
ysr@777: }
ysr@777:
ysr@777: void record_surv_words_in_group(size_t words_survived) {
ysr@777: assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777: assert( _age_index > -1, "pre-condition" );
ysr@777: int age_in_group = age_in_surv_rate_group();
ysr@777: _surv_rate_group->record_surviving_words(age_in_group, words_survived);
ysr@777: }
ysr@777:
ysr@777: int age_in_surv_rate_group_cond() {
ysr@777: if (_surv_rate_group != NULL)
ysr@777: return age_in_surv_rate_group();
ysr@777: else
ysr@777: return -1;
ysr@777: }
ysr@777:
ysr@777: SurvRateGroup* surv_rate_group() {
ysr@777: return _surv_rate_group;
ysr@777: }
ysr@777:
ysr@777: void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
ysr@777: assert( surv_rate_group != NULL, "pre-condition" );
ysr@777: assert( _surv_rate_group == NULL, "pre-condition" );
ysr@777: assert( is_young(), "pre-condition" );
ysr@777:
ysr@777: _surv_rate_group = surv_rate_group;
ysr@777: _age_index = surv_rate_group->next_age_index();
ysr@777: }
ysr@777:
ysr@777: void uninstall_surv_rate_group() {
ysr@777: if (_surv_rate_group != NULL) {
ysr@777: assert( _age_index > -1, "pre-condition" );
ysr@777: assert( is_young(), "pre-condition" );
ysr@777:
ysr@777: _surv_rate_group = NULL;
ysr@777: _age_index = -1;
ysr@777: } else {
ysr@777: assert( _age_index == -1, "pre-condition" );
ysr@777: }
ysr@777: }
ysr@777:
ysr@777: void set_young() { set_young_type(Young); }
ysr@777:
ysr@777: void set_scan_only() { set_young_type(ScanOnly); }
ysr@777:
ysr@777: void set_survivor() { set_young_type(Survivor); }
ysr@777:
ysr@777: void set_not_young() { set_young_type(NotYoung); }
ysr@777:
ysr@777: // Determine if an object has been allocated since the last
ysr@777: // mark performed by the collector. This returns true iff the object
ysr@777: // is within the unmarked area of the region.
ysr@777: bool obj_allocated_since_prev_marking(oop obj) const {
ysr@777: return (HeapWord *) obj >= prev_top_at_mark_start();
ysr@777: }
ysr@777: bool obj_allocated_since_next_marking(oop obj) const {
ysr@777: return (HeapWord *) obj >= next_top_at_mark_start();
ysr@777: }
ysr@777:
ysr@777: // For parallel heapRegion traversal.
ysr@777: bool claimHeapRegion(int claimValue);
ysr@777: jint claim_value() { return _claimed; }
ysr@777: // Use this carefully: only when you're sure no one is claiming...
ysr@777: void set_claim_value(int claimValue) { _claimed = claimValue; }
ysr@777:
ysr@777: // Returns the "evacuation_failed" property of the region.
ysr@777: bool evacuation_failed() { return _evacuation_failed; }
ysr@777:
ysr@777: // Sets the "evacuation_failed" property of the region.
ysr@777: void set_evacuation_failed(bool b) {
ysr@777: _evacuation_failed = b;
ysr@777:
ysr@777: if (b) {
ysr@777: init_top_at_conc_mark_count();
ysr@777: _next_marked_bytes = 0;
ysr@777: }
ysr@777: }
ysr@777:
ysr@777: // Requires that "mr" be entirely within the region.
ysr@777: // Apply "cl->do_object" to all objects that intersect with "mr".
ysr@777: // If the iteration encounters an unparseable portion of the region,
ysr@777: // or if "cl->abort()" is true after a closure application,
ysr@777: // terminate the iteration and return the address of the start of the
ysr@777: // subregion that isn't done. (The two can be distinguished by querying
ysr@777: // "cl->abort()".) Return of "NULL" indicates that the iteration
ysr@777: // completed.
ysr@777: HeapWord*
ysr@777: object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
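ysr@777:
ysr@777: // An illustrative usage sketch of the "careful" contract above ("mr" and
ysr@777: // "cl" are placeholders for a caller's region and closure):
ysr@777: //
ysr@777: //   HeapWord* stop = hr->object_iterate_mem_careful(mr, &cl);
ysr@777: //   if (stop == NULL) {
ysr@777: //     // the whole of mr was processed
ysr@777: //   } else if (cl.abort()) {
ysr@777: //     // the closure requested termination; [stop, mr.end()) is not done
ysr@777: //   } else {
ysr@777: //     // an unparseable portion was encountered starting at stop
ysr@777: //   }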
ysr@777:
ysr@777: HeapWord*
ysr@777: oops_on_card_seq_iterate_careful(MemRegion mr,
ysr@777: FilterOutOfRegionClosure* cl);
ysr@777:
ysr@777: // The region "mr" is entirely in "this", and starts and ends at block
ysr@777: // boundaries. The caller declares that all the contained blocks are
ysr@777: // coalesced into one.
ysr@777: void declare_filled_region_to_BOT(MemRegion mr) {
ysr@777: _offsets.single_block(mr.start(), mr.end());
ysr@777: }
ysr@777:
ysr@777: // A version of block start that is guaranteed to find *some* block
ysr@777: // boundary at or before "p", but does no object iteration, and may
ysr@777: // therefore be used safely when the heap is unparseable.
ysr@777: HeapWord* block_start_careful(const void* p) const {
ysr@777: return _offsets.block_start_careful(p);
ysr@777: }
ysr@777:
ysr@777: // Requires that "addr" is within the region. Returns the start of the
ysr@777: // first ("careful") block that starts at or after "addr", or else the
ysr@777: // "end" of the region if there is no such block.
ysr@777: HeapWord* next_block_start_careful(HeapWord* addr);
ysr@777:
ysr@777: // Returns the zero-fill-state of the current region.
ysr@777: ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
ysr@777: bool zero_fill_is_allocated() { return _zfs == Allocated; }
ysr@777: Thread* zero_filler() { return _zero_filler; }
ysr@777:
ysr@777: // Indicate that the contents of the region are unknown, and therefore
ysr@777: // might require zero-filling.
ysr@777: void set_zero_fill_needed() {
ysr@777: set_zero_fill_state_work(NotZeroFilled);
ysr@777: }
ysr@777: void set_zero_fill_in_progress(Thread* t) {
ysr@777: set_zero_fill_state_work(ZeroFilling);
ysr@777: _zero_filler = t;
ysr@777: }
ysr@777: void set_zero_fill_complete();
ysr@777: void set_zero_fill_allocated() {
ysr@777: set_zero_fill_state_work(Allocated);
ysr@777: }
ysr@777:
ysr@777: void set_zero_fill_state_work(ZeroFillState zfs);
ysr@777:
ysr@777: // This is called when a full collection shrinks the heap.
ysr@777: // We want to set the heap region to a value which says
ysr@777: // it is no longer part of the heap. For now, we'll let "NotZeroFilled" fill
ysr@777: // that role.
ysr@777: void reset_zero_fill() {
ysr@777: set_zero_fill_state_work(NotZeroFilled);
ysr@777: _zero_filler = NULL;
ysr@777: }
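ysr@777:
ysr@777: // For reference, the state each setter above installs
ysr@777: // (set_zero_fill_complete() is only declared here, but presumably
ysr@777: // installs ZeroFilled):
ysr@777: //   set_zero_fill_needed() / reset_zero_fill()  -> NotZeroFilled
ysr@777: //   set_zero_fill_in_progress(t)                -> ZeroFilling
ysr@777: //   set_zero_fill_complete()                    -> ZeroFilled
ysr@777: //   set_zero_fill_allocated()                   -> Allocated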
ysr@777:
ysr@777: #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
ysr@777: virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
ysr@777: SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
ysr@777:
ysr@777: CompactibleSpace* next_compaction_space() const;
ysr@777:
ysr@777: virtual void reset_after_compaction();
ysr@777:
ysr@777: void print() const;
ysr@777: void print_on(outputStream* st) const;
ysr@777:
ysr@777: // Override
ysr@777: virtual void verify(bool allow_dirty) const;
ysr@777:
ysr@777: #ifdef DEBUG
ysr@777: HeapWord* allocate(size_t size);
ysr@777: #endif
ysr@777: };
ysr@777:
ysr@777: // HeapRegionClosure is used for iterating over regions.
ysr@777: // Terminates the iteration when the "doHeapRegion" method returns "true".
ysr@777: class HeapRegionClosure : public StackObj {
ysr@777: friend class HeapRegionSeq;
ysr@777: friend class G1CollectedHeap;
ysr@777:
ysr@777: bool _complete;
ysr@777: void incomplete() { _complete = false; }
ysr@777:
ysr@777: public:
ysr@777: HeapRegionClosure(): _complete(true) {}
ysr@777:
ysr@777: // Typically called on each region until it returns true.
ysr@777: virtual bool doHeapRegion(HeapRegion* r) = 0;
ysr@777:
ysr@777: // True after iteration if the closure was applied to all heap regions
ysr@777: // and returned "false" in all cases.
ysr@777: bool complete() { return _complete; }
ysr@777: };
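ysr@777:
ysr@777: // A minimal example subclass (illustrative, not part of the original
ysr@777: // header) showing the iteration contract: doHeapRegion returns false to
ysr@777: // continue, true to terminate the iteration early.
ysr@777: //
ysr@777: //   class CountNonEmptyRegionsClosure : public HeapRegionClosure {
ysr@777: //     size_t _count;
ysr@777: //   public:
ysr@777: //     CountNonEmptyRegionsClosure() : _count(0) {}
ysr@777: //     bool doHeapRegion(HeapRegion* r) {
ysr@777: //       if (r->used() > 0) _count++;
ysr@777: //       return false;   // never terminate early: visit every region
ysr@777: //     }
ysr@777: //     size_t count() const { return _count; }
ysr@777: //   };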
ysr@777:
ysr@777: // A linked list of heap regions. It leaves the "next" field
ysr@777: // unspecified; that's up to subtypes.
ysr@777: class RegionList {
ysr@777: protected:
ysr@777: virtual HeapRegion* get_next(HeapRegion* chr) = 0;
ysr@777: virtual void set_next(HeapRegion* chr,
ysr@777: HeapRegion* new_next) = 0;
ysr@777:
ysr@777: HeapRegion* _hd;
ysr@777: HeapRegion* _tl;
ysr@777: size_t _sz;
ysr@777:
ysr@777: // Protected constructor because this type is only meaningful
ysr@777: // when the _get/_set next functions are defined.
ysr@777: RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
ysr@777: public:
ysr@777: void reset() {
ysr@777: _hd = NULL;
ysr@777: _tl = NULL;
ysr@777: _sz = 0;
ysr@777: }
ysr@777: HeapRegion* hd() { return _hd; }
ysr@777: HeapRegion* tl() { return _tl; }
ysr@777: size_t sz() { return _sz; }
ysr@777: size_t length();
ysr@777:
ysr@777: bool well_formed() {
ysr@777: return
ysr@777: ((hd() == NULL && tl() == NULL && sz() == 0)
ysr@777: || (hd() != NULL && tl() != NULL && sz() > 0))
ysr@777: && (sz() == length());
ysr@777: }
ysr@777: virtual void insert_before_head(HeapRegion* r);
ysr@777: void prepend_list(RegionList* new_list);
ysr@777: virtual HeapRegion* pop();
ysr@777: void dec_sz() { _sz--; }
ysr@777: // Requires that "r" is an element of the list, and is not the tail.
ysr@777: void delete_after(HeapRegion* r);
ysr@777: };
ysr@777:
ysr@777: class EmptyNonHRegionList: public RegionList {
ysr@777: protected:
ysr@777: // Protected constructor because this type is only meaningful
ysr@777: // when the _get/_set next functions are defined.
ysr@777: EmptyNonHRegionList() : RegionList() {}
ysr@777:
ysr@777: public:
ysr@777: void insert_before_head(HeapRegion* r) {
ysr@777: // assert(r->is_empty(), "Better be empty");
ysr@777: assert(!r->isHumongous(), "Better not be humongous.");
ysr@777: RegionList::insert_before_head(r);
ysr@777: }
ysr@777: void prepend_list(EmptyNonHRegionList* new_list) {
ysr@777: // assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
ysr@777: // "Better be empty");
ysr@777: assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
ysr@777: "Better not be humongous.");
ysr@777: // assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
ysr@777: // "Better be empty");
ysr@777: assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
ysr@777: "Better not be humongous.");
ysr@777: RegionList::prepend_list(new_list);
ysr@777: }
ysr@777: };
ysr@777:
ysr@777: class UncleanRegionList: public EmptyNonHRegionList {
ysr@777: public:
ysr@777: HeapRegion* get_next(HeapRegion* hr) {
ysr@777: return hr->next_from_unclean_list();
ysr@777: }
ysr@777: void set_next(HeapRegion* hr, HeapRegion* new_next) {
ysr@777: hr->set_next_on_unclean_list(new_next);
ysr@777: }
ysr@777:
ysr@777: UncleanRegionList() : EmptyNonHRegionList() {}
ysr@777:
ysr@777: void insert_before_head(HeapRegion* r) {
ysr@777: assert(!r->is_on_free_list(),
ysr@777: "Better not already be on free list");
ysr@777: assert(!r->is_on_unclean_list(),
ysr@777: "Better not already be on unclean list");
ysr@777: r->set_zero_fill_needed();
ysr@777: r->set_on_unclean_list(true);
ysr@777: EmptyNonHRegionList::insert_before_head(r);
ysr@777: }
ysr@777: void prepend_list(UncleanRegionList* new_list) {
ysr@777: assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
ysr@777: "Better not already be on free list");
ysr@777: assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
ysr@777: "Better already be marked as on unclean list");
ysr@777: assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
ysr@777: "Better not already be on free list");
ysr@777: assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
ysr@777: "Better already be marked as on unclean list");
ysr@777: EmptyNonHRegionList::prepend_list(new_list);
ysr@777: }
ysr@777: HeapRegion* pop() {
ysr@777: HeapRegion* res = RegionList::pop();
ysr@777: if (res != NULL) res->set_on_unclean_list(false);
ysr@777: return res;
ysr@777: }
ysr@777: };
ysr@777:
ysr@777: // Local Variables: ***
ysr@777: // c-indentation-style: gnu ***
ysr@777: // End: ***
ysr@777:
ysr@777: #endif // SERIALGC