/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Remembered set for a heap region.  Represents a set of "cards" that
// contain pointers into the owner heap region.  Cards are defined somewhat
// abstractly, in terms of what the "BlockOffsetTable" in use can parse.

class G1CollectedHeap;
class G1BlockOffsetSharedArray;
class HeapRegion;
class HeapRegionRemSetIterator;
class PosParPRT;
class SparsePRT;

// The "_coarse_map" is a bitmap with one bit for each region, where set
// bits indicate that the corresponding region may contain some pointer
// into the owning region.

// The "_fine_grain_entries" array is an open hash table of PerRegionTables
// (PRTs), indicating regions for which we're keeping the RS as a set of
// cards.  The strategy is to cap the size of the fine-grain table,
// deleting an entry and setting the corresponding coarse-grained bit when
// we would overflow this cap.

// We use a mixture of locking and lock-free techniques here.  We allow
// threads to locate PRTs without locking, but threads attempting to alter
// a bucket list obtain a lock.  This means that any failing attempt to
// find a PRT must be retried with the lock.  It might seem dangerous that
// a read can find a PRT that is concurrently deleted.  This is all right,
// because:
//
//   1) We only actually free PRTs at safepoints (though we reuse them at
//      other times).
//   2) We find PRTs in an attempt to add entries.  If a PRT is deleted,
//      its _coarse_map bit is set, so the bit we were attempting to add
//      is represented.  If a deleted PRT is re-used, a thread adding a bit,
//      thinking the PRT is for a different region, does no harm.
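
// The retry-with-lock protocol described above has roughly the following
// shape.  This is an illustrative sketch only: "ind" is the bucket index
// computed for the source region "from_hr", and the exact sequence lives
// in the .cpp file.
//
//   PosParPRT* prt = find_region_table(ind, from_hr);   // no lock taken
//   if (prt == NULL) {
//     MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
//     prt = find_region_table(ind, from_hr);            // retry under "_m"
//     if (prt == NULL) {
//       // Still absent: safe to install a new PRT while holding "_m".
//     }
//   }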
class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
  friend class HeapRegionRemSetIterator;

  G1CollectedHeap* _g1h;
  Mutex            _m;
  HeapRegion*      _hr;

  // These are protected by "_m".
  BitMap      _coarse_map;
  size_t      _n_coarse_entries;
  static jint _n_coarsenings;

  PosParPRT** _fine_grain_regions;
  size_t      _n_fine_entries;

#define SAMPLE_FOR_EVICTION 1
#if SAMPLE_FOR_EVICTION
  size_t        _fine_eviction_start;
  static size_t _fine_eviction_stride;
  static size_t _fine_eviction_sample_size;
#endif

  SparsePRT   _sparse_table;

  // These are static after init.
  static size_t _max_fine_entries;
  static size_t _mod_max_fine_entries_mask;

  // Requires "ind" to be the index of the bucket list appropriate for
  // "hr".  If this list contains an entry for "hr", return it, otherwise
  // return "NULL".
  PosParPRT* find_region_table(size_t ind, HeapRegion* hr) const;

  // Find, delete, and return a candidate PosParPRT, if any exists,
  // adding the deleted region to the coarse bitmap.  Requires the caller
  // to hold _m, and the fine-grain table to be full.
  PosParPRT* delete_region_table();

  // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
  // be the correct index for "hr"), delete it and return true; else return
  // false.
  bool del_single_region_table(size_t ind, HeapRegion* hr);

  static jint _cache_probes;
  static jint _cache_hits;

  // Indexed by thread X heap region, to minimize thread contention.
  static int**  _from_card_cache;
  static size_t _from_card_cache_max_regions;
  static size_t _from_card_cache_mem_size;

public:
  OtherRegionsTable(HeapRegion* hr);

  HeapRegion* hr() const { return _hr; }

  // For now.  Could "expand" some tables in the future, so that this makes
  // more sense.
  void add_reference(oop* from, int tid);

  void add_reference(oop* from) {
    return add_reference(from, 0);
  }

  // Removes any entries shown by the given bitmaps to contain only dead
  // objects.
  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

  // Although declared "const", these take the lock "_m".
  size_t occupied() const;
  size_t occ_fine() const;
  size_t occ_coarse() const;
  size_t occ_sparse() const;

  static jint n_coarsenings() { return _n_coarsenings; }

  // Returns size in bytes.  Although declared "const", takes the lock "_m".
  size_t mem_size() const;
  static size_t static_mem_size();
  static size_t fl_mem_size();

  bool contains_reference(oop* from) const;
  bool contains_reference_locked(oop* from) const;

  void clear();

  // Specifically clear the from_card_cache.
  void clear_fcc();

  // "from_hr" is being cleared; remove any entries from it.
  void clear_incoming_entry(HeapRegion* from_hr);

  // Declare the heap size (in # of regions) to the OtherRegionsTable.
  // (Uses it to initialize from_card_cache.)
  static void init_from_card_cache(size_t max_regions);

  // Declares that only regions i such that 0 <= i < new_n_regs are in use.
  // Make sure any entries for higher regions are invalid.
  static void shrink_from_card_cache(size_t new_n_regs);

  static void print_from_card_cache();
};
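
// Illustrative sketch of the role of "_from_card_cache": each
// (tid, region) slot remembers the last card added, so a run of writes
// from the same card costs one table lookup rather than a full insertion.
// This is a sketch only; "hr_index" and "card_index" are hypothetical
// locals, and the real use of the field lives in the .cpp file.
//
//   int cur = _from_card_cache[tid][hr_index];
//   if (cur == (int) card_index) return;      // duplicate; nothing to do
//   _from_card_cache[tid][hr_index] = (int) card_index;
//   // ... fall through to the sparse/fine/coarse tables ...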

class HeapRegionRemSet : public CHeapObj {
  friend class VMStructs;
  friend class HeapRegionRemSetIterator;

public:
  enum Event {
    Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd
  };

private:
  G1BlockOffsetSharedArray* _bosa;
  G1BlockOffsetSharedArray* bosa() const { return _bosa; }

  static bool _par_traversal;

  OtherRegionsTable _other_regions;

  // One set bit for every region that has an entry for this one.
  BitMap _outgoing_region_map;

  // Clear entries for the current region in any rem sets named in
  // the _outgoing_region_map.
  void clear_outgoing_entries();

#if MAYBE
  // Audit the given card index.
  void audit_card(size_t card_num, HeapRegion* hr, u2* rc_arr,
                  HeapRegionRemSet* empty_cards, size_t* one_obj_cards);

  // Assumes that "audit_stage1" has been called for "hr", to set up
  // "shadow" and "new_rs" appropriately.  Identifies individual popular
  // objects; returns "true" if any are found.
  bool audit_find_pop(HeapRegion* hr, u2* rc_arr);

  // Assumes that "audit_stage1" has been called for "hr", to set up
  // "shadow" and "new_rs" appropriately.  Identifies individual popular
  // objects, and determines the number of entries in "new_rs" if any such
  // popular objects are ignored.  If this is sufficiently small, returns
  // "false" to indicate that a constraint should not be introduced.
  // Otherwise, returns "true" to indicate that we should go ahead with
  // adding the constraint.
  bool audit_stag(HeapRegion* hr, u2* rc_arr);

  u2* alloc_rc_array();

  SeqHeapRegionRemSet* audit_post(u2* rc_arr, size_t multi_obj_crds,
                                  SeqHeapRegionRemSet* empty_cards);
#endif

  enum ParIterState { Unclaimed, Claimed, Complete };
  ParIterState _iter_state;
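
  // "claim_iter()" (declared below) can be implemented as a single atomic
  // compare-and-swap on "_iter_state".  A minimal sketch, assuming the
  // usual HotSpot Atomic interface; the real definition is in the .cpp
  // file:
  //
  //   bool claim_iter() {
  //     return Atomic::cmpxchg((jint) Claimed,
  //                            (volatile jint*) &_iter_state,
  //                            (jint) Unclaimed) == (jint) Unclaimed;
  //   }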

  // Unused unless G1RecordHRRSOops is true.

  static const int MaxRecorded = 1000000;
  static oop**        _recorded_oops;
  static HeapWord**   _recorded_cards;
  static HeapRegion** _recorded_regions;
  static int          _n_recorded;

  static const int MaxRecordedEvents = 1000;
  static Event* _recorded_events;
  static int*   _recorded_event_index;
  static int    _n_recorded_events;

  static void print_event(outputStream* str, Event evnt);

public:
  HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                   HeapRegion* hr);

  static int num_par_rem_sets();
  static bool par_traversal() { return _par_traversal; }
  static void set_par_traversal(bool b);

  HeapRegion* hr() const {
    return _other_regions.hr();
  }

  size_t occupied() const {
    return _other_regions.occupied();
  }
  size_t occ_fine() const {
    return _other_regions.occ_fine();
  }
  size_t occ_coarse() const {
    return _other_regions.occ_coarse();
  }
  size_t occ_sparse() const {
    return _other_regions.occ_sparse();
  }

  static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); }

  /* Used in the sequential case.  Returns "true" iff this addition causes
     the size limit to be reached. */
  bool add_reference(oop* from) {
    _other_regions.add_reference(from);
    return false;
  }

  /* Used in the parallel case.  Returns "true" iff this addition causes
     the size limit to be reached. */
  bool add_reference(oop* from, int tid) {
    _other_regions.add_reference(from, tid);
    return false;
  }

  // Records the fact that the current region contains an outgoing
  // reference into "to_hr".
  void add_outgoing_reference(HeapRegion* to_hr);

  // Removes any entries shown by the given bitmaps to contain only dead
  // objects.
  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

  // The region is being reclaimed; clear its remset, and any mention of
  // entries for this region in other remsets.
  void clear();

  // Forget any entries due to pointers from "from_hr".
  void clear_incoming_entry(HeapRegion* from_hr) {
    _other_regions.clear_incoming_entry(from_hr);
  }

#if 0
  virtual void cleanup() = 0;
#endif

  // Should be called from single-threaded code.
  void init_for_par_iteration();
  // Attempt to claim the region.  Returns true iff this call caused an
  // atomic transition from Unclaimed to Claimed.
  bool claim_iter();
  // Sets the iteration state to "complete".
  void set_iter_complete();
  // Returns "true" iff the region's iteration is complete.
  bool iter_is_complete();

  // Initialize the given iterator to iterate over this rem set.
  void init_iterator(HeapRegionRemSetIterator* iter) const;
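
  // Taken together, a parallel worker's use of the claim protocol and the
  // iterator might look like the following sketch ("hrrs" and
  // "process_card()" are hypothetical names, not part of this interface):
  //
  //   if (hrrs->claim_iter()) {              // exactly one worker wins
  //     HeapRegionRemSetIterator iter;
  //     hrrs->init_iterator(&iter);
  //     size_t card_index;
  //     while (iter.has_next(card_index)) {
  //       process_card(card_index);
  //     }
  //     hrrs->set_iter_complete();
  //   }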

#if 0
  // Apply the "do_card" method to the start address of every card in the
  // rem set.  Returns false if some application of the closure aborted.
  virtual bool card_iterate(CardClosure* iter) = 0;
#endif

  // The actual # of bytes this hr_remset takes up.
  // "_other_regions.mem_size()" already includes sizeof(OtherRegionsTable),
  // which sizeof(HeapRegionRemSet) counts again; subtract it so the
  // embedded table is not double-counted.
  size_t mem_size() {
    return _other_regions.mem_size()
      + sizeof(HeapRegionRemSet) - sizeof(OtherRegionsTable);
  }

  // Returns the memory occupancy of all static data structures associated
  // with remembered sets.
  static size_t static_mem_size() {
    return OtherRegionsTable::static_mem_size();
  }

  // Returns the memory occupancy of all free_list data structures associated
  // with remembered sets.
  static size_t fl_mem_size() {
    return OtherRegionsTable::fl_mem_size();
  }

  bool contains_reference(oop* from) const {
    return _other_regions.contains_reference(from);
  }
  void print() const;

#if MAYBE
  // We are about to introduce a constraint, requiring the collection time
  // of the region owning this RS to be <= that of "hr", and forgetting
  // pointers from the owning region to "hr".  Before doing so, examines
  // this rem set for pointers to "hr", possibly identifying some popular
  // objects, and possibly finding some cards that no longer contain
  // pointers to "hr".
  //
  // These steps may prevent the constraint from being necessary; in
  // which case returns a set of cards now thought to contain no pointers
  // into HR.  In the normal (I assume) case, returns NULL, indicating that
  // we should go ahead and add the constraint.
  virtual SeqHeapRegionRemSet* audit(HeapRegion* hr) = 0;
#endif

  // Called during a stop-the-world phase to perform any deferred cleanups.
  // The second version may be called by parallel threads after they finish
  // collection work.
  static void cleanup();
  static void par_cleanup();

  // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
  // (Uses it to initialize the from_card_cache.)
  static void init_heap(size_t max_regions) {
    OtherRegionsTable::init_from_card_cache(max_regions);
  }

  // Declares that only regions i such that 0 <= i < new_n_regs are in use.
  static void shrink_heap(size_t new_n_regs) {
    OtherRegionsTable::shrink_from_card_cache(new_n_regs);
  }

#ifndef PRODUCT
  static void print_from_card_cache() {
    OtherRegionsTable::print_from_card_cache();
  }
#endif

  static void record(HeapRegion* hr, oop* f);
  static void print_recorded();
  static void record_event(Event evnt);

  // Run unit tests.
#ifndef PRODUCT
  static void test();
#endif

};
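
// A heap-wide accounting pass built on the interface above would sum the
// per-region sizes and add the static parts once in total.  A sketch only:
// "n_regions()" and "region_at()" stand in for whatever region iteration
// G1CollectedHeap provides, and "rem_set()" for the accessor that yields a
// region's HeapRegionRemSet.
//
//   size_t total = HeapRegionRemSet::static_mem_size()
//                + HeapRegionRemSet::fl_mem_size();
//   for (size_t i = 0; i < n_regions(); i++) {
//     total += region_at(i)->rem_set()->mem_size();
//   }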

class HeapRegionRemSetIterator : public CHeapObj {

  // The region over which we're iterating.
  const HeapRegionRemSet* _hrrs;

  // Local caching of HRRS fields.
  const BitMap* _coarse_map;
  PosParPRT**   _fine_grain_regions;

  G1BlockOffsetSharedArray* _bosa;
  G1CollectedHeap*          _g1h;

  // The number yielded since initialization.
  size_t _n_yielded_fine;
  size_t _n_yielded_coarse;
  size_t _n_yielded_sparse;

  // Which table the iterator is currently traversing: first the sparse
  // table, then the fine-grain table, and finally the coarse map.
  enum IterState {
    Sparse,
    Fine,
    Coarse
  };
  IterState _is;

  // In both kinds of iteration, heap offset of first card of current
  // region.
  size_t _cur_region_card_offset;
  // Card offset within current region.
  size_t _cur_region_cur_card;

  // Coarse table iteration fields:

  // Current region index.
  int _coarse_cur_region_index;
  int _coarse_cur_region_cur_card;

  bool coarse_has_next(size_t& card_index);

  // Fine table iteration fields:

  // Index of the bucket list we're working on.
  int _fine_array_index;
  // The PRT we're currently traversing within the current bucket list.
  PosParPRT* _fine_cur_prt;

  /* SparsePRT:: */ SparsePRTIter _sparse_iter;

  void fine_find_next_non_null_prt();

  bool fine_has_next();
  bool fine_has_next(size_t& card_index);

public:
  // We require an iterator to be initialized before use, so the
  // constructor does little.
  HeapRegionRemSetIterator();

  void initialize(const HeapRegionRemSet* hrrs);

  // If there remains one or more cards to be yielded, returns true and
  // sets "card_index" to one of those cards (which is then considered
  // yielded).  Otherwise, returns false (and leaves "card_index"
  // undefined).
  bool has_next(size_t& card_index);

  size_t n_yielded_fine()   { return _n_yielded_fine; }
  size_t n_yielded_coarse() { return _n_yielded_coarse; }
  size_t n_yielded_sparse() { return _n_yielded_sparse; }
  size_t n_yielded() {
    return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse();
  }
};

#if 0
class CardClosure: public Closure {
public:
  virtual void do_card(HeapWord* card_start) = 0;
};
#endif
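
// Illustrative use of the yield accounting above (a sketch; "hrrs" is a
// hypothetical, already-populated HeapRegionRemSet*): a complete traversal
// should yield exactly the cards the remembered set reports as occupied.
//
//   HeapRegionRemSetIterator iter;
//   iter.initialize(hrrs);
//   size_t card_index;
//   size_t n = 0;
//   while (iter.has_next(card_index)) n++;
//   assert(n == iter.n_yielded(), "iterator bookkeeping");
//   assert(iter.n_yielded() == hrrs->occupied(), "missed or extra cards");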