/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP

#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
#include "gc_implementation/g1/sparsePRT.hpp"

// Remembered set for a heap region. Represent a set of "cards" that
// contain pointers into the owner heap region. Cards are defined somewhat
// abstractly, in terms of what the "BlockOffsetTable" in use can parse.
ysr@777: ysr@777: class G1CollectedHeap; ysr@777: class G1BlockOffsetSharedArray; ysr@777: class HeapRegion; ysr@777: class HeapRegionRemSetIterator; johnc@3891: class PerRegionTable; ysr@777: class SparsePRT; johnc@5548: class nmethod; ysr@777: tonyp@2493: // Essentially a wrapper around SparsePRTCleanupTask. See tonyp@2493: // sparsePRT.hpp for more details. tonyp@2493: class HRRSCleanupTask : public SparsePRTCleanupTask { tonyp@2493: }; ysr@777: tschatzl@6407: // The FromCardCache remembers the most recently processed card on the heap on tschatzl@6407: // a per-region and per-thread basis. tschatzl@6407: class FromCardCache : public AllStatic { tschatzl@6407: private: tschatzl@6407: // Array of card indices. Indexed by thread X and heap region to minimize tschatzl@6407: // thread contention. tschatzl@6407: static int** _cache; tschatzl@6407: static uint _max_regions; tschatzl@6407: static size_t _static_mem_size; tschatzl@6407: tschatzl@6407: public: tschatzl@6407: enum { tschatzl@6407: InvalidCard = -1 // Card value of an invalid card, i.e. a card index not otherwise used. tschatzl@6407: }; tschatzl@6407: tschatzl@6407: static void clear(uint region_idx); tschatzl@6407: tschatzl@6407: // Returns true if the given card is in the cache at the given location, or tschatzl@6407: // replaces the card at that location and returns false. 
tschatzl@6407: static bool contains_or_replace(uint worker_id, uint region_idx, int card) { tschatzl@6407: int card_in_cache = at(worker_id, region_idx); tschatzl@6407: if (card_in_cache == card) { tschatzl@6407: return true; tschatzl@6407: } else { tschatzl@6407: set(worker_id, region_idx, card); tschatzl@6407: return false; tschatzl@6407: } tschatzl@6407: } tschatzl@6407: tschatzl@6407: static int at(uint worker_id, uint region_idx) { tschatzl@6407: return _cache[worker_id][region_idx]; tschatzl@6407: } tschatzl@6407: tschatzl@6407: static void set(uint worker_id, uint region_idx, int val) { tschatzl@6407: _cache[worker_id][region_idx] = val; tschatzl@6407: } tschatzl@6407: tschatzl@6407: static void initialize(uint n_par_rs, uint max_num_regions); tschatzl@6407: tschatzl@7051: static void invalidate(uint start_idx, size_t num_regions); tschatzl@6407: tschatzl@6407: static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN; tschatzl@6407: tschatzl@6407: static size_t static_mem_size() { tschatzl@6407: return _static_mem_size; tschatzl@6407: } tschatzl@6407: }; tschatzl@6407: ysr@777: // The "_coarse_map" is a bitmap with one bit for each region, where set ysr@777: // bits indicate that the corresponding region may contain some pointer ysr@777: // into the owning region. ysr@777: ysr@777: // The "_fine_grain_entries" array is an open hash table of PerRegionTables ysr@777: // (PRTs), indicating regions for which we're keeping the RS as a set of ysr@777: // cards. The strategy is to cap the size of the fine-grain table, ysr@777: // deleting an entry and setting the corresponding coarse-grained bit when ysr@777: // we would overflow this cap. ysr@777: ysr@777: // We use a mixture of locking and lock-free techniques here. We allow ysr@777: // threads to locate PRTs without locking, but threads attempting to alter ysr@777: // a bucket list obtain a lock. This means that any failing attempt to ysr@777: // find a PRT must be retried with the lock. 
It might seem dangerous that ysr@777: // a read can find a PRT that is concurrently deleted. This is all right, ysr@777: // because: ysr@777: // ysr@777: // 1) We only actually free PRT's at safe points (though we reuse them at ysr@777: // other times). ysr@777: // 2) We find PRT's in an attempt to add entries. If a PRT is deleted, ysr@777: // it's _coarse_map bit is set, so the that we were attempting to add ysr@777: // is represented. If a deleted PRT is re-used, a thread adding a bit, ysr@777: // thinking the PRT is for a different region, does no harm. ysr@777: apetrusenko@984: class OtherRegionsTable VALUE_OBJ_CLASS_SPEC { ysr@777: friend class HeapRegionRemSetIterator; ysr@777: ysr@777: G1CollectedHeap* _g1h; tschatzl@6402: Mutex* _m; ysr@777: HeapRegion* _hr; ysr@777: ysr@777: // These are protected by "_m". ysr@777: BitMap _coarse_map; ysr@777: size_t _n_coarse_entries; ysr@777: static jint _n_coarsenings; ysr@777: johnc@3891: PerRegionTable** _fine_grain_regions; johnc@3891: size_t _n_fine_entries; ysr@777: johnc@3956: // The fine grain remembered sets are doubly linked together using johnc@3956: // their 'next' and 'prev' fields. johnc@3956: // This allows fast bulk freeing of all the fine grain remembered johnc@3956: // set entries, and fast finding of all of them without iterating johnc@3956: // over the _fine_grain_regions table. johnc@3956: PerRegionTable * _first_all_fine_prts; johnc@3956: PerRegionTable * _last_all_fine_prts; johnc@3956: johnc@3891: // Used to sample a subset of the fine grain PRTs to determine which johnc@3891: // PRT to evict and coarsen. ysr@777: size_t _fine_eviction_start; ysr@777: static size_t _fine_eviction_stride; ysr@777: static size_t _fine_eviction_sample_size; ysr@777: ysr@777: SparsePRT _sparse_table; ysr@777: ysr@777: // These are static after init. 
ysr@777: static size_t _max_fine_entries; ysr@777: static size_t _mod_max_fine_entries_mask; ysr@777: ysr@777: // Requires "prt" to be the first element of the bucket list appropriate ysr@777: // for "hr". If this list contains an entry for "hr", return it, ysr@777: // otherwise return "NULL". johnc@3891: PerRegionTable* find_region_table(size_t ind, HeapRegion* hr) const; ysr@777: johnc@3891: // Find, delete, and return a candidate PerRegionTable, if any exists, ysr@777: // adding the deleted region to the coarse bitmap. Requires the caller ysr@777: // to hold _m, and the fine-grain table to be full. johnc@3891: PerRegionTable* delete_region_table(); ysr@777: ysr@777: // If a PRT for "hr" is in the bucket list indicated by "ind" (which must ysr@777: // be the correct index for "hr"), delete it and return true; else return ysr@777: // false. ysr@777: bool del_single_region_table(size_t ind, HeapRegion* hr); ysr@777: johnc@3956: // link/add the given fine grain remembered set into the "all" list johnc@3956: void link_to_all(PerRegionTable * prt); johnc@3956: // unlink/remove the given fine grain remembered set into the "all" list johnc@3956: void unlink_from_all(PerRegionTable * prt); johnc@3956: ysr@777: public: tschatzl@6402: OtherRegionsTable(HeapRegion* hr, Mutex* m); ysr@777: ysr@777: HeapRegion* hr() const { return _hr; } ysr@777: ysr@777: // For now. Could "expand" some tables in the future, so that this made ysr@777: // sense. ysr@1280: void add_reference(OopOrNarrowOopStar from, int tid); ysr@777: ysr@777: // Removes any entries shown by the given bitmaps to contain only dead ysr@777: // objects. ysr@777: void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); ysr@777: tschatzl@7010: // Returns whether this remembered set (and all sub-sets) contain no entries. 
tschatzl@7010: bool is_empty() const; tschatzl@7010: ysr@777: size_t occupied() const; ysr@777: size_t occ_fine() const; ysr@777: size_t occ_coarse() const; ysr@777: size_t occ_sparse() const; ysr@777: ysr@777: static jint n_coarsenings() { return _n_coarsenings; } ysr@777: ysr@777: // Returns size in bytes. ysr@777: // Not const because it takes a lock. ysr@777: size_t mem_size() const; ysr@777: static size_t static_mem_size(); ysr@777: static size_t fl_mem_size(); ysr@777: ysr@1280: bool contains_reference(OopOrNarrowOopStar from) const; ysr@1280: bool contains_reference_locked(OopOrNarrowOopStar from) const; ysr@777: ysr@777: void clear(); ysr@777: ysr@777: // Specifically clear the from_card_cache. ysr@777: void clear_fcc(); ysr@777: tonyp@2493: void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task); tonyp@2493: ysr@777: // Declare the heap size (in # of regions) to the OtherRegionsTable. ysr@777: // (Uses it to initialize from_card_cache). tschatzl@7051: static void initialize(uint max_regions); ysr@777: tschatzl@7051: // Declares that regions between start_idx <= i < start_idx + num_regions are tschatzl@7051: // not in use. Make sure that any entries for these regions are invalid. tschatzl@7051: static void invalidate(uint start_idx, size_t num_regions); ysr@777: ysr@777: static void print_from_card_cache(); ysr@777: }; ysr@777: zgu@3900: class HeapRegionRemSet : public CHeapObj { ysr@777: friend class VMStructs; ysr@777: friend class HeapRegionRemSetIterator; ysr@777: ysr@777: public: ysr@777: enum Event { ysr@777: Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd ysr@777: }; ysr@777: ysr@777: private: ysr@777: G1BlockOffsetSharedArray* _bosa; ysr@777: G1BlockOffsetSharedArray* bosa() const { return _bosa; } ysr@777: tschatzl@6402: // A set of code blobs (nmethods) whose code contains pointers into johnc@5548: // the region that owns this RSet. 
tschatzl@6402: G1CodeRootSet _code_roots; tschatzl@6402: tschatzl@6402: Mutex _m; johnc@5548: ysr@777: OtherRegionsTable _other_regions; ysr@777: ysr@777: enum ParIterState { Unclaimed, Claimed, Complete }; iveresov@1696: volatile ParIterState _iter_state; iveresov@1696: volatile jlong _iter_claimed; ysr@777: ysr@777: // Unused unless G1RecordHRRSOops is true. ysr@777: ysr@777: static const int MaxRecorded = 1000000; ysr@1280: static OopOrNarrowOopStar* _recorded_oops; ysr@1280: static HeapWord** _recorded_cards; ysr@1280: static HeapRegion** _recorded_regions; ysr@1280: static int _n_recorded; ysr@777: ysr@777: static const int MaxRecordedEvents = 1000; ysr@777: static Event* _recorded_events; ysr@777: static int* _recorded_event_index; ysr@777: static int _n_recorded_events; ysr@777: ysr@777: static void print_event(outputStream* str, Event evnt); ysr@777: ysr@777: public: tschatzl@6402: HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegion* hr); ysr@777: tschatzl@6403: static uint num_par_rem_sets(); iveresov@1696: static void setup_remset_size(); ysr@777: ysr@777: HeapRegion* hr() const { ysr@777: return _other_regions.hr(); ysr@777: } ysr@777: tschatzl@7010: bool is_empty() const { tschatzl@7010: return (strong_code_roots_list_length() == 0) && _other_regions.is_empty(); tschatzl@7010: } tschatzl@7010: tschatzl@6402: size_t occupied() { tschatzl@6402: MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); tschatzl@6402: return occupied_locked(); tschatzl@6402: } tschatzl@6402: size_t occupied_locked() { ysr@777: return _other_regions.occupied(); ysr@777: } ysr@777: size_t occ_fine() const { ysr@777: return _other_regions.occ_fine(); ysr@777: } ysr@777: size_t occ_coarse() const { ysr@777: return _other_regions.occ_coarse(); ysr@777: } ysr@777: size_t occ_sparse() const { ysr@777: return _other_regions.occ_sparse(); ysr@777: } ysr@777: ysr@777: static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); } ysr@777: johnc@3891: // Used in the 
sequential case. ysr@1280: void add_reference(OopOrNarrowOopStar from) { johnc@3891: _other_regions.add_reference(from, 0); ysr@777: } ysr@777: johnc@3891: // Used in the parallel case. ysr@1280: void add_reference(OopOrNarrowOopStar from, int tid) { ysr@777: _other_regions.add_reference(from, tid); ysr@777: } ysr@777: ysr@777: // Removes any entries shown by the given bitmaps to contain only dead ysr@777: // objects. ysr@777: void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm); ysr@777: ysr@777: // The region is being reclaimed; clear its remset, and any mention of ysr@777: // entries for this region in other remsets. ysr@777: void clear(); tschatzl@6402: void clear_locked(); ysr@777: ysr@777: // Attempt to claim the region. Returns true iff this call caused an ysr@777: // atomic transition from Unclaimed to Claimed. ysr@777: bool claim_iter(); ysr@777: // Sets the iteration state to "complete". ysr@777: void set_iter_complete(); ysr@777: // Returns "true" iff the region's iteration is complete. ysr@777: bool iter_is_complete(); ysr@777: iveresov@1696: // Support for claiming blocks of cards during iteration iveresov@1696: size_t iter_claimed() const { return (size_t)_iter_claimed; } iveresov@1696: // Claim the next block of cards iveresov@1696: size_t iter_claimed_next(size_t step) { iveresov@1696: size_t current, next; iveresov@1696: do { iveresov@1696: current = iter_claimed(); iveresov@1696: next = current + step; iveresov@1696: } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current); iveresov@1696: return current; iveresov@1696: } tonyp@2974: void reset_for_par_iteration(); tonyp@2974: tonyp@2974: bool verify_ready_for_par_iteration() { tonyp@2974: return (_iter_state == Unclaimed) && (_iter_claimed == 0); tonyp@2974: } iveresov@1696: ysr@777: // The actual # of bytes this hr_remset takes up. johnc@5548: // Note also includes the strong code root set. 
ysr@777: size_t mem_size() { tschatzl@6402: MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag); ysr@777: return _other_regions.mem_size() ysr@777: // This correction is necessary because the above includes the second ysr@777: // part. tschatzl@6932: + (sizeof(HeapRegionRemSet) - sizeof(OtherRegionsTable)) johnc@5548: + strong_code_roots_mem_size(); ysr@777: } ysr@777: ysr@777: // Returns the memory occupancy of all static data structures associated ysr@777: // with remembered sets. ysr@777: static size_t static_mem_size() { mgerdin@7208: return OtherRegionsTable::static_mem_size() + G1CodeRootSet::static_mem_size(); ysr@777: } ysr@777: ysr@777: // Returns the memory occupancy of all free_list data structures associated ysr@777: // with remembered sets. ysr@777: static size_t fl_mem_size() { mgerdin@7208: return OtherRegionsTable::fl_mem_size(); ysr@777: } ysr@777: ysr@1280: bool contains_reference(OopOrNarrowOopStar from) const { ysr@777: return _other_regions.contains_reference(from); ysr@777: } johnc@5548: johnc@5548: // Routines for managing the list of code roots that point into johnc@5548: // the heap region that owns this RSet. johnc@5548: void add_strong_code_root(nmethod* nm); mgerdin@7208: void add_strong_code_root_locked(nmethod* nm); johnc@5548: void remove_strong_code_root(nmethod* nm); johnc@5548: johnc@5548: // Applies blk->do_code_blob() to each of the entries in johnc@5548: // the strong code roots list johnc@5548: void strong_code_roots_do(CodeBlobClosure* blk) const; johnc@5548: mgerdin@7208: void clean_strong_code_roots(HeapRegion* hr); mgerdin@7208: johnc@5548: // Returns the number of elements in the strong code roots list tschatzl@7010: size_t strong_code_roots_list_length() const { tschatzl@6402: return _code_roots.length(); johnc@5548: } johnc@5548: johnc@5548: // Returns true if the strong code roots contains the given johnc@5548: // nmethod. 
johnc@5548: bool strong_code_roots_list_contains(nmethod* nm) { tschatzl@6402: return _code_roots.contains(nm); johnc@5548: } johnc@5548: johnc@5548: // Returns the amount of memory, in bytes, currently johnc@5548: // consumed by the strong code roots. johnc@5548: size_t strong_code_roots_mem_size(); johnc@5548: tschatzl@6402: void print() PRODUCT_RETURN; ysr@777: ysr@777: // Called during a stop-world phase to perform any deferred cleanups. ysr@777: static void cleanup(); ysr@777: ysr@777: // Declare the heap size (in # of regions) to the HeapRegionRemSet(s). ysr@777: // (Uses it to initialize from_card_cache). tonyp@3713: static void init_heap(uint max_regions) { tschatzl@7051: OtherRegionsTable::initialize(max_regions); ysr@777: } ysr@777: tschatzl@7051: static void invalidate(uint start_idx, uint num_regions) { tschatzl@7051: OtherRegionsTable::invalidate(start_idx, num_regions); ysr@777: } ysr@777: ysr@777: #ifndef PRODUCT ysr@777: static void print_from_card_cache() { ysr@777: OtherRegionsTable::print_from_card_cache(); ysr@777: } ysr@777: #endif ysr@777: ysr@1280: static void record(HeapRegion* hr, OopOrNarrowOopStar f); ysr@777: static void print_recorded(); ysr@777: static void record_event(Event evnt); ysr@777: tonyp@2493: // These are wrappers for the similarly-named methods on tonyp@2493: // SparsePRT. Look at sparsePRT.hpp for more details. tonyp@2493: static void reset_for_cleanup_tasks(); tonyp@2493: void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task); tonyp@2493: static void finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task); tonyp@2493: ysr@777: // Run unit tests. ysr@777: #ifndef PRODUCT tschatzl@5165: static void test_prt(); ysr@777: static void test(); ysr@777: #endif ysr@777: }; ysr@777: johnc@5014: class HeapRegionRemSetIterator : public StackObj { tschatzl@6927: private: tschatzl@6927: // The region RSet over which we are iterating. tschatzl@6402: HeapRegionRemSet* _hrrs; ysr@777: ysr@777: // Local caching of HRRS fields. 
ysr@777: const BitMap* _coarse_map; ysr@777: ysr@777: G1BlockOffsetSharedArray* _bosa; ysr@777: G1CollectedHeap* _g1h; ysr@777: tschatzl@6927: // The number of cards yielded since initialization. ysr@777: size_t _n_yielded_fine; ysr@777: size_t _n_yielded_coarse; ysr@777: size_t _n_yielded_sparse; ysr@777: tschatzl@6927: // Indicates what granularity of table that we are currently iterating over. johnc@5014: // We start iterating over the sparse table, progress to the fine grain johnc@5014: // table, and then finish with the coarse table. ysr@777: enum IterState { ysr@777: Sparse, ysr@777: Fine, ysr@777: Coarse ysr@777: }; ysr@777: IterState _is; ysr@777: tschatzl@6927: // For both Coarse and Fine remembered set iteration this contains the tschatzl@6927: // first card number of the heap region we currently iterate over. ysr@777: size_t _cur_region_card_offset; ysr@777: tschatzl@6927: // Current region index for the Coarse remembered set iteration. johnc@3182: int _coarse_cur_region_index; johnc@3182: size_t _coarse_cur_region_cur_card; ysr@777: ysr@777: bool coarse_has_next(size_t& card_index); ysr@777: tschatzl@6927: // The PRT we are currently iterating over. tschatzl@6927: PerRegionTable* _fine_cur_prt; tschatzl@6927: // Card offset within the current PRT. tschatzl@6927: size_t _cur_card_in_prt; ysr@777: tschatzl@6927: // Update internal variables when switching to the given PRT. tschatzl@6927: void switch_to_prt(PerRegionTable* prt); ysr@777: bool fine_has_next(); ysr@777: bool fine_has_next(size_t& card_index); ysr@777: tschatzl@6927: // The Sparse remembered set iterator. tschatzl@6927: SparsePRTIter _sparse_iter; tschatzl@6927: tschatzl@6927: public: tschatzl@6402: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs); ysr@777: ysr@777: // If there remains one or more cards to be yielded, returns true and ysr@777: // sets "card_index" to one of those cards (which is then considered ysr@777: // yielded.) 
Otherwise, returns false (and leaves "card_index" ysr@777: // undefined.) ysr@777: bool has_next(size_t& card_index); ysr@777: ysr@777: size_t n_yielded_fine() { return _n_yielded_fine; } ysr@777: size_t n_yielded_coarse() { return _n_yielded_coarse; } ysr@777: size_t n_yielded_sparse() { return _n_yielded_sparse; } ysr@777: size_t n_yielded() { ysr@777: return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse(); ysr@777: } ysr@777: }; ysr@777: stefank@2314: #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP