/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP

#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "runtime/mutex.hpp"
#include "utilities/globalDefinitions.hpp"

// Sparse remembered set for a heap region (the "owning" region).  Maps
// indices of other regions to short sequences of cards in the other region
// that might contain pointers into the owner region.

// These tables only expand while they are accessed in parallel --
// deletions may be done in single-threaded code.  This allows
// unsynchronized reads/iterations, as long as expansions caused by
// insertions only enqueue old versions for deletion, and do not delete
// old versions synchronously.

class SparsePRTEntry: public CHeapObj<mtGC> {
public:
  enum SomePublicConstants {
    NullEntry    = -1,
    UnrollFactor =  4
  };
private:
  RegionIdx_t _region_ind;
  int         _next_index;
  CardIdx_t   _cards[1];
  // WARNING: Don't put any data members beyond this line.  The card array
  // has, in fact, variable length; it should always be the last data member.
public:
  // Returns the size of the entry, used for entry allocation.
  static size_t size() { return sizeof(SparsePRTEntry) + sizeof(CardIdx_t) * (cards_num() - 1); }
  // Returns the size of the card array.
  static int cards_num() {
    // The number of cards should be a multiple of 4, because that's our
    // current unrolling factor.
    static const int s = MAX2(G1RSetSparseRegionEntries & ~(UnrollFactor - 1), UnrollFactor);
    return s;
  }
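
  // For example (an illustrative calculation only, not a guaranteed value):
  // if G1RSetSparseRegionEntries were 10, then 10 & ~(UnrollFactor - 1) == 8,
  // so cards_num() would return 8 and size() would be
  // sizeof(SparsePRTEntry) + 7 * sizeof(CardIdx_t) -- the header fields plus
  // an 8-element card array (one CardIdx_t is already accounted for by the
  // _cards[1] member inside sizeof(SparsePRTEntry)).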

  // Set the region_ind to the given value, and delete all cards.
  inline void init(RegionIdx_t region_ind);

  RegionIdx_t r_ind() const { return _region_ind; }
  bool valid_entry() const { return r_ind() >= 0; }
  void set_r_ind(RegionIdx_t rind) { _region_ind = rind; }

  int next_index() const { return _next_index; }
  int* next_index_addr() { return &_next_index; }
  void set_next_index(int ni) { _next_index = ni; }

  // Returns "true" iff the entry contains the given card index.
  inline bool contains_card(CardIdx_t card_index) const;

  // Returns the number of non-NULL card entries.
  inline int num_valid_cards() const;

  // Adds the given card index to the entry if there is space available and
  // it is not already present ("added"); returns "found" if the card was
  // already present, and "overflow" to indicate that the entry is full.
  enum AddCardResult {
    overflow,
    found,
    added
  };
  inline AddCardResult add_card(CardIdx_t card_index);

  // Copy the current entry's cards into "cards".
  inline void copy_cards(CardIdx_t* cards) const;
  // Copy the current entry's cards into the "_cards" array of "e".
  inline void copy_cards(SparsePRTEntry* e) const;

  inline CardIdx_t card(int i) const { return _cards[i]; }
};
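
// Illustrative sketch (not part of this header): a caller that already holds
// the lock guarding the owning table might consume the AddCardResult of
// SparsePRTEntry::add_card roughly as follows; the surrounding code is
// hypothetical.
//
//   SparsePRTEntry* e = ...;   // entry for the region being recorded
//   switch (e->add_card(card_index)) {
//     case SparsePRTEntry::found:    break;  // card already recorded
//     case SparsePRTEntry::added:    break;  // card stored in a free slot
//     case SparsePRTEntry::overflow:
//       // Entry is full; the caller must move this region's cards to a
//       // larger-capacity representation.
//       break;
//   }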

class RSHashTable : public CHeapObj<mtGC> {

  friend class RSHashTableIter;

  enum SomePrivateConstants {
    NullEntry = -1
  };

  size_t _capacity;
  size_t _capacity_mask;
  size_t _occupied_entries;
  size_t _occupied_cards;

  SparsePRTEntry* _entries;
  int* _buckets;
  int  _free_region;
  int  _free_list;

  // Requires that the caller hold a lock preventing parallel modifying
  // operations, and that the table be less than completely full.  If
  // an entry for "region_ind" is already in the table, finds it and
  // returns its address; otherwise returns "NULL".
  SparsePRTEntry* entry_for_region_ind(RegionIdx_t region_ind) const;

  // Requires that the caller hold a lock preventing parallel modifying
  // operations, and that the table be less than completely full.  If
  // an entry for "region_ind" is already in the table, finds it and
  // returns its address; otherwise allocates, initializes, inserts and
  // returns a new entry for "region_ind".
  SparsePRTEntry* entry_for_region_ind_create(RegionIdx_t region_ind);

  // Returns the index of the next free entry in "_entries".
  int  alloc_entry();
  // Declares the entry "fi" to be free.  (It must have already been
  // deleted from any bucket lists.)
  void free_entry(int fi);

public:
  RSHashTable(size_t capacity);
  ~RSHashTable();

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);

  bool get_cards(RegionIdx_t region_id, CardIdx_t* cards);

  bool delete_entry(RegionIdx_t region_id);

  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const;

  void add_entry(SparsePRTEntry* e);

  SparsePRTEntry* get_entry(RegionIdx_t region_id);

  void clear();

  size_t capacity() const      { return _capacity; }
  size_t capacity_mask() const { return _capacity_mask; }
  size_t occupied_entries() const { return _occupied_entries; }
  size_t occupied_cards() const   { return _occupied_cards; }
  size_t mem_size() const;

  SparsePRTEntry* entry(int i) const { return (SparsePRTEntry*)((char*)_entries + SparsePRTEntry::size() * i); }

  void print();
};

// ValueObj because it will be embedded in an HRRS iterator.
class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
  int _tbl_ind;         // [-1, 0.._rsht->_capacity)
  int _bl_ind;          // [-1, 0.._rsht->_capacity)
  short _card_ind;      // [0..SparsePRTEntry::cards_num())
  RSHashTable* _rsht;

  // If the bucket list pointed to by _bl_ind contains a card, sets
  // _bl_ind to the index of that entry, and returns the card.
  // Otherwise, returns SparsePRTEntry::NullEntry.
  CardIdx_t find_first_card_in_list();

  // Computes the proper card index for the card whose offset in the
  // current region (as indicated by _bl_ind) is "ci".
  // This is subject to errors when there is iteration concurrent with
  // modification, but these errors should be benign.
  size_t compute_card_ind(CardIdx_t ci);

public:
  RSHashTableIter(RSHashTable* rsht) :
    _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
    _bl_ind(RSHashTable::NullEntry),
    _card_ind((SparsePRTEntry::cards_num() - 1)),
    _rsht(rsht) {}

  bool has_next(size_t& card_index);
};

// Concurrent access to a SparsePRT must be serialized by some external
// mutex.

class SparsePRTIter;
class SparsePRTCleanupTask;

class SparsePRT VALUE_OBJ_CLASS_SPEC {
  friend class SparsePRTCleanupTask;

  // Iterations are done on the _cur hash table, since they only need to
  // see entries visible at the start of a collection pause.
  // All other operations are done using the _next hash table.
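  //
  // For example (an illustrative scenario, not a normative description of
  // every caller): a SparsePRTIter constructed over cur() can walk the
  // snapshot taken at the start of the pause while add_card()/delete_entry()
  // calls go to _next; a later cleanup()/cleanup_all() makes _cur and _next
  // refer to the same table again.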
  RSHashTable* _cur;
  RSHashTable* _next;

  HeapRegion* _hr;

  enum SomeAdditionalPrivateConstants {
    InitialCapacity = 16
  };

  void expand();

  bool _expanded;

  bool expanded() { return _expanded; }
  void set_expanded(bool b) { _expanded = b; }

  SparsePRT* _next_expanded;

  SparsePRT* next_expanded() { return _next_expanded; }
  void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }

  bool should_be_on_expanded_list();

  static SparsePRT* _head_expanded_list;

public:
  SparsePRT(HeapRegion* hr);

  ~SparsePRT();

  size_t occupied() const { return _next->occupied_cards(); }
  size_t mem_size() const;

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(RegionIdx_t region_id, CardIdx_t card_index);
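
  // Illustrative sketch (hypothetical caller, not code from this file): the
  // owning remembered set might react to an overflow roughly as follows,
  // moving the region's recorded cards into a coarser representation.
  // "_sparse_table" and "coarse_add" are assumed names, not existing members.
  //
  //   if (!_sparse_table.add_card(region_id, card_index)) {
  //     SparsePRTEntry* e = _sparse_table.get_entry(region_id);
  //     for (int i = 0; i < SparsePRTEntry::cards_num(); i++) {
  //       if (e->card(i) != SparsePRTEntry::NullEntry) {
  //         coarse_add(region_id, e->card(i));   // hypothetical helper
  //       }
  //     }
  //     _sparse_table.delete_entry(region_id);
  //   }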

  // If the table holds an entry for "region_ind", copies its
  // cards into "cards", which must be an array of length at least
  // "SparsePRTEntry::cards_num()", and returns "true"; otherwise,
  // returns "false".
  bool get_cards(RegionIdx_t region_ind, CardIdx_t* cards);

  // Return the pointer to the entry associated with the given region.
  SparsePRTEntry* get_entry(RegionIdx_t region_ind);

  // If there is an entry for "region_ind", removes it and returns "true";
  // otherwise returns "false."
  bool delete_entry(RegionIdx_t region_ind);

  // Clear the table, and reinitialize to initial capacity.
  void clear();

  // Ensure that "_cur" and "_next" point to the same table.
  void cleanup();

  // Clean up all tables on the expanded list.  Called single threaded.
  static void cleanup_all();
  RSHashTable* cur() const { return _cur; }

  static void add_to_expanded_list(SparsePRT* sprt);
  static SparsePRT* get_from_expanded_list();

  // The purpose of these three methods is to help the GC workers
  // during the cleanup pause to recreate the expanded list, purging
  // any tables from it that belong to regions that are freed during
  // cleanup (if we don't purge those tables, there is a race that
  // causes various crashes; see CR 7014261).
  //
  // We chose to recreate the expanded list, instead of purging
  // entries from it by iterating over it, to avoid this serial phase
  // at the end of the cleanup pause.
  //
  // The three methods below work as follows:
  // * reset_for_cleanup_tasks() : Nulls the expanded list head at the
  //   start of the cleanup pause.
  // * do_cleanup_work() : Called by the cleanup workers for every
  //   region that is not free / is being freed by the cleanup
  //   pause.  It creates a list of expanded tables whose head / tail
  //   are on the thread-local SparsePRTCleanupTask object.
  // * finish_cleanup_task() : Called by the cleanup workers after
  //   they complete their cleanup task.  It adds the local list into
  //   the global expanded list.  It assumes that the
  //   ParGCRareEvent_lock is being held to ensure MT-safety.
  static void reset_for_cleanup_tasks();
  void do_cleanup_work(SparsePRTCleanupTask* sprt_cleanup_task);
  static void finish_cleanup_task(SparsePRTCleanupTask* sprt_cleanup_task);

  bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
    return _next->contains_card(region_id, card_index);
  }
};

class SparsePRTIter: public RSHashTableIter {
public:
  SparsePRTIter(const SparsePRT* sprt) :
    RSHashTableIter(sprt->cur()) {}

  bool has_next(size_t& card_index) {
    return RSHashTableIter::has_next(card_index);
  }
};

// This allows each worker during a cleanup pause to create a
// thread-local list of sparse tables that have been expanded and need
// to be processed at the beginning of the next GC pause.  These lists
// are concatenated into the single expanded list at the end of the
// cleanup pause.
class SparsePRTCleanupTask VALUE_OBJ_CLASS_SPEC {
private:
  SparsePRT* _head;
  SparsePRT* _tail;

public:
  SparsePRTCleanupTask() : _head(NULL), _tail(NULL) { }

  void add(SparsePRT* sprt);
  SparsePRT* head() { return _head; }
  SparsePRT* tail() { return _tail; }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_SPARSEPRT_HPP