/*
 * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Sparse remembered set for a heap region (the "owning" region).  Maps
// indices of other regions to short sequences of cards in those regions
// that might contain pointers into the owner region.

// These tables only expand while they are accessed in parallel --
// deletions may be done only in single-threaded code.  This allows
// unsynchronized reads/iterations, as long as expansions caused by
// insertions only enqueue old versions of a table for deletion rather
// than deleting them synchronously.
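// As a rough conceptual picture (a simplified sketch only, not the actual
// layout used by the classes below): for one owning region, the sparse PRT
// behaves like a small map
//
//   from-region index  ->  { up to CardsPerEntry card indices }
//
// where overflowing an entry's card list is reported to the caller, which
// must then fall back to a larger-capacity remembered-set representation.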
class SparsePRTEntry: public CHeapObj {
public:
  enum SomePublicConstants {
    CardsPerEntry = (short)4,
    NullEntry = (short)-1,
    DeletedEntry = (short)-2
  };

private:
  short _region_ind;
  short _next_index;
  short _cards[CardsPerEntry];

public:

  // Set the region_ind to the given value, and delete all cards.
  inline void init(short region_ind);

  short r_ind() const { return _region_ind; }
  bool valid_entry() const { return r_ind() >= 0; }
  void set_r_ind(short rind) { _region_ind = rind; }

  short next_index() const { return _next_index; }
  short* next_index_addr() { return &_next_index; }
  void set_next_index(short ni) { _next_index = ni; }

  // Returns "true" iff the entry contains the given card index.
  inline bool contains_card(short card_index) const;

  // Returns the number of non-NULL card entries.
  inline int num_valid_cards() const;

  // Returns "found" if the given card index is already present in the
  // entry; otherwise, if there is space available, adds the card index and
  // returns "added"; otherwise returns "overflow" to indicate that the
  // entry is full.
  enum AddCardResult {
    overflow,
    found,
    added
  };
  inline AddCardResult add_card(short card_index);

  // Copy the current entry's cards into "cards".
  inline void copy_cards(short* cards) const;
  // Copy the current entry's cards into the "_cards" array of "e".
  inline void copy_cards(SparsePRTEntry* e) const;

  inline short card(int i) const { return _cards[i]; }
};
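// A minimal illustrative sketch (the helper below is hypothetical and not
// part of this interface) of how a caller is expected to interpret
// AddCardResult: "found" and "added" both mean the card is now recorded,
// while "overflow" means the entry already holds CardsPerEntry other cards
// and the caller must fall back to a coarser representation.
inline bool example_sparse_entry_records_card(SparsePRTEntry* e,
                                              short card_index) {
  switch (e->add_card(card_index)) {
    case SparsePRTEntry::found:    // Card was already present in the entry.
    case SparsePRTEntry::added:    // Card was added to a free slot.
      return true;
    case SparsePRTEntry::overflow: // Entry is full; the caller must coarsen.
    default:
      return false;
  }
}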
class RSHashTable : public CHeapObj {

  friend class RSHashTableIter;

  enum SomePrivateConstants {
    NullEntry = -1
  };

  size_t _capacity;
  size_t _capacity_mask;
  size_t _occupied_entries;
  size_t _occupied_cards;

  SparsePRTEntry* _entries;
  short* _buckets;
  short  _free_region;
  short  _free_list;

  static RSHashTable* _head_deleted_list;
  RSHashTable* _next_deleted;
  RSHashTable* next_deleted() { return _next_deleted; }
  void set_next_deleted(RSHashTable* rsht) { _next_deleted = rsht; }
  bool _deleted;
  void set_deleted(bool b) { _deleted = b; }

  // Requires that the caller hold a lock preventing parallel modifying
  // operations, and that the table be less than completely full.  If
  // an entry for "region_ind" is already in the table, finds it and
  // returns its address; otherwise returns "NULL".
  SparsePRTEntry* entry_for_region_ind(short region_ind) const;

  // Requires that the caller hold a lock preventing parallel modifying
  // operations, and that the table be less than completely full.  If
  // an entry for "region_ind" is already in the table, finds it and
  // returns its address; otherwise allocates, initializes, inserts and
  // returns a new entry for "region_ind".
  SparsePRTEntry* entry_for_region_ind_create(short region_ind);

  // Returns the index of the next free entry in "_entries".
  short alloc_entry();
  // Declares the entry "fi" to be free.  (It must already have been
  // deleted from any bucket lists.)
  void free_entry(short fi);

public:
  RSHashTable(size_t capacity);
  ~RSHashTable();

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(short region_id, short card_index);

  bool get_cards(short region_id, short* cards);
  bool delete_entry(short region_id);

  bool contains_card(short region_id, short card_index) const;

  void add_entry(SparsePRTEntry* e);

  void clear();

  size_t capacity() const      { return _capacity; }
  size_t capacity_mask() const { return _capacity_mask; }
  size_t occupied_entries() const { return _occupied_entries; }
  size_t occupied_cards() const   { return _occupied_cards; }
  size_t mem_size() const;
  bool deleted() { return _deleted; }

  SparsePRTEntry* entry(int i) const { return &_entries[i]; }

  void print();

  static void add_to_deleted_list(RSHashTable* rsht);
  static RSHashTable* get_from_deleted_list();
};

// ValueObj because it will be embedded in an HRRS iterator.
class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
  short _tbl_ind;
  short _bl_ind;
  short _card_ind;
  RSHashTable* _rsht;
  size_t _heap_bot_card_ind;

  enum SomePrivateConstants {
    CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
  };

  // If the bucket list pointed to by _bl_ind contains a card, sets
  // _bl_ind to the index of that entry, and returns the card.
  // Otherwise, returns SparsePRTEntry::NullEntry.
  short find_first_card_in_list();
  // Computes the proper card index for the card whose offset in the
  // current region (as indicated by _bl_ind) is "ci".
  // This is subject to errors when there is iteration concurrent with
  // modification, but these errors should be benign.
  size_t compute_card_ind(short ci);

public:
  RSHashTableIter(size_t heap_bot_card_ind) :
    _tbl_ind(RSHashTable::NullEntry),
    _bl_ind(RSHashTable::NullEntry),
    _card_ind((SparsePRTEntry::CardsPerEntry-1)),
    _rsht(NULL),
    _heap_bot_card_ind(heap_bot_card_ind)
  {}

  void init(RSHashTable* rsht) {
    _rsht = rsht;
    _tbl_ind = -1; // So that first increment gets to 0.
    _bl_ind = RSHashTable::NullEntry;
    _card_ind = (SparsePRTEntry::CardsPerEntry-1);
  }

  bool has_next(size_t& card_index);
};
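// A minimal sketch of how the iterator above is meant to be driven (the
// function below is hypothetical, shown only for illustration): initialize
// the iterator against a table and pull card indices until has_next()
// reports that the table is exhausted.
inline void example_iterate_rs_hash_table(RSHashTable* table,
                                          size_t heap_bot_card_ind) {
  RSHashTableIter iter(heap_bot_card_ind);
  iter.init(table);
  size_t card_index;
  while (iter.has_next(card_index)) {
    // "card_index" identifies a card that may contain a pointer into the
    // owning region.
  }
}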
// Concurrent access to a SparsePRT must be serialized by some external
// mutex.

class SparsePRTIter;

class SparsePRT VALUE_OBJ_CLASS_SPEC {
  // Iterations are done on the _cur hash table, since they only need to
  // see entries visible at the start of a collection pause.
  // All other operations are done using the _next hash table.
  RSHashTable* _cur;
  RSHashTable* _next;

  HeapRegion* _hr;

  enum SomeAdditionalPrivateConstants {
    InitialCapacity = 16
  };

  void expand();

  bool _expanded;

  bool expanded() { return _expanded; }
  void set_expanded(bool b) { _expanded = b; }

  SparsePRT* _next_expanded;

  SparsePRT* next_expanded() { return _next_expanded; }
  void set_next_expanded(SparsePRT* nxt) { _next_expanded = nxt; }

  static SparsePRT* _head_expanded_list;

public:
  SparsePRT(HeapRegion* hr);

  ~SparsePRT();

  size_t occupied() const { return _next->occupied_cards(); }
  size_t mem_size() const;

  // Attempts to ensure that the given card_index in the given region is in
  // the sparse table.  If successful (because the card was already
  // present, or because it was successfully added) returns "true".
  // Otherwise, returns "false" to indicate that the addition would
  // overflow the entry for the region.  The caller must transfer these
  // entries to a larger-capacity representation.
  bool add_card(short region_id, short card_index);

  // If the table holds an entry for "region_ind", copies its
  // cards into "cards", which must be an array of length at least
  // "CardsPerEntry", and returns "true"; otherwise, returns "false".
  bool get_cards(short region_ind, short* cards);

  // If there is an entry for "region_ind", removes it and returns "true";
  // otherwise returns "false".
  bool delete_entry(short region_ind);

  // Clear the table, and reinitialize to initial capacity.
  void clear();

  // Ensure that "_cur" and "_next" point to the same table.
  void cleanup();

  // Clean up all tables on the expanded list.  Called single-threaded.
  static void cleanup_all();

  RSHashTable* cur() const { return _cur; }

  void init_iterator(SparsePRTIter* sprt_iter);

  static void add_to_expanded_list(SparsePRT* sprt);
  static SparsePRT* get_from_expanded_list();

  bool contains_card(short region_id, short card_index) const {
    return _next->contains_card(region_id, card_index);
  }

#if 0
  void verify_is_cleared();
  void print();
#endif
};


class SparsePRTIter: public RSHashTableIter {
public:
  SparsePRTIter(size_t heap_bot_card_ind) :
    RSHashTableIter(heap_bot_card_ind)
  {}

  void init(const SparsePRT* sprt) {
    RSHashTableIter::init(sprt->cur());
  }
  bool has_next(size_t& card_index) {
    return RSHashTableIter::has_next(card_index);
  }
};
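// A minimal sketch of the intended calling pattern (the function below is
// hypothetical and only illustrates the contracts documented above; it
// assumes the caller already holds the external mutex that serializes access
// to the SparsePRT): a "false" result from add_card() signals that the entry
// for "from_region_id" has overflowed and must be transferred to a
// larger-capacity representation by the caller.
inline bool example_record_card_in_sparse_prt(SparsePRT* sprt,
                                              short from_region_id,
                                              short card_index) {
  if (sprt->add_card(from_region_id, card_index)) {
    return true;  // The card was already present or has just been added.
  }
  // Overflow: copy out the region's cards (e.g. with get_cards()), remove the
  // entry (delete_entry()), and record the cards in the caller's coarser,
  // larger-capacity remembered-set representation.
  return false;
}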