--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jan 19 13:04:37 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jan 19 19:30:42 2011 -0500
@@ -50,6 +50,11 @@
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
 class HeapRegion;
+class HeapRegionSetBase;
+
+#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
+#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \
+                               (__hr)->top(), (__hr)->end()
 
 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
@@ -227,12 +232,6 @@
   // True iff the region is in current collection_set.
   bool _in_collection_set;
 
-  // True iff the region is on the unclean list, waiting to be zero filled.
-  bool _is_on_unclean_list;
-
-  // True iff the region is on the free list, ready for allocation.
-  bool _is_on_free_list;
-
   // Is this or has it been an allocation region in the current collection
   // pause.
   bool _is_gc_alloc_region;
@@ -254,6 +253,13 @@
   // Next region whose cards need cleaning
   HeapRegion* _next_dirty_cards_region;
 
+  // Fields used by the HeapRegionSetBase class and subclasses.
+  HeapRegion* _next;
+#ifdef ASSERT
+  HeapRegionSetBase* _containing_set;
+#endif // ASSERT
+  bool _pending_removal;
+
   // For parallel heapRegion traversal.
   jint _claimed;
 
@@ -305,10 +311,6 @@
     _top_at_conc_mark_count = bot;
   }
 
-  jint _zfs;            // A member of ZeroFillState. Protected by ZF_lock.
-  Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
-                        // made it so.
-
   void set_young_type(YoungType new_type) {
     //assert(_young_type != new_type, "setting the same type" );
     // TODO: add more assertions here
@@ -362,16 +364,6 @@
     RebuildRSClaimValue   = 5
   };
 
-  // Concurrent refinement requires contiguous heap regions (in which TLABs
-  // might be allocated) to be zero-filled.  Each region therefore has a
-  // zero-fill-state.
-  enum ZeroFillState {
-    NotZeroFilled,
-    ZeroFilling,
-    ZeroFilled,
-    Allocated
-  };
-
   inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
     assert(is_young(), "we can only skip BOT updates on young regions");
     return ContiguousSpace::par_allocate(word_size);
@@ -456,6 +448,9 @@
   // which this region will be part of.
   void set_continuesHumongous(HeapRegion* first_hr);
 
+  // Unsets the humongous-related fields on the region.
+  void set_notHumongous();
+
   // If the region has a remembered set, return a pointer to it.
   HeapRegionRemSet* rem_set() const {
     return _rem_set;
@@ -502,45 +497,56 @@
     _next_in_special_set = r;
   }
 
-  bool is_on_free_list() {
-    return _is_on_free_list;
+  // Methods used by the HeapRegionSetBase class and subclasses.
+
+  // Getter and setter for the next field used to link regions into
+  // linked lists.
+  HeapRegion* next()              { return _next; }
+
+  void set_next(HeapRegion* next) { _next = next; }
+
+  // Every region added to a set is tagged with a reference to that
+  // set. This is used for doing consistency checking to make sure that
+  // the contents of a set are as they should be and it's only
+  // available in non-product builds.
+#ifdef ASSERT
+  void set_containing_set(HeapRegionSetBase* containing_set) {
+    assert((containing_set == NULL && _containing_set != NULL) ||
+           (containing_set != NULL && _containing_set == NULL),
+           err_msg("containing_set: "PTR_FORMAT" "
+                   "_containing_set: "PTR_FORMAT,
+                   containing_set, _containing_set));
+
+    _containing_set = containing_set;
+  }
+
+  HeapRegionSetBase* containing_set() { return _containing_set; }
+#else // ASSERT
+  void set_containing_set(HeapRegionSetBase* containing_set) { }
+
+  // containing_set() is only used in asserts so there's not reason
+  // to provide a dummy version of it.
+#endif // ASSERT
+
+  // If we want to remove regions from a list in bulk we can simply tag
+  // them with the pending_removal tag and call the
+  // remove_all_pending() method on the list.
+
+  bool pending_removal() { return _pending_removal; }
+
+  void set_pending_removal(bool pending_removal) {
+    // We can only set pending_removal to true, if it's false and the
+    // region belongs to a set.
+    assert(!pending_removal ||
+           (!_pending_removal && containing_set() != NULL), "pre-condition");
+    // We can only set pending_removal to false, if it's true and the
+    // region does not belong to a set.
+    assert( pending_removal ||
+           ( _pending_removal && containing_set() == NULL), "pre-condition");
+
+    _pending_removal = pending_removal;
   }
 
-  void set_on_free_list(bool b) {
-    _is_on_free_list = b;
-  }
-
-  HeapRegion* next_from_free_list() {
-    assert(is_on_free_list(),
-           "Should only invoke on free space.");
-    assert(_next_in_special_set == NULL ||
-           _next_in_special_set->is_on_free_list(),
-           "Malformed Free List.");
-    return _next_in_special_set;
-  }
-
-  void set_next_on_free_list(HeapRegion* r) {
-    assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
-    _next_in_special_set = r;
-  }
-
-  bool is_on_unclean_list() {
-    return _is_on_unclean_list;
-  }
-
-  void set_on_unclean_list(bool b);
-
-  HeapRegion* next_from_unclean_list() {
-    assert(is_on_unclean_list(),
-           "Should only invoke on unclean space.");
-    assert(_next_in_special_set == NULL ||
-           _next_in_special_set->is_on_unclean_list(),
-           "Malformed unclean List.");
-    return _next_in_special_set;
-  }
-
-  void set_next_on_unclean_list(HeapRegion* r);
-
   HeapRegion* get_next_young_region() { return _next_young_region; }
   void set_next_young_region(HeapRegion* hr) {
     _next_young_region = hr;
@@ -559,11 +565,6 @@
 
   void initialize(MemRegion mr, bool clear_space, bool mangle_space);
 
-  // Ensure that "this" is zero-filled.
-  void ensure_zero_filled();
-  // This one requires that the calling thread holds ZF_mon.
-  void ensure_zero_filled_locked();
-
   // Get the start of the unmarked area in this region.
   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
   HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
@@ -798,36 +799,6 @@
   // "end" of the region if there is no such block.
   HeapWord* next_block_start_careful(HeapWord* addr);
 
-  // Returns the zero-fill-state of the current region.
-  ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
-  bool zero_fill_is_allocated() { return _zfs == Allocated; }
-  Thread* zero_filler() { return _zero_filler; }
-
-  // Indicate that the contents of the region are unknown, and therefore
-  // might require zero-filling.
-  void set_zero_fill_needed() {
-    set_zero_fill_state_work(NotZeroFilled);
-  }
-  void set_zero_fill_in_progress(Thread* t) {
-    set_zero_fill_state_work(ZeroFilling);
-    _zero_filler = t;
-  }
-  void set_zero_fill_complete();
-  void set_zero_fill_allocated() {
-    set_zero_fill_state_work(Allocated);
-  }
-
-  void set_zero_fill_state_work(ZeroFillState zfs);
-
-  // This is called when a full collection shrinks the heap.
-  // We want to set the heap region to a value which says
-  // it is no longer part of the heap.  For now, we'll let "NotZF" fill
-  // that role.
-  void reset_zero_fill() {
-    set_zero_fill_state_work(NotZeroFilled);
-    _zero_filler = NULL;
-  }
-
   size_t recorded_rs_length() const        { return _recorded_rs_length; }
   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
   size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }
@@ -866,10 +837,6 @@
 
   // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty) const;
-
-#ifdef DEBUG
-  HeapWord* allocate(size_t size);
-#endif
 };
 
 // HeapRegionClosure is used for iterating over regions.
@@ -892,113 +859,6 @@
   bool complete() { return _complete; }
 };
 
-// A linked lists of heap regions.  It leaves the "next" field
-// unspecified; that's up to subtypes.
-class RegionList VALUE_OBJ_CLASS_SPEC {
-protected:
-  virtual HeapRegion* get_next(HeapRegion* chr) = 0;
-  virtual void set_next(HeapRegion* chr,
-                        HeapRegion* new_next) = 0;
-
-  HeapRegion* _hd;
-  HeapRegion* _tl;
-  size_t _sz;
-
-  // Protected constructor because this type is only meaningful
-  // when the _get/_set next functions are defined.
-  RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
-public:
-  void reset() {
-    _hd = NULL;
-    _tl = NULL;
-    _sz = 0;
-  }
-  HeapRegion* hd() { return _hd; }
-  HeapRegion* tl() { return _tl; }
-  size_t sz() { return _sz; }
-  size_t length();
-
-  bool well_formed() {
-    return
-      ((hd() == NULL && tl() == NULL && sz() == 0)
-       || (hd() != NULL && tl() != NULL && sz() > 0))
-      && (sz() == length());
-  }
-  virtual void insert_before_head(HeapRegion* r);
-  void prepend_list(RegionList* new_list);
-  virtual HeapRegion* pop();
-  void dec_sz() { _sz--; }
-  // Requires that "r" is an element of the list, and is not the tail.
-  void delete_after(HeapRegion* r);
-};
-
-class EmptyNonHRegionList: public RegionList {
-protected:
-  // Protected constructor because this type is only meaningful
-  // when the _get/_set next functions are defined.
-  EmptyNonHRegionList() : RegionList() {}
-
-public:
-  void insert_before_head(HeapRegion* r) {
-    //    assert(r->is_empty(), "Better be empty");
-    assert(!r->isHumongous(), "Better not be humongous.");
-    RegionList::insert_before_head(r);
-  }
-  void prepend_list(EmptyNonHRegionList* new_list) {
-    //    assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
-    //           "Better be empty");
-    assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
-           "Better not be humongous.");
-    //    assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
-    //           "Better be empty");
-    assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
-           "Better not be humongous.");
-    RegionList::prepend_list(new_list);
-  }
-};
-
-class UncleanRegionList: public EmptyNonHRegionList {
-public:
-  HeapRegion* get_next(HeapRegion* hr) {
-    return hr->next_from_unclean_list();
-  }
-  void set_next(HeapRegion* hr, HeapRegion* new_next) {
-    hr->set_next_on_unclean_list(new_next);
-  }
-
-  UncleanRegionList() : EmptyNonHRegionList() {}
-
-  void insert_before_head(HeapRegion* r) {
-    assert(!r->is_on_free_list(),
-           "Better not already be on free list");
-    assert(!r->is_on_unclean_list(),
-           "Better not already be on unclean list");
-    r->set_zero_fill_needed();
-    r->set_on_unclean_list(true);
-    EmptyNonHRegionList::insert_before_head(r);
-  }
-  void prepend_list(UncleanRegionList* new_list) {
-    assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
-           "Better not already be on free list");
-    assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
-           "Better already be marked as on unclean list");
-    assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
-           "Better not already be on free list");
-    assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
-           "Better already be marked as on unclean list");
-    EmptyNonHRegionList::prepend_list(new_list);
-  }
-  HeapRegion* pop() {
-    HeapRegion* res = RegionList::pop();
-    if (res != NULL) res->set_on_unclean_list(false);
-    return res;
-  }
-};
-
-// Local Variables: ***
-// c-indentation-style: gnu ***
-// End: ***
-
 #endif // SERIALGC
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP