--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/blockOffsetTable.hpp	Sat Dec 01 00:00:00 2007 +0000
@@ -0,0 +1,500 @@
+/*
+ * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// The CollectedHeap type requires subtypes to implement a method
+// "block_start". For some subtypes, notably generational
+// systems using card-table-based write barriers, the efficiency of this
+// operation may be important. Implementations of the "BlockOffsetArray"
+// class may be useful in providing such efficient implementations.
+//
+// BlockOffsetTable (abstract)
+//   - BlockOffsetArray (abstract)
+//     - BlockOffsetArrayNonContigSpace
+//     - BlockOffsetArrayContigSpace
+//
+
+class ContiguousSpace;
+class SerializeOopClosure;
+
+//////////////////////////////////////////////////////////////////////////
+// The BlockOffsetTable "interface"
+//////////////////////////////////////////////////////////////////////////
+class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+protected:
+  // These members describe the region covered by the table.
+
+  // The space this table is covering.
+  HeapWord* _bottom;    // == reserved.start
+  HeapWord* _end;       // End of currently allocated region.
+
+public:
+  // Initialize the table to cover the given space.
+  // The contents of the initial table are undefined.
+  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
+    _bottom(bottom), _end(end) {
+    assert(_bottom <= _end, "arguments out of order");
+  }
+
+  // Note that the committed size of the covered space may have changed,
+  // so the table size might also wish to change.
+  virtual void resize(size_t new_word_size) = 0;
+
+  virtual void set_bottom(HeapWord* new_bottom) {
+    assert(new_bottom <= _end, "new_bottom > _end");
+    _bottom = new_bottom;
+    resize(pointer_delta(_end, _bottom));
+  }
+
+  // Requires "addr" to be contained by a block, and returns the address of
+  // the start of that block.
+  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;
+
+  // Returns the address of the start of the block containing "addr", or
+  // else "null" if it is covered by no block.
+  HeapWord* block_start(const void* addr) const;
+};
+
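The non-virtual block_start() declared above is essentially a covered-range check in front of the virtual block_start_unsafe(). Its definition is not part of this header; the following minimal sketch only shows what the documented contract amounts to, and may differ in detail from the real out-of-line definition:

    inline HeapWord* BlockOffsetTable::block_start(const void* addr) const {
      if (addr >= _bottom && addr < _end) {
        return block_start_unsafe(addr);   // covered: delegate to the concrete subtype
      } else {
        return NULL;                       // covered by no block
      }
    }
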
+//////////////////////////////////////////////////////////////////////////
+// One implementation of "BlockOffsetTable," the BlockOffsetArray,
+// divides the covered region into "N"-word subregions (where
+// "N" = 2^"LogN"). An array with an entry for each such subregion
+// indicates how far back one must go to find the start of the
+// chunk that includes the first word of the subregion.
+//
+// Each BlockOffsetArray is owned by a Space. However, the actual array
+// may be shared by several BlockOffsetArrays; this is useful
+// when a single resizable area (such as a generation) is divided up into
+// several spaces in which contiguous allocation takes place. (Consider,
+// for example, the garbage-first generation.)
+
+// Here is the shared array type.
+//////////////////////////////////////////////////////////////////////////
+// BlockOffsetSharedArray
+//////////////////////////////////////////////////////////////////////////
+class BlockOffsetSharedArray: public CHeapObj {
+  friend class BlockOffsetArray;
+  friend class BlockOffsetArrayNonContigSpace;
+  friend class BlockOffsetArrayContigSpace;
+  friend class VMStructs;
+
+ private:
+  enum SomePrivateConstants {
+    LogN = 9,
+    LogN_words = LogN - LogHeapWordSize,
+    N_bytes = 1 << LogN,
+    N_words = 1 << LogN_words
+  };
+
+  // The reserved region covered by the shared array.
+  MemRegion _reserved;
+
+  // End of the current committed region.
+  HeapWord* _end;
+
+  // Array for keeping offsets for retrieving object start fast given an
+  // address.
+  VirtualSpace _vs;
+  u_char* _offset_array;          // byte array keeping backwards offsets
+
+ protected:
+  // Bounds checking accessors:
+  // For performance these have to devolve to array accesses in product builds.
+  u_char offset_array(size_t index) const {
+    assert(index < _vs.committed_size(), "index out of range");
+    return _offset_array[index];
+  }
+  void set_offset_array(size_t index, u_char offset) {
+    assert(index < _vs.committed_size(), "index out of range");
+    _offset_array[index] = offset;
+  }
+  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
+    assert(index < _vs.committed_size(), "index out of range");
+    assert(high >= low, "addresses out of order");
+    assert(pointer_delta(high, low) <= N_words, "offset too large");
+    _offset_array[index] = (u_char)pointer_delta(high, low);
+  }
+  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
+    assert(index_for(right - 1) < _vs.committed_size(),
+           "right address out of range");
+    assert(left < right, "Heap addresses out of order");
+    size_t num_cards = pointer_delta(right, left) >> LogN_words;
+    memset(&_offset_array[index_for(left)], offset, num_cards);
+  }
+
+  void set_offset_array(size_t left, size_t right, u_char offset) {
+    assert(right < _vs.committed_size(), "right address out of range");
+    assert(left <= right, "indexes out of order");
+    size_t num_cards = right - left + 1;
+    memset(&_offset_array[left], offset, num_cards);
+  }
+
+  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
+    assert(index < _vs.committed_size(), "index out of range");
+    assert(high >= low, "addresses out of order");
+    assert(pointer_delta(high, low) <= N_words, "offset too large");
+    assert(_offset_array[index] == pointer_delta(high, low),
+           "Wrong offset");
+  }
+
+  bool is_card_boundary(HeapWord* p) const;
+
+  // Return the number of slots needed for an offset array
+  // that covers mem_region_words words.
+  // We always add an extra slot because if an object
+  // ends on a card boundary we put a 0 in the next
+  // offset array slot, so we want that slot always
+  // to be reserved.
+
+  size_t compute_size(size_t mem_region_words) {
+    size_t number_of_slots = (mem_region_words / N_words) + 1;
+    return ReservedSpace::allocation_align_size_up(number_of_slots);
+  }
+
+public:
+  // Initialize the table to cover from "base" to (at least)
+  // "base + init_word_size". In the future, the table may be expanded
+  // (see "resize" below) up to the size of "_reserved" (which must be at
+  // least "init_word_size".) The contents of the initial table are
+  // undefined; it is the responsibility of the constituent
+  // BlockOffsetTable(s) to initialize cards.
+  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
+
+  // Notes a change in the committed size of the region covered by the
+  // table. The "new_word_size" may not be larger than the size of the
+  // reserved region this table covers.
+  void resize(size_t new_word_size);
+
+  void set_bottom(HeapWord* new_bottom);
+
+  // Updates all the BlockOffsetArray's sharing this shared array to
+  // reflect the current "top"'s of their spaces.
+  void update_offset_arrays();   // Not yet implemented!
+
+  // Return the appropriate index into "_offset_array" for "p".
+  size_t index_for(const void* p) const;
+
+  // Return the address indicating the start of the region corresponding to
+  // "index" in "_offset_array".
+  HeapWord* address_for_index(size_t index) const;
+
+  // Shared space support
+  void serialize(SerializeOopClosure* soc, HeapWord* start, HeapWord* end);
+};
+
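The index_for()/address_for_index() pair declared above is plain card arithmetic driven by LogN = 9, i.e. 512-byte cards; how many words a card covers depends on the word size through LogN_words = LogN - LogHeapWordSize. Below is a standalone sketch of that arithmetic, assuming a 64-bit VM with 8-byte HeapWords (so N_words = 64); the base address and names are illustrative, not HotSpot source:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const size_t LogN            = 9;   // from SomePrivateConstants: 512-byte cards
      const size_t LogHeapWordSize = 3;   // 64-bit VM assumption
      const size_t N_words         = ((size_t)1 << LogN) >> LogHeapWordSize;  // 64

      uintptr_t reserved_start = 0x80000000u;            // made-up heap base
      uintptr_t p              = reserved_start + 5000;   // some covered address

      size_t    index      = (p - reserved_start) >> LogN;      // ~ index_for(p)
      uintptr_t card_start = reserved_start + (index << LogN);  // ~ address_for_index(index)

      std::printf("N_words = %zu; p falls in card %zu, which starts %zu words into the space\n",
                  N_words, index,
                  (size_t)(card_start - reserved_start) >> LogHeapWordSize);
      // prints: N_words = 64; p falls in card 9, which starts 576 words into the space
      return 0;
    }
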
+//////////////////////////////////////////////////////////////////////////
+// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
+//////////////////////////////////////////////////////////////////////////
+class BlockOffsetArray: public BlockOffsetTable {
+  friend class VMStructs;
+ protected:
+  // The following enums are used by do_block_internal() below
+  enum Action {
+    Action_single,      // BOT records a single block (see single_block())
+    Action_mark,        // BOT marks the start of a block (see mark_block())
+    Action_check        // Check that BOT records block correctly
+                        // (see verify_single_block()).
+  };
+
+  enum SomePrivateConstants {
+    N_words = BlockOffsetSharedArray::N_words,
+    LogN = BlockOffsetSharedArray::LogN,
+    // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
+    // All entries are less than "N_words + N_powers".
+    LogBase = 4,
+    Base = (1 << LogBase),
+    N_powers = 14
+  };
+
+  static size_t power_to_cards_back(uint i) {
+    return (size_t)1 << (LogBase * i);
+  }
+  static size_t power_to_words_back(uint i) {
+    return power_to_cards_back(i) * N_words;
+  }
+  static size_t entry_to_cards_back(u_char entry) {
+    assert(entry >= N_words, "Precondition");
+    return power_to_cards_back(entry - N_words);
+  }
+  static size_t entry_to_words_back(u_char entry) {
+    assert(entry >= N_words, "Precondition");
+    return power_to_words_back(entry - N_words);
+  }
+
+  // The shared array, which is shared with other BlockOffsetArray's
+  // corresponding to different spaces within a generation or span of
+  // memory.
+  BlockOffsetSharedArray* _array;
+
+  // The space that owns this subregion.
+  Space* _sp;
+
+  // If true, array entries are initialized to 0; otherwise, they are
+  // initialized to point backwards to the beginning of the covered region.
+  bool _init_to_zero;
+
+  // Sets the entries
+  // corresponding to the cards starting at "start" and ending at "end"
+  // to point back to the card before "start": the interval [start, end)
+  // is right-open.
+  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
+  // Same as above, except that the args here are a card _index_ interval
+  // that is closed: [start_index, end_index]
+  void set_remainder_to_point_to_start_incl(size_t start, size_t end);
+
+  // A helper function for BOT adjustment/verification work
+  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);
+
+ public:
+  // The space may not have its bottom and top set yet, which is why the
+  // region is passed as a parameter. If "init_to_zero" is true, the
+  // elements of the array are initialized to zero. Otherwise, they are
+  // initialized to point backwards to the beginning.
+  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
+                   bool init_to_zero);
+
+  // Note: this ought to be part of the constructor, but that would require
+  // "this" to be passed as a parameter to a member constructor for
+  // the containing concrete subtype of Space.
+  // This would be legal C++, but MS VC++ doesn't allow it.
+  void set_space(Space* sp) { _sp = sp; }
+
+  // Resets the covered region to the given "mr".
+  void set_region(MemRegion mr) {
+    _bottom = mr.start();
+    _end = mr.end();
+  }
+
+  // Note that the committed size of the covered space may have changed,
+  // so the table size might also wish to change.
+  virtual void resize(size_t new_word_size) {
+    HeapWord* new_end = _bottom + new_word_size;
+    if (_end < new_end && !init_to_zero()) {
+      // verify that the old and new boundaries are also card boundaries
+      assert(_array->is_card_boundary(_end),
+             "_end not a card boundary");
+      assert(_array->is_card_boundary(new_end),
+             "new _end would not be a card boundary");
+      // set all the newly added cards
+      _array->set_offset_array(_end, new_end, N_words);
+    }
+    _end = new_end;  // update _end
+  }
+
+  // Adjust the BOT to show that it has a single block in the
+  // range [blk_start, blk_start + size). All necessary BOT
+  // cards are adjusted, but _unallocated_block isn't.
+  void single_block(HeapWord* blk_start, HeapWord* blk_end);
+  void single_block(HeapWord* blk, size_t size) {
+    single_block(blk, blk + size);
+  }
+
+  // When the alloc_block() call returns, the block offset table should
+  // have enough information such that any subsequent block_start() call
+  // with an argument equal to an address that is within the range
+  // [blk_start, blk_end) would return the value blk_start, provided
+  // there have been no calls in between that reset this information
+  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
+  // for an appropriate range covering the said interval).
+  // These methods expect to be called with [blk_start, blk_end)
+  // representing a block of memory in the heap.
+  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
+  void alloc_block(HeapWord* blk, size_t size) {
+    alloc_block(blk, blk + size);
+  }
+
+  // If true, initialize array slots with no allocated blocks to zero.
+  // Otherwise, make them point back to the front.
+  bool init_to_zero() { return _init_to_zero; }
+
+  // Debugging
+  // Return the index of the last entry in the "active" region.
+  virtual size_t last_active_index() const = 0;
+  // Verify the block offset table
+  void verify() const;
+  void check_all_cards(size_t left_card, size_t right_card) const;
+};
+
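The SomePrivateConstants block above is the heart of the lookup scheme: an entry below N_words is a direct backwards word offset to a block start, while an entry of N_words or more encodes an exponential skip of Base^(entry - N_words) cards, so a single byte can reach block starts arbitrarily far back in a logarithmic number of hops. Below is a standalone sketch of the encoding, assuming a 64-bit VM so that N_words = 64 (it would be 128 on a 32-bit VM); it mirrors power_to_cards_back()/entry_to_cards_back() but is not HotSpot source:

    #include <cstddef>
    #include <cstdio>

    static const unsigned LogBase = 4;    // Base = 16
    static const unsigned N_words = 64;   // 512-byte card / 8-byte word (64-bit assumption)

    static size_t power_to_cards_back(unsigned i) {
      return (size_t)1 << (LogBase * i);  // 16^i cards
    }
    static size_t entry_to_cards_back(unsigned char entry) {
      // entries >= N_words mean "skip back 16^(entry - N_words) cards and look again"
      return power_to_cards_back(entry - N_words);
    }

    int main() {
      for (unsigned entry = N_words; entry < N_words + 4; entry++) {
        size_t cards = entry_to_cards_back((unsigned char)entry);
        std::printf("entry %3u -> back %6zu cards (%8zu words)\n",
                    entry, cards, cards * (size_t)N_words);
      }
      // entry 64 -> 1 card back, 65 -> 16, 66 -> 256, 67 -> 4096 cards back
      return 0;
    }

This geometric ladder is what lets block_start_unsafe() locate the start of a block spanning many cards in a few hops rather than walking back one card at a time.
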
+////////////////////////////////////////////////////////////////////////////
+// A subtype of BlockOffsetArray that takes advantage of the fact
+// that its underlying space is a NonContiguousSpace, so that some
+// specialized interfaces can be made available for spaces that
+// manipulate the table.
+////////////////////////////////////////////////////////////////////////////
+class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
+  friend class VMStructs;
+ private:
+  // The portion [_unallocated_block, _sp.end()) of the space that
+  // is a single block known not to contain any objects.
+  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
+  HeapWord* _unallocated_block;
+
+ public:
+  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
+    BlockOffsetArray(array, mr, false),
+    _unallocated_block(_bottom) { }
+
+  // accessor
+  HeapWord* unallocated_block() const {
+    assert(BlockOffsetArrayUseUnallocatedBlock,
+           "_unallocated_block is not being maintained");
+    return _unallocated_block;
+  }
+
+  void set_unallocated_block(HeapWord* block) {
+    assert(BlockOffsetArrayUseUnallocatedBlock,
+           "_unallocated_block is not being maintained");
+    assert(block >= _bottom && block <= _end, "out of range");
+    _unallocated_block = block;
+  }
+
+  // These methods expect to be called with [blk_start, blk_end)
+  // representing a block of memory in the heap.
+  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
+  void alloc_block(HeapWord* blk, size_t size) {
+    alloc_block(blk, blk + size);
+  }
+
+  // The following methods are useful and optimized for a
+  // non-contiguous space.
+
+  // Given a block [blk_start, blk_start + full_blk_size), and
+  // a left_blk_size < full_blk_size, adjust the BOT to show two
+  // blocks [blk_start, blk_start + left_blk_size) and
+  // [blk_start + left_blk_size, blk_start + full_blk_size).
+  // It is assumed (and verified in the non-product VM) that the
+  // BOT was correct for the original block.
+  void split_block(HeapWord* blk_start, size_t full_blk_size,
+                   size_t left_blk_size);
+
+  // Adjust BOT to show that it has a block in the range
+  // [blk_start, blk_start + size). Only the first card
+  // of BOT is touched. It is assumed (and verified in the
+  // non-product VM) that the remaining cards of the block
+  // are correct.
+  void mark_block(HeapWord* blk_start, HeapWord* blk_end);
+  void mark_block(HeapWord* blk, size_t size) {
+    mark_block(blk, blk + size);
+  }
+
+  // Adjust _unallocated_block to indicate that a particular
+  // block has been newly allocated or freed. It is assumed (and
+  // verified in the non-product VM) that the BOT is correct for
+  // the given block.
+  void allocated(HeapWord* blk_start, HeapWord* blk_end) {
+    // Verify that the BOT shows [blk_start, blk_end) to be one block.
+    verify_single_block(blk_start, blk_end);
+    if (BlockOffsetArrayUseUnallocatedBlock) {
+      _unallocated_block = MAX2(_unallocated_block, blk_end);
+    }
+  }
+
+  void allocated(HeapWord* blk, size_t size) {
+    allocated(blk, blk + size);
+  }
+
+  void freed(HeapWord* blk_start, HeapWord* blk_end);
+  void freed(HeapWord* blk, size_t size) {
+    freed(blk, blk + size);
+  }
+
+  HeapWord* block_start_unsafe(const void* addr) const;
+
+  // Requires "addr" to be the start of a card and returns the
+  // start of the block that contains the given address.
+  HeapWord* block_start_careful(const void* addr) const;
+
+
+  // Verification & debugging: ensure that the offset table reflects
+  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
+  // is a single block of storage. NOTE: can't const this because of
+  // call to non-const do_block_internal() below.
+  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
+    PRODUCT_RETURN;
+  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;
+
+  // Verify that the given block is before _unallocated_block
+  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
+    const PRODUCT_RETURN;
+  void verify_not_unallocated(HeapWord* blk, size_t size)
+    const PRODUCT_RETURN;
+
+  // Debugging support
+  virtual size_t last_active_index() const;
+};
+
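A hedged usage sketch of split_block()/allocated(): the surrounding space type and the _bt field name below are hypothetical, and only the BlockOffsetArrayNonContigSpace calls come from this header. When a free-list space carves an object of obj_words out of a larger free chunk that the table currently records as a single block, it might do something like:

    // BOT currently shows [chunk, chunk + chunk_words) as one block.
    _bt.split_block(chunk, chunk_words, obj_words);   // now two blocks: the object and the tail
    _bt.allocated(chunk, chunk + obj_words);          // verifies, and bumps _unallocated_block if in use
    // The tail [chunk + obj_words, chunk + chunk_words) remains a free block;
    // returning it to a free list later would be reported with _bt.freed(...).
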
+////////////////////////////////////////////////////////////////////////////
+// A subtype of BlockOffsetArray that takes advantage of the fact
+// that its underlying space is a ContiguousSpace, so that its "active"
+// region can be more efficiently tracked (than for a non-contiguous space).
+////////////////////////////////////////////////////////////////////////////
+class BlockOffsetArrayContigSpace: public BlockOffsetArray {
+  friend class VMStructs;
+ private:
+  // allocation boundary at which offset array must be updated
+  HeapWord* _next_offset_threshold;
+  size_t    _next_offset_index;      // index corresponding to that boundary
+
+  // Work function, called when an allocation crosses the threshold.
+  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);
+
+ public:
+  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
+    BlockOffsetArray(array, mr, true) {
+    _next_offset_threshold = NULL;
+    _next_offset_index = 0;
+  }
+
+  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }
+
+  // Initialize the threshold for an empty heap.
+  HeapWord* initialize_threshold();
+  // Zero out the entry for _bottom (offset will be zero)
+  void zero_bottom_entry();
+
+  // Return the next threshold, the point at which the table should be
+  // updated.
+  HeapWord* threshold() const { return _next_offset_threshold; }
+
+  // In general, these methods expect to be called with
+  // [blk_start, blk_end) representing a block of memory in the heap.
+  // In this implementation, however, we are OK even if blk_start and/or
+  // blk_end are NULL because NULL is represented as 0, and thus
+  // never exceeds the "_next_offset_threshold".
+  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
+    if (blk_end > _next_offset_threshold) {
+      alloc_block_work(blk_start, blk_end);
+    }
+  }
+  void alloc_block(HeapWord* blk, size_t size) {
+    alloc_block(blk, blk + size);
+  }
+
+  HeapWord* block_start_unsafe(const void* addr) const;
+
+  void serialize(SerializeOopClosure* soc);
+
+  // Debugging support
+  virtual size_t last_active_index() const;
+};
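
The alloc_block() fast path above is why a contiguous, bump-pointer space pays almost nothing for the table: alloc_block_work() only runs when an allocation's end crosses _next_offset_threshold, i.e. at most once per 512-byte card. Below is a standalone sketch of that behaviour, assuming 64-bit HeapWords (N_words = 64); the threshold-advance rule is a simplified stand-in for what alloc_block_work() really does:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t N_words = 64;              // words per 512-byte card (64-bit VM)
      size_t top = 0;                         // bump pointer, in words from bottom
      size_t threshold = N_words;             // next card boundary needing an update
      size_t sizes[] = { 10, 20, 50, 5, 100 };
      const size_t n = sizeof(sizes) / sizeof(sizes[0]);
      size_t updates = 0;

      for (size_t i = 0; i < n; i++) {
        size_t blk_end = top + sizes[i];
        if (blk_end > threshold) {            // the cheap test in alloc_block()
          updates++;                          // alloc_block_work() would run here
          threshold = ((blk_end + N_words - 1) / N_words) * N_words;  // next card boundary
        }
        top = blk_end;
      }
      std::printf("%zu allocations, %zu offset-table updates\n", n, updates);
      // prints: 5 allocations, 2 offset-table updates
      return 0;
    }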