Wed, 26 Jun 2013 16:58:37 +0200
8013590: NPG: Add a memory pool MXBean for Metaspace
Reviewed-by: jmasa, mgerdin
/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP
#define SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP

#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start".  For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important.  Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayNonContigSpace
//     - BlockOffsetArrayContigSpace
//

class ContiguousSpace;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

 public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table may need to be resized.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN").  An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space.  However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place.  (Consider,
// for example, the garbage-first generation.)

// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

 private:
  enum SomePrivateConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };
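
  // Worked example: on a 64-bit VM, LogHeapWordSize == 3, so
  //   LogN       = 9  -> each card covers 2^9 = 512 bytes,
  //   LogN_words = 6  -> equivalently 2^6 = 64 heap words,
  //   N_bytes    = 512, N_words = 64.
  // Each u_char entry of _offset_array thus describes one 512-byte card.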

  bool _init_to_zero;

  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array of offsets for quickly retrieving the start of the object
  // containing a given address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

 protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }
  // An assertion-checking helper method for the set_offset_array() methods below.
  void check_reducing_assertion(bool reducing);

  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
           "Not reducing");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;

    // Below, we may use an explicit loop instead of memset()
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air" phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[index_for(left)], offset, num_cards);
    } else {
      size_t i = index_for(left);
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }

  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    // Below, we may use an explicit loop instead of memset()
    // because on certain platforms memset() can give concurrent
    // readers "out-of-thin-air" phantom zeros; see 6948537.
    if (UseMemSetInBOT) {
      memset(&_offset_array[left], offset, num_cards);
    } else {
      size_t i = left;
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        // Elided until CR 6977974 is fixed properly.
        // assert(!reducing || _offset_array[i] >= offset, "Not reducing");
        _offset_array[i] = offset;
      }
    }
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.

  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
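
  // Worked example (64-bit VM, N_words == 64): covering a 512 MB space,
  // i.e. 64M heap words, needs 64M / 64 + 1 = 1M + 1 byte-sized slots,
  // which allocation_align_size_up() then rounds up to an allocation
  // granule -- roughly one byte of table per 512 bytes covered.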

 public:
  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size".  In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".)  The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table.  The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Whether entries should be initialized to zero.  Used currently only for
  // error checking.
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() { return _init_to_zero; }

  // Updates all the BlockOffsetArrays sharing this shared array to
  // reflect the current "top"s of their spaces.
  void update_offset_arrays();   // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;
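
  // For illustration: index_for() and address_for_index() are inverses on
  // card-aligned addresses; e.g., for a card-aligned p in the covered region,
  //   address_for_index(index_for(p)) == p
  // and index_for(p + N_words) == index_for(p) + 1.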

  // Return the address "p" incremented by the size of
  // a region.  This method does not align the address
  // returned to the start of a region.  It is a simple
  // primitive.
  HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }
};

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
  friend class VMStructs;
  friend class G1BlockOffsetArray; // temp. until we restructure and clean up
 protected:
  // The following enums are used by do_block_internal() below
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  enum SomePrivateConstants {
    N_words = BlockOffsetSharedArray::N_words,
    LogN    = BlockOffsetSharedArray::LogN,
    // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
    // All entries are less than "N_words + N_powers".
    LogBase = 4,
    Base = (1 << LogBase),
    N_powers = 14
  };

  static size_t power_to_cards_back(uint i) {
    return (size_t)1 << (LogBase * i);
  }
  static size_t power_to_words_back(uint i) {
    return power_to_cards_back(i) * N_words;
  }
  static size_t entry_to_cards_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_cards_back(entry - N_words);
  }
  static size_t entry_to_words_back(u_char entry) {
    assert(entry >= N_words, "Precondition");
    return power_to_words_back(entry - N_words);
  }
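
  // Worked example of the logarithmic encoding (assuming N_words == 64,
  // so Base == 16 and all entries are below N_words + N_powers == 78):
  //   entry 63 -> the block starts 63 words back within this card;
  //   entry 64 -> skip back 16^0 =  1 card and consult that entry;
  //   entry 65 -> skip back 16^1 = 16 cards;
  //   entry 66 -> skip back 16^2 = 256 cards; and so on.
  // A lookup thus walks back in a bounded number of geometrically
  // increasing hops rather than card by card.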

  // The shared array, which is shared with other BlockOffsetArrays
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // An assertion-checking helper method for the set_remainder*() methods below.
  void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }

  // Sets the entries
  // corresponding to the cards starting at "start" and ending at "end"
  // to point back to the card before "start": the interval [start, end)
  // is right-open.  The last parameter, "reducing", indicates whether the
  // updates to individual entries always reduce the entry from a higher
  // to a lower value.  (For example, this would hold true during a phase
  // in which only block splits were updating the BOT.)
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);

 public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter.  If "init_to_zero" is true, the
  // elements of the array are initialized to zero.  Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero_);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  }

  // Note that the committed size of the covered space may have changed,
  // so the table may need to be resized.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // verify that the old and new boundaries are also card boundaries
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // set all the newly added cards
      _array->set_offset_array(_end, new_end, N_words);
    }
    _end = new_end;  // update _end
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size).  All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // If true, array slots that contain no allocated blocks are initialized
  // to zero; otherwise, they are made to point back to the front.
  bool init_to_zero() { return _init_to_zero; }
  // Corresponding setter
  void set_init_to_zero(bool val) {
    _init_to_zero = val;
    assert(_array != NULL, "_array should be non-NULL");
    _array->set_init_to_zero(val);
  }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a NonContiguousSpace, so that some
// specialized interfaces can be made available for spaces that
// manipulate the table.
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

 public:
  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, false),
    _unallocated_block(_bottom) { }

  // accessor
  HeapWord* unallocated_block() const {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    return _unallocated_block;
  }

  void set_unallocated_block(HeapWord* block) {
    assert(BlockOffsetArrayUseUnallocatedBlock,
           "_unallocated_block is not being maintained");
    assert(block >= _bottom && block <= _end, "out of range");
    _unallocated_block = block;
  }

  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
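
  // For example, if the BOT currently records a single 128-word block at
  // blk_start, then split_block(blk_start, 128, 48) leaves it recording
  // two adjacent blocks of 48 and 80 words.  Note that such block splits
  // only ever reduce individual entries (see "reducing" above).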

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size).  Only the first card
  // of BOT is touched.  It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
  void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
    mark_block(blk, blk + size, reducing);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed.  It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  void allocated(HeapWord* blk, size_t size, bool reducing = false) {
    allocated(blk, blk + size, reducing);
  }

  void freed(HeapWord* blk_start, HeapWord* blk_end);
  void freed(HeapWord* blk, size_t size);

  HeapWord* block_start_unsafe(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // Verification & debugging: ensure that the offset table reflects
  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
  // is a single block of storage.  NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
    PRODUCT_RETURN;
  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;

  // Verify that the given block is before _unallocated_block
  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
    const PRODUCT_RETURN;
  void verify_not_unallocated(HeapWord* blk, size_t size)
    const PRODUCT_RETURN;

  // Debugging support
  virtual size_t last_active_index() const;
};

////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;    // index corresponding to that boundary

  // Work function when allocation start crosses threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

 public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = NULL;
    _next_offset_index = 0;
  }

  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  HeapWord* initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero)
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are NULL because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
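
  // Sketch of the fast path this enables (assuming the usual bump-pointer
  // allocation in a contiguous space): consecutive small allocations whose
  // ends stay below _next_offset_threshold skip alloc_block_work() entirely;
  // only a block whose end crosses the threshold pays for updating the
  // offset array and advancing the threshold.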

  HeapWord* block_start_unsafe(const void* addr) const;

  // Debugging support
  virtual size_t last_active_index() const;
};

#endif // SHARE_VM_MEMORY_BLOCKOFFSETTABLE_HPP