src/share/vm/memory/blockOffsetTable.hpp

author       ysr
date         Mon, 16 Aug 2010 15:58:42 -0700
changeset    2071:be3f9c242c9d
parent       1907:c18cbe5936b8
child        2087:52f2bc645da5
permissions  -rw-r--r--

6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
Summary: GC workers now recognize an intermediate, transient state of blocks that are allocated but have not yet completed initialization. blk_start() calls do not attempt to determine the size of a block in this transient state; instead they wait for the block to become initialized, at which point it is safe to query its size. Audited and ensured that object fields (klass, free bit and size) are initialized in an order that respects the block state transition protocol. Also included new assertion-checking code enabled in debug mode.
Reviewed-by: chrisphi, johnc, poonam
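
The summary above describes a reader-side waiting protocol rather than a new interface. As a rough, hypothetical sketch of the idea (is_block_initialized() and block_size() below are placeholder helpers, not code from this changeset), a BOT walker stepping forward through blocks would pause on any block still in the transient state before trusting its size:

    // Sketch only: placeholder helpers, not changeset code.
    HeapWord* walk_to_block(HeapWord* q, const void* addr) {
      while (true) {
        // The writer may have carved this block out of free space but not yet
        // published its klass / free bit / size; wait until it is safe to
        // read the size (the "transient state" the fix talks about).
        while (!is_block_initialized(q)) { /* spin */ }
        HeapWord* next = q + block_size(q);
        if (next > (HeapWord*)addr) break;   // addr lies inside [q, next)
        q = next;
      }
      return q;   // start of the block containing addr
    }

The writer side correspondingly publishes klass, free bit and size in an order that makes such an initialization check stable once it succeeds.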

duke@435 1 /*
ysr@2071 2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 // The CollectedHeap type requires subtypes to implement a method
duke@435 26 // "block_start". For some subtypes, notably generational
duke@435 27 // systems using card-table-based write barriers, the efficiency of this
duke@435 28 // operation may be important. Implementations of the "BlockOffsetArray"
duke@435 29 // class may be useful in providing such efficient implementations.
duke@435 30 //
duke@435 31 // BlockOffsetTable (abstract)
duke@435 32 // - BlockOffsetArray (abstract)
duke@435 33 // - BlockOffsetArrayNonContigSpace
duke@435 34 // - BlockOffsetArrayContigSpace
duke@435 35 //
duke@435 36
duke@435 37 class ContiguousSpace;
duke@435 38 class SerializeOopClosure;
duke@435 39
duke@435 40 //////////////////////////////////////////////////////////////////////////
duke@435 41 // The BlockOffsetTable "interface"
duke@435 42 //////////////////////////////////////////////////////////////////////////
duke@435 43 class BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
duke@435 44 friend class VMStructs;
duke@435 45 protected:
duke@435 46 // These members describe the region covered by the table.
duke@435 47
duke@435 48 // The space this table is covering.
duke@435 49 HeapWord* _bottom; // == reserved.start
duke@435 50 HeapWord* _end; // End of currently allocated region.
duke@435 51
duke@435 52 public:
duke@435 53 // Initialize the table to cover the given space.
duke@435 54 // The contents of the initial table are undefined.
duke@435 55 BlockOffsetTable(HeapWord* bottom, HeapWord* end):
duke@435 56 _bottom(bottom), _end(end) {
duke@435 57 assert(_bottom <= _end, "arguments out of order");
duke@435 58 }
duke@435 59
duke@435 60 // Note that the committed size of the covered space may have changed,
duke@435 61 // so the table size might also wish to change.
duke@435 62 virtual void resize(size_t new_word_size) = 0;
duke@435 63
duke@435 64 virtual void set_bottom(HeapWord* new_bottom) {
duke@435 65 assert(new_bottom <= _end, "new_bottom > _end");
duke@435 66 _bottom = new_bottom;
duke@435 67 resize(pointer_delta(_end, _bottom));
duke@435 68 }
duke@435 69
duke@435 70 // Requires "addr" to be contained by a block, and returns the address of
duke@435 71 // the start of that block.
duke@435 72 virtual HeapWord* block_start_unsafe(const void* addr) const = 0;
duke@435 73
duke@435 74 // Returns the address of the start of the block containing "addr", or
duke@435 75 // else "null" if it is covered by no block.
duke@435 76 HeapWord* block_start(const void* addr) const;
duke@435 77 };
duke@435 78
duke@435 79 //////////////////////////////////////////////////////////////////////////
duke@435 80 // One implementation of "BlockOffsetTable," the BlockOffsetArray,
duke@435 81 // divides the covered region into "N"-word subregions (where
duke@435 82 // "N" = 2^"LogN"). An array with an entry for each such subregion
duke@435 83 // indicates how far back one must go to find the start of the
duke@435 84 // chunk that includes the first word of the subregion.
duke@435 85 //
duke@435 86 // Each BlockOffsetArray is owned by a Space. However, the actual array
duke@435 87 // may be shared by several BlockOffsetArrays; this is useful
duke@435 88 // when a single resizable area (such as a generation) is divided up into
duke@435 89 // several spaces in which contiguous allocation takes place. (Consider,
duke@435 90 // for example, the garbage-first generation.)
duke@435 91
duke@435 92 // Here is the shared array type.
duke@435 93 //////////////////////////////////////////////////////////////////////////
duke@435 94 // BlockOffsetSharedArray
duke@435 95 //////////////////////////////////////////////////////////////////////////
duke@435 96 class BlockOffsetSharedArray: public CHeapObj {
duke@435 97 friend class BlockOffsetArray;
duke@435 98 friend class BlockOffsetArrayNonContigSpace;
duke@435 99 friend class BlockOffsetArrayContigSpace;
duke@435 100 friend class VMStructs;
duke@435 101
duke@435 102 private:
duke@435 103 enum SomePrivateConstants {
duke@435 104 LogN = 9,
duke@435 105 LogN_words = LogN - LogHeapWordSize,
duke@435 106 N_bytes = 1 << LogN,
duke@435 107 N_words = 1 << LogN_words
duke@435 108 };
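  // For example, with LogN = 9 each card covers 2^9 = 512 bytes; on an LP64 VM
  // (LogHeapWordSize == 3) that is N_words = 2^(9-3) = 64 heap words per card.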
duke@435 109
ysr@2071 110 bool _init_to_zero;
ysr@2071 111
duke@435 112 // The reserved region covered by the shared array.
duke@435 113 MemRegion _reserved;
duke@435 114
duke@435 115 // End of the current committed region.
duke@435 116 HeapWord* _end;
duke@435 117
duke@435 118 // Array for keeping offsets for retrieving object start fast given an
duke@435 119 // address.
duke@435 120 VirtualSpace _vs;
duke@435 121 u_char* _offset_array; // byte array keeping backwards offsets
duke@435 122
duke@435 123 protected:
duke@435 124 // Bounds checking accessors:
duke@435 125 // For performance these have to devolve to array accesses in product builds.
duke@435 126 u_char offset_array(size_t index) const {
duke@435 127 assert(index < _vs.committed_size(), "index out of range");
duke@435 128 return _offset_array[index];
duke@435 129 }
ysr@2071 130 // An assertion-checking helper method for the set_offset_array() methods below.
ysr@2071 131 void check_reducing_assertion(bool reducing);
ysr@2071 132
ysr@2071 133 void set_offset_array(size_t index, u_char offset, bool reducing = false) {
ysr@2071 134 check_reducing_assertion(reducing);
duke@435 135 assert(index < _vs.committed_size(), "index out of range");
ysr@2071 136 assert(!reducing || _offset_array[index] >= offset, "Not reducing");
duke@435 137 _offset_array[index] = offset;
duke@435 138 }
ysr@2071 139
ysr@2071 140 void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
ysr@2071 141 check_reducing_assertion(reducing);
duke@435 142 assert(index < _vs.committed_size(), "index out of range");
duke@435 143 assert(high >= low, "addresses out of order");
duke@435 144 assert(pointer_delta(high, low) <= N_words, "offset too large");
ysr@2071 145 assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
ysr@2071 146 "Not reducing");
duke@435 147 _offset_array[index] = (u_char)pointer_delta(high, low);
duke@435 148 }
ysr@2071 149
ysr@2071 150 void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
ysr@2071 151 check_reducing_assertion(reducing);
duke@435 152 assert(index_for(right - 1) < _vs.committed_size(),
duke@435 153 "right address out of range");
duke@435 154 assert(left < right, "Heap addresses out of order");
duke@435 155 size_t num_cards = pointer_delta(right, left) >> LogN_words;
ysr@1873 156
ysr@1873 157 // Below, we may use an explicit loop instead of memset()
ysr@1873 158 // because on certain platforms memset() can give concurrent
ysr@1873 159 // readers "out-of-thin-air," phantom zeros; see 6948537.
ysr@1873 160 if (UseMemSetInBOT) {
ysr@1873 161 memset(&_offset_array[index_for(left)], offset, num_cards);
ysr@1873 162 } else {
ysr@1873 163 size_t i = index_for(left);
ysr@1873 164 const size_t end = i + num_cards;
ysr@1873 165 for (; i < end; i++) {
ysr@2071 166 assert(!reducing || _offset_array[i] >= offset, "Not reducing");
ysr@1873 167 _offset_array[i] = offset;
ysr@1873 168 }
ysr@1873 169 }
duke@435 170 }
duke@435 171
ysr@2071 172 void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
ysr@2071 173 check_reducing_assertion(reducing);
duke@435 174 assert(right < _vs.committed_size(), "right address out of range");
duke@435 175 assert(left <= right, "indexes out of order");
duke@435 176 size_t num_cards = right - left + 1;
ysr@1873 177
ysr@1873 178 // Below, we may use an explicit loop instead of memset
ysr@1873 179 // because on certain platforms memset() can give concurrent
ysr@1873 180 // readers "out-of-thin-air," phantom zeros; see 6948537.
ysr@1873 181 if (UseMemSetInBOT) {
ysr@1873 182 memset(&_offset_array[left], offset, num_cards);
ysr@1873 183 } else {
ysr@1873 184 size_t i = left;
ysr@1873 185 const size_t end = i + num_cards;
ysr@1873 186 for (; i < end; i++) {
ysr@2071 187 assert(!reducing || _offset_array[i] >= offset, "Not reducing");
ysr@1873 188 _offset_array[i] = offset;
ysr@1873 189 }
ysr@1873 190 }
duke@435 191 }
duke@435 192
duke@435 193 void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
duke@435 194 assert(index < _vs.committed_size(), "index out of range");
duke@435 195 assert(high >= low, "addresses out of order");
duke@435 196 assert(pointer_delta(high, low) <= N_words, "offset too large");
duke@435 197 assert(_offset_array[index] == pointer_delta(high, low),
duke@435 198 "Wrong offset");
duke@435 199 }
duke@435 200
duke@435 201 bool is_card_boundary(HeapWord* p) const;
duke@435 202
duke@435 203 // Return the number of slots needed for an offset array
duke@435 204 // that covers mem_region_words words.
duke@435 205 // We always add an extra slot because if an object
duke@435 206 // ends on a card boundary we put a 0 in the next
duke@435 207 // offset array slot, so we want that slot always
duke@435 208 // to be reserved.
duke@435 209
duke@435 210 size_t compute_size(size_t mem_region_words) {
duke@435 211 size_t number_of_slots = (mem_region_words / N_words) + 1;
duke@435 212 return ReservedSpace::allocation_align_size_up(number_of_slots);
duke@435 213 }
duke@435 214
duke@435 215 public:
duke@435 216 // Initialize the table to cover from "base" to (at least)
duke@435 217 // "base + init_word_size". In the future, the table may be expanded
duke@435 218 // (see "resize" below) up to the size of "_reserved" (which must be at
duke@435 219 // least "init_word_size".) The contents of the initial table are
duke@435 220 // undefined; it is the responsibility of the constituent
duke@435 221 // BlockOffsetTable(s) to initialize cards.
duke@435 222 BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
duke@435 223
duke@435 224 // Notes a change in the committed size of the region covered by the
duke@435 225 // table. The "new_word_size" may not be larger than the size of the
duke@435 226 // reserved region this table covers.
duke@435 227 void resize(size_t new_word_size);
duke@435 228
duke@435 229 void set_bottom(HeapWord* new_bottom);
duke@435 230
ysr@2071 231 // Whether entries should be initialized to zero. Used currently only for
ysr@2071 232 // error checking.
ysr@2071 233 void set_init_to_zero(bool val) { _init_to_zero = val; }
ysr@2071 234 bool init_to_zero() { return _init_to_zero; }
ysr@2071 235
duke@435 236 // Updates all the BlockOffsetArray's sharing this shared array to
duke@435 237 // reflect the current "top"'s of their spaces.
duke@435 238 void update_offset_arrays(); // Not yet implemented!
duke@435 239
duke@435 240 // Return the appropriate index into "_offset_array" for "p".
duke@435 241 size_t index_for(const void* p) const;
duke@435 242
duke@435 243 // Return the address indicating the start of the region corresponding to
duke@435 244 // "index" in "_offset_array".
duke@435 245 HeapWord* address_for_index(size_t index) const;
duke@435 246
jmasa@736 247 // Return the address "p" incremented by the size of
jmasa@736 248 // a region. This method does not align the address
jmasa@736 249 // returned to the start of a region. It is a simple
jmasa@736 250 // primitive.
jmasa@736 251 HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }
jmasa@736 252
duke@435 253 // Shared space support
duke@435 254 void serialize(SerializeOopClosure* soc, HeapWord* start, HeapWord* end);
duke@435 255 };
duke@435 256
duke@435 257 //////////////////////////////////////////////////////////////////////////
duke@435 258 // The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
duke@435 259 //////////////////////////////////////////////////////////////////////////
duke@435 260 class BlockOffsetArray: public BlockOffsetTable {
duke@435 261 friend class VMStructs;
ysr@777 262 friend class G1BlockOffsetArray; // temp. until we restructure and cleanup
duke@435 263 protected:
duke@435 264 // The following enums are used by do_block_internal() below
duke@435 265 enum Action {
duke@435 266 Action_single, // BOT records a single block (see single_block())
duke@435 267 Action_mark, // BOT marks the start of a block (see mark_block())
duke@435 268 Action_check // Check that BOT records block correctly
duke@435 269 // (see verify_single_block()).
duke@435 270 };
duke@435 271
duke@435 272 enum SomePrivateConstants {
duke@435 273 N_words = BlockOffsetSharedArray::N_words,
duke@435 274 LogN = BlockOffsetSharedArray::LogN,
duke@435 275 // entries "e" of at least N_words mean "go back by Base^(e-N_words)."
duke@435 276 // All entries are less than "N_words + N_powers".
duke@435 277 LogBase = 4,
duke@435 278 Base = (1 << LogBase),
duke@435 279 N_powers = 14
duke@435 280 };
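  // For example, an entry of N_words + 2 means "go back Base^2 = 256 cards"
  // (see entry_to_cards_back() below); the largest representable skip is
  // Base^(N_powers - 1) cards.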
duke@435 281
duke@435 282 static size_t power_to_cards_back(uint i) {
kvn@1080 283 return (size_t)(1 << (LogBase * i));
duke@435 284 }
duke@435 285 static size_t power_to_words_back(uint i) {
duke@435 286 return power_to_cards_back(i) * N_words;
duke@435 287 }
duke@435 288 static size_t entry_to_cards_back(u_char entry) {
duke@435 289 assert(entry >= N_words, "Precondition");
duke@435 290 return power_to_cards_back(entry - N_words);
duke@435 291 }
duke@435 292 static size_t entry_to_words_back(u_char entry) {
duke@435 293 assert(entry >= N_words, "Precondition");
duke@435 294 return power_to_words_back(entry - N_words);
duke@435 295 }
duke@435 296
duke@435 297 // The shared array, which is shared with other BlockOffsetArray's
duke@435 298 // corresponding to different spaces within a generation or span of
duke@435 299 // memory.
duke@435 300 BlockOffsetSharedArray* _array;
duke@435 301
duke@435 302 // The space that owns this subregion.
duke@435 303 Space* _sp;
duke@435 304
duke@435 305 // If true, array entries are initialized to 0; otherwise, they are
duke@435 306 // initialized to point backwards to the beginning of the covered region.
duke@435 307 bool _init_to_zero;
duke@435 308
ysr@2071 309 // An assertion-checking helper method for the set_remainder*() methods below.
ysr@2071 310 void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }
ysr@2071 311
duke@435 312 // Sets the entries
duke@435 313 // corresponding to the cards starting at "start" and ending at "end"
duke@435 314 // to point back to the card before "start": the interval [start, end)
ysr@2071 315 // is right-open. The last parameter, reducing, indicates whether the
ysr@2071 316 // updates to individual entries always reduce the entry from a higher
ysr@2071 317 // to a lower value. (For example, this would hold true during a temporal
ysr@2071 318 // regime during which only block splits were updating the BOT.)
ysr@2071 319 void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
duke@435 320 // Same as above, except that the args here are a card _index_ interval
duke@435 321 // that is closed: [start_index, end_index]
ysr@2071 322 void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);
duke@435 323
duke@435 324 // A helper function for BOT adjustment/verification work
ysr@2071 325 void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action, bool reducing = false);
duke@435 326
duke@435 327 public:
duke@435 328 // The space may not have its bottom and top set yet, which is why the
duke@435 329 // region is passed as a parameter. If "init_to_zero" is true, the
duke@435 330 // elements of the array are initialized to zero. Otherwise, they are
duke@435 331 // initialized to point backwards to the beginning.
duke@435 332 BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
ysr@2071 333 bool init_to_zero_);
duke@435 334
duke@435 335 // Note: this ought to be part of the constructor, but that would require
duke@435 336 // "this" to be passed as a parameter to a member constructor for
duke@435 337 // the containing concrete subtype of Space.
duke@435 338 // This would be legal C++, but MS VC++ doesn't allow it.
duke@435 339 void set_space(Space* sp) { _sp = sp; }
duke@435 340
duke@435 341 // Resets the covered region to the given "mr".
duke@435 342 void set_region(MemRegion mr) {
duke@435 343 _bottom = mr.start();
duke@435 344 _end = mr.end();
duke@435 345 }
duke@435 346
duke@435 347 // Note that the committed size of the covered space may have changed,
duke@435 348 // so the table size might also wish to change.
duke@435 349 virtual void resize(size_t new_word_size) {
duke@435 350 HeapWord* new_end = _bottom + new_word_size;
duke@435 351 if (_end < new_end && !init_to_zero()) {
duke@435 352 // verify that the old and new boundaries are also card boundaries
duke@435 353 assert(_array->is_card_boundary(_end),
duke@435 354 "_end not a card boundary");
duke@435 355 assert(_array->is_card_boundary(new_end),
duke@435 356 "new _end would not be a card boundary");
duke@435 357 // set all the newly added cards
duke@435 358 _array->set_offset_array(_end, new_end, N_words);
duke@435 359 }
duke@435 360 _end = new_end; // update _end
duke@435 361 }
duke@435 362
duke@435 363 // Adjust the BOT to show that it has a single block in the
duke@435 364 // range [blk_start, blk_start + size). All necessary BOT
duke@435 365 // cards are adjusted, but _unallocated_block isn't.
duke@435 366 void single_block(HeapWord* blk_start, HeapWord* blk_end);
duke@435 367 void single_block(HeapWord* blk, size_t size) {
duke@435 368 single_block(blk, blk + size);
duke@435 369 }
duke@435 370
duke@435 371 // When the alloc_block() call returns, the block offset table should
duke@435 372 // have enough information such that any subsequent block_start() call
duke@435 373 // with an argument equal to an address that is within the range
duke@435 374 // [blk_start, blk_end) would return the value blk_start, provided
duke@435 375 // there have been no calls in between that reset this information
duke@435 376 // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
duke@435 377 // for an appropriate range covering the said interval).
duke@435 378 // These methods expect to be called with [blk_start, blk_end)
duke@435 379 // representing a block of memory in the heap.
duke@435 380 virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
duke@435 381 void alloc_block(HeapWord* blk, size_t size) {
duke@435 382 alloc_block(blk, blk + size);
duke@435 383 }
duke@435 384
duke@435 385 // If true, initialize array slots with no allocated blocks to zero.
duke@435 386 // Otherwise, make them point back to the front.
duke@435 387 bool init_to_zero() { return _init_to_zero; }
ysr@2071 388 // Corresponding setter
ysr@2071 389 void set_init_to_zero(bool val) {
ysr@2071 390 _init_to_zero = val;
ysr@2071 391 assert(_array != NULL, "_array should be non-NULL");
ysr@2071 392 _array->set_init_to_zero(val);
ysr@2071 393 }
duke@435 394
duke@435 395 // Debugging
duke@435 396 // Return the index of the last entry in the "active" region.
duke@435 397 virtual size_t last_active_index() const = 0;
duke@435 398 // Verify the block offset table
duke@435 399 void verify() const;
duke@435 400 void check_all_cards(size_t left_card, size_t right_card) const;
duke@435 401 };
duke@435 402
duke@435 403 ////////////////////////////////////////////////////////////////////////////
duke@435 404 // A subtype of BlockOffsetArray that takes advantage of the fact
duke@435 405 // that its underlying space is a NonContiguousSpace, so that some
duke@435 406 // specialized interfaces can be made available for spaces that
duke@435 407 // manipulate the table.
duke@435 408 ////////////////////////////////////////////////////////////////////////////
duke@435 409 class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
duke@435 410 friend class VMStructs;
duke@435 411 private:
duke@435 412 // The portion [_unallocated_block, _sp.end()) of the space that
duke@435 413 // is a single block known not to contain any objects.
duke@435 414 // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
duke@435 415 HeapWord* _unallocated_block;
duke@435 416
duke@435 417 public:
duke@435 418 BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
duke@435 419 BlockOffsetArray(array, mr, false),
duke@435 420 _unallocated_block(_bottom) { }
duke@435 421
duke@435 422 // accessor
duke@435 423 HeapWord* unallocated_block() const {
duke@435 424 assert(BlockOffsetArrayUseUnallocatedBlock,
duke@435 425 "_unallocated_block is not being maintained");
duke@435 426 return _unallocated_block;
duke@435 427 }
duke@435 428
duke@435 429 void set_unallocated_block(HeapWord* block) {
duke@435 430 assert(BlockOffsetArrayUseUnallocatedBlock,
duke@435 431 "_unallocated_block is not being maintained");
duke@435 432 assert(block >= _bottom && block <= _end, "out of range");
duke@435 433 _unallocated_block = block;
duke@435 434 }
duke@435 435
duke@435 436 // These methods expect to be called with [blk_start, blk_end)
duke@435 437 // representing a block of memory in the heap.
duke@435 438 void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
duke@435 439 void alloc_block(HeapWord* blk, size_t size) {
duke@435 440 alloc_block(blk, blk + size);
duke@435 441 }
duke@435 442
duke@435 443 // The following methods are useful and optimized for a
duke@435 444 // non-contiguous space.
duke@435 445
duke@435 446 // Given a block [blk_start, blk_start + full_blk_size), and
duke@435 447 // a left_blk_size < full_blk_size, adjust the BOT to show two
duke@435 448 // blocks [blk_start, blk_start + left_blk_size) and
duke@435 449 // [blk_start + left_blk_size, blk_start + full_blk_size).
duke@435 450 // It is assumed (and verified in the non-product VM) that the
duke@435 451 // BOT was correct for the original block.
duke@435 452 void split_block(HeapWord* blk_start, size_t full_blk_size,
duke@435 453 size_t left_blk_size);
duke@435 454
duke@435 455 // Adjust BOT to show that it has a block in the range
duke@435 456 // [blk_start, blk_start + size). Only the first card
duke@435 457 // of BOT is touched. It is assumed (and verified in the
duke@435 458 // non-product VM) that the remaining cards of the block
duke@435 459 // are correct.
ysr@2071 460 void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
ysr@2071 461 void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
ysr@2071 462 mark_block(blk, blk + size, reducing);
duke@435 463 }
duke@435 464
duke@435 465 // Adjust _unallocated_block to indicate that a particular
duke@435 466 // block has been newly allocated or freed. It is assumed (and
duke@435 467 // verified in the non-product VM) that the BOT is correct for
duke@435 468 // the given block.
ysr@2071 469 void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
duke@435 470 // Verify that the BOT shows [blk, blk + blk_size) to be one block.
duke@435 471 verify_single_block(blk_start, blk_end);
duke@435 472 if (BlockOffsetArrayUseUnallocatedBlock) {
duke@435 473 _unallocated_block = MAX2(_unallocated_block, blk_end);
duke@435 474 }
duke@435 475 }
duke@435 476
ysr@2071 477 void allocated(HeapWord* blk, size_t size, bool reducing = false) {
ysr@2071 478 allocated(blk, blk + size, reducing);
duke@435 479 }
duke@435 480
duke@435 481 void freed(HeapWord* blk_start, HeapWord* blk_end);
ysr@2071 482 void freed(HeapWord* blk, size_t size);
duke@435 483
duke@435 484 HeapWord* block_start_unsafe(const void* addr) const;
duke@435 485
duke@435 486 // Requires "addr" to be the start of a card and returns the
duke@435 487 // start of the block that contains the given address.
duke@435 488 HeapWord* block_start_careful(const void* addr) const;
duke@435 489
duke@435 490 // Verification & debugging: ensure that the offset table reflects
duke@435 491 // the fact that the block [blk_start, blk_end) or [blk, blk + size)
duke@435 492 // is a single block of storage. NOTE: can't const this because of
duke@435 493 // call to non-const do_block_internal() below.
duke@435 494 void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
duke@435 495 PRODUCT_RETURN;
duke@435 496 void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;
duke@435 497
duke@435 498 // Verify that the given block is before _unallocated_block
duke@435 499 void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
duke@435 500 const PRODUCT_RETURN;
duke@435 501 void verify_not_unallocated(HeapWord* blk, size_t size)
duke@435 502 const PRODUCT_RETURN;
duke@435 503
duke@435 504 // Debugging support
duke@435 505 virtual size_t last_active_index() const;
duke@435 506 };
duke@435 507
duke@435 508 ////////////////////////////////////////////////////////////////////////////
duke@435 509 // A subtype of BlockOffsetArray that takes advantage of the fact
duke@435 510 // that its underlying space is a ContiguousSpace, so that its "active"
duke@435 511 // region can be more efficiently tracked (than for a non-contiguous space).
duke@435 512 ////////////////////////////////////////////////////////////////////////////
duke@435 513 class BlockOffsetArrayContigSpace: public BlockOffsetArray {
duke@435 514 friend class VMStructs;
duke@435 515 private:
duke@435 516 // allocation boundary at which offset array must be updated
duke@435 517 HeapWord* _next_offset_threshold;
duke@435 518 size_t _next_offset_index; // index corresponding to that boundary
duke@435 519
duke@435 520 // Work function when allocation start crosses threshold.
duke@435 521 void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);
duke@435 522
duke@435 523 public:
duke@435 524 BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
duke@435 525 BlockOffsetArray(array, mr, true) {
duke@435 526 _next_offset_threshold = NULL;
duke@435 527 _next_offset_index = 0;
duke@435 528 }
duke@435 529
duke@435 530 void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }
duke@435 531
duke@435 532 // Initialize the threshold for an empty heap.
duke@435 533 HeapWord* initialize_threshold();
duke@435 534 // Zero out the entry for _bottom (offset will be zero)
duke@435 535 void zero_bottom_entry();
duke@435 536
duke@435 537 // Return the next threshold, the point at which the table should be
duke@435 538 // updated.
duke@435 539 HeapWord* threshold() const { return _next_offset_threshold; }
duke@435 540
duke@435 541 // In general, these methods expect to be called with
duke@435 542 // [blk_start, blk_end) representing a block of memory in the heap.
duke@435 543 // In this implementation, however, we are OK even if blk_start and/or
duke@435 544 // blk_end are NULL because NULL is represented as 0, and thus
duke@435 545 // never exceeds the "_next_offset_threshold".
duke@435 546 void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
duke@435 547 if (blk_end > _next_offset_threshold) {
duke@435 548 alloc_block_work(blk_start, blk_end);
duke@435 549 }
duke@435 550 }
duke@435 551 void alloc_block(HeapWord* blk, size_t size) {
duke@435 552 alloc_block(blk, blk + size);
duke@435 553 }
duke@435 554
duke@435 555 HeapWord* block_start_unsafe(const void* addr) const;
duke@435 556
duke@435 557 void serialize(SerializeOopClosure* soc);
duke@435 558
duke@435 559 // Debugging support
duke@435 560 virtual size_t last_active_index() const;
duke@435 561 };
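
For orientation, a rough usage sketch (hypothetical wiring and placeholder variable names, using only the constructors and methods declared above) of how a contiguous space cooperates with the table:

    // Hypothetical wiring of a contiguous space's BOT.
    MemRegion reserved(heap_bottom, heap_reserved_end);
    BlockOffsetSharedArray* shared =
        new BlockOffsetSharedArray(reserved, initial_committed_words);
    BlockOffsetArrayContigSpace bot(shared, MemRegion(heap_bottom, heap_bottom));
    bot.set_contig_space(space);          // the ContiguousSpace that owns this BOT
    bot.initialize_threshold();           // set up the first update threshold

    // After each allocation the space records the new block ...
    bot.alloc_block(obj_start, obj_start + obj_words);
    // ... so that card-based scanners can later recover block starts cheaply.
    HeapWord* start = bot.block_start_unsafe(addr_inside_obj);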
