src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp

author      johnc
date        Mon, 02 Aug 2010 12:51:43 -0700
changeset   2060:2d160770d2e5
parent      1907:c18cbe5936b8
child       2241:72a161e62cc4
permissions -rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2007, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 // The CollectedHeap type requires subtypes to implement a method
ysr@777 26 // "block_start". For some subtypes, notably generational
ysr@777 27 // systems using card-table-based write barriers, the efficiency of this
ysr@777 28 // operation may be important. Implementations of the "BlockOffsetArray"
ysr@777 29 // class may be useful in providing such efficient implementations.
ysr@777 30 //
ysr@777 31 // While generally mirroring the structure of the BOT for GenCollectedHeap,
ysr@777 32 // the following types are tailored more towards G1's uses; these should,
ysr@777 33 // however, be merged back into a common BOT to avoid code duplication
ysr@777 34 // and reduce maintenance overhead.
ysr@777 35 //
ysr@777 36 // G1BlockOffsetTable (abstract)
ysr@777 37 // -- G1BlockOffsetArray (uses G1BlockOffsetSharedArray)
ysr@777 38 // -- G1BlockOffsetArrayContigSpace
ysr@777 39 //
ysr@777 40 // A main impediment to the consolidation of this code might be the
ysr@777 41 // effect of making some of the block_start*() calls non-const as
ysr@777 42 // below. Whether that might adversely affect performance optimizations
ysr@777 43 // that compilers might normally perform in the case of non-G1
ysr@777 44 // collectors needs to be carefully investigated prior to any such
ysr@777 45 // consolidation.
ysr@777 46
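// For illustration only: a typical caller holds an address somewhere
// inside an object (for example while scanning a dirty card) and wants
// the start of that object. A hedged sketch of such a use, assuming a
// table "bot" that covers the address:
//
//   HeapWord* addr      = ...;                      // interior pointer
//   HeapWord* blk_start = bot->block_start(addr);
//   // blk_start <= addr, and addr lies within the block starting there.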
ysr@777 47 // Forward declarations
ysr@777 48 class ContiguousSpace;
ysr@777 49 class G1BlockOffsetSharedArray;
ysr@777 50
ysr@777 51 class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
ysr@777 52 friend class VMStructs;
ysr@777 53 protected:
ysr@777 54 // These members describe the region covered by the table.
ysr@777 55
ysr@777 56 // The space this table is covering.
ysr@777 57 HeapWord* _bottom; // == reserved.start
ysr@777 58 HeapWord* _end; // End of currently allocated region.
ysr@777 59
ysr@777 60 public:
ysr@777 61 // Initialize the table to cover the given space.
ysr@777 62 // The contents of the initial table are undefined.
ysr@777 63 G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
ysr@777 64 _bottom(bottom), _end(end)
ysr@777 65 {
ysr@777 66 assert(_bottom <= _end, "arguments out of order");
ysr@777 67 }
ysr@777 68
ysr@777 69 // Note that the committed size of the covered space may have changed,
ysr@777 70 // so the table size may need to change as well.
ysr@777 71 virtual void resize(size_t new_word_size) = 0;
ysr@777 72
ysr@777 73 virtual void set_bottom(HeapWord* new_bottom) {
ysr@777 74 assert(new_bottom <= _end, "new_bottom > _end");
ysr@777 75 _bottom = new_bottom;
ysr@777 76 resize(pointer_delta(_end, _bottom));
ysr@777 77 }
ysr@777 78
ysr@777 79 // Requires "addr" to be contained by a block, and returns the address of
ysr@777 80 // the start of that block. (May have side effects, namely updating of
ysr@777 81 // shared array entries that "point" too far backwards. This can occur,
ysr@777 82 // for example, when LAB allocation is used in a space covered by the
ysr@777 83 // table.)
ysr@777 84 virtual HeapWord* block_start_unsafe(const void* addr) = 0;
ysr@777 85 // Same as above, but does not have any of the possible side effects
ysr@777 86 // discussed above.
ysr@777 87 virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;
ysr@777 88
ysr@777 89 // Returns the address of the start of the block containing "addr", or
ysr@777 90 // else "null" if it is covered by no block. (May have side effects,
ysr@777 91 // namely updating of shared array entries that "point" too far
ysr@777 92 // backwards. This can occur, for example, when lab allocation is used
ysr@777 93 // in a space covered by the table.)
ysr@777 94 inline HeapWord* block_start(const void* addr);
ysr@777 95 // Same as above, but does not have any of the possible side effects
ysr@777 96 // discussed above.
ysr@777 97 inline HeapWord* block_start_const(const void* addr) const;
ysr@777 98 };
ysr@777 99
ysr@777 100 // This implementation of "G1BlockOffsetTable" divides the covered region
ysr@777 101 // into "N"-word subregions (where "N" = 2^"LogN"). An array with an entry
ysr@777 102 // for each such subregion indicates how far back one must go to find the
ysr@777 103 // start of the chunk that includes the first word of the subregion.
ysr@777 104 //
ysr@777 105 // Each BlockOffsetArray is owned by a Space. However, the actual array
ysr@777 106 // may be shared by several BlockOffsetArrays; this is useful
ysr@777 107 // when a single resizable area (such as a generation) is divided up into
ysr@777 108 // several spaces in which contiguous allocation takes place,
ysr@777 109 // such as, for example, in G1 or in the train generation.
ysr@777 110
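// A simplified illustration of what an entry means (the actual encoding
// and its corner cases live in the implementation files): if card "i"
// begins at address "boundary" and the block containing that boundary
// starts three words earlier, then the entry for card "i" records 3 and
// a lookup can recover the block start as
//
//   HeapWord* blk_start = boundary - offset_array(i);   // boundary - 3
//
// Blocks that reach back farther than a single card's worth of words are
// handled by the implementation and are not shown in this sketch.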
ysr@777 111 // Here is the shared array type.
ysr@777 112
ysr@777 113 class G1BlockOffsetSharedArray: public CHeapObj {
ysr@777 114 friend class G1BlockOffsetArray;
ysr@777 115 friend class G1BlockOffsetArrayContigSpace;
ysr@777 116 friend class VMStructs;
ysr@777 117
ysr@777 118 private:
ysr@777 119 // The reserved region covered by the shared array.
ysr@777 120 MemRegion _reserved;
ysr@777 121
ysr@777 122 // End of the current committed region.
ysr@777 123 HeapWord* _end;
ysr@777 124
ysr@777 125 // Array for keeping offsets for retrieving object start fast given an
ysr@777 126 // address.
ysr@777 127 VirtualSpace _vs;
ysr@777 128 u_char* _offset_array; // byte array keeping backwards offsets
ysr@777 129
ysr@777 130 // Bounds checking accessors:
ysr@777 131 // For performance these have to devolve to array accesses in product builds.
ysr@777 132 u_char offset_array(size_t index) const {
ysr@777 133 assert(index < _vs.committed_size(), "index out of range");
ysr@777 134 return _offset_array[index];
ysr@777 135 }
ysr@777 136
ysr@777 137 void set_offset_array(size_t index, u_char offset) {
ysr@777 138 assert(index < _vs.committed_size(), "index out of range");
ysr@777 139 assert(offset <= N_words, "offset too large");
ysr@777 140 _offset_array[index] = offset;
ysr@777 141 }
ysr@777 142
ysr@777 143 void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
ysr@777 144 assert(index < _vs.committed_size(), "index out of range");
ysr@777 145 assert(high >= low, "addresses out of order");
ysr@777 146 assert(pointer_delta(high, low) <= N_words, "offset too large");
ysr@777 147 _offset_array[index] = (u_char) pointer_delta(high, low);
ysr@777 148 }
ysr@777 149
ysr@777 150 void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
ysr@777 151 assert(index_for(right - 1) < _vs.committed_size(),
ysr@777 152 "right address out of range");
ysr@777 153 assert(left < right, "Heap addresses out of order");
ysr@777 154 size_t num_cards = pointer_delta(right, left) >> LogN_words;
ysr@777 155 memset(&_offset_array[index_for(left)], offset, num_cards);
ysr@777 156 }
ysr@777 157
ysr@777 158 void set_offset_array(size_t left, size_t right, u_char offset) {
ysr@777 159 assert(right < _vs.committed_size(), "right address out of range");
ysr@777 160 assert(left <= right, "indexes out of order");
ysr@777 161 size_t num_cards = right - left + 1;
ysr@777 162 memset(&_offset_array[left], offset, num_cards);
ysr@777 163 }
ysr@777 164
ysr@777 165 void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
ysr@777 166 assert(index < _vs.committed_size(), "index out of range");
ysr@777 167 assert(high >= low, "addresses out of order");
ysr@777 168 assert(pointer_delta(high, low) <= N_words, "offset too large");
ysr@777 169 assert(_offset_array[index] == pointer_delta(high, low),
ysr@777 170 "Wrong offset");
ysr@777 171 }
ysr@777 172
ysr@777 173 bool is_card_boundary(HeapWord* p) const;
ysr@777 174
ysr@777 175 // Return the number of slots needed for an offset array
ysr@777 176 // that covers mem_region_words words.
ysr@777 177 // We always add an extra slot because if an object
ysr@777 178 // ends on a card boundary we put a 0 in the next
ysr@777 179 // offset array slot, so we want that slot always
ysr@777 180 // to be reserved.
ysr@777 181
ysr@777 182 size_t compute_size(size_t mem_region_words) {
ysr@777 183 size_t number_of_slots = (mem_region_words / N_words) + 1;
ysr@777 184 return ReservedSpace::page_align_size_up(number_of_slots);
ysr@777 185 }
ysr@777 186
ysr@777 187 public:
ysr@777 188 enum SomePublicConstants {
ysr@777 189 LogN = 9,
ysr@777 190 LogN_words = LogN - LogHeapWordSize,
ysr@777 191 N_bytes = 1 << LogN,
ysr@777 192 N_words = 1 << LogN_words
ysr@777 193 };
ysr@777 194
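// To make the constants above concrete (a hedged example assuming a
// 64-bit build, where LogHeapWordSize == 3):
//
//   LogN       == 9          =>  N_bytes == 512    // bytes per card
//   LogN_words == 9 - 3 == 6 =>  N_words == 64     // words per card
//
// so each u_char slot of "_offset_array" describes one 512-byte card of
// the covered region.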
ysr@777 195 // Initialize the table to cover from "base" to (at least)
ysr@777 196 // "base + init_word_size". In the future, the table may be expanded
ysr@777 197 // (see "resize" below) up to the size of "_reserved" (which must be at
ysr@777 198 // least "init_word_size".) The contents of the initial table are
ysr@777 199 // undefined; it is the responsibility of the constituent
ysr@777 200 // G1BlockOffsetTable(s) to initialize cards.
ysr@777 201 G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
ysr@777 202
ysr@777 203 // Notes a change in the committed size of the region covered by the
ysr@777 204 // table. The "new_word_size" may not be larger than the size of the
ysr@777 205 // reserved region this table covers.
ysr@777 206 void resize(size_t new_word_size);
ysr@777 207
ysr@777 208 void set_bottom(HeapWord* new_bottom);
ysr@777 209
ysr@777 210 // Updates all the BlockOffsetArrays sharing this shared array to
ysr@777 211 // reflect the current "top"'s of their spaces.
ysr@777 212 void update_offset_arrays();
ysr@777 213
ysr@777 214 // Return the appropriate index into "_offset_array" for "p".
ysr@777 215 inline size_t index_for(const void* p) const;
ysr@777 216
ysr@777 217 // Return the address indicating the start of the region corresponding to
ysr@777 218 // "index" in "_offset_array".
ysr@777 219 inline HeapWord* address_for_index(size_t index) const;
ysr@777 220 };
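// A property the two inline methods above are expected to satisfy (a
// hedged reading of their declarations; the bodies live in the
// corresponding .inline.hpp file): for any covered address "p",
//
//   address_for_index(index_for(p)) <= p, and
//   p < address_for_index(index_for(p)) + N_words
//
// i.e. index_for(p) names the card whose N_words-word range contains p.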
ysr@777 221
ysr@777 222 // And here is the G1BlockOffsetTable subtype that uses the array.
ysr@777 223
ysr@777 224 class G1BlockOffsetArray: public G1BlockOffsetTable {
ysr@777 225 friend class G1BlockOffsetSharedArray;
ysr@777 226 friend class G1BlockOffsetArrayContigSpace;
ysr@777 227 friend class VMStructs;
ysr@777 228 private:
ysr@777 229 enum SomePrivateConstants {
ysr@777 230 N_words = G1BlockOffsetSharedArray::N_words,
ysr@777 231 LogN = G1BlockOffsetSharedArray::LogN
ysr@777 232 };
ysr@777 233
ysr@777 234 // The following enums are used by do_block_helper
ysr@777 235 enum Action {
ysr@777 236 Action_single, // BOT records a single block (see single_block())
ysr@777 237 Action_mark, // BOT marks the start of a block (see mark_block())
ysr@777 238 Action_check // Check that BOT records block correctly
ysr@777 239 // (see verify_single_block()).
ysr@777 240 };
ysr@777 241
ysr@777 242 // This is the array, which can be shared by several BlockOffsetArrays
ysr@777 243 // servicing different spaces within the covered region.
ysr@777 244 G1BlockOffsetSharedArray* _array;
ysr@777 245
ysr@777 246 // The space that owns this subregion.
ysr@777 247 Space* _sp;
ysr@777 248
ysr@777 249 // If "_sp" is a contiguous space, the field below is the view of "_sp"
ysr@777 250 // as a contiguous space, else NULL.
ysr@777 251 ContiguousSpace* _csp;
ysr@777 252
ysr@777 253 // If true, array entries are initialized to 0; otherwise, they are
ysr@777 254 // initialized to point backwards to the beginning of the covered region.
ysr@777 255 bool _init_to_zero;
ysr@777 256
ysr@777 257 // The portion [_unallocated_block, _sp.end()) of the space that
ysr@777 258 // is a single block known not to contain any objects.
ysr@777 259 // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
ysr@777 260 HeapWord* _unallocated_block;
ysr@777 261
ysr@777 262 // Sets the entries
ysr@777 263 // corresponding to the cards starting at "start" and ending at "end"
ysr@777 264 // to point back to the card before "start": the interval [start, end)
ysr@777 265 // is right-open.
ysr@777 266 void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
ysr@777 267 // Same as above, except that the args here are a card _index_ interval
ysr@777 268 // that is closed: [start_index, end_index]
ysr@777 269 void set_remainder_to_point_to_start_incl(size_t start, size_t end);
ysr@777 270
ysr@777 271 // A helper function for BOT adjustment/verification work
ysr@777 272 void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);
ysr@777 273
ysr@777 274 protected:
ysr@777 275
ysr@777 276 ContiguousSpace* csp() const { return _csp; }
ysr@777 277
ysr@777 278 // Returns the address of a block whose start is at most "addr".
ysr@777 279 // If "has_max_index" is true, assumes "max_index" is the last valid one
ysr@777 280 // in the array.
ysr@777 281 inline HeapWord* block_at_or_preceding(const void* addr,
ysr@777 282 bool has_max_index,
ysr@777 283 size_t max_index) const;
ysr@777 284
ysr@777 285 // "q" is a block boundary that is <= "addr"; "n" is the address of the
ysr@777 286 // next block (or the end of the space.) Return the address of the
ysr@777 287 // beginning of the block that contains "addr". Does so without side
ysr@777 288 // effects (see, e.g., spec of block_start.)
ysr@777 289 inline HeapWord*
ysr@777 290 forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
ysr@777 291 const void* addr) const;
ysr@777 292
ysr@777 293 // "q" is a block boundary that is <= "addr"; return the address of the
ysr@777 294 // beginning of the block that contains "addr". May have side effects
ysr@777 295 // on "this", by updating imprecise entries.
ysr@777 296 inline HeapWord* forward_to_block_containing_addr(HeapWord* q,
ysr@777 297 const void* addr);
ysr@777 298
ysr@777 299 // "q" is a block boundary that is <= "addr"; "n" is the address of the
ysr@777 300 // next block (or the end of the space.) Return the address of the
ysr@777 301 // beginning of the block that contains "addr". May have side effects
ysr@777 302 // on "this", by updating imprecise entries.
ysr@777 303 HeapWord* forward_to_block_containing_addr_slow(HeapWord* q,
ysr@777 304 HeapWord* n,
ysr@777 305 const void* addr);
ysr@777 306
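// Conceptually (a hedged sketch, not the actual implementation), the
// forward_to_block_containing_addr* helpers above walk block by block
// from the known boundary "q" until they step past "addr":
//
//   while (n <= addr) {    // "addr" is not yet inside [q, n)
//     q = n;
//     n = q + (size of the block starting at q);
//   }
//   return q;              // the block containing "addr" starts at q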
ysr@777 307 // Requires that "*threshold_" be the first array entry boundary at or
ysr@777 308 // above "blk_start", and that "*index_" be the corresponding array
ysr@777 309 // index. If the block starts at or crosses "*threshold_", records
ysr@777 310 // "blk_start" as the appropriate block start for the array index
ysr@777 311 // starting at "*threshold_", and for any other indices crossed by the
ysr@777 312 // block. Updates "*threshold_" and "*index_" to correspond to the first
ysr@777 313 // index after the block end.
ysr@777 314 void alloc_block_work2(HeapWord** threshold_, size_t* index_,
ysr@777 315 HeapWord* blk_start, HeapWord* blk_end);
ysr@777 316
ysr@777 317 public:
ysr@777 318 // The space may not have its bottom and top set yet, which is why the
ysr@777 319 // region is passed as a parameter. If "init_to_zero" is true, the
ysr@777 320 // elements of the array are initialized to zero. Otherwise, they are
ysr@777 321 // initialized to point backwards to the beginning.
ysr@777 322 G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
ysr@777 323 bool init_to_zero);
ysr@777 324
ysr@777 325 // Note: this ought to be part of the constructor, but that would require
ysr@777 326 // "this" to be passed as a parameter to a member constructor for
ysr@777 327 // the containing concrete subtype of Space.
ysr@777 328 // This would be legal C++, but MS VC++ doesn't allow it.
ysr@777 329 void set_space(Space* sp);
ysr@777 330
ysr@777 331 // Resets the covered region to the given "mr".
ysr@777 332 void set_region(MemRegion mr);
ysr@777 333
ysr@777 334 // Resets the covered region to one with the same _bottom as before but
ysr@777 335 // the "new_word_size".
ysr@777 336 void resize(size_t new_word_size);
ysr@777 337
ysr@777 338 // These must be guaranteed to work properly (i.e., do nothing)
ysr@777 339 // when "blk_start" ("blk" for second version) is "NULL".
ysr@777 340 virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
ysr@777 341 virtual void alloc_block(HeapWord* blk, size_t size) {
ysr@777 342 alloc_block(blk, blk + size);
ysr@777 343 }
ysr@777 344
ysr@777 345 // The following methods are useful and optimized for a
ysr@777 346 // general, non-contiguous space.
ysr@777 347
ysr@777 348 // The given arguments are required to be the starts of adjacent ("blk1"
ysr@777 349 // before "blk2") well-formed blocks covered by "this". After this call,
ysr@777 350 // they should be considered to form one block.
ysr@777 351 virtual void join_blocks(HeapWord* blk1, HeapWord* blk2);
ysr@777 352
ysr@777 353 // Given a block [blk_start, blk_start + full_blk_size), and
ysr@777 354 // a left_blk_size < full_blk_size, adjust the BOT to show two
ysr@777 355 // blocks [blk_start, blk_start + left_blk_size) and
ysr@777 356 // [blk_start + left_blk_size, blk_start + full_blk_size).
ysr@777 357 // It is assumed (and verified in the non-product VM) that the
ysr@777 358 // BOT was correct for the original block.
ysr@777 359 void split_block(HeapWord* blk_start, size_t full_blk_size,
ysr@777 360 size_t left_blk_size);
ysr@777 361
ysr@777 362 // Adjust the BOT to show that it has a single block in the
ysr@777 363 // range [blk_start, blk_start + size). All necessary BOT
ysr@777 364 // cards are adjusted, but _unallocated_block isn't.
ysr@777 365 void single_block(HeapWord* blk_start, HeapWord* blk_end);
ysr@777 366 void single_block(HeapWord* blk, size_t size) {
ysr@777 367 single_block(blk, blk + size);
ysr@777 368 }
ysr@777 369
ysr@777 370 // Adjust BOT to show that it has a block in the range
ysr@777 371 // [blk_start, blk_start + size). Only the first card
ysr@777 372 // of BOT is touched. It is assumed (and verified in the
ysr@777 373 // non-product VM) that the remaining cards of the block
ysr@777 374 // are correct.
ysr@777 375 void mark_block(HeapWord* blk_start, HeapWord* blk_end);
ysr@777 376 void mark_block(HeapWord* blk, size_t size) {
ysr@777 377 mark_block(blk, blk + size);
ysr@777 378 }
ysr@777 379
ysr@777 380 // Adjust _unallocated_block to indicate that a particular
ysr@777 381 // block has been newly allocated or freed. It is assumed (and
ysr@777 382 // verified in the non-product VM) that the BOT is correct for
ysr@777 383 // the given block.
ysr@777 384 inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 385 // Verify that the BOT shows [blk_start, blk_end) to be one block.
ysr@777 386 verify_single_block(blk_start, blk_end);
ysr@777 387 if (BlockOffsetArrayUseUnallocatedBlock) {
ysr@777 388 _unallocated_block = MAX2(_unallocated_block, blk_end);
ysr@777 389 }
ysr@777 390 }
ysr@777 391
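// Why keep _unallocated_block tight? A hedged reading of the
// BlockOffsetArrayUseUnallocatedBlock shortcut mentioned above: queries
// for addresses at or beyond _unallocated_block do not need the offset
// array at all, since [_unallocated_block, _sp.end()) is known to be a
// single block with no objects in it; allocated()/freed() exist to keep
// that boundary accurate.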
ysr@777 392 inline void allocated(HeapWord* blk, size_t size) {
ysr@777 393 allocated(blk, blk + size);
ysr@777 394 }
ysr@777 395
ysr@777 396 inline void freed(HeapWord* blk_start, HeapWord* blk_end);
ysr@777 397
ysr@777 398 inline void freed(HeapWord* blk, size_t size);
ysr@777 399
ysr@777 400 virtual HeapWord* block_start_unsafe(const void* addr);
ysr@777 401 virtual HeapWord* block_start_unsafe_const(const void* addr) const;
ysr@777 402
ysr@777 403 // Requires "addr" to be the start of a card and returns the
ysr@777 404 // start of the block that contains the given address.
ysr@777 405 HeapWord* block_start_careful(const void* addr) const;
ysr@777 406
ysr@777 407 // If true, initialize array slots with no allocated blocks to zero.
ysr@777 408 // Otherwise, make them point back to the front.
ysr@777 409 bool init_to_zero() { return _init_to_zero; }
ysr@777 410
ysr@777 411 // Verification & debugging - ensure that the offset table reflects the fact
ysr@777 412 // that the block [blk_start, blk_end) or [blk, blk + size) is a
ysr@777 413 // single block of storage. NOTE: can't be declared const because of the
ysr@777 414 // call to the non-const do_block_internal() below.
ysr@777 415 inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 416 if (VerifyBlockOffsetArray) {
ysr@777 417 do_block_internal(blk_start, blk_end, Action_check);
ysr@777 418 }
ysr@777 419 }
ysr@777 420
ysr@777 421 inline void verify_single_block(HeapWord* blk, size_t size) {
ysr@777 422 verify_single_block(blk, blk + size);
ysr@777 423 }
ysr@777 424
ysr@777 425 // Verify that the given block is before _unallocated_block
ysr@777 426 inline void verify_not_unallocated(HeapWord* blk_start,
ysr@777 427 HeapWord* blk_end) const {
ysr@777 428 if (BlockOffsetArrayUseUnallocatedBlock) {
ysr@777 429 assert(blk_start < blk_end, "Block inconsistency?");
ysr@777 430 assert(blk_end <= _unallocated_block, "_unallocated_block problem");
ysr@777 431 }
ysr@777 432 }
ysr@777 433
ysr@777 434 inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
ysr@777 435 verify_not_unallocated(blk, blk + size);
ysr@777 436 }
ysr@777 437
ysr@777 438 void check_all_cards(size_t left_card, size_t right_card) const;
ysr@777 439 };
ysr@777 440
ysr@777 441 // A subtype of BlockOffsetArray that takes advantage of the fact
ysr@777 442 // that its underlying space is a ContiguousSpace, so that its "active"
ysr@777 443 // region can be more efficiently tracked (than for a non-contiguous space).
ysr@777 444 class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
ysr@777 445 friend class VMStructs;
ysr@777 446
ysr@777 447 // allocation boundary at which offset array must be updated
ysr@777 448 HeapWord* _next_offset_threshold;
ysr@777 449 size_t _next_offset_index; // index corresponding to that boundary
ysr@777 450
ysr@777 451 // Work function to be called when allocation start crosses the next
ysr@777 452 // threshold in the contig space.
ysr@777 453 void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 454 alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
ysr@777 455 blk_start, blk_end);
ysr@777 456 }
ysr@777 457
ysr@777 458
ysr@777 459 public:
ysr@777 460 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
ysr@777 461
ysr@777 462 // Initialize the threshold to reflect the first boundary after the
ysr@777 463 // bottom of the covered region.
ysr@777 464 HeapWord* initialize_threshold();
ysr@777 465
ysr@777 466 // Zero out the entry for _bottom (offset will be zero).
ysr@777 467 void zero_bottom_entry();
ysr@777 468
ysr@777 469 // Return the next threshold, the point at which the table should be
ysr@777 470 // updated.
ysr@777 471 HeapWord* threshold() const { return _next_offset_threshold; }
ysr@777 472
ysr@777 473 // These must be guaranteed to work properly (i.e., do nothing)
ysr@777 474 // when "blk_start" ("blk" for second version) is "NULL". In this
ysr@777 475 // implementation, that's true because NULL is represented as 0, and thus
ysr@777 476 // never exceeds the "_next_offset_threshold".
ysr@777 477 void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 478 if (blk_end > _next_offset_threshold)
ysr@777 479 alloc_block_work1(blk_start, blk_end);
ysr@777 480 }
ysr@777 481 void alloc_block(HeapWord* blk, size_t size) {
ysr@777 482 alloc_block(blk, blk+size);
ysr@777 483 }
ysr@777 484
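// Putting the alloc_block() overloads above in context (an illustrative
// sketch; "space" and its allocate() call are hypothetical stand-ins for
// the owning contiguous space):
//
//   HeapWord* obj = space->allocate(word_size);
//   bot->alloc_block(obj, word_size);
//
// Most calls return after the single compare against
// _next_offset_threshold; only a block that crosses the threshold pays
// for the alloc_block_work1()/alloc_block_work2() update.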
ysr@777 485 HeapWord* block_start_unsafe(const void* addr);
ysr@777 486 HeapWord* block_start_unsafe_const(const void* addr) const;
ysr@777 487 };
