src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp

author       johnc
date         Thu, 07 Apr 2011 09:53:20 -0700
changeset    2781:e1162778c1c8
parent       2453:2250ee17e258
child        3900:d2a62e0f25eb
permissions  -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes

ysr@777 1 /*
tonyp@2453 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
stefank@2314 27
stefank@2314 28 #include "memory/memRegion.hpp"
stefank@2314 29 #include "runtime/virtualspace.hpp"
stefank@2314 30 #include "utilities/globalDefinitions.hpp"
stefank@2314 31
ysr@777 32 // The CollectedHeap type requires subtypes to implement a method
ysr@777 33 // "block_start". For some subtypes, notably generational
ysr@777 34 // systems using card-table-based write barriers, the efficiency of this
ysr@777 35 // operation may be important. Implementations of the "BlockOffsetArray"
ysr@777 36 // class may be useful in providing such efficient implementations.
ysr@777 37 //
ysr@777 38 // While generally mirroring the structure of the BOT for GenCollectedHeap,
ysr@777 39 // the following types are tailored more towards G1's uses; these should,
ysr@777 40 // however, be merged back into a common BOT to avoid code duplication
ysr@777 41 // and reduce maintenance overhead.
ysr@777 42 //
ysr@777 43 // G1BlockOffsetTable (abstract)
ysr@777 44 // -- G1BlockOffsetArray (uses G1BlockOffsetSharedArray)
ysr@777 45 // -- G1BlockOffsetArrayContigSpace
ysr@777 46 //
ysr@777 47 // A main impediment to the consolidation of this code might be the
ysr@777 48 // effect of making some of the block_start*() calls non-const as
ysr@777 49 // below. Whether that might adversely affect performance optimizations
ysr@777 50 // that compilers might normally perform in the case of non-G1
ysr@777 51 // collectors needs to be carefully investigated prior to any such
ysr@777 52 // consolidation.
ysr@777 53
ysr@777 54 // Forward declarations
ysr@777 55 class ContiguousSpace;
ysr@777 56 class G1BlockOffsetSharedArray;
ysr@777 57
ysr@777 58 class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
ysr@777 59 friend class VMStructs;
ysr@777 60 protected:
ysr@777 61 // These members describe the region covered by the table.
ysr@777 62
ysr@777 63 // The space this table is covering.
ysr@777 64 HeapWord* _bottom; // == reserved.start
ysr@777 65 HeapWord* _end; // End of currently allocated region.
ysr@777 66
ysr@777 67 public:
ysr@777 68 // Initialize the table to cover the given space.
ysr@777 69 // The contents of the initial table are undefined.
ysr@777 70 G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
ysr@777 71 _bottom(bottom), _end(end)
ysr@777 72 {
ysr@777 73 assert(_bottom <= _end, "arguments out of order");
ysr@777 74 }
ysr@777 75
ysr@777 76 // Note that the committed size of the covered space may have changed,
ysr@777 77 // so the table may also need to be resized.
ysr@777 78 virtual void resize(size_t new_word_size) = 0;
ysr@777 79
ysr@777 80 virtual void set_bottom(HeapWord* new_bottom) {
ysr@777 81 assert(new_bottom <= _end, "new_bottom > _end");
ysr@777 82 _bottom = new_bottom;
ysr@777 83 resize(pointer_delta(_end, _bottom));
ysr@777 84 }
ysr@777 85
ysr@777 86 // Requires "addr" to be contained by a block, and returns the address of
ysr@777 87 // the start of that block. (May have side effects, namely updating of
ysr@777 88 // shared array entries that "point" too far backwards. This can occur,
ysr@777 89 // for example, when LAB allocation is used in a space covered by the
ysr@777 90 // table.)
ysr@777 91 virtual HeapWord* block_start_unsafe(const void* addr) = 0;
ysr@777 92 // Same as above, but does not have any of the possible side effects
ysr@777 93 // discussed above.
ysr@777 94 virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;
ysr@777 95
ysr@777 96 // Returns the address of the start of the block containing "addr", or
ysr@777 97 // else "null" if it is covered by no block. (May have side effects,
ysr@777 98 // namely updating of shared array entries that "point" too far
ysr@777 99 // backwards. This can occur, for example, when lab allocation is used
ysr@777 100 // in a space covered by the table.)
ysr@777 101 inline HeapWord* block_start(const void* addr);
ysr@777 102 // Same as above, but does not have any of the possible side effects
ysr@777 103 // discussed above.
ysr@777 104 inline HeapWord* block_start_const(const void* addr) const;
ysr@777 105 };
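// A hypothetical usage sketch (not code from this file): given an address
// inside the covered space, block_start() returns the first word of the
// block containing it.
//
//   G1BlockOffsetTable* bot = ...;   // some concrete subtype
//   HeapWord* addr = ...;            // _bottom <= addr < _end
//   HeapWord* blk  = bot->block_start(addr);
//   assert(blk <= addr, "block start is at or before the queried address");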
ysr@777 106
ysr@777 107 // This implementation of "G1BlockOffsetTable" divides the covered region
ysr@777 108 // into "N"-word subregions (where "N" = 2^"LogN"). An array with an entry
ysr@777 109 // for each such subregion indicates how far back one must go to find the
ysr@777 110 // start of the chunk that includes the first word of the subregion.
ysr@777 111 //
ysr@777 112 // Each BlockOffsetArray is owned by a Space. However, the actual array
ysr@777 113 // may be shared by several BlockOffsetArrays; this is useful
ysr@777 114 // when a single resizable area (such as a generation) is divided up into
ysr@777 115 // several spaces in which contiguous allocation takes place
ysr@777 116 // (such as, for example, in G1 or in the train generation).
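// As a rough illustration of the mapping (a sketch in terms of the
// constants declared below, not necessarily the exact implementation):
// an address p is converted to a card index and back with
//
//   size_t    index = pointer_delta(p, _reserved.start()) >> LogN_words;
//   HeapWord* start = _reserved.start() + (index << LogN_words);
//
// and, for a block that begins within the preceding card, the entry
// _offset_array[index] records how many words before the card boundary
// that block starts (blocks starting further back may use a different
// encoding).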
ysr@777 117
ysr@777 118 // Here is the shared array type.
ysr@777 119
ysr@777 120 class G1BlockOffsetSharedArray: public CHeapObj {
ysr@777 121 friend class G1BlockOffsetArray;
ysr@777 122 friend class G1BlockOffsetArrayContigSpace;
ysr@777 123 friend class VMStructs;
ysr@777 124
ysr@777 125 private:
ysr@777 126 // The reserved region covered by the shared array.
ysr@777 127 MemRegion _reserved;
ysr@777 128
ysr@777 129 // End of the current committed region.
ysr@777 130 HeapWord* _end;
ysr@777 131
ysr@777 132 // Array for keeping offsets for retrieving object start fast given an
ysr@777 133 // address.
ysr@777 134 VirtualSpace _vs;
ysr@777 135 u_char* _offset_array; // byte array keeping backwards offsets
ysr@777 136
ysr@777 137 // Bounds checking accessors:
ysr@777 138 // For performance these have to devolve to array accesses in product builds.
ysr@777 139 u_char offset_array(size_t index) const {
ysr@777 140 assert(index < _vs.committed_size(), "index out of range");
ysr@777 141 return _offset_array[index];
ysr@777 142 }
ysr@777 143
ysr@777 144 void set_offset_array(size_t index, u_char offset) {
ysr@777 145 assert(index < _vs.committed_size(), "index out of range");
ysr@777 146 assert(offset <= N_words, "offset too large");
ysr@777 147 _offset_array[index] = offset;
ysr@777 148 }
ysr@777 149
ysr@777 150 void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
ysr@777 151 assert(index < _vs.committed_size(), "index out of range");
ysr@777 152 assert(high >= low, "addresses out of order");
ysr@777 153 assert(pointer_delta(high, low) <= N_words, "offset too large");
ysr@777 154 _offset_array[index] = (u_char) pointer_delta(high, low);
ysr@777 155 }
ysr@777 156
ysr@777 157 void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
ysr@777 158 assert(index_for(right - 1) < _vs.committed_size(),
ysr@777 159 "right address out of range");
ysr@777 160 assert(left < right, "Heap addresses out of order");
ysr@777 161 size_t num_cards = pointer_delta(right, left) >> LogN_words;
ysr@777 162 memset(&_offset_array[index_for(left)], offset, num_cards);
ysr@777 163 }
ysr@777 164
ysr@777 165 void set_offset_array(size_t left, size_t right, u_char offset) {
ysr@777 166 assert(right < _vs.committed_size(), "right address out of range");
ysr@777 167 assert(left <= right, "indexes out of order");
ysr@777 168 size_t num_cards = right - left + 1;
ysr@777 169 memset(&_offset_array[left], offset, num_cards);
ysr@777 170 }
ysr@777 171
ysr@777 172 void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
ysr@777 173 assert(index < _vs.committed_size(), "index out of range");
ysr@777 174 assert(high >= low, "addresses out of order");
ysr@777 175 assert(pointer_delta(high, low) <= N_words, "offset too large");
ysr@777 176 assert(_offset_array[index] == pointer_delta(high, low),
ysr@777 177 "Wrong offset");
ysr@777 178 }
ysr@777 179
ysr@777 180 bool is_card_boundary(HeapWord* p) const;
ysr@777 181
ysr@777 182 // Return the number of slots needed for an offset array
ysr@777 183 // that covers mem_region_words words.
ysr@777 184 // We always add an extra slot because if an object
ysr@777 185 // ends on a card boundary we put a 0 in the next
ysr@777 186 // offset array slot, so we want that slot always
ysr@777 187 // to be reserved.
ysr@777 188
ysr@777 189 size_t compute_size(size_t mem_region_words) {
ysr@777 190 size_t number_of_slots = (mem_region_words / N_words) + 1;
ysr@777 191 return ReservedSpace::page_align_size_up(number_of_slots);
ysr@777 192 }
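  // Worked example (assuming a 64-bit VM, so N_words == 64): covering
  // 1 MB of heap gives mem_region_words == 131072, hence
  // 131072 / 64 + 1 == 2049 slots before page alignment rounds the
  // reservation up.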
ysr@777 193
ysr@777 194 public:
ysr@777 195 enum SomePublicConstants {
ysr@777 196 LogN = 9,
ysr@777 197 LogN_words = LogN - LogHeapWordSize,
ysr@777 198 N_bytes = 1 << LogN,
ysr@777 199 N_words = 1 << LogN_words
ysr@777 200 };
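  // For example: with LogN = 9 each card covers 2^9 = 512 bytes; on a
  // 64-bit VM LogHeapWordSize is 3, so LogN_words = 6 and each card
  // covers N_words = 64 heap words.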
ysr@777 201
ysr@777 202 // Initialize the table to cover from "base" to (at least)
ysr@777 203 // "base + init_word_size". In the future, the table may be expanded
ysr@777 204 // (see "resize" below) up to the size of "_reserved" (which must be at
ysr@777 205 // least "init_word_size".) The contents of the initial table are
ysr@777 206 // undefined; it is the responsibility of the constituent
ysr@777 207 // G1BlockOffsetTable(s) to initialize cards.
ysr@777 208 G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
ysr@777 209
ysr@777 210 // Notes a change in the committed size of the region covered by the
ysr@777 211 // table. The "new_word_size" may not be larger than the size of the
ysr@777 212 // reserved region this table covers.
ysr@777 213 void resize(size_t new_word_size);
ysr@777 214
ysr@777 215 void set_bottom(HeapWord* new_bottom);
ysr@777 216
ysr@777 217 // Updates all the BlockOffsetArray's sharing this shared array to
ysr@777 218 // reflect the current "top"'s of their spaces.
ysr@777 219 void update_offset_arrays();
ysr@777 220
ysr@777 221 // Return the appropriate index into "_offset_array" for "p".
ysr@777 222 inline size_t index_for(const void* p) const;
ysr@777 223
ysr@777 224 // Return the address indicating the start of the region corresponding to
ysr@777 225 // "index" in "_offset_array".
ysr@777 226 inline HeapWord* address_for_index(size_t index) const;
ysr@777 227 };
ysr@777 228
ysr@777 229 // And here is the G1BlockOffsetTable subtype that uses the array.
ysr@777 230
ysr@777 231 class G1BlockOffsetArray: public G1BlockOffsetTable {
ysr@777 232 friend class G1BlockOffsetSharedArray;
ysr@777 233 friend class G1BlockOffsetArrayContigSpace;
ysr@777 234 friend class VMStructs;
ysr@777 235 private:
ysr@777 236 enum SomePrivateConstants {
ysr@777 237 N_words = G1BlockOffsetSharedArray::N_words,
ysr@777 238 LogN = G1BlockOffsetSharedArray::LogN
ysr@777 239 };
ysr@777 240
ysr@777 241 // The following enums are used by do_block_internal()
ysr@777 242 enum Action {
ysr@777 243 Action_single, // BOT records a single block (see single_block())
ysr@777 244 Action_mark, // BOT marks the start of a block (see mark_block())
ysr@777 245 Action_check // Check that BOT records block correctly
ysr@777 246 // (see verify_single_block()).
ysr@777 247 };
ysr@777 248
ysr@777 249 // This is the array, which can be shared by several BlockOffsetArrays
ysr@777 250 // servicing different spaces within the region covered by the shared array.
ysr@777 251 G1BlockOffsetSharedArray* _array;
ysr@777 252
ysr@777 253 // The space that owns this subregion.
ysr@777 254 Space* _sp;
ysr@777 255
ysr@777 256 // If "_sp" is a contiguous space, the field below is the view of "_sp"
ysr@777 257 // as a contiguous space, else NULL.
ysr@777 258 ContiguousSpace* _csp;
ysr@777 259
ysr@777 260 // If true, array entries are initialized to 0; otherwise, they are
ysr@777 261 // initialized to point backwards to the beginning of the covered region.
ysr@777 262 bool _init_to_zero;
ysr@777 263
ysr@777 264 // The portion [_unallocated_block, _sp.end()) of the space that
ysr@777 265 // is a single block known not to contain any objects.
ysr@777 266 // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
ysr@777 267 HeapWord* _unallocated_block;
ysr@777 268
ysr@777 269 // Sets the entries
ysr@777 270 // corresponding to the cards starting at "start" and ending at "end"
ysr@777 271 // to point back to the card before "start": the interval [start, end)
ysr@777 272 // is right-open.
ysr@777 273 void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
ysr@777 274 // Same as above, except that the args here are a card _index_ interval
ysr@777 275 // that is closed: [start_index, end_index]
ysr@777 276 void set_remainder_to_point_to_start_incl(size_t start, size_t end);
ysr@777 277
ysr@777 278 // A helper function for BOT adjustment/verification work
ysr@777 279 void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);
ysr@777 280
ysr@777 281 protected:
ysr@777 282
ysr@777 283 ContiguousSpace* csp() const { return _csp; }
ysr@777 284
ysr@777 285 // Returns the address of a block whose start is at most "addr".
ysr@777 286 // If "has_max_index" is true, assumes "max_index" is the last valid one
ysr@777 287 // in the array.
ysr@777 288 inline HeapWord* block_at_or_preceding(const void* addr,
ysr@777 289 bool has_max_index,
ysr@777 290 size_t max_index) const;
ysr@777 291
ysr@777 292 // "q" is a block boundary that is <= "addr"; "n" is the address of the
ysr@777 293 // next block (or the end of the space.) Return the address of the
ysr@777 294 // beginning of the block that contains "addr". Does so without side
ysr@777 295 // effects (see, e.g., spec of block_start.)
ysr@777 296 inline HeapWord*
ysr@777 297 forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
ysr@777 298 const void* addr) const;
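  // A sketch of the forwarding walk these helpers perform (illustration
  // only, not the actual code): starting from the known block boundary q,
  // step forward one block at a time until the next boundary n would pass
  // "addr"; block_size_of() stands in for the owning space's block-size
  // query.
  //
  //   while (n <= addr) {
  //     q = n;
  //     n = q + block_size_of(q);
  //   }
  //   return q;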
ysr@777 299
ysr@777 300 // "q" is a block boundary that is <= "addr"; return the address of the
ysr@777 301 // beginning of the block that contains "addr". May have side effects
ysr@777 302 // on "this", by updating imprecise entries.
ysr@777 303 inline HeapWord* forward_to_block_containing_addr(HeapWord* q,
ysr@777 304 const void* addr);
ysr@777 305
ysr@777 306 // "q" is a block boundary that is <= "addr"; "n" is the address of the
ysr@777 307 // next block (or the end of the space.) Return the address of the
ysr@777 308 // beginning of the block that contains "addr". May have side effects
ysr@777 309 // on "this", by updating imprecise entries.
ysr@777 310 HeapWord* forward_to_block_containing_addr_slow(HeapWord* q,
ysr@777 311 HeapWord* n,
ysr@777 312 const void* addr);
ysr@777 313
ysr@777 314 // Requires that "*threshold_" be the first array entry boundary at or
ysr@777 315 // above "blk_start", and that "*index_" be the corresponding array
ysr@777 316 // index. If the block starts at or crosses "*threshold_", records
ysr@777 317 // "blk_start" as the appropriate block start for the array index
ysr@777 318 // starting at "*threshold_", and for any other indices crossed by the
ysr@777 319 // block. Updates "*threshold_" and "*index_" to correspond to the first
ysr@777 320 // index after the block end.
ysr@777 321 void alloc_block_work2(HeapWord** threshold_, size_t* index_,
ysr@777 322 HeapWord* blk_start, HeapWord* blk_end);
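  // Worked example of the contract above (illustrative numbers, 64-word
  // cards assumed): with *threshold_ at word offset 128 and a block
  // [100, 300) being recorded, the entries for the cards starting at 128,
  // 192 and 256 are all made to identify 100 as the block start, and
  // *threshold_ / *index_ are advanced to the first card boundary past 300.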
ysr@777 323
ysr@777 324 public:
ysr@777 325 // The space may not have its bottom and top set yet, which is why the
ysr@777 326 // region is passed as a parameter. If "init_to_zero" is true, the
ysr@777 327 // elements of the array are initialized to zero. Otherwise, they are
ysr@777 328 // initialized to point backwards to the beginning.
ysr@777 329 G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
ysr@777 330 bool init_to_zero);
ysr@777 331
ysr@777 332 // Note: this ought to be part of the constructor, but that would require
ysr@777 333 // "this" to be passed as a parameter to a member constructor for
ysr@777 334 // the containing concrete subtype of Space.
ysr@777 335 // This would be legal C++, but MS VC++ doesn't allow it.
ysr@777 336 void set_space(Space* sp);
ysr@777 337
ysr@777 338 // Resets the covered region to the given "mr".
ysr@777 339 void set_region(MemRegion mr);
ysr@777 340
ysr@777 341 // Resets the covered region to one with the same _bottom as before but
ysr@777 342 // the "new_word_size".
ysr@777 343 void resize(size_t new_word_size);
ysr@777 344
ysr@777 345 // These must be guaranteed to work properly (i.e., do nothing)
ysr@777 346 // when "blk_start" ("blk" for second version) is "NULL".
ysr@777 347 virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
ysr@777 348 virtual void alloc_block(HeapWord* blk, size_t size) {
ysr@777 349 alloc_block(blk, blk + size);
ysr@777 350 }
ysr@777 351
ysr@777 352 // The following methods are useful and optimized for a
ysr@777 353 // general, non-contiguous space.
ysr@777 354
ysr@777 355 // Given a block [blk_start, blk_start + full_blk_size), and
ysr@777 356 // a left_blk_size < full_blk_size, adjust the BOT to show two
ysr@777 357 // blocks [blk_start, blk_start + left_blk_size) and
ysr@777 358 // [blk_start + left_blk_size, blk_start + full_blk_size).
ysr@777 359 // It is assumed (and verified in the non-product VM) that the
ysr@777 360 // BOT was correct for the original block.
ysr@777 361 void split_block(HeapWord* blk_start, size_t full_blk_size,
ysr@777 362 size_t left_blk_size);
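  // For instance (illustration only): if the BOT currently describes one
  // block [blk_start, blk_start + 10*N_words) and split_block() is called
  // with left_blk_size == 3*N_words, it afterwards describes the two blocks
  // [blk_start, blk_start + 3*N_words) and
  // [blk_start + 3*N_words, blk_start + 10*N_words).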
ysr@777 363
ysr@777 364 // Adjust the BOT to show that it has a single block in the
ysr@777 365 // range [blk_start, blk_start + size). All necessary BOT
ysr@777 366 // cards are adjusted, but _unallocated_block isn't.
ysr@777 367 void single_block(HeapWord* blk_start, HeapWord* blk_end);
ysr@777 368 void single_block(HeapWord* blk, size_t size) {
ysr@777 369 single_block(blk, blk + size);
ysr@777 370 }
ysr@777 371
ysr@777 372 // Adjust BOT to show that it has a block in the range
ysr@777 373 // [blk_start, blk_start + size). Only the first card
ysr@777 374 // of BOT is touched. It is assumed (and verified in the
ysr@777 375 // non-product VM) that the remaining cards of the block
ysr@777 376 // are correct.
ysr@777 377 void mark_block(HeapWord* blk_start, HeapWord* blk_end);
ysr@777 378 void mark_block(HeapWord* blk, size_t size) {
ysr@777 379 mark_block(blk, blk + size);
ysr@777 380 }
ysr@777 381
ysr@777 382 // Adjust _unallocated_block to indicate that a particular
ysr@777 383 // block has been newly allocated or freed. It is assumed (and
ysr@777 384 // verified in the non-product VM) that the BOT is correct for
ysr@777 385 // the given block.
ysr@777 386 inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 387 // Verify that the BOT shows [blk, blk + blk_size) to be one block.
ysr@777 388 verify_single_block(blk_start, blk_end);
ysr@777 389 if (BlockOffsetArrayUseUnallocatedBlock) {
ysr@777 390 _unallocated_block = MAX2(_unallocated_block, blk_end);
ysr@777 391 }
ysr@777 392 }
ysr@777 393
ysr@777 394 inline void allocated(HeapWord* blk, size_t size) {
ysr@777 395 allocated(blk, blk + size);
ysr@777 396 }
ysr@777 397
ysr@777 398 inline void freed(HeapWord* blk_start, HeapWord* blk_end);
ysr@777 399
ysr@777 400 inline void freed(HeapWord* blk, size_t size);
ysr@777 401
ysr@777 402 virtual HeapWord* block_start_unsafe(const void* addr);
ysr@777 403 virtual HeapWord* block_start_unsafe_const(const void* addr) const;
ysr@777 404
ysr@777 405 // Requires "addr" to be the start of a card and returns the
ysr@777 406 // start of the block that contains the given address.
ysr@777 407 HeapWord* block_start_careful(const void* addr) const;
ysr@777 408
ysr@777 409 // If true, initialize array slots with no allocated blocks to zero.
ysr@777 410 // Otherwise, make them point back to the front.
ysr@777 411 bool init_to_zero() { return _init_to_zero; }
ysr@777 412
ysr@777 413 // Verification & debugging - ensure that the offset table reflects the fact
ysr@777 414 // that the block [blk_start, blk_end) or [blk, blk + size) is a
ysr@777 415 // single block of storage. NOTE: can't const this because of
ysr@777 416 // call to non-const do_block_internal() below.
ysr@777 417 inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 418 if (VerifyBlockOffsetArray) {
ysr@777 419 do_block_internal(blk_start, blk_end, Action_check);
ysr@777 420 }
ysr@777 421 }
ysr@777 422
ysr@777 423 inline void verify_single_block(HeapWord* blk, size_t size) {
ysr@777 424 verify_single_block(blk, blk + size);
ysr@777 425 }
ysr@777 426
tonyp@2453 427 // Used by region verification. Checks that the contents of the
tonyp@2453 428 // BOT reflect that there's a single object that spans the address
tonyp@2453 429 // range [obj_start, obj_start + word_size); returns true if this is
tonyp@2453 430 // the case, returns false if it's not.
tonyp@2453 431 bool verify_for_object(HeapWord* obj_start, size_t word_size) const;
tonyp@2453 432
ysr@777 433 // Verify that the given block is before _unallocated_block
ysr@777 434 inline void verify_not_unallocated(HeapWord* blk_start,
ysr@777 435 HeapWord* blk_end) const {
ysr@777 436 if (BlockOffsetArrayUseUnallocatedBlock) {
ysr@777 437 assert(blk_start < blk_end, "Block inconsistency?");
ysr@777 438 assert(blk_end <= _unallocated_block, "_unallocated_block problem");
ysr@777 439 }
ysr@777 440 }
ysr@777 441
ysr@777 442 inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
ysr@777 443 verify_not_unallocated(blk, blk + size);
ysr@777 444 }
ysr@777 445
ysr@777 446 void check_all_cards(size_t left_card, size_t right_card) const;
tonyp@2241 447
tonyp@2453 448 virtual void print_on(outputStream* out) PRODUCT_RETURN;
ysr@777 449 };
ysr@777 450
ysr@777 451 // A subtype of BlockOffsetArray that takes advantage of the fact
ysr@777 452 // that its underlying space is a ContiguousSpace, so that its "active"
ysr@777 453 // region can be more efficiently tracked (than for a non-contiguous space).
ysr@777 454 class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
ysr@777 455 friend class VMStructs;
ysr@777 456
ysr@777 457 // allocation boundary at which offset array must be updated
ysr@777 458 HeapWord* _next_offset_threshold;
ysr@777 459 size_t _next_offset_index; // index corresponding to that boundary
ysr@777 460
ysr@777 461 // Work function to be called when allocation start crosses the next
ysr@777 462 // threshold in the contig space.
ysr@777 463 void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 464 alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
ysr@777 465 blk_start, blk_end);
ysr@777 466 }
ysr@777 467
ysr@777 468
ysr@777 469 public:
ysr@777 470 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);
ysr@777 471
ysr@777 472 // Initialize the threshold to reflect the first boundary after the
ysr@777 473 // bottom of the covered region.
ysr@777 474 HeapWord* initialize_threshold();
ysr@777 475
ysr@777 476 // Zero out the entry for _bottom (offset will be zero).
ysr@777 477 void zero_bottom_entry();
ysr@777 478
ysr@777 479 // Return the next threshold, the point at which the table should be
ysr@777 480 // updated.
ysr@777 481 HeapWord* threshold() const { return _next_offset_threshold; }
ysr@777 482
ysr@777 483 // These must be guaranteed to work properly (i.e., do nothing)
ysr@777 484 // when "blk_start" ("blk" for second version) is "NULL". In this
ysr@777 485 // implementation, that's true because NULL is represented as 0, and thus
ysr@777 486 // never exceeds the "_next_offset_threshold".
ysr@777 487 void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 488 if (blk_end > _next_offset_threshold)
ysr@777 489 alloc_block_work1(blk_start, blk_end);
ysr@777 490 }
ysr@777 491 void alloc_block(HeapWord* blk, size_t size) {
ysr@777 492 alloc_block(blk, blk+size);
ysr@777 493 }
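  // Hypothetical usage sketch: a space that allocates sequentially calls
  // alloc_block() after every allocation. Most calls do nothing because the
  // new block ends below _next_offset_threshold; only an allocation that
  // crosses the next card boundary falls through to alloc_block_work1().
  //
  //   HeapWord* obj = allocate(word_size);   // hypothetical allocator
  //   _offsets.alloc_block(obj, word_size);  // "_offsets" name assumed here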
ysr@777 494
ysr@777 495 HeapWord* block_start_unsafe(const void* addr);
ysr@777 496 HeapWord* block_start_unsafe_const(const void* addr) const;
tonyp@2241 497
tonyp@2453 498 void set_for_starts_humongous(HeapWord* new_top);
tonyp@2453 499
tonyp@2453 500 virtual void print_on(outputStream* out) PRODUCT_RETURN;
ysr@777 501 };
stefank@2314 502
stefank@2314 503 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
