src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp

author:      johnc
date:        Tue, 21 Aug 2012 14:10:39 -0700
changeset:   3998:7383557659bd
parent:      3997:f99a36499b8c
child:       4300:2fc0334f613a
permissions: -rw-r--r--

7185699: G1: Prediction model discrepancies
Summary: Correct the result value of G1CollectedHeap::pending_card_num(). Change the code that calculates the GC efficiency of a non-young heap region to use historical data from mixed GCs and the actual number of live bytes when predicting how long it would take to collect the region. Changes were also reviewed by Thomas Schatzl.
Reviewed-by: azeemj, brutisso

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP

#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start".  For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important.  Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// While generally mirroring the structure of the BOT for GenCollectedHeap,
// the following types are tailored more towards G1's uses; these should,
// however, be merged back into a common BOT to avoid code duplication
// and reduce maintenance overhead.
//
//    G1BlockOffsetTable (abstract)
//    -- G1BlockOffsetArray                (uses G1BlockOffsetSharedArray)
//       -- G1BlockOffsetArrayContigSpace
//
// A main impediment to the consolidation of this code might be the
// effect of making some of the block_start*() calls non-const as
// below.  Whether that might adversely affect performance optimizations
// that compilers might normally perform in the case of non-G1
// collectors needs to be carefully investigated prior to any such
// consolidation.

// Forward declarations
class ContiguousSpace;
class G1BlockOffsetSharedArray;

class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.

  // The space this table is covering.
  HeapWord* _bottom;    // == reserved.start
  HeapWord* _end;       // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  G1BlockOffsetTable(HeapWord* bottom, HeapWord* end) :
    _bottom(bottom), _end(end)
  {
    assert(_bottom <= _end, "arguments out of order");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table may need to be resized as well.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.  (May have side effects, namely updating of
  // shared array entries that "point" too far backwards.  This can occur,
  // for example, when LAB allocation is used in a space covered by the
  // table.)
  virtual HeapWord* block_start_unsafe(const void* addr) = 0;
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  virtual HeapWord* block_start_unsafe_const(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.  (May have side effects,
  // namely updating of shared array entries that "point" too far
  // backwards.  This can occur, for example, when LAB allocation is used
  // in a space covered by the table.)
  inline HeapWord* block_start(const void* addr);
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  inline HeapWord* block_start_const(const void* addr) const;
};
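
// Example (an illustrative sketch, not part of this interface): a typical
// caller resolves the start of the object containing an arbitrary heap
// address, e.g. while scanning a dirty card:
//
//   G1BlockOffsetTable* bot = ...;  // hypothetical handle to a table
//   HeapWord* card_addr     = ...;  // some address within the covered space
//   HeapWord* obj_start     = bot->block_start(card_addr);
//   // obj_start <= card_addr, and the block starting at obj_start
//   // contains card_addr.
//
// The *_const variants are for contexts that must not mutate the table.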

// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN").  An array with an entry
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space.  However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place, as happens,
// for example, in G1 or in the train generation.

// Here is the shared array type.

class G1BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class G1BlockOffsetArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  // The reserved region covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array of offsets used to quickly retrieve the start of the object
  // containing a given address.
  VirtualSpace _vs;
  u_char* _offset_array;          // byte array keeping backwards offsets

  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }

  void set_offset_array(size_t index, u_char offset) {
    assert(index < _vs.committed_size(), "index out of range");
    assert(offset <= N_words, "offset too large");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    _offset_array[index] = (u_char) pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) {
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> LogN_words;
    if (UseMemSetInBOT) {
      memset(&_offset_array[index_for(left)], offset, num_cards);
    } else {
      size_t i = index_for(left);
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        _offset_array[i] = offset;
      }
    }
  }

  void set_offset_array(size_t left, size_t right, u_char offset) {
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;
    if (UseMemSetInBOT) {
      memset(&_offset_array[left], offset, num_cards);
    } else {
      size_t i = left;
      const size_t end = i + num_cards;
      for (; i < end; i++) {
        _offset_array[i] = offset;
      }
    }
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= N_words, "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.

  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / N_words) + 1;
    return ReservedSpace::page_align_size_up(number_of_slots);
  }
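
  // Worked example (illustrative): on a 64-bit VM, where N_words is 64
  // (see the constants below), covering a 1M-word region needs
  // 1M / 64 + 1 = 16385 one-byte slots, which is then rounded up to a
  // whole number of pages.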

public:
  enum SomePublicConstants {
    LogN = 9,
    LogN_words = LogN - LogHeapWordSize,
    N_bytes = 1 << LogN,
    N_words = 1 << LogN_words
  };
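
  // Arithmetic implied by the constants above: LogN = 9 makes each BOT
  // entry cover a 512-byte "card" (N_bytes = 1 << 9 = 512).  On a 64-bit
  // VM, where LogHeapWordSize is 3, that is N_words = 1 << (9 - 3) = 64
  // heap words per entry.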

  // Initialize the table to cover from "base" to (at least)
  // "base + init_word_size".  In the future, the table may be expanded
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".)  The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // G1BlockOffsetTable(s) to initialize cards.
  G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table.  The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Updates all the BlockOffsetArrays sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();

  // Return the appropriate index into "_offset_array" for "p".
  inline size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  inline HeapWord* address_for_index(size_t index) const;
};
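
// In sketch, the two mappings above are simple linear arithmetic (the
// actual inline definitions live in g1BlockOffsetTable.inline.hpp and
// carry additional asserts):
//
//   index_for(p)         ~ pointer_delta(p, _reserved.start()) >> LogN_words
//   address_for_index(i) ~ _reserved.start() + (i << LogN_words)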

// And here is the G1BlockOffsetTable subtype that uses the array.

class G1BlockOffsetArray: public G1BlockOffsetTable {
  friend class G1BlockOffsetSharedArray;
  friend class G1BlockOffsetArrayContigSpace;
  friend class VMStructs;
private:
  enum SomePrivateConstants {
    N_words = G1BlockOffsetSharedArray::N_words,
    LogN    = G1BlockOffsetSharedArray::LogN
  };

  // The following enums are used by do_block_internal()
  enum Action {
    Action_single,      // BOT records a single block (see single_block())
    Action_mark,        // BOT marks the start of a block (see mark_block())
    Action_check        // Check that BOT records block correctly
                        // (see verify_single_block()).
  };

  // This is the array, which can be shared by several BlockOffsetArrays
  // servicing different parts of the covered region.
  G1BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;

  // If "_sp" is a contiguous space, the field below is the view of "_sp"
  // as a contiguous space, else NULL.
  ContiguousSpace* _csp;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;

  // The portion [_unallocated_block, _sp.end()) of the space that
  // is a single block known not to contain any objects.
  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
  HeapWord* _unallocated_block;

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the
  // interval [start, end) is right-open.
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, Action action);

protected:

  ContiguousSpace* csp() const { return _csp; }

  // Returns the address of a block whose start is at most "addr".
  // If "has_max_index" is true, assumes that "max_index" is the last
  // valid one in the array.
  inline HeapWord* block_at_or_preceding(const void* addr,
                                         bool has_max_index,
                                         size_t max_index) const;

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.)  Return the address of the
  // beginning of the block that contains "addr".  Does so without side
  // effects (see, e.g., spec of block_start.)
  inline HeapWord*
  forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                         const void* addr) const;

  // "q" is a block boundary that is <= "addr"; return the address of the
  // beginning of the block that contains "addr".  May have side effects
  // on "this", by updating imprecise entries.
  inline HeapWord* forward_to_block_containing_addr(HeapWord* q,
                                                    const void* addr);

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.)  Return the address of the
  // beginning of the block that contains "addr".  May have side effects
  // on "this", by updating imprecise entries.
  HeapWord* forward_to_block_containing_addr_slow(HeapWord* q,
                                                  HeapWord* n,
                                                  const void* addr);

  // Requires that "*threshold_" be the first array entry boundary at or
  // above "blk_start", and that "*index_" be the corresponding array
  // index.  If the block starts at or crosses "*threshold_", records
  // "blk_start" as the appropriate block start for the array index
  // starting at "*threshold_", and for any other indices crossed by the
  // block.  Updates "*threshold_" and "*index_" to correspond to the first
  // index after the block end.
  void alloc_block_work2(HeapWord** threshold_, size_t* index_,
                         HeapWord* blk_start, HeapWord* blk_end);

public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter.  If "init_to_zero" is true, the
  // elements of the array are initialized to zero.  Otherwise, they are
  // initialized to point backwards to the beginning.
  G1BlockOffsetArray(G1BlockOffsetSharedArray* array, MemRegion mr,
                     bool init_to_zero);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp);

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr);

  // Resets the covered region to one with the same _bottom as before but
  // the "new_word_size".
  void resize(size_t new_word_size);

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL".
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  virtual void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  // The following methods are useful and optimized for a
  // general, non-contiguous space.

  // Given a block [blk_start, blk_start + full_blk_size), and
  // a left_blk_size < full_blk_size, adjust the BOT to show two
  // blocks [blk_start, blk_start + left_blk_size) and
  // [blk_start + left_blk_size, blk_start + full_blk_size).
  // It is assumed (and verified in the non-product VM) that the
  // BOT was correct for the original block.
  void split_block(HeapWord* blk_start, size_t full_blk_size,
                   size_t left_blk_size);
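
  // Example (illustrative): if the BOT currently describes one block
  // covering [B, B + 10) in words, then split_block(B, 10, 4) adjusts it
  // to describe the two blocks [B, B + 4) and [B + 4, B + 10).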

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size).  All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  }

  // Adjust BOT to show that it has a block in the range
  // [blk_start, blk_start + size).  Only the first card
  // of BOT is touched.  It is assumed (and verified in the
  // non-product VM) that the remaining cards of the block
  // are correct.
  void mark_block(HeapWord* blk_start, HeapWord* blk_end);
  void mark_block(HeapWord* blk, size_t size) {
    mark_block(blk, blk + size);
  }

  // Adjust _unallocated_block to indicate that a particular
  // block has been newly allocated or freed.  It is assumed (and
  // verified in the non-product VM) that the BOT is correct for
  // the given block.
  inline void allocated(HeapWord* blk_start, HeapWord* blk_end) {
    // Verify that the BOT shows [blk_start, blk_end) to be one block.
    verify_single_block(blk_start, blk_end);
    if (BlockOffsetArrayUseUnallocatedBlock) {
      _unallocated_block = MAX2(_unallocated_block, blk_end);
    }
  }

  inline void allocated(HeapWord* blk, size_t size) {
    allocated(blk, blk + size);
  }
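
  // Informally (an illustrative reading, assuming
  // BlockOffsetArrayUseUnallocatedBlock is enabled): after allocated(),
  // addresses below _unallocated_block are covered by real BOT entries,
  // while [_unallocated_block, _sp.end()) remains one trivial block known
  // to contain no objects.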

  inline void freed(HeapWord* blk_start, HeapWord* blk_end);

  inline void freed(HeapWord* blk, size_t size);

  virtual HeapWord* block_start_unsafe(const void* addr);
  virtual HeapWord* block_start_unsafe_const(const void* addr) const;

  // Requires "addr" to be the start of a card and returns the
  // start of the block that contains the given address.
  HeapWord* block_start_careful(const void* addr) const;

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }

  // Verification & debugging - ensure that the offset table reflects the fact
  // that the block [blk_start, blk_end) or [blk, blk + size) is a
  // single block of storage.  NOTE: can't const this because of
  // call to non-const do_block_internal() below.
  inline void verify_single_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (VerifyBlockOffsetArray) {
      do_block_internal(blk_start, blk_end, Action_check);
    }
  }

  inline void verify_single_block(HeapWord* blk, size_t size) {
    verify_single_block(blk, blk + size);
  }

  // Used by region verification.  Checks that the contents of the
  // BOT reflect that there's a single object that spans the address
  // range [obj_start, obj_start + word_size); returns true if this is
  // the case, returns false if it's not.
  bool verify_for_object(HeapWord* obj_start, size_t word_size) const;

  // Verify that the given block is before _unallocated_block
  inline void verify_not_unallocated(HeapWord* blk_start,
                                     HeapWord* blk_end) const {
    if (BlockOffsetArrayUseUnallocatedBlock) {
      assert(blk_start < blk_end, "Block inconsistency?");
      assert(blk_end <= _unallocated_block, "_unallocated_block problem");
    }
  }

  inline void verify_not_unallocated(HeapWord* blk, size_t size) const {
    verify_not_unallocated(blk, blk + size);
  }

  void check_all_cards(size_t left_card, size_t right_card) const;

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
  friend class VMStructs;

  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;   // index corresponding to that boundary

  // Work function to be called when allocation start crosses the next
  // threshold in the contig space.
  void alloc_block_work1(HeapWord* blk_start, HeapWord* blk_end) {
    alloc_block_work2(&_next_offset_threshold, &_next_offset_index,
                      blk_start, blk_end);
  }


public:
  G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr);

  // Initialize the threshold to reflect the first boundary after the
  // bottom of the covered region.
  HeapWord* initialize_threshold();

  // Zero out the entry for _bottom (offset will be zero).
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL".  In this
  // implementation, that's true because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold)
      alloc_block_work1(blk_start, blk_end);
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
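
  // Example (an illustrative sketch of the fast path, with a hypothetical
  // allocator): BOT maintenance in a contiguous region costs a single
  // compare unless the new block crosses the current card threshold:
  //
  //   HeapWord* obj = region->allocate(word_size);  // hypothetical
  //   bot->alloc_block(obj, word_size);             // no-op unless
  //                                                 // obj + word_size
  //                                                 // crosses threshold()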

  HeapWord* block_start_unsafe(const void* addr);
  HeapWord* block_start_unsafe_const(const void* addr) const;

  void set_for_starts_humongous(HeapWord* new_top);

  virtual void print_on(outputStream* out) PRODUCT_RETURN;
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP
