src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp

author:      tonyp
date:        Tue, 10 Jan 2012 18:58:13 -0500
changeset:   3416:2ace1c4ee8da
parent:      2453:2250ee17e258
child:       3900:d2a62e0f25eb
permissions: -rw-r--r--

6888336: G1: avoid explicitly marking and pushing objects in survivor spaces
Summary: This change simplifies the interaction between GC and concurrent marking. By disabling survivor spaces during the initial-mark pause we don't need to propagate marks of objects we copy during each GC (since we never need to copy an explicitly marked object).
Reviewed-by: johnc, brutisso

ysr@777 1 /*
tonyp@2453 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@2314 27 #include "memory/space.hpp"
stefank@2314 28 #include "oops/oop.inline.hpp"
stefank@2314 29 #include "runtime/java.hpp"
ysr@777 30
ysr@777 31 //////////////////////////////////////////////////////////////////////
ysr@777 32 // G1BlockOffsetSharedArray
ysr@777 33 //////////////////////////////////////////////////////////////////////
ysr@777 34
ysr@777 35 G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
ysr@777 36 size_t init_word_size) :
ysr@777 37 _reserved(reserved), _end(NULL)
ysr@777 38 {
ysr@777 39 size_t size = compute_size(reserved.word_size());
ysr@777 40 ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
ysr@777 41 if (!rs.is_reserved()) {
ysr@777 42 vm_exit_during_initialization("Could not reserve enough space for heap offset array");
ysr@777 43 }
ysr@777 44 if (!_vs.initialize(rs, 0)) {
ysr@777 45 vm_exit_during_initialization("Could not reserve enough space for heap offset array");
ysr@777 46 }
ysr@777 47 _offset_array = (u_char*)_vs.low_boundary();
ysr@777 48 resize(init_word_size);
ysr@777 49 if (TraceBlockOffsetTable) {
ysr@777 50 gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
ysr@777 51 gclog_or_tty->print_cr(" "
ysr@777 52 " rs.base(): " INTPTR_FORMAT
ysr@777 53 " rs.size(): " INTPTR_FORMAT
ysr@777 54 " rs end(): " INTPTR_FORMAT,
ysr@777 55 rs.base(), rs.size(), rs.base() + rs.size());
ysr@777 56 gclog_or_tty->print_cr(" "
ysr@777 57 " _vs.low_boundary(): " INTPTR_FORMAT
ysr@777 58 " _vs.high_boundary(): " INTPTR_FORMAT,
ysr@777 59 _vs.low_boundary(),
ysr@777 60 _vs.high_boundary());
ysr@777 61 }
ysr@777 62 }
ysr@777 63
ysr@777 64 void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
ysr@777 65 assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
ysr@777 66 size_t new_size = compute_size(new_word_size);
ysr@777 67 size_t old_size = _vs.committed_size();
ysr@777 68 size_t delta;
ysr@777 69 char* high = _vs.high();
ysr@777 70 _end = _reserved.start() + new_word_size;
ysr@777 71 if (new_size > old_size) {
ysr@777 72 delta = ReservedSpace::page_align_size_up(new_size - old_size);
ysr@777 73 assert(delta > 0, "just checking");
ysr@777 74 if (!_vs.expand_by(delta)) {
ysr@777 75 // Do better than this for Merlin
ysr@777 76 vm_exit_out_of_memory(delta, "offset table expansion");
ysr@777 77 }
ysr@777 78 assert(_vs.high() == high + delta, "invalid expansion");
ysr@777 79 // Initialization of the contents is left to the
ysr@777 80 // G1BlockOffsetArray that uses it.
ysr@777 81 } else {
ysr@777 82 delta = ReservedSpace::page_align_size_down(old_size - new_size);
ysr@777 83 if (delta == 0) return;
ysr@777 84 _vs.shrink_by(delta);
ysr@777 85 assert(_vs.high() == high - delta, "invalid shrinkage");
ysr@777 86 }
ysr@777 87 }
ysr@777 88
ysr@777 89 bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
ysr@777 90 assert(p >= _reserved.start(), "just checking");
ysr@777 91 size_t delta = pointer_delta(p, _reserved.start());
ysr@777 92 return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
ysr@777 93 }
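
// A minimal usage sketch for the predicate above (the concrete pointer is
// hypothetical; with the default 512-byte BOT cards, N_words is 64 on a
// 64-bit VM):
//
//   HeapWord* p = _reserved.start() + 2 * N_words;
//   assert(is_card_boundary(p),      "a multiple of N_words past start is aligned");
//   assert(!is_card_boundary(p + 1), "one word past a boundary is not");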
ysr@777 94
ysr@777 95
ysr@777 96 //////////////////////////////////////////////////////////////////////
ysr@777 97 // G1BlockOffsetArray
ysr@777 98 //////////////////////////////////////////////////////////////////////
ysr@777 99
ysr@777 100 G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
ysr@777 101 MemRegion mr, bool init_to_zero) :
ysr@777 102 G1BlockOffsetTable(mr.start(), mr.end()),
ysr@777 103 _unallocated_block(_bottom),
ysr@777 104 _array(array), _csp(NULL),
ysr@777 105 _init_to_zero(init_to_zero) {
ysr@777 106 assert(_bottom <= _end, "arguments out of order");
ysr@777 107 if (!_init_to_zero) {
ysr@777 108 // initialize cards to point back to mr.start()
ysr@777 109 set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
ysr@777 110 _array->set_offset_array(0, 0); // set first card to 0
ysr@777 111 }
ysr@777 112 }
ysr@777 113
ysr@777 114 void G1BlockOffsetArray::set_space(Space* sp) {
ysr@777 115 _sp = sp;
ysr@777 116 _csp = sp->toContiguousSpace();
ysr@777 117 }
ysr@777 118
ysr@777 119 // The arguments follow the normal convention of denoting
ysr@777 120 // a right-open interval: [start, end)
ysr@777 121 void
ysr@777 122 G1BlockOffsetArray::set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
ysr@777 123
ysr@777 124 if (start >= end) {
ysr@777 125 // The start address is equal to the end address (or to
ysr@777 126 // the right of the end address) so there are no cards
ysr@777 127 // that need to be updated.
ysr@777 128 return;
ysr@777 129 }
ysr@777 130
ysr@777 131 // Write the backskip value for each region.
ysr@777 132 //
ysr@777 133 // offset
ysr@777 134 // card             2nd                       3rd
ysr@777 135 //  | +- 1st        |                         |
ysr@777 136 //  v v             v                         v
ysr@777 137 // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
ysr@777 138 // |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
ysr@777 139 // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
ysr@777 140 // 11               19                        75
ysr@777 141 //    12
ysr@777 142 //
ysr@777 143 // offset card is the card that points to the start of an object
ysr@777 144 // x - offset value of offset card
ysr@777 145 // 1st - start of first logarithmic region
ysr@777 146 // 0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
ysr@777 147 // 2nd - start of second logarithmic region
ysr@777 148 // 1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
ysr@777 149 // 3rd - start of third logarithmic region
ysr@777 150 // 2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
ysr@777 151 //
ysr@777 152 // integer below the block offset entry is an example of
ysr@777 153 // the index of the entry
ysr@777 154 //
ysr@777 155 // Given an address,
ysr@777 156 // Find the index for the address
ysr@777 157 // Find the block offset table entry
ysr@777 158 // Convert the entry to a backskip
ysr@777 159 // (e.g., with today's constants, offset = 0x81 =>
ysr@777 160 // backskip = 2**(3*(0x81 - N_words)) = 2**3 = 8)
ysr@777 161 // Move back N (e.g., 8) entries and repeat with the
ysr@777 162 // value of the new entry
ysr@777 163 //
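// A decoding sketch of the walk described above (this mirrors
// block_at_or_preceding() in g1BlockOffsetTable.inline.hpp; the starting
// card c is hypothetical):
//
//   size_t c = ...;                             // card being queried
//   HeapWord* q = _array->address_for_index(c);
//   u_char entry = _array->offset_array(c);
//   while (entry >= N_words) {                  // logarithmic (backskip) entry
//     size_t back = BlockOffsetArray::entry_to_cards_back(entry);
//     q -= back * N_words;
//     c -= back;
//     entry = _array->offset_array(c);
//   }
//   q -= entry;                                 // direct offset, in words
//   // q is now the start of the block covering the queried card's first word.
//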
ysr@777 164 size_t start_card = _array->index_for(start);
ysr@777 165 size_t end_card = _array->index_for(end-1);
ysr@777 166 assert(start == _array->address_for_index(start_card), "Precondition");
ysr@777 167 assert(end == _array->address_for_index(end_card) + N_words, "Precondition");
ysr@777 168 set_remainder_to_point_to_start_incl(start_card, end_card); // closed interval
ysr@777 169 }
ysr@777 170
ysr@777 171 // Unlike the normal convention in this code, the arguments here denote
ysr@777 172 // a closed, inclusive interval: [start_card, end_card], cf. set_remainder_to_point_to_start()
ysr@777 173 // above.
ysr@777 174 void
ysr@777 175 G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
ysr@777 176 if (start_card > end_card) {
ysr@777 177 return;
ysr@777 178 }
ysr@777 179 assert(start_card > _array->index_for(_bottom), "Cannot be first card");
ysr@777 180 assert(_array->offset_array(start_card-1) <= N_words,
tonyp@2241 181 "Offset card has an unexpected value");
ysr@777 182 size_t start_card_for_region = start_card;
ysr@777 183 u_char offset = max_jubyte;
ysr@777 184 for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
ysr@777 185 // -1 so that the card with the actual offset is counted. Another -1
ysr@777 186 // so that the reach ends in this region and not at the start
ysr@777 187 // of the next.
ysr@777 188 size_t reach = start_card - 1 + (BlockOffsetArray::power_to_cards_back(i+1) - 1);
ysr@777 189 offset = N_words + i;
ysr@777 190 if (reach >= end_card) {
ysr@777 191 _array->set_offset_array(start_card_for_region, end_card, offset);
ysr@777 192 start_card_for_region = reach + 1;
ysr@777 193 break;
ysr@777 194 }
ysr@777 195 _array->set_offset_array(start_card_for_region, reach, offset);
ysr@777 196 start_card_for_region = reach + 1;
ysr@777 197 }
ysr@777 198 assert(start_card_for_region > end_card, "Sanity check");
ysr@777 199 DEBUG_ONLY(check_all_cards(start_card, end_card);)
ysr@777 200 }
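
// Sketch of the resulting filling pattern (symbolic, in terms of
// BlockOffsetArray::power_to_cards_back(); card numbers are relative to
// start_card):
//
//   i == 0: cards [start_card, start_card + power_to_cards_back(1) - 2]
//           receive the entry N_words + 0;
//   i == 1: the following cards, up to start_card + power_to_cards_back(2) - 2,
//           receive N_words + 1;
//   ...     and so on, until a region reaches end_card, where the loop stops.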
ysr@777 201
ysr@777 202 // The block [blk_start, blk_end) has been allocated;
ysr@777 203 // adjust the block offset table to represent this information;
ysr@777 204 // right-open interval: [blk_start, blk_end)
ysr@777 205 void
ysr@777 206 G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 207 mark_block(blk_start, blk_end);
ysr@777 208 allocated(blk_start, blk_end);
ysr@777 209 }
ysr@777 210
ysr@777 211 // Adjust BOT to show that a previously whole block has been split
ysr@777 212 // into two.
ysr@777 213 void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
ysr@777 214 size_t left_blk_size) {
ysr@777 215 // Verify that the BOT shows [blk, blk + blk_size) to be one block.
ysr@777 216 verify_single_block(blk, blk_size);
ysr@777 217 // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
ysr@777 218 // is one single block.
ysr@777 219 mark_block(blk + left_blk_size, blk + blk_size);
ysr@777 220 }
ysr@777 221
ysr@777 222
ysr@777 223 // Action_mark - update the BOT for the block [blk_start, blk_end).
ysr@777 224 // Current typical use is for splitting a block.
tonyp@2453 225 // Action_single - update the BOT for an allocation.
ysr@777 226 // Action_verify - BOT verification.
ysr@777 227 void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
ysr@777 228 HeapWord* blk_end,
ysr@777 229 Action action) {
ysr@777 230 assert(Universe::heap()->is_in_reserved(blk_start),
ysr@777 231 "reference must be into the heap");
ysr@777 232 assert(Universe::heap()->is_in_reserved(blk_end-1),
ysr@777 233 "limit must be within the heap");
ysr@777 234 // This is optimized to make the test fast, assuming we only rarely
ysr@777 235 // cross boundaries.
ysr@777 236 uintptr_t end_ui = (uintptr_t)(blk_end - 1);
ysr@777 237 uintptr_t start_ui = (uintptr_t)blk_start;
ysr@777 238 // Calculate the last card boundary preceding end of blk
ysr@777 239 intptr_t boundary_before_end = (intptr_t)end_ui;
ysr@777 240 clear_bits(boundary_before_end, right_n_bits(LogN));
ysr@777 241 if (start_ui <= (uintptr_t)boundary_before_end) {
ysr@777 242 // blk starts at or crosses a boundary
ysr@777 243 // Calculate index of card on which blk begins
ysr@777 244 size_t start_index = _array->index_for(blk_start);
ysr@777 245 // Index of card on which blk ends
ysr@777 246 size_t end_index = _array->index_for(blk_end - 1);
ysr@777 247 // Start address of card on which blk begins
ysr@777 248 HeapWord* boundary = _array->address_for_index(start_index);
ysr@777 249 assert(boundary <= blk_start, "blk should start at or after boundary");
ysr@777 250 if (blk_start != boundary) {
ysr@777 251 // blk starts strictly after boundary
ysr@777 252 // adjust card boundary and start_index forward to next card
ysr@777 253 boundary += N_words;
ysr@777 254 start_index++;
ysr@777 255 }
ysr@777 256 assert(start_index <= end_index, "monotonicity of index_for()");
ysr@777 257 assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
ysr@777 258 switch (action) {
ysr@777 259 case Action_mark: {
ysr@777 260 if (init_to_zero()) {
ysr@777 261 _array->set_offset_array(start_index, boundary, blk_start);
ysr@777 262 break;
ysr@777 263 } // Else fall through to the next case
ysr@777 264 }
ysr@777 265 case Action_single: {
ysr@777 266 _array->set_offset_array(start_index, boundary, blk_start);
ysr@777 267 // We have finished marking the "offset card". We need to now
ysr@777 268 // mark the subsequent cards that this blk spans.
ysr@777 269 if (start_index < end_index) {
ysr@777 270 HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
ysr@777 271 HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
ysr@777 272 set_remainder_to_point_to_start(rem_st, rem_end);
ysr@777 273 }
ysr@777 274 break;
ysr@777 275 }
ysr@777 276 case Action_check: {
ysr@777 277 _array->check_offset_array(start_index, boundary, blk_start);
ysr@777 278 // We have finished checking the "offset card". We need to now
ysr@777 279 // check the subsequent cards that this blk spans.
ysr@777 280 check_all_cards(start_index + 1, end_index);
ysr@777 281 break;
ysr@777 282 }
ysr@777 283 default:
ysr@777 284 ShouldNotReachHere();
ysr@777 285 }
ysr@777 286 }
ysr@777 287 }
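
// Illustrative effect of the three actions (card numbers are hypothetical;
// assume the block spans cards 10..13, blk_start is exactly the start of
// card 10, and init_to_zero() is true, as it is for G1 contiguous spaces):
//
//   Action_mark   : only the "offset card" 10 is written (entry 0); cards
//                   11..13 are refined lazily, see
//                   forward_to_block_containing_addr_slow().
//   Action_single : card 10 is written as above and cards 11..13 get
//                   backskip entries via set_remainder_to_point_to_start().
//   Action_check  : the same entries are verified instead of written.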
ysr@777 288
ysr@777 289 // The card-interval [start_card, end_card] is a closed interval; this
ysr@777 290 // is an expensive check -- use with care and only under the protection
ysr@777 291 // of a suitable flag.
ysr@777 292 void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {
ysr@777 293
ysr@777 294 if (end_card < start_card) {
ysr@777 295 return;
ysr@777 296 }
ysr@777 297 guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
ysr@777 298 for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
ysr@777 299 u_char entry = _array->offset_array(c);
ysr@777 300 if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
ysr@777 301 guarantee(entry > N_words, "Should be in logarithmic region");
ysr@777 302 }
ysr@777 303 size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
ysr@777 304 size_t landing_card = c - backskip;
ysr@777 305 guarantee(landing_card >= (start_card - 1), "Inv");
ysr@777 306 if (landing_card >= start_card) {
ysr@777 307 guarantee(_array->offset_array(landing_card) <= entry, "monotonicity");
ysr@777 308 } else {
ysr@777 309 guarantee(landing_card == start_card - 1, "Tautology");
ysr@777 310 guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
ysr@777 311 }
ysr@777 312 }
ysr@777 313 }
ysr@777 314
ysr@777 315 // The range [blk_start, blk_end) represents a single contiguous block
ysr@777 316 // of storage; modify the block offset table to represent this
ysr@777 317 // information; Right-open interval: [blk_start, blk_end)
ysr@777 318 // NOTE: this method does _not_ adjust _unallocated_block.
ysr@777 319 void
ysr@777 320 G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 321 do_block_internal(blk_start, blk_end, Action_single);
ysr@777 322 }
ysr@777 323
ysr@777 324 // Mark the BOT such that if [blk_start, blk_end) straddles a card
ysr@777 325 // boundary, the card following the first such boundary is marked
ysr@777 326 // with the appropriate offset.
ysr@777 327 // NOTE: this method does _not_ adjust _unallocated_block or
ysr@777 328 // any cards subsequent to the first one.
ysr@777 329 void
ysr@777 330 G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 331 do_block_internal(blk_start, blk_end, Action_mark);
ysr@777 332 }
ysr@777 333
ysr@777 334 HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
ysr@777 335 assert(_bottom <= addr && addr < _end,
ysr@777 336 "addr must be covered by this Array");
ysr@777 337 // Must read this exactly once because it can be modified by parallel
ysr@777 338 // allocation.
ysr@777 339 HeapWord* ub = _unallocated_block;
ysr@777 340 if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
ysr@777 341 assert(ub < _end, "tautology (see above)");
ysr@777 342 return ub;
ysr@777 343 }
ysr@777 344 // Otherwise, find the block start using the table.
ysr@777 345 HeapWord* q = block_at_or_preceding(addr, false, 0);
ysr@777 346 return forward_to_block_containing_addr(q, addr);
ysr@777 347 }
ysr@777 348
ysr@777 349 // This duplicates a little code from the above: unavoidable.
ysr@777 350 HeapWord*
ysr@777 351 G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
ysr@777 352 assert(_bottom <= addr && addr < _end,
ysr@777 353 "addr must be covered by this Array");
ysr@777 354 // Must read this exactly once because it can be modified by parallel
ysr@777 355 // allocation.
ysr@777 356 HeapWord* ub = _unallocated_block;
ysr@777 357 if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
ysr@777 358 assert(ub < _end, "tautology (see above)");
ysr@777 359 return ub;
ysr@777 360 }
ysr@777 361 // Otherwise, find the block start using the table.
ysr@777 362 HeapWord* q = block_at_or_preceding(addr, false, 0);
ysr@777 363 HeapWord* n = q + _sp->block_size(q);
ysr@777 364 return forward_to_block_containing_addr_const(q, n, addr);
ysr@777 365 }
ysr@777 366
ysr@777 367
ysr@777 368 HeapWord*
ysr@777 369 G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
ysr@777 370 HeapWord* n,
ysr@777 371 const void* addr) {
ysr@777 372 // We're not in the normal case. We need to handle an important subcase
ysr@777 373 // here: LAB allocation. An allocation previously recorded in the
ysr@777 374 // offset table was actually a lab allocation, and was divided into
ysr@777 375 // several objects subsequently. Fix this situation as we answer the
ysr@777 376 // query, by updating entries as we cross them.
iveresov@787 377
iveresov@787 378 // If the first object's end n is at a card boundary, start refining
iveresov@787 379 // with the corresponding card (the value of the entry will basically be
iveresov@787 380 // set to 0). If the object crosses the boundary, start from the next card.
iveresov@787 381 size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
ysr@777 382 HeapWord* next_boundary = _array->address_for_index(next_index);
ysr@777 383 if (csp() != NULL) {
ysr@777 384 if (addr >= csp()->top()) return csp()->top();
ysr@777 385 while (next_boundary < addr) {
ysr@777 386 while (n <= next_boundary) {
ysr@777 387 q = n;
ysr@777 388 oop obj = oop(q);
ysr@1280 389 if (obj->klass_or_null() == NULL) return q;
ysr@777 390 n += obj->size();
ysr@777 391 }
ysr@777 392 assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
ysr@777 393 // [q, n) is the block that crosses the boundary.
ysr@777 394 alloc_block_work2(&next_boundary, &next_index, q, n);
ysr@777 395 }
ysr@777 396 } else {
ysr@777 397 while (next_boundary < addr) {
ysr@777 398 while (n <= next_boundary) {
ysr@777 399 q = n;
ysr@777 400 oop obj = oop(q);
ysr@1280 401 if (obj->klass_or_null() == NULL) return q;
ysr@777 402 n += _sp->block_size(q);
ysr@777 403 }
ysr@777 404 assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
ysr@777 405 // [q, n) is the block that crosses the boundary.
ysr@777 406 alloc_block_work2(&next_boundary, &next_index, q, n);
ysr@777 407 }
ysr@777 408 }
ysr@777 409 return forward_to_block_containing_addr_const(q, n, addr);
ysr@777 410 }
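
// A hypothetical refinement scenario for the code above: a LAB spanning
// several cards was recorded by alloc_block() as a single block and later
// carved into objects o1, o2, ... The first block-start query that walks
// past the LAB start advances object by object; each time an object
// [q, n) is found to cross the current next_boundary, alloc_block_work2()
// records its offset, so later queries on those cards are answered
// directly from the table.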
ysr@777 411
ysr@777 412 HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
ysr@777 413 assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
ysr@777 414
ysr@777 415 assert(_bottom <= addr && addr < _end,
ysr@777 416 "addr must be covered by this Array");
ysr@777 417 // Must read this exactly once because it can be modified by parallel
ysr@777 418 // allocation.
ysr@777 419 HeapWord* ub = _unallocated_block;
ysr@777 420 if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
ysr@777 421 assert(ub < _end, "tautology (see above)");
ysr@777 422 return ub;
ysr@777 423 }
ysr@777 424
ysr@777 425 // Otherwise, find the block start using the table, but taking
ysr@777 426 // care (cf. block_start_unsafe() above) not to parse any objects/blocks
ysr@777 427 // on the cards themselves.
ysr@777 428 size_t index = _array->index_for(addr);
ysr@777 429 assert(_array->address_for_index(index) == addr,
ysr@777 430 "arg should be start of card");
ysr@777 431
ysr@777 432 HeapWord* q = (HeapWord*)addr;
ysr@777 433 uint offset;
ysr@777 434 do {
ysr@777 435 offset = _array->offset_array(index--);
ysr@777 436 q -= offset;
ysr@777 437 } while (offset == N_words);
ysr@777 438 assert(q <= addr, "block start should be to left of arg");
ysr@777 439 return q;
ysr@777 440 }
ysr@777 441
ysr@777 442 // Note that the committed size of the covered space may have changed,
ysr@777 443 // so the table size may also need to change.
ysr@777 444 void G1BlockOffsetArray::resize(size_t new_word_size) {
ysr@777 445 HeapWord* new_end = _bottom + new_word_size;
ysr@777 446 if (_end < new_end && !init_to_zero()) {
ysr@777 447 // verify that the old and new boundaries are also card boundaries
ysr@777 448 assert(_array->is_card_boundary(_end),
ysr@777 449 "_end not a card boundary");
ysr@777 450 assert(_array->is_card_boundary(new_end),
ysr@777 451 "new _end would not be a card boundary");
ysr@777 452 // set all the newly added cards
ysr@777 453 _array->set_offset_array(_end, new_end, N_words);
ysr@777 454 }
ysr@777 455 _end = new_end; // update _end
ysr@777 456 }
ysr@777 457
ysr@777 458 void G1BlockOffsetArray::set_region(MemRegion mr) {
ysr@777 459 _bottom = mr.start();
ysr@777 460 _end = mr.end();
ysr@777 461 }
ysr@777 462
ysr@777 463 //
ysr@777 464 //               threshold_
ysr@777 465 //               |   _index_
ysr@777 466 //               v   v
ysr@777 467 //       +-------+-------+-------+-------+-------+
ysr@777 468 //       |  i-1  |   i   |  i+1  |  i+2  |  i+3  |
ysr@777 469 //       +-------+-------+-------+-------+-------+
ysr@777 470 //        ( ^    ]
ysr@777 471 //          block-start
ysr@777 472 //
ysr@777 473 void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
ysr@777 474 HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 475 // For efficiency, do copy-in/copy-out.
ysr@777 476 HeapWord* threshold = *threshold_;
ysr@777 477 size_t index = *index_;
ysr@777 478
ysr@777 479 assert(blk_start != NULL && blk_end > blk_start,
ysr@777 480 "phantom block");
ysr@777 481 assert(blk_end > threshold, "should be past threshold");
jcoomes@1844 482 assert(blk_start <= threshold, "blk_start should be at or before threshold");
ysr@777 483 assert(pointer_delta(threshold, blk_start) <= N_words,
ysr@777 484 "offset should be <= BlockOffsetSharedArray::N");
ysr@777 485 assert(Universe::heap()->is_in_reserved(blk_start),
ysr@777 486 "reference must be into the heap");
ysr@777 487 assert(Universe::heap()->is_in_reserved(blk_end-1),
ysr@777 488 "limit must be within the heap");
ysr@777 489 assert(threshold == _array->_reserved.start() + index*N_words,
ysr@777 490 "index must agree with threshold");
ysr@777 491
ysr@777 492 DEBUG_ONLY(size_t orig_index = index;)
ysr@777 493
ysr@777 494 // Mark the card that holds the offset into the block. Note
ysr@777 495 // that _next_offset_index and _next_offset_threshold are not
ysr@777 496 // updated until the end of this method.
ysr@777 497 _array->set_offset_array(index, threshold, blk_start);
ysr@777 498
ysr@777 499 // We need to now mark the subsequent cards that this blk spans.
ysr@777 500
ysr@777 501 // Index of card on which blk ends.
ysr@777 502 size_t end_index = _array->index_for(blk_end - 1);
ysr@777 503
ysr@777 504 // Are there more cards left to be updated?
ysr@777 505 if (index + 1 <= end_index) {
ysr@777 506 HeapWord* rem_st = _array->address_for_index(index + 1);
ysr@777 507 // Calculate rem_end this way because end_index
ysr@777 508 // may be the last valid index in the covered region.
ysr@777 509 HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
ysr@777 510 set_remainder_to_point_to_start(rem_st, rem_end);
ysr@777 511 }
ysr@777 512
ysr@777 513 index = end_index + 1;
ysr@777 514 // Calculate threshold_ this way because end_index
ysr@777 515 // may be the last valid index in the covered region.
ysr@777 516 threshold = _array->address_for_index(end_index) + N_words;
ysr@777 517 assert(threshold >= blk_end, "Incorrect offset threshold");
ysr@777 518
ysr@777 519 // index_ and threshold_ updated here.
ysr@777 520 *threshold_ = threshold;
ysr@777 521 *index_ = index;
ysr@777 522
ysr@777 523 #ifdef ASSERT
ysr@777 524 // The offset can be 0 if the block starts on a boundary. That
ysr@777 525 // is checked by an assertion above.
ysr@777 526 size_t start_index = _array->index_for(blk_start);
ysr@777 527 HeapWord* boundary = _array->address_for_index(start_index);
ysr@777 528 assert((_array->offset_array(orig_index) == 0 &&
ysr@777 529 blk_start == boundary) ||
ysr@777 530 (_array->offset_array(orig_index) > 0 &&
ysr@777 531 _array->offset_array(orig_index) <= N_words),
ysr@777 532 "offset array should have been set");
ysr@777 533 for (size_t j = orig_index + 1; j <= end_index; j++) {
ysr@777 534 assert(_array->offset_array(j) > 0 &&
ysr@777 535 _array->offset_array(j) <=
ysr@777 536 (u_char) (N_words+BlockOffsetArray::N_powers-1),
ysr@777 537 "offset array should have been set");
ysr@777 538 }
ysr@777 539 #endif
ysr@777 540 }
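
// A worked example for alloc_block_work2() (all card numbers and sizes are
// illustrative): suppose *threshold_ is the start of card 20, *index_ is 20,
// and [blk_start, blk_end) begins 5 words before that threshold and ends
// somewhere in card 23. Then the call leaves the table as follows:
//
//   entry[20]    = 5                    // words back from card 20 to blk_start
//   entries 21..23                      // backskip values, via
//                                       // set_remainder_to_point_to_start()
//   *index_      = 24
//   *threshold_  = _array->address_for_index(23) + N_words   // start of card 24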
ysr@777 541
tonyp@2453 542 bool
tonyp@2453 543 G1BlockOffsetArray::verify_for_object(HeapWord* obj_start,
tonyp@2453 544 size_t word_size) const {
tonyp@2453 545 size_t first_card = _array->index_for(obj_start);
tonyp@2453 546 size_t last_card = _array->index_for(obj_start + word_size - 1);
tonyp@2453 547 if (!_array->is_card_boundary(obj_start)) {
tonyp@2453 548 // If the object is not on a card boundary the BOT entry of the
tonyp@2453 549 // first card should point to another object so we should not
tonyp@2453 550 // check that one.
tonyp@2453 551 first_card += 1;
tonyp@2453 552 }
tonyp@2453 553 for (size_t card = first_card; card <= last_card; card += 1) {
tonyp@2453 554 HeapWord* card_addr = _array->address_for_index(card);
tonyp@2453 555 HeapWord* block_start = block_start_const(card_addr);
tonyp@2453 556 if (block_start != obj_start) {
tonyp@2453 557 gclog_or_tty->print_cr("block start: "PTR_FORMAT" is incorrect - "
tonyp@2453 558 "card index: "SIZE_FORMAT" "
tonyp@2453 559 "card addr: "PTR_FORMAT" BOT entry: %u "
tonyp@2453 560 "obj: "PTR_FORMAT" word size: "SIZE_FORMAT" "
tonyp@2453 561 "cards: ["SIZE_FORMAT","SIZE_FORMAT"]",
tonyp@2453 562 block_start, card, card_addr,
tonyp@2453 563 _array->offset_array(card),
tonyp@2453 564 obj_start, word_size, first_card, last_card);
tonyp@2453 565 return false;
tonyp@2453 566 }
tonyp@2453 567 }
tonyp@2453 568 return true;
tonyp@2453 569 }
tonyp@2453 570
tonyp@2453 571 #ifndef PRODUCT
tonyp@2241 572 void
tonyp@2453 573 G1BlockOffsetArray::print_on(outputStream* out) {
tonyp@2453 574 size_t from_index = _array->index_for(_bottom);
tonyp@2453 575 size_t to_index = _array->index_for(_end);
tonyp@2453 576 out->print_cr(">> BOT for area ["PTR_FORMAT","PTR_FORMAT") "
tonyp@2453 577 "cards ["SIZE_FORMAT","SIZE_FORMAT")",
tonyp@2453 578 _bottom, _end, from_index, to_index);
tonyp@2453 579 for (size_t i = from_index; i < to_index; ++i) {
tonyp@2453 580 out->print_cr(" entry "SIZE_FORMAT_W(8)" | "PTR_FORMAT" : %3u",
tonyp@2453 581 i, _array->address_for_index(i),
tonyp@2453 582 (uint) _array->offset_array(i));
tonyp@2453 583 }
tonyp@2241 584 }
tonyp@2453 585 #endif // !PRODUCT
tonyp@2241 586
ysr@777 587 //////////////////////////////////////////////////////////////////////
ysr@777 588 // G1BlockOffsetArrayContigSpace
ysr@777 589 //////////////////////////////////////////////////////////////////////
ysr@777 590
ysr@777 591 HeapWord*
ysr@777 592 G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
ysr@777 593 assert(_bottom <= addr && addr < _end,
ysr@777 594 "addr must be covered by this Array");
ysr@777 595 HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
ysr@777 596 return forward_to_block_containing_addr(q, addr);
ysr@777 597 }
ysr@777 598
ysr@777 599 HeapWord*
ysr@777 600 G1BlockOffsetArrayContigSpace::
ysr@777 601 block_start_unsafe_const(const void* addr) const {
ysr@777 602 assert(_bottom <= addr && addr < _end,
ysr@777 603 "addr must be covered by this Array");
ysr@777 604 HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
ysr@777 605 HeapWord* n = q + _sp->block_size(q);
ysr@777 606 return forward_to_block_containing_addr_const(q, n, addr);
ysr@777 607 }
ysr@777 608
ysr@777 609 G1BlockOffsetArrayContigSpace::
ysr@777 610 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
ysr@777 611 MemRegion mr) :
ysr@777 612 G1BlockOffsetArray(array, mr, true)
ysr@777 613 {
ysr@777 614 _next_offset_threshold = NULL;
ysr@777 615 _next_offset_index = 0;
ysr@777 616 }
ysr@777 617
ysr@777 618 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
ysr@777 619 assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
ysr@777 620 "just checking");
ysr@777 621 _next_offset_index = _array->index_for(_bottom);
ysr@777 622 _next_offset_index++;
ysr@777 623 _next_offset_threshold =
ysr@777 624 _array->address_for_index(_next_offset_index);
ysr@777 625 return _next_offset_threshold;
ysr@777 626 }
ysr@777 627
ysr@777 628 void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
ysr@777 629 assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
ysr@777 630 "just checking");
ysr@777 631 size_t bottom_index = _array->index_for(_bottom);
ysr@777 632 assert(_array->address_for_index(bottom_index) == _bottom,
ysr@777 633 "Precondition of call");
ysr@777 634 _array->set_offset_array(bottom_index, 0);
ysr@777 635 }
tonyp@2241 636
tonyp@2241 637 void
tonyp@2453 638 G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
tonyp@2453 639 assert(new_top <= _end, "_end should have already been updated");
tonyp@2241 640
tonyp@2453 641 // The first BOT entry should have offset 0.
tonyp@2453 642 zero_bottom_entry();
tonyp@2453 643 initialize_threshold();
tonyp@2453 644 alloc_block(_bottom, new_top);
tonyp@2453 645 }
tonyp@2453 646
tonyp@2453 647 #ifndef PRODUCT
tonyp@2453 648 void
tonyp@2453 649 G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
tonyp@2453 650 G1BlockOffsetArray::print_on(out);
tonyp@2453 651 out->print_cr(" next offset threshold: "PTR_FORMAT, _next_offset_threshold);
tonyp@2453 652 out->print_cr(" next offset index: "SIZE_FORMAT, _next_offset_index);
tonyp@2241 653 }
tonyp@2453 654 #endif // !PRODUCT
