src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp

author       zgu
date         Thu, 28 Jun 2012 17:03:16 -0400
changeset    3900:d2a62e0f25eb
parent       2453:2250ee17e258
child        4037:da91efe96a93
permissions  -rw-r--r--

6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain

ysr@777 1 /*
tonyp@2453 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@2314 27 #include "memory/space.hpp"
stefank@2314 28 #include "oops/oop.inline.hpp"
stefank@2314 29 #include "runtime/java.hpp"
zgu@3900 30 #include "services/memTracker.hpp"
ysr@777 31
ysr@777 32 //////////////////////////////////////////////////////////////////////
ysr@777 33 // G1BlockOffsetSharedArray
ysr@777 34 //////////////////////////////////////////////////////////////////////
ysr@777 35
ysr@777 36 G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved,
ysr@777 37 size_t init_word_size) :
ysr@777 38 _reserved(reserved), _end(NULL)
ysr@777 39 {
ysr@777 40 size_t size = compute_size(reserved.word_size());
ysr@777 41 ReservedSpace rs(ReservedSpace::allocation_align_size_up(size));
ysr@777 42 if (!rs.is_reserved()) {
ysr@777 43 vm_exit_during_initialization("Could not reserve enough space for heap offset array");
ysr@777 44 }
ysr@777 45 if (!_vs.initialize(rs, 0)) {
ysr@777 46 vm_exit_during_initialization("Could not reserve enough space for heap offset array");
ysr@777 47 }
zgu@3900 48
zgu@3900 49 MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
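// (Editorial note, not part of the original source: the record_virtual_memory_type()
// call above, together with the services/memTracker.hpp include, is the G1-side
// change made by this changeset -- it tags the block offset table's reserved range
// as GC memory (mtGC) so Native Memory Tracking can attribute the mapping; the
// reservation and commit logic around it is unchanged.)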
zgu@3900 50
ysr@777 51 _offset_array = (u_char*)_vs.low_boundary();
ysr@777 52 resize(init_word_size);
ysr@777 53 if (TraceBlockOffsetTable) {
ysr@777 54 gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: ");
ysr@777 55 gclog_or_tty->print_cr(" "
ysr@777 56 " rs.base(): " INTPTR_FORMAT
ysr@777 57 " rs.size(): " INTPTR_FORMAT
ysr@777 58 " rs end(): " INTPTR_FORMAT,
ysr@777 59 rs.base(), rs.size(), rs.base() + rs.size());
ysr@777 60 gclog_or_tty->print_cr(" "
ysr@777 61 " _vs.low_boundary(): " INTPTR_FORMAT
ysr@777 62 " _vs.high_boundary(): " INTPTR_FORMAT,
ysr@777 63 _vs.low_boundary(),
ysr@777 64 _vs.high_boundary());
ysr@777 65 }
ysr@777 66 }
ysr@777 67
ysr@777 68 void G1BlockOffsetSharedArray::resize(size_t new_word_size) {
ysr@777 69 assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
ysr@777 70 size_t new_size = compute_size(new_word_size);
ysr@777 71 size_t old_size = _vs.committed_size();
ysr@777 72 size_t delta;
ysr@777 73 char* high = _vs.high();
ysr@777 74 _end = _reserved.start() + new_word_size;
ysr@777 75 if (new_size > old_size) {
ysr@777 76 delta = ReservedSpace::page_align_size_up(new_size - old_size);
ysr@777 77 assert(delta > 0, "just checking");
ysr@777 78 if (!_vs.expand_by(delta)) {
ysr@777 79 // Do better than this for Merlin
ysr@777 80 vm_exit_out_of_memory(delta, "offset table expansion");
ysr@777 81 }
ysr@777 82 assert(_vs.high() == high + delta, "invalid expansion");
ysr@777 83 // Initialization of the contents is left to the
ysr@777 84 // G1BlockOffsetArray that uses it.
ysr@777 85 } else {
ysr@777 86 delta = ReservedSpace::page_align_size_down(old_size - new_size);
ysr@777 87 if (delta == 0) return;
ysr@777 88 _vs.shrink_by(delta);
ysr@777 89 assert(_vs.high() == high - delta, "invalid shrinkage");
ysr@777 90 }
ysr@777 91 }
ysr@777 92
ysr@777 93 bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
ysr@777 94 assert(p >= _reserved.start(), "just checking");
ysr@777 95 size_t delta = pointer_delta(p, _reserved.start());
ysr@777 96 return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
ysr@777 97 }
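// (Editorial example, not part of the original source: is_card_boundary() tests
// whether the low LogN_words bits of the word distance from _reserved.start()
// are zero. Assuming 512-byte BOT cards and 8-byte heap words, N_words is 64,
// so a pointer 128 words past _reserved.start() is a card boundary
// (128 & 63 == 0) while one 130 words past it is not (130 & 63 == 2).)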
ysr@777 98
ysr@777 99
ysr@777 100 //////////////////////////////////////////////////////////////////////
ysr@777 101 // G1BlockOffsetArray
ysr@777 102 //////////////////////////////////////////////////////////////////////
ysr@777 103
ysr@777 104 G1BlockOffsetArray::G1BlockOffsetArray(G1BlockOffsetSharedArray* array,
ysr@777 105 MemRegion mr, bool init_to_zero) :
ysr@777 106 G1BlockOffsetTable(mr.start(), mr.end()),
ysr@777 107 _unallocated_block(_bottom),
ysr@777 108 _array(array), _csp(NULL),
ysr@777 109 _init_to_zero(init_to_zero) {
ysr@777 110 assert(_bottom <= _end, "arguments out of order");
ysr@777 111 if (!_init_to_zero) {
ysr@777 112 // initialize cards to point back to mr.start()
ysr@777 113 set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
ysr@777 114 _array->set_offset_array(0, 0); // set first card to 0
ysr@777 115 }
ysr@777 116 }
ysr@777 117
ysr@777 118 void G1BlockOffsetArray::set_space(Space* sp) {
ysr@777 119 _sp = sp;
ysr@777 120 _csp = sp->toContiguousSpace();
ysr@777 121 }
ysr@777 122
ysr@777 123 // The arguments follow the normal convention of denoting
ysr@777 124 // a right-open interval: [start, end)
ysr@777 125 void
ysr@777 126 G1BlockOffsetArray::set_remainder_to_point_to_start(HeapWord* start, HeapWord* end) {
ysr@777 127
ysr@777 128 if (start >= end) {
ysr@777 129 // The start address is equal to the end address (or to
ysr@777 130 // the right of the end address) so there are no cards
ysr@777 131 // that need to be updated.
ysr@777 132 return;
ysr@777 133 }
ysr@777 134
ysr@777 135 // Write the backskip value for each region.
ysr@777 136 //
ysr@777 137 //    offset
ysr@777 138 //    card             2nd                       3rd
ysr@777 139 //     | +- 1st        |                         |
ysr@777 140 //     v v             v                         v
ysr@777 141 //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
ysr@777 142 //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
ysr@777 143 //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
ysr@777 144 //    11              19                        75
ysr@777 145 //      12
ysr@777 146 //
ysr@777 147 // offset card is the card that points to the start of an object
ysr@777 148 // x - offset value of offset card
ysr@777 149 // 1st - start of first logarithmic region
ysr@777 150 // 0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
ysr@777 151 // 2nd - start of second logarithmic region
ysr@777 152 // 1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
ysr@777 153 // 3rd - start of third logarithmic region
ysr@777 154 // 2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
ysr@777 155 //
ysr@777 156 // integer below the block offset entry is an example of
ysr@777 157 // the index of the entry
ysr@777 158 //
ysr@777 159 // Given an address,
ysr@777 160 // Find the index for the address
ysr@777 161 // Find the block offset table entry
ysr@777 162 // Convert the entry to a back skip
ysr@777 163 // (e.g., with today's encoding, offset = 0x81 =>
ysr@777 164 // back skip = 2**(3*(0x81 - N_words)) = 2**3 = 8)
ysr@777 165 // Move back N (e.g., 8) entries and repeat with the
ysr@777 166 // value of the new entry
ysr@777 167 //
ysr@777 168 size_t start_card = _array->index_for(start);
ysr@777 169 size_t end_card = _array->index_for(end-1);
ysr@777 170 assert(start ==_array->address_for_index(start_card), "Precondition");
ysr@777 171 assert(end ==_array->address_for_index(end_card)+N_words, "Precondition");
ysr@777 172 set_remainder_to_point_to_start_incl(start_card, end_card); // closed interval
ysr@777 173 }
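// (Editorial sketch, not part of the original source: how the back-skip entries
// written above are consumed on lookup. The hypothetical helper below -- its name
// and a matching declaration in g1BlockOffsetTable.hpp are assumptions, not
// existing API -- mirrors the walk done by block_at_or_preceding() and
// check_all_cards(): entries below N_words give the distance in words from the
// card's first word back to the block start, while entries >= N_words encode how
// many cards to jump back.)
size_t G1BlockOffsetArray::walk_back_to_offset_card(size_t card) const {
  u_char entry = _array->offset_array(card);
  while (entry >= N_words) {
    // Logarithmic region: jump back entry_to_cards_back(entry) cards and retry.
    card -= BlockOffsetArray::entry_to_cards_back(entry);
    entry = _array->offset_array(card);
  }
  return card;  // This card's entry is a direct word offset to the block start.
}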
ysr@777 174
ysr@777 175 // Unlike the normal convention in this code, the arguments here denote
ysr@777 176 // a closed, inclusive interval: [start_card, end_card], cf. set_remainder_to_point_to_start()
ysr@777 177 // above.
ysr@777 178 void
ysr@777 179 G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card) {
ysr@777 180 if (start_card > end_card) {
ysr@777 181 return;
ysr@777 182 }
ysr@777 183 assert(start_card > _array->index_for(_bottom), "Cannot be first card");
ysr@777 184 assert(_array->offset_array(start_card-1) <= N_words,
tonyp@2241 185 "Offset card has an unexpected value");
ysr@777 186 size_t start_card_for_region = start_card;
ysr@777 187 u_char offset = max_jubyte;
ysr@777 188 for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
ysr@777 189 // -1 so that the card with the actual offset is counted. Another -1
ysr@777 190 // so that the reach ends in this region and not at the start
ysr@777 191 // of the next.
ysr@777 192 size_t reach = start_card - 1 + (BlockOffsetArray::power_to_cards_back(i+1) - 1);
ysr@777 193 offset = N_words + i;
ysr@777 194 if (reach >= end_card) {
ysr@777 195 _array->set_offset_array(start_card_for_region, end_card, offset);
ysr@777 196 start_card_for_region = reach + 1;
ysr@777 197 break;
ysr@777 198 }
ysr@777 199 _array->set_offset_array(start_card_for_region, reach, offset);
ysr@777 200 start_card_for_region = reach + 1;
ysr@777 201 }
ysr@777 202 assert(start_card_for_region > end_card, "Sanity check");
ysr@777 203 DEBUG_ONLY(check_all_cards(start_card, end_card);)
ysr@777 204 }
ysr@777 205
ysr@777 206 // The block [blk_start, blk_end) has been allocated;
ysr@777 207 // adjust the block offset table to represent this information;
ysr@777 208 // right-open interval: [blk_start, blk_end)
ysr@777 209 void
ysr@777 210 G1BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 211 mark_block(blk_start, blk_end);
ysr@777 212 allocated(blk_start, blk_end);
ysr@777 213 }
ysr@777 214
ysr@777 215 // Adjust BOT to show that a previously whole block has been split
ysr@777 216 // into two.
ysr@777 217 void G1BlockOffsetArray::split_block(HeapWord* blk, size_t blk_size,
ysr@777 218 size_t left_blk_size) {
ysr@777 219 // Verify that the BOT shows [blk, blk + blk_size) to be one block.
ysr@777 220 verify_single_block(blk, blk_size);
ysr@777 221 // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
ysr@777 222 // is one single block.
ysr@777 223 mark_block(blk + left_blk_size, blk + blk_size);
ysr@777 224 }
ysr@777 225
ysr@777 226
ysr@777 227 // Action_mark - update the BOT for the block [blk_start, blk_end).
ysr@777 228 // Current typical use is for splitting a block.
tonyp@2453 229 // Action_single - update the BOT for an allocation.
ysr@777 230 // Action_check - BOT verification.
ysr@777 231 void G1BlockOffsetArray::do_block_internal(HeapWord* blk_start,
ysr@777 232 HeapWord* blk_end,
ysr@777 233 Action action) {
ysr@777 234 assert(Universe::heap()->is_in_reserved(blk_start),
ysr@777 235 "reference must be into the heap");
ysr@777 236 assert(Universe::heap()->is_in_reserved(blk_end-1),
ysr@777 237 "limit must be within the heap");
ysr@777 238 // This is optimized to make the test fast, assuming we only rarely
ysr@777 239 // cross boundaries.
ysr@777 240 uintptr_t end_ui = (uintptr_t)(blk_end - 1);
ysr@777 241 uintptr_t start_ui = (uintptr_t)blk_start;
ysr@777 242 // Calculate the last card boundary preceding end of blk
ysr@777 243 intptr_t boundary_before_end = (intptr_t)end_ui;
ysr@777 244 clear_bits(boundary_before_end, right_n_bits(LogN));
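// (Editorial example, not part of the original source: assuming LogN is 9,
// i.e. 512-byte BOT cards, clearing the low 9 bits rounds blk_end - 1 down to
// the start of its card, e.g. end_ui == 0x7f001234 gives
// boundary_before_end == 0x7f001200.)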
ysr@777 245 if (start_ui <= (uintptr_t)boundary_before_end) {
ysr@777 246 // blk starts at or crosses a boundary
ysr@777 247 // Calculate index of card on which blk begins
ysr@777 248 size_t start_index = _array->index_for(blk_start);
ysr@777 249 // Index of card on which blk ends
ysr@777 250 size_t end_index = _array->index_for(blk_end - 1);
ysr@777 251 // Start address of card on which blk begins
ysr@777 252 HeapWord* boundary = _array->address_for_index(start_index);
ysr@777 253 assert(boundary <= blk_start, "blk should start at or after boundary");
ysr@777 254 if (blk_start != boundary) {
ysr@777 255 // blk starts strictly after boundary
ysr@777 256 // adjust card boundary and start_index forward to next card
ysr@777 257 boundary += N_words;
ysr@777 258 start_index++;
ysr@777 259 }
ysr@777 260 assert(start_index <= end_index, "monotonicity of index_for()");
ysr@777 261 assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
ysr@777 262 switch (action) {
ysr@777 263 case Action_mark: {
ysr@777 264 if (init_to_zero()) {
ysr@777 265 _array->set_offset_array(start_index, boundary, blk_start);
ysr@777 266 break;
ysr@777 267 } // Else fall through to the next case
ysr@777 268 }
ysr@777 269 case Action_single: {
ysr@777 270 _array->set_offset_array(start_index, boundary, blk_start);
ysr@777 271 // We have finished marking the "offset card". We need to now
ysr@777 272 // mark the subsequent cards that this blk spans.
ysr@777 273 if (start_index < end_index) {
ysr@777 274 HeapWord* rem_st = _array->address_for_index(start_index) + N_words;
ysr@777 275 HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
ysr@777 276 set_remainder_to_point_to_start(rem_st, rem_end);
ysr@777 277 }
ysr@777 278 break;
ysr@777 279 }
ysr@777 280 case Action_check: {
ysr@777 281 _array->check_offset_array(start_index, boundary, blk_start);
ysr@777 282 // We have finished checking the "offset card". We need to now
ysr@777 283 // check the subsequent cards that this blk spans.
ysr@777 284 check_all_cards(start_index + 1, end_index);
ysr@777 285 break;
ysr@777 286 }
ysr@777 287 default:
ysr@777 288 ShouldNotReachHere();
ysr@777 289 }
ysr@777 290 }
ysr@777 291 }
ysr@777 292
ysr@777 293 // The card-interval [start_card, end_card] is a closed interval; this
ysr@777 294 // is an expensive check -- use with care and only under protection of
ysr@777 295 // suitable flag.
ysr@777 296 void G1BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {
ysr@777 297
ysr@777 298 if (end_card < start_card) {
ysr@777 299 return;
ysr@777 300 }
ysr@777 301 guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
ysr@777 302 for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
ysr@777 303 u_char entry = _array->offset_array(c);
ysr@777 304 if (c - start_card > BlockOffsetArray::power_to_cards_back(1)) {
ysr@777 305 guarantee(entry > N_words, "Should be in logarithmic region");
ysr@777 306 }
ysr@777 307 size_t backskip = BlockOffsetArray::entry_to_cards_back(entry);
ysr@777 308 size_t landing_card = c - backskip;
ysr@777 309 guarantee(landing_card >= (start_card - 1), "Inv");
ysr@777 310 if (landing_card >= start_card) {
ysr@777 311 guarantee(_array->offset_array(landing_card) <= entry, "monotonicity");
ysr@777 312 } else {
ysr@777 313 guarantee(landing_card == start_card - 1, "Tautology");
ysr@777 314 guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
ysr@777 315 }
ysr@777 316 }
ysr@777 317 }
ysr@777 318
ysr@777 319 // The range [blk_start, blk_end) represents a single contiguous block
ysr@777 320 // of storage; modify the block offset table to represent this
ysr@777 321 // information; Right-open interval: [blk_start, blk_end)
ysr@777 322 // NOTE: this method does _not_ adjust _unallocated_block.
ysr@777 323 void
ysr@777 324 G1BlockOffsetArray::single_block(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 325 do_block_internal(blk_start, blk_end, Action_single);
ysr@777 326 }
ysr@777 327
ysr@777 328 // Mark the BOT such that if [blk_start, blk_end) straddles a card
ysr@777 329 // boundary, the card following the first such boundary is marked
ysr@777 330 // with the appropriate offset.
ysr@777 331 // NOTE: this method does _not_ adjust _unallocated_block or
ysr@777 332 // any cards subsequent to the first one.
ysr@777 333 void
ysr@777 334 G1BlockOffsetArray::mark_block(HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 335 do_block_internal(blk_start, blk_end, Action_mark);
ysr@777 336 }
ysr@777 337
ysr@777 338 HeapWord* G1BlockOffsetArray::block_start_unsafe(const void* addr) {
ysr@777 339 assert(_bottom <= addr && addr < _end,
ysr@777 340 "addr must be covered by this Array");
ysr@777 341 // Must read this exactly once because it can be modified by parallel
ysr@777 342 // allocation.
ysr@777 343 HeapWord* ub = _unallocated_block;
ysr@777 344 if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
ysr@777 345 assert(ub < _end, "tautology (see above)");
ysr@777 346 return ub;
ysr@777 347 }
ysr@777 348 // Otherwise, find the block start using the table.
ysr@777 349 HeapWord* q = block_at_or_preceding(addr, false, 0);
ysr@777 350 return forward_to_block_containing_addr(q, addr);
ysr@777 351 }
ysr@777 352
ysr@777 353 // This duplicates a little code from the above: unavoidable.
ysr@777 354 HeapWord*
ysr@777 355 G1BlockOffsetArray::block_start_unsafe_const(const void* addr) const {
ysr@777 356 assert(_bottom <= addr && addr < _end,
ysr@777 357 "addr must be covered by this Array");
ysr@777 358 // Must read this exactly once because it can be modified by parallel
ysr@777 359 // allocation.
ysr@777 360 HeapWord* ub = _unallocated_block;
ysr@777 361 if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
ysr@777 362 assert(ub < _end, "tautology (see above)");
ysr@777 363 return ub;
ysr@777 364 }
ysr@777 365 // Otherwise, find the block start using the table.
ysr@777 366 HeapWord* q = block_at_or_preceding(addr, false, 0);
ysr@777 367 HeapWord* n = q + _sp->block_size(q);
ysr@777 368 return forward_to_block_containing_addr_const(q, n, addr);
ysr@777 369 }
ysr@777 370
ysr@777 371
ysr@777 372 HeapWord*
ysr@777 373 G1BlockOffsetArray::forward_to_block_containing_addr_slow(HeapWord* q,
ysr@777 374 HeapWord* n,
ysr@777 375 const void* addr) {
ysr@777 376 // We're not in the normal case. We need to handle an important subcase
ysr@777 377 // here: LAB allocation. An allocation previously recorded in the
ysr@777 378 // offset table was actually a lab allocation, and was divided into
ysr@777 379 // several objects subsequently. Fix this situation as we answer the
ysr@777 380 // query, by updating entries as we cross them.
iveresov@787 381
iveresov@787 382 // If the first object's end, n, is at a card boundary, start refining
iveresov@787 383 // with the corresponding card (the value of the entry will basically be
iveresov@787 384 // set to 0). If the object crosses the boundary, start from the next card.
iveresov@787 385 size_t next_index = _array->index_for(n) + !_array->is_card_boundary(n);
ysr@777 386 HeapWord* next_boundary = _array->address_for_index(next_index);
ysr@777 387 if (csp() != NULL) {
ysr@777 388 if (addr >= csp()->top()) return csp()->top();
ysr@777 389 while (next_boundary < addr) {
ysr@777 390 while (n <= next_boundary) {
ysr@777 391 q = n;
ysr@777 392 oop obj = oop(q);
ysr@1280 393 if (obj->klass_or_null() == NULL) return q;
ysr@777 394 n += obj->size();
ysr@777 395 }
ysr@777 396 assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
ysr@777 397 // [q, n) is the block that crosses the boundary.
ysr@777 398 alloc_block_work2(&next_boundary, &next_index, q, n);
ysr@777 399 }
ysr@777 400 } else {
ysr@777 401 while (next_boundary < addr) {
ysr@777 402 while (n <= next_boundary) {
ysr@777 403 q = n;
ysr@777 404 oop obj = oop(q);
ysr@1280 405 if (obj->klass_or_null() == NULL) return q;
ysr@777 406 n += _sp->block_size(q);
ysr@777 407 }
ysr@777 408 assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
ysr@777 409 // [q, n) is the block that crosses the boundary.
ysr@777 410 alloc_block_work2(&next_boundary, &next_index, q, n);
ysr@777 411 }
ysr@777 412 }
ysr@777 413 return forward_to_block_containing_addr_const(q, n, addr);
ysr@777 414 }
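// (Editorial note, not part of the original source: each alloc_block_work2() call
// in the loops above records the block [q, n) that straddles next_boundary and
// then advances next_boundary and next_index past that block's last card, so every
// iteration of the outer while loop moves forward by at least one card and the
// refinement terminates once next_boundary reaches addr.)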
ysr@777 415
ysr@777 416 HeapWord* G1BlockOffsetArray::block_start_careful(const void* addr) const {
ysr@777 417 assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
ysr@777 418
ysr@777 419 assert(_bottom <= addr && addr < _end,
ysr@777 420 "addr must be covered by this Array");
ysr@777 421 // Must read this exactly once because it can be modified by parallel
ysr@777 422 // allocation.
ysr@777 423 HeapWord* ub = _unallocated_block;
ysr@777 424 if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
ysr@777 425 assert(ub < _end, "tautology (see above)");
ysr@777 426 return ub;
ysr@777 427 }
ysr@777 428
ysr@777 429 // Otherwise, find the block start using the table, but taking
ysr@777 430 // care (cf block_start_unsafe() above) not to parse any objects/blocks
ysr@777 431 // on the cards themselves.
ysr@777 432 size_t index = _array->index_for(addr);
ysr@777 433 assert(_array->address_for_index(index) == addr,
ysr@777 434 "arg should be start of card");
ysr@777 435
ysr@777 436 HeapWord* q = (HeapWord*)addr;
ysr@777 437 uint offset;
ysr@777 438 do {
ysr@777 439 offset = _array->offset_array(index--);
ysr@777 440 q -= offset;
ysr@777 441 } while (offset == N_words);
ysr@777 442 assert(q <= addr, "block start should be to left of arg");
ysr@777 443 return q;
ysr@777 444 }
ysr@777 445
ysr@777 446 // Note that the committed size of the covered space may have changed,
ysr@777 447 // so the table size might also wish to change.
ysr@777 448 void G1BlockOffsetArray::resize(size_t new_word_size) {
ysr@777 449 HeapWord* new_end = _bottom + new_word_size;
ysr@777 450 if (_end < new_end && !init_to_zero()) {
ysr@777 451 // verify that the old and new boundaries are also card boundaries
ysr@777 452 assert(_array->is_card_boundary(_end),
ysr@777 453 "_end not a card boundary");
ysr@777 454 assert(_array->is_card_boundary(new_end),
ysr@777 455 "new _end would not be a card boundary");
ysr@777 456 // set all the newly added cards
ysr@777 457 _array->set_offset_array(_end, new_end, N_words);
ysr@777 458 }
ysr@777 459 _end = new_end; // update _end
ysr@777 460 }
ysr@777 461
ysr@777 462 void G1BlockOffsetArray::set_region(MemRegion mr) {
ysr@777 463 _bottom = mr.start();
ysr@777 464 _end = mr.end();
ysr@777 465 }
ysr@777 466
ysr@777 467 //
ysr@777 468 //              threshold_
ysr@777 469 //              |   _index_
ysr@777 470 //              v   v
ysr@777 471 //      +-------+-------+-------+-------+-------+
ysr@777 472 //      |  i-1  |   i   |  i+1  |  i+2  |  i+3  |
ysr@777 473 //      +-------+-------+-------+-------+-------+
ysr@777 474 //       ( ^      ]
ysr@777 475 //         block-start
ysr@777 476 //
ysr@777 477 void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_,
ysr@777 478 HeapWord* blk_start, HeapWord* blk_end) {
ysr@777 479 // For efficiency, do copy-in/copy-out.
ysr@777 480 HeapWord* threshold = *threshold_;
ysr@777 481 size_t index = *index_;
ysr@777 482
ysr@777 483 assert(blk_start != NULL && blk_end > blk_start,
ysr@777 484 "phantom block");
ysr@777 485 assert(blk_end > threshold, "should be past threshold");
jcoomes@1844 486 assert(blk_start <= threshold, "blk_start should be at or before threshold");
ysr@777 487 assert(pointer_delta(threshold, blk_start) <= N_words,
ysr@777 488 "offset should be <= BlockOffsetSharedArray::N");
ysr@777 489 assert(Universe::heap()->is_in_reserved(blk_start),
ysr@777 490 "reference must be into the heap");
ysr@777 491 assert(Universe::heap()->is_in_reserved(blk_end-1),
ysr@777 492 "limit must be within the heap");
ysr@777 493 assert(threshold == _array->_reserved.start() + index*N_words,
ysr@777 494 "index must agree with threshold");
ysr@777 495
ysr@777 496 DEBUG_ONLY(size_t orig_index = index;)
ysr@777 497
ysr@777 498 // Mark the card that holds the offset into the block. Note
ysr@777 499 // that _next_offset_index and _next_offset_threshold are not
ysr@777 500 // updated until the end of this method.
ysr@777 501 _array->set_offset_array(index, threshold, blk_start);
ysr@777 502
ysr@777 503 // We need to now mark the subsequent cards that this blk spans.
ysr@777 504
ysr@777 505 // Index of card on which blk ends.
ysr@777 506 size_t end_index = _array->index_for(blk_end - 1);
ysr@777 507
ysr@777 508 // Are there more cards left to be updated?
ysr@777 509 if (index + 1 <= end_index) {
ysr@777 510 HeapWord* rem_st = _array->address_for_index(index + 1);
ysr@777 511 // Calculate rem_end this way because end_index
ysr@777 512 // may be the last valid index in the covered region.
ysr@777 513 HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
ysr@777 514 set_remainder_to_point_to_start(rem_st, rem_end);
ysr@777 515 }
ysr@777 516
ysr@777 517 index = end_index + 1;
ysr@777 518 // Calculate threshold_ this way because end_index
ysr@777 519 // may be the last valid index in the covered region.
ysr@777 520 threshold = _array->address_for_index(end_index) + N_words;
ysr@777 521 assert(threshold >= blk_end, "Incorrect offset threshold");
ysr@777 522
ysr@777 523 // index_ and threshold_ updated here.
ysr@777 524 *threshold_ = threshold;
ysr@777 525 *index_ = index;
ysr@777 526
ysr@777 527 #ifdef ASSERT
ysr@777 528 // The offset can be 0 if the block starts on a boundary. That
ysr@777 529 // is checked by an assertion above.
ysr@777 530 size_t start_index = _array->index_for(blk_start);
ysr@777 531 HeapWord* boundary = _array->address_for_index(start_index);
ysr@777 532 assert((_array->offset_array(orig_index) == 0 &&
ysr@777 533 blk_start == boundary) ||
ysr@777 534 (_array->offset_array(orig_index) > 0 &&
ysr@777 535 _array->offset_array(orig_index) <= N_words),
ysr@777 536 "offset array should have been set");
ysr@777 537 for (size_t j = orig_index + 1; j <= end_index; j++) {
ysr@777 538 assert(_array->offset_array(j) > 0 &&
ysr@777 539 _array->offset_array(j) <=
ysr@777 540 (u_char) (N_words+BlockOffsetArray::N_powers-1),
ysr@777 541 "offset array should have been set");
ysr@777 542 }
ysr@777 543 #endif
ysr@777 544 }
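// (Editorial example, not part of the original source: if the incoming *index_ is
// i and blk_end - 1 falls in card i+3, then cards i through i+3 are updated and on
// return *index_ == i+4 and *threshold_ == address_for_index(i+3) + N_words, i.e.
// the first word of card i+4 -- the next card a future allocation crossing the
// threshold will have to fill in.)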
ysr@777 545
tonyp@2453 546 bool
tonyp@2453 547 G1BlockOffsetArray::verify_for_object(HeapWord* obj_start,
tonyp@2453 548 size_t word_size) const {
tonyp@2453 549 size_t first_card = _array->index_for(obj_start);
tonyp@2453 550 size_t last_card = _array->index_for(obj_start + word_size - 1);
tonyp@2453 551 if (!_array->is_card_boundary(obj_start)) {
tonyp@2453 552 // If the object is not on a card boundary the BOT entry of the
tonyp@2453 553 // first card should point to another object so we should not
tonyp@2453 554 // check that one.
tonyp@2453 555 first_card += 1;
tonyp@2453 556 }
tonyp@2453 557 for (size_t card = first_card; card <= last_card; card += 1) {
tonyp@2453 558 HeapWord* card_addr = _array->address_for_index(card);
tonyp@2453 559 HeapWord* block_start = block_start_const(card_addr);
tonyp@2453 560 if (block_start != obj_start) {
tonyp@2453 561 gclog_or_tty->print_cr("block start: "PTR_FORMAT" is incorrect - "
tonyp@2453 562 "card index: "SIZE_FORMAT" "
tonyp@2453 563 "card addr: "PTR_FORMAT" BOT entry: %u "
tonyp@2453 564 "obj: "PTR_FORMAT" word size: "SIZE_FORMAT" "
tonyp@2453 565 "cards: ["SIZE_FORMAT","SIZE_FORMAT"]",
tonyp@2453 566 block_start, card, card_addr,
tonyp@2453 567 _array->offset_array(card),
tonyp@2453 568 obj_start, word_size, first_card, last_card);
tonyp@2453 569 return false;
tonyp@2453 570 }
tonyp@2453 571 }
tonyp@2453 572 return true;
tonyp@2453 573 }
tonyp@2453 574
tonyp@2453 575 #ifndef PRODUCT
tonyp@2241 576 void
tonyp@2453 577 G1BlockOffsetArray::print_on(outputStream* out) {
tonyp@2453 578 size_t from_index = _array->index_for(_bottom);
tonyp@2453 579 size_t to_index = _array->index_for(_end);
tonyp@2453 580 out->print_cr(">> BOT for area ["PTR_FORMAT","PTR_FORMAT") "
tonyp@2453 581 "cards ["SIZE_FORMAT","SIZE_FORMAT")",
tonyp@2453 582 _bottom, _end, from_index, to_index);
tonyp@2453 583 for (size_t i = from_index; i < to_index; ++i) {
tonyp@2453 584 out->print_cr(" entry "SIZE_FORMAT_W(8)" | "PTR_FORMAT" : %3u",
tonyp@2453 585 i, _array->address_for_index(i),
tonyp@2453 586 (uint) _array->offset_array(i));
tonyp@2453 587 }
tonyp@2241 588 }
tonyp@2453 589 #endif // !PRODUCT
tonyp@2241 590
ysr@777 591 //////////////////////////////////////////////////////////////////////
ysr@777 592 // G1BlockOffsetArrayContigSpace
ysr@777 593 //////////////////////////////////////////////////////////////////////
ysr@777 594
ysr@777 595 HeapWord*
ysr@777 596 G1BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) {
ysr@777 597 assert(_bottom <= addr && addr < _end,
ysr@777 598 "addr must be covered by this Array");
ysr@777 599 HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
ysr@777 600 return forward_to_block_containing_addr(q, addr);
ysr@777 601 }
ysr@777 602
ysr@777 603 HeapWord*
ysr@777 604 G1BlockOffsetArrayContigSpace::
ysr@777 605 block_start_unsafe_const(const void* addr) const {
ysr@777 606 assert(_bottom <= addr && addr < _end,
ysr@777 607 "addr must be covered by this Array");
ysr@777 608 HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1);
ysr@777 609 HeapWord* n = q + _sp->block_size(q);
ysr@777 610 return forward_to_block_containing_addr_const(q, n, addr);
ysr@777 611 }
ysr@777 612
ysr@777 613 G1BlockOffsetArrayContigSpace::
ysr@777 614 G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array,
ysr@777 615 MemRegion mr) :
ysr@777 616 G1BlockOffsetArray(array, mr, true)
ysr@777 617 {
ysr@777 618 _next_offset_threshold = NULL;
ysr@777 619 _next_offset_index = 0;
ysr@777 620 }
ysr@777 621
ysr@777 622 HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() {
ysr@777 623 assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
ysr@777 624 "just checking");
ysr@777 625 _next_offset_index = _array->index_for(_bottom);
ysr@777 626 _next_offset_index++;
ysr@777 627 _next_offset_threshold =
ysr@777 628 _array->address_for_index(_next_offset_index);
ysr@777 629 return _next_offset_threshold;
ysr@777 630 }
ysr@777 631
ysr@777 632 void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
ysr@777 633 assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
ysr@777 634 "just checking");
ysr@777 635 size_t bottom_index = _array->index_for(_bottom);
ysr@777 636 assert(_array->address_for_index(bottom_index) == _bottom,
ysr@777 637 "Precondition of call");
ysr@777 638 _array->set_offset_array(bottom_index, 0);
ysr@777 639 }
tonyp@2241 640
tonyp@2241 641 void
tonyp@2453 642 G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_top) {
tonyp@2453 643 assert(new_top <= _end, "_end should have already been updated");
tonyp@2241 644
tonyp@2453 645 // The first BOT entry should have offset 0.
tonyp@2453 646 zero_bottom_entry();
tonyp@2453 647 initialize_threshold();
tonyp@2453 648 alloc_block(_bottom, new_top);
tonyp@2453 649 }
tonyp@2453 650
tonyp@2453 651 #ifndef PRODUCT
tonyp@2453 652 void
tonyp@2453 653 G1BlockOffsetArrayContigSpace::print_on(outputStream* out) {
tonyp@2453 654 G1BlockOffsetArray::print_on(out);
tonyp@2453 655 out->print_cr(" next offset threshold: "PTR_FORMAT, _next_offset_threshold);
tonyp@2453 656 out->print_cr(" next offset index: "SIZE_FORMAT, _next_offset_index);
tonyp@2241 657 }
tonyp@2453 658 #endif // !PRODUCT
