1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/memory/blockOffsetTable.cpp Wed Apr 27 01:25:04 2016 +0800 1.3 @@ -0,0 +1,797 @@ 1.4 +/* 1.5 + * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 
 *
 */

#include "precompiled.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

//////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////

// Construct the shared offset array covering the heap region "reserved".
// Reserves virtual space sized by compute_size(reserved.word_size()) for
// the byte-per-card table, tags it for NMT as GC memory, and commits
// enough of it (via resize()) to cover init_word_size heap words.
// Exits the VM if the backing store cannot be reserved or initialized.
BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
                                               size_t init_word_size):
  _reserved(reserved), _end(NULL)
{
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(size);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }

  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  _offset_array = (u_char*)_vs.low_boundary();
  resize(init_word_size);
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr("  "
                  " rs.base(): " INTPTR_FORMAT
                  " rs.size(): " INTPTR_FORMAT
                  " rs end(): " INTPTR_FORMAT,
                  p2i(rs.base()), rs.size(), p2i(rs.base() + rs.size()));
    gclog_or_tty->print_cr("  "
                  " _vs.low_boundary(): " INTPTR_FORMAT
                  " _vs.high_boundary(): " INTPTR_FORMAT,
                  p2i(_vs.low_boundary()),
                  p2i(_vs.high_boundary()));
  }
}

// Grow or shrink the committed portion of the backing store so that the
// table covers new_word_size heap words.  Expansion failure is fatal
// (vm_exit_out_of_memory); shrinking by less than a page is a no-op.
void BlockOffsetSharedArray::resize(size_t new_word_size) {
  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
  size_t new_size = compute_size(new_word_size);
  size_t old_size = _vs.committed_size();
  size_t delta;
  char* high = _vs.high();
  _end = _reserved.start() + new_word_size;
  if (new_size > old_size) {
    delta = ReservedSpace::page_align_size_up(new_size - old_size);
    assert(delta > 0, "just checking");
    if (!_vs.expand_by(delta)) {
      // Do better than this for Merlin
      vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
    }
    assert(_vs.high() == high + delta, "invalid expansion");
  } else {
    delta = ReservedSpace::page_align_size_down(old_size - new_size);
    if (delta == 0) return;
    _vs.shrink_by(delta);
    assert(_vs.high() == high - delta, "invalid expansion");
  }
}

// Returns true iff p lies on a card boundary, i.e. its word offset from
// the start of the reserved region is a multiple of N_words (2^LogN_words).
bool BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  assert(p >= _reserved.start(), "just checking");
  size_t delta = pointer_delta(p, _reserved.start());
  return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}


//////////////////////////////////////////////////////////////////////
// BlockOffsetArray
//////////////////////////////////////////////////////////////////////

// Construct a view of the shared array covering region mr.
// If init_to_zero_ is false, every card after the first is pre-initialized
// to point back toward mr.start() and the first card is set to offset 0;
// otherwise the cards are left for lazy initialization by the client.
BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
                                   MemRegion mr, bool init_to_zero_) :
  BlockOffsetTable(mr.start(), mr.end()),
  _array(array)
{
  assert(_bottom <= _end, "arguments out of order");
  set_init_to_zero(init_to_zero_);
  if (!init_to_zero_) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}


// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
BlockOffsetArray::
set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing) {

  check_reducing_assertion(reducing);
  if (start >= end) {
    // The start address is equal to the end address (or to
    // the right of the end address) so there are no cards
    // that need to be updated.
    return;
  }

  // Write the backskip value for each region.
  //
  //    offset
  //    card             2nd                       3rd
  //     | +- 1st        |                         |
  //     v v             v                         v
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    11              19                        75
  //      12
  //
  //    offset card is the card that points to the start of an object
  //      x - offset value of offset card
  //    1st - start of first logarithmic region
  //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
  //    2nd - start of second logarithmic region
  //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
  //    3rd - start of third logarithmic region
  //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
  //
  //    integer below the block offset entry is an example of
  //    the index of the entry
  //
  //    Given an address,
  //      Find the index for the address
  //      Find the block offset table entry
  //      Convert the entry to a back slide
  //        (e.g., with today's, offset = 0x81 =>
  //          back slip = 2**(3*(0x81 - N_words)) = 2**3) = 8
  //      Move back N (e.g., 8) entries and repeat with the
  //        value of the new entry
  //
  size_t start_card = _array->index_for(start);
  size_t end_card = _array->index_for(end-1);
  assert(start == _array->address_for_index(start_card), "Precondition");
  assert(end == _array->address_for_index(end_card)+N_words, "Precondition");
  set_remainder_to_point_to_start_incl(start_card, end_card, reducing); // closed interval
}


// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card, bool reducing) {

  check_reducing_assertion(reducing);
  if (start_card > end_card) {
    return;
  }
  assert(start_card > _array->index_for(_bottom), "Cannot be first card");
  assert(_array->offset_array(start_card-1) <= N_words,
         "Offset card has an unexpected value");
  size_t start_card_for_region = start_card;
  u_char offset = max_jubyte;
  for (int i = 0; i < N_powers; i++) {
    // -1 so that the card with the actual offset is counted.  Another -1
    // so that the reach ends in this region and not at the start
    // of the next.
    size_t reach = start_card - 1 + (power_to_cards_back(i+1) - 1);
    offset = N_words + i;
    if (reach >= end_card) {
      _array->set_offset_array(start_card_for_region, end_card, offset, reducing);
      start_card_for_region = reach + 1;
      break;
    }
    _array->set_offset_array(start_card_for_region, reach, offset, reducing);
    start_card_for_region = reach + 1;
  }
  assert(start_card_for_region > end_card, "Sanity check");
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}

// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
// Walk the closed card interval [start_card, end_card] and verify the
// logarithmic back-skip invariants: entries are monotonically
// non-decreasing, entries beyond the first power region are strictly in
// the logarithmic range, and every back-skip lands on a card whose entry
// is no larger than the one we skipped from (or on the offset card
// immediately preceding start_card).
void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
  u_char last_entry = N_words;
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    guarantee(entry >= last_entry, "Monotonicity");
    if (c - start_card > power_to_cards_back(1)) {
      guarantee(entry > N_words, "Should be in logarithmic region");
    }
    size_t backskip = entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      guarantee(_array->offset_array(landing_card) <= entry, "Monotonicity");
    } else {
      guarantee(landing_card == (start_card - 1), "Tautology");
      // Note that N_words is the maximum offset value
      guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
    }
    last_entry = entry;  // remember for monotonicity test
  }
}


// Record the block [blk_start, blk_end) in the table.
void
BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
}

// Action_mark - update the BOT for the block [blk_start, blk_end).
//               Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_verify - BOT verification.
// Core worker shared by mark/single/check: if [blk_start, blk_end)
// starts at or crosses a card boundary, set (or check) the offset card
// for the block and then fix up (or check) the trailing cards the block
// spans.  Blocks wholly inside one card need no table update.
void
BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                    HeapWord* blk_end,
                                    Action action, bool reducing) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t    start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t    end_index   = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary    = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start, reducing);
          break;
        } // Else fall through to the next case
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start, reducing);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st  = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end, reducing);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}

// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
BlockOffsetArray::single_block(HeapWord* blk_start,
                               HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}

void BlockOffsetArray::verify() const {
  // For each entry in the block offset table, verify that
  // the entry correctly finds the start of an object at the
  // first address covered by the block or to the left of that
  // first address.

  size_t next_index = 1;
  size_t last_index = last_active_index();

  // Use for debugging.  Initialize to NULL to distinguish the
  // first iteration through the while loop.
  HeapWord* last_p = NULL;
  HeapWord* last_start = NULL;
  oop last_o = NULL;

  while (next_index <= last_index) {
    // Use an address past the start of the address for
    // the entry.
    HeapWord* p = _array->address_for_index(next_index) + 1;
    if (p >= _end) {
      // That's all of the allocated block table.
      return;
    }
    // block_start() asserts that start <= p.
    HeapWord* start = block_start(p);
    // First check if the start is an allocated block and only
    // then if it is a valid object.
    oop o = oop(start);
    assert(!Universe::is_fully_initialized() ||
           _sp->is_free_block(start) ||
           o->is_oop_or_null(), "Bad object was found");
    next_index++;
    last_p = p;
    last_start = start;
    last_o = o;
  }
}

//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayNonContigSpace
//////////////////////////////////////////////////////////////////////

// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// NOTE: Clients of BlockOffsetArrayNonContigSpace: consider using
// the somewhat more lightweight split_block() or
// (when init_to_zero()) mark_block() wherever possible.
// right-open interval: [blk_start, blk_end)
void
BlockOffsetArrayNonContigSpace::alloc_block(HeapWord* blk_start,
                                            HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
  allocated(blk_start, blk_end);
}

// Adjust BOT to show that a previously whole block has been split
// into two.  We verify the BOT for the first part (prefix) and
// update the BOT for the second part (suffix).
// blk is the start of the block
// blk_size is the size of the original block
// left_blk_size is the size of the first part of the split
void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
                                                 size_t blk_size,
                                                 size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.
  assert(blk_size > 0, "Should be positive");
  assert(left_blk_size > 0, "Should be positive");
  assert(left_blk_size < blk_size, "Not a split");

  // Start addresses of prefix block and suffix block.
  HeapWord* pref_addr = blk;
  HeapWord* suff_addr = blk + left_blk_size;
  HeapWord* end_addr  = blk + blk_size;

  // Indices for starts of prefix block and suffix block.
  size_t pref_index = _array->index_for(pref_addr);
  if (_array->address_for_index(pref_index) != pref_addr) {
    // pref_addr does not begin pref_index
    pref_index++;
  }

  size_t suff_index = _array->index_for(suff_addr);
  if (_array->address_for_index(suff_index) != suff_addr) {
    // suff_addr does not begin suff_index
    suff_index++;
  }

  // Definition: A block B, denoted [B_start, B_end) __starts__
  //     a card C, denoted [C_start, C_end), where C_start and C_end
  //     are the heap addresses that card C covers, iff
  //     B_start <= C_start < B_end.
  //
  //     We say that a card C "is started by" a block B, iff
  //     B "starts" C.
  //
  //     Note that the cardinality of the set of cards {C}
  //     started by a block B can be 0, 1, or more.
  //
  // Below, pref_index and suff_index are, respectively, the
  // first (least) card indices that the prefix and suffix of
  // the split start; end_index is one more than the index of
  // the last (greatest) card that blk starts.
  size_t end_index = _array->index_for(end_addr - 1) + 1;

  // Calculate the # cards that the prefix and suffix affect.
  size_t num_pref_cards = suff_index - pref_index;

  size_t num_suff_cards = end_index - suff_index;
  // Change the cards that need changing
  if (num_suff_cards > 0) {
    HeapWord* boundary = _array->address_for_index(suff_index);
    // Set the offset card for suffix block
    _array->set_offset_array(suff_index, boundary, suff_addr, true /* reducing */);
    // Change any further cards that need changing in the suffix
    if (num_pref_cards > 0) {
      if (num_pref_cards >= num_suff_cards) {
        // Unilaterally fix all of the suffix cards: closed card
        // index interval in args below.
        set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1, true /* reducing */);
      } else {
        // Unilaterally fix the first (num_pref_cards - 1) following
        // the "offset card" in the suffix block.
        set_remainder_to_point_to_start_incl(suff_index + 1,
          suff_index + num_pref_cards - 1, true /* reducing */);
        // Fix the appropriate cards in the remainder of the
        // suffix block -- these are the last num_pref_cards
        // cards in each power block of the "new" range plumbed
        // from suff_addr.
        bool more = true;
        uint i = 1;
        // First loop: locate the power block whose back-skip reach
        // exceeds num_pref_cards and patch its tail, then break.
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            more = false;
          }
          if (back_by > num_pref_cards) {
            // Fill in the remainder of this "power block", if it
            // is non-null.
            if (left_index <= right_index) {
              _array->set_offset_array(left_index, right_index,
                                       N_words + i - 1, true /* reducing */);
            } else {
              more = false; // we are done
            }
            i++;
            break;
          }
          i++;
        }
        // Second loop: patch the last num_pref_cards cards of each
        // remaining power block until end_index is reached.
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            if (left_index > right_index) {
              break;
            }
            more = false;
          }
          assert(left_index <= right_index, "Error");
          _array->set_offset_array(left_index, right_index, N_words + i - 1, true /* reducing */);
          i++;
        }
      }
    } // else no more cards to fix in suffix
  } // else nothing needs to be done
  // Verify that we did the right thing
  verify_single_block(pref_addr, left_blk_size);
  verify_single_block(suff_addr, blk_size - left_blk_size);
}


// Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
void
BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start,
                                           HeapWord* blk_end, bool reducing) {
  do_block_internal(blk_start, blk_end, Action_mark, reducing);
}

// Find the start of the block containing addr by walking the offset
// table: follow logarithmic back-skips until a direct offset entry is
// reached, then parse forward block-by-block to the block holding addr.
HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table.
  size_t index = _array->index_for(addr);
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);    // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(),
           err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
                   p2i(q), p2i(_sp->bottom())));
    assert(q < _sp->end(),
           err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
                   p2i(q), p2i(_sp->end())));
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  index--;
  q -= offset;
  assert(q >= _sp->bottom(),
         err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
                 p2i(q), p2i(_sp->bottom())));
  assert(q < _sp->end(),
         err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
                 p2i(q), p2i(_sp->end())));
  HeapWord* n = q;

  while (n <= addr) {
    debug_only(HeapWord* last = q);   // for debugging
    q = n;
    n += _sp->block_size(n);
    assert(n > q,
           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT","
                   " while querying blk_start(" PTR_FORMAT ")"
                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
                   p2i(n), p2i(last), p2i(addr), p2i(_sp->bottom()), p2i(_sp->end())));
  }
  assert(q <= addr,
         err_msg("wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")",
                 p2i(q), p2i(addr)));
  assert(addr <= n,
         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
                 p2i(addr), p2i(n)));
  return q;
}

// Like block_start_unsafe(), but addr must be a card start and no
// objects/blocks on the cards are parsed -- only the table itself is
// consulted, so the result may be a conservative (left) bound.
HeapWord* BlockOffsetArrayNonContigSpace::block_start_careful(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  do {
    offset = _array->offset_array(index);
    if (offset < N_words) {
      q -= offset;
    } else {
      size_t n_cards_back = entry_to_cards_back(offset);
      q -= (n_cards_back * N_words);
      index -= n_cards_back;
    }
  } while (offset >= N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}

#ifndef PRODUCT
// Verification & debugging - ensure that the offset table reflects the fact
// that the block [blk_start, blk_end) or [blk, blk + size) is a
// single block of storage. NOTE: can't const this because of
// call to non-const do_block_internal() below.
1.622 +void BlockOffsetArrayNonContigSpace::verify_single_block( 1.623 + HeapWord* blk_start, HeapWord* blk_end) { 1.624 + if (VerifyBlockOffsetArray) { 1.625 + do_block_internal(blk_start, blk_end, Action_check); 1.626 + } 1.627 +} 1.628 + 1.629 +void BlockOffsetArrayNonContigSpace::verify_single_block( 1.630 + HeapWord* blk, size_t size) { 1.631 + verify_single_block(blk, blk + size); 1.632 +} 1.633 + 1.634 +// Verify that the given block is before _unallocated_block 1.635 +void BlockOffsetArrayNonContigSpace::verify_not_unallocated( 1.636 + HeapWord* blk_start, HeapWord* blk_end) const { 1.637 + if (BlockOffsetArrayUseUnallocatedBlock) { 1.638 + assert(blk_start < blk_end, "Block inconsistency?"); 1.639 + assert(blk_end <= _unallocated_block, "_unallocated_block problem"); 1.640 + } 1.641 +} 1.642 + 1.643 +void BlockOffsetArrayNonContigSpace::verify_not_unallocated( 1.644 + HeapWord* blk, size_t size) const { 1.645 + verify_not_unallocated(blk, blk + size); 1.646 +} 1.647 +#endif // PRODUCT 1.648 + 1.649 +size_t BlockOffsetArrayNonContigSpace::last_active_index() const { 1.650 + if (_unallocated_block == _bottom) { 1.651 + return 0; 1.652 + } else { 1.653 + return _array->index_for(_unallocated_block - 1); 1.654 + } 1.655 +} 1.656 + 1.657 +////////////////////////////////////////////////////////////////////// 1.658 +// BlockOffsetArrayContigSpace 1.659 +////////////////////////////////////////////////////////////////////// 1.660 + 1.661 +HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const { 1.662 + assert(_array->offset_array(0) == 0, "objects can't cross covered areas"); 1.663 + 1.664 + // Otherwise, find the block start using the table. 1.665 + assert(_bottom <= addr && addr < _end, 1.666 + "addr must be covered by this Array"); 1.667 + size_t index = _array->index_for(addr); 1.668 + // We must make sure that the offset table entry we use is valid. If 1.669 + // "addr" is past the end, start at the last known one and go forward. 
1.670 + index = MIN2(index, _next_offset_index-1); 1.671 + HeapWord* q = _array->address_for_index(index); 1.672 + 1.673 + uint offset = _array->offset_array(index); // Extend u_char to uint. 1.674 + while (offset > N_words) { 1.675 + // The excess of the offset from N_words indicates a power of Base 1.676 + // to go back by. 1.677 + size_t n_cards_back = entry_to_cards_back(offset); 1.678 + q -= (N_words * n_cards_back); 1.679 + assert(q >= _sp->bottom(), "Went below bottom!"); 1.680 + index -= n_cards_back; 1.681 + offset = _array->offset_array(index); 1.682 + } 1.683 + while (offset == N_words) { 1.684 + assert(q >= _sp->bottom(), "Went below bottom!"); 1.685 + q -= N_words; 1.686 + index--; 1.687 + offset = _array->offset_array(index); 1.688 + } 1.689 + assert(offset < N_words, "offset too large"); 1.690 + q -= offset; 1.691 + HeapWord* n = q; 1.692 + 1.693 + while (n <= addr) { 1.694 + debug_only(HeapWord* last = q); // for debugging 1.695 + q = n; 1.696 + n += _sp->block_size(n); 1.697 + } 1.698 + assert(q <= addr, "wrong order for current and arg"); 1.699 + assert(addr <= n, "wrong order for arg and next"); 1.700 + return q; 1.701 +} 1.702 + 1.703 +// 1.704 +// _next_offset_threshold 1.705 +// | _next_offset_index 1.706 +// v v 1.707 +// +-------+-------+-------+-------+-------+ 1.708 +// | i-1 | i | i+1 | i+2 | i+3 | 1.709 +// +-------+-------+-------+-------+-------+ 1.710 +// ( ^ ] 1.711 +// block-start 1.712 +// 1.713 + 1.714 +void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start, 1.715 + HeapWord* blk_end) { 1.716 + assert(blk_start != NULL && blk_end > blk_start, 1.717 + "phantom block"); 1.718 + assert(blk_end > _next_offset_threshold, 1.719 + "should be past threshold"); 1.720 + assert(blk_start <= _next_offset_threshold, 1.721 + "blk_start should be at or before threshold"); 1.722 + assert(pointer_delta(_next_offset_threshold, blk_start) <= N_words, 1.723 + "offset should be <= BlockOffsetSharedArray::N"); 1.724 + 
assert(Universe::heap()->is_in_reserved(blk_start), 1.725 + "reference must be into the heap"); 1.726 + assert(Universe::heap()->is_in_reserved(blk_end-1), 1.727 + "limit must be within the heap"); 1.728 + assert(_next_offset_threshold == 1.729 + _array->_reserved.start() + _next_offset_index*N_words, 1.730 + "index must agree with threshold"); 1.731 + 1.732 + debug_only(size_t orig_next_offset_index = _next_offset_index;) 1.733 + 1.734 + // Mark the card that holds the offset into the block. Note 1.735 + // that _next_offset_index and _next_offset_threshold are not 1.736 + // updated until the end of this method. 1.737 + _array->set_offset_array(_next_offset_index, 1.738 + _next_offset_threshold, 1.739 + blk_start); 1.740 + 1.741 + // We need to now mark the subsequent cards that this blk spans. 1.742 + 1.743 + // Index of card on which blk ends. 1.744 + size_t end_index = _array->index_for(blk_end - 1); 1.745 + 1.746 + // Are there more cards left to be updated? 1.747 + if (_next_offset_index + 1 <= end_index) { 1.748 + HeapWord* rem_st = _array->address_for_index(_next_offset_index + 1); 1.749 + // Calculate rem_end this way because end_index 1.750 + // may be the last valid index in the covered region. 1.751 + HeapWord* rem_end = _array->address_for_index(end_index) + N_words; 1.752 + set_remainder_to_point_to_start(rem_st, rem_end); 1.753 + } 1.754 + 1.755 + // _next_offset_index and _next_offset_threshold updated here. 1.756 + _next_offset_index = end_index + 1; 1.757 + // Calculate _next_offset_threshold this way because end_index 1.758 + // may be the last valid index in the covered region. 1.759 + _next_offset_threshold = _array->address_for_index(end_index) + N_words; 1.760 + assert(_next_offset_threshold >= blk_end, "Incorrect offset threshold"); 1.761 + 1.762 +#ifdef ASSERT 1.763 + // The offset can be 0 if the block starts on a boundary. That 1.764 + // is checked by an assertion above. 
1.765 + size_t start_index = _array->index_for(blk_start); 1.766 + HeapWord* boundary = _array->address_for_index(start_index); 1.767 + assert((_array->offset_array(orig_next_offset_index) == 0 && 1.768 + blk_start == boundary) || 1.769 + (_array->offset_array(orig_next_offset_index) > 0 && 1.770 + _array->offset_array(orig_next_offset_index) <= N_words), 1.771 + "offset array should have been set"); 1.772 + for (size_t j = orig_next_offset_index + 1; j <= end_index; j++) { 1.773 + assert(_array->offset_array(j) > 0 && 1.774 + _array->offset_array(j) <= (u_char) (N_words+N_powers-1), 1.775 + "offset array should have been set"); 1.776 + } 1.777 +#endif 1.778 +} 1.779 + 1.780 +HeapWord* BlockOffsetArrayContigSpace::initialize_threshold() { 1.781 + assert(!Universe::heap()->is_in_reserved(_array->_offset_array), 1.782 + "just checking"); 1.783 + _next_offset_index = _array->index_for(_bottom); 1.784 + _next_offset_index++; 1.785 + _next_offset_threshold = 1.786 + _array->address_for_index(_next_offset_index); 1.787 + return _next_offset_threshold; 1.788 +} 1.789 + 1.790 +void BlockOffsetArrayContigSpace::zero_bottom_entry() { 1.791 + assert(!Universe::heap()->is_in_reserved(_array->_offset_array), 1.792 + "just checking"); 1.793 + size_t bottom_index = _array->index_for(_bottom); 1.794 + _array->set_offset_array(bottom_index, 0); 1.795 +} 1.796 + 1.797 +size_t BlockOffsetArrayContigSpace::last_active_index() const { 1.798 + size_t result = _next_offset_index - 1; 1.799 + return result >= 0 ? result : 0; 1.800 +}