/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

//////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////

BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
                                               size_t init_word_size):
  _reserved(reserved), _end(NULL)
{
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(size);
  if (!rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }

  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  _offset_array = (u_char*)_vs.low_boundary();
  resize(init_word_size);
  if (TraceBlockOffsetTable) {
    gclog_or_tty->print_cr("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
    gclog_or_tty->print_cr("  "
                           " rs.base(): " INTPTR_FORMAT
                           " rs.size(): " INTPTR_FORMAT
                           " rs end(): " INTPTR_FORMAT,
                           rs.base(), rs.size(), rs.base() + rs.size());
    gclog_or_tty->print_cr("  "
                           " _vs.low_boundary(): " INTPTR_FORMAT
                           " _vs.high_boundary(): " INTPTR_FORMAT,
                           _vs.low_boundary(),
                           _vs.high_boundary());
  }
}
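
// Illustrative sketch (not part of the build): how compute_size() relates the
// covered heap to the byte array reserved above -- one u_char entry per card
// of 2^LogN bytes. The constants below are assumptions chosen for the example
// (a 64-bit layout with 512-byte cards), not values read from
// blockOffsetTable.hpp; the real code takes LogN and friends from that header.
#if 0
#include <cstddef>
#include <cstdio>

int main() {
  const size_t log_n          = 9;                   // assumed: 2^9 = 512-byte cards
  const size_t heap_word_size = 8;                   // assumed: 64-bit heap words
  const size_t n_words        = ((size_t)1 << log_n) / heap_word_size; // words per card
  const size_t heap_words     = 64 * 1024 * 1024;    // example: 512 MB heap, in words
  // One byte of offset array per card of covered heap, rounded up:
  const size_t entries = (heap_words + n_words - 1) / n_words;
  std::printf("offset array needs %zu bytes for %zu heap words\n",
              entries, heap_words);
  return 0;
}
#endif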

void BlockOffsetSharedArray::resize(size_t new_word_size) {
  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
  size_t new_size = compute_size(new_word_size);
  size_t old_size = _vs.committed_size();
  size_t delta;
  char* high = _vs.high();
  _end = _reserved.start() + new_word_size;
  if (new_size > old_size) {
    delta = ReservedSpace::page_align_size_up(new_size - old_size);
    assert(delta > 0, "just checking");
    if (!_vs.expand_by(delta)) {
      // Do better than this for Merlin
      vm_exit_out_of_memory(delta, "offset table expansion");
    }
    assert(_vs.high() == high + delta, "invalid expansion");
  } else {
    delta = ReservedSpace::page_align_size_down(old_size - new_size);
    if (delta == 0) return;
    _vs.shrink_by(delta);
    assert(_vs.high() == high - delta, "invalid shrinkage");
  }
}

bool BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  assert(p >= _reserved.start(), "just checking");
  size_t delta = pointer_delta(p, _reserved.start());
  return (delta & right_n_bits(LogN_words)) == (size_t)NoBits;
}


//////////////////////////////////////////////////////////////////////
// BlockOffsetArray
//////////////////////////////////////////////////////////////////////

BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
                                   MemRegion mr, bool init_to_zero_) :
  BlockOffsetTable(mr.start(), mr.end()),
  _array(array)
{
  assert(_bottom <= _end, "arguments out of order");
  set_init_to_zero(init_to_zero_);
  if (!init_to_zero_) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + N_words, mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}
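
// Illustrative sketch (not part of the build): the card arithmetic behind
// is_card_boundary() above and the index_for()/address_for_index() pair
// declared in blockOffsetTable.hpp -- shift right to get a card index, mask
// the low bits to test for a boundary. The words-per-card figure is an
// assumption made for the example, not a value taken from the header.
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const size_t log_n_words = 6;                    // assumed: 2^6 = 64 words per card
  const size_t n_words     = (size_t)1 << log_n_words;

  size_t p = 3 * n_words + 17;                     // word offset: 17 words into card 3

  size_t index      = p >> log_n_words;            // cf. index_for()
  size_t card_start = index << log_n_words;        // cf. address_for_index()
  bool on_boundary  = (p & (n_words - 1)) == 0;    // cf. is_card_boundary()

  assert(index == 3);
  assert(card_start == 3 * n_words);
  assert(!on_boundary);
  return 0;
}
#endif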

// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
BlockOffsetArray::
set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing) {

  check_reducing_assertion(reducing);
  if (start >= end) {
    // The start address is equal to the end address (or to
    // the right of the end address) so there are no cards
    // that need to be updated.
    return;
  }

  // Write the backskip value for each region.
  //
  //    offset
  //    card             2nd                       3rd
  //     | +- 1st        |                         |
  //     v v             v                         v
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    11              19                        75
  //      12
  //
  //    offset card is the card that points to the start of an object
  //      x - offset value of offset card
  //    1st - start of first logarithmic region
  //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
  //    2nd - start of second logarithmic region
  //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
  //    3rd - start of third logarithmic region
  //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
  //
  //    integer below the block offset entry is an example of
  //    the index of the entry
  //
  //    Given an address,
  //      Find the index for the address
  //      Find the block offset table entry
  //      Convert the entry to a back skip
  //        (e.g., with today's, offset = 0x81 =>
  //          back skip = 2**(3*(0x81 - N_words)) = 2**3 = 8)
  //      Move back N (e.g., 8) entries and repeat with the
  //        value of the new entry
  //
  size_t start_card = _array->index_for(start);
  size_t end_card = _array->index_for(end-1);
  assert(start == _array->address_for_index(start_card), "Precondition");
  assert(end == _array->address_for_index(end_card)+N_words, "Precondition");
  set_remainder_to_point_to_start_incl(start_card, end_card, reducing); // closed interval
}


// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card, bool reducing) {

  check_reducing_assertion(reducing);
  if (start_card > end_card) {
    return;
  }
  assert(start_card > _array->index_for(_bottom), "Cannot be first card");
  assert(_array->offset_array(start_card-1) <= N_words,
         "Offset card has an unexpected value");
  size_t start_card_for_region = start_card;
  u_char offset = max_jubyte;
  for (int i = 0; i < N_powers; i++) {
    // -1 so that the card with the actual offset is counted.  Another -1
    // so that the reach ends in this region and not at the start
    // of the next.
    size_t reach = start_card - 1 + (power_to_cards_back(i+1) - 1);
    offset = N_words + i;
    if (reach >= end_card) {
      _array->set_offset_array(start_card_for_region, end_card, offset, reducing);
      start_card_for_region = reach + 1;
      break;
    }
    _array->set_offset_array(start_card_for_region, reach, offset, reducing);
    start_card_for_region = reach + 1;
  }
  assert(start_card_for_region > end_card, "Sanity check");
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}
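
// Illustrative sketch (not part of the build): the logarithmic back-skip
// encoding that set_remainder_to_point_to_start_incl() writes. An entry of
// N_words + i means "the block start lies at least Base^i cards back". The
// base of 8 below follows the 2**(3*i) figures in the comment above; the
// actual Base, N_words and N_powers come from blockOffsetTable.hpp, so treat
// every constant here as an assumption made for the example.
#if 0
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  const unsigned n_words  = 64;    // assumed maximum in-card offset value
  const unsigned log_base = 3;     // assumed: regions grow by powers of 8
  const size_t   n_cards  = 600;   // cards covered by one (large) block

  // Encode: card 0 is the "offset card"; every later card records which
  // logarithmic region it falls in, mirroring the loop above.
  std::vector<unsigned> entries(n_cards, 0);
  size_t start_for_region = 1;
  for (unsigned i = 0; start_for_region < n_cards; i++) {
    size_t reach = (size_t)1 << (log_base * (i + 1));     // power_to_cards_back(i+1)
    size_t limit = (reach - 1 < n_cards) ? reach - 1 : n_cards - 1;
    for (size_t c = start_for_region; c <= limit; c++) {
      entries[c] = n_words + i;
    }
    start_for_region = limit + 1;
  }

  // Cards 1..7 say "skip back 8^0", cards 8..63 say "skip back 8^1", and so on.
  assert(entries[1]   == n_words + 0);
  assert(entries[8]   == n_words + 1);
  assert(entries[64]  == n_words + 2);
  assert(entries[599] == n_words + 3);   // 512 <= 599 < 4096
  return 0;
}
#endif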

// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  guarantee(_array->offset_array(start_card) == N_words, "Wrong value in second card");
  u_char last_entry = N_words;
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    guarantee(entry >= last_entry, "Monotonicity");
    if (c - start_card > power_to_cards_back(1)) {
      guarantee(entry > N_words, "Should be in logarithmic region");
    }
    size_t backskip = entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      guarantee(_array->offset_array(landing_card) <= entry, "Monotonicity");
    } else {
      guarantee(landing_card == (start_card - 1), "Tautology");
      // Note that N_words is the maximum offset value
      guarantee(_array->offset_array(landing_card) <= N_words, "Offset value");
    }
    last_entry = entry;  // remember for monotonicity test
  }
}


void
BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
}

// Action_mark - update the BOT for the block [blk_start, blk_end).
//               Current typical use is for splitting a block.
// Action_single - update the BOT for an allocation.
// Action_check - BOT verification.
void
BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                    HeapWord* blk_end,
                                    Action action, bool reducing) {
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits(LogN));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t    start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t    end_index   = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary    = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += N_words;
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    switch (action) {
      case Action_mark: {
        if (init_to_zero()) {
          _array->set_offset_array(start_index, boundary, blk_start, reducing);
          break;
        } // Else fall through to the next case
      }
      case Action_single: {
        _array->set_offset_array(start_index, boundary, blk_start, reducing);
        // We have finished marking the "offset card". We need to now
        // mark the subsequent cards that this blk spans.
        if (start_index < end_index) {
          HeapWord* rem_st  = _array->address_for_index(start_index) + N_words;
          HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
          set_remainder_to_point_to_start(rem_st, rem_end, reducing);
        }
        break;
      }
      case Action_check: {
        _array->check_offset_array(start_index, boundary, blk_start);
        // We have finished checking the "offset card". We need to now
        // check the subsequent cards that this blk spans.
        check_all_cards(start_index + 1, end_index);
        break;
      }
      default:
        ShouldNotReachHere();
    }
  }
}

// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
BlockOffsetArray::single_block(HeapWord* blk_start,
                               HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end, Action_single);
}
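
// Illustrative sketch (not part of the build): the two-step update that
// Action_single performs above -- record the word distance from the first
// covered card back to blk_start, then hand the remaining cards to
// set_remainder_to_point_to_start(). The flat array and card geometry are
// assumptions made for the example, not the VM's data structures.
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const size_t n_words = 64;             // assumed words per card
  unsigned char offsets[16] = {0};       // stand-in for the shared offset array

  size_t blk_start = 1 * n_words + 10;   // block starts 10 words into card 1
  size_t blk_end   = 5 * n_words + 3;    // and ends a few cards later

  // First card whose start lies inside the block (round up off a boundary):
  size_t start_index = blk_start / n_words;
  if (start_index * n_words != blk_start) start_index++;
  size_t end_index = (blk_end - 1) / n_words;

  // Offset card: distance in words from that card's start back to blk_start.
  offsets[start_index] = (unsigned char)(start_index * n_words - blk_start);
  // Cards start_index+1 .. end_index would get the logarithmic back-skip
  // entries (see the sketch after set_remainder_to_point_to_start_incl()).

  assert(start_index == 2 && end_index == 5);
  assert(offsets[2] == n_words - 10);    // 54 words back to the block start
  return 0;
}
#endif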

void BlockOffsetArray::verify() const {
  // For each entry in the block offset table, verify that
  // the entry correctly finds the start of an object at the
  // first address covered by the block or to the left of that
  // first address.

  size_t next_index = 1;
  size_t last_index = last_active_index();

  // Use for debugging.  Initialize to NULL to distinguish the
  // first iteration through the while loop.
  HeapWord* last_p = NULL;
  HeapWord* last_start = NULL;
  oop last_o = NULL;

  while (next_index <= last_index) {
    // Use an address one word past the start address for
    // the entry.
    HeapWord* p = _array->address_for_index(next_index) + 1;
    if (p >= _end) {
      // That's all of the allocated block table.
      return;
    }
    // block_start() asserts that start <= p.
    HeapWord* start = block_start(p);
    // First check if the start is an allocated block and only
    // then if it is a valid object.
    oop o = oop(start);
    assert(!Universe::is_fully_initialized() ||
           _sp->is_free_block(start) ||
           o->is_oop_or_null(), "Bad object was found");
    next_index++;
    last_p = p;
    last_start = start;
    last_o = o;
  }
}

//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayNonContigSpace
//////////////////////////////////////////////////////////////////////

// The block [blk_start, blk_end) has been allocated;
// adjust the block offset table to represent this information;
// NOTE: Clients of BlockOffsetArrayNonContigSpace: consider using
// the somewhat more lightweight split_block() or
// (when init_to_zero()) mark_block() wherever possible.
// right-open interval: [blk_start, blk_end)
void
BlockOffsetArrayNonContigSpace::alloc_block(HeapWord* blk_start,
                                            HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
  allocated(blk_start, blk_end);
}

// Adjust BOT to show that a previously whole block has been split
// into two.  We verify the BOT for the first part (prefix) and
// update the BOT for the second part (suffix).
//    blk is the start of the block
//    blk_size is the size of the original block
//    left_blk_size is the size of the first part of the split
void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
                                                 size_t blk_size,
                                                 size_t left_blk_size) {
  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
  verify_single_block(blk, blk_size);
  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
  // is one single block.
  assert(blk_size > 0, "Should be positive");
  assert(left_blk_size > 0, "Should be positive");
  assert(left_blk_size < blk_size, "Not a split");

  // Start addresses of prefix block and suffix block.
  HeapWord* pref_addr = blk;
  HeapWord* suff_addr = blk + left_blk_size;
  HeapWord* end_addr  = blk + blk_size;

  // Indices for starts of prefix block and suffix block.
  size_t pref_index = _array->index_for(pref_addr);
  if (_array->address_for_index(pref_index) != pref_addr) {
    // pref_addr does not begin pref_index
    pref_index++;
  }

  size_t suff_index = _array->index_for(suff_addr);
  if (_array->address_for_index(suff_index) != suff_addr) {
    // suff_addr does not begin suff_index
    suff_index++;
  }

  // Definition: A block B, denoted [B_start, B_end) __starts__
  //     a card C, denoted [C_start, C_end), where C_start and C_end
  //     are the heap addresses that card C covers, iff
  //     B_start <= C_start < B_end.
  //
  //     We say that a card C "is started by" a block B, iff
  //     B "starts" C.
  //
  //     Note that the cardinality of the set of cards {C}
  //     started by a block B can be 0, 1, or more.
  //
  // Below, pref_index and suff_index are, respectively, the
  // first (least) card indices that the prefix and suffix of
  // the split start; end_index is one more than the index of
  // the last (greatest) card that blk starts.
  size_t end_index = _array->index_for(end_addr - 1) + 1;

  // Calculate the # cards that the prefix and suffix affect.
  size_t num_pref_cards = suff_index - pref_index;

  size_t num_suff_cards = end_index - suff_index;
  // Change the cards that need changing
  if (num_suff_cards > 0) {
    HeapWord* boundary = _array->address_for_index(suff_index);
    // Set the offset card for suffix block
    _array->set_offset_array(suff_index, boundary, suff_addr, true /* reducing */);
    // Change any further cards that need changing in the suffix
    if (num_pref_cards > 0) {
      if (num_pref_cards >= num_suff_cards) {
        // Unilaterally fix all of the suffix cards: closed card
        // index interval in args below.
        set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1, true /* reducing */);
      } else {
        // Unilaterally fix the first (num_pref_cards - 1) following
        // the "offset card" in the suffix block.
        set_remainder_to_point_to_start_incl(suff_index + 1,
                                             suff_index + num_pref_cards - 1, true /* reducing */);
        // Fix the appropriate cards in the remainder of the
        // suffix block -- these are the last num_pref_cards
        // cards in each power block of the "new" range plumbed
        // from suff_addr.
        bool more = true;
        uint i = 1;
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            more = false;
          }
          if (back_by > num_pref_cards) {
            // Fill in the remainder of this "power block", if it
            // is non-null.
            if (left_index <= right_index) {
              _array->set_offset_array(left_index, right_index,
                                       N_words + i - 1, true /* reducing */);
            } else {
              more = false; // we are done
            }
            i++;
            break;
          }
          i++;
        }
        while (more && (i < N_powers)) {
          size_t back_by = power_to_cards_back(i);
          size_t right_index = suff_index + back_by - 1;
          size_t left_index  = right_index - num_pref_cards + 1;
          if (right_index >= end_index - 1) { // last iteration
            right_index = end_index - 1;
            if (left_index > right_index) {
              break;
            }
            more = false;
          }
          assert(left_index <= right_index, "Error");
          _array->set_offset_array(left_index, right_index, N_words + i - 1, true /* reducing */);
          i++;
        }
      }
    } // else no more cards to fix in suffix
  } // else nothing needs to be done
  // Verify that we did the right thing
  verify_single_block(pref_addr, left_blk_size);
  verify_single_block(suff_addr, blk_size - left_blk_size);
}


// Mark the BOT such that if [blk_start, blk_end) straddles a card
// boundary, the card following the first such boundary is marked
// with the appropriate offset.
// NOTE: this method does _not_ adjust _unallocated_block or
// any cards subsequent to the first one.
void
BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start,
                                           HeapWord* blk_end, bool reducing) {
  do_block_internal(blk_start, blk_end, Action_mark, reducing);
}
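
// Illustrative sketch (not part of the build): the index rounding that
// split_block() above applies when computing pref_index and suff_index -- a
// block's first "started" card is the first card whose start lies inside the
// block, so the index is bumped whenever the block begins mid-card. Card
// geometry is an assumption made for the example.
#if 0
#include <cassert>
#include <cstddef>

// First card index started by a block beginning at word offset addr,
// mirroring the pref_index/suff_index adjustment in split_block().
static size_t first_started_card(size_t addr, size_t n_words) {
  size_t index = addr / n_words;             // cf. index_for(addr)
  if (index * n_words != addr) index++;      // addr does not begin that card
  return index;
}

int main() {
  const size_t n_words = 64;                 // assumed words per card
  size_t blk           = 2 * n_words + 5;    // original block starts mid-card 2
  size_t blk_size      = 10 * n_words;
  size_t left_blk_size = 3 * n_words;        // split point, also mid-card

  size_t pref_index = first_started_card(blk, n_words);
  size_t suff_index = first_started_card(blk + left_blk_size, n_words);
  size_t end_index  = (blk + blk_size - 1) / n_words + 1;

  assert(pref_index == 3 && suff_index == 6 && end_index == 13);
  // Cards suff_index .. end_index - 1 are the ones split_block() rewrites
  // to describe the new suffix block.
  return 0;
}
#endif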

HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table.
  size_t index = _array->index_for(addr);
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);    // Extend u_char to uint.
  while (offset >= N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(),
           err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
                   q, _sp->bottom()));
    assert(q < _sp->end(),
           err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
                   q, _sp->end()));
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  index--;
  q -= offset;
  assert(q >= _sp->bottom(),
         err_msg("q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
                 q, _sp->bottom()));
  assert(q < _sp->end(),
         err_msg("q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
                 q, _sp->end()));
  HeapWord* n = q;

  while (n <= addr) {
    debug_only(HeapWord* last = q);   // for debugging
    q = n;
    n += _sp->block_size(n);
    assert(n > q,
           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT ","
                   " while querying blk_start(" PTR_FORMAT ")"
                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
                   n, last, addr, _sp->bottom(), _sp->end()));
  }
  assert(q <= addr,
         err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")",
                 q, addr));
  assert(addr <= n,
         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
                 addr, n));
  return q;
}
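
// Illustrative sketch (not part of the build): the lookup loop above in
// miniature. Entries below N_words are direct word offsets back to the block
// start; larger entries say "jump back Base^(entry - N_words) cards and look
// again". The table contents and constants are assumptions made for the
// example (base 8 follows the worked figures in set_remainder_to_point_to_start()).
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const unsigned N_WORDS = 64;   // assumed maximum direct offset value
  // A block whose start lies 5 words before card 1, covering cards 1..12:
  // card 1 stores the direct offset 5; later cards store back-skip entries.
  unsigned char table[13] = {0, 5,
                             N_WORDS + 0, N_WORDS + 0, N_WORDS + 0, N_WORDS + 0,
                             N_WORDS + 0, N_WORDS + 0, N_WORDS + 0,
                             N_WORDS + 1, N_WORDS + 1, N_WORDS + 1, N_WORDS + 1};

  size_t index    = 12;                    // start at the card covering the query
  unsigned offset = table[index];
  while (offset >= N_WORDS) {              // follow back-skips toward the offset card
    size_t n_cards_back = (size_t)1 << (3 * (offset - N_WORDS));
    index -= n_cards_back;
    offset = table[index];
  }
  // index is now the offset card; its entry is the word distance from the
  // card's start back to the block start.
  assert(index == 1 && offset == 5);
  return 0;
}
#endif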

HeapWord* BlockOffsetArrayNonContigSpace::block_start_careful(
  const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  // Must read this exactly once because it can be modified by parallel
  // allocation.
  HeapWord* ub = _unallocated_block;
  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
    assert(ub < _end, "tautology (see above)");
    return ub;
  }

  // Otherwise, find the block start using the table, but taking
  // care (cf block_start_unsafe() above) not to parse any objects/blocks
  // on the cards themselves.
  size_t index = _array->index_for(addr);
  assert(_array->address_for_index(index) == addr,
         "arg should be start of card");

  HeapWord* q = (HeapWord*)addr;
  uint offset;
  do {
    offset = _array->offset_array(index);
    if (offset < N_words) {
      q -= offset;
    } else {
      size_t n_cards_back = entry_to_cards_back(offset);
      q -= (n_cards_back * N_words);
      index -= n_cards_back;
    }
  } while (offset >= N_words);
  assert(q <= addr, "block start should be to left of arg");
  return q;
}

#ifndef PRODUCT
// Verification & debugging - ensure that the offset table reflects the fact
// that the block [blk_start, blk_end) or [blk, blk + size) is a
// single block of storage. NOTE: can't const this because of
// call to non-const do_block_internal() below.
void BlockOffsetArrayNonContigSpace::verify_single_block(
  HeapWord* blk_start, HeapWord* blk_end) {
  if (VerifyBlockOffsetArray) {
    do_block_internal(blk_start, blk_end, Action_check);
  }
}

void BlockOffsetArrayNonContigSpace::verify_single_block(
  HeapWord* blk, size_t size) {
  verify_single_block(blk, blk + size);
}

// Verify that the given block is before _unallocated_block
void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
  HeapWord* blk_start, HeapWord* blk_end) const {
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(blk_start < blk_end, "Block inconsistency?");
    assert(blk_end <= _unallocated_block, "_unallocated_block problem");
  }
}

void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
  HeapWord* blk, size_t size) const {
  verify_not_unallocated(blk, blk + size);
}
#endif // PRODUCT

size_t BlockOffsetArrayNonContigSpace::last_active_index() const {
  if (_unallocated_block == _bottom) {
    return 0;
  } else {
    return _array->index_for(_unallocated_block - 1);
  }
}

//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////

HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  // Otherwise, find the block start using the table.
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid.  If
  // "addr" is past the end, start at the last known one and go forward.
  index = MIN2(index, _next_offset_index-1);
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);    // Extend u_char to uint.
  while (offset > N_words) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = entry_to_cards_back(offset);
    q -= (N_words * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  while (offset == N_words) {
    assert(q >= _sp->bottom(), "Went below bottom!");
    q -= N_words;
    index--;
    offset = _array->offset_array(index);
  }
  assert(offset < N_words, "offset too large");
  q -= offset;
  HeapWord* n = q;

  while (n <= addr) {
    debug_only(HeapWord* last = q);   // for debugging
    q = n;
    n += _sp->block_size(n);
  }
  assert(q <= addr, "wrong order for current and arg");
  assert(addr <= n, "wrong order for arg and next");
  return q;
}

//
//              _next_offset_threshold
//              |   _next_offset_index
//              v   v
//      +-------+-------+-------+-------+-------+
//      | i-1   |   i   | i+1   | i+2   | i+3   |
//      +-------+-------+-------+-------+-------+
//       ( ^    ]
//         block-start
//

void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start,
                                                   HeapWord* blk_end) {
  assert(blk_start != NULL && blk_end > blk_start,
         "phantom block");
  assert(blk_end > _next_offset_threshold,
         "should be past threshold");
  assert(blk_start <= _next_offset_threshold,
         "blk_start should be at or before threshold");
  assert(pointer_delta(_next_offset_threshold, blk_start) <= N_words,
         "offset should be <= BlockOffsetSharedArray::N");
  assert(Universe::heap()->is_in_reserved(blk_start),
         "reference must be into the heap");
  assert(Universe::heap()->is_in_reserved(blk_end-1),
         "limit must be within the heap");
  assert(_next_offset_threshold ==
         _array->_reserved.start() + _next_offset_index*N_words,
         "index must agree with threshold");

  debug_only(size_t orig_next_offset_index = _next_offset_index;)

  // Mark the card that holds the offset into the block.  Note
  // that _next_offset_index and _next_offset_threshold are not
  // updated until the end of this method.
  _array->set_offset_array(_next_offset_index,
                           _next_offset_threshold,
                           blk_start);

  // We need to now mark the subsequent cards that this blk spans.

  // Index of card on which blk ends.
  size_t end_index = _array->index_for(blk_end - 1);

  // Are there more cards left to be updated?
  if (_next_offset_index + 1 <= end_index) {
    HeapWord* rem_st  = _array->address_for_index(_next_offset_index + 1);
    // Calculate rem_end this way because end_index
    // may be the last valid index in the covered region.
    HeapWord* rem_end = _array->address_for_index(end_index) + N_words;
    set_remainder_to_point_to_start(rem_st, rem_end);
  }

  // _next_offset_index and _next_offset_threshold updated here.
  _next_offset_index = end_index + 1;
  // Calculate _next_offset_threshold this way because end_index
  // may be the last valid index in the covered region.
  _next_offset_threshold = _array->address_for_index(end_index) + N_words;
  assert(_next_offset_threshold >= blk_end, "Incorrect offset threshold");

#ifdef ASSERT
  // The offset can be 0 if the block starts on a boundary.  That
  // is checked by an assertion above.
  size_t start_index = _array->index_for(blk_start);
  HeapWord* boundary = _array->address_for_index(start_index);
  assert((_array->offset_array(orig_next_offset_index) == 0 &&
          blk_start == boundary) ||
         (_array->offset_array(orig_next_offset_index) > 0 &&
          _array->offset_array(orig_next_offset_index) <= N_words),
         "offset array should have been set");
  for (size_t j = orig_next_offset_index + 1; j <= end_index; j++) {
    assert(_array->offset_array(j) > 0 &&
           _array->offset_array(j) <= (u_char) (N_words+N_powers-1),
           "offset array should have been set");
  }
#endif
}

HeapWord* BlockOffsetArrayContigSpace::initialize_threshold() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  _next_offset_index = _array->index_for(_bottom);
  _next_offset_index++;
  _next_offset_threshold =
    _array->address_for_index(_next_offset_index);
  return _next_offset_threshold;
}

void BlockOffsetArrayContigSpace::zero_bottom_entry() {
  assert(!Universe::heap()->is_in_reserved(_array->_offset_array),
         "just checking");
  size_t bottom_index = _array->index_for(_bottom);
  _array->set_offset_array(bottom_index, 0);
}

size_t BlockOffsetArrayContigSpace::last_active_index() const {
  // _next_offset_index is a size_t, so guard the subtraction explicitly
  // rather than testing "result >= 0", which is always true for an
  // unsigned type.
  return _next_offset_index == 0 ? 0 : _next_offset_index - 1;
}
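
// Illustrative sketch (not part of the build): the rolling threshold that
// BlockOffsetArrayContigSpace maintains for a bump-pointer space. A newly
// allocated block triggers alloc_block_work() only when it crosses the
// current threshold, and the threshold then advances to the first card
// boundary at or beyond the block's end. Card geometry and the allocation
// sizes are assumptions made for the example.
#if 0
#include <cassert>
#include <cstddef>

int main() {
  const size_t n_words = 64;            // assumed words per card
  size_t threshold = 1 * n_words;       // next card boundary needing an entry
  size_t top = 10;                      // bump-pointer allocation cursor

  // Allocate a few blocks; only those that cross `threshold` would update
  // the offset table, after which the threshold rolls forward.
  size_t sizes[] = {20, 30, 50, 80};
  int updates = 0;
  for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
    size_t blk_end = top + sizes[i];
    if (blk_end > threshold) {          // crosses the boundary: BOT work needed
      updates++;
      size_t end_index = (blk_end - 1) / n_words;
      threshold = (end_index + 1) * n_words;   // next boundary past this block
    }
    top = blk_end;
  }
  assert(updates == 2);                 // blocks [60,110) and [110,190) cross
  assert(threshold == 3 * n_words);
  return 0;
}
#endif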