/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

void ObjectStartArray::initialize(MemRegion reserved_region) {
  // We rely on the assumption that we use the same
  // block size as the card table.
  assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
  assert((int)block_size <= 512, "block_size must be less than or equal to 512");

  // Calculate how much space must be reserved
  _reserved_region = reserved_region;

  size_t bytes_to_reserve = reserved_region.word_size() / block_size_in_words;
  assert(bytes_to_reserve > 0, "Sanity");

  bytes_to_reserve =
    align_size_up(bytes_to_reserve, os::vm_allocation_granularity());

  // Do not use large pages for the backing store. The one large-page region
  // will be used for the heap proper.
  ReservedSpace backing_store(bytes_to_reserve);
  if (!backing_store.is_reserved()) {
    vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
  }

  // We do not commit any memory initially
  if (!_virtual_space.initialize(backing_store, 0)) {
    vm_exit_during_initialization("Could not commit space for ObjectStartArray");
  }

  _raw_base = (jbyte*)_virtual_space.low_boundary();
  if (_raw_base == NULL) {
    vm_exit_during_initialization("Could not get raw_base address");
  }

  _offset_base = _raw_base - (size_t(reserved_region.start()) >> block_shift);

  _covered_region.set_start(reserved_region.start());
  _covered_region.set_word_size(0);

  _blocks_region.set_start((HeapWord*)_raw_base);
  _blocks_region.set_word_size(0);
}
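
// The biased _offset_base set up in initialize() lets block_for_addr() map a
// heap address to its table slot with a single shift, with no per-lookup
// subtraction of the heap base. A minimal sketch of the lookup (following
// the declaration in objectStartArray.hpp; asserts elided here):
//
//   jbyte* block_for_addr(HeapWord* p) const {
//     // _offset_base == _raw_base - (heap_start >> block_shift), so this
//     // equals _raw_base + ((p - heap_start) >> block_shift).
//     return &_offset_base[uintptr_t(p) >> block_shift];
//   }
//
// Worked sizing example: with 512-byte blocks (block_shift == 9) and 8-byte
// HeapWords, block_size_in_words == 64, so a 1 GB reserved heap needs
// 2^27 words / 64 == 2 MB of table, one byte per block.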

void ObjectStartArray::set_covered_region(MemRegion mr) {
  assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
  assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");

  HeapWord* low_bound  = mr.start();
  HeapWord* high_bound = mr.end();
  assert((uintptr_t(low_bound)  & (block_size - 1)) == 0, "heap must start at block boundary");
  assert((uintptr_t(high_bound) & (block_size - 1)) == 0, "heap must end at block boundary");

  size_t requested_blocks_size_in_bytes = mr.word_size() / block_size_in_words;

  // Only commit memory in page-sized chunks
  requested_blocks_size_in_bytes =
    align_size_up(requested_blocks_size_in_bytes, os::vm_page_size());

  _covered_region = mr;

  size_t current_blocks_size_in_bytes = _blocks_region.byte_size();

  if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
    // Expand
    size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
    if (!_virtual_space.expand_by(expand_by)) {
      vm_exit_out_of_memory(expand_by, "object start array expansion");
    }
    // Clear *only* the newly allocated region
    memset(_blocks_region.end(), clean_block, expand_by);
  }

  if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
    // Shrink
    size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
    _virtual_space.shrink_by(shrink_by);
  }

  _blocks_region.set_word_size(requested_blocks_size_in_bytes / sizeof(HeapWord));

  assert(requested_blocks_size_in_bytes % sizeof(HeapWord) == 0, "Block table not expanded in word sized increment");
  assert(requested_blocks_size_in_bytes == _blocks_region.byte_size(), "Sanity");
  assert(block_for_addr(low_bound) == &_raw_base[0], "Checking start of map");
  assert(block_for_addr(high_bound - 1) <= &_raw_base[_blocks_region.byte_size() - 1], "Checking end of map");
}

void ObjectStartArray::reset() {
  memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
}
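
// A worked example of the commit sizing in set_covered_region() above,
// assuming 512-byte blocks and a 4 KB os::vm_page_size(): covering
// 10,240,000 bytes of heap requires 10,240,000 / 512 = 20,000 table bytes,
// which align_size_up() rounds to 20,480 bytes (five pages). Growing the
// covered region commits and clears only the delta past the old
// _blocks_region.end(); shrinking uncommits the tail. The table base never
// moves, which is why the covered region must always start at the
// reserved region's start.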

bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
                                              HeapWord* end_addr) const {
  assert(start_addr <= end_addr, "range is wrong");
  if (start_addr > end_addr) {
    return false;
  }

  jbyte* start_block = block_for_addr(start_addr);
  jbyte* end_block = block_for_addr(end_addr);

  for (jbyte* block = start_block; block <= end_block; block++) {
    if (*block != clean_block) {
      return true;
    }
  }
  // No object starts in this slice; verify this using
  // more traditional methods:
  assert(object_start(end_addr - 1) <= start_addr,
         "Oops an object does start in this slice?");
  return false;
}
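
// A minimal usage sketch (the caller is hypothetical, not from this file):
// card scanners can consult object_starts_in_range() to skip a slice that
// is interior to a single large object, since no header begins inside it.
//
//   void scan_slice(ObjectStartArray* start_array,
//                   HeapWord* slice_start, HeapWord* slice_end) {
//     if (!start_array->object_starts_in_range(slice_start, slice_end)) {
//       return; // slice holds no object start; nothing begins here
//     }
//     // ... locate the first header via object_start() and scan from there
//   }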