src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp

author:      jcoomes
date:        Thu, 30 May 2013 13:04:51 -0700
changeset:   5201:5534bd30c151
parent:      4993:746b070f5022
child:       6198:55fb97c4c58d
permissions: -rw-r--r--

6725714: par compact - add a table to speed up bitmap searches
Reviewed-by: jmasa, tschatzl

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"

void ObjectStartArray::initialize(MemRegion reserved_region) {
  // This code assumes that the blocks are the same size as the cards
  // in the card table.
  assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
  assert((int)block_size <= 512, "block_size must be less than or equal to 512");

  // Calculate how much space must be reserved
  _reserved_region = reserved_region;

  size_t bytes_to_reserve = reserved_region.word_size() / block_size_in_words;
  assert(bytes_to_reserve > 0, "Sanity");

  bytes_to_reserve =
    align_size_up(bytes_to_reserve, os::vm_allocation_granularity());

  // Do not use large pages for the backing store. The one large-page region
  // will be used for the heap proper.
  ReservedSpace backing_store(bytes_to_reserve);
  if (!backing_store.is_reserved()) {
    vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
  }
  MemTracker::record_virtual_memory_type((address)backing_store.base(), mtGC);

  // We do not commit any memory initially
  if (!_virtual_space.initialize(backing_store, 0)) {
    vm_exit_during_initialization("Could not commit space for ObjectStartArray");
  }

  _raw_base = (jbyte*)_virtual_space.low_boundary();

  if (_raw_base == NULL) {
    vm_exit_during_initialization("Could not get raw_base address");
  }

  MemTracker::record_virtual_memory_type((address)_raw_base, mtGC);

  _offset_base = _raw_base - (size_t(reserved_region.start()) >> block_shift);

  _covered_region.set_start(reserved_region.start());
  _covered_region.set_word_size(0);

  _blocks_region.set_start((HeapWord*)_raw_base);
  _blocks_region.set_word_size(0);
}
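
// Illustrative sketch (not part of the original file): the biased
// _offset_base makes a block lookup a single shift-and-index, with no
// per-lookup subtraction of the heap base. Assuming block_size == 512
// bytes (so block_shift == 9) and a hypothetical heap base:
//
//   heap_base    = 0x0000000700000000
//   _offset_base = _raw_base - (heap_base >> 9)
//
//   jbyte* block = &_offset_base[uintptr_t(p) >> block_shift];
//
// which, because heap_base is block-aligned, equals
//   _raw_base + ((p - heap_base) >> block_shift)
// i.e. the table entry for p's block, starting at _raw_base[0].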

void ObjectStartArray::set_covered_region(MemRegion mr) {
  assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
  assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");

  HeapWord* low_bound  = mr.start();
  HeapWord* high_bound = mr.end();
  assert((uintptr_t(low_bound)  & (block_size - 1)) == 0, "heap must start at block boundary");
  assert((uintptr_t(high_bound) & (block_size - 1)) == 0, "heap must end at block boundary");

  size_t requested_blocks_size_in_bytes = mr.word_size() / block_size_in_words;

  // Only commit memory in page-sized chunks
  requested_blocks_size_in_bytes =
    align_size_up(requested_blocks_size_in_bytes, os::vm_page_size());

  _covered_region = mr;

  size_t current_blocks_size_in_bytes = _blocks_region.byte_size();

  if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
    // Expand
    size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
    if (!_virtual_space.expand_by(expand_by)) {
      vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion");
    }
    // Clear *only* the newly allocated region
    memset(_blocks_region.end(), clean_block, expand_by);
  }

  if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
    // Shrink
    size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
    _virtual_space.shrink_by(shrink_by);
  }

  _blocks_region.set_word_size(requested_blocks_size_in_bytes / sizeof(HeapWord));

  assert(requested_blocks_size_in_bytes % sizeof(HeapWord) == 0, "Block table not expanded in word-sized increments");
  assert(requested_blocks_size_in_bytes == _blocks_region.byte_size(), "Sanity");
  assert(block_for_addr(low_bound) == &_raw_base[0], "Checking start of map");
  assert(block_for_addr(high_bound - 1) <= &_raw_base[_blocks_region.byte_size() - 1], "Checking end of map");
}
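
// Sizing sketch (illustrative; assumes 8-byte HeapWords and 512-byte
// blocks, i.e. block_size_in_words == 64): covering a 64 MB region needs
//
//   (64 MB / 8 bytes-per-word) / 64 words-per-block = 128 KB
//
// of table, which align_size_up() then rounds to the OS page size before
// it is committed through _virtual_space.expand_by().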

void ObjectStartArray::reset() {
  memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
}

bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
                                              HeapWord* end_addr) const {
  assert(start_addr <= end_addr, "range is wrong");
  if (start_addr > end_addr) {
    return false;
  }

  jbyte* start_block = block_for_addr(start_addr);
  jbyte* end_block = block_for_addr(end_addr);

  for (jbyte* block = start_block; block <= end_block; block++) {
    if (*block != clean_block) {
      return true;
    }
  }
  // No object starts in this slice; verify this directly: the object
  // containing the last word of the range must have started at or before
  // start_addr, so nothing can start inside the slice.
  assert(end_addr == start_addr ||
         object_start(end_addr - 1) <= start_addr,
         "Oops, an object does start in this slice?");
  return false;
}
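
// Usage sketch (hypothetical caller, not part of this file): a scanner of
// dirty-card slices can use this query to skip decoding when no object
// begins in the slice:
//
//   if (!start_array->object_starts_in_range(slice_start, slice_end)) {
//     // The slice lies entirely inside an object that started earlier;
//     // locate that object once via object_start() and process it.
//     oop obj = oop(start_array->object_start(slice_start));
//     ...
//   }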
