src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp

author       johnc
date         Thu, 22 Sep 2011 10:57:37 -0700
changeset    3175:4dfb2df418f2
parent       2314:f95d63e2154a
child        3900:d2a62e0f25eb
permissions  -rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate. (A hypothetical sketch of this flow follows below.)
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
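
The summary above describes the mechanism only in prose. Below is a minimal, self-contained C++ sketch of the idea under stated assumptions: the names StwRefProcessor, EvacuateClosure and FakeOop are hypothetical stand-ins, not the actual HotSpot classes. It illustrates the point that the scan/copy closure used during an evacuation pause carries the stop-the-world reference processor, so any java.lang.ref.Reference instance it visits is "discovered" and handled once copying completes.

// Hypothetical stand-ins for illustration only; not the real HotSpot types.
#include <vector>

struct FakeOop { bool is_reference_object; };   // placeholder for an oop

// Stand-in for the STW reference processor: it only records discovered
// Reference objects so they can be processed after evacuation.
class StwRefProcessor {
  std::vector<FakeOop*> _discovered;
 public:
  void discover(FakeOop* ref) { _discovered.push_back(ref); }
  void process_discovered_refs() {
    // Keep or clear referents and enqueue cleared references (elided),
    // then forget the discovered list.
    _discovered.clear();
  }
};

// Scan/copy closure with the reference processor embedded, as described
// above: every object visited during the pause passes through do_object().
class EvacuateClosure {
  StwRefProcessor* _rp;
 public:
  explicit EvacuateClosure(StwRefProcessor* rp) : _rp(rp) {}
  void do_object(FakeOop* obj) {
    // ... copy obj into its destination region (elided) ...
    if (obj->is_reference_object) {
      _rp->discover(obj);   // defer referent handling to end of pause
    }
  }
};

At the end of the pause, a single call to process_discovered_refs() would mirror the "processed at the end of the evacuation pause" step described in the summary.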

duke@435 1 /*
stefank@2314 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
stefank@2314 27 #include "memory/allocation.inline.hpp"
stefank@2314 28 #include "memory/cardTableModRefBS.hpp"
stefank@2314 29 #include "oops/oop.inline.hpp"
stefank@2314 30 #include "runtime/java.hpp"
duke@435 31
duke@435 32 void ObjectStartArray::initialize(MemRegion reserved_region) {
duke@435 33 // We're based on the assumption that we use the same
duke@435 34 // size blocks as the card table.
duke@435 35 assert((int)block_size == (int)CardTableModRefBS::card_size, "Sanity");
duke@435 36 assert((int)block_size <= 512, "block_size must be less than or equal to 512");
duke@435 37
duke@435 38 // Calculate how much space must be reserved
duke@435 39 _reserved_region = reserved_region;
duke@435 40
duke@435 41 size_t bytes_to_reserve = reserved_region.word_size() / block_size_in_words;
duke@435 42 assert(bytes_to_reserve > 0, "Sanity");
duke@435 43
duke@435 44 bytes_to_reserve =
duke@435 45 align_size_up(bytes_to_reserve, os::vm_allocation_granularity());
duke@435 46
duke@435 47 // Do not use large-pages for the backing store. The one large page region
duke@435 48 // will be used for the heap proper.
duke@435 49 ReservedSpace backing_store(bytes_to_reserve);
duke@435 50 if (!backing_store.is_reserved()) {
duke@435 51 vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
duke@435 52 }
duke@435 53
duke@435 54 // We do not commit any memory initially
duke@435 55 if (!_virtual_space.initialize(backing_store, 0)) {
duke@435 56 vm_exit_during_initialization("Could not commit space for ObjectStartArray");
duke@435 57 }
duke@435 58
duke@435 59 _raw_base = (jbyte*)_virtual_space.low_boundary();
duke@435 60 if (_raw_base == NULL) {
duke@435 61 vm_exit_during_initialization("Could not get raw_base address");
duke@435 62 }
duke@435 63
duke@435 64 _offset_base = _raw_base - (size_t(reserved_region.start()) >> block_shift);
duke@435 65
duke@435 66 _covered_region.set_start(reserved_region.start());
duke@435 67 _covered_region.set_word_size(0);
duke@435 68
duke@435 69 _blocks_region.set_start((HeapWord*)_raw_base);
duke@435 70 _blocks_region.set_word_size(0);
duke@435 71 }
duke@435 72
duke@435 73 void ObjectStartArray::set_covered_region(MemRegion mr) {
duke@435 74 assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
duke@435 75 assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");
duke@435 76
duke@435 77 HeapWord* low_bound = mr.start();
duke@435 78 HeapWord* high_bound = mr.end();
duke@435 79 assert((uintptr_t(low_bound) & (block_size - 1)) == 0, "heap must start at block boundary");
duke@435 80 assert((uintptr_t(high_bound) & (block_size - 1)) == 0, "heap must end at block boundary");
duke@435 81
duke@435 82 size_t requested_blocks_size_in_bytes = mr.word_size() / block_size_in_words;
duke@435 83
duke@435 84 // Only commit memory in page sized chunks
duke@435 85 requested_blocks_size_in_bytes =
duke@435 86 align_size_up(requested_blocks_size_in_bytes, os::vm_page_size());
duke@435 87
duke@435 88 _covered_region = mr;
duke@435 89
duke@435 90 size_t current_blocks_size_in_bytes = _blocks_region.byte_size();
duke@435 91
duke@435 92 if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
duke@435 93 // Expand
duke@435 94 size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
duke@435 95 if (!_virtual_space.expand_by(expand_by)) {
duke@435 96 vm_exit_out_of_memory(expand_by, "object start array expansion");
duke@435 97 }
duke@435 98 // Clear *only* the newly allocated region
duke@435 99 memset(_blocks_region.end(), clean_block, expand_by);
duke@435 100 }
duke@435 101
duke@435 102 if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
duke@435 103 // Shrink
duke@435 104 size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
duke@435 105 _virtual_space.shrink_by(shrink_by);
duke@435 106 }
duke@435 107
duke@435 108 _blocks_region.set_word_size(requested_blocks_size_in_bytes / sizeof(HeapWord));
duke@435 109
duke@435 110 assert(requested_blocks_size_in_bytes % sizeof(HeapWord) == 0, "Block table not expanded in word sized increment");
duke@435 111 assert(requested_blocks_size_in_bytes == _blocks_region.byte_size(), "Sanity");
duke@435 112 assert(block_for_addr(low_bound) == &_raw_base[0], "Checking start of map");
duke@435 113 assert(block_for_addr(high_bound-1) <= &_raw_base[_blocks_region.byte_size()-1], "Checking end of map");
duke@435 114 }
duke@435 115
duke@435 116 void ObjectStartArray::reset() {
duke@435 117 memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
duke@435 118 }
duke@435 119
duke@435 120
duke@435 121 bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
duke@435 122 HeapWord* end_addr) const {
duke@435 123 assert(start_addr <= end_addr, "range is wrong");
duke@435 124 if (start_addr > end_addr) {
duke@435 125 return false;
duke@435 126 }
duke@435 127
duke@435 128 jbyte* start_block = block_for_addr(start_addr);
duke@435 129 jbyte* end_block = block_for_addr(end_addr);
duke@435 130
duke@435 131 for (jbyte* block = start_block; block <= end_block; block++) {
duke@435 132 if (*block != clean_block) {
duke@435 133 return true;
duke@435 134 }
duke@435 135 }
duke@435 136 // No object starts in this slice; verify this using
duke@435 137 // more traditional methods:
duke@435 138 assert(object_start(end_addr - 1) <= start_addr,
duke@435 139 "Oops an object does start in this slice?");
duke@435 140 return false;
duke@435 141 }