/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}
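
// Illustrative sizing only (not used by the VM), assuming the common
// 512-byte card: a 512 MB heap has 512 MB / 512 B = 1M cards, so
// compute_byte_map_size() returns roughly 1 MB plus the guard card,
// rounded up to MAX2(_page_size, os::vm_allocation_granularity()).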

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL),
  // LNC functionality
  _lowest_non_clean(NULL),
  _lowest_non_clean_chunk_size(NULL),
  _lowest_non_clean_base_chunk_index(NULL),
  _last_LNC_resizing_collection(NULL)
{
  _kind = BarrierSet::CardTableModRef;

  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
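
  // A minimal sketch (for illustration only) of the mapping checked by the
  // two asserts above, assuming card_shift == 9 (512-byte cards); the
  // hypothetical helper below mirrors byte_for():
  //
  //   jbyte* card_byte_for(jbyte* biased_base, HeapWord* addr) {
  //     return biased_base + (uintptr_t(addr) >> 9);
  //   }
  //
  // e.g. with low_bound == 0x80000000, card_byte_for(byte_map_base, low_bound)
  // == byte_map_base + 0x400000 == &_byte_map[0], by the bias computed above.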

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < _max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j]   = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region:
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}
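
// For orientation, a minimal sketch of what inline_write_ref_field() (defined
// in the header) does: dirty the one card byte covering the updated field.
// Illustrative only; the real code is templated and honors the release flag
// with an ordered store.
//
//   jbyte* byte = byte_for(field);  // (field address >> card_shift), biased
//   *byte = dirty_card;             // mark the covering card dirty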

void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers().  n_par_threads can be set to 0 to
    // turn off parallelism.  For example when this code is called as
    // part of verification and SharedHeap::process_roots() is being
    // used, then n_par_threads() may have been set to 0.  active_workers
    // is not overloaded with the meaning that it is a switch to disable
    // parallelism and so keeps the meaning of the number of
    // active gc workers.  If parallelism has not been shut off by
    // setting n_par_threads to 0, then n_par_threads should be
    // equal to active_workers.  When a different mechanism for shutting
    // off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    // This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism.
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    // [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    // [11] CompactingPermGenGen::younger_refs_iterate()
    // [12] CardTableRS::younger_refs_iterate()
    // [13] SharedHeap::process_strong_roots()
    // [14] G1CollectedHeap::verify()
    // [15] Universe::verify()
    // [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel.  The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
         (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}
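
// Worked example of the conservative start rounding above (assuming 512-byte
// cards): byte_after(mr.start() - 1) is one entry past the card holding
// mr.start() - 1. If mr.start() is card-aligned, that is byte_for(mr.start())
// itself; if mr.start() falls mid-card, it is the next card's entry, so a
// first card only partially inside the region is never cleaned.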

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}
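
// Hypothetical usage sketch (not part of the VM): a MemRegionClosure that
// totals the words covered by dirty card ranges, driven through
// dirty_card_iterate(). All names below are illustrative.
//
//   class CountDirtyClosure : public MemRegionClosure {
//    public:
//     size_t _words;
//     CountDirtyClosure() : _words(0) {}
//     void do_MemRegion(MemRegion mr) { _words += mr.word_size(); }
//   };
//
//   CountDirtyClosure count;
//   ct_bs->dirty_card_iterate(heap_region, &count);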

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}
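
// Illustrative arithmetic (platform-dependent): with 512-byte cards and a
// 4 KB page, the constraint is 512 * 4096 = 2 MB. Roughly, aligning the heap
// this strongly keeps page-sized chunks of the byte map corresponding to
// card-aligned chunks of heap.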

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start    = byte_for(mr.start());
  jbyte* end      = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        tty->print_cr("==   %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], "
                    "val: %d", p2i(curr), p2i(addr_for(curr)),
                    p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}