Wed, 26 Jun 2013 16:58:37 +0200
8013590: NPG: Add a memory pool MXBean for Metaspace
Reviewed-by: jmasa, mgerdin
duke@435 | 1 | /* |
minqi@5103 | 2 | * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "memory/allocation.inline.hpp" |
stefank@2314 | 27 | #include "memory/cardTableModRefBS.hpp" |
stefank@2314 | 28 | #include "memory/cardTableRS.hpp" |
stefank@2314 | 29 | #include "memory/sharedHeap.hpp" |
stefank@2314 | 30 | #include "memory/space.hpp" |
stefank@2314 | 31 | #include "memory/space.inline.hpp" |
stefank@2314 | 32 | #include "memory/universe.hpp" |
stefank@2314 | 33 | #include "runtime/java.hpp" |
stefank@2314 | 34 | #include "runtime/mutexLocker.hpp" |
stefank@2314 | 35 | #include "runtime/virtualspace.hpp" |
zgu@3900 | 36 | #include "services/memTracker.hpp" |
jprovino@4542 | 37 | #include "utilities/macros.hpp" |
stefank@2314 | 38 | #ifdef COMPILER1 |
stefank@2314 | 39 | #include "c1/c1_LIR.hpp" |
stefank@2314 | 40 | #include "c1/c1_LIRGenerator.hpp" |
stefank@2314 | 41 | #endif |
stefank@2314 | 42 | |
duke@435 | 43 | // This kind of "BarrierSet" allows a "CollectedHeap" to detect and |
duke@435 | 44 | // enumerate ref fields that have been modified (since the last |
duke@435 | 45 | // enumeration). |
duke@435 | 46 | |
duke@435 | 47 | size_t CardTableModRefBS::cards_required(size_t covered_words) |
duke@435 | 48 | { |
duke@435 | 49 | // Add one for a guard card, used to detect errors. |
duke@435 | 50 | const size_t words = align_size_up(covered_words, card_size_in_words); |
duke@435 | 51 | return words / card_size_in_words + 1; |
duke@435 | 52 | } |
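As a standalone, hedged sketch of the arithmetic above (the 64-word card below is an assumption based on the usual 512-byte card and 8-byte HeapWord; the real constants come from the card table class, not this snippet):

    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-ins for card_size_in_words and align_size_up().
    static const size_t kCardSizeInWords = 64;   // assumed: 512-byte cards, 8-byte words

    static size_t align_up(size_t x, size_t a) { return (x + a - 1) & ~(a - 1); }

    // Mirrors cards_required(): one card-table byte per card, plus one guard card.
    static size_t cards_required(size_t covered_words) {
      return align_up(covered_words, kCardSizeInWords) / kCardSizeInWords + 1;
    }

    int main() {
      size_t heap_words = (256u * 1024 * 1024) / 8;   // a 256 MB heap, in words
      printf("cards (including guard): %zu\n", cards_required(heap_words));  // 524289
      return 0;
    }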
duke@435 | 53 | |
duke@435 | 54 | size_t CardTableModRefBS::compute_byte_map_size() |
duke@435 | 55 | { |
duke@435 | 56 | assert(_guard_index == cards_required(_whole_heap.word_size()) - 1, |
duke@435 | 57 | "uninitialized, check declaration order"); |
duke@435 | 58 | assert(_page_size != 0, "uninitialized, check declaration order"); |
duke@435 | 59 | const size_t granularity = os::vm_allocation_granularity(); |
duke@435 | 60 | return align_size_up(_guard_index + 1, MAX2(_page_size, granularity)); |
duke@435 | 61 | } |
duke@435 | 62 | |
duke@435 | 63 | CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap, |
duke@435 | 64 | int max_covered_regions): |
duke@435 | 65 | ModRefBarrierSet(max_covered_regions), |
duke@435 | 66 | _whole_heap(whole_heap), |
duke@435 | 67 | _guard_index(cards_required(whole_heap.word_size()) - 1), |
duke@435 | 68 | _last_valid_index(_guard_index - 1), |
jcoomes@456 | 69 | _page_size(os::vm_page_size()), |
duke@435 | 70 | _byte_map_size(compute_byte_map_size()) |
duke@435 | 71 | { |
duke@435 | 72 | _kind = BarrierSet::CardTableModRef; |
duke@435 | 73 | |
duke@435 | 74 | HeapWord* low_bound = _whole_heap.start(); |
duke@435 | 75 | HeapWord* high_bound = _whole_heap.end(); |
duke@435 | 76 | assert((uintptr_t(low_bound) & (card_size - 1)) == 0, "heap must start at card boundary"); |
duke@435 | 77 | assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary"); |
duke@435 | 78 | |
duke@435 | 79 | assert(card_size <= 512, "card_size must be at most 512"); // why? |
duke@435 | 80 | |
dcubed@4967 | 81 | _covered = new MemRegion[max_covered_regions]; |
dcubed@4967 | 82 | _committed = new MemRegion[max_covered_regions]; |
minqi@5103 | 83 | if (_covered == NULL || _committed == NULL) { |
duke@435 | 84 | vm_exit_during_initialization("couldn't alloc card table covered region set."); |
duke@435 | 85 | } |
minqi@5103 | 86 | |
dcubed@4967 | 87 | _cur_covered_regions = 0; |
duke@435 | 88 | const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 : |
duke@435 | 89 | MAX2(_page_size, (size_t) os::vm_allocation_granularity()); |
duke@435 | 90 | ReservedSpace heap_rs(_byte_map_size, rs_align, false); |
zgu@3900 | 91 | |
zgu@3900 | 92 | MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC); |
zgu@3900 | 93 | |
duke@435 | 94 | os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1, |
duke@435 | 95 | _page_size, heap_rs.base(), heap_rs.size()); |
duke@435 | 96 | if (!heap_rs.is_reserved()) { |
duke@435 | 97 | vm_exit_during_initialization("Could not reserve enough space for the " |
duke@435 | 98 | "card marking array"); |
duke@435 | 99 | } |
duke@435 | 100 | |
duke@435 | 101 | // The assembler store_check code will do an unsigned shift of the oop, |
duke@435 | 102 | // then add it to byte_map_base, i.e. |
duke@435 | 103 | // |
duke@435 | 104 | // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift) |
duke@435 | 105 | _byte_map = (jbyte*) heap_rs.base(); |
duke@435 | 106 | byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); |
duke@435 | 107 | assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map"); |
duke@435 | 108 | assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map"); |
duke@435 | 109 | |
duke@435 | 110 | jbyte* guard_card = &_byte_map[_guard_index]; |
duke@435 | 111 | uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size); |
duke@435 | 112 | _guard_region = MemRegion((HeapWord*)guard_page, _page_size); |
dcubed@5255 | 113 | os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size, |
dcubed@5255 | 114 | !ExecMem, "card table last card"); |
duke@435 | 115 | *guard_card = last_card; |
duke@435 | 116 | |
duke@435 | 117 | _lowest_non_clean = |
zgu@3900 | 118 | NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC); |
duke@435 | 119 | _lowest_non_clean_chunk_size = |
zgu@3900 | 120 | NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC); |
duke@435 | 121 | _lowest_non_clean_base_chunk_index = |
zgu@3900 | 122 | NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC); |
duke@435 | 123 | _last_LNC_resizing_collection = |
zgu@3900 | 124 | NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC); |
duke@435 | 125 | if (_lowest_non_clean == NULL |
duke@435 | 126 | || _lowest_non_clean_chunk_size == NULL |
duke@435 | 127 | || _lowest_non_clean_base_chunk_index == NULL |
duke@435 | 128 | || _last_LNC_resizing_collection == NULL) |
duke@435 | 129 | vm_exit_during_initialization("couldn't allocate an LNC array."); |
minqi@5103 | 130 | for (int i = 0; i < max_covered_regions; i++) { |
duke@435 | 131 | _lowest_non_clean[i] = NULL; |
duke@435 | 132 | _lowest_non_clean_chunk_size[i] = 0; |
duke@435 | 133 | _last_LNC_resizing_collection[i] = -1; |
duke@435 | 134 | } |
duke@435 | 135 | |
duke@435 | 136 | if (TraceCardTableModRefBS) { |
duke@435 | 137 | gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: "); |
duke@435 | 138 | gclog_or_tty->print_cr(" " |
duke@435 | 139 | " &_byte_map[0]: " INTPTR_FORMAT |
duke@435 | 140 | " &_byte_map[_last_valid_index]: " INTPTR_FORMAT, |
duke@435 | 141 | &_byte_map[0], |
duke@435 | 142 | &_byte_map[_last_valid_index]); |
duke@435 | 143 | gclog_or_tty->print_cr(" " |
duke@435 | 144 | " byte_map_base: " INTPTR_FORMAT, |
duke@435 | 145 | byte_map_base); |
duke@435 | 146 | } |
duke@435 | 147 | } |
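The biased byte_map_base set up above can be checked in isolation: byte_for(addr) is byte_map_base + (addr >> card_shift), and the heap's low bound maps to &_byte_map[0]. A minimal sketch, assuming a 9-bit card shift and a made-up, card-aligned heap base (the out-of-range bias is the same trick the VM itself relies on):

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned card_shift = 9;                  // assumed: log2 of a 512-byte card
      static unsigned char byte_map[1024] = {0};      // stand-in card table
      uintptr_t low_bound = 0x40000000;               // pretend heap start (card aligned)

      // Bias the base so that (addr >> card_shift) indexes it directly.
      unsigned char* byte_map_base = byte_map - (low_bound >> card_shift);

      uintptr_t addr = low_bound + 5 * 512 + 40;      // an address inside card 5
      unsigned char* card = byte_map_base + (addr >> card_shift);  // byte_for(addr)

      assert(byte_map_base + (low_bound >> card_shift) == &byte_map[0]);
      assert(card == &byte_map[5]);
      return 0;
    }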
duke@435 | 148 | |
minqi@5103 | 149 | CardTableModRefBS::~CardTableModRefBS() { |
minqi@5103 | 150 | if (_covered) { |
minqi@5103 | 151 | delete[] _covered; |
minqi@5103 | 152 | _covered = NULL; |
minqi@5103 | 153 | } |
minqi@5103 | 154 | if (_committed) { |
minqi@5103 | 155 | delete[] _committed; |
minqi@5103 | 156 | _committed = NULL; |
minqi@5103 | 157 | } |
minqi@5103 | 158 | if (_lowest_non_clean) { |
minqi@5103 | 159 | FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC); |
minqi@5103 | 160 | _lowest_non_clean = NULL; |
minqi@5103 | 161 | } |
minqi@5103 | 162 | if (_lowest_non_clean_chunk_size) { |
minqi@5103 | 163 | FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC); |
minqi@5103 | 164 | _lowest_non_clean_chunk_size = NULL; |
minqi@5103 | 165 | } |
minqi@5103 | 166 | if (_lowest_non_clean_base_chunk_index) { |
minqi@5103 | 167 | FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC); |
minqi@5103 | 168 | _lowest_non_clean_base_chunk_index = NULL; |
minqi@5103 | 169 | } |
minqi@5103 | 170 | if (_last_LNC_resizing_collection) { |
minqi@5103 | 171 | FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC); |
minqi@5103 | 172 | _last_LNC_resizing_collection = NULL; |
minqi@5103 | 173 | } |
minqi@5103 | 174 | } |
minqi@5103 | 175 | |
duke@435 | 176 | int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) { |
duke@435 | 177 | int i; |
duke@435 | 178 | for (i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 179 | if (_covered[i].start() == base) return i; |
duke@435 | 180 | if (_covered[i].start() > base) break; |
duke@435 | 181 | } |
duke@435 | 182 | // If we didn't find it, create a new one. |
duke@435 | 183 | assert(_cur_covered_regions < _max_covered_regions, |
duke@435 | 184 | "too many covered regions"); |
duke@435 | 185 | // Move the ones above up, to maintain sorted order. |
duke@435 | 186 | for (int j = _cur_covered_regions; j > i; j--) { |
duke@435 | 187 | _covered[j] = _covered[j-1]; |
duke@435 | 188 | _committed[j] = _committed[j-1]; |
duke@435 | 189 | } |
duke@435 | 190 | int res = i; |
duke@435 | 191 | _cur_covered_regions++; |
duke@435 | 192 | _covered[res].set_start(base); |
duke@435 | 193 | _covered[res].set_word_size(0); |
duke@435 | 194 | jbyte* ct_start = byte_for(base); |
duke@435 | 195 | uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size); |
duke@435 | 196 | _committed[res].set_start((HeapWord*)ct_start_aligned); |
duke@435 | 197 | _committed[res].set_word_size(0); |
duke@435 | 198 | return res; |
duke@435 | 199 | } |
duke@435 | 200 | |
duke@435 | 201 | int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) { |
duke@435 | 202 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 203 | if (_covered[i].contains(addr)) { |
duke@435 | 204 | return i; |
duke@435 | 205 | } |
duke@435 | 206 | } |
duke@435 | 207 | assert(0, "address outside of heap?"); |
duke@435 | 208 | return -1; |
duke@435 | 209 | } |
duke@435 | 210 | |
duke@435 | 211 | HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const { |
duke@435 | 212 | HeapWord* max_end = NULL; |
duke@435 | 213 | for (int j = 0; j < ind; j++) { |
duke@435 | 214 | HeapWord* this_end = _committed[j].end(); |
duke@435 | 215 | if (this_end > max_end) max_end = this_end; |
duke@435 | 216 | } |
duke@435 | 217 | return max_end; |
duke@435 | 218 | } |
duke@435 | 219 | |
duke@435 | 220 | MemRegion CardTableModRefBS::committed_unique_to_self(int self, |
duke@435 | 221 | MemRegion mr) const { |
duke@435 | 222 | MemRegion result = mr; |
duke@435 | 223 | for (int r = 0; r < _cur_covered_regions; r += 1) { |
duke@435 | 224 | if (r != self) { |
duke@435 | 225 | result = result.minus(_committed[r]); |
duke@435 | 226 | } |
duke@435 | 227 | } |
duke@435 | 228 | // Never include the guard page. |
duke@435 | 229 | result = result.minus(_guard_region); |
duke@435 | 230 | return result; |
duke@435 | 231 | } |
duke@435 | 232 | |
duke@435 | 233 | void CardTableModRefBS::resize_covered_region(MemRegion new_region) { |
duke@435 | 234 | // We don't change the start of a region, only the end. |
duke@435 | 235 | assert(_whole_heap.contains(new_region), |
duke@435 | 236 | "attempt to cover area not in reserved area"); |
duke@435 | 237 | debug_only(verify_guard();) |
jmasa@643 | 238 | // collided is true if the expansion would push into another committed region |
jmasa@643 | 239 | debug_only(bool collided = false;) |
jmasa@441 | 240 | int const ind = find_covering_region_by_base(new_region.start()); |
jmasa@441 | 241 | MemRegion const old_region = _covered[ind]; |
duke@435 | 242 | assert(old_region.start() == new_region.start(), "just checking"); |
duke@435 | 243 | if (new_region.word_size() != old_region.word_size()) { |
duke@435 | 244 | // Commit new or uncommit old pages, if necessary. |
duke@435 | 245 | MemRegion cur_committed = _committed[ind]; |
duke@435 | 246 | // Extend the end of this _committed region |
duke@435 | 247 | // to cover the end of any lower _committed regions. |
duke@435 | 248 | // This forms overlapping regions, but never interior regions. |
jmasa@441 | 249 | HeapWord* const max_prev_end = largest_prev_committed_end(ind); |
duke@435 | 250 | if (max_prev_end > cur_committed.end()) { |
duke@435 | 251 | cur_committed.set_end(max_prev_end); |
duke@435 | 252 | } |
duke@435 | 253 | // Align the end up to a page size (starts are already aligned). |
jmasa@441 | 254 | jbyte* const new_end = byte_after(new_region.last()); |
jmasa@643 | 255 | HeapWord* new_end_aligned = |
jmasa@441 | 256 | (HeapWord*) align_size_up((uintptr_t)new_end, _page_size); |
duke@435 | 257 | assert(new_end_aligned >= (HeapWord*) new_end, |
duke@435 | 258 | "align up, but less"); |
jmasa@1016 | 259 | // Check the other regions (excludes "ind") to ensure that |
jmasa@1016 | 260 | // the new_end_aligned does not intrude onto the committed |
jmasa@1016 | 261 | // space of another region. |
jmasa@643 | 262 | int ri = 0; |
jmasa@643 | 263 | for (ri = 0; ri < _cur_covered_regions; ri++) { |
jmasa@643 | 264 | if (ri != ind) { |
jmasa@643 | 265 | if (_committed[ri].contains(new_end_aligned)) { |
jmasa@1016 | 266 | // The prior check included in the assert |
jmasa@1016 | 267 | // (new_end_aligned >= _committed[ri].start()) |
jmasa@1016 | 268 | // is redundant with the "contains" test. |
jmasa@1016 | 269 | // Any region containing the new end |
jmasa@1016 | 270 | // should start at or beyond the region found (ind) |
jmasa@1016 | 271 | // for the new end (committed regions are not expected to |
jmasa@1016 | 272 | // be proper subsets of other committed regions). |
jmasa@1016 | 273 | assert(_committed[ri].start() >= _committed[ind].start(), |
jmasa@643 | 274 | "New end of committed region is inconsistent"); |
jmasa@643 | 275 | new_end_aligned = _committed[ri].start(); |
jmasa@1016 | 276 | // new_end_aligned can be equal to the start of its |
jmasa@1016 | 277 | // committed region (i.e., of "ind") if a second |
jmasa@1016 | 278 | // region following "ind" also starts at the same location |
jmasa@1016 | 279 | // as "ind". |
jmasa@1016 | 280 | assert(new_end_aligned >= _committed[ind].start(), |
jmasa@643 | 281 | "New end of committed region is before start"); |
jmasa@643 | 282 | debug_only(collided = true;) |
jmasa@643 | 283 | // Should only collide with 1 region |
jmasa@643 | 284 | break; |
jmasa@643 | 285 | } |
jmasa@643 | 286 | } |
jmasa@643 | 287 | } |
jmasa@643 | 288 | #ifdef ASSERT |
jmasa@643 | 289 | for (++ri; ri < _cur_covered_regions; ri++) { |
jmasa@643 | 290 | assert(!_committed[ri].contains(new_end_aligned), |
jmasa@643 | 291 | "New end of committed region is in a second committed region"); |
jmasa@643 | 292 | } |
jmasa@643 | 293 | #endif |
duke@435 | 294 | // The guard page is always committed and should not be committed over. |
jmasa@1322 | 295 | // "guarded" is used for assertion checking below and recalls the fact |
jmasa@1322 | 296 | // that the would-be end of the new committed region would have |
jmasa@1322 | 297 | // penetrated the guard page. |
jmasa@1322 | 298 | HeapWord* new_end_for_commit = new_end_aligned; |
jmasa@1322 | 299 | |
jmasa@1322 | 300 | DEBUG_ONLY(bool guarded = false;) |
jmasa@1322 | 301 | if (new_end_for_commit > _guard_region.start()) { |
jmasa@1322 | 302 | new_end_for_commit = _guard_region.start(); |
jmasa@1322 | 303 | DEBUG_ONLY(guarded = true;) |
jmasa@1322 | 304 | } |
jmasa@643 | 305 | |
duke@435 | 306 | if (new_end_for_commit > cur_committed.end()) { |
duke@435 | 307 | // Must commit new pages. |
jmasa@441 | 308 | MemRegion const new_committed = |
duke@435 | 309 | MemRegion(cur_committed.end(), new_end_for_commit); |
duke@435 | 310 | |
duke@435 | 311 | assert(!new_committed.is_empty(), "Region should not be empty here"); |
dcubed@5255 | 312 | os::commit_memory_or_exit((char*)new_committed.start(), |
dcubed@5255 | 313 | new_committed.byte_size(), _page_size, |
dcubed@5255 | 314 | !ExecMem, "card table expansion"); |
duke@435 | 315 | // Use new_end_aligned (as opposed to new_end_for_commit) because |
duke@435 | 316 | // the cur_committed region may include the guard region. |
duke@435 | 317 | } else if (new_end_aligned < cur_committed.end()) { |
duke@435 | 318 | // Must uncommit pages. |
jmasa@441 | 319 | MemRegion const uncommit_region = |
duke@435 | 320 | committed_unique_to_self(ind, MemRegion(new_end_aligned, |
duke@435 | 321 | cur_committed.end())); |
duke@435 | 322 | if (!uncommit_region.is_empty()) { |
jmasa@1967 | 323 | // It is not safe to uncommit cards if the boundary between |
jmasa@1967 | 324 | // the generations is moving. A shrink can uncommit cards |
jmasa@1967 | 325 | // owned by generation A but being used by generation B. |
jmasa@1967 | 326 | if (!UseAdaptiveGCBoundary) { |
jmasa@1967 | 327 | if (!os::uncommit_memory((char*)uncommit_region.start(), |
jmasa@1967 | 328 | uncommit_region.byte_size())) { |
jmasa@1967 | 329 | assert(false, "Card table contraction failed"); |
jmasa@1967 | 330 | // The call failed so don't change the end of the |
jmasa@1967 | 331 | // committed region. This is better than taking the |
jmasa@1967 | 332 | // VM down. |
jmasa@1967 | 333 | new_end_aligned = _committed[ind].end(); |
jmasa@1967 | 334 | } |
jmasa@1967 | 335 | } else { |
jmasa@643 | 336 | new_end_aligned = _committed[ind].end(); |
duke@435 | 337 | } |
duke@435 | 338 | } |
duke@435 | 339 | } |
duke@435 | 340 | // In any case, we can reset the end of the current committed entry. |
duke@435 | 341 | _committed[ind].set_end(new_end_aligned); |
duke@435 | 342 | |
jmasa@1967 | 343 | #ifdef ASSERT |
jmasa@1967 | 344 | // Check that the last card in the new region is committed according |
jmasa@1967 | 345 | // to the tables. |
jmasa@1967 | 346 | bool covered = false; |
jmasa@1967 | 347 | for (int cr = 0; cr < _cur_covered_regions; cr++) { |
jmasa@1967 | 348 | if (_committed[cr].contains(new_end - 1)) { |
jmasa@1967 | 349 | covered = true; |
jmasa@1967 | 350 | break; |
jmasa@1967 | 351 | } |
jmasa@1967 | 352 | } |
jmasa@1967 | 353 | assert(covered, "Card for end of new region not committed"); |
jmasa@1967 | 354 | #endif |
jmasa@1967 | 355 | |
duke@435 | 356 | // The default of 0 is not necessarily clean cards. |
duke@435 | 357 | jbyte* entry; |
duke@435 | 358 | if (old_region.last() < _whole_heap.start()) { |
duke@435 | 359 | entry = byte_for(_whole_heap.start()); |
duke@435 | 360 | } else { |
duke@435 | 361 | entry = byte_after(old_region.last()); |
duke@435 | 362 | } |
swamyv@924 | 363 | assert(index_for(new_region.last()) < _guard_index, |
duke@435 | 364 | "The guard card will be overwritten"); |
jmasa@643 | 365 | // The commented-out line below would clean only the newly expanded |
jmasa@643 | 366 | // region, not the aligned-up expanded region. |
jmasa@643 | 367 | // jbyte* const end = byte_after(new_region.last()); |
jmasa@643 | 368 | jbyte* const end = (jbyte*) new_end_for_commit; |
jmasa@1322 | 369 | assert((end >= byte_after(new_region.last())) || collided || guarded, |
jmasa@643 | 370 | "Expect to be beyond new region unless impacting another region"); |
duke@435 | 371 | // do nothing if we resized downward. |
jmasa@643 | 372 | #ifdef ASSERT |
jmasa@643 | 373 | for (int ri = 0; ri < _cur_covered_regions; ri++) { |
jmasa@643 | 374 | if (ri != ind) { |
jmasa@643 | 375 | // The end of the new committed region should not |
jmasa@643 | 376 | // be in any existing region unless it matches |
jmasa@643 | 377 | // the start of the next region. |
jmasa@643 | 378 | assert(!_committed[ri].contains(end) || |
jmasa@643 | 379 | (_committed[ri].start() == (HeapWord*) end), |
jmasa@643 | 380 | "Overlapping committed regions"); |
jmasa@643 | 381 | } |
jmasa@643 | 382 | } |
jmasa@643 | 383 | #endif |
duke@435 | 384 | if (entry < end) { |
duke@435 | 385 | memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte))); |
duke@435 | 386 | } |
duke@435 | 387 | } |
duke@435 | 388 | // In any case, the covered size changes. |
duke@435 | 389 | _covered[ind].set_word_size(new_region.word_size()); |
duke@435 | 390 | if (TraceCardTableModRefBS) { |
duke@435 | 391 | gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: "); |
duke@435 | 392 | gclog_or_tty->print_cr(" " |
duke@435 | 393 | " _covered[%d].start(): " INTPTR_FORMAT |
duke@435 | 394 | " _covered[%d].last(): " INTPTR_FORMAT, |
duke@435 | 395 | ind, _covered[ind].start(), |
duke@435 | 396 | ind, _covered[ind].last()); |
duke@435 | 397 | gclog_or_tty->print_cr(" " |
duke@435 | 398 | " _committed[%d].start(): " INTPTR_FORMAT |
duke@435 | 399 | " _committed[%d].last(): " INTPTR_FORMAT, |
duke@435 | 400 | ind, _committed[ind].start(), |
duke@435 | 401 | ind, _committed[ind].last()); |
duke@435 | 402 | gclog_or_tty->print_cr(" " |
duke@435 | 403 | " byte_for(start): " INTPTR_FORMAT |
duke@435 | 404 | " byte_for(last): " INTPTR_FORMAT, |
duke@435 | 405 | byte_for(_covered[ind].start()), |
duke@435 | 406 | byte_for(_covered[ind].last())); |
duke@435 | 407 | gclog_or_tty->print_cr(" " |
duke@435 | 408 | " addr_for(start): " INTPTR_FORMAT |
duke@435 | 409 | " addr_for(last): " INTPTR_FORMAT, |
duke@435 | 410 | addr_for((jbyte*) _committed[ind].start()), |
duke@435 | 411 | addr_for((jbyte*) _committed[ind].last())); |
duke@435 | 412 | } |
jmasa@1967 | 413 | // Touch the last card of the covered region to show that it |
jmasa@1967 | 414 | // is committed (or SEGV). |
ccheung@5259 | 415 | debug_only((void) (*byte_for(_covered[ind].last()));) |
duke@435 | 416 | debug_only(verify_guard();) |
duke@435 | 417 | } |
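To make the end-alignment and guard clamping above concrete, here is a minimal standalone sketch with made-up addresses and an assumed 4 KB page size (it only mirrors the arithmetic, not the commit/uncommit calls):

    #include <cstdint>
    #include <cstdio>

    static uintptr_t align_up(uintptr_t x, uintptr_t a) { return (x + a - 1) & ~(a - 1); }

    int main() {
      const uintptr_t page  = 4096;        // assumed _page_size
      uintptr_t new_end     = 0x8030;      // byte_after(new_region.last()), made up
      uintptr_t guard_start = 0x8000;      // start of the guard page, made up

      // Align the committed end up to a page, then refuse to commit over the guard.
      uintptr_t new_end_aligned    = align_up(new_end, page);                    // 0x9000
      uintptr_t new_end_for_commit =
          (new_end_aligned > guard_start) ? guard_start : new_end_aligned;       // 0x8000

      printf("aligned end: %#lx, commit end: %#lx\n",
             (unsigned long)new_end_aligned, (unsigned long)new_end_for_commit);
      return 0;
    }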
duke@435 | 418 | |
duke@435 | 419 | // Note that these versions are precise! The scanning code has to handle the |
duke@435 | 420 | // fact that the write barrier may be either precise or imprecise. |
duke@435 | 421 | |
coleenp@548 | 422 | void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) { |
duke@435 | 423 | inline_write_ref_field(field, newVal); |
duke@435 | 424 | } |
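For context on "precise": the post-write barrier reduces to a single byte store that dirties the card covering the updated field; an imprecise variant instead dirties the card covering the start of the containing object, so the scanner must then walk the whole object. A hedged sketch of the equivalent store check (the 9-bit shift is an assumption; dirty cards are encoded as 0 in this card table):

    #include <cstdint>

    static unsigned char* byte_map_base;         // biased base, as set up in the constructor
    static const int           kCardShift = 9;   // assumed log2 of the card size
    static const unsigned char kDirty     = 0;   // dirty_card value

    // Precise barrier: dirty the card covering the field slot itself.
    inline void post_write_barrier_precise(void* field) {
      byte_map_base[uintptr_t(field) >> kCardShift] = kDirty;
    }

    // Imprecise barrier: dirty the card covering the start of the object.
    inline void post_write_barrier_imprecise(void* obj_start) {
      byte_map_base[uintptr_t(obj_start) >> kCardShift] = kDirty;
    }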
duke@435 | 425 | |
iveresov@1051 | 426 | /* |
iveresov@1051 | 427 | Claimed and deferred bits are used together in G1 during the evacuation |
iveresov@1051 | 428 | pause. These bits can have the following state transitions: |
iveresov@1051 | 429 | 1. The claimed bit can be put over any other card state, except that |
iveresov@1051 | 430 | the "dirty -> dirty and claimed" transition is checked for in |
iveresov@1051 | 431 | G1 code and is not used. |
iveresov@1051 | 432 | 2. The deferred bit can be set only if the previous state of the card |
iveresov@1051 | 433 | was either clean or claimed. mark_card_deferred() is wait-free. |
iveresov@1051 | 434 | We do not care whether the operation succeeds, because a failure |
iveresov@1051 | 435 | only results in a duplicate entry in the update buffer due to the |
iveresov@1051 | 436 | "cache-miss". So it's not worth spinning. |
iveresov@1051 | 437 | */ |
iveresov@1051 | 438 | |
duke@435 | 439 | |
ysr@777 | 440 | bool CardTableModRefBS::claim_card(size_t card_index) { |
ysr@777 | 441 | jbyte val = _byte_map[card_index]; |
iveresov@1051 | 442 | assert(val != dirty_card_val(), "Shouldn't claim a dirty card"); |
iveresov@1051 | 443 | while (val == clean_card_val() || |
iveresov@1051 | 444 | (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) { |
iveresov@1051 | 445 | jbyte new_val = val; |
iveresov@1051 | 446 | if (val == clean_card_val()) { |
iveresov@1051 | 447 | new_val = (jbyte)claimed_card_val(); |
iveresov@1051 | 448 | } else { |
iveresov@1051 | 449 | new_val = val | (jbyte)claimed_card_val(); |
iveresov@1051 | 450 | } |
iveresov@1051 | 451 | jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val); |
iveresov@1051 | 452 | if (res == val) { |
ysr@777 | 453 | return true; |
iveresov@1051 | 454 | } |
iveresov@1051 | 455 | val = res; |
ysr@777 | 456 | } |
ysr@777 | 457 | return false; |
ysr@777 | 458 | } |
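The masked test in the loop above can be reproduced with plain integers. The card values quoted here are from the companion header as best I recall (clean = -1, dirty = 0, claimed = 2, deferred = 4, clean_card_mask = clean - 31); treat them as an assumption rather than a definition:

    #include <cassert>

    int main() {
      const int clean = -1, claimed = 2;
      const int clean_mask = clean - 31;     // -32: zeros in the low flag bits

      // A clean card does not read as claimed, so the claim loop proceeds.
      assert((clean & (clean_mask | claimed)) != claimed);

      // Once a claimer has installed the claimed value, the masked test
      // identifies the card as claimed and later claimers back off.
      int card = claimed;
      assert((card & (clean_mask | claimed)) == claimed);
      return 0;
    }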
ysr@777 | 459 | |
iveresov@1051 | 460 | bool CardTableModRefBS::mark_card_deferred(size_t card_index) { |
iveresov@1051 | 461 | jbyte val = _byte_map[card_index]; |
iveresov@1051 | 462 | // It's already processed |
iveresov@1051 | 463 | if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) { |
iveresov@1051 | 464 | return false; |
iveresov@1051 | 465 | } |
iveresov@1051 | 466 | // Cached bit can be installed either on a clean card or on a claimed card. |
iveresov@1051 | 467 | jbyte new_val = val; |
iveresov@1051 | 468 | if (val == clean_card_val()) { |
iveresov@1051 | 469 | new_val = (jbyte)deferred_card_val(); |
iveresov@1051 | 470 | } else { |
iveresov@1051 | 471 | if (val & claimed_card_val()) { |
iveresov@1051 | 472 | new_val = val | (jbyte)deferred_card_val(); |
iveresov@1051 | 473 | } |
iveresov@1051 | 474 | } |
iveresov@1051 | 475 | if (new_val != val) { |
iveresov@1051 | 476 | Atomic::cmpxchg(new_val, &_byte_map[card_index], val); |
iveresov@1051 | 477 | } |
iveresov@1051 | 478 | return true; |
iveresov@1051 | 479 | } |
iveresov@1051 | 480 | |
ysr@2819 | 481 | void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp, |
ysr@2819 | 482 | MemRegion mr, |
ysr@2889 | 483 | OopsInGenClosure* cl, |
ysr@2889 | 484 | CardTableRS* ct) { |
duke@435 | 485 | if (!mr.is_empty()) { |
jmasa@3294 | 486 | // Caller (process_strong_roots()) claims that all GC threads |
jmasa@3294 | 487 | // execute this call. With UseDynamicNumberOfGCThreads now all |
jmasa@3294 | 488 | // active GC threads execute this call. The number of active GC |
jmasa@3294 | 489 | // threads needs to be passed to par_non_clean_card_iterate_work() |
jmasa@3294 | 490 | // to get proper partitioning and termination. |
jmasa@3294 | 491 | // |
jmasa@3294 | 492 | // This is an example of where n_par_threads() is used instead |
jmasa@3294 | 493 | // of workers()->active_workers(). n_par_threads can be set to 0 to |
jmasa@3294 | 494 | // turn off parallelism. For example when this code is called as |
jmasa@3294 | 495 | // part of verification and SharedHeap::process_strong_roots() is being |
jmasa@3294 | 496 | // used, then n_par_threads() may have been set to 0. active_workers |
jmasa@3294 | 497 | // is not overloaded as a switch to disable parallelism; it keeps |
jmasa@3294 | 498 | // its meaning as the number of |
jmasa@3294 | 499 | // active gc workers. If parallelism has not been shut off by |
jmasa@3294 | 500 | // setting n_par_threads to 0, then n_par_threads should be |
jmasa@3294 | 501 | // equal to active_workers. When a different mechanism for shutting |
jmasa@3294 | 502 | // off parallelism is used, then active_workers can be used in |
jmasa@3294 | 503 | // place of n_par_threads. |
jmasa@3294 | 504 | // This is an example of a path where n_par_threads is |
jmasa@3294 | 505 | // set to 0 to turn off parallelism. |
jmasa@3294 | 506 | // [7] CardTableModRefBS::non_clean_card_iterate() |
jmasa@3294 | 507 | // [8] CardTableRS::younger_refs_in_space_iterate() |
jmasa@3294 | 508 | // [9] Generation::younger_refs_in_space_iterate() |
jmasa@3294 | 509 | // [10] OneContigSpaceCardGeneration::younger_refs_iterate() |
jmasa@3294 | 510 | // [11] CompactingPermGenGen::younger_refs_iterate() |
jmasa@3294 | 511 | // [12] CardTableRS::younger_refs_iterate() |
jmasa@3294 | 512 | // [13] SharedHeap::process_strong_roots() |
jmasa@3294 | 513 | // [14] G1CollectedHeap::verify() |
jmasa@3294 | 514 | // [15] Universe::verify() |
jmasa@3294 | 515 | // [16] G1CollectedHeap::do_collection_pause_at_safepoint() |
jmasa@3294 | 516 | // |
jmasa@3294 | 517 | int n_threads = SharedHeap::heap()->n_par_threads(); |
jmasa@3294 | 518 | bool is_par = n_threads > 0; |
jmasa@3294 | 519 | if (is_par) { |
jprovino@4542 | 520 | #if INCLUDE_ALL_GCS |
jmasa@3294 | 521 | assert(SharedHeap::heap()->n_par_threads() == |
jmasa@3294 | 522 | SharedHeap::heap()->workers()->active_workers(), "Mismatch"); |
ysr@2889 | 523 | non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads); |
jprovino@4542 | 524 | #else // INCLUDE_ALL_GCS |
duke@435 | 525 | fatal("Parallel gc not supported here."); |
jprovino@4542 | 526 | #endif // INCLUDE_ALL_GCS |
duke@435 | 527 | } else { |
ysr@2819 | 528 | // We do not call the non_clean_card_iterate_serial() version below because |
ysr@2819 | 529 | // we want to clear the cards (which non_clean_card_iterate_serial() does not |
ysr@2889 | 530 | // do for us): clear_cl here does the work of finding contiguous dirty ranges |
ysr@2889 | 531 | // of cards to process and clear. |
ysr@2889 | 532 | |
ysr@2889 | 533 | DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), |
ysr@2889 | 534 | cl->gen_boundary()); |
ysr@2889 | 535 | ClearNoncleanCardWrapper clear_cl(dcto_cl, ct); |
ysr@2889 | 536 | |
ysr@2889 | 537 | clear_cl.do_MemRegion(mr); |
duke@435 | 538 | } |
duke@435 | 539 | } |
duke@435 | 540 | } |
duke@435 | 541 | |
ysr@2819 | 542 | // The iterator itself is not MT-aware, but |
ysr@2819 | 543 | // MT-aware callers and closures can use this to |
ysr@2819 | 544 | // accomplish dirty card iteration in parallel. The |
ysr@2819 | 545 | // iterator itself does not clear the dirty cards, or |
ysr@2819 | 546 | // change their values in any manner. |
ysr@2819 | 547 | void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr, |
ysr@2819 | 548 | MemRegionClosure* cl) { |
jmasa@3294 | 549 | bool is_par = (SharedHeap::heap()->n_par_threads() > 0); |
jmasa@3294 | 550 | assert(!is_par || |
jmasa@3294 | 551 | (SharedHeap::heap()->n_par_threads() == |
jmasa@3294 | 552 | SharedHeap::heap()->workers()->active_workers()), "Mismatch"); |
duke@435 | 553 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 554 | MemRegion mri = mr.intersection(_covered[i]); |
duke@435 | 555 | if (mri.word_size() > 0) { |
duke@435 | 556 | jbyte* cur_entry = byte_for(mri.last()); |
duke@435 | 557 | jbyte* limit = byte_for(mri.start()); |
duke@435 | 558 | while (cur_entry >= limit) { |
duke@435 | 559 | jbyte* next_entry = cur_entry - 1; |
duke@435 | 560 | if (*cur_entry != clean_card) { |
duke@435 | 561 | size_t non_clean_cards = 1; |
duke@435 | 562 | // Should the next card be included in this range of dirty cards? |
duke@435 | 563 | while (next_entry >= limit && *next_entry != clean_card) { |
duke@435 | 564 | non_clean_cards++; |
duke@435 | 565 | cur_entry = next_entry; |
duke@435 | 566 | next_entry--; |
duke@435 | 567 | } |
duke@435 | 568 | // The memory region may not be on a card boundary. So that |
duke@435 | 569 | // objects beyond the end of the region are not processed, make |
duke@435 | 570 | // cur_cards precise with regard to the end of the memory region. |
duke@435 | 571 | MemRegion cur_cards(addr_for(cur_entry), |
duke@435 | 572 | non_clean_cards * card_size_in_words); |
duke@435 | 573 | MemRegion dirty_region = cur_cards.intersection(mri); |
duke@435 | 574 | cl->do_MemRegion(dirty_region); |
duke@435 | 575 | } |
duke@435 | 576 | cur_entry = next_entry; |
duke@435 | 577 | } |
duke@435 | 578 | } |
duke@435 | 579 | } |
duke@435 | 580 | } |
duke@435 | 581 | |
duke@435 | 582 | void CardTableModRefBS::dirty_MemRegion(MemRegion mr) { |
ysr@1526 | 583 | assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); |
ysr@1526 | 584 | assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); |
duke@435 | 585 | jbyte* cur = byte_for(mr.start()); |
duke@435 | 586 | jbyte* last = byte_after(mr.last()); |
duke@435 | 587 | while (cur < last) { |
duke@435 | 588 | *cur = dirty_card; |
duke@435 | 589 | cur++; |
duke@435 | 590 | } |
duke@435 | 591 | } |
duke@435 | 592 | |
ysr@777 | 593 | void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) { |
ysr@1526 | 594 | assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); |
ysr@1526 | 595 | assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); |
duke@435 | 596 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 597 | MemRegion mri = mr.intersection(_covered[i]); |
duke@435 | 598 | if (!mri.is_empty()) dirty_MemRegion(mri); |
duke@435 | 599 | } |
duke@435 | 600 | } |
duke@435 | 601 | |
duke@435 | 602 | void CardTableModRefBS::clear_MemRegion(MemRegion mr) { |
duke@435 | 603 | // Be conservative: only clean cards entirely contained within the |
duke@435 | 604 | // region. |
duke@435 | 605 | jbyte* cur; |
duke@435 | 606 | if (mr.start() == _whole_heap.start()) { |
duke@435 | 607 | cur = byte_for(mr.start()); |
duke@435 | 608 | } else { |
duke@435 | 609 | assert(mr.start() > _whole_heap.start(), "mr is not covered."); |
duke@435 | 610 | cur = byte_after(mr.start() - 1); |
duke@435 | 611 | } |
duke@435 | 612 | jbyte* last = byte_after(mr.last()); |
duke@435 | 613 | memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte))); |
duke@435 | 614 | } |
duke@435 | 615 | |
duke@435 | 616 | void CardTableModRefBS::clear(MemRegion mr) { |
duke@435 | 617 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 618 | MemRegion mri = mr.intersection(_covered[i]); |
duke@435 | 619 | if (!mri.is_empty()) clear_MemRegion(mri); |
duke@435 | 620 | } |
duke@435 | 621 | } |
duke@435 | 622 | |
ysr@777 | 623 | void CardTableModRefBS::dirty(MemRegion mr) { |
ysr@777 | 624 | jbyte* first = byte_for(mr.start()); |
ysr@777 | 625 | jbyte* last = byte_after(mr.last()); |
ysr@777 | 626 | memset(first, dirty_card, last-first); |
ysr@777 | 627 | } |
ysr@777 | 628 | |
ysr@2788 | 629 | // Unlike several other card table methods, dirty_card_iterate() |
ysr@2788 | 630 | // iterates over dirty cards ranges in increasing address order. |
duke@435 | 631 | void CardTableModRefBS::dirty_card_iterate(MemRegion mr, |
duke@435 | 632 | MemRegionClosure* cl) { |
duke@435 | 633 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 634 | MemRegion mri = mr.intersection(_covered[i]); |
duke@435 | 635 | if (!mri.is_empty()) { |
duke@435 | 636 | jbyte *cur_entry, *next_entry, *limit; |
duke@435 | 637 | for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); |
duke@435 | 638 | cur_entry <= limit; |
duke@435 | 639 | cur_entry = next_entry) { |
duke@435 | 640 | next_entry = cur_entry + 1; |
duke@435 | 641 | if (*cur_entry == dirty_card) { |
duke@435 | 642 | size_t dirty_cards; |
duke@435 | 643 | // Accumulate maximal dirty card range, starting at cur_entry |
duke@435 | 644 | for (dirty_cards = 1; |
duke@435 | 645 | next_entry <= limit && *next_entry == dirty_card; |
duke@435 | 646 | dirty_cards++, next_entry++); |
duke@435 | 647 | MemRegion cur_cards(addr_for(cur_entry), |
duke@435 | 648 | dirty_cards*card_size_in_words); |
duke@435 | 649 | cl->do_MemRegion(cur_cards); |
duke@435 | 650 | } |
duke@435 | 651 | } |
duke@435 | 652 | } |
duke@435 | 653 | } |
duke@435 | 654 | } |
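The forward scan above coalesces consecutive dirty cards into maximal ranges before handing each range to the closure. The same loop shape on a plain byte array (the clean/dirty values are stand-ins for clean_card and dirty_card):

    #include <cstdio>

    int main() {
      const signed char kClean = -1, kDirty = 0;
      signed char cards[12] = {kClean, kDirty, kDirty, kClean, kDirty, kDirty,
                               kDirty, kClean, kClean, kDirty, kClean, kClean};
      for (int i = 0; i < 12; ) {
        if (cards[i] == kDirty) {
          int start = i;
          while (i < 12 && cards[i] == kDirty) i++;   // accumulate the maximal run
          printf("dirty range: cards [%d, %d)\n", start, i);
        } else {
          i++;
        }
      }
      return 0;
    }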
duke@435 | 655 | |
ysr@777 | 656 | MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr, |
ysr@777 | 657 | bool reset, |
ysr@777 | 658 | int reset_val) { |
duke@435 | 659 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 660 | MemRegion mri = mr.intersection(_covered[i]); |
duke@435 | 661 | if (!mri.is_empty()) { |
duke@435 | 662 | jbyte* cur_entry, *next_entry, *limit; |
duke@435 | 663 | for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); |
duke@435 | 664 | cur_entry <= limit; |
duke@435 | 665 | cur_entry = next_entry) { |
duke@435 | 666 | next_entry = cur_entry + 1; |
duke@435 | 667 | if (*cur_entry == dirty_card) { |
duke@435 | 668 | size_t dirty_cards; |
duke@435 | 669 | // Accumulate maximal dirty card range, starting at cur_entry |
duke@435 | 670 | for (dirty_cards = 1; |
duke@435 | 671 | next_entry <= limit && *next_entry == dirty_card; |
duke@435 | 672 | dirty_cards++, next_entry++); |
duke@435 | 673 | MemRegion cur_cards(addr_for(cur_entry), |
duke@435 | 674 | dirty_cards*card_size_in_words); |
ysr@777 | 675 | if (reset) { |
ysr@777 | 676 | for (size_t i = 0; i < dirty_cards; i++) { |
ysr@777 | 677 | cur_entry[i] = reset_val; |
ysr@777 | 678 | } |
duke@435 | 679 | } |
duke@435 | 680 | return cur_cards; |
duke@435 | 681 | } |
duke@435 | 682 | } |
duke@435 | 683 | } |
duke@435 | 684 | } |
duke@435 | 685 | return MemRegion(mr.end(), mr.end()); |
duke@435 | 686 | } |
duke@435 | 687 | |
duke@435 | 688 | uintx CardTableModRefBS::ct_max_alignment_constraint() { |
duke@435 | 689 | return card_size * os::vm_page_size(); |
duke@435 | 690 | } |
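As a hedged worked example with the common 512-byte card and 4 KB page, the constraint is 512 * 4096 = 2 MB: a boundary aligned to card_size * page_size maps to a page-aligned card table address, so each covered region's slice of the byte map occupies whole OS pages and can be committed or uncommitted independently by resize_covered_region().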
duke@435 | 691 | |
duke@435 | 692 | void CardTableModRefBS::verify_guard() { |
duke@435 | 693 | // For product build verification |
duke@435 | 694 | guarantee(_byte_map[_guard_index] == last_card, |
duke@435 | 695 | "card table guard has been modified"); |
duke@435 | 696 | } |
duke@435 | 697 | |
duke@435 | 698 | void CardTableModRefBS::verify() { |
duke@435 | 699 | verify_guard(); |
duke@435 | 700 | } |
duke@435 | 701 | |
duke@435 | 702 | #ifndef PRODUCT |
tonyp@2849 | 703 | void CardTableModRefBS::verify_region(MemRegion mr, |
tonyp@2849 | 704 | jbyte val, bool val_equals) { |
tonyp@2849 | 705 | jbyte* start = byte_for(mr.start()); |
tonyp@2849 | 706 | jbyte* end = byte_for(mr.last()); |
tonyp@2849 | 707 | bool failures = false; |
tonyp@2849 | 708 | for (jbyte* curr = start; curr <= end; ++curr) { |
tonyp@2849 | 709 | jbyte curr_val = *curr; |
tonyp@2849 | 710 | bool failed = (val_equals) ? (curr_val != val) : (curr_val == val); |
tonyp@2849 | 711 | if (failed) { |
tonyp@2849 | 712 | if (!failures) { |
tonyp@2849 | 713 | tty->cr(); |
mikael@4668 | 714 | tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]", start, end); |
tonyp@2849 | 715 | tty->print_cr("== %sexpecting value: %d", |
tonyp@2849 | 716 | (val_equals) ? "" : "not ", val); |
tonyp@2849 | 717 | failures = true; |
tonyp@2849 | 718 | } |
tonyp@2849 | 719 | tty->print_cr("== card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], " |
tonyp@2849 | 720 | "val: %d", curr, addr_for(curr), |
tonyp@2849 | 721 | (HeapWord*) (((size_t) addr_for(curr)) + card_size), |
tonyp@2849 | 722 | (int) curr_val); |
tonyp@2849 | 723 | } |
duke@435 | 724 | } |
tonyp@2849 | 725 | guarantee(!failures, "there should not have been any failures"); |
duke@435 | 726 | } |
apetrusenko@1375 | 727 | |
tonyp@2849 | 728 | void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) { |
tonyp@2849 | 729 | verify_region(mr, dirty_card, false /* val_equals */); |
tonyp@2849 | 730 | } |
apetrusenko@1375 | 731 | |
apetrusenko@1375 | 732 | void CardTableModRefBS::verify_dirty_region(MemRegion mr) { |
tonyp@2849 | 733 | verify_region(mr, dirty_card, true /* val_equals */); |
apetrusenko@1375 | 734 | } |
duke@435 | 735 | #endif |
duke@435 | 736 | |
never@3687 | 737 | void CardTableModRefBS::print_on(outputStream* st) const { |
never@3687 | 738 | st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT, |
never@3687 | 739 | _byte_map, _byte_map + _byte_map_size, byte_map_base); |
never@3687 | 740 | } |
never@3687 | 741 | |
duke@435 | 742 | bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) { |
duke@435 | 743 | return |
duke@435 | 744 | CardTableModRefBS::card_will_be_scanned(cv) || |
duke@435 | 745 | _rs->is_prev_nonclean_card_val(cv); |
duke@435 | 746 | }; |
duke@435 | 747 | |
duke@435 | 748 | bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) { |
duke@435 | 749 | return |
duke@435 | 750 | cv != clean_card && |
duke@435 | 751 | (CardTableModRefBS::card_may_have_been_dirty(cv) || |
duke@435 | 752 | CardTableRS::youngergen_may_have_been_dirty(cv)); |
duke@435 | 753 | }; |