Mon, 26 Jan 2009 12:47:21 -0800
6786503: Overflow list performance can be improved
Summary: Avoid overflow list walk in CMS & ParNew when it is unnecessary. Fix a couple of correctness issues, including a C-heap leak, in ParNew at the intersection of promotion failure, work queue overflow and object array chunking. Add stress testing option and related assertion checking.
Reviewed-by: jmasa
duke@435 | 1 | /* |
xdono@631 | 2 | * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | // This kind of "BarrierSet" allows a "CollectedHeap" to detect and |
duke@435 | 26 | // enumerate ref fields that have been modified (since the last |
duke@435 | 27 | // enumeration). |
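// As a rough sketch of the idea (not the exact generated code): every
// reference store is followed by a card mark of the form
//   byte_map_base[uintptr_t(field_address) >> card_shift] = dirty_card;
// so a later scan of the byte map recovers the cards, and hence the ref
// fields, that may have been modified since the previous scan.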
duke@435 | 28 | |
duke@435 | 29 | # include "incls/_precompiled.incl" |
duke@435 | 30 | # include "incls/_cardTableModRefBS.cpp.incl" |
duke@435 | 31 | |
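// Returns the number of card-table entries needed to cover 'covered_words'
// heap words, plus one guard card. For example (assuming 512-byte cards and
// 8-byte HeapWords, so card_size_in_words == 64): covering 1 MiB, i.e.
// 131072 words, requires 131072 / 64 + 1 = 2049 entries.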
duke@435 | 32 | size_t CardTableModRefBS::cards_required(size_t covered_words) |
duke@435 | 33 | { |
duke@435 | 34 | // Add one for a guard card, used to detect errors. |
duke@435 | 35 | const size_t words = align_size_up(covered_words, card_size_in_words); |
duke@435 | 36 | return words / card_size_in_words + 1; |
duke@435 | 37 | } |
duke@435 | 38 | |
duke@435 | 39 | size_t CardTableModRefBS::compute_byte_map_size() |
duke@435 | 40 | { |
duke@435 | 41 | assert(_guard_index == cards_required(_whole_heap.word_size()) - 1, |
duke@435 | 42 | "uninitialized, check declaration order"); |
duke@435 | 43 | assert(_page_size != 0, "uninitialized, check declaration order"); |
duke@435 | 44 | const size_t granularity = os::vm_allocation_granularity(); |
duke@435 | 45 | return align_size_up(_guard_index + 1, MAX2(_page_size, granularity)); |
duke@435 | 46 | } |
duke@435 | 47 | |
duke@435 | 48 | CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap, |
duke@435 | 49 | int max_covered_regions): |
duke@435 | 50 | ModRefBarrierSet(max_covered_regions), |
duke@435 | 51 | _whole_heap(whole_heap), |
duke@435 | 52 | _guard_index(cards_required(whole_heap.word_size()) - 1), |
duke@435 | 53 | _last_valid_index(_guard_index - 1), |
jcoomes@456 | 54 | _page_size(os::vm_page_size()), |
duke@435 | 55 | _byte_map_size(compute_byte_map_size()) |
duke@435 | 56 | { |
duke@435 | 57 | _kind = BarrierSet::CardTableModRef; |
duke@435 | 58 | |
duke@435 | 59 | HeapWord* low_bound = _whole_heap.start(); |
duke@435 | 60 | HeapWord* high_bound = _whole_heap.end(); |
duke@435 | 61 | assert((uintptr_t(low_bound) & (card_size - 1)) == 0, "heap must start at card boundary"); |
duke@435 | 62 | assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary"); |
duke@435 | 63 | |
duke@435 | 64 | assert(card_size <= 512, "card_size must be at most 512"); // why? |
duke@435 | 65 | |
duke@435 | 66 | _covered = new MemRegion[max_covered_regions]; |
duke@435 | 67 | _committed = new MemRegion[max_covered_regions]; |
duke@435 | 68 | if (_covered == NULL || _committed == NULL) |
duke@435 | 69 | vm_exit_during_initialization("couldn't alloc card table covered region set."); |
duke@435 | 70 | int i; |
duke@435 | 71 | for (i = 0; i < max_covered_regions; i++) { |
duke@435 | 72 | _covered[i].set_word_size(0); |
duke@435 | 73 | _committed[i].set_word_size(0); |
duke@435 | 74 | } |
duke@435 | 75 | _cur_covered_regions = 0; |
duke@435 | 76 | |
duke@435 | 77 | const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 : |
duke@435 | 78 | MAX2(_page_size, (size_t) os::vm_allocation_granularity()); |
duke@435 | 79 | ReservedSpace heap_rs(_byte_map_size, rs_align, false); |
duke@435 | 80 | os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1, |
duke@435 | 81 | _page_size, heap_rs.base(), heap_rs.size()); |
duke@435 | 82 | if (!heap_rs.is_reserved()) { |
duke@435 | 83 | vm_exit_during_initialization("Could not reserve enough space for the " |
duke@435 | 84 | "card marking array"); |
duke@435 | 85 | } |
duke@435 | 86 | |
duke@435 | 87 | // The assembler store_check code will do an unsigned shift of the oop, |
duke@435 | 88 | // then add it to byte_map_base, i.e. |
duke@435 | 89 | // |
duke@435 | 90 | // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift) |
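// For example (a sketch assuming card_shift == 9, i.e. 512-byte cards, and
// low_bound == 0x20000000): byte_map_base = _byte_map - 0x100000, so a store
// to a field at 0x20000000 marks the byte at byte_map_base + (0x20000000 >> 9),
// which is exactly &_byte_map[0].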
duke@435 | 91 | _byte_map = (jbyte*) heap_rs.base(); |
duke@435 | 92 | byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); |
duke@435 | 93 | assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map"); |
duke@435 | 94 | assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map"); |
duke@435 | 95 | |
duke@435 | 96 | jbyte* guard_card = &_byte_map[_guard_index]; |
duke@435 | 97 | uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size); |
duke@435 | 98 | _guard_region = MemRegion((HeapWord*)guard_page, _page_size); |
duke@435 | 99 | if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) { |
duke@435 | 100 | // Do better than this for Merlin |
duke@435 | 101 | vm_exit_out_of_memory(_page_size, "card table last card"); |
duke@435 | 102 | } |
duke@435 | 103 | *guard_card = last_card; |
duke@435 | 104 | |
duke@435 | 105 | _lowest_non_clean = |
duke@435 | 106 | NEW_C_HEAP_ARRAY(CardArr, max_covered_regions); |
duke@435 | 107 | _lowest_non_clean_chunk_size = |
duke@435 | 108 | NEW_C_HEAP_ARRAY(size_t, max_covered_regions); |
duke@435 | 109 | _lowest_non_clean_base_chunk_index = |
duke@435 | 110 | NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions); |
duke@435 | 111 | _last_LNC_resizing_collection = |
duke@435 | 112 | NEW_C_HEAP_ARRAY(int, max_covered_regions); |
duke@435 | 113 | if (_lowest_non_clean == NULL |
duke@435 | 114 | || _lowest_non_clean_chunk_size == NULL |
duke@435 | 115 | || _lowest_non_clean_base_chunk_index == NULL |
duke@435 | 116 | || _last_LNC_resizing_collection == NULL) |
duke@435 | 117 | vm_exit_during_initialization("couldn't allocate an LNC array."); |
duke@435 | 118 | for (i = 0; i < max_covered_regions; i++) { |
duke@435 | 119 | _lowest_non_clean[i] = NULL; |
duke@435 | 120 | _lowest_non_clean_chunk_size[i] = 0; |
duke@435 | 121 | _last_LNC_resizing_collection[i] = -1; |
duke@435 | 122 | } |
duke@435 | 123 | |
duke@435 | 124 | if (TraceCardTableModRefBS) { |
duke@435 | 125 | gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: "); |
duke@435 | 126 | gclog_or_tty->print_cr(" " |
duke@435 | 127 | " &_byte_map[0]: " INTPTR_FORMAT |
duke@435 | 128 | " &_byte_map[_last_valid_index]: " INTPTR_FORMAT, |
duke@435 | 129 | &_byte_map[0], |
duke@435 | 130 | &_byte_map[_last_valid_index]); |
duke@435 | 131 | gclog_or_tty->print_cr(" " |
duke@435 | 132 | " byte_map_base: " INTPTR_FORMAT, |
duke@435 | 133 | byte_map_base); |
duke@435 | 134 | } |
duke@435 | 135 | } |
duke@435 | 136 | |
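// Returns the index of the covered region whose start is 'base'. If no such
// region exists yet, a new (empty) entry is created for it, keeping the
// _covered/_committed arrays sorted by start address.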
duke@435 | 137 | int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) { |
duke@435 | 138 | int i; |
duke@435 | 139 | for (i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 140 | if (_covered[i].start() == base) return i; |
duke@435 | 141 | if (_covered[i].start() > base) break; |
duke@435 | 142 | } |
duke@435 | 143 | // If we didn't find it, create a new one. |
duke@435 | 144 | assert(_cur_covered_regions < _max_covered_regions, |
duke@435 | 145 | "too many covered regions"); |
duke@435 | 146 | // Move the ones above up, to maintain sorted order. |
duke@435 | 147 | for (int j = _cur_covered_regions; j > i; j--) { |
duke@435 | 148 | _covered[j] = _covered[j-1]; |
duke@435 | 149 | _committed[j] = _committed[j-1]; |
duke@435 | 150 | } |
duke@435 | 151 | int res = i; |
duke@435 | 152 | _cur_covered_regions++; |
duke@435 | 153 | _covered[res].set_start(base); |
duke@435 | 154 | _covered[res].set_word_size(0); |
duke@435 | 155 | jbyte* ct_start = byte_for(base); |
duke@435 | 156 | uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size); |
duke@435 | 157 | _committed[res].set_start((HeapWord*)ct_start_aligned); |
duke@435 | 158 | _committed[res].set_word_size(0); |
duke@435 | 159 | return res; |
duke@435 | 160 | } |
duke@435 | 161 | |
duke@435 | 162 | int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) { |
duke@435 | 163 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 164 | if (_covered[i].contains(addr)) { |
duke@435 | 165 | return i; |
duke@435 | 166 | } |
duke@435 | 167 | } |
duke@435 | 168 | assert(0, "address outside of heap?"); |
duke@435 | 169 | return -1; |
duke@435 | 170 | } |
duke@435 | 171 | |
duke@435 | 172 | HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const { |
duke@435 | 173 | HeapWord* max_end = NULL; |
duke@435 | 174 | for (int j = 0; j < ind; j++) { |
duke@435 | 175 | HeapWord* this_end = _committed[j].end(); |
duke@435 | 176 | if (this_end > max_end) max_end = this_end; |
duke@435 | 177 | } |
duke@435 | 178 | return max_end; |
duke@435 | 179 | } |
duke@435 | 180 | |
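// Returns the subset of 'mr' that is committed solely on behalf of covered
// region 'self': the parts committed for any other covered region, and the
// guard page, are subtracted out.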
duke@435 | 181 | MemRegion CardTableModRefBS::committed_unique_to_self(int self, |
duke@435 | 182 | MemRegion mr) const { |
duke@435 | 183 | MemRegion result = mr; |
duke@435 | 184 | for (int r = 0; r < _cur_covered_regions; r += 1) { |
duke@435 | 185 | if (r != self) { |
duke@435 | 186 | result = result.minus(_committed[r]); |
duke@435 | 187 | } |
duke@435 | 188 | } |
duke@435 | 189 | // Never include the guard page. |
duke@435 | 190 | result = result.minus(_guard_region); |
duke@435 | 191 | return result; |
duke@435 | 192 | } |
duke@435 | 193 | |
duke@435 | 194 | void CardTableModRefBS::resize_covered_region(MemRegion new_region) { |
duke@435 | 195 | // We don't change the start of a region, only the end. |
duke@435 | 196 | assert(_whole_heap.contains(new_region), |
duke@435 | 197 | "attempt to cover area not in reserved area"); |
duke@435 | 198 | debug_only(verify_guard();) |
jmasa@643 | 199 | // collided is true if the expansion would push into another committed region |
jmasa@643 | 200 | debug_only(bool collided = false;) |
jmasa@441 | 201 | int const ind = find_covering_region_by_base(new_region.start()); |
jmasa@441 | 202 | MemRegion const old_region = _covered[ind]; |
duke@435 | 203 | assert(old_region.start() == new_region.start(), "just checking"); |
duke@435 | 204 | if (new_region.word_size() != old_region.word_size()) { |
duke@435 | 205 | // Commit new or uncommit old pages, if necessary. |
duke@435 | 206 | MemRegion cur_committed = _committed[ind]; |
duke@435 | 207 | // Extend the end of this _committed region |
duke@435 | 208 | // to cover the end of any lower _committed regions. |
duke@435 | 209 | // This forms overlapping regions, but never interior regions. |
jmasa@441 | 210 | HeapWord* const max_prev_end = largest_prev_committed_end(ind); |
duke@435 | 211 | if (max_prev_end > cur_committed.end()) { |
duke@435 | 212 | cur_committed.set_end(max_prev_end); |
duke@435 | 213 | } |
duke@435 | 214 | // Align the end up to a page size (starts are already aligned). |
jmasa@441 | 215 | jbyte* const new_end = byte_after(new_region.last()); |
jmasa@643 | 216 | HeapWord* new_end_aligned = |
jmasa@441 | 217 | (HeapWord*) align_size_up((uintptr_t)new_end, _page_size); |
duke@435 | 218 | assert(new_end_aligned >= (HeapWord*) new_end, |
duke@435 | 219 | "align up, but less"); |
jmasa@643 | 220 | int ri = 0; |
jmasa@643 | 221 | for (ri = 0; ri < _cur_covered_regions; ri++) { |
jmasa@643 | 222 | if (ri != ind) { |
jmasa@643 | 223 | if (_committed[ri].contains(new_end_aligned)) { |
jmasa@643 | 224 | assert((new_end_aligned >= _committed[ri].start()) && |
jmasa@643 | 225 | (_committed[ri].start() > _committed[ind].start()), |
jmasa@643 | 226 | "New end of committed region is inconsistent"); |
jmasa@643 | 227 | new_end_aligned = _committed[ri].start(); |
jmasa@643 | 228 | assert(new_end_aligned > _committed[ind].start(), |
jmasa@643 | 229 | "New end of committed region is before start"); |
jmasa@643 | 230 | debug_only(collided = true;) |
jmasa@643 | 231 | // Should only collide with 1 region |
jmasa@643 | 232 | break; |
jmasa@643 | 233 | } |
jmasa@643 | 234 | } |
jmasa@643 | 235 | } |
jmasa@643 | 236 | #ifdef ASSERT |
jmasa@643 | 237 | for (++ri; ri < _cur_covered_regions; ri++) { |
jmasa@643 | 238 | assert(!_committed[ri].contains(new_end_aligned), |
jmasa@643 | 239 | "New end of committed region is in a second committed region"); |
jmasa@643 | 240 | } |
jmasa@643 | 241 | #endif |
duke@435 | 242 | // The guard page is always committed and should not be committed over. |
jmasa@643 | 243 | HeapWord* const new_end_for_commit = MIN2(new_end_aligned, |
jmasa@643 | 244 | _guard_region.start()); |
jmasa@643 | 245 | |
duke@435 | 246 | if (new_end_for_commit > cur_committed.end()) { |
duke@435 | 247 | // Must commit new pages. |
jmasa@441 | 248 | MemRegion const new_committed = |
duke@435 | 249 | MemRegion(cur_committed.end(), new_end_for_commit); |
duke@435 | 250 | |
duke@435 | 251 | assert(!new_committed.is_empty(), "Region should not be empty here"); |
duke@435 | 252 | if (!os::commit_memory((char*)new_committed.start(), |
duke@435 | 253 | new_committed.byte_size(), _page_size)) { |
duke@435 | 254 | // Do better than this for Merlin |
duke@435 | 255 | vm_exit_out_of_memory(new_committed.byte_size(), |
duke@435 | 256 | "card table expansion"); |
duke@435 | 257 | } |
duke@435 | 258 | // Use new_end_aligned (as opposed to new_end_for_commit) because |
duke@435 | 259 | // the cur_committed region may include the guard region. |
duke@435 | 260 | } else if (new_end_aligned < cur_committed.end()) { |
duke@435 | 261 | // Must uncommit pages. |
jmasa@441 | 262 | MemRegion const uncommit_region = |
duke@435 | 263 | committed_unique_to_self(ind, MemRegion(new_end_aligned, |
duke@435 | 264 | cur_committed.end())); |
duke@435 | 265 | if (!uncommit_region.is_empty()) { |
duke@435 | 266 | if (!os::uncommit_memory((char*)uncommit_region.start(), |
duke@435 | 267 | uncommit_region.byte_size())) { |
jmasa@643 | 268 | assert(false, "Card table contraction failed"); |
jmasa@643 | 269 | // The call failed so don't change the end of the |
jmasa@643 | 270 | // committed region. This is better than taking the |
jmasa@643 | 271 | // VM down. |
jmasa@643 | 272 | new_end_aligned = _committed[ind].end(); |
duke@435 | 273 | } |
duke@435 | 274 | } |
duke@435 | 275 | } |
duke@435 | 276 | // In any case, we can reset the end of the current committed entry. |
duke@435 | 277 | _committed[ind].set_end(new_end_aligned); |
duke@435 | 278 | |
duke@435 | 279 | // The default of 0 is not necessarily clean cards. |
duke@435 | 280 | jbyte* entry; |
duke@435 | 281 | if (old_region.last() < _whole_heap.start()) { |
duke@435 | 282 | entry = byte_for(_whole_heap.start()); |
duke@435 | 283 | } else { |
duke@435 | 284 | entry = byte_after(old_region.last()); |
duke@435 | 285 | } |
swamyv@924 | 286 | assert(index_for(new_region.last()) < _guard_index, |
duke@435 | 287 | "The guard card will be overwritten"); |
jmasa@643 | 288 | // The commented-out line below would clean only the newly expanded region, |
jmasa@643 | 289 | // not the expanded region aligned up to a page boundary. |
jmasa@643 | 290 | // jbyte* const end = byte_after(new_region.last()); |
jmasa@643 | 291 | jbyte* const end = (jbyte*) new_end_for_commit; |
jmasa@643 | 292 | assert((end >= byte_after(new_region.last())) || collided, |
jmasa@643 | 293 | "Expect to be beyond new region unless impacting another region"); |
duke@435 | 294 | // Do nothing if we resized downward. |
jmasa@643 | 295 | #ifdef ASSERT |
jmasa@643 | 296 | for (int ri = 0; ri < _cur_covered_regions; ri++) { |
jmasa@643 | 297 | if (ri != ind) { |
jmasa@643 | 298 | // The end of the new committed region should not |
jmasa@643 | 299 | // be in any existing region unless it matches |
jmasa@643 | 300 | // the start of the next region. |
jmasa@643 | 301 | assert(!_committed[ri].contains(end) || |
jmasa@643 | 302 | (_committed[ri].start() == (HeapWord*) end), |
jmasa@643 | 303 | "Overlapping committed regions"); |
jmasa@643 | 304 | } |
jmasa@643 | 305 | } |
jmasa@643 | 306 | #endif |
duke@435 | 307 | if (entry < end) { |
duke@435 | 308 | memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte))); |
duke@435 | 309 | } |
duke@435 | 310 | } |
duke@435 | 311 | // In any case, the covered size changes. |
duke@435 | 312 | _covered[ind].set_word_size(new_region.word_size()); |
duke@435 | 313 | if (TraceCardTableModRefBS) { |
duke@435 | 314 | gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: "); |
duke@435 | 315 | gclog_or_tty->print_cr(" " |
duke@435 | 316 | " _covered[%d].start(): " INTPTR_FORMAT |
duke@435 | 317 | " _covered[%d].last(): " INTPTR_FORMAT, |
duke@435 | 318 | ind, _covered[ind].start(), |
duke@435 | 319 | ind, _covered[ind].last()); |
duke@435 | 320 | gclog_or_tty->print_cr(" " |
duke@435 | 321 | " _committed[%d].start(): " INTPTR_FORMAT |
duke@435 | 322 | " _committed[%d].last(): " INTPTR_FORMAT, |
duke@435 | 323 | ind, _committed[ind].start(), |
duke@435 | 324 | ind, _committed[ind].last()); |
duke@435 | 325 | gclog_or_tty->print_cr(" " |
duke@435 | 326 | " byte_for(start): " INTPTR_FORMAT |
duke@435 | 327 | " byte_for(last): " INTPTR_FORMAT, |
duke@435 | 328 | byte_for(_covered[ind].start()), |
duke@435 | 329 | byte_for(_covered[ind].last())); |
duke@435 | 330 | gclog_or_tty->print_cr(" " |
duke@435 | 331 | " addr_for(start): " INTPTR_FORMAT |
duke@435 | 332 | " addr_for(last): " INTPTR_FORMAT, |
duke@435 | 333 | addr_for((jbyte*) _committed[ind].start()), |
duke@435 | 334 | addr_for((jbyte*) _committed[ind].last())); |
duke@435 | 335 | } |
duke@435 | 336 | debug_only(verify_guard();) |
duke@435 | 337 | } |
duke@435 | 338 | |
duke@435 | 339 | // Note that these versions are precise! The scanning code has to handle the |
duke@435 | 340 | // fact that the write barrier may be either precise or imprecise. |
duke@435 | 341 | |
coleenp@548 | 342 | void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) { |
duke@435 | 343 | inline_write_ref_field(field, newVal); |
duke@435 | 344 | } |
duke@435 | 345 | |
duke@435 | 346 | |
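// Attempts to atomically claim the card at 'card_index'. Returns true only if
// this thread's compare-and-swap transitions the card to claimed_card_val();
// returns false if the card was already claimed or if its value changed
// concurrently (so the compare-and-swap fails).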
ysr@777 | 347 | bool CardTableModRefBS::claim_card(size_t card_index) { |
ysr@777 | 348 | jbyte val = _byte_map[card_index]; |
ysr@777 | 349 | if (val != claimed_card_val()) { |
ysr@777 | 350 | jbyte res = Atomic::cmpxchg((jbyte) claimed_card_val(), &_byte_map[card_index], val); |
ysr@777 | 351 | if (res == val) |
ysr@777 | 352 | return true; |
ysr@777 | 353 | else return false; |
ysr@777 | 354 | } |
ysr@777 | 355 | return false; |
ysr@777 | 356 | } |
ysr@777 | 357 | |
duke@435 | 358 | void CardTableModRefBS::non_clean_card_iterate(Space* sp, |
duke@435 | 359 | MemRegion mr, |
duke@435 | 360 | DirtyCardToOopClosure* dcto_cl, |
duke@435 | 361 | MemRegionClosure* cl, |
duke@435 | 362 | bool clear) { |
duke@435 | 363 | if (!mr.is_empty()) { |
duke@435 | 364 | int n_threads = SharedHeap::heap()->n_par_threads(); |
duke@435 | 365 | if (n_threads > 0) { |
duke@435 | 366 | #ifndef SERIALGC |
duke@435 | 367 | par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads); |
duke@435 | 368 | #else // SERIALGC |
duke@435 | 369 | fatal("Parallel gc not supported here."); |
duke@435 | 370 | #endif // SERIALGC |
duke@435 | 371 | } else { |
duke@435 | 372 | non_clean_card_iterate_work(mr, cl, clear); |
duke@435 | 373 | } |
duke@435 | 374 | } |
duke@435 | 375 | } |
duke@435 | 376 | |
duke@435 | 377 | // NOTE: For this to work correctly, it is important that |
duke@435 | 378 | // we look for non-clean cards below (so as to catch those |
duke@435 | 379 | // marked precleaned), rather than look explicitly for dirty |
duke@435 | 380 | // cards (and miss those marked precleaned). In that sense, |
duke@435 | 381 | // the name precleaned is currently somewhat of a misnomer. |
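// The loop below walks the card bytes of each intersected covered region
// backwards (from the card for mri.last() down to the card for mri.start()),
// coalescing consecutive non-clean cards into one maximal run, optionally
// clearing the dirty ones, and then applying the closure once per run
// (clipped to the intersected region).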
duke@435 | 382 | void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr, |
duke@435 | 383 | MemRegionClosure* cl, |
duke@435 | 384 | bool clear) { |
duke@435 | 385 | // Figure out whether we have to worry about parallelism. |
duke@435 | 386 | bool is_par = (SharedHeap::heap()->n_par_threads() > 1); |
duke@435 | 387 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 388 | MemRegion mri = mr.intersection(_covered[i]); |
duke@435 | 389 | if (mri.word_size() > 0) { |
duke@435 | 390 | jbyte* cur_entry = byte_for(mri.last()); |
duke@435 | 391 | jbyte* limit = byte_for(mri.start()); |
duke@435 | 392 | while (cur_entry >= limit) { |
duke@435 | 393 | jbyte* next_entry = cur_entry - 1; |
duke@435 | 394 | if (*cur_entry != clean_card) { |
duke@435 | 395 | size_t non_clean_cards = 1; |
duke@435 | 396 | // Should the next card be included in this range of dirty cards? |
duke@435 | 397 | while (next_entry >= limit && *next_entry != clean_card) { |
duke@435 | 398 | non_clean_cards++; |
duke@435 | 399 | cur_entry = next_entry; |
duke@435 | 400 | next_entry--; |
duke@435 | 401 | } |
duke@435 | 402 | // The memory region may not be on a card boundary. So that |
duke@435 | 403 | // objects beyond the end of the region are not processed, make |
duke@435 | 404 | // cur_cards precise with regard to the end of the memory region. |
duke@435 | 405 | MemRegion cur_cards(addr_for(cur_entry), |
duke@435 | 406 | non_clean_cards * card_size_in_words); |
duke@435 | 407 | MemRegion dirty_region = cur_cards.intersection(mri); |
duke@435 | 408 | if (clear) { |
duke@435 | 409 | for (size_t i = 0; i < non_clean_cards; i++) { |
duke@435 | 410 | // Clean the dirty cards (but leave the other non-clean |
duke@435 | 411 | // cards alone). If parallel, do the cleaning atomically. |
duke@435 | 412 | jbyte cur_entry_val = cur_entry[i]; |
duke@435 | 413 | if (card_is_dirty_wrt_gen_iter(cur_entry_val)) { |
duke@435 | 414 | if (is_par) { |
duke@435 | 415 | jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val); |
duke@435 | 416 | assert(res != clean_card, |
duke@435 | 417 | "Dirty card mysteriously cleaned"); |
duke@435 | 418 | } else { |
duke@435 | 419 | cur_entry[i] = clean_card; |
duke@435 | 420 | } |
duke@435 | 421 | } |
duke@435 | 422 | } |
duke@435 | 423 | } |
duke@435 | 424 | cl->do_MemRegion(dirty_region); |
duke@435 | 425 | } |
duke@435 | 426 | cur_entry = next_entry; |
duke@435 | 427 | } |
duke@435 | 428 | } |
duke@435 | 429 | } |
duke@435 | 430 | } |
duke@435 | 431 | |
duke@435 | 432 | void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp, |
duke@435 | 433 | OopClosure* cl, |
duke@435 | 434 | bool clear, |
duke@435 | 435 | bool before_save_marks) { |
duke@435 | 436 | // Note that dcto_cl is resource-allocated, so there is no |
duke@435 | 437 | // corresponding "delete". |
duke@435 | 438 | DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision()); |
duke@435 | 439 | MemRegion used_mr; |
duke@435 | 440 | if (before_save_marks) { |
duke@435 | 441 | used_mr = sp->used_region_at_save_marks(); |
duke@435 | 442 | } else { |
duke@435 | 443 | used_mr = sp->used_region(); |
duke@435 | 444 | } |
duke@435 | 445 | non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear); |
duke@435 | 446 | } |
duke@435 | 447 | |
duke@435 | 448 | void CardTableModRefBS::dirty_MemRegion(MemRegion mr) { |
duke@435 | 449 | jbyte* cur = byte_for(mr.start()); |
duke@435 | 450 | jbyte* last = byte_after(mr.last()); |
duke@435 | 451 | while (cur < last) { |
duke@435 | 452 | *cur = dirty_card; |
duke@435 | 453 | cur++; |
duke@435 | 454 | } |
duke@435 | 455 | } |
duke@435 | 456 | |
ysr@777 | 457 | void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) { |
duke@435 | 458 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 459 | MemRegion mri = mr.intersection(_covered[i]); |
duke@435 | 460 | if (!mri.is_empty()) dirty_MemRegion(mri); |
duke@435 | 461 | } |
duke@435 | 462 | } |
duke@435 | 463 | |
duke@435 | 464 | void CardTableModRefBS::clear_MemRegion(MemRegion mr) { |
duke@435 | 465 | // Be conservative: only clean cards entirely contained within the |
duke@435 | 466 | // region. |
duke@435 | 467 | jbyte* cur; |
duke@435 | 468 | if (mr.start() == _whole_heap.start()) { |
duke@435 | 469 | cur = byte_for(mr.start()); |
duke@435 | 470 | } else { |
duke@435 | 471 | assert(mr.start() > _whole_heap.start(), "mr is not covered."); |
duke@435 | 472 | cur = byte_after(mr.start() - 1); |
duke@435 | 473 | } |
duke@435 | 474 | jbyte* last = byte_after(mr.last()); |
duke@435 | 475 | memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte))); |
duke@435 | 476 | } |
duke@435 | 477 | |
duke@435 | 478 | void CardTableModRefBS::clear(MemRegion mr) { |
duke@435 | 479 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 480 | MemRegion mri = mr.intersection(_covered[i]); |
duke@435 | 481 | if (!mri.is_empty()) clear_MemRegion(mri); |
duke@435 | 482 | } |
duke@435 | 483 | } |
duke@435 | 484 | |
ysr@777 | 485 | void CardTableModRefBS::dirty(MemRegion mr) { |
ysr@777 | 486 | jbyte* first = byte_for(mr.start()); |
ysr@777 | 487 | jbyte* last = byte_after(mr.last()); |
ysr@777 | 488 | memset(first, dirty_card, last-first); |
ysr@777 | 489 | } |
ysr@777 | 490 | |
duke@435 | 491 | // NOTES: |
duke@435 | 492 | // (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate() |
duke@435 | 493 | // iterates over dirty card ranges in increasing address order. |
duke@435 | 494 | void CardTableModRefBS::dirty_card_iterate(MemRegion mr, |
duke@435 | 495 | MemRegionClosure* cl) { |
duke@435 | 496 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 497 | MemRegion mri = mr.intersection(_covered[i]); |
duke@435 | 498 | if (!mri.is_empty()) { |
duke@435 | 499 | jbyte *cur_entry, *next_entry, *limit; |
duke@435 | 500 | for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); |
duke@435 | 501 | cur_entry <= limit; |
duke@435 | 502 | cur_entry = next_entry) { |
duke@435 | 503 | next_entry = cur_entry + 1; |
duke@435 | 504 | if (*cur_entry == dirty_card) { |
duke@435 | 505 | size_t dirty_cards; |
duke@435 | 506 | // Accumulate maximal dirty card range, starting at cur_entry |
duke@435 | 507 | for (dirty_cards = 1; |
duke@435 | 508 | next_entry <= limit && *next_entry == dirty_card; |
duke@435 | 509 | dirty_cards++, next_entry++); |
duke@435 | 510 | MemRegion cur_cards(addr_for(cur_entry), |
duke@435 | 511 | dirty_cards*card_size_in_words); |
duke@435 | 512 | cl->do_MemRegion(cur_cards); |
duke@435 | 513 | } |
duke@435 | 514 | } |
duke@435 | 515 | } |
duke@435 | 516 | } |
duke@435 | 517 | } |
duke@435 | 518 | |
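// Scans 'mr' in increasing address order for the first maximal run of dirty
// cards. If one is found, it is returned as a (card-aligned) MemRegion, after
// optionally resetting those cards to 'reset_val'; otherwise an empty region
// at mr.end() is returned.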
ysr@777 | 519 | MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr, |
ysr@777 | 520 | bool reset, |
ysr@777 | 521 | int reset_val) { |
duke@435 | 522 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 523 | MemRegion mri = mr.intersection(_covered[i]); |
duke@435 | 524 | if (!mri.is_empty()) { |
duke@435 | 525 | jbyte* cur_entry, *next_entry, *limit; |
duke@435 | 526 | for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); |
duke@435 | 527 | cur_entry <= limit; |
duke@435 | 528 | cur_entry = next_entry) { |
duke@435 | 529 | next_entry = cur_entry + 1; |
duke@435 | 530 | if (*cur_entry == dirty_card) { |
duke@435 | 531 | size_t dirty_cards; |
duke@435 | 532 | // Accumulate maximal dirty card range, starting at cur_entry |
duke@435 | 533 | for (dirty_cards = 1; |
duke@435 | 534 | next_entry <= limit && *next_entry == dirty_card; |
duke@435 | 535 | dirty_cards++, next_entry++); |
duke@435 | 536 | MemRegion cur_cards(addr_for(cur_entry), |
duke@435 | 537 | dirty_cards*card_size_in_words); |
ysr@777 | 538 | if (reset) { |
ysr@777 | 539 | for (size_t i = 0; i < dirty_cards; i++) { |
ysr@777 | 540 | cur_entry[i] = reset_val; |
ysr@777 | 541 | } |
duke@435 | 542 | } |
duke@435 | 543 | return cur_cards; |
duke@435 | 544 | } |
duke@435 | 545 | } |
duke@435 | 546 | } |
duke@435 | 547 | } |
duke@435 | 548 | return MemRegion(mr.end(), mr.end()); |
duke@435 | 549 | } |
duke@435 | 550 | |
duke@435 | 551 | // Set all the dirty cards in the given region to "precleaned" state. |
duke@435 | 552 | void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) { |
duke@435 | 553 | for (int i = 0; i < _cur_covered_regions; i++) { |
duke@435 | 554 | MemRegion mri = mr.intersection(_covered[i]); |
duke@435 | 555 | if (!mri.is_empty()) { |
duke@435 | 556 | jbyte *cur_entry, *limit; |
duke@435 | 557 | for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last()); |
duke@435 | 558 | cur_entry <= limit; |
duke@435 | 559 | cur_entry++) { |
duke@435 | 560 | if (*cur_entry == dirty_card) { |
duke@435 | 561 | *cur_entry = precleaned_card; |
duke@435 | 562 | } |
duke@435 | 563 | } |
duke@435 | 564 | } |
duke@435 | 565 | } |
duke@435 | 566 | } |
duke@435 | 567 | |
duke@435 | 568 | uintx CardTableModRefBS::ct_max_alignment_constraint() { |
duke@435 | 569 | return card_size * os::vm_page_size(); |
duke@435 | 570 | } |
duke@435 | 571 | |
duke@435 | 572 | void CardTableModRefBS::verify_guard() { |
duke@435 | 573 | // For product build verification |
duke@435 | 574 | guarantee(_byte_map[_guard_index] == last_card, |
duke@435 | 575 | "card table guard has been modified"); |
duke@435 | 576 | } |
duke@435 | 577 | |
duke@435 | 578 | void CardTableModRefBS::verify() { |
duke@435 | 579 | verify_guard(); |
duke@435 | 580 | } |
duke@435 | 581 | |
duke@435 | 582 | #ifndef PRODUCT |
duke@435 | 583 | class GuaranteeNotModClosure: public MemRegionClosure { |
duke@435 | 584 | CardTableModRefBS* _ct; |
duke@435 | 585 | public: |
duke@435 | 586 | GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {} |
duke@435 | 587 | void do_MemRegion(MemRegion mr) { |
duke@435 | 588 | jbyte* entry = _ct->byte_for(mr.start()); |
duke@435 | 589 | guarantee(*entry != CardTableModRefBS::clean_card, |
duke@435 | 590 | "Dirty card in region that should be clean"); |
duke@435 | 591 | } |
duke@435 | 592 | }; |
duke@435 | 593 | |
duke@435 | 594 | void CardTableModRefBS::verify_clean_region(MemRegion mr) { |
duke@435 | 595 | GuaranteeNotModClosure blk(this); |
duke@435 | 596 | non_clean_card_iterate_work(mr, &blk, false); |
duke@435 | 597 | } |
duke@435 | 598 | #endif |
duke@435 | 599 | |
duke@435 | 600 | bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) { |
duke@435 | 601 | return |
duke@435 | 602 | CardTableModRefBS::card_will_be_scanned(cv) || |
duke@435 | 603 | _rs->is_prev_nonclean_card_val(cv); |
duke@435 | 604 | }; |
duke@435 | 605 | |
duke@435 | 606 | bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) { |
duke@435 | 607 | return |
duke@435 | 608 | cv != clean_card && |
duke@435 | 609 | (CardTableModRefBS::card_may_have_been_dirty(cv) || |
duke@435 | 610 | CardTableRS::youngergen_may_have_been_dirty(cv)); |
duke@435 | 611 | }; |