Tue, 17 Feb 2009 15:35:58 -0800
6786346: intermittent Internal Error (src/share/vm/memory/cardTableModRefBS.cpp:226)
Summary: Two assertions were incorrectly composed.
Reviewed-by: tonyp
1 /*
2 * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 // This kind of "BarrierSet" allows a "CollectedHeap" to detect and
26 // enumerate ref fields that have been modified (since the last
27 // enumeration).
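// A minimal sketch of how that detection works (illustrative only, using the
// byte_map_base, card_shift and dirty_card members declared for this class):
// the write barrier marks the card covering a modified ref field,
//
//   byte_map_base[uintptr_t(field) >> card_shift] = dirty_card;
//
// and the scanning code later visits every non-clean card byte to find the
// modified fields.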
29 # include "incls/_precompiled.incl"
30 # include "incls/_cardTableModRefBS.cpp.incl"
32 size_t CardTableModRefBS::cards_required(size_t covered_words)
33 {
34 // Add one for a guard card, used to detect errors.
35 const size_t words = align_size_up(covered_words, card_size_in_words);
36 return words / card_size_in_words + 1;
37 }
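// Worked example (illustrative only, assuming 512-byte cards on a 64-bit VM,
// i.e. card_size_in_words == 64): for covered_words == 1000,
// align_size_up(1000, 64) == 1024 and 1024 / 64 == 16 cards, plus one guard
// card, gives cards_required == 17.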
39 size_t CardTableModRefBS::compute_byte_map_size()
40 {
41 assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
42 "unitialized, check declaration order");
43 assert(_page_size != 0, "unitialized, check declaration order");
44 const size_t granularity = os::vm_allocation_granularity();
45 return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
46 }
48 CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
49 int max_covered_regions):
50 ModRefBarrierSet(max_covered_regions),
51 _whole_heap(whole_heap),
52 _guard_index(cards_required(whole_heap.word_size()) - 1),
53 _last_valid_index(_guard_index - 1),
54 _page_size(os::vm_page_size()),
55 _byte_map_size(compute_byte_map_size())
56 {
57 _kind = BarrierSet::CardTableModRef;
59 HeapWord* low_bound = _whole_heap.start();
60 HeapWord* high_bound = _whole_heap.end();
61 assert((uintptr_t(low_bound) & (card_size - 1)) == 0, "heap must start at card boundary");
62 assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");
64 assert(card_size <= 512, "card_size must be at most 512"); // why?
66 _covered = new MemRegion[max_covered_regions];
67 _committed = new MemRegion[max_covered_regions];
68 if (_covered == NULL || _committed == NULL)
69 vm_exit_during_initialization("couldn't alloc card table covered region set.");
70 int i;
71 for (i = 0; i < max_covered_regions; i++) {
72 _covered[i].set_word_size(0);
73 _committed[i].set_word_size(0);
74 }
75 _cur_covered_regions = 0;
77 const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
78 MAX2(_page_size, (size_t) os::vm_allocation_granularity());
79 ReservedSpace heap_rs(_byte_map_size, rs_align, false);
80 os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
81 _page_size, heap_rs.base(), heap_rs.size());
82 if (!heap_rs.is_reserved()) {
83 vm_exit_during_initialization("Could not reserve enough space for the "
84 "card marking array");
85 }
87 // The assembler store_check code will do an unsigned shift of the oop,
88 // then add it to byte_map_base, i.e.
89 //
90 // _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
91 _byte_map = (jbyte*) heap_rs.base();
92 byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
93 assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
94 assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
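// A sketch of why those two asserts hold (not part of the original source,
// assuming byte_for(p) computes byte_map_base + (uintptr_t(p) >> card_shift)
// as declared in cardTableModRefBS.hpp): substituting the definition of
// byte_map_base above gives
//
//   byte_for(p) == _byte_map + ((uintptr_t(p) >> card_shift)
//                               - (uintptr_t(low_bound) >> card_shift))
//
// and because low_bound is card-aligned (asserted earlier), that index is
// exactly (p - low_bound) / card_size, so low_bound maps to &_byte_map[0] and
// high_bound - 1 maps to a slot no later than _last_valid_index.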
96 jbyte* guard_card = &_byte_map[_guard_index];
97 uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
98 _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
99 if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
100 // Do better than this for Merlin
101 vm_exit_out_of_memory(_page_size, "card table last card");
102 }
103 *guard_card = last_card;
105 _lowest_non_clean =
106 NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
107 _lowest_non_clean_chunk_size =
108 NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
109 _lowest_non_clean_base_chunk_index =
110 NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
111 _last_LNC_resizing_collection =
112 NEW_C_HEAP_ARRAY(int, max_covered_regions);
113 if (_lowest_non_clean == NULL
114 || _lowest_non_clean_chunk_size == NULL
115 || _lowest_non_clean_base_chunk_index == NULL
116 || _last_LNC_resizing_collection == NULL)
117 vm_exit_during_initialization("couldn't allocate an LNC array.");
118 for (i = 0; i < max_covered_regions; i++) {
119 _lowest_non_clean[i] = NULL;
120 _lowest_non_clean_chunk_size[i] = 0;
121 _last_LNC_resizing_collection[i] = -1;
122 }
124 if (TraceCardTableModRefBS) {
125 gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
126 gclog_or_tty->print_cr(" "
127 " &_byte_map[0]: " INTPTR_FORMAT
128 " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
129 &_byte_map[0],
130 &_byte_map[_last_valid_index]);
131 gclog_or_tty->print_cr(" "
132 " byte_map_base: " INTPTR_FORMAT,
133 byte_map_base);
134 }
135 }
137 int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
138 int i;
139 for (i = 0; i < _cur_covered_regions; i++) {
140 if (_covered[i].start() == base) return i;
141 if (_covered[i].start() > base) break;
142 }
143 // If we didn't find it, create a new one.
144 assert(_cur_covered_regions < _max_covered_regions,
145 "too many covered regions");
146 // Move the ones above up, to maintain sorted order.
147 for (int j = _cur_covered_regions; j > i; j--) {
148 _covered[j] = _covered[j-1];
149 _committed[j] = _committed[j-1];
150 }
151 int res = i;
152 _cur_covered_regions++;
153 _covered[res].set_start(base);
154 _covered[res].set_word_size(0);
155 jbyte* ct_start = byte_for(base);
156 uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
157 _committed[res].set_start((HeapWord*)ct_start_aligned);
158 _committed[res].set_word_size(0);
159 return res;
160 }
162 int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
163 for (int i = 0; i < _cur_covered_regions; i++) {
164 if (_covered[i].contains(addr)) {
165 return i;
166 }
167 }
168 assert(0, "address outside of heap?");
169 return -1;
170 }
172 HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
173 HeapWord* max_end = NULL;
174 for (int j = 0; j < ind; j++) {
175 HeapWord* this_end = _committed[j].end();
176 if (this_end > max_end) max_end = this_end;
177 }
178 return max_end;
179 }
181 MemRegion CardTableModRefBS::committed_unique_to_self(int self,
182 MemRegion mr) const {
183 MemRegion result = mr;
184 for (int r = 0; r < _cur_covered_regions; r += 1) {
185 if (r != self) {
186 result = result.minus(_committed[r]);
187 }
188 }
189 // Never include the guard page.
190 result = result.minus(_guard_region);
191 return result;
192 }
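// Illustrative example (not part of the original source): with two committed
// card-table ranges that overlap, say _committed[self] == [A, C) and
// _committed[other] == [B, C) with A < B < C, the result for MemRegion(A, C)
// is [A, B); the shared tail [B, C) and the guard page are excluded because
// another region (or the guard) still uses them, so callers can safely
// uncommit only what the result covers.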
194 void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
195 // We don't change the start of a region, only the end.
196 assert(_whole_heap.contains(new_region),
197 "attempt to cover area not in reserved area");
198 debug_only(verify_guard();)
199 // collided is true if the expansion would push into another committed region
200 debug_only(bool collided = false;)
201 int const ind = find_covering_region_by_base(new_region.start());
202 MemRegion const old_region = _covered[ind];
203 assert(old_region.start() == new_region.start(), "just checking");
204 if (new_region.word_size() != old_region.word_size()) {
205 // Commit new or uncommit old pages, if necessary.
206 MemRegion cur_committed = _committed[ind];
207 // Extend the end of this _committed region
208 // to cover the end of any lower _committed regions.
209 // This forms overlapping regions, but never interior regions.
210 HeapWord* const max_prev_end = largest_prev_committed_end(ind);
211 if (max_prev_end > cur_committed.end()) {
212 cur_committed.set_end(max_prev_end);
213 }
214 // Align the end up to a page size (starts are already aligned).
215 jbyte* const new_end = byte_after(new_region.last());
216 HeapWord* new_end_aligned =
217 (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
218 assert(new_end_aligned >= (HeapWord*) new_end,
219 "align up, but less");
220 // Check the other regions (excludes "ind") to ensure that
221 // the new_end_aligned does not intrude onto the committed
222 // space of another region.
223 int ri = 0;
224 for (ri = 0; ri < _cur_covered_regions; ri++) {
225 if (ri != ind) {
226 if (_committed[ri].contains(new_end_aligned)) {
227 // The prior check included in the assert
228 // (new_end_aligned >= _committed[ri].start())
229 // is redundant with the "contains" test.
230 // Any region containing the new end
231 // should start at or beyond the region found (ind)
232 // for the new end (committed regions are not expected to
233 // be proper subsets of other committed regions).
234 assert(_committed[ri].start() >= _committed[ind].start(),
235 "New end of committed region is inconsistent");
236 new_end_aligned = _committed[ri].start();
237 // new_end_aligned can be equal to the start of its
238 // committed region (i.e., of "ind") if a second
239 // region following "ind" also starts at the same location
240 // as "ind".
241 assert(new_end_aligned >= _committed[ind].start(),
242 "New end of committed region is before start");
243 debug_only(collided = true;)
244 // Should only collide with 1 region
245 break;
246 }
247 }
248 }
249 #ifdef ASSERT
250 for (++ri; ri < _cur_covered_regions; ri++) {
251 assert(!_committed[ri].contains(new_end_aligned),
252 "New end of committed region is in a second committed region");
253 }
254 #endif
255 // The guard page is always committed and should not be committed over.
256 HeapWord* const new_end_for_commit = MIN2(new_end_aligned,
257 _guard_region.start());
259 if (new_end_for_commit > cur_committed.end()) {
260 // Must commit new pages.
261 MemRegion const new_committed =
262 MemRegion(cur_committed.end(), new_end_for_commit);
264 assert(!new_committed.is_empty(), "Region should not be empty here");
265 if (!os::commit_memory((char*)new_committed.start(),
266 new_committed.byte_size(), _page_size)) {
267 // Do better than this for Merlin
268 vm_exit_out_of_memory(new_committed.byte_size(),
269 "card table expansion");
270 }
271 // Use new_end_aligned (as opposed to new_end_for_commit) because
272 // the cur_committed region may include the guard region.
273 } else if (new_end_aligned < cur_committed.end()) {
274 // Must uncommit pages.
275 MemRegion const uncommit_region =
276 committed_unique_to_self(ind, MemRegion(new_end_aligned,
277 cur_committed.end()));
278 if (!uncommit_region.is_empty()) {
279 if (!os::uncommit_memory((char*)uncommit_region.start(),
280 uncommit_region.byte_size())) {
281 assert(false, "Card table contraction failed");
282 // The call failed so don't change the end of the
283 // committed region. This is better than taking the
284 // VM down.
285 new_end_aligned = _committed[ind].end();
286 }
287 }
288 }
289 // In any case, we can reset the end of the current committed entry.
290 _committed[ind].set_end(new_end_aligned);
292 // Freshly committed card bytes default to 0, which is not the clean_card value, so clean the new cards explicitly below.
293 jbyte* entry;
294 if (old_region.last() < _whole_heap.start()) {
295 entry = byte_for(_whole_heap.start());
296 } else {
297 entry = byte_after(old_region.last());
298 }
299 assert(index_for(new_region.last()) < _guard_index,
300 "The guard card will be overwritten");
301 // The commented-out line below would clean only the newly expanded region,
302 // not the aligned-up expanded region.
303 // jbyte* const end = byte_after(new_region.last());
304 jbyte* const end = (jbyte*) new_end_for_commit;
305 assert((end >= byte_after(new_region.last())) || collided,
306 "Expect to be beyond new region unless impacting another region");
307 // Do nothing below if we resized downward.
308 #ifdef ASSERT
309 for (int ri = 0; ri < _cur_covered_regions; ri++) {
310 if (ri != ind) {
311 // The end of the new committed region should not
312 // be in any existing region unless it matches
313 // the start of the next region.
314 assert(!_committed[ri].contains(end) ||
315 (_committed[ri].start() == (HeapWord*) end),
316 "Overlapping committed regions");
317 }
318 }
319 #endif
320 if (entry < end) {
321 memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
322 }
323 }
324 // In any case, the covered size changes.
325 _covered[ind].set_word_size(new_region.word_size());
326 if (TraceCardTableModRefBS) {
327 gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
328 gclog_or_tty->print_cr(" "
329 " _covered[%d].start(): " INTPTR_FORMAT
330 " _covered[%d].last(): " INTPTR_FORMAT,
331 ind, _covered[ind].start(),
332 ind, _covered[ind].last());
333 gclog_or_tty->print_cr(" "
334 " _committed[%d].start(): " INTPTR_FORMAT
335 " _committed[%d].last(): " INTPTR_FORMAT,
336 ind, _committed[ind].start(),
337 ind, _committed[ind].last());
338 gclog_or_tty->print_cr(" "
339 " byte_for(start): " INTPTR_FORMAT
340 " byte_for(last): " INTPTR_FORMAT,
341 byte_for(_covered[ind].start()),
342 byte_for(_covered[ind].last()));
343 gclog_or_tty->print_cr(" "
344 " addr_for(start): " INTPTR_FORMAT
345 " addr_for(last): " INTPTR_FORMAT,
346 addr_for((jbyte*) _committed[ind].start()),
347 addr_for((jbyte*) _committed[ind].last()));
348 }
349 debug_only(verify_guard();)
350 }
352 // Note that these versions are precise! The scanning code has to handle the
353 // fact that the write barrier may be either precise or imprecise.
355 void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
356 inline_write_ref_field(field, newVal);
357 }
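// Illustrative sketch of the precise/imprecise distinction mentioned above
// (not part of the original source): a precise barrier dirties the card of
// the updated field, while an imprecise one dirties the card of the object
// start, e.g.
//
//   byte_map_base[uintptr_t(field) >> card_shift] = dirty_card;  // precise
//   byte_map_base[uintptr_t(obj)   >> card_shift] = dirty_card;  // imprecise
//
// so the scanning code cannot assume a dirty card pinpoints the modified field.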
360 bool CardTableModRefBS::claim_card(size_t card_index) {
361 jbyte val = _byte_map[card_index];
362 if (val != claimed_card_val()) {
363 jbyte res = Atomic::cmpxchg((jbyte) claimed_card_val(), &_byte_map[card_index], val);
364 if (res == val)
365 return true;
366 else return false;
367 }
368 return false;
369 }
371 void CardTableModRefBS::non_clean_card_iterate(Space* sp,
372 MemRegion mr,
373 DirtyCardToOopClosure* dcto_cl,
374 MemRegionClosure* cl,
375 bool clear) {
376 if (!mr.is_empty()) {
377 int n_threads = SharedHeap::heap()->n_par_threads();
378 if (n_threads > 0) {
379 #ifndef SERIALGC
380 par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
381 #else // SERIALGC
382 fatal("Parallel gc not supported here.");
383 #endif // SERIALGC
384 } else {
385 non_clean_card_iterate_work(mr, cl, clear);
386 }
387 }
388 }
390 // NOTE: For this to work correctly, it is important that
391 // we look for non-clean cards below (so as to catch those
392 // marked precleaned), rather than look explicitly for dirty
393 // cards (and miss those marked precleaned). In that sense,
394 // the name precleaned is currently somewhat of a misnomer.
395 void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
396 MemRegionClosure* cl,
397 bool clear) {
398 // Figure out whether we have to worry about parallelism.
399 bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
400 for (int i = 0; i < _cur_covered_regions; i++) {
401 MemRegion mri = mr.intersection(_covered[i]);
402 if (mri.word_size() > 0) {
403 jbyte* cur_entry = byte_for(mri.last());
404 jbyte* limit = byte_for(mri.start());
405 while (cur_entry >= limit) {
406 jbyte* next_entry = cur_entry - 1;
407 if (*cur_entry != clean_card) {
408 size_t non_clean_cards = 1;
409 // Should the next card be included in this range of non-clean cards?
410 while (next_entry >= limit && *next_entry != clean_card) {
411 non_clean_cards++;
412 cur_entry = next_entry;
413 next_entry--;
414 }
415 // The memory region may not be on a card boundary. So that
416 // objects beyond the end of the region are not processed, make
417 // cur_cards precise with regard to the end of the memory region.
418 MemRegion cur_cards(addr_for(cur_entry),
419 non_clean_cards * card_size_in_words);
420 MemRegion dirty_region = cur_cards.intersection(mri);
421 if (clear) {
422 for (size_t i = 0; i < non_clean_cards; i++) {
423 // Clean the dirty cards (but leave other non-clean cards
424 // alone). If parallel, do the cleaning atomically.
425 jbyte cur_entry_val = cur_entry[i];
426 if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
427 if (is_par) {
428 jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
429 assert(res != clean_card,
430 "Dirty card mysteriously cleaned");
431 } else {
432 cur_entry[i] = clean_card;
433 }
434 }
435 }
436 }
437 cl->do_MemRegion(dirty_region);
438 }
439 cur_entry = next_entry;
440 }
441 }
442 }
443 }
445 void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
446 OopClosure* cl,
447 bool clear,
448 bool before_save_marks) {
449 // Note that dcto_cl is resource-allocated, so there is no
450 // corresponding "delete".
451 DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
452 MemRegion used_mr;
453 if (before_save_marks) {
454 used_mr = sp->used_region_at_save_marks();
455 } else {
456 used_mr = sp->used_region();
457 }
458 non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
459 }
461 void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
462 jbyte* cur = byte_for(mr.start());
463 jbyte* last = byte_after(mr.last());
464 while (cur < last) {
465 *cur = dirty_card;
466 cur++;
467 }
468 }
470 void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
471 for (int i = 0; i < _cur_covered_regions; i++) {
472 MemRegion mri = mr.intersection(_covered[i]);
473 if (!mri.is_empty()) dirty_MemRegion(mri);
474 }
475 }
477 void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
478 // Be conservative: only clean cards entirely contained within the
479 // region.
480 jbyte* cur;
481 if (mr.start() == _whole_heap.start()) {
482 cur = byte_for(mr.start());
483 } else {
484 assert(mr.start() > _whole_heap.start(), "mr is not covered.");
485 cur = byte_after(mr.start() - 1);
486 }
487 jbyte* last = byte_after(mr.last());
488 memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
489 }
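// Why byte_after(mr.start() - 1) above (illustrative, not part of the
// original source): if mr.start() is not card-aligned, mr.start() - 1 lies in
// the same card as mr.start(), so byte_after() skips that partially covered
// first card; if mr.start() is card-aligned, mr.start() - 1 lies in the
// previous card and byte_after() lands exactly on the card at mr.start().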
491 void CardTableModRefBS::clear(MemRegion mr) {
492 for (int i = 0; i < _cur_covered_regions; i++) {
493 MemRegion mri = mr.intersection(_covered[i]);
494 if (!mri.is_empty()) clear_MemRegion(mri);
495 }
496 }
498 void CardTableModRefBS::dirty(MemRegion mr) {
499 jbyte* first = byte_for(mr.start());
500 jbyte* last = byte_after(mr.last());
501 memset(first, dirty_card, last-first);
502 }
504 // NOTES:
505 // (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
506 // iterates over dirty card ranges in increasing address order.
507 void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
508 MemRegionClosure* cl) {
509 for (int i = 0; i < _cur_covered_regions; i++) {
510 MemRegion mri = mr.intersection(_covered[i]);
511 if (!mri.is_empty()) {
512 jbyte *cur_entry, *next_entry, *limit;
513 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
514 cur_entry <= limit;
515 cur_entry = next_entry) {
516 next_entry = cur_entry + 1;
517 if (*cur_entry == dirty_card) {
518 size_t dirty_cards;
519 // Accumulate maximal dirty card range, starting at cur_entry
520 for (dirty_cards = 1;
521 next_entry <= limit && *next_entry == dirty_card;
522 dirty_cards++, next_entry++);
523 MemRegion cur_cards(addr_for(cur_entry),
524 dirty_cards*card_size_in_words);
525 cl->do_MemRegion(cur_cards);
526 }
527 }
528 }
529 }
530 }
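// Worked example (illustrative only): if the card bytes for mri are
// [dirty, dirty, clean, dirty], the closure is invoked twice, first on a
// 2 * card_size_in_words region starting at addr_for() of the first entry,
// then on a one-card region for the last entry; the clean card is skipped.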
532 MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
533 bool reset,
534 int reset_val) {
535 for (int i = 0; i < _cur_covered_regions; i++) {
536 MemRegion mri = mr.intersection(_covered[i]);
537 if (!mri.is_empty()) {
538 jbyte* cur_entry, *next_entry, *limit;
539 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
540 cur_entry <= limit;
541 cur_entry = next_entry) {
542 next_entry = cur_entry + 1;
543 if (*cur_entry == dirty_card) {
544 size_t dirty_cards;
545 // Accumulate maximal dirty card range, starting at cur_entry
546 for (dirty_cards = 1;
547 next_entry <= limit && *next_entry == dirty_card;
548 dirty_cards++, next_entry++);
549 MemRegion cur_cards(addr_for(cur_entry),
550 dirty_cards*card_size_in_words);
551 if (reset) {
552 for (size_t i = 0; i < dirty_cards; i++) {
553 cur_entry[i] = reset_val;
554 }
555 }
556 return cur_cards;
557 }
558 }
559 }
560 }
561 return MemRegion(mr.end(), mr.end());
562 }
564 // Set all the dirty cards in the given region to "precleaned" state.
565 void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
566 for (int i = 0; i < _cur_covered_regions; i++) {
567 MemRegion mri = mr.intersection(_covered[i]);
568 if (!mri.is_empty()) {
569 jbyte *cur_entry, *limit;
570 for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
571 cur_entry <= limit;
572 cur_entry++) {
573 if (*cur_entry == dirty_card) {
574 *cur_entry = precleaned_card;
575 }
576 }
577 }
578 }
579 }
581 uintx CardTableModRefBS::ct_max_alignment_constraint() {
582 return card_size * os::vm_page_size();
583 }
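// Worked example (illustrative only, assuming 512-byte cards and a 4 KiB
// vm_page_size): the constraint is 512 * 4096 bytes == 2 MiB, which is the
// amount of heap covered by one page of the card table (vm_page_size card
// bytes, each covering card_size heap bytes).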
585 void CardTableModRefBS::verify_guard() {
586 // For product build verification
587 guarantee(_byte_map[_guard_index] == last_card,
588 "card table guard has been modified");
589 }
591 void CardTableModRefBS::verify() {
592 verify_guard();
593 }
595 #ifndef PRODUCT
596 class GuaranteeNotModClosure: public MemRegionClosure {
597 CardTableModRefBS* _ct;
598 public:
599 GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
600 void do_MemRegion(MemRegion mr) {
601 jbyte* entry = _ct->byte_for(mr.start());
602 guarantee(*entry != CardTableModRefBS::clean_card,
603 "Dirty card in region that should be clean");
604 }
605 };
607 void CardTableModRefBS::verify_clean_region(MemRegion mr) {
608 GuaranteeNotModClosure blk(this);
609 non_clean_card_iterate_work(mr, &blk, false);
610 }
611 #endif
613 bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
614 return
615 CardTableModRefBS::card_will_be_scanned(cv) ||
616 _rs->is_prev_nonclean_card_val(cv);
617 }
619 bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
620 return
621 cv != clean_card &&
622 (CardTableModRefBS::card_may_have_been_dirty(cv) ||
623 CardTableRS::youngergen_may_have_been_dirty(cv));
624 }