Thu, 11 Dec 2008 12:05:08 -0800
6578152: fill_region_with_object has usability and safety issues
Reviewed-by: apetrusenko, ysr
/*
 * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

# include "incls/_precompiled.incl"
# include "incls/_cardTableModRefBS.cpp.incl"

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}
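
// Returns the size, in bytes, of the card table byte map: one byte per card
// (including the guard card), rounded up to the larger of the page size and
// the VM allocation granularity.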
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}
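
// The constructor reserves (but does not fully commit) the byte map backing
// the card table, sets byte_map_base so that byte_for() is a simple
// shift-and-add, commits the page holding the guard card, and allocates the
// per-region "lowest non-clean" (LNC) bookkeeping arrays.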
CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL)
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  int i;
  for (i = 0; i < max_covered_regions; i++) {
    _covered[i].set_word_size(0);
    _committed[i].set_word_size(0);
  }
  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, "card table last card");
  }
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}
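
// Returns the index of the covered region whose start address is "base",
// installing a new, empty entry (kept sorted by start address) if no such
// region exists yet.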
int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}
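
// Returns the highest committed-region end address among the entries that
// precede index "ind"; NULL if there are none.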
HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}
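
// Returns the portion of "mr" that is committed solely on behalf of the
// covered region at index "self", i.e. with every other region's committed
// range and the guard page subtracted out.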
MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          assert((new_end_aligned >= _committed[ri].start()) &&
                 (_committed[ri].start() > _committed[ind].start()),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          assert(new_end_aligned > _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    HeapWord* const new_end_for_commit = MIN2(new_end_aligned,
                                              _guard_region.start());

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        if (!os::uncommit_memory((char*)uncommit_region.start(),
                                 uncommit_region.byte_size())) {
          assert(false, "Card table contraction failed");
          // The call failed so don't change the end of the
          // committed region. This is better than taking the
          // VM down.
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < (int) _guard_index,
           "The guard card will be overwritten");
    // This line commented out cleans the newly expanded region and
    // not the aligned up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}
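
// Atomically "claim" the card at "card_index": returns true iff this call
// transitions the card's value to claimed_card_val(); returns false if the
// card was already claimed or if another thread changed the value first.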
bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  if (val != claimed_card_val()) {
    jbyte res = Atomic::cmpxchg((jbyte) claimed_card_val(), &_byte_map[card_index], val);
    if (res == val)
      return true;
    else return false;
  }
  return false;
}

void CardTableModRefBS::non_clean_card_iterate(Space* sp,
                                               MemRegion mr,
                                               DirtyCardToOopClosure* dcto_cl,
                                               MemRegionClosure* cl,
                                               bool clear) {
  if (!mr.is_empty()) {
    int n_threads = SharedHeap::heap()->n_par_threads();
    if (n_threads > 0) {
#ifndef SERIALGC
      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
#else  // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      non_clean_card_iterate_work(mr, cl, clear);
    }
  }
}

// NOTE: For this to work correctly, it is important that
// we look for non-clean cards below (so as to catch those
// marked precleaned), rather than look explicitly for dirty
// cards (and miss those marked precleaned). In that sense,
// the name precleaned is currently somewhat of a misnomer.
void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
                                                    MemRegionClosure* cl,
                                                    bool clear) {
  // Figure out whether we have to worry about parallelism.
  bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary. So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          if (clear) {
            for (size_t i = 0; i < non_clean_cards; i++) {
              // Clean the dirty cards (but leave the other non-clean cards
              // alone). If parallel, do the cleaning atomically.
              jbyte cur_entry_val = cur_entry[i];
              if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
                if (is_par) {
                  jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
                  assert(res != clean_card,
                         "Dirty card mysteriously cleaned");
                } else {
                  cur_entry[i] = clean_card;
                }
              }
            }
          }
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
                                                 OopClosure* cl,
                                                 bool clear,
                                                 bool before_save_marks) {
  // Note that dcto_cl is resource-allocated, so there is no
  // corresponding "delete".
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
  MemRegion used_mr;
  if (before_save_marks) {
    used_mr = sp->used_region_at_save_marks();
  } else {
    used_mr = sp->used_region();
  }
  non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
}
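
// Marks as dirty every card that intersects "mr", including the cards that
// only partially overlap it at either end.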
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// NOTES:
// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
//     iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}
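
// Scans "mr" for the first maximal run of dirty cards, optionally resetting
// those cards to "reset_val" first, and returns that run as a card-aligned
// MemRegion; returns an empty region at mr.end() if no dirty card is found.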
MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

// Set all the dirty cards in the given region to "precleaned" state.
void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry++) {
        if (*cur_entry == dirty_card) {
          *cur_entry = precleaned_card;
        }
      }
    }
  }
}
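
// Maximum heap-alignment constraint the card table may impose: one page of
// card table bytes covers card_size * os::vm_page_size() bytes of heap.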
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
class GuaranteeNotModClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
public:
  GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
  void do_MemRegion(MemRegion mr) {
    jbyte* entry = _ct->byte_for(mr.start());
    guarantee(*entry != CardTableModRefBS::clean_card,
              "Dirty card in region that should be clean");
  }
};

void CardTableModRefBS::verify_clean_region(MemRegion mr) {
  GuaranteeNotModClosure blk(this);
  non_clean_card_iterate_work(mr, &blk, false);
}
#endif
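
// CardTableModRefBSForCTRS widens the "will be scanned" and "may have been
// dirty" card predicates to include the per-generation card values
// maintained by CardTableRS.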
bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
};

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
};