Fri, 06 Mar 2009 13:50:14 -0800
6720309: G1: don't synchronously update RSet during evacuation pauses
6720334: G1: don't update RSets of collection set regions during an evacuation pause
Summary: Introduced a deferred update mechanism for delaying the rset updates during the collection pause
Reviewed-by: apetrusenko, tonyp
/*
 * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

# include "incls/_precompiled.incl"
# include "incls/_cardTableModRefBS.cpp.incl"

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}
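
// Worked example for cards_required() above (illustrative only, not part of
// the original code): with the default 512-byte cards and 8-byte HeapWords
// (so card_size_in_words == 64), covering a 64 MB space of 8,388,608 words
// needs 8,388,608 / 64 = 131,072 card entries, plus the guard card, i.e.
// cards_required() returns 131,073.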

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must not exceed 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL)
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  int i;
  for (i = 0; i < max_covered_regions; i++) {
    _covered[i].set_word_size(0);
    _committed[i].set_word_size(0);
  }
  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)

  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
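
  // Worked example (illustrative values, assuming the default card_shift of 9
  // for 512-byte cards): if low_bound is 0x20000000 and _byte_map is at
  // 0x08000000, then byte_map_base = 0x08000000 - (0x20000000 >> 9)
  //                                = 0x08000000 - 0x00100000 = 0x07f00000.
  // A store to an oop field at 0x20000100 then dirties
  //   byte_map_base + (0x20000100 >> 9) = 0x07f00000 + 0x00100000 = 0x08000000,
  // which is &_byte_map[0], the entry for the first card of the heap.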

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, "card table last card");
  }
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j]   = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    HeapWord* const new_end_for_commit = MIN2(new_end_aligned,
                                              _guard_region.start());

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        if (!os::uncommit_memory((char*)uncommit_region.start(),
                                 uncommit_region.byte_size())) {
          assert(false, "Card table contraction failed");
          // The call failed so don't change the end of the
          // committed region. This is better than taking the
          // VM down.
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out assignment below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
  Claimed and deferred bits are used together in G1 during the evacuation
  pause. These bits can have the following state transitions:
  1. The claimed bit can be put over any other card state, except that
     the "dirty -> dirty and claimed" transition is checked for in
     G1 code and is not used.
  2. The deferred bit can be set only if the previous state of the card
     was either clean or claimed. mark_card_deferred() is wait-free.
     We do not care whether the operation succeeds, because if it does not
     the only consequence is a duplicate entry in the update buffer caused
     by the "cache miss". So it is not worth spinning.
*/
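
// A sketch of the resulting transitions, derived from the comment above and
// the code below (illustration only):
//
//   clean   --claim_card()---------> claimed
//   clean   --mark_card_deferred()-> deferred
//   claimed --mark_card_deferred()-> claimed | deferred
//   dirty   --claim_card()---------> never attempted (asserted against)
//
// If a racing update makes mark_card_deferred() lose its cmpxchg, the card
// may simply be enqueued twice in an update buffer, which is harmless.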

bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}

bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // The card has already been processed.
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // The deferred bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}
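
// Hypothetical usage sketch (not the actual G1 call sites; "deferred_cards"
// and the exact enqueue mechanism are assumptions for illustration). A worker
// that wants to postpone the remembered-set update for the card covering
// "field" might do:
//
//   size_t card = ct->index_for(field);
//   if (ct->mark_card_deferred(card)) {
//     deferred_cards->push(card);   // process after the evacuation pause
//   }
//
// A "false" return means the card already carries the deferred bit, so it
// does not need to be enqueued again.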

void CardTableModRefBS::non_clean_card_iterate(Space* sp,
                                               MemRegion mr,
                                               DirtyCardToOopClosure* dcto_cl,
                                               MemRegionClosure* cl,
                                               bool clear) {
  if (!mr.is_empty()) {
    int n_threads = SharedHeap::heap()->n_par_threads();
    if (n_threads > 0) {
#ifndef SERIALGC
      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
#else  // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      non_clean_card_iterate_work(mr, cl, clear);
    }
  }
}

// NOTE: For this to work correctly, it is important that
// we look for non-clean cards below (so as to catch those
// marked precleaned), rather than look explicitly for dirty
// cards (and miss those marked precleaned). In that sense,
// the name precleaned is currently somewhat of a misnomer.
void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
                                                    MemRegionClosure* cl,
                                                    bool clear) {
  // Figure out whether we have to worry about parallelism.
  bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary. So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          if (clear) {
            for (size_t i = 0; i < non_clean_cards; i++) {
              // Clean the dirty cards (but leave the other non-clean
              // cards alone). If parallel, do the cleaning atomically.
              jbyte cur_entry_val = cur_entry[i];
              if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
                if (is_par) {
                  jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
                  assert(res != clean_card,
                         "Dirty card mysteriously cleaned");
                } else {
                  cur_entry[i] = clean_card;
                }
              }
            }
          }
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
                                                 OopClosure* cl,
                                                 bool clear,
                                                 bool before_save_marks) {
  // Note that dcto_cl is resource-allocated, so there is no
  // corresponding "delete".
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
  MemRegion used_mr;
  if (before_save_marks) {
    used_mr = sp->used_region_at_save_marks();
  } else {
    used_mr = sp->used_region();
  }
  non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}
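
// Example for the rounding above (illustrative, assuming 512-byte cards):
// for a region starting at 0x8010, byte_after(mr.start() - 1) yields the
// entry for the card beginning at 0x8200, so the partially covered card
// spanning [0x8000, 0x8200) is left untouched.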

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// NOTES:
// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
//     iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

// Set all the dirty cards in the given region to "precleaned" state.
void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry++) {
        if (*cur_entry == dirty_card) {
          *cur_entry = precleaned_card;
        }
      }
    }
  }
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
class GuaranteeNotModClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
public:
  GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
  void do_MemRegion(MemRegion mr) {
    jbyte* entry = _ct->byte_for(mr.start());
    guarantee(*entry != CardTableModRefBS::clean_card,
              "Dirty card in region that should be clean");
  }
};

void CardTableModRefBS::verify_clean_region(MemRegion mr) {
  GuaranteeNotModClosure blk(this);
  non_clean_card_iterate_work(mr, &blk, false);
}
#endif

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
};

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
};