Thu, 21 Jan 2010 11:33:32 -0800
6895236: CMS: cmsOopClosures.inline.hpp:43 assert(..., "Should remember klasses in this context")
Summary: Adjust assertion checking for ExplicitGCInvokesConcurrentAndUnloadsClasses as a reason for class unloading
Reviewed-by: ysr
/*
 * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)
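//
// Illustrative sketch (assuming the typical 512-byte card size, so
// card_shift == 9; "field" and "heap_start" are stand-ins for a covered
// reference-field address and the base of the covered heap): each card of
// the heap maps to one byte of the card table, and a reference store
// dirties that byte, roughly
//
//   _byte_map[(uintptr_t(field) - uintptr_t(heap_start)) >> card_shift] = dirty_card;
//
// The real barrier avoids the subtraction by biasing the table pointer;
// see byte_map_base in the constructor below.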

# include "incls/_precompiled.incl"
# include "incls/_cardTableModRefBS.cpp.incl"

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
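  // For example (assuming the default 512-byte cards on a 64-bit VM, so
  // card_size_in_words == 64): covering 131072 heap words (1 MB) takes
  // 131072 / 64 = 2048 cards plus the guard card, i.e. 2049 card table bytes.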
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL)
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  int i;
  for (i = 0; i < max_covered_regions; i++) {
    _covered[i].set_word_size(0);
    _committed[i].set_word_size(0);
  }
  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
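  // With this bias, the card entry for any covered address p can be
  // computed without subtracting the heap base:
  //
  //   byte_for(p) == byte_map_base + (uintptr_t(p) >> card_shift)
  //
  // so for p == low_bound the result is exactly &_byte_map[0], which the
  // asserts below check.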
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, "card table last card");
  }
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        if (!os::uncommit_memory((char*)uncommit_region.start(),
                                 uncommit_region.byte_size())) {
          assert(false, "Card table contraction failed");
          // The call failed so don't change the end of the
          // committed region. This is better than taking the
          // VM down.
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
 Claimed and deferred bits are used together in G1 during the evacuation
 pause. These bits can have the following state transitions:
 1. The claimed bit can be put over any other card state, except that
    the "dirty -> dirty and claimed" transition is checked for in
    G1 code and is not used.
 2. The deferred bit can be set only if the previous state of the card
    was either clean or claimed. mark_card_deferred() is wait-free.
    We do not care whether the operation succeeds, because if it does
    not, the only consequence is a duplicate entry in the update buffer
    (a "cache miss"). So it is not worth spinning.
 */
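
// claim_card() below loops on a CAS and returns true only for the thread
// whose compare-and-exchange installed the claimed bit; if another thread
// claims the card first, it returns false. mark_card_deferred() issues at
// most one CAS and ignores its result, which is what makes it wait-free.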

bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}

bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // Cached bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

void CardTableModRefBS::non_clean_card_iterate(Space* sp,
                                               MemRegion mr,
                                               DirtyCardToOopClosure* dcto_cl,
                                               MemRegionClosure* cl,
                                               bool clear) {
  if (!mr.is_empty()) {
    int n_threads = SharedHeap::heap()->n_par_threads();
    if (n_threads > 0) {
#ifndef SERIALGC
      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
#else  // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      non_clean_card_iterate_work(mr, cl, clear);
    }
  }
}

// NOTE: For this to work correctly, it is important that
// we look for non-clean cards below (so as to catch those
// marked precleaned), rather than look explicitly for dirty
// cards (and miss those marked precleaned). In that sense,
// the name precleaned is currently somewhat of a misnomer.
void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
                                                    MemRegionClosure* cl,
                                                    bool clear) {
  // Figure out whether we have to worry about parallelism.
  bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary. So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          if (clear) {
            for (size_t i = 0; i < non_clean_cards; i++) {
              // Clean the dirty cards (but leave the other non-clean
              // cards alone.) If parallel, do the cleaning atomically.
              jbyte cur_entry_val = cur_entry[i];
              if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
                if (is_par) {
                  jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
                  assert(res != clean_card,
                         "Dirty card mysteriously cleaned");
                } else {
                  cur_entry[i] = clean_card;
                }
              }
            }
          }
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
                                                 OopClosure* cl,
                                                 bool clear,
                                                 bool before_save_marks) {
  // Note that dcto_cl is resource-allocated, so there is no
  // corresponding "delete".
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
  MemRegion used_mr;
  if (before_save_marks) {
    used_mr = sp->used_region_at_save_marks();
  } else {
    used_mr = sp->used_region();
  }
  non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// NOTES:
// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
//     iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

// Set all the dirty cards in the given region to "precleaned" state.
void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry++) {
        if (*cur_entry == dirty_card) {
          *cur_entry = precleaned_card;
        }
      }
    }
  }
}
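
// One OS page of card-table bytes covers card_size * os::vm_page_size()
// bytes of heap, so aligning covered-region boundaries to that product
// keeps their card-table boundaries page-aligned; that appears to be the
// motivation for the constraint below.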
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
class GuaranteeNotModClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
public:
  GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
  void do_MemRegion(MemRegion mr) {
    jbyte* entry = _ct->byte_for(mr.start());
    guarantee(*entry != CardTableModRefBS::clean_card,
              "Dirty card in region that should be clean");
  }
};

void CardTableModRefBS::verify_clean_region(MemRegion mr) {
  GuaranteeNotModClosure blk(this);
  non_clean_card_iterate_work(mr, &blk, false);
}

// To verify a MemRegion is entirely dirty this closure is passed to
// dirty_card_iterate. If the region is dirty do_MemRegion will be
// invoked only once with a MemRegion equal to the one being
// verified.
class GuaranteeDirtyClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
  MemRegion _mr;
  bool _result;
public:
  GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
    : _ct(ct), _mr(mr), _result(false) {}
  void do_MemRegion(MemRegion mr) {
    _result = _mr.equals(mr);
  }
  bool result() const { return _result; }
};

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  GuaranteeDirtyClosure blk(this, mr);
  dirty_card_iterate(mr, &blk);
  guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
}
#endif

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
};

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
};