/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).
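
// Illustrative example (assumes the usual 512-byte cards, card_shift == 9,
// consistent with the card_size assertion in the constructor below): a
// reference store to a field at address 0x20001234 makes the write barrier
// dirty the single byte at
//   byte_map_base + (0x20001234 >> 9)
// so scanning code can later visit just the cards whose bytes are
// non-clean instead of walking the whole heap.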

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}
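
// Worked example (illustrative; assumes 8-byte HeapWords, so
// card_size_in_words == 64): covering a 64 MB heap means
// covered_words == 8M, giving 8M / 64 + 1 == 131073 card bytes,
// the extra one being the guard card.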

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}
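
// Continuing the example above (illustrative): 131073 card bytes with a
// 4 KB page size round up to align_size_up(131073, 4096) == 135168 bytes,
// i.e. 33 pages for the byte map.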

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL) {
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  }

  _cur_covered_regions = 0;
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
#ifdef MIPS64
  /* 2013/10/25 Jin: try to allocate byte_map_base within the low 32-bit
     address range. FIXME: should automatically search for a spare space. */
  ReservedSpace heap_rs(_byte_map_size, rs_align, false, (char *)0x20000000);
#else
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
#endif
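
  // Note (an assumption, not stated in the original comment): keeping the
  // byte map low presumably lets the computed byte_map_base stay within a
  // 32-bit range, so the MIPS store-check stub can materialize it with a
  // shorter instruction sequence than a full 64-bit constant would need.
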
  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
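
  // Worked example of the pre-displaced base (illustrative values): with
  // low_bound == 0x20000000 and card_shift == 9, byte_map_base is
  // _byte_map - 0x100000, so for any heap address p the barrier computes
  // byte_map_base + (p >> 9), which equals &_byte_map[(p - low_bound) >> 9]
  // without an explicit subtraction of the heap base.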

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j]   = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}
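
// Illustrative example: _covered is kept sorted by start address. With
// existing regions based at 0x20000000 and 0x60000000, a lookup for base
// 0x40000000 breaks out at i == 1, shifts the 0x60000000 entry up one
// slot, and installs a new (initially empty) region in slot 1.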

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}
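
// Illustrative example: if region "self" has committed card-table pages
// [A, C) and a neighbor's committed pages overlap them as [B, C), then
// committed_unique_to_self(self, [A, C)) yields [A, B) -- only the part
// no other region (nor the guard page) also has committed, and which is
// therefore safe to uncommit.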

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region that is actually used.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // Do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}
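
// Illustrative walk-through of resize_covered_region (values assumed):
// suppose region ind grows from [S, S+32M) to [S, S+48M). The new card
// table end, byte_after(new_region.last()), is aligned up to a page; if
// that page collides with neither another region nor the guard page, the
// gap between the old and new committed ends is committed and the fresh
// card bytes are memset to clean_card. A shrink instead uncommits only
// the pages committed_unique_to_self() reports as unshared.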

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}

void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_strong_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers().  n_par_threads can be set to 0 to
    // turn off parallelism.  For example when this code is called as
    // part of verification and SharedHeap::process_strong_roots() is being
    // used, then n_par_threads() may have been set to 0.  active_workers
    // is not overloaded with the meaning that it is a switch to disable
    // parallelism and so keeps the meaning of the number of
    // active gc workers.  If parallelism has not been shut off by
    // setting n_par_threads to 0, then n_par_threads should be
    // equal to active_workers.  When a different mechanism for shutting
    // off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    // This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism.
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    // [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    // [11] CompactingPermGenGen::younger_refs_iterate()
    // [12] CardTableRS::younger_refs_iterate()
    // [13] SharedHeap::process_strong_roots()
    // [14] G1CollectedHeap::verify()
    // [15] Universe::verify()
    // [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel.  The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
         (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}
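
// Illustrative trace of the backward scan above: with card values
// [D D C D] for cards 0..3 of a covered region (D = non-clean, C = clean),
// the scan starts at card 3, cannot extend the run past the clean card 2,
// and hands the closure the region for card 3 alone; it then skips card 2
// and groups cards 0-1 into a second region. Closures therefore see
// maximal non-clean runs in decreasing address order.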

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}
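
// Illustrative note: byte_after(mr.start() - 1) rounds the first card up,
// so when mr.start() falls in the middle of a card that leading partial
// card keeps whatever mark it had; clearing it could lose a dirty mark
// made on behalf of words just below mr.start() that share the card.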

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}
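
// Illustrative usage (hypothetical caller): find the first run of dirty
// cards in a region, resetting those cards to clean while returning the
// heap range they cover; an empty MemRegion at mr.end() means no dirty
// card was found.
//
//   MemRegion found =
//     ct->dirty_card_range_after_reset(mr, true /* reset */,
//                                      CardTableModRefBS::clean_card_val());
//   if (found.is_empty()) { /* no dirty cards in mr */ }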

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}
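
// Illustrative arithmetic: with 512-byte cards and 4 KB pages this is
// 2 MB. A heap aligned to card_size * page_size maps onto a page-aligned
// slice of the card table, so card-table memory can be committed and
// uncommitted in whole pages along heap-region boundaries.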

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start = byte_for(mr.start());
  jbyte* end   = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        tty->print_cr("== %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], "
                    "val: %d", p2i(curr), p2i(addr_for(curr)),
                    p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}