/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).
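//
// [Editor's note] A minimal sketch of the mapping this table implements,
// assuming the standard HotSpot card_shift of 9 (card_size == 512 bytes).
// 'p', 'entry' and 'addr' below are illustrative locals, not part of this
// file:
//
//   // Card entry for a heap address p: one byte per 512-byte card.
//   jbyte* entry = byte_map_base + (uintptr_t(p) >> card_shift);
//
//   // Conversely, the first heap address covered by a card entry:
//   HeapWord* addr = (HeapWord*) ((uintptr_t)(entry - byte_map_base) << card_shift);
//
// Two oop stores into the same 512-byte span therefore dirty the same card
// byte, which is what lets the GC enumerate modified ref fields by scanning
// the byte map instead of the whole heap.
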
size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}
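
// [Editor's note] Worked example, assuming 512-byte cards and a 4 KB page
// size: a 1 GB (2^30 byte) heap needs 2^30 / 2^9 = 2^21 card bytes, so
// compute_byte_map_size() returns roughly 2 MB -- the 2^21 cards plus the
// guard card, rounded up to the larger of _page_size and the allocation
// granularity.
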
CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(0),
  _guard_region(),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _covered(NULL),
  _committed(NULL),
  _cur_covered_regions(0),
  _byte_map(NULL),
  byte_map_base(NULL),
  // LNC functionality
  _lowest_non_clean(NULL),
  _lowest_non_clean_chunk_size(NULL),
  _lowest_non_clean_base_chunk_index(NULL),
  _last_LNC_resizing_collection(NULL)
{
  _kind = BarrierSet::CardTableModRef;

  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

void CardTableModRefBS::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL) {
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  }
  for (int i = 0; i < _max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}

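// [Editor's note] Sketch of the biased-base arithmetic set up in
// initialize() above, with a hypothetical heap base of 0x80000000 and
// card_shift == 9:
//
//   byte_map_base = _byte_map - (0x80000000 >> 9);
//   // For any address p inside the heap, the barrier then needs only a
//   // shift and an add, with no explicit subtraction of the heap base:
//   jbyte* entry = byte_map_base + (uintptr_t(p) >> 9);
//   //           == _byte_map + ((uintptr_t(p) - 0x80000000) >> 9)
//
// byte_map_base itself may point outside the reserved byte map; it is only
// dereferenced after a shifted in-heap address has been added to it.
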
CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}
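
// [Editor's note] Example of the insertion behavior above, with
// hypothetical addresses: if the covered starts are currently
// {0x1000, 0x3000}, a call with base == 0x2000 breaks out of the first
// loop at i == 1, shifts the 0x3000 entry (and its committed twin) up one
// slot, and installs a new zero-sized region starting at 0x2000 at index 1,
// keeping _covered sorted by start address.
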
int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The following line, left commented out, would clean only the newly
    // expanded region rather than the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}
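
// [Editor's note] The resize above reduces to three cases:
//   - grow:   new_end_for_commit > cur_committed.end() -> commit the pages
//             in [cur_committed.end(), new_end_for_commit).
//   - shrink: new_end_aligned < cur_committed.end() -> uncommit the part
//             unique to this region, unless UseAdaptiveGCBoundary is set or
//             the uncommit fails, in which case the old end is kept.
//   - guard clamp: a would-be end past _guard_region.start() is clamped so
//             the always-committed guard page is never committed over.
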
// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}
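
// [Editor's note] A minimal sketch of what inline_write_ref_field() does on
// the non-release path (see its inline definition in the header); 'field'
// is the address of the updated reference:
//
//   volatile jbyte* byte = byte_for(field);
//   *byte = dirty_card;   // dirty the card covering 'field'
//
// The release variant performs the same store with release ordering, so the
// card mark cannot become visible before the reference store it reports.
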
void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers().  n_par_threads can be set to 0 to
    // turn off parallelism.  For example when this code is called as
    // part of verification and SharedHeap::process_roots() is being
    // used, then n_par_threads() may have been set to 0.  active_workers
    // is not overloaded with the meaning that it is a switch to disable
    // parallelism and so keeps the meaning of the number of
    // active gc workers.  If parallelism has not been shut off by
    // setting n_par_threads to 0, then n_par_threads should be
    // equal to active_workers.  When a different mechanism for shutting
    // off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    // This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism.
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    // [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    // [11] CompactingPermGenGen::younger_refs_iterate()
    // [12] CardTableRS::younger_refs_iterate()
    // [13] SharedHeap::process_strong_roots()
    // [14] G1CollectedHeap::verify()
    // [15] Universe::verify()
    // [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
         (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}
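
// [Editor's note] Worked example of the backward scan above: with card
// bytes [clean, dirty, dirty, clean, dirty] (in address order), the loop
// starts at the last entry and first reports the trailing one-card dirty
// region, skips the clean card, then accumulates non_clean_cards == 2 for
// the adjacent dirty pair and reports it as a single MemRegion. Each
// reported range is intersected with mri, so the closure never sees cards
// outside the requested region.
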
void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end");
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}
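
// [Editor's note] Hypothetical usage sketch: consuming dirty ranges one at
// a time while resetting them to clean_card. Everything here other than
// dirty_card_range_after_reset() and clean_card is illustrative only:
//
//   MemRegion rest = mr;
//   while (true) {
//     MemRegion d = dirty_card_range_after_reset(rest, true, clean_card);
//     if (d.is_empty()) break;          // (mr.end(), mr.end()) => no more dirty cards
//     // ... process d ...
//     if (d.end() >= rest.end()) break; // returned range is not clipped to mr
//     rest = MemRegion(d.end(), rest.end());
//   }
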
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}
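
// [Editor's note] Worked arithmetic, assuming 512-byte cards and a 4 KB
// page size: the constraint is 512 * 4096 = 2 MB. A heap boundary aligned
// to 2 MB corresponds to a 4 KB-aligned offset in the card table, i.e. a
// page boundary, which lets the table be committed and uncommitted
// page-by-page as the covered regions resize.
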
void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start = byte_for(mr.start());
  jbyte* end   = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        tty->print_cr("==   %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], "
                    "val: %d", p2i(curr), p2i(addr_for(curr)),
                    p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}