Fri, 19 Apr 2013 11:08:52 -0700
8010992: Remove calls to global ::operator new[] and new
Summary: Disable use of the global operator new and new[], which could throw unexpected exceptions and allow allocations to escape NMT tracking.
Reviewed-by: coleenp, dholmes, zgu
Contributed-by: yumin.qi@oracle.com
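
The pattern behind the fix: array allocations that previously went through the
global ::operator new[] (which can throw std::bad_alloc and is invisible to
Native Memory Tracking) are routed through C-heap helpers that take an NMT
memory type and an allocation-failure strategy, as the
NEW_C_HEAP_OBJECT_ARRAY/FREE_C_HEAP_OBJECT_ARRAY calls in the file below do.
A minimal sketch of the idea, assuming os::malloc plus placement-new
construction; the real macro expansion may differ:

    // Sketch only: a C-heap array of default-constructed objects, NMT-tracked.
    MemRegion* regions = (MemRegion*) os::malloc(n * sizeof(MemRegion), mtGC);
    if (regions != NULL) {
      for (int i = 0; i < n; i++) {
        ::new ((void*)&regions[i]) MemRegion();  // placement new, no global new[]
      }
    }
    // Failure is reported via the NULL return (AllocFailStrategy::RETURN_NULL
    // in the calls below) rather than via a C++ exception.
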
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}
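
// Worked example (illustrative, not from the original source): with the
// default 512-byte cards on a 64-bit VM, card_size_in_words == 64, so
// covering 1000 heap words takes align_size_up(1000, 64) / 64 + 1
// == 1024 / 64 + 1 == 17 cards, the extra one being the guard card.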

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must not exceed 512"); // why?

  NEW_C_HEAP_OBJECT_ARRAY(_covered, MemRegion, max_covered_regions, mtGC, 0, AllocFailStrategy::RETURN_NULL);
  NEW_C_HEAP_OBJECT_ARRAY(_committed, MemRegion, max_covered_regions, mtGC, 0, AllocFailStrategy::RETURN_NULL);
  if (_covered == NULL || _committed == NULL) {
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  }

  _cur_covered_regions = 0;
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
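
  // Illustrative note (not in the original source): byte_for(p) computes
  // byte_map_base + (uintptr_t(p) >> card_shift), so with the default
  // card_shift of 9 every 512-byte span of heap shares one card byte, and
  // the biasing above makes byte_for(low_bound) land exactly on _byte_map[0].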

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, "card table last card");
  }

  *guard_card = last_card;
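
  // The guard card sits one past _last_valid_index; verify_guard() below
  // checks that it still holds last_card, catching accidental writes past
  // the last valid card table entry.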

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  " &_byte_map[0]: " INTPTR_FORMAT
                  " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  " byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    FREE_C_HEAP_OBJECT_ARRAY(MemRegion, _covered, _max_covered_regions, mtGC);
    _covered = NULL;
  }
  if (_committed) {
    FREE_C_HEAP_OBJECT_ARRAY(MemRegion, _committed, _max_covered_regions, mtGC);
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
      // Use new_end_aligned (as opposed to new_end_for_commit) because
      // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving. A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region. This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // Do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  " _covered[%d].start(): " INTPTR_FORMAT
                  " _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  " _committed[%d].start(): " INTPTR_FORMAT
                  " _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  " byte_for(start): " INTPTR_FORMAT
                  " byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  " addr_for(start): " INTPTR_FORMAT
                  " addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only(*byte_for(_covered[ind].last());)
  debug_only(verify_guard();)
}

// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
   Claimed and deferred bits are used together in G1 during the evacuation
   pause. These bits can have the following state transitions:
   1. The claimed bit can be put over any other card state. Except that
      the "dirty -> dirty and claimed" transition is checked for in
      G1 code and is not used.
   2. Deferred bit can be set only if the previous state of the card
      was either clean or claimed. mark_card_deferred() is wait-free.
      We do not care whether the operation succeeds, because a failure
      only results in a duplicate entry in the update buffer (a
      "cache miss"), so it is not worth spinning.
 */
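
// Summary of the legal transitions described above (illustrative):
//   clean   -> claimed             (claim_card)
//   clean   -> deferred            (mark_card_deferred)
//   claimed -> claimed | deferred  (mark_card_deferred)
//   other   -> other | claimed     (claim_card; "dirty -> dirty and claimed"
//                                   is checked for in G1 and not used)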

bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}

bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // The deferred bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_strong_roots()) claims that all GC threads
    // execute this call. With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call. The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers(). n_par_threads can be set to 0 to
    // turn off parallelism. For example when this code is called as
    // part of verification and SharedHeap::process_strong_roots() is being
    // used, then n_par_threads() may have been set to 0. active_workers
    // is not overloaded with the meaning that it is a switch to disable
    // parallelism and so keeps the meaning of the number of
    // active gc workers. If parallelism has not been shut off by
    // setting n_par_threads to 0, then n_par_threads should be
    // equal to active_workers. When a different mechanism for shutting
    // off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    // This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism.
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    // [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    // [11] CompactingPermGenGen::younger_refs_iterate()
    // [12] CardTableRS::younger_refs_iterate()
    // [13] SharedHeap::process_strong_roots()
    // [14] G1CollectedHeap::verify()
    // [15] Universe::verify()
    // [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
         (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary. So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}
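
// Illustrative note (not in the original source): with the default 512-byte
// card_size and a 4K os::vm_page_size(), the constraint below evaluates to
// 512 * 4096 = 2M.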
uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start = byte_for(mr.start());
  jbyte* end   = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]", start, end);
        tty->print_cr("== %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("== card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
                    "val: %d", curr, addr_for(curr),
                    (HeapWord*) (((size_t) addr_for(curr)) + card_size),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               _byte_map, _byte_map + _byte_map_size, byte_map_base);
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
}

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
}