Wed, 26 Jun 2013 16:58:37 +0200
8013590: NPG: Add a memory pool MXBean for Metaspace
Reviewed-by: jmasa, mgerdin
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).
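// Each card-sized chunk of the covered heap maps to one byte in _byte_map;
// the write barrier marks the card for a modified ref field as dirty, and
// the collector later scans only the objects on non-clean cards.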

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
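  // For illustration, with the default 512-byte cards on a 64-bit VM
  // (card_size_in_words == 64): covering 1000 words rounds up to 1024 words,
  // which needs 1024 / 64 + 1 == 17 cards, the extra one being the guard.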
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL) {
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  }

  _cur_covered_regions = 0;
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
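
  // For any address p in the covered heap,
  //   byte_for(p) == byte_map_base + (uintptr_t(p) >> card_shift)
  //               == &_byte_map[(uintptr_t(p) - uintptr_t(low_bound)) >> card_shift]
  // (the two forms agree because low_bound is card-aligned, as asserted
  // above), so the store-check code can index the biased byte_map_base with
  // the shifted oop directly, without first subtracting low_bound.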

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
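  // Find the index of the covered region whose start is "base"; if none
  // exists yet, insert a new, empty one at that position, keeping _covered
  // sorted by start address.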
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
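  // Return the subset of "mr" that is committed solely on behalf of region
  // "self", i.e. with the parts committed for other covered regions and the
  // guard page removed.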
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
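// (A precise barrier dirties the card for the address of the updated field
// itself; an imprecise barrier may dirty only the card for the start of the
// containing object, so the scanner cannot assume that a dirty card pinpoints
// the modified field.)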

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
   Claimed and deferred bits are used together in G1 during the evacuation
   pause. These bits can have the following state transitions:
   1. The claimed bit can be put over any other card state, except that
      the "dirty -> dirty and claimed" transition is checked for in
      G1 code and is not used.
   2. The deferred bit can be set only if the previous state of the card
      was either clean or claimed. mark_card_deferred() is wait-free.
      We do not care whether the operation succeeds, because if it does not
      the only consequence is a duplicate entry in the update buffer caused
      by the "cache miss". So it is not worth spinning.
 */
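
// Illustrative transition sequences permitted by the rules above:
//   clean -> claimed -> claimed and deferred
//   clean -> deferred
// (dirty -> dirty and claimed is not used by G1.)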

bool CardTableModRefBS::claim_card(size_t card_index) {
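  // Atomically set the claimed bit for this card.  Returns true if this
  // thread installed the bit, false if the card was (or became) claimed by
  // another thread.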
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}

bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
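  // Wait-free attempt to set the deferred bit; the bit is only installed on
  // a clean or claimed card.  Returns false if the card is already marked
  // deferred, true otherwise.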
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // The deferred (cached) bit can be installed either on a clean card or on a claimed card.
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}

void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_strong_roots()) claims that all GC threads
    // execute this call.  With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call.  The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers().  n_par_threads can be set to 0 to
    // turn off parallelism.  For example when this code is called as
    // part of verification and SharedHeap::process_strong_roots() is being
    // used, then n_par_threads() may have been set to 0.  active_workers
    // is not overloaded with the meaning that it is a switch to disable
    // parallelism and so keeps the meaning of the number of
    // active gc workers.  If parallelism has not been shut off by
    // setting n_par_threads to 0, then n_par_threads should be
    // equal to active_workers.  When a different mechanism for shutting
    // off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    // This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism.
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    // [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    // [11] CompactingPermGenGen::younger_refs_iterate()
    // [12] CardTableRS::younger_refs_iterate()
    // [13] SharedHeap::process_strong_roots()
    // [14] G1CollectedHeap::verify()
    // [15] Universe::verify()
    // [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
         (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
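  // Find the first maximal run of dirty cards that intersects mr (scanning
  // the covered regions in order) and return the corresponding memory
  // region; if "reset" is true, also reset those card entries to reset_val.
  // If no dirty card is found, return an empty region at mr.end().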
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
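  // One committed page of the card table maps card_size * vm_page_size()
  // bytes of heap, so this is the coarsest alignment the card table imposes
  // on heap sizing.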
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start = byte_for(mr.start());
  jbyte* end   = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]", start, end);
        tty->print_cr("==   %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("==   card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
                    "val: %d", curr, addr_for(curr),
                    (HeapWord*) (((size_t) addr_for(curr)) + card_size),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               _byte_map, _byte_map + _byte_map_size, byte_map_base);
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
};

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
};