/*
 * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)

# include "incls/_precompiled.incl"
# include "incls/_cardTableModRefBS.cpp.incl"

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?
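
  // Illustrative arithmetic only (assuming the usual 512 byte card, i.e.
  // card_shift == 9): each byte of the card table summarizes card_size
  // bytes of heap, so a 512 MB covered heap needs about 1M card table
  // bytes, and cards_required() adds one more for the guard card.
  // compute_byte_map_size() then rounds that total up to the larger of
  // the page size and the VM allocation granularity.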

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL)
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  int i;
  for (i = 0; i < max_covered_regions; i++) {
    _covered[i].set_word_size(0);
    _committed[i].set_word_size(0);
  }
  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    // Do better than this for Merlin
    vm_exit_out_of_memory(_page_size, "card table last card");
  }
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr("  "
                  "  &_byte_map[0]: " INTPTR_FORMAT
                  "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  &_byte_map[0],
                  &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr("  "
                  "  byte_map_base: " INTPTR_FORMAT,
                  byte_map_base);
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      if (!os::commit_memory((char*)new_committed.start(),
                             new_committed.byte_size(), _page_size)) {
        // Do better than this for Merlin
        vm_exit_out_of_memory(new_committed.byte_size(),
                              "card table expansion");
      }
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        if (!os::uncommit_memory((char*)uncommit_region.start(),
                                 uncommit_region.byte_size())) {
          assert(false, "Card table contraction failed");
          // The call failed so don't change the end of the
          // committed region.  This is better than taking the
          // VM down.
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr("  "
                  "  _covered[%d].start(): " INTPTR_FORMAT
                  "  _covered[%d].last(): " INTPTR_FORMAT,
                  ind, _covered[ind].start(),
                  ind, _covered[ind].last());
    gclog_or_tty->print_cr("  "
                  "  _committed[%d].start(): " INTPTR_FORMAT
                  "  _committed[%d].last(): " INTPTR_FORMAT,
                  ind, _committed[ind].start(),
                  ind, _committed[ind].last());
    gclog_or_tty->print_cr("  "
                  "  byte_for(start): " INTPTR_FORMAT
                  "  byte_for(last): " INTPTR_FORMAT,
                  byte_for(_covered[ind].start()),
                  byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr("  "
                  "  addr_for(start): " INTPTR_FORMAT
                  "  addr_for(last): " INTPTR_FORMAT,
                  addr_for((jbyte*) _committed[ind].start()),
                  addr_for((jbyte*) _committed[ind].last()));
  }
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
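// ("Precise" here means the barrier dirties the card of the exact field
//  address being written: conceptually the card byte at
//  byte_map_base[uintptr_t(field) >> card_shift] is set to dirty_card,
//  which is what the inline_write_ref_field() call in
//  write_ref_field_work() below boils down to.  An imprecise barrier may
//  instead dirty only the card of the object's header, so the scanning
//  code cannot assume which of the two it will see.)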

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

/*
   Claimed and deferred bits are used together in G1 during the evacuation
   pause. These bits can have the following state transitions:
   1. The claimed bit can be put over any other card state. Except that
      the "dirty -> dirty and claimed" transition is checked for in
      G1 code and is not used.
   2. Deferred bit can be set only if the previous state of the card
      was either clean or claimed. mark_card_deferred() is wait-free.
      We do not care whether the operation succeeds, because if it does
      not, the only consequence is a duplicate entry in the update buffer
      due to the "cache miss". So it is not worth spinning.
 */


bool CardTableModRefBS::claim_card(size_t card_index) {
  jbyte val = _byte_map[card_index];
  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
  while (val == clean_card_val() ||
         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
    jbyte new_val = val;
    if (val == clean_card_val()) {
      new_val = (jbyte)claimed_card_val();
    } else {
      new_val = val | (jbyte)claimed_card_val();
    }
    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
    if (res == val) {
      return true;
    }
    val = res;
  }
  return false;
}

bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
  jbyte val = _byte_map[card_index];
  // It's already processed
  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    return false;
  }
  // Cached bit can be installed either on a clean card or on a claimed card.
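  // The transitions performed below are therefore:
  //   clean   -> deferred
  //   claimed -> claimed | deferred
  // Any other value (e.g. dirty) is left unchanged (no cmpxchg is
  // attempted) and true is returned.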
  jbyte new_val = val;
  if (val == clean_card_val()) {
    new_val = (jbyte)deferred_card_val();
  } else {
    if (val & claimed_card_val()) {
      new_val = val | (jbyte)deferred_card_val();
    }
  }
  if (new_val != val) {
    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
  }
  return true;
}


void CardTableModRefBS::non_clean_card_iterate(Space* sp,
                                               MemRegion mr,
                                               DirtyCardToOopClosure* dcto_cl,
                                               MemRegionClosure* cl,
                                               bool clear) {
  if (!mr.is_empty()) {
    int n_threads = SharedHeap::heap()->n_par_threads();
    if (n_threads > 0) {
#ifndef SERIALGC
      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, clear, n_threads);
#else  // SERIALGC
      fatal("Parallel gc not supported here.");
#endif // SERIALGC
    } else {
      non_clean_card_iterate_work(mr, cl, clear);
    }
  }
}

// NOTE: For this to work correctly, it is important that
// we look for non-clean cards below (so as to catch those
// marked precleaned), rather than look explicitly for dirty
// cards (and miss those marked precleaned). In that sense,
// the name precleaned is currently somewhat of a misnomer.
void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
                                                    MemRegionClosure* cl,
                                                    bool clear) {
  // Figure out whether we have to worry about parallelism.
  bool is_par = (SharedHeap::heap()->n_par_threads() > 1);
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary.  So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          if (clear) {
            for (size_t i = 0; i < non_clean_cards; i++) {
              // Clean the dirty cards (but leave the other non-clean
              // alone.)  If parallel, do the cleaning atomically.
              jbyte cur_entry_val = cur_entry[i];
              if (card_is_dirty_wrt_gen_iter(cur_entry_val)) {
                if (is_par) {
                  jbyte res = Atomic::cmpxchg(clean_card, &cur_entry[i], cur_entry_val);
                  assert(res != clean_card,
                         "Dirty card mysteriously cleaned");
                } else {
                  cur_entry[i] = clean_card;
                }
              }
            }
          }
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::mod_oop_in_space_iterate(Space* sp,
                                                 OopClosure* cl,
                                                 bool clear,
                                                 bool before_save_marks) {
  // Note that dcto_cl is resource-allocated, so there is no
  // corresponding "delete".
  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision());
  MemRegion used_mr;
  if (before_save_marks) {
    used_mr = sp->used_region_at_save_marks();
  } else {
    used_mr = sp->used_region();
  }
  non_clean_card_iterate(sp, used_mr, dcto_cl, dcto_cl, clear);
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// NOTES:
// (1) Unlike mod_oop_in_space_iterate() above, dirty_card_iterate()
//     iterates over dirty card ranges in increasing address order.
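//
// A hypothetical caller (illustrative sketch only; no such closure exists
// in this file) could, for example, total the dirty footprint of a region:
//
//   class CountDirtyClosure: public MemRegionClosure {
//     size_t _words;
//    public:
//     CountDirtyClosure() : _words(0) {}
//     void do_MemRegion(MemRegion mr) { _words += mr.word_size(); }
//     size_t words() const { return _words; }
//   };
//
//   CountDirtyClosure blk;
//   ct->dirty_card_iterate(mr, &blk);  // blk.words() is the dirty word count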
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

// Set all the dirty cards in the given region to "precleaned" state.
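// (See the NOTE above non_clean_card_iterate_work(): scanning looks for
// non-clean rather than strictly dirty cards precisely so that cards set
// to precleaned_card here are not missed later.)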
void CardTableModRefBS::preclean_dirty_cards(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry++) {
        if (*cur_entry == dirty_card) {
          *cur_entry = precleaned_card;
        }
      }
    }
  }
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
class GuaranteeNotModClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
public:
  GuaranteeNotModClosure(CardTableModRefBS* ct) : _ct(ct) {}
  void do_MemRegion(MemRegion mr) {
    jbyte* entry = _ct->byte_for(mr.start());
    guarantee(*entry != CardTableModRefBS::clean_card,
              "Dirty card in region that should be clean");
  }
};

void CardTableModRefBS::verify_clean_region(MemRegion mr) {
  GuaranteeNotModClosure blk(this);
  non_clean_card_iterate_work(mr, &blk, false);
}

// To verify a MemRegion is entirely dirty this closure is passed to
// dirty_card_iterate.  If the region is dirty, do_MemRegion will be
// invoked only once with a MemRegion equal to the one being
// verified.
class GuaranteeDirtyClosure: public MemRegionClosure {
  CardTableModRefBS* _ct;
  MemRegion _mr;
  bool _result;
public:
  GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
    : _ct(ct), _mr(mr), _result(false) {}
  void do_MemRegion(MemRegion mr) {
    _result = _mr.equals(mr);
  }
  bool result() const { return _result; }
};

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  GuaranteeDirtyClosure blk(this, mr);
  dirty_card_iterate(mr, &blk);
  guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
}
#endif

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
};

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
};