/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
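  // Worked example (illustrative only, assuming the default 512-byte cards,
  // i.e. card_size_in_words == 64 with 8-byte HeapWords): a 1 GiB heap is
  // 2^27 words, which needs 2^27 / 64 = 2,097,152 card bytes plus the guard
  // card, roughly 2 MB of card table.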
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL) {
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  }

  _cur_covered_regions = 0;
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr(" "
                           " &_byte_map[0]: " INTPTR_FORMAT
                           " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                           &_byte_map[0],
                           &_byte_map[_last_valid_index]);
    gclog_or_tty->print_cr(" "
                           " byte_map_base: " INTPTR_FORMAT,
                           byte_map_base);
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
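  // (_covered is kept sorted by start address, so the search above can stop
  //  as soon as it sees a start beyond "base"; the shift loop below opens a
  //  slot at index i for the new region.)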
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
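    // If it does intrude, new_end_aligned is pulled back to the start of the
    // colliding region so that this region's committed end does not run past it.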
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
      // Use new_end_aligned (as opposed to new_end_for_commit) because
      // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving. A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region. This is better than taking the
            // VM down.
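            // Leaving the end unchanged only means some card table pages
            // stay committed longer than strictly necessary, which is safe.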
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // This line commented out cleans the newly expanded region and
    // not the aligned up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr(" "
                           " _covered[%d].start(): " INTPTR_FORMAT
                           " _covered[%d].last(): " INTPTR_FORMAT,
                           ind, _covered[ind].start(),
                           ind, _covered[ind].last());
    gclog_or_tty->print_cr(" "
                           " _committed[%d].start(): " INTPTR_FORMAT
                           " _committed[%d].last(): " INTPTR_FORMAT,
                           ind, _committed[ind].start(),
                           ind, _committed[ind].last());
    gclog_or_tty->print_cr(" "
                           " byte_for(start): " INTPTR_FORMAT
                           " byte_for(last): " INTPTR_FORMAT,
                           byte_for(_covered[ind].start()),
                           byte_for(_covered[ind].last()));
    gclog_or_tty->print_cr(" "
                           " addr_for(start): " INTPTR_FORMAT
                           " addr_for(last): " INTPTR_FORMAT,
                           addr_for((jbyte*) _committed[ind].start()),
                           addr_for((jbyte*) _committed[ind].last()));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
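  // (Debug builds only: the read below has no effect other than faulting if
  // the resize logic above left that card uncommitted.)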
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
  inline_write_ref_field(field, newVal);
}

void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_strong_roots()) claims that all GC threads
    // execute this call. With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call. The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers(). n_par_threads can be set to 0 to
    // turn off parallelism. For example when this code is called as
    // part of verification and SharedHeap::process_strong_roots() is being
    // used, then n_par_threads() may have been set to 0. active_workers
    // is not overloaded with the meaning that it is a switch to disable
    // parallelism and so keeps the meaning of the number of
    // active gc workers. If parallelism has not been shut off by
    // setting n_par_threads to 0, then n_par_threads should be
    // equal to active_workers. When a different mechanism for shutting
    // off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    // This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism.
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    //  [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    //  [11] CompactingPermGenGen::younger_refs_iterate()
    //  [12] CardTableRS::younger_refs_iterate()
    //  [13] SharedHeap::process_strong_roots()
    //  [14] G1CollectedHeap::verify()
    //  [15] Universe::verify()
    //  [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.
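      // In outline: clear_cl walks the card table for mr, and for each run of
      // non-clean cards it resets them and passes the corresponding heap range
      // to dcto_cl, which scans the objects there and applies cl to their
      // reference fields.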

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
         (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary. So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end");
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
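  // A card that straddles mr.start() also covers words before the region, so
  // it is left alone; byte_after(mr.start() - 1) below picks the first card
  // that begins at or after mr.start().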
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
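  // Only the guard card is checked here; the finer-grained card checks
  // (verify_region() and friends) are compiled out of PRODUCT builds.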
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start = byte_for(mr.start());
  jbyte* end   = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: ["PTR_FORMAT","PTR_FORMAT"]", start, end);
        tty->print_cr("== %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("== card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
                    "val: %d", curr, addr_for(curr),
                    (HeapWord*) (((size_t) addr_for(curr)) + card_size),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               _byte_map, _byte_map + _byte_map_size, byte_map_base);
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
};

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
};