/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1
#include "c1/c1_LIR.hpp"
#include "c1/c1_LIRGenerator.hpp"
#endif

// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration).

size_t CardTableModRefBS::cards_required(size_t covered_words)
{
  // Add one for a guard card, used to detect errors.
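  // Worked example (a sketch assuming the usual 512-byte cards, i.e. 64 heap
  // words per card on a 64-bit VM): covering 1000 words rounds up to 1024
  // words, which maps to 1024 / 64 = 16 cards, plus 1 guard card = 17.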
  const size_t words = align_size_up(covered_words, card_size_in_words);
  return words / card_size_in_words + 1;
}

size_t CardTableModRefBS::compute_byte_map_size()
{
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
                                     int max_covered_regions):
  ModRefBarrierSet(max_covered_regions),
  _whole_heap(whole_heap),
  _guard_index(cards_required(whole_heap.word_size()) - 1),
  _last_valid_index(_guard_index - 1),
  _page_size(os::vm_page_size()),
  _byte_map_size(compute_byte_map_size())
{
  _kind = BarrierSet::CardTableModRef;

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();
  assert((uintptr_t(low_bound)  & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(high_bound) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered   = new MemRegion[max_covered_regions];
  _committed = new MemRegion[max_covered_regions];
  if (_covered == NULL || _committed == NULL) {
    vm_exit_during_initialization("couldn't alloc card table covered region set.");
  }

  _cur_covered_regions = 0;
  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
#ifdef MIPS64
  /* 2013/10.25 Jin: try to allocate byte_map_base within the 32-bit region.
     FIXME: should automatically search for a spare space. */
  ReservedSpace heap_rs(_byte_map_size, rs_align, false, (char *)0x20000000);
#else
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);
#endif
  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to byte_map_base, i.e.
  //
  //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (jbyte*) heap_rs.base();
  byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

  jbyte* guard_card = &_byte_map[_guard_index];
  uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
  _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }

  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::CardTableModRefBS: ");
    gclog_or_tty->print_cr(" "
                  " &_byte_map[0]: " INTPTR_FORMAT
                  " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_byte_map[0]),
                  p2i(&_byte_map[_last_valid_index]));
    gclog_or_tty->print_cr(" "
                  " byte_map_base: " INTPTR_FORMAT,
                  p2i(byte_map_base));
  }
}

CardTableModRefBS::~CardTableModRefBS() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
  if (_lowest_non_clean) {
    FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean, mtGC);
    _lowest_non_clean = NULL;
  }
  if (_lowest_non_clean_chunk_size) {
    FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size, mtGC);
    _lowest_non_clean_chunk_size = NULL;
  }
  if (_lowest_non_clean_base_chunk_index) {
    FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index, mtGC);
    _lowest_non_clean_base_chunk_index = NULL;
  }
  if (_last_LNC_resizing_collection) {
    FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection, mtGC);
    _last_LNC_resizing_collection = NULL;
  }
}

int CardTableModRefBS::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
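  // (For example, if the covered starts are currently [A, C, D] and the new
  //  base B sorts between A and C, the search above leaves i == 1; C and D
  //  are shifted up one slot and the new entry is recorded at index 1, so
  //  _covered stays sorted by start address.)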
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  jbyte* ct_start = byte_for(base);
  uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
  _committed[res].set_start((HeapWord*)ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

int CardTableModRefBS::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTableModRefBS::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTableModRefBS::committed_unique_to_self(int self,
                                                      MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        if (_committed[ri].contains(new_end_aligned)) {
          // The prior check included in the assert
          // (new_end_aligned >= _committed[ri].start())
          // is redundant with the "contains" test.
          // Any region containing the new end
          // should start at or beyond the region found (ind)
          // for the new end (committed regions are not expected to
          // be proper subsets of other committed regions).
          assert(_committed[ri].start() >= _committed[ind].start(),
                 "New end of committed region is inconsistent");
          new_end_aligned = _committed[ri].start();
          // new_end_aligned can be equal to the start of its
          // committed region (i.e., of "ind") if a second
          // region following "ind" also starts at the same location
          // as "ind".
          assert(new_end_aligned >= _committed[ind].start(),
                 "New end of committed region is before start");
          debug_only(collided = true;)
          // Should only collide with 1 region
          break;
        }
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
      // Use new_end_aligned (as opposed to new_end_for_commit) because
      // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving. A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region. This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    jbyte* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region.
    // jbyte* const end = byte_after(new_region.last());
    jbyte* const end = (jbyte*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());
  if (TraceCardTableModRefBS) {
    gclog_or_tty->print_cr("CardTableModRefBS::resize_covered_region: ");
    gclog_or_tty->print_cr(" "
                  " _covered[%d].start(): " INTPTR_FORMAT
                  " _covered[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_covered[ind].start()),
                  ind, p2i(_covered[ind].last()));
    gclog_or_tty->print_cr(" "
                  " _committed[%d].start(): " INTPTR_FORMAT
                  " _committed[%d].last(): " INTPTR_FORMAT,
                  ind, p2i(_committed[ind].start()),
                  ind, p2i(_committed[ind].last()));
    gclog_or_tty->print_cr(" "
                  " byte_for(start): " INTPTR_FORMAT
                  " byte_for(last): " INTPTR_FORMAT,
                  p2i(byte_for(_covered[ind].start())),
                  p2i(byte_for(_covered[ind].last())));
    gclog_or_tty->print_cr(" "
                  " addr_for(start): " INTPTR_FORMAT
                  " addr_for(last): " INTPTR_FORMAT,
                  p2i(addr_for((jbyte*) _committed[ind].start())),
                  p2i(addr_for((jbyte*) _committed[ind].last())));
  }
  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
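//
// For reference, the fast-path store check emitted by the compilers (and
// mirrored by inline_write_ref_field()) amounts to roughly the following
// sketch; this is illustrative only, not the exact generated code:
//
//   jbyte* card = byte_map_base + (uintptr_t(field) >> card_shift);
//   *card = dirty_card;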

void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
  inline_write_ref_field(field, newVal, release);
}


void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                 MemRegion mr,
                                                                 OopsInGenClosure* cl,
                                                                 CardTableRS* ct) {
  if (!mr.is_empty()) {
    // Caller (process_strong_roots()) claims that all GC threads
    // execute this call. With UseDynamicNumberOfGCThreads now all
    // active GC threads execute this call. The number of active GC
    // threads needs to be passed to par_non_clean_card_iterate_work()
    // to get proper partitioning and termination.
    //
    // This is an example of where n_par_threads() is used instead
    // of workers()->active_workers(). n_par_threads can be set to 0 to
    // turn off parallelism. For example when this code is called as
    // part of verification and SharedHeap::process_strong_roots() is being
    // used, then n_par_threads() may have been set to 0. active_workers
    // is not overloaded with the meaning that it is a switch to disable
    // parallelism and so keeps the meaning of the number of
    // active gc workers. If parallelism has not been shut off by
    // setting n_par_threads to 0, then n_par_threads should be
    // equal to active_workers. When a different mechanism for shutting
    // off parallelism is used, then active_workers can be used in
    // place of n_par_threads.
    // This is an example of a path where n_par_threads is
    // set to 0 to turn off parallelism.
    //  [7] CardTableModRefBS::non_clean_card_iterate()
    //  [8] CardTableRS::younger_refs_in_space_iterate()
    //  [9] Generation::younger_refs_in_space_iterate()
    // [10] OneContigSpaceCardGeneration::younger_refs_iterate()
    // [11] CompactingPermGenGen::younger_refs_iterate()
    // [12] CardTableRS::younger_refs_iterate()
    // [13] SharedHeap::process_strong_roots()
    // [14] G1CollectedHeap::verify()
    // [15] Universe::verify()
    // [16] G1CollectedHeap::do_collection_pause_at_safepoint()
    //
    int n_threads = SharedHeap::heap()->n_par_threads();
    bool is_par = n_threads > 0;
    if (is_par) {
#if INCLUDE_ALL_GCS
      assert(SharedHeap::heap()->n_par_threads() ==
             SharedHeap::heap()->workers()->active_workers(), "Mismatch");
      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
#else  // INCLUDE_ALL_GCS
      fatal("Parallel gc not supported here.");
#endif // INCLUDE_ALL_GCS
    } else {
      // We do not call the non_clean_card_iterate_serial() version below because
      // we want to clear the cards (which non_clean_card_iterate_serial() does not
      // do for us): clear_cl here does the work of finding contiguous dirty ranges
      // of cards to process and clear.

      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
                                                       cl->gen_boundary());
      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);

      clear_cl.do_MemRegion(mr);
    }
  }
}

// The iterator itself is not MT-aware, but
// MT-aware callers and closures can use this to
// accomplish dirty card iteration in parallel. The
// iterator itself does not clear the dirty cards, or
// change their values in any manner.
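// The scan below proceeds from the highest card of each intersected covered
// region down to the lowest, coalescing each run of consecutive non-clean
// cards into a single MemRegion (clipped to the intersection with "mr")
// before handing it to the closure.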
void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
                                                      MemRegionClosure* cl) {
  bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
  assert(!is_par ||
          (SharedHeap::heap()->n_par_threads() ==
          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (mri.word_size() > 0) {
      jbyte* cur_entry = byte_for(mri.last());
      jbyte* limit = byte_for(mri.start());
      while (cur_entry >= limit) {
        jbyte* next_entry = cur_entry - 1;
        if (*cur_entry != clean_card) {
          size_t non_clean_cards = 1;
          // Should the next card be included in this range of dirty cards?
          while (next_entry >= limit && *next_entry != clean_card) {
            non_clean_cards++;
            cur_entry = next_entry;
            next_entry--;
          }
          // The memory region may not be on a card boundary. So that
          // objects beyond the end of the region are not processed, make
          // cur_cards precise with regard to the end of the memory region.
          MemRegion cur_cards(addr_for(cur_entry),
                              non_clean_cards * card_size_in_words);
          MemRegion dirty_region = cur_cards.intersection(mri);
          cl->do_MemRegion(dirty_region);
        }
        cur_entry = next_entry;
      }
    }
  }
}

void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
  jbyte* cur  = byte_for(mr.start());
  jbyte* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTableModRefBS::invalidate(MemRegion mr, bool whole_heap) {
  assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert((HeapWord*)align_size_up ((uintptr_t)mr.end(), HeapWordSize) == mr.end(), "Unaligned end" );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  jbyte* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  jbyte* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
}

void CardTableModRefBS::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTableModRefBS::dirty(MemRegion mr) {
  jbyte* first = byte_for(mr.start());
  jbyte* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
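// A typical caller hands in a MemRegionClosure and receives one maximal run
// of dirty cards per callback, e.g. (illustrative sketch only; the closure
// name below is hypothetical):
//
//   class CountDirtyWordsClosure : public MemRegionClosure {
//     size_t _words;
//    public:
//     CountDirtyWordsClosure() : _words(0) {}
//     void do_MemRegion(MemRegion mr) { _words += mr.word_size(); }
//     size_t words() const { return _words; }
//   };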
void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                           MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}

MemRegion CardTableModRefBS::dirty_card_range_after_reset(MemRegion mr,
                                                          bool reset,
                                                          int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      jbyte* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTableModRefBS::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}

void CardTableModRefBS::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTableModRefBS::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTableModRefBS::verify_region(MemRegion mr,
                                      jbyte val, bool val_equals) {
  jbyte* start = byte_for(mr.start());
  jbyte* end   = byte_for(mr.last());
  bool failures = false;
  for (jbyte* curr = start; curr <= end; ++curr) {
    jbyte curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        tty->cr();
        tty->print_cr("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        tty->print_cr("== %sexpecting value: %d",
                      (val_equals) ? "" : "not ", val);
        failures = true;
      }
      tty->print_cr("== card "PTR_FORMAT" ["PTR_FORMAT","PTR_FORMAT"], "
                    "val: %d", p2i(curr), p2i(addr_for(curr)),
                    p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)),
                    (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTableModRefBS::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTableModRefBS::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
}

bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
  return
    CardTableModRefBS::card_will_be_scanned(cv) ||
    _rs->is_prev_nonclean_card_val(cv);
};

bool CardTableModRefBSForCTRS::card_may_have_been_dirty(jbyte cv) {
  return
    cv != clean_card &&
    (CardTableModRefBS::card_may_have_been_dirty(cv) ||
     CardTableRS::youngergen_may_have_been_dirty(cv));
};