/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/bitMap.inline.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

bool
ParMarkBitMap::initialize(MemRegion covered_region)
{
  const idx_t bits = bits_required(covered_region);
  // The bits will be divided evenly between two bitmaps; each of them should be
  // an integral number of words.
  assert(bits % (BitsPerWord * 2) == 0, "region size unaligned");

  const size_t words = bits / BitsPerWord;
  const size_t raw_bytes = words * sizeof(idx_t);
  const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));

  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ?
    0 : MAX2(page_sz, granularity);
  ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
  os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz,
                       rs.base(), rs.size());

  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  _virtual_space = new PSVirtualSpace(rs, page_sz);
  if (_virtual_space != NULL && _virtual_space->expand_by(_reserved_byte_size)) {
    _region_start = covered_region.start();
    _region_size = covered_region.word_size();
    BitMap::bm_word_t* map = (BitMap::bm_word_t*)_virtual_space->reserved_low_addr();
    _beg_bits.set_map(map);
    _beg_bits.set_size(bits / 2);
    _end_bits.set_map(map + words / 2);
    _end_bits.set_size(bits / 2);
    return true;
  }

  _region_start = 0;
  _region_size = 0;
  if (_virtual_space != NULL) {
    delete _virtual_space;
    _virtual_space = NULL;
    // Release memory reserved in the space.
    rs.release();
  }
  return false;
}

#ifdef ASSERT
extern size_t mark_bitmap_count;
extern size_t mark_bitmap_size;
#endif  // #ifdef ASSERT

bool
ParMarkBitMap::mark_obj(HeapWord* addr, size_t size)
{
  const idx_t beg_bit = addr_to_bit(addr);
  if (_beg_bits.par_set_bit(beg_bit)) {
    const idx_t end_bit = addr_to_bit(addr + size - 1);
    bool end_bit_ok = _end_bits.par_set_bit(end_bit);
    assert(end_bit_ok, "concurrency problem");
    DEBUG_ONLY(Atomic::inc_ptr(&mark_bitmap_count));
    DEBUG_ONLY(Atomic::add_ptr(size, &mark_bitmap_size));
    return true;
  }
  return false;
}

size_t ParMarkBitMap::live_words_in_range(HeapWord* beg_addr, oop end_obj) const
{
  assert(beg_addr <= (HeapWord*)end_obj, "bad range");
  assert(is_marked(end_obj), "end_obj must be live");

  idx_t live_bits = 0;

  // The bitmap routines require the right boundary to be word-aligned.
  const idx_t end_bit = addr_to_bit((HeapWord*)end_obj);
  const idx_t range_end = BitMap::word_align_up(end_bit);

  idx_t beg_bit = find_obj_beg(addr_to_bit(beg_addr), range_end);
  while (beg_bit < end_bit) {
    idx_t tmp_end = find_obj_end(beg_bit, range_end);
    assert(tmp_end < end_bit, "missing end bit");
    live_bits += tmp_end - beg_bit + 1;
    beg_bit = find_obj_beg(tmp_end + 1, range_end);
  }
  return bits_to_words(live_bits);
}

ParMarkBitMap::IterationStatus
ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
                       idx_t range_beg, idx_t range_end) const
{
  DEBUG_ONLY(verify_bit(range_beg);)
  DEBUG_ONLY(verify_bit(range_end);)
  assert(range_beg <= range_end, "live range invalid");

  // The bitmap routines require the right boundary to be word-aligned.
  const idx_t search_end = BitMap::word_align_up(range_end);

  idx_t cur_beg = find_obj_beg(range_beg, search_end);
  while (cur_beg < range_end) {
    const idx_t cur_end = find_obj_end(cur_beg, search_end);
    if (cur_end >= range_end) {
      // The obj ends outside the range.
      live_closure->set_source(bit_to_addr(cur_beg));
      return incomplete;
    }

    const size_t size = obj_size(cur_beg, cur_end);
    IterationStatus status = live_closure->do_addr(bit_to_addr(cur_beg), size);
    if (status != incomplete) {
      assert(status == would_overflow || status == full, "sanity");
      return status;
    }

    // Successfully processed the object; look for the next object.
    cur_beg = find_obj_beg(cur_end + 1, search_end);
  }

  live_closure->set_source(bit_to_addr(range_end));
  return complete;
}

ParMarkBitMap::IterationStatus
ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
                       ParMarkBitMapClosure* dead_closure,
                       idx_t range_beg, idx_t range_end,
                       idx_t dead_range_end) const
{
  DEBUG_ONLY(verify_bit(range_beg);)
  DEBUG_ONLY(verify_bit(range_end);)
  DEBUG_ONLY(verify_bit(dead_range_end);)
  assert(range_beg <= range_end, "live range invalid");
  assert(range_end <= dead_range_end, "dead range invalid");

  // The bitmap routines require the right boundary to be word-aligned.
  const idx_t live_search_end = BitMap::word_align_up(range_end);
  const idx_t dead_search_end = BitMap::word_align_up(dead_range_end);

  idx_t cur_beg = range_beg;
  if (range_beg < range_end && is_unmarked(range_beg)) {
    // The range starts with dead space.  Look for the next object, then fill.
    cur_beg = find_obj_beg(range_beg + 1, dead_search_end);
    const idx_t dead_space_end = MIN2(cur_beg - 1, dead_range_end - 1);
    const size_t size = obj_size(range_beg, dead_space_end);
    dead_closure->do_addr(bit_to_addr(range_beg), size);
  }

  while (cur_beg < range_end) {
    const idx_t cur_end = find_obj_end(cur_beg, live_search_end);
    if (cur_end >= range_end) {
      // The obj ends outside the range.
      live_closure->set_source(bit_to_addr(cur_beg));
      return incomplete;
    }

    const size_t size = obj_size(cur_beg, cur_end);
    IterationStatus status = live_closure->do_addr(bit_to_addr(cur_beg), size);
    if (status != incomplete) {
      assert(status == would_overflow || status == full, "sanity");
      return status;
    }

    // Look for the start of the next object.
    const idx_t dead_space_beg = cur_end + 1;
    cur_beg = find_obj_beg(dead_space_beg, dead_search_end);
    if (cur_beg > dead_space_beg) {
      // Found dead space; compute the size and invoke the dead closure.
      const idx_t dead_space_end = MIN2(cur_beg - 1, dead_range_end - 1);
      const size_t size = obj_size(dead_space_beg, dead_space_end);
      dead_closure->do_addr(bit_to_addr(dead_space_beg), size);
    }
  }

  live_closure->set_source(bit_to_addr(range_end));
  return complete;
}

#ifdef ASSERT
void ParMarkBitMap::verify_clear() const
{
  const idx_t* const beg = (const idx_t*)_virtual_space->committed_low_addr();
  const idx_t* const end = (const idx_t*)_virtual_space->committed_high_addr();
  for (const idx_t* p = beg; p < end; ++p) {
    assert(*p == 0, "bitmap not clear");
  }
}
#endif  // #ifdef ASSERT
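
// ---------------------------------------------------------------------------
// Illustrative sketch only (not HotSpot code; kept disabled with #if 0): a
// minimal stand-alone model of the begin/end bit encoding that mark_obj() and
// live_words_in_range() above rely on.  One bit marks an object's first heap
// word, a second bit marks its last heap word, and live words are recovered by
// pairing the two.  The container and names below are assumptions made for
// this sketch; they are not the real BitMap / ParMarkBitMap API.
#if 0
#include <cassert>
#include <cstddef>
#include <vector>

struct ToyMarkBitMap {
  std::vector<bool> beg_bits;   // bit i set => a live object starts at word i
  std::vector<bool> end_bits;   // bit i set => a live object ends at word i

  explicit ToyMarkBitMap(size_t words) : beg_bits(words), end_bits(words) {}

  // Mark an object occupying heap words [word, word + size); returns false if
  // the object was already marked.
  bool mark_obj(size_t word, size_t size) {
    if (beg_bits[word]) return false;
    beg_bits[word] = true;
    end_bits[word + size - 1] = true;
    return true;
  }

  // Sum the sizes, in words, of marked objects that begin in [beg, end).
  size_t live_words_in_range(size_t beg, size_t end) const {
    size_t live = 0;
    for (size_t i = beg; i < end; ++i) {
      if (beg_bits[i]) {
        size_t j = i;
        while (!end_bits[j]) ++j;   // find the matching end bit
        live += j - i + 1;
        i = j;                      // resume the scan after this object
      }
    }
    return live;
  }
};

int main() {
  ToyMarkBitMap bm(64);
  bm.mark_obj(2, 3);    // 3-word object starting at word 2
  bm.mark_obj(10, 1);   // 1-word object at word 10
  assert(bm.live_words_in_range(0, 64) == 4);
  return 0;
}
#endif
// ---------------------------------------------------------------------------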