duke@435: /* mikael@4153: * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. duke@435: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. duke@435: * duke@435: * This code is free software; you can redistribute it and/or modify it duke@435: * under the terms of the GNU General Public License version 2 only, as duke@435: * published by the Free Software Foundation. duke@435: * duke@435: * This code is distributed in the hope that it will be useful, but WITHOUT duke@435: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or duke@435: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License duke@435: * version 2 for more details (a copy is included in the LICENSE file that duke@435: * accompanied this code). duke@435: * duke@435: * You should have received a copy of the GNU General Public License version duke@435: * 2 along with this work; if not, write to the Free Software Foundation, duke@435: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. duke@435: * trims@1907: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA trims@1907: * or visit www.oracle.com if you need additional information or have any trims@1907: * questions. duke@435: * duke@435: */ duke@435: stefank@2314: #ifndef SHARE_VM_MEMORY_SPACE_INLINE_HPP stefank@2314: #define SHARE_VM_MEMORY_SPACE_INLINE_HPP stefank@2314: stefank@2314: #include "gc_interface/collectedHeap.hpp" stefank@2314: #include "memory/space.hpp" stefank@2314: #include "memory/universe.hpp" goetz@6912: #include "runtime/prefetch.inline.hpp" stefank@2314: #include "runtime/safepoint.hpp" stefank@2314: ysr@777: inline HeapWord* Space::block_start(const void* p) { ysr@777: return block_start_const(p); ysr@777: } ysr@777: goetz@6912: #define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \ goetz@6912: /* Compute the new addresses for the live objects and store it in the mark \ goetz@6912: * Used by universe::mark_sweep_phase2() \ goetz@6912: */ \ goetz@6912: HeapWord* compact_top; /* This is where we are currently compacting to. */ \ goetz@6912: \ goetz@6912: /* We're sure to be here before any objects are compacted into this \ goetz@6912: * space, so this is a good time to initialize this: \ goetz@6912: */ \ goetz@6912: set_compaction_top(bottom()); \ goetz@6912: \ goetz@6912: if (cp->space == NULL) { \ goetz@6912: assert(cp->gen != NULL, "need a generation"); \ goetz@6912: assert(cp->threshold == NULL, "just checking"); \ goetz@6912: assert(cp->gen->first_compaction_space() == this, "just checking"); \ goetz@6912: cp->space = cp->gen->first_compaction_space(); \ goetz@6912: compact_top = cp->space->bottom(); \ goetz@6912: cp->space->set_compaction_top(compact_top); \ goetz@6912: cp->threshold = cp->space->initialize_threshold(); \ goetz@6912: } else { \ goetz@6912: compact_top = cp->space->compaction_top(); \ goetz@6912: } \ goetz@6912: \ goetz@6912: /* We allow some amount of garbage towards the bottom of the space, so \ goetz@6912: * we don't start compacting before there is a significant gain to be made.\ goetz@6912: * Occasionally, we want to ensure a full compaction, which is determined \ goetz@6912: * by the MarkSweepAlwaysCompactCount parameter. \ goetz@6912: */ \ goetz@6912: uint invocations = MarkSweep::total_invocations(); \ goetz@6912: bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \ goetz@6912: \ goetz@6912: size_t allowed_deadspace = 0; \ goetz@6912: if (skip_dead) { \ goetz@6912: const size_t ratio = allowed_dead_ratio(); \ goetz@6912: allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \ goetz@6912: } \ goetz@6912: \ goetz@6912: HeapWord* q = bottom(); \ goetz@6912: HeapWord* t = scan_limit(); \ goetz@6912: \ goetz@6912: HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \ goetz@6912: live object. */ \ goetz@6912: HeapWord* first_dead = end();/* The first dead object. */ \ goetz@6912: LiveRange* liveRange = NULL; /* The current live range, recorded in the \ goetz@6912: first header of preceding free area. */ \ goetz@6912: _first_dead = first_dead; \ goetz@6912: \ goetz@6912: const intx interval = PrefetchScanIntervalInBytes; \ goetz@6912: \ goetz@6912: while (q < t) { \ goetz@6912: assert(!block_is_obj(q) || \ goetz@6912: oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \ goetz@6912: oop(q)->mark()->has_bias_pattern(), \ goetz@6912: "these are the only valid states during a mark sweep"); \ goetz@6912: if (block_is_obj(q) && oop(q)->is_gc_marked()) { \ goetz@6912: /* prefetch beyond q */ \ goetz@6912: Prefetch::write(q, interval); \ goetz@6912: size_t size = block_size(q); \ goetz@6912: compact_top = cp->space->forward(oop(q), size, cp, compact_top); \ goetz@6912: q += size; \ goetz@6912: end_of_live = q; \ goetz@6912: } else { \ goetz@6912: /* run over all the contiguous dead objects */ \ goetz@6912: HeapWord* end = q; \ goetz@6912: do { \ goetz@6912: /* prefetch beyond end */ \ goetz@6912: Prefetch::write(end, interval); \ goetz@6912: end += block_size(end); \ goetz@6912: } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\ goetz@6912: \ goetz@6912: /* see if we might want to pretend this object is alive so that \ goetz@6912: * we don't have to compact quite as often. \ goetz@6912: */ \ goetz@6912: if (allowed_deadspace > 0 && q == compact_top) { \ goetz@6912: size_t sz = pointer_delta(end, q); \ goetz@6912: if (insert_deadspace(allowed_deadspace, q, sz)) { \ goetz@6912: compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \ goetz@6912: q = end; \ goetz@6912: end_of_live = end; \ goetz@6912: continue; \ goetz@6912: } \ goetz@6912: } \ goetz@6912: \ goetz@6912: /* otherwise, it really is a free region. */ \ goetz@6912: \ goetz@6912: /* for the previous LiveRange, record the end of the live objects. */ \ goetz@6912: if (liveRange) { \ goetz@6912: liveRange->set_end(q); \ goetz@6912: } \ goetz@6912: \ goetz@6912: /* record the current LiveRange object. \ goetz@6912: * liveRange->start() is overlaid on the mark word. \ goetz@6912: */ \ goetz@6912: liveRange = (LiveRange*)q; \ goetz@6912: liveRange->set_start(end); \ goetz@6912: liveRange->set_end(end); \ goetz@6912: \ goetz@6912: /* see if this is the first dead region. */ \ goetz@6912: if (q < first_dead) { \ goetz@6912: first_dead = q; \ goetz@6912: } \ goetz@6912: \ goetz@6912: /* move on to the next object */ \ goetz@6912: q = end; \ goetz@6912: } \ goetz@6912: } \ goetz@6912: \ goetz@6912: assert(q == t, "just checking"); \ goetz@6912: if (liveRange != NULL) { \ goetz@6912: liveRange->set_end(q); \ goetz@6912: } \ goetz@6912: _end_of_live = end_of_live; \ goetz@6912: if (end_of_live < first_dead) { \ goetz@6912: first_dead = end_of_live; \ goetz@6912: } \ goetz@6912: _first_dead = first_dead; \ goetz@6912: \ goetz@6912: /* save the compaction_top of the compaction space. */ \ goetz@6912: cp->space->set_compaction_top(compact_top); \ goetz@6912: } goetz@6912: goetz@6912: #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \ goetz@6912: /* adjust all the interior pointers to point at the new locations of objects \ goetz@6912: * Used by MarkSweep::mark_sweep_phase3() */ \ goetz@6912: \ goetz@6912: HeapWord* q = bottom(); \ goetz@6912: HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \ goetz@6912: \ goetz@6912: assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \ goetz@6912: \ goetz@6912: if (q < t && _first_dead > q && \ goetz@6912: !oop(q)->is_gc_marked()) { \ goetz@6912: /* we have a chunk of the space which hasn't moved and we've \ goetz@6912: * reinitialized the mark word during the previous pass, so we can't \ goetz@6912: * use is_gc_marked for the traversal. */ \ goetz@6912: HeapWord* end = _first_dead; \ goetz@6912: \ goetz@6912: while (q < end) { \ goetz@6912: /* I originally tried to conjoin "block_start(q) == q" to the \ goetz@6912: * assertion below, but that doesn't work, because you can't \ goetz@6912: * accurately traverse previous objects to get to the current one \ goetz@6912: * after their pointers have been \ goetz@6912: * updated, until the actual compaction is done. dld, 4/00 */ \ goetz@6912: assert(block_is_obj(q), \ goetz@6912: "should be at block boundaries, and should be looking at objs"); \ goetz@6912: \ goetz@6912: /* point all the oops to the new location */ \ goetz@6912: size_t size = oop(q)->adjust_pointers(); \ goetz@6912: size = adjust_obj_size(size); \ goetz@6912: \ goetz@6912: q += size; \ goetz@6912: } \ goetz@6912: \ goetz@6912: if (_first_dead == t) { \ goetz@6912: q = t; \ goetz@6912: } else { \ goetz@6912: /* $$$ This is funky. Using this to read the previously written \ goetz@6912: * LiveRange. See also use below. */ \ goetz@6912: q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \ goetz@6912: } \ goetz@6912: } \ goetz@6912: \ goetz@6912: const intx interval = PrefetchScanIntervalInBytes; \ goetz@6912: \ goetz@6912: debug_only(HeapWord* prev_q = NULL); \ goetz@6912: while (q < t) { \ goetz@6912: /* prefetch beyond q */ \ goetz@6912: Prefetch::write(q, interval); \ goetz@6912: if (oop(q)->is_gc_marked()) { \ goetz@6912: /* q is alive */ \ goetz@6912: /* point all the oops to the new location */ \ goetz@6912: size_t size = oop(q)->adjust_pointers(); \ goetz@6912: size = adjust_obj_size(size); \ goetz@6912: debug_only(prev_q = q); \ goetz@6912: q += size; \ goetz@6912: } else { \ goetz@6912: /* q is not a live object, so its mark should point at the next \ goetz@6912: * live object */ \ goetz@6912: debug_only(prev_q = q); \ goetz@6912: q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ goetz@6912: assert(q > prev_q, "we should be moving forward through memory"); \ goetz@6912: } \ goetz@6912: } \ goetz@6912: \ goetz@6912: assert(q == t, "just checking"); \ goetz@6912: } goetz@6912: goetz@6912: #define SCAN_AND_COMPACT(obj_size) { \ goetz@6912: /* Copy all live objects to their new location \ goetz@6912: * Used by MarkSweep::mark_sweep_phase4() */ \ goetz@6912: \ goetz@6912: HeapWord* q = bottom(); \ goetz@6912: HeapWord* const t = _end_of_live; \ goetz@6912: debug_only(HeapWord* prev_q = NULL); \ goetz@6912: \ goetz@6912: if (q < t && _first_dead > q && \ goetz@6912: !oop(q)->is_gc_marked()) { \ goetz@6912: debug_only( \ goetz@6912: /* we have a chunk of the space which hasn't moved and we've reinitialized \ goetz@6912: * the mark word during the previous pass, so we can't use is_gc_marked for \ goetz@6912: * the traversal. */ \ goetz@6912: HeapWord* const end = _first_dead; \ goetz@6912: \ goetz@6912: while (q < end) { \ goetz@6912: size_t size = obj_size(q); \ goetz@6912: assert(!oop(q)->is_gc_marked(), \ goetz@6912: "should be unmarked (special dense prefix handling)"); \ goetz@6912: debug_only(prev_q = q); \ goetz@6912: q += size; \ goetz@6912: } \ goetz@6912: ) /* debug_only */ \ goetz@6912: \ goetz@6912: if (_first_dead == t) { \ goetz@6912: q = t; \ goetz@6912: } else { \ goetz@6912: /* $$$ Funky */ \ goetz@6912: q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \ goetz@6912: } \ goetz@6912: } \ goetz@6912: \ goetz@6912: const intx scan_interval = PrefetchScanIntervalInBytes; \ goetz@6912: const intx copy_interval = PrefetchCopyIntervalInBytes; \ goetz@6912: while (q < t) { \ goetz@6912: if (!oop(q)->is_gc_marked()) { \ goetz@6912: /* mark is pointer to next marked oop */ \ goetz@6912: debug_only(prev_q = q); \ goetz@6912: q = (HeapWord*) oop(q)->mark()->decode_pointer(); \ goetz@6912: assert(q > prev_q, "we should be moving forward through memory"); \ goetz@6912: } else { \ goetz@6912: /* prefetch beyond q */ \ goetz@6912: Prefetch::read(q, scan_interval); \ goetz@6912: \ goetz@6912: /* size and destination */ \ goetz@6912: size_t size = obj_size(q); \ goetz@6912: HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \ goetz@6912: \ goetz@6912: /* prefetch beyond compaction_top */ \ goetz@6912: Prefetch::write(compaction_top, copy_interval); \ goetz@6912: \ goetz@6912: /* copy object and reinit its mark */ \ goetz@6912: assert(q != compaction_top, "everything in this pass should be moving"); \ goetz@6912: Copy::aligned_conjoint_words(q, compaction_top, size); \ goetz@6912: oop(compaction_top)->init_mark(); \ goetz@6912: assert(oop(compaction_top)->klass() != NULL, "should have a class"); \ goetz@6912: \ goetz@6912: debug_only(prev_q = q); \ goetz@6912: q += size; \ goetz@6912: } \ goetz@6912: } \ goetz@6912: \ goetz@6912: /* Let's remember if we were empty before we did the compaction. */ \ goetz@6912: bool was_empty = used_region().is_empty(); \ goetz@6912: /* Reset space after compaction is complete */ \ goetz@6912: reset_after_compaction(); \ goetz@6912: /* We do this clear, below, since it has overloaded meanings for some */ \ goetz@6912: /* space subtypes. For example, OffsetTableContigSpace's that were */ \ goetz@6912: /* compacted into will have had their offset table thresholds updated */ \ goetz@6912: /* continuously, but those that weren't need to have their thresholds */ \ goetz@6912: /* re-initialized. Also mangles unused area for debugging. */ \ goetz@6912: if (used_region().is_empty()) { \ goetz@6912: if (!was_empty) clear(SpaceDecorator::Mangle); \ goetz@6912: } else { \ goetz@6912: if (ZapUnusedHeapArea) mangle_unused_area(); \ goetz@6912: } \ goetz@6912: } goetz@6912: duke@435: inline HeapWord* OffsetTableContigSpace::allocate(size_t size) { duke@435: HeapWord* res = ContiguousSpace::allocate(size); duke@435: if (res != NULL) { duke@435: _offsets.alloc_block(res, size); duke@435: } duke@435: return res; duke@435: } duke@435: duke@435: // Because of the requirement of keeping "_offsets" up to date with the duke@435: // allocations, we sequentialize these with a lock. Therefore, best if duke@435: // this is used for larger LAB allocations only. duke@435: inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) { duke@435: MutexLocker x(&_par_alloc_lock); duke@435: // This ought to be just "allocate", because of the lock above, but that duke@435: // ContiguousSpace::allocate asserts that either the allocating thread duke@435: // holds the heap lock or it is the VM thread and we're at a safepoint. duke@435: // The best I (dld) could figure was to put a field in ContiguousSpace duke@435: // meaning "locking at safepoint taken care of", and set/reset that duke@435: // here. But this will do for now, especially in light of the comment duke@435: // above. Perhaps in the future some lock-free manner of keeping the duke@435: // coordination. duke@435: HeapWord* res = ContiguousSpace::par_allocate(size); duke@435: if (res != NULL) { duke@435: _offsets.alloc_block(res, size); duke@435: } duke@435: return res; duke@435: } duke@435: ysr@777: inline HeapWord* ysr@777: OffsetTableContigSpace::block_start_const(const void* p) const { duke@435: return _offsets.block_start(p); duke@435: } duke@435: stefank@2314: #endif // SHARE_VM_MEMORY_SPACE_INLINE_HPP