/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"
#include "runtime/atomic.inline.hpp"

// This version requires locking.
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
                                                         HeapWord* const end_value) {
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
                                                             HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the current value of top is returned and the exchange failed
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
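// What follows is a minimal, self-contained sketch (not HotSpot code) of the
// same lock-free bump-the-pointer pattern used by par_allocate_impl() above,
// written against std::atomic rather than Atomic::cmpxchg_ptr. ToyArena and
// all of its members are hypothetical names introduced only for illustration;
// the block is compiled out with #if 0 so it cannot affect this header.
#if 0
#include <atomic>
#include <cstddef>

struct ToyArena {
  std::atomic<char*> _top;   // next free byte, raced on by allocating threads
  char*              _end;   // one past the last usable byte

  // Returns 'bytes' of storage, or NULL when the arena is exhausted.
  char* par_allocate(std::size_t bytes) {
    char* obj = _top.load();
    do {
      if (static_cast<std::size_t>(_end - obj) < bytes) {
        return NULL; // not enough room left
      }
      // On failure, compare_exchange_weak reloads the current top into
      // 'obj' and the loop retries with fresh state -- the same retry
      // structure as the cmpxchg loop in par_allocate_impl().
    } while (!_top.compare_exchange_weak(obj, obj + bytes));
    return obj;
  }
};
#endif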
inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = allocate_impl(size, end());
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, this is best
// used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(size);
}
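// A minimal, compiled-out sketch (not HotSpot code) of why par_allocate()
// serializes with a lock: the pointer bump and the side-table update must
// happen as one indivisible step, which a bare CAS on top alone cannot
// provide. ToySpace and its members are hypothetical stand-ins for this
// space and its "_offsets" block offset table.
#if 0
#include <cstddef>
#include <mutex>
#include <utility>
#include <vector>

struct ToySpace {
  char*      _top;
  char*      _end;
  std::mutex _par_alloc_lock;
  std::vector<std::pair<char*, std::size_t> > _blocks; // stand-in for the BOT

  // Serial path: bump top, then record the new block in the side table.
  char* allocate(std::size_t bytes) {
    if (static_cast<std::size_t>(_end - _top) < bytes) {
      return NULL;
    }
    char* obj = _top;
    _top += bytes;
    _blocks.push_back(std::make_pair(obj, bytes)); // must stay in sync with _top
    return obj;
  }

  // Parallel path: the lock makes the bump plus the side-table update a
  // single atomic step, mirroring the MutexLocker in par_allocate() above.
  char* par_allocate(std::size_t bytes) {
    std::lock_guard<std::mutex> x(_par_alloc_lock);
    return allocate(bytes);
  }
};
#endif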
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  return !g1h->is_obj_dead(oop(p), this);
}

inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  // A dead object in an old region may have a dead class, so we cannot
  // safely ask the oop for its size; instead, find the next live object
  // using the prev mark bitmap.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (g1h->is_obj_dead(oop(addr), this)) {
    HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
        getNextMarkedWordAddress(addr, prev_top_at_mark_start());

    assert(next > addr, "must get the next live object");

    return pointer_delta(next, addr);
  } else if (addr == top()) {
    return pointer_delta(end(), addr);
  }
  return oop(addr)->size();
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(word_size, end());
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(word_size, end());
}

inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Given that explicit
      // marks only make sense under NTAMS it'd be nice if we could
      // check that condition if we wanted to. Given that we don't
      // know where the top of this region will end up, we simply set
      // NTAMS to the end of the region so all marks will be below
      // NTAMS. We'll set it to the actual top when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom, NTAMS) will contain objects
      // copied up to and including initial-mark and [NTAMS, top)
      // will contain objects copied during the concurrent marking cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
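// Illustrative appendix, compiled out with #if 0 (not HotSpot code): a
// drastically simplified model of the NTAMS/PTAMS bookkeeping performed by
// the note_*() methods above. ToyRegionModel and its fields are hypothetical;
// only the TAMS transitions are modeled, not marking or copying themselves.
#if 0
struct ToyRegionModel {
  unsigned _bottom, _top, _end;     // word indices of the region bounds
  unsigned _prev_tams, _next_tams;  // PTAMS / NTAMS
  bool     _is_survivor;

  // A marking cycle starts: NTAMS snapshots the current top.
  void note_start_of_marking() { _next_tams = _top; }

  // Marking finished: NTAMS is promoted to PTAMS.
  void note_end_of_marking()   { _prev_tams = _next_tams; }

  void note_start_of_copying(bool during_initial_mark) {
    if (!_is_survivor && during_initial_mark) {
      _next_tams = _end;  // park NTAMS at end() so every explicit mark lands below it
    }
    // Survivors keep NTAMS == bottom; the remaining cases only check invariants.
  }

  void note_end_of_copying(bool during_initial_mark) {
    if (!_is_survivor && during_initial_mark) {
      _next_tams = _top;  // the region is retired: clamp NTAMS to the actual top
    }
  }
};
#endif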