/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we serialize these with a lock. Therefore, this is best
// used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // Given that we take the lock, there is no need to use par_allocate()
  // here.
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}
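// The note_* methods below maintain the marking bookkeeping for this
// region. _prev_top_at_mark_start (PTAMS) and _next_top_at_mark_start
// (NTAMS) record the region's top at the start of the previous and the
// current (i.e., next) marking cycle, respectively. Objects allocated
// above the relevant TAMS are implicitly considered live, which is why
// all explicit marks are kept below NTAMS (see note_start_of_copying()).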
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Given that explicit
      // marks only make sense under NTAMS, it would be nice if we
      // could check that condition. Since we don't know where the top
      // of this region will end up, we simply set NTAMS to the end of
      // the region so that all marks will be below NTAMS. We'll set it
      // to the actual top when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom, NTAMS) will contain objects
      // copied up to and including initial-mark and [NTAMS, top)
      // will contain objects copied during the concurrent marking
      // cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP