/*
 * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock.  Therefore, it is best
// if this is used for larger LAB allocations only.  (A standalone sketch of
// this locked-allocation pattern appears at the end of this file.)
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here.  But this will do for now, especially in light of the comment
  // above.  Perhaps in the future we will find some lock-free manner of
  // keeping the coordination.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

inline HeapWord* ContiguousSpace::concurrent_iteration_safe_limit()
{
  assert(_concurrent_iteration_safe_limit <= top(),
         "_concurrent_iteration_safe_limit update missed");
  return _concurrent_iteration_safe_limit;
}

inline void ContiguousSpace::set_concurrent_iteration_safe_limit(HeapWord* new_limit)
{
  assert(new_limit <= top(), "uninitialized objects in the safe range");
  _concurrent_iteration_safe_limit = new_limit;
}
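
// ---------------------------------------------------------------------------
// The sketch below is NOT HotSpot code.  It is a minimal standalone
// illustration (all names and types here are hypothetical) of the pattern
// used by OffsetTableContigSpace::par_allocate above: bump-pointer
// allocation plus a side table of block starts, where a mutex sequentializes
// the bump and the table update so the metadata is never observed stale.
// A lock-free variant would have to make that pair of updates appear atomic,
// which is the coordination problem the comment above alludes to.

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <thread>
#include <vector>

class ToySpace {
  std::vector<uint64_t> _heap;           // backing storage, word-addressed
  size_t                _top = 0;        // next free word index
  std::vector<size_t>   _starts;         // side table: start index of each block
  std::mutex            _par_alloc_lock; // cf. the real _par_alloc_lock

public:
  explicit ToySpace(size_t words) : _heap(words) {}

  // Serial allocate: the caller guarantees mutual exclusion, just as
  // ContiguousSpace::allocate requires the heap lock or a safepoint.
  uint64_t* allocate(size_t size) {
    if (_top + size > _heap.size()) return nullptr;  // out of space
    size_t res = _top;
    _top += size;                 // bump the pointer...
    _starts.push_back(res);       // ...and keep the side table in step
    return &_heap[res];
  }

  // Parallel allocate: the lock sequentializes the bump together with the
  // side-table update, because the two must stay mutually consistent --
  // the same reason the real par_allocate takes _par_alloc_lock.
  uint64_t* par_allocate(size_t size) {
    std::lock_guard<std::mutex> x(_par_alloc_lock);
    return allocate(size);
  }
};

// Usage: two threads allocating concurrently; the lock keeps _top and
// _starts consistent without any caller-side coordination.
int main() {
  ToySpace sp(1 << 20);
  auto worker = [&sp] {
    for (int i = 0; i < 1000; ++i) sp.par_allocate(8);
  };
  std::thread t1(worker), t2(worker);
  t1.join();
  t2.join();
  return 0;
}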