src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

author       tonyp
date         Tue, 17 Aug 2010 14:40:00 -0400
changeset    2073:bb847e31b836
parent       2064:5f429ee79634
child        2314:f95d63e2154a
permissions  -rw-r--r--

6974928: G1: sometimes humongous objects are allocated in young regions
Summary: as the title says, sometimes we are allocating humongous objects in young regions and we shouldn't.
Reviewed-by: ysr, johnc

/*
 * Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Inline functions for G1CollectedHeap

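// Returns the HeapRegion containing addr, redirecting an address that
// falls in a "continues humongous" region to the region where the
// humongous object starts. May return NULL for addresses outside the
// G1 heap proper (e.g., in the perm gen).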
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const void* addr) const {
  HeapRegion* hr = _hrs->addr_to_region(addr);
  // hr can be null if addr in perm_gen
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

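// "Raw" variant: computes the region index directly from the offset of
// addr within the reserved heap, without the humongous redirection
// above. The caller must guarantee that addr lies inside the reserved
// G1 heap.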
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
  assert(_g1_reserved.contains(addr), "invariant");
  size_t index = pointer_delta(addr, _g1_reserved.start(), 1)
                                        >> HeapRegion::LogOfHRGrainBytes;

  HeapRegion* res = _hrs->at(index);
  assert(res == _hrs->addr_to_region(addr), "sanity");
  return res;
}

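// Returns true iff obj lies in a region that is currently part of the
// collection set.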
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs->addr_to_region(obj);
  return r != NULL && r->in_collection_set();
}

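// Fast-path allocation: tries to satisfy a non-humongous request from
// the current allocation region. On success outside a safepoint the
// Heap_lock is released before returning; on failure, or for humongous
// requests, control falls through to attempt_allocation_slow(), which
// manages the lock itself.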
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                     bool permit_collection_pause) {
  HeapWord* res = NULL;

  assert( SafepointSynchronize::is_at_safepoint() ||
          Heap_lock->owned_by_self(), "pre-condition of the call" );

  // All humongous allocation requests should go through the slow path in
  // attempt_allocation_slow().
  if (!isHumongous(word_size) && _cur_alloc_region != NULL) {
    // If this allocation causes a region to become non-empty,
    // then we need to update our free_regions count.

    if (_cur_alloc_region->is_empty()) {
      res = _cur_alloc_region->allocate(word_size);
      if (res != NULL)
        _free_regions--;
    } else {
      res = _cur_alloc_region->allocate(word_size);
    }

    if (res != NULL) {
      if (!SafepointSynchronize::is_at_safepoint()) {
        assert( Heap_lock->owned_by_self(), "invariant" );
        Heap_lock->unlock();
      }
      return res;
    }
  }
  // attempt_allocation_slow will also unlock the heap lock when appropriate.
  return attempt_allocation_slow(word_size, permit_collection_pause);
}

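// Returns worker thread i's reference-to-scan work queue from the
// shared task queue set.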
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

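// Marking predicates: query the previous and next marking bitmaps
// maintained by the concurrent marker (_cm) for the given object.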
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}
