src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

Thu, 22 Sep 2011 10:57:37 -0700

author
johnc
date
Thu, 22 Sep 2011 10:57:37 -0700
changeset 3175
4dfb2df418f2
parent 3028
f44782f04dd4
child 4016
c9814fadeb38
permissions
-rw-r--r--

6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp

ysr@777 1 /*
tonyp@2454 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/g1/concurrentMark.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1CollectedHeap.hpp"
tonyp@2715 30 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
tonyp@2315 31 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
tonyp@2469 32 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
stefank@2314 33 #include "utilities/taskqueue.hpp"
stefank@2314 34
ysr@777 35 // Inline functions for G1CollectedHeap
ysr@777 36
tonyp@2963 37 template <class T>
ysr@777 38 inline HeapRegion*
tonyp@2963 39 G1CollectedHeap::heap_region_containing(const T addr) const {
tonyp@2963 40 HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
ysr@777 41 // hr can be null if addr in perm_gen
ysr@777 42 if (hr != NULL && hr->continuesHumongous()) {
ysr@777 43 hr = hr->humongous_start_region();
ysr@777 44 }
ysr@777 45 return hr;
ysr@777 46 }
ysr@777 47
tonyp@2963 48 template <class T>
ysr@777 49 inline HeapRegion*
tonyp@2963 50 G1CollectedHeap::heap_region_containing_raw(const T addr) const {
tonyp@2963 51 assert(_g1_reserved.contains((const void*) addr), "invariant");
tonyp@2963 52 HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
ysr@777 53 return res;
ysr@777 54 }
ysr@777 55
ysr@777 56 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
tonyp@2963 57 HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
ysr@777 58 return r != NULL && r->in_collection_set();
ysr@777 59 }
ysr@777 60
// Mutator (non-humongous) allocation entry point. First tries a
// lock-free inline allocation out of the current mutator alloc
// region; if that fails, falls back to attempt_allocation_slow()
// (which may return NULL after recording the current GC count in
// *gc_count_before_ret so the caller can retry after a GC). On
// success the cards covering the new block are dirtied so the post
// write barrier never queues updates for this young block.
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret) {
  // Fast path must be taken outside the Heap_lock and outside a safepoint.
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  // Lock-free attempt; mutator (young) regions need no BOT updates.
  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    // Inline attempt failed — take the slow path. NOTE(review):
    // presumably this may take locks and/or trigger a GC attempt;
    // see attempt_allocation_slow() for the actual policy.
    result = attempt_allocation_slow(word_size, gc_count_before_ret);
  }
  // Whatever path we took, the slow path must have released any locks.
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}
tonyp@2454 79
tonyp@3028 80 inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
tonyp@3028 81 word_size) {
tonyp@3028 82 assert(!isHumongous(word_size),
tonyp@3028 83 "we should not be seeing humongous-size allocations in this path");
tonyp@3028 84
tonyp@3028 85 HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
tonyp@3028 86 false /* bot_updates */);
tonyp@3028 87 if (result == NULL) {
tonyp@3028 88 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
tonyp@3028 89 result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
tonyp@3028 90 false /* bot_updates */);
tonyp@3028 91 }
tonyp@3028 92 if (result != NULL) {
tonyp@3028 93 dirty_young_block(result, word_size);
tonyp@3028 94 }
tonyp@3028 95 return result;
tonyp@3028 96 }
tonyp@3028 97
tonyp@3028 98 inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
tonyp@3028 99 assert(!isHumongous(word_size),
tonyp@3028 100 "we should not be seeing humongous-size allocations in this path");
tonyp@3028 101
tonyp@3028 102 HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
tonyp@3028 103 true /* bot_updates */);
tonyp@3028 104 if (result == NULL) {
tonyp@3028 105 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
tonyp@3028 106 result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
tonyp@3028 107 true /* bot_updates */);
tonyp@3028 108 }
tonyp@3028 109 return result;
tonyp@3028 110 }
tonyp@3028 111
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  // The block must lie entirely within one region (asserted via its
  // last word), since a non-humongous young block cannot span regions.
  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  // Dirty every card spanned by [start, end) in one shot through the
  // card-table barrier set.
  MemRegion mr(start, end);
  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
}
ysr@777 136
jcoomes@2064 137 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
ysr@777 138 return _task_queues->queue(i);
ysr@777 139 }
ysr@777 140
ysr@777 141 inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
ysr@777 142 return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
ysr@777 143 }
ysr@777 144
ysr@777 145 inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
ysr@777 146 return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
ysr@777 147 }
stefank@2314 148
stefank@2314 149 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

mercurial