Mon, 08 Dec 2014 18:57:33 +0100
8067655: Clean up G1 remembered set oop iteration
Summary: Pass on the static type G1ParPushHeapRSClosure to allow oop_iterate devirtualization
Reviewed-by: jmasa, kbarrett
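For background on the summary line: the cleanup relies on the common HotSpot
pattern of dispatching oop iteration on the closure's static type. When
oop_iterate only sees a closure through a base-class pointer, every do_oop
call is a virtual dispatch; when the concrete closure type (here,
G1ParPushHeapRSClosure) is passed through a template parameter, the compiler
can resolve and inline the do_oop calls. Below is a minimal, self-contained
sketch of the mechanism with hypothetical names (OopClosureBase, PushClosure,
the iterate_* functions); it illustrates the idea only and is not the actual
G1 code path.

#include <cstddef>

struct OopClosureBase {
  virtual void do_oop(void** p) = 0;
};

struct PushClosure : public OopClosureBase {
  size_t count;
  PushClosure() : count(0) {}
  virtual void do_oop(void** p) { (void)p; ++count; }  // stand-in for real work
};

// Dynamic dispatch: the closure is only known through its base class,
// so each field costs an indirect (vtable) call.
void iterate_virtual(void** fields, size_t n, OopClosureBase* cl) {
  for (size_t i = 0; i < n; i++) cl->do_oop(&fields[i]);
}

// Devirtualized: the closure's static type is a template parameter, so
// the compiler can resolve and inline do_oop at compile time.
template <typename ClosureType>
void iterate_static(void** fields, size_t n, ClosureType* cl) {
  for (size_t i = 0; i < n; i++) cl->do_oop(&fields[i]);
}

int main() {
  void* fields[4] = { 0, 0, 0, 0 };
  PushClosure cl;
  iterate_virtual(fields, 4, &cl);  // vtable call per field
  iterate_static(fields, 4, &cl);   // do_oop inlined
  return (int)cl.count;
}

Passing G1ParPushHeapRSClosure by its static type into the remembered set
scan plays the role of iterate_static's template parameter above, which is
what enables the oop_iterate devirtualization mentioned in the summary.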
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"
#include "runtime/atomic.inline.hpp"

// This version requires locking.
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
                                                         HeapWord* const end_value) {
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
                                                             HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the updated value of top, meaning another thread changed it
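      // A failed exchange means another thread advanced top; we simply retry
      // with the newly observed value. The loop terminates because top only
      // moves forward, so we either succeed or run out of space in the region.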
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = allocate_impl(size, end());
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, this is best
// used only for larger LAB allocations.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(size);
}

inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

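// With concurrent class unloading, a dead object may reference a class that
// has been unloaded, so a block is only treated as a parseable object if it
// is live. Without class unloading, everything below top() is a valid object.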
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  assert(ClassUnloadingWithConcurrentMark,
         err_msg("All blocks should be objects if G1 Class Unloading isn't used. "
                 "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
                 "addr: " PTR_FORMAT,
                 p2i(bottom()), p2i(top()), p2i(end()), p2i(addr)));

  // Old regions' dead objects may have dead classes, so we cannot get the
  // size by asking the (dead) oop itself. We need to find the next live
  // object by some other means.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
      getNextMarkedWordAddress(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

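// Young regions are always evacuated in their entirety, so G1 never needs
// block offset table (BOT) lookups into them; allocations here can therefore
// skip the BOT updates (hence the is_young() asserts below).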
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(word_size, end());
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(word_size, end());
}

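// Marking bookkeeping: _next_top_at_mark_start (NTAMS) records top when a
// marking cycle starts. Objects allocated above NTAMS during marking are
// implicitly live, so only [bottom, NTAMS) needs explicit liveness marks.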
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects in old
      // regions that are pointed to by roots. Explicit marks only make
      // sense below NTAMS, and ideally we would check that condition
      // here. However, since we don't know where the top of this region
      // will end up, we simply set NTAMS to the end of the region so
      // that all marks will be below NTAMS. We'll set it to the actual
      // top when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom, NTAMS) will contain objects
      // copied up to and including initial-mark, and [NTAMS, top)
      // will contain objects copied during the concurrent marking cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP