Mon, 07 Jul 2014 10:12:40 +0200
8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"
#include "runtime/atomic.inline.hpp"

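// Inline implementations of the allocation and marking-support helpers
// declared in heapRegion.hpp for G1OffsetTableContigSpace and HeapRegion.
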
// This version requires locking.
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
                                                         HeapWord* const end_value) {
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
                                                             HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result is one of two things:
      //  the old top value (obj): the exchange succeeded;
      //  otherwise: another thread raced us, the current value of top is
      //             returned and we retry the allocation.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

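// Allocation that also records the new block in the block offset table so
// that block_start() can later map an interior pointer back to the start of
// the object.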
inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = allocate_impl(size, end());
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock.  Therefore this is best
// used for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(size);
}

inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

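// A block is treated as an object only if the heap does not consider it
// dead; with class unloading, dead objects in old regions may refer to
// unloaded classes and can no longer be parsed (see block_size() below).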
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  return !g1h->is_obj_dead(oop(p), this);
}

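// Returns the size of the block starting at addr:
//  - for a dead object, the distance to the next live (marked) object,
//    taken from the prev mark bitmap;
//  - for addr == top(), the remaining distance to end();
//  - otherwise, the size of the live object at addr.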
inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  // Dead objects in old regions may belong to dead (unloaded) classes, so we
  // cannot ask a dead object for its size.  Instead we find the next live
  // object using the prev mark bitmap.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  if (g1h->is_obj_dead(oop(addr), this)) {
    HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
        getNextMarkedWordAddress(addr, prev_top_at_mark_start());

    assert(next > addr, "must get the next live object");

    return pointer_delta(next, addr);
  } else if (addr == top()) {
    return pointer_delta(end(), addr);
  }
  return oop(addr)->size();
}

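// Young regions are always collected in full and are never scanned via the
// block offset table, so allocations into them can skip the BOT updates
// (hence the is_young() asserts below).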
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(word_size, end());
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(word_size, end());
}

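// Called when a concurrent marking cycle starts: reset the counts for the
// next marking and record the current top as NTAMS (next top-at-mark-start).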
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

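// Called when the marking cycle completes: the "next" marking information
// becomes the "previous" marking information (NTAMS becomes PTAMS).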
inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Given that explicit
      // marks only make sense under NTAMS it'd be nice if we could
      // check that condition if we wanted to. Given that we don't
      // know where the top of this region will end up, we simply set
      // NTAMS to the end of the region so all marks will be below
      // NTAMS. We'll set it to the actual top when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom,NTAMS) will contain objects
      // copied up to and including initial-mark and [NTAMS, top)
      // will contain objects copied during the concurrent marking cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP