Tue, 12 Dec 2017 10:30:27 +0800
#6345 sync is controlled by UseSyncLevel instead of Use3A2000
Reviewed-by: fujie
/*
 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP

#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "oops/oop.psgc.inline.hpp"
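
// Indices 0 through ParallelGCThreads (inclusive) are valid here, as the assert
// below allows index == ParallelGCThreads; the extra slot is presumably the
// promotion manager used by the VM thread itself rather than a GC worker.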
inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
  return &_manager_array[index];
}

template <class T>
inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
  if (p != NULL) { // XXX: error if p != NULL here
    oop o = oopDesc::load_decode_heap_oop_not_null(p);
    if (o->is_forwarded()) {
      o = o->forwardee();
      // Card mark
      if (PSScavenge::is_obj_in_young(o)) {
        PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
      }
      oopDesc::encode_store_heap_oop_not_null(p, o);
    } else {
      push_depth(p);
    }
  }
}

template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
  assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
         "Sanity");
  assert(Universe::heap()->is_in(p), "pointer outside heap");

  claim_or_forward_internal_depth(p);
}

//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
template<bool promote_immediately>
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;
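
  // The MIPS64-specific blocks in this method issue a full fence when
  // UseSyncLevel >= 2000; per the change summary above, this gating replaces
  // the older Use3A2000 check. The fences are presumably needed to order the
  // racy mark-word accesses, the object copy, and the forwarding CAS on weakly
  // ordered MIPS64/Loongson hardware.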
#ifdef MIPS64
  if (UseSyncLevel >= 2000) OrderAccess::fence();
#endif

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

#ifdef MIPS64
  if (UseSyncLevel >= 2000) OrderAccess::fence();
#endif

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    if (!promote_immediately) {
      // Find the object's age, MT safe.
      uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
        test_mark->displaced_mark_helper()->age() : test_mark->age();

      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
            } else {
              _young_gen_is_full = true;
            }
          }
        }

#ifdef MIPS64
        if (UseSyncLevel >= 2000) OrderAccess::fence();
#endif
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
#ifdef MIPS64
    if (UseSyncLevel >= 2000) OrderAccess::fence();
#endif

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non-MT-safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }

#ifdef MIPS64
    if (UseSyncLevel >= 2000) OrderAccess::fence();
#endif
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
  }
#endif

  return new_obj;
}
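
// Handles one entry popped from this manager's depth-first task queue: a masked
// entry denotes a chunked large object array (pushed above by
// copy_to_survivor_space) that is processed one chunk at a time, while an
// unmasked entry is a plain or narrow oop location that still needs to be
// copied and pushed.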
inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  if (is_oop_masked(p)) {
    assert(PSChunkLargeArrays, "invariant");
    oop const old = unmask_chunked_array_oop(p);
    process_array_chunk(old);
  } else {
    if (p.is_narrow()) {
      assert(UseCompressedOops, "Error");
      PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
    } else {
      PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
    }
  }
}

#if TASKQUEUE_STATS
void PSPromotionManager::record_steal(StarTask& p) {
  if (is_oop_masked(p)) {
    ++_masked_steals;
  }
}
#endif // TASKQUEUE_STATS

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP