src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp

Tue, 18 Oct 2016 10:12:00 +0800

author
fujie
date
Tue, 18 Oct 2016 10:12:00 +0800
changeset 134
58a58e4782dd
parent 25
873fd82b133d
child 413
6deac53aa96b
permissions
-rw-r--r--

Sync in oopDesc* PSPromotionManager::copy_to_survivor_space(...) for 3A2000.

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
aoqi@0 26 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
aoqi@0 27
aoqi@0 28 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
aoqi@0 29 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
aoqi@0 30 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
aoqi@0 31 #include "oops/oop.psgc.inline.hpp"
aoqi@0 32
aoqi@0 33 inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
aoqi@0 34 assert(_manager_array != NULL, "access of NULL manager_array");
aoqi@0 35 assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
aoqi@0 36 return &_manager_array[index];
aoqi@0 37 }
aoqi@0 38
aoqi@0 39 template <class T>
aoqi@0 40 inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
aoqi@0 41 if (p != NULL) { // XXX: error if p != NULL here
aoqi@0 42 oop o = oopDesc::load_decode_heap_oop_not_null(p);
aoqi@0 43 if (o->is_forwarded()) {
aoqi@0 44 o = o->forwardee();
aoqi@0 45 // Card mark
aoqi@0 46 if (PSScavenge::is_obj_in_young(o)) {
aoqi@0 47 PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
aoqi@0 48 }
aoqi@0 49 oopDesc::encode_store_heap_oop_not_null(p, o);
aoqi@0 50 } else {
aoqi@0 51 push_depth(p);
aoqi@0 52 }
aoqi@0 53 }
aoqi@0 54 }
aoqi@0 55
aoqi@0 56 template <class T>
aoqi@0 57 inline void PSPromotionManager::claim_or_forward_depth(T* p) {
aoqi@0 58 assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
aoqi@0 59 assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
aoqi@0 60 "Sanity");
aoqi@0 61 assert(Universe::heap()->is_in(p), "pointer outside heap");
aoqi@0 62
aoqi@0 63 claim_or_forward_internal_depth(p);
aoqi@0 64 }
aoqi@0 65
//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
// Copy the object o into a survivor space (or promote it to the old gen),
// racing with other GC workers via a CAS on o's mark word. Returns the
// new location of the object: either the copy this thread made, or — if
// another thread won the forwarding race — that thread's copy.
//
// Template parameter promote_immediately skips the young-gen attempt and
// allocates straight into the old gen.
//
// NOTE(review): the MIPS64/Use3A2000 OrderAccess::fence() calls were added
// by changeset 134 ("Sync ... for 3A2000") — presumably a memory-ordering
// workaround specific to the Loongson 3A2000; their exact placement looks
// deliberate, so do not move them without testing on that hardware.
template<bool promote_immediately>
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

#ifdef MIPS64
  if (Use3A2000) OrderAccess::fence();
#endif

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    if (!promote_immediately) {
      // Find the objects age, MT safe.
      // (A displaced mark — e.g. from a locked object — keeps the age in
      // the displaced header, so read it from there in that case.)
      uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
        test_mark->displaced_mark_helper()->age() : test_mark->age();

      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        // Fast path: bump-pointer allocation from this worker's young PLAB.
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
            } else {
              // Could not carve out a new PLAB: stop trying young-gen
              // allocation for the rest of this scavenge.
              _young_gen_is_full = true;
            }
          }
        }

#ifdef MIPS64
        if (Use3A2000) OrderAccess::fence();
#endif
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      // Test hook: force a promotion failure (PromotionFailureALot etc.).
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if(lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
#ifdef MIPS64
          if (Use3A2000) OrderAccess::fence();
#endif
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          // Both young and old allocation failed: promotion failure.
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
#ifdef MIPS64
    if (Use3A2000) OrderAccess::fence();
#endif

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is->objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }

#ifdef MIPS64
    if (Use3A2000) OrderAccess::fence();
#endif
  } else {
    // Already forwarded by another thread: just return its copy.
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
  }
#endif

  return new_obj;
}
aoqi@0 253
aoqi@25 254
aoqi@0 255 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
aoqi@0 256 if (is_oop_masked(p)) {
aoqi@0 257 assert(PSChunkLargeArrays, "invariant");
aoqi@0 258 oop const old = unmask_chunked_array_oop(p);
aoqi@0 259 process_array_chunk(old);
aoqi@0 260 } else {
aoqi@0 261 if (p.is_narrow()) {
aoqi@0 262 assert(UseCompressedOops, "Error");
aoqi@0 263 PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
aoqi@0 264 } else {
aoqi@0 265 PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
aoqi@0 266 }
aoqi@0 267 }
aoqi@0 268 }
aoqi@0 269
aoqi@0 270 #if TASKQUEUE_STATS
aoqi@0 271 void PSPromotionManager::record_steal(StarTask& p) {
aoqi@0 272 if (is_oop_masked(p)) {
aoqi@0 273 ++_masked_steals;
aoqi@0 274 }
aoqi@0 275 }
aoqi@0 276 #endif // TASKQUEUE_STATS
aoqi@0 277
aoqi@0 278 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP

mercurial