src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp

Thu, 12 Oct 2017 21:27:07 +0800

author
aoqi
date
Thu, 12 Oct 2017 21:27:07 +0800
changeset 7535
7ae4e26cb1e0
parent 7031
ee019285a52c
parent 6876
710a3c8b516e
child 8019
3fb3ceb7398f
permissions
-rw-r--r--

merge

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
aoqi@0 26 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
aoqi@0 27
aoqi@0 28 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
aoqi@0 29 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
jmasa@7031 30 #include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
aoqi@0 31 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
aoqi@0 32 #include "oops/oop.psgc.inline.hpp"
aoqi@0 33
aoqi@0 34 inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
aoqi@0 35 assert(_manager_array != NULL, "access of NULL manager_array");
aoqi@0 36 assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
aoqi@0 37 return &_manager_array[index];
aoqi@0 38 }
aoqi@0 39
// Process one reference slot p during a depth-first scavenge:
//  - if the referent has already been copied (is_forwarded), rewrite the slot
//    to the new location, dirtying the card first when the new location is
//    still in the young generation (keeps old->young references findable);
//  - otherwise push the slot on this manager's depth-first work queue so the
//    referent is copied later.
// T is either oop* or narrowOop*; the load/encode helpers handle compressed
// oops transparently.
template <class T>
inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
  if (p != NULL) { // XXX: error if p != NULL here
    oop o = oopDesc::load_decode_heap_oop_not_null(p);
    if (o->is_forwarded()) {
      o = o->forwardee();
      // Card mark before storing the updated reference.
      if (PSScavenge::is_obj_in_young(o)) {
        PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
      }
      oopDesc::encode_store_heap_oop_not_null(p, o);
    } else {
      push_depth(p);
    }
  }
}
aoqi@0 56
aoqi@0 57 template <class T>
aoqi@0 58 inline void PSPromotionManager::claim_or_forward_depth(T* p) {
aoqi@0 59 assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
aoqi@0 60 assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
aoqi@0 61 "Sanity");
aoqi@0 62 assert(Universe::heap()->is_in(p), "pointer outside heap");
aoqi@0 63
aoqi@0 64 claim_or_forward_internal_depth(p);
aoqi@0 65 }
aoqi@0 66
aoqi@0 67 //
aoqi@0 68 // This method is pretty bulky. It would be nice to split it up
aoqi@0 69 // into smaller submethods, but we need to be careful not to hurt
aoqi@0 70 // performance.
aoqi@0 71 //
// Copy object o to survivor space (or promote it to the old generation),
// racing with other GC worker threads via a CAS on the mark word.
//
// Template parameter promote_immediately skips the young-generation attempt
// entirely and allocates straight into the old generation.
//
// Returns the new location of the object: either the copy this thread
// installed, or -- when another thread won the forwarding CAS, or o was
// already forwarded -- the winner's copy. On allocation failure returns the
// result of oop_promotion_failed().
//
// NOTE(review): the "#ifdef MIPS64 ... Use3A2000 ... fence()" blocks are
// extra memory barriers from the Loongson port; presumably they work around
// a memory-ordering issue on the 3A2000 CPU -- confirm with the port
// maintainers before touching them.
template<bool promote_immediately>
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

#ifdef MIPS64
  if (Use3A2000) OrderAccess::fence();
#endif

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

#ifdef MIPS64
  if (Use3A2000) OrderAccess::fence();
#endif

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    if (!promote_immediately) {
      // Find the objects age, MT safe.
      // If the mark is displaced (locked/inflated), the age lives in the
      // displaced header, not in the mark word itself.
      uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
        test_mark->displaced_mark_helper()->age() : test_mark->age();

      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        // Fast path: bump-pointer allocation in this thread's young PLAB.
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          // Objects larger than half a PLAB would waste too much of a fresh
          // lab, so they are CAS-allocated directly in to-space.
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
            } else {
              // Could not carve out a new lab: remember that to-space is
              // exhausted so later copies skip straight to tenuring.
              _young_gen_is_full = true;
            }
          }
        }

#ifdef MIPS64
        if (Use3A2000) OrderAccess::fence();
#endif
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      // Test hook: force a promotion failure (PromotionFailureALot etc.).
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      // Same PLAB / direct / flush-and-refill dance as above, but against
      // this thread's old-generation lab.
      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if(lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
#ifdef MIPS64
    if (Use3A2000) OrderAccess::fence();
#endif

    // Now we have to CAS in the header.
    // This publishes the forwarding pointer; only one thread can win.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is->objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it: push a masked pointer so the array is scanned in
        // pieces by process_popped_location_depth() instead of all at once.
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      // (unallocate_object above must see our speculative copy's address.)
      new_obj = o->forwardee();
    }

#ifdef MIPS64
    if (Use3A2000) OrderAccess::fence();
#endif
  } else {
    // Already forwarded before we got here: just return the existing copy.
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
  }
#endif

  return new_obj;
}
aoqi@0 255
aoqi@25 256
aoqi@0 257 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
aoqi@0 258 if (is_oop_masked(p)) {
aoqi@0 259 assert(PSChunkLargeArrays, "invariant");
aoqi@0 260 oop const old = unmask_chunked_array_oop(p);
aoqi@0 261 process_array_chunk(old);
aoqi@0 262 } else {
aoqi@0 263 if (p.is_narrow()) {
aoqi@0 264 assert(UseCompressedOops, "Error");
aoqi@0 265 PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
aoqi@0 266 } else {
aoqi@0 267 PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
aoqi@0 268 }
aoqi@0 269 }
aoqi@0 270 }
aoqi@0 271
#if TASKQUEUE_STATS
// Statistics hook (compiled only with TASKQUEUE_STATS): called when this
// manager steals task p from another worker's queue; counts steals of
// masked (chunked large-array) entries.
void PSPromotionManager::record_steal(StarTask& p) {
  if (is_oop_masked(p)) {
    ++_masked_steals;
  }
}
#endif // TASKQUEUE_STATS
aoqi@0 279
aoqi@0 280 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP

mercurial