src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp

Fri, 29 Apr 2016 00:06:10 +0800

author
aoqi
date
Fri, 29 Apr 2016 00:06:10 +0800
changeset 1
2d8a650513c2
parent 0
f90c822e73f8
child 25
873fd82b133d
permissions
-rw-r--r--

Added MIPS 64-bit port.

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@1 25 /*
aoqi@1 26 * This file has been modified by Loongson Technology in 2015. These
aoqi@1 27 * modifications are Copyright (c) 2015 Loongson Technology, and are made
aoqi@1 28 * available on the same license terms set forth above.
aoqi@1 29 */
aoqi@1 30
aoqi@0 31 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
aoqi@0 32 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
aoqi@0 33
aoqi@0 34 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
aoqi@0 35 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
aoqi@0 36 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
aoqi@1 37 #include "gc_implementation/shared/mutableNUMASpace.hpp"
aoqi@0 38 #include "oops/oop.psgc.inline.hpp"
aoqi@0 39
aoqi@0 40 inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
aoqi@0 41 assert(_manager_array != NULL, "access of NULL manager_array");
aoqi@0 42 assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
aoqi@0 43 return &_manager_array[index];
aoqi@0 44 }
aoqi@0 45
aoqi@0 46 template <class T>
aoqi@0 47 inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
aoqi@0 48 if (p != NULL) { // XXX: error if p != NULL here
aoqi@0 49 oop o = oopDesc::load_decode_heap_oop_not_null(p);
aoqi@0 50 if (o->is_forwarded()) {
aoqi@0 51 o = o->forwardee();
aoqi@0 52 // Card mark
aoqi@0 53 if (PSScavenge::is_obj_in_young(o)) {
aoqi@0 54 PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
aoqi@0 55 }
aoqi@0 56 oopDesc::encode_store_heap_oop_not_null(p, o);
aoqi@0 57 } else {
aoqi@0 58 push_depth(p);
aoqi@0 59 }
aoqi@0 60 }
aoqi@0 61 }
aoqi@0 62
aoqi@0 63 template <class T>
aoqi@0 64 inline void PSPromotionManager::claim_or_forward_depth(T* p) {
aoqi@0 65 assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
aoqi@0 66 assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
aoqi@0 67 "Sanity");
aoqi@0 68 assert(Universe::heap()->is_in(p), "pointer outside heap");
aoqi@0 69
aoqi@0 70 claim_or_forward_internal_depth(p);
aoqi@0 71 }
aoqi@0 72
//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//

// Loongson instrumentation state shared with other translation units
// (defined elsewhere in the port).
// NOTE(review): the two arrays are fixed at 16 entries but are indexed by
// os::numa_get_cpu_id() in copy_to_survivor_space below -- confirm the
// target machines never report a CPU id >= 16, otherwise the accounting
// writes out of bounds.
extern int node_ex;                 // round-robin cursor used to spread old-gen copies across NUMA nodes
extern int each_gc_copy_fre[16];    // per-CPU count of objects copied during scavenge
extern float each_gc_copy_time[16]; // per-CPU accumulated copy time (ticks)
// Copy object o to a survivor space and return its new location, forwarding
// the original via a CAS on the mark word.  Multiple GC threads race on the
// same object, so exactly one thread wins the CAS and "owns" the copy; the
// losers undo their speculative allocation and return the winner's copy.
//
// Allocation policy: if the object is young enough (and promote_immediately
// is false) it is copied into to-space via the young PLAB; otherwise it is
// tenured into the old generation, either through a per-NUMA-node lab
// (UseOldNUMA) or the plain old-gen lab.  If no space can be found anywhere,
// oop_promotion_failed() is taken.
template<bool promote_immediately>
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Loongson instrumentation hook: per-object scavenge statistics.
    // ("Stastic" is presumably a typo for "Statistic" in the flag name;
    // renaming it would have to happen at the flag definition, not here.)
    if(UseStasticScavenge) {
      stastic_scavenge(o);
    }

    if (!promote_immediately) {
      // Find the objects age, MT safe.
      uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
        test_mark->displaced_mark_helper()->age() : test_mark->age();

      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
            } else {
              // To-space itself is exhausted; all further young attempts
              // this scavenge will be skipped.
              _young_gen_is_full = true;
            }
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      // Test hook: force a promotion failure to exercise the recovery path.
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      if(UseOldNUMA) {
        /* 2014/7/7 Liao: Copy objects to the same node of current GC thread */
        if(UseNUMAGC) {
          // NOTE(review): os::numa_get_group_id() is invoked repeatedly on
          // this path (up to five times per promoted object); hoisting it
          // into a local would save the repeated lookups -- confirm the GC
          // worker cannot migrate nodes mid-scavenge before restructuring.
          new_obj = (oop) _old_lab_oldnuma[os::numa_get_group_id()].allocate(new_obj_size);
          new_obj_is_tenured = true;

          if (new_obj == NULL) {
            if (!_old_gen_is_full) {
              // Do we allocate directly, or flush and refill?
              if (new_obj_size > (OldPLABSize / 2)) {
                // Allocate this object directly
                new_obj = (oop)old_gen()->cas_allocate(new_obj_size, os::numa_get_group_id());
              } else {
                // Flush and fill
                _old_lab_oldnuma[os::numa_get_group_id()].flush();

                HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize, os::numa_get_group_id());
                if(lab_base != NULL) {
                  _old_lab_oldnuma[os::numa_get_group_id()].initialize(MemRegion(lab_base, OldPLABSize));
                  // Try the old lab allocation again.
                  new_obj = (oop) _old_lab_oldnuma[os::numa_get_group_id()].allocate(new_obj_size);
                }
              }
            }

            // This is the promotion failed test, and code handling.
            // The code belongs here for two reasons. It is slightly
            // different than the code below, and cannot share the
            // CAS testing code. Keeping the code here also minimizes
            // the impact on the common case fast path code.

            if (new_obj == NULL) {
              _old_gen_is_full = true;
              return oop_promotion_failed(o, test_mark);
            }
          }
        }
        else {
          // No NUMA-aware GC: spread tenured copies over the non-zero lgrp
          // spaces in round-robin order using the global node_ex cursor.
          // NOTE(review): node_ex is read-modify-written without
          // synchronization here; racing GC threads may skew the rotation.
          // That only affects placement, not correctness -- confirm that is
          // acceptable before tightening it.
          ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
          MutableNUMASpace* s = (MutableNUMASpace*) heap->old_gen()->object_space();
          int i = s->lgrp_spaces()->length();
          int node;
          if(i > 1) {
            node = node_ex % (i - 1) + 1;
            node_ex++;
          }
          else
            node = 0;

          new_obj = (oop) _old_lab_oldnuma[node].allocate(new_obj_size);
          new_obj_is_tenured = true;

          if (new_obj == NULL) {
            if (!_old_gen_is_full) {
              // Do we allocate directly, or flush and refill?
              if (new_obj_size > (OldPLABSize / 2)) {
                // Allocate this object directly
                new_obj = (oop)old_gen()->cas_allocate(new_obj_size, node);
              } else {
                // Flush and fill
                _old_lab_oldnuma[node].flush();

                HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize, node);
                if(lab_base != NULL) {
                  _old_lab_oldnuma[node].initialize(MemRegion(lab_base, OldPLABSize));
                  // Try the old lab allocation again.
                  new_obj = (oop) _old_lab_oldnuma[node].allocate(new_obj_size);
                }
              }
            }

            // This is the promotion failed test, and code handling.
            // The code belongs here for two reasons. It is slightly
            // different than the code below, and cannot share the
            // CAS testing code. Keeping the code here also minimizes
            // the impact on the common case fast path code.

            if (new_obj == NULL) {
              _old_gen_is_full = true;
              return oop_promotion_failed(o, test_mark);
            }
          }
        }
      }
      else {
        // Plain (non-NUMA) tenuring through the single old-gen lab;
        // cas_allocate's node argument is fixed at 0 on this path.
        new_obj = (oop) _old_lab.allocate(new_obj_size);
        new_obj_is_tenured = true;

        if (new_obj == NULL) {
          if (!_old_gen_is_full) {
            // Do we allocate directly, or flush and refill?
            if (new_obj_size > (OldPLABSize / 2)) {
              // Allocate this object directly
              new_obj = (oop)old_gen()->cas_allocate(new_obj_size, 0);
            } else {
              // Flush and fill
              _old_lab.flush();

              HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize, 0);
              if(lab_base != NULL) {
#ifdef ASSERT
                // Delay the initialization of the promotion lab (plab).
                // This exposes uninitialized plabs to card table processing.
                if (GCWorkerDelayMillis > 0) {
                  os::sleep(Thread::current(), GCWorkerDelayMillis, false);
                }
#endif
                _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
                // Try the old lab allocation again.
                new_obj = (oop) _old_lab.allocate(new_obj_size);
              }
            }
          }

          // This is the promotion failed test, and code handling.
          // The code belongs here for two reasons. It is slightly
          // different than the code below, and cannot share the
          // CAS testing code. Keeping the code here also minimizes
          // the impact on the common case fast path code.

          if (new_obj == NULL) {
            _old_gen_is_full = true;
            return oop_promotion_failed(o, test_mark);
          }
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Loongson instrumentation: time the raw copy when UseStasticCopy is set.
    TimeStamp before_copy, after_copy;

    if(UseStasticCopy) {
      before_copy.update();
    }

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    if(UseStasticCopy) {
      after_copy.update();
    }

    if(UseStasticCopy) {
      // NOTE(review): indexes the fixed-size-16 accounting arrays by CPU id
      // with no bounds check -- see the extern declarations above.
      each_gc_copy_time[os::numa_get_cpu_id()] += after_copy.ticks() - before_copy.ticks();
      each_gc_copy_fre[os::numa_get_cpu_id()]++;
    }

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is->objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if(UseOldNUMA) {
          ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
          MutableNUMASpace* s = (MutableNUMASpace*) heap->old_gen()->object_space();
          int i;
          // NOTE(review): this offers the dead copy back to EVERY per-node
          // lab and writes a filler object for each lab that refuses.  Only
          // the lab that actually allocated new_obj can succeed, so the
          // filler is rewritten up to length()-1 times over the same region.
          // The repeated writes appear idempotent, but a single fill after
          // the loop (only when no lab accepted the space) would express the
          // intent more clearly -- confirm before restructuring.
          for(i = 0; i < s->lgrp_spaces()->length(); i++) {
            if (!_old_lab_oldnuma[i].unallocate_object((HeapWord*) new_obj, new_obj_size)) {
              CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
            }
          }
        }
        else {
          if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
            CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
          }
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    // Already forwarded by another thread before we even started.
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
  }
#endif

  return new_obj;
}
aoqi@0 364
aoqi@0 365 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
aoqi@0 366 if (is_oop_masked(p)) {
aoqi@0 367 assert(PSChunkLargeArrays, "invariant");
aoqi@0 368 oop const old = unmask_chunked_array_oop(p);
aoqi@0 369 process_array_chunk(old);
aoqi@0 370 } else {
aoqi@0 371 if (p.is_narrow()) {
aoqi@0 372 assert(UseCompressedOops, "Error");
aoqi@0 373 PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
aoqi@0 374 } else {
aoqi@0 375 PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
aoqi@0 376 }
aoqi@0 377 }
aoqi@0 378 }
aoqi@0 379
#if TASKQUEUE_STATS
// Bump the masked-steal counter when a stolen task is a chunked-array entry.
void PSPromotionManager::record_steal(StarTask& p) {
  if (!is_oop_masked(p)) {
    return;
  }
  ++_masked_steals;
}
#endif // TASKQUEUE_STATS
aoqi@0 387
aoqi@0 388 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP

mercurial