src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp

Thu, 09 Apr 2015 15:58:49 +0200

author
mlarsson
date
Thu, 09 Apr 2015 15:58:49 +0200
changeset 7686
fb69749583e8
parent 6680
78bbf4d43a14
child 6876
710a3c8b516e
permissions
-rw-r--r--

8072621: Clean up around VM_GC_Operations
Reviewed-by: brutisso, jmasa

duke@435 1 /*
drchase@6680 2 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
stefank@2314 27 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
stefank@2314 28 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
stefank@2314 29 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
sla@5237 30 #include "gc_implementation/shared/gcTrace.hpp"
stefank@2314 31 #include "gc_implementation/shared/mutableSpace.hpp"
stefank@5515 32 #include "memory/allocation.inline.hpp"
stefank@2314 33 #include "memory/memRegion.hpp"
stefank@5515 34 #include "memory/padded.inline.hpp"
stefank@2314 35 #include "oops/oop.inline.hpp"
stefank@2314 36 #include "oops/oop.psgc.inline.hpp"
duke@435 37
// Mute GCC printf-format warnings for this file (legacy format strings below
// predate the PTR_FORMAT/SIZE_FORMAT cleanup).
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// Shared static state for all promotion managers; populated in initialize().
// _manager_array holds ParallelGCThreads worker managers plus one extra
// slot for the VM thread, each padded to avoid false sharing.
PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
PSOldGen*                      PSPromotionManager::_old_gen = NULL;
MutableSpace*                  PSPromotionManager::_young_space = NULL;
duke@435 45 void PSPromotionManager::initialize() {
duke@435 46 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 47 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 48
duke@435 49 _old_gen = heap->old_gen();
duke@435 50 _young_space = heap->young_gen()->to_space();
duke@435 51
stefank@5515 52 // To prevent false sharing, we pad the PSPromotionManagers
stefank@5515 53 // and make sure that the first instance starts at a cache line.
duke@435 54 assert(_manager_array == NULL, "Attempt to initialize twice");
stefank@5515 55 _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);
duke@435 56 guarantee(_manager_array != NULL, "Could not initialize promotion manager");
duke@435 57
tonyp@2061 58 _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
sla@5237 59 guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");
duke@435 60
duke@435 61 // Create and register the PSPromotionManager(s) for the worker threads.
duke@435 62 for(uint i=0; i<ParallelGCThreads; i++) {
stefank@5515 63 stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
duke@435 64 }
duke@435 65 // The VMThread gets its own PSPromotionManager, which is not available
duke@435 66 // for work stealing.
duke@435 67 }
duke@435 68
duke@435 69 PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
duke@435 70 assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
duke@435 71 assert(_manager_array != NULL, "Sanity");
stefank@5515 72 return &_manager_array[index];
duke@435 73 }
duke@435 74
duke@435 75 PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
duke@435 76 assert(_manager_array != NULL, "Sanity");
stefank@5515 77 return &_manager_array[ParallelGCThreads];
duke@435 78 }
duke@435 79
duke@435 80 void PSPromotionManager::pre_scavenge() {
duke@435 81 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 82 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 83
duke@435 84 _young_space = heap->young_gen()->to_space();
duke@435 85
duke@435 86 for(uint i=0; i<ParallelGCThreads+1; i++) {
duke@435 87 manager_array(i)->reset();
duke@435 88 }
duke@435 89 }
duke@435 90
sla@5237 91 bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
sla@5237 92 bool promotion_failure_occurred = false;
sla@5237 93
jcoomes@2020 94 TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
jcoomes@1993 95 for (uint i = 0; i < ParallelGCThreads + 1; i++) {
duke@435 96 PSPromotionManager* manager = manager_array(i);
tonyp@2061 97 assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
sla@5237 98 if (manager->_promotion_failed_info.has_failed()) {
sla@5237 99 gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
sla@5237 100 promotion_failure_occurred = true;
sla@5237 101 }
duke@435 102 manager->flush_labs();
duke@435 103 }
sla@5237 104 return promotion_failure_occurred;
duke@435 105 }
duke@435 106
#if TASKQUEUE_STATS
// Print one row of this manager's task-queue statistics, prefixed with the
// manager's index i.
void
PSPromotionManager::print_taskqueue_stats(uint i) const {
  tty->print("%3u ", i);
  _claimed_stack_depth.stats.print();
  tty->cr();
}

// Print one row of this manager's local counters (masked pushes/steals and
// array-chunking activity), prefixed with the manager's index i.
void
PSPromotionManager::print_local_stats(uint i) const {
  #define FMT " " SIZE_FORMAT_W(10)
  tty->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
  #undef FMT
}

// Column headers for the local-stats table; widths match the
// SIZE_FORMAT_W(10) fields printed by print_local_stats().
static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};

// Dump both tables (task-queue stats and local stats) for all managers,
// including the VM thread's extra one.
void
PSPromotionManager::print_stats() {
  tty->print_cr("== GC Tasks Stats, GC %3d",
                Universe::heap()->total_collections());

  tty->print("thr "); TaskQueueStats::print_header(1); tty->cr();
  tty->print("--- "); TaskQueueStats::print_header(2); tty->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_taskqueue_stats(i);
  }

  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) tty->print_cr("%s", pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(i);
  }
}

// Zero all statistics counters; called from reset() before each scavenge.
void
PSPromotionManager::reset_stats() {
  claimed_stack_depth()->stats.reset();
  _masked_pushes = _masked_steals = 0;
  _arrays_chunked = _array_chunks_processed = 0;
}
#endif // TASKQUEUE_STATS
duke@435 154
duke@435 155 PSPromotionManager::PSPromotionManager() {
duke@435 156 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 157 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 158
duke@435 159 // We set the old lab's start array.
duke@435 160 _old_lab.set_start_array(old_gen()->start_array());
duke@435 161
duke@435 162 uint queue_size;
tonyp@2061 163 claimed_stack_depth()->initialize();
tonyp@2061 164 queue_size = claimed_stack_depth()->max_elems();
duke@435 165
duke@435 166 _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
duke@435 167 if (_totally_drain) {
duke@435 168 _target_stack_size = 0;
duke@435 169 } else {
duke@435 170 // don't let the target stack size to be more than 1/4 of the entries
duke@435 171 _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
duke@435 172 (uint) (queue_size / 4));
duke@435 173 }
duke@435 174
duke@435 175 _array_chunk_size = ParGCArrayScanChunk;
duke@435 176 // let's choose 1.5x the chunk size
duke@435 177 _min_array_size_for_chunking = 3 * _array_chunk_size / 2;
duke@435 178
duke@435 179 reset();
duke@435 180 }
duke@435 181
duke@435 182 void PSPromotionManager::reset() {
jcoomes@1993 183 assert(stacks_empty(), "reset of non-empty stack");
duke@435 184
duke@435 185 // We need to get an assert in here to make sure the labs are always flushed.
duke@435 186
duke@435 187 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 188 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 189
duke@435 190 // Do not prefill the LAB's, save heap wastage!
duke@435 191 HeapWord* lab_base = young_space()->top();
duke@435 192 _young_lab.initialize(MemRegion(lab_base, (size_t)0));
duke@435 193 _young_gen_is_full = false;
duke@435 194
duke@435 195 lab_base = old_gen()->object_space()->top();
duke@435 196 _old_lab.initialize(MemRegion(lab_base, (size_t)0));
duke@435 197 _old_gen_is_full = false;
duke@435 198
sla@5237 199 _promotion_failed_info.reset();
sla@5237 200
jcoomes@2020 201 TASKQUEUE_STATS_ONLY(reset_stats());
duke@435 202 }
duke@435 203
coleenp@548 204
duke@435 205 void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
duke@435 206 totally_drain = totally_drain || _totally_drain;
duke@435 207
duke@435 208 #ifdef ASSERT
duke@435 209 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
duke@435 210 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
duke@435 211 MutableSpace* to_space = heap->young_gen()->to_space();
duke@435 212 MutableSpace* old_space = heap->old_gen()->object_space();
duke@435 213 #endif /* ASSERT */
duke@435 214
jcoomes@1993 215 OopStarTaskQueue* const tq = claimed_stack_depth();
duke@435 216 do {
coleenp@548 217 StarTask p;
duke@435 218
duke@435 219 // Drain overflow stack first, so other threads can steal from
duke@435 220 // claimed stack while we work.
jcoomes@1993 221 while (tq->pop_overflow(p)) {
jcoomes@1993 222 process_popped_location_depth(p);
duke@435 223 }
duke@435 224
duke@435 225 if (totally_drain) {
jcoomes@1993 226 while (tq->pop_local(p)) {
duke@435 227 process_popped_location_depth(p);
duke@435 228 }
duke@435 229 } else {
jcoomes@1993 230 while (tq->size() > _target_stack_size && tq->pop_local(p)) {
duke@435 231 process_popped_location_depth(p);
duke@435 232 }
duke@435 233 }
jcoomes@1993 234 } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty());
duke@435 235
jcoomes@1993 236 assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
jcoomes@1993 237 assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
jcoomes@1993 238 assert(tq->overflow_empty(), "Sanity");
duke@435 239 }
duke@435 240
duke@435 241 void PSPromotionManager::flush_labs() {
jcoomes@1993 242 assert(stacks_empty(), "Attempt to flush lab with live stack");
duke@435 243
duke@435 244 // If either promotion lab fills up, we can flush the
duke@435 245 // lab but not refill it, so check first.
duke@435 246 assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
duke@435 247 if (!_young_lab.is_flushed())
duke@435 248 _young_lab.flush();
duke@435 249
duke@435 250 assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
duke@435 251 if (!_old_lab.is_flushed())
duke@435 252 _old_lab.flush();
duke@435 253
duke@435 254 // Let PSScavenge know if we overflowed
duke@435 255 if (_young_gen_is_full) {
duke@435 256 PSScavenge::set_survivor_overflow(true);
duke@435 257 }
duke@435 258 }
duke@435 259
coleenp@548 260 template <class T> void PSPromotionManager::process_array_chunk_work(
coleenp@548 261 oop obj,
coleenp@548 262 int start, int end) {
jwilhelm@2648 263 assert(start <= end, "invariant");
coleenp@548 264 T* const base = (T*)objArrayOop(obj)->base();
coleenp@548 265 T* p = base + start;
coleenp@548 266 T* const chunk_end = base + end;
coleenp@548 267 while (p < chunk_end) {
coleenp@548 268 if (PSScavenge::should_scavenge(p)) {
coleenp@548 269 claim_or_forward_depth(p);
coleenp@548 270 }
coleenp@548 271 ++p;
coleenp@548 272 }
coleenp@548 273 }
coleenp@548 274
// Process one chunk of a large objArray. `old` is the from-space copy
// (already forwarded to `obj`); chunks are carved off the tail. If enough
// elements remain, scan only the last _array_chunk_size of them, shrink
// old's length to hide the scanned tail, and re-push old (masked) for the
// next chunk; otherwise scan the remaining prefix and restore old's true
// length from obj.
void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    // Temporarily understate old's length so the next pop of this entry
    // only sees the not-yet-processed prefix.
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
    TASKQUEUE_STATS_ONLY(++_masked_pushes);
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  // Scan [start, end) with the element width matching the heap's oop layout.
  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(obj, start, end);
  } else {
    process_array_chunk_work<oop>(obj, start, end);
  }
}
duke@435 306
duke@435 307 oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
duke@435 308 assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
duke@435 309
duke@435 310 // Attempt to CAS in the header.
duke@435 311 // This tests if the header is still the same as when
duke@435 312 // this started. If it is the same (i.e., no forwarding
duke@435 313 // pointer has been installed), then this thread owns
duke@435 314 // it.
duke@435 315 if (obj->cas_forward_to(obj, obj_mark)) {
duke@435 316 // We won any races, we "own" this object.
duke@435 317 assert(obj == obj->forwardee(), "Sanity");
duke@435 318
sla@5237 319 _promotion_failed_info.register_copy_failure(obj->size());
sla@5237 320
tonyp@2061 321 obj->push_contents(this);
duke@435 322
duke@435 323 // Save the mark if needed
duke@435 324 PSScavenge::oop_promotion_failed(obj, obj_mark);
duke@435 325 } else {
duke@435 326 // We lost, someone else "owns" this object
duke@435 327 guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");
duke@435 328
duke@435 329 // No unallocation to worry about.
duke@435 330 obj = obj->forwardee();
duke@435 331 }
duke@435 332
coleenp@4037 333 #ifndef PRODUCT
duke@435 334 if (TraceScavenge) {
duke@435 335 gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
duke@435 336 "promotion-failure",
coleenp@4037 337 obj->klass()->internal_name(),
hseigel@5784 338 (void *)obj, obj->size());
duke@435 339
duke@435 340 }
duke@435 341 #endif
duke@435 342
duke@435 343 return obj;
duke@435 344 }

mercurial