Sat, 23 Nov 2013 12:25:13 +0100
8028128: Add a type safe alternative for working with counter based data
Reviewed-by: dholmes, egahlin
duke@435 | 1 | /* |
sla@5237 | 2 | * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" |
stefank@2314 | 27 | #include "gc_implementation/parallelScavenge/psOldGen.hpp" |
stefank@2314 | 28 | #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" |
stefank@2314 | 29 | #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" |
sla@5237 | 30 | #include "gc_implementation/shared/gcTrace.hpp" |
stefank@2314 | 31 | #include "gc_implementation/shared/mutableSpace.hpp" |
stefank@5515 | 32 | #include "memory/allocation.inline.hpp" |
stefank@2314 | 33 | #include "memory/memRegion.hpp" |
stefank@5515 | 34 | #include "memory/padded.inline.hpp" |
stefank@2314 | 35 | #include "oops/oop.inline.hpp" |
stefank@2314 | 36 | #include "oops/oop.psgc.inline.hpp" |
duke@435 | 37 | |
// Shared, process-wide promotion-manager state, set up once in initialize().
// _manager_array holds ParallelGCThreads worker managers plus one extra slot
// for the VM thread; each element is padded (PaddedEnd) to avoid false
// sharing between GC worker threads.
PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
PSOldGen*                      PSPromotionManager::_old_gen = NULL;
MutableSpace*                  PSPromotionManager::_young_space = NULL;
duke@435 | 42 | |
// One-time setup of the shared promotion-manager state: caches the old gen
// and the young gen's to-space, allocates one padded PSPromotionManager per
// GC worker thread plus one for the VM thread, and registers the workers'
// depth-first queues with the task-queue set used for work stealing.
void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  // To prevent false sharing, we pad the PSPromotionManagers
  // and make sure that the first instance starts at a cache line.
  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
  for(uint i=0; i<ParallelGCThreads; i++) {
    stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
  }
  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
}
duke@435 | 66 | |
duke@435 | 67 | PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) { |
duke@435 | 68 | assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range"); |
duke@435 | 69 | assert(_manager_array != NULL, "Sanity"); |
stefank@5515 | 70 | return &_manager_array[index]; |
duke@435 | 71 | } |
duke@435 | 72 | |
duke@435 | 73 | PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() { |
duke@435 | 74 | assert(_manager_array != NULL, "Sanity"); |
stefank@5515 | 75 | return &_manager_array[ParallelGCThreads]; |
duke@435 | 76 | } |
duke@435 | 77 | |
duke@435 | 78 | void PSPromotionManager::pre_scavenge() { |
duke@435 | 79 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
duke@435 | 80 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 81 | |
duke@435 | 82 | _young_space = heap->young_gen()->to_space(); |
duke@435 | 83 | |
duke@435 | 84 | for(uint i=0; i<ParallelGCThreads+1; i++) { |
duke@435 | 85 | manager_array(i)->reset(); |
duke@435 | 86 | } |
duke@435 | 87 | } |
duke@435 | 88 | |
// Post-scavenge processing for every manager (workers + VM thread): verifies
// the marking stacks drained, reports any recorded promotion failure to the
// GC tracer, and flushes the promotion LABs. Returns true if any manager
// recorded a promotion failure during this scavenge.
bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
  bool promotion_failure_occurred = false;

  TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    PSPromotionManager* manager = manager_array(i);
    assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    if (manager->_promotion_failed_info.has_failed()) {
      gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
      promotion_failure_occurred = true;
    }
    manager->flush_labs();
  }
  return promotion_failure_occurred;
}
duke@435 | 104 | |
#if TASKQUEUE_STATS
// Prints one row of the shared task-queue statistics for manager i.
void
PSPromotionManager::print_taskqueue_stats(uint i) const {
  tty->print("%3u ", i);
  _claimed_stack_depth.stats.print();
  tty->cr();
}

// Prints one row of the manager-local counters (masked pushes/steals and
// array-chunking activity) for manager i.
void
PSPromotionManager::print_local_stats(uint i) const {
  #define FMT " " SIZE_FORMAT_W(10)
  tty->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
  #undef FMT
}

// Column headers for the local-stats table printed by print_stats().
static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};

// Dumps both statistics tables (task-queue and manager-local) for all
// managers, including the VM thread's.
void
PSPromotionManager::print_stats() {
  tty->print_cr("== GC Tasks Stats, GC %3d",
                Universe::heap()->total_collections());

  tty->print("thr "); TaskQueueStats::print_header(1); tty->cr();
  tty->print("--- "); TaskQueueStats::print_header(2); tty->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_taskqueue_stats(i);
  }

  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) tty->print_cr(pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(i);
  }
}

// Clears both the shared task-queue statistics and the local counters.
void
PSPromotionManager::reset_stats() {
  claimed_stack_depth()->stats.reset();
  _masked_pushes = _masked_steals = 0;
  _arrays_chunked = _array_chunks_processed = 0;
}
#endif // TASKQUEUE_STATS
duke@435 | 152 | |
// Per-instance setup: hooks the old gen's start array into the old LAB,
// initializes the claimed depth-first stack, and derives the drain policy
// (total vs. partial drain with a target size) from the command-line flags.
PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  claimed_stack_depth()->initialize();
  queue_size = claimed_stack_depth()->max_elems();

  // With a single GC thread (or an explicit target of 0) there is nobody to
  // steal leftover work, so always drain the stack completely.
  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // don't let the target stack size to be more than 1/4 of the entries
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }

  _array_chunk_size = ParGCArrayScanChunk;
  // let's choose 1.5x the chunk size
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  reset();
}
duke@435 | 179 | |
// Resets per-scavenge state: re-initializes both promotion LABs as empty
// (zero-sized) regions positioned at the current space tops, clears the
// gen-full flags and the promotion-failure record, and (when enabled)
// clears the task-queue statistics.
void PSPromotionManager::reset() {
  assert(stacks_empty(), "reset of non-empty stack");

  // We need to get an assert in here to make sure the labs are always flushed.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Do not prefill the LAB's, save heap wastage!
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _promotion_failed_info.reset();

  TASKQUEUE_STATS_ONLY(reset_stats());
}
duke@435 | 201 | |
coleenp@548 | 202 | |
duke@435 | 203 | void PSPromotionManager::drain_stacks_depth(bool totally_drain) { |
duke@435 | 204 | totally_drain = totally_drain || _totally_drain; |
duke@435 | 205 | |
duke@435 | 206 | #ifdef ASSERT |
duke@435 | 207 | ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); |
duke@435 | 208 | assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); |
duke@435 | 209 | MutableSpace* to_space = heap->young_gen()->to_space(); |
duke@435 | 210 | MutableSpace* old_space = heap->old_gen()->object_space(); |
duke@435 | 211 | #endif /* ASSERT */ |
duke@435 | 212 | |
jcoomes@1993 | 213 | OopStarTaskQueue* const tq = claimed_stack_depth(); |
duke@435 | 214 | do { |
coleenp@548 | 215 | StarTask p; |
duke@435 | 216 | |
duke@435 | 217 | // Drain overflow stack first, so other threads can steal from |
duke@435 | 218 | // claimed stack while we work. |
jcoomes@1993 | 219 | while (tq->pop_overflow(p)) { |
jcoomes@1993 | 220 | process_popped_location_depth(p); |
duke@435 | 221 | } |
duke@435 | 222 | |
duke@435 | 223 | if (totally_drain) { |
jcoomes@1993 | 224 | while (tq->pop_local(p)) { |
duke@435 | 225 | process_popped_location_depth(p); |
duke@435 | 226 | } |
duke@435 | 227 | } else { |
jcoomes@1993 | 228 | while (tq->size() > _target_stack_size && tq->pop_local(p)) { |
duke@435 | 229 | process_popped_location_depth(p); |
duke@435 | 230 | } |
duke@435 | 231 | } |
jcoomes@1993 | 232 | } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty()); |
duke@435 | 233 | |
jcoomes@1993 | 234 | assert(!totally_drain || tq->taskqueue_empty(), "Sanity"); |
jcoomes@1993 | 235 | assert(totally_drain || tq->size() <= _target_stack_size, "Sanity"); |
jcoomes@1993 | 236 | assert(tq->overflow_empty(), "Sanity"); |
duke@435 | 237 | } |
duke@435 | 238 | |
duke@435 | 239 | void PSPromotionManager::flush_labs() { |
jcoomes@1993 | 240 | assert(stacks_empty(), "Attempt to flush lab with live stack"); |
duke@435 | 241 | |
duke@435 | 242 | // If either promotion lab fills up, we can flush the |
duke@435 | 243 | // lab but not refill it, so check first. |
duke@435 | 244 | assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity"); |
duke@435 | 245 | if (!_young_lab.is_flushed()) |
duke@435 | 246 | _young_lab.flush(); |
duke@435 | 247 | |
duke@435 | 248 | assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity"); |
duke@435 | 249 | if (!_old_lab.is_flushed()) |
duke@435 | 250 | _old_lab.flush(); |
duke@435 | 251 | |
duke@435 | 252 | // Let PSScavenge know if we overflowed |
duke@435 | 253 | if (_young_gen_is_full) { |
duke@435 | 254 | PSScavenge::set_survivor_overflow(true); |
duke@435 | 255 | } |
duke@435 | 256 | } |
duke@435 | 257 | |
coleenp@548 | 258 | template <class T> void PSPromotionManager::process_array_chunk_work( |
coleenp@548 | 259 | oop obj, |
coleenp@548 | 260 | int start, int end) { |
jwilhelm@2648 | 261 | assert(start <= end, "invariant"); |
coleenp@548 | 262 | T* const base = (T*)objArrayOop(obj)->base(); |
coleenp@548 | 263 | T* p = base + start; |
coleenp@548 | 264 | T* const chunk_end = base + end; |
coleenp@548 | 265 | while (p < chunk_end) { |
coleenp@548 | 266 | if (PSScavenge::should_scavenge(p)) { |
coleenp@548 | 267 | claim_or_forward_depth(p); |
coleenp@548 | 268 | } |
coleenp@548 | 269 | ++p; |
coleenp@548 | 270 | } |
coleenp@548 | 271 | } |
coleenp@548 | 272 | |
// Processes one chunk of a large, already-forwarded object array. If more
// than _min_array_size_for_chunking elements remain, the last
// _array_chunk_size of them are peeled off for this pass: the old copy's
// length is shrunk to record the progress and the (masked) old oop is
// re-pushed for further chunking. Otherwise this is the final chunk and the
// old copy's length is restored from the forwardee. The selected
// [start, end) range of the new copy is then scanned.
void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
    TASKQUEUE_STATS_ONLY(++_masked_pushes);
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(obj, start, end);
  } else {
    process_array_chunk_work<oop>(obj, start, end);
  }
}
duke@435 | 304 | |
duke@435 | 305 | oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) { |
duke@435 | 306 | assert(_old_gen_is_full || PromotionFailureALot, "Sanity"); |
duke@435 | 307 | |
duke@435 | 308 | // Attempt to CAS in the header. |
duke@435 | 309 | // This tests if the header is still the same as when |
duke@435 | 310 | // this started. If it is the same (i.e., no forwarding |
duke@435 | 311 | // pointer has been installed), then this thread owns |
duke@435 | 312 | // it. |
duke@435 | 313 | if (obj->cas_forward_to(obj, obj_mark)) { |
duke@435 | 314 | // We won any races, we "own" this object. |
duke@435 | 315 | assert(obj == obj->forwardee(), "Sanity"); |
duke@435 | 316 | |
sla@5237 | 317 | _promotion_failed_info.register_copy_failure(obj->size()); |
sla@5237 | 318 | |
tonyp@2061 | 319 | obj->push_contents(this); |
duke@435 | 320 | |
duke@435 | 321 | // Save the mark if needed |
duke@435 | 322 | PSScavenge::oop_promotion_failed(obj, obj_mark); |
duke@435 | 323 | } else { |
duke@435 | 324 | // We lost, someone else "owns" this object |
duke@435 | 325 | guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed."); |
duke@435 | 326 | |
duke@435 | 327 | // No unallocation to worry about. |
duke@435 | 328 | obj = obj->forwardee(); |
duke@435 | 329 | } |
duke@435 | 330 | |
coleenp@4037 | 331 | #ifndef PRODUCT |
duke@435 | 332 | if (TraceScavenge) { |
duke@435 | 333 | gclog_or_tty->print_cr("{%s %s 0x%x (%d)}", |
duke@435 | 334 | "promotion-failure", |
coleenp@4037 | 335 | obj->klass()->internal_name(), |
hseigel@5784 | 336 | (void *)obj, obj->size()); |
duke@435 | 337 | |
duke@435 | 338 | } |
duke@435 | 339 | #endif |
duke@435 | 340 | |
duke@435 | 341 | return obj; |
duke@435 | 342 | } |