Sat, 23 Nov 2013 12:25:13 +0100
8028128: Add a type safe alternative for working with counter based data
Reviewed-by: dholmes, egahlin
/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP

#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"

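// There is one PSPromotionManager per GC worker thread, plus one extra slot
// (at index ParallelGCThreads) used by the VM thread; hence the inclusive
// upper bound in the assert below.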
inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
  return &_manager_array[index];
}

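// Depth-first variant of claim-or-forward: if the referent of p has already
// been copied, just update the slot (card-marking if this creates an
// old->young pointer); otherwise push the slot on this manager's depth-first
// queue for later copying.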
template <class T>
inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
  if (p != NULL) { // XXX: error if p != NULL here
    oop o = oopDesc::load_decode_heap_oop_not_null(p);
    if (o->is_forwarded()) {
      o = o->forwardee();
      // Card mark: keep the old->young remembered set up to date.
      if (PSScavenge::is_obj_in_young(o)) {
        PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
      }
      oopDesc::encode_store_heap_oop_not_null(p, o);
    } else {
      push_depth(p);
    }
  }
}

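// Checked entry point: asserts that the slot really needs scavenging and
// lies within the parallel scavenge heap before delegating to
// claim_or_forward_internal_depth() above.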
template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
  assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
         "Sanity");
  assert(Universe::heap()->is_in(p), "pointer outside heap");

  claim_or_forward_internal_depth(p);
}

//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
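// In outline (promote_immediately skips the young-generation attempt):
//  1. Read the mark word once; if it is marked, the object is already
//     forwarded and the forwardee is returned.
//  2. Allocate space for the copy: in to-space while the object's age is
//     below the tenuring threshold (young PLAB first, then a direct CAS
//     allocation), otherwise in the old generation (old PLAB, then direct).
//  3. Copy the object, then publish it by CAS-ing a forwarding pointer into
//     the old copy's mark word. The winner pushes the new copy's contents
//     (or a masked chunk task for large objArrays); a loser unallocates or
//     fills its copy and adopts the winner's forwardee.
//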
template<bool promote_immediately>
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    if (!promote_immediately) {
      // Find the object's age, MT safe.
      uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
        test_mark->displaced_mark_helper()->age() : test_mark->age();

      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill? Objects larger
          // than half a PLAB go directly to to-space so that a single
          // object does not consume most of a freshly refilled lab.
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
            } else {
              _young_gen_is_full = true;
            }
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different from the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }
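
    // An allocation (young or old) has succeeded; new_obj points at raw,
    // uninitialized space. Copy first, then publish the copy by CAS-ing a
    // forwarding pointer into the old copy's mark word.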
    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won the race; we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non-MT-safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    } else {
      // We lost the race; someone else "owns" this object.
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->klass()->internal_name(), (void *)o, (void *)new_obj, new_obj->size());
  }
#endif

  return new_obj;
}

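// Dispatch one task popped from the depth-first queue. A masked entry is a
// chunk of a large objArray (pushed by copy_to_survivor_space() above);
// anything else is a narrow or wide oop slot whose referent is copied and
// whose contents are pushed through the safe barrier.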
inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  if (is_oop_masked(p)) {
    assert(PSChunkLargeArrays, "invariant");
    oop const old = unmask_chunked_array_oop(p);
    process_array_chunk(old);
  } else {
    if (p.is_narrow()) {
      assert(UseCompressedOops, "Error");
      PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
    } else {
      PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
    }
  }
}

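// Task queue statistics: track how many of the tasks stolen by this manager
// were masked array-chunk tasks.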
#if TASKQUEUE_STATS
void PSPromotionManager::record_steal(StarTask& p) {
  if (is_oop_masked(p)) {
    ++_masked_steals;
  }
}
#endif // TASKQUEUE_STATS

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP