Fri, 20 Mar 2009 23:19:36 -0700
6814659: separable cleanups and subroutines for 6655638
Summary: preparatory but separable changes for method handles
Reviewed-by: kvn, never
/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_parallelScavengeHeap.cpp.incl"

PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
PSPermGen* ParallelScavengeHeap::_perm_gen = NULL;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;

static void trace_gen_sizes(const char* const str,
                            size_t pg_min, size_t pg_max,
                            size_t og_min, size_t og_max,
                            size_t yg_min, size_t yg_max)
{
  if (TracePageSizes) {
    tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT "," SIZE_FORMAT " "
                  SIZE_FORMAT,
                  str, pg_min / K, pg_max / K,
                  og_min / K, og_max / K,
                  yg_min / K, yg_max / K,
                  (pg_max + og_max + yg_max) / K);
  }
}
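// Illustrative output (with -XX:+TracePageSizes; values in KB, as min,max
// pairs for perm, old, and young, then the reserved total), e.g.:
//   ps heap rnd: 12288,65536 10240,174080 5120,87040 326656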

jint ParallelScavengeHeap::initialize() {
  // Cannot be initialized until after the flags are parsed
  GenerationSizer flag_parser;

  size_t yg_min_size = flag_parser.min_young_gen_size();
  size_t yg_max_size = flag_parser.max_young_gen_size();
  size_t og_min_size = flag_parser.min_old_gen_size();
  size_t og_max_size = flag_parser.max_old_gen_size();
  // Why isn't there a min_perm_gen_size()?
  size_t pg_min_size = flag_parser.perm_gen_size();
  size_t pg_max_size = flag_parser.max_perm_gen_size();

  trace_gen_sizes("ps heap raw",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  // The ReservedSpace ctor used below requires that the page size for the perm
  // gen is <= the page size for the rest of the heap (young + old gens).
  const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                     yg_max_size + og_max_size,
                                                     8);
  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
                                                          pg_max_size, 16),
                                 og_page_sz);
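  // The last argument to page_size_for_region() is, as these call sites
  // suggest, a minimum page count for the region; it steers the choice
  // away from large pages for small regions.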

  const size_t pg_align = set_alignment(_perm_gen_alignment, pg_page_sz);
  const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz);
  const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);

  // Update sizes to reflect the selected page size(s).
  //
  // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
  // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
  // move to the common code.
  yg_min_size = align_size_up(yg_min_size, yg_align);
  yg_max_size = align_size_up(yg_max_size, yg_align);
  size_t yg_cur_size = align_size_up(flag_parser.young_gen_size(), yg_align);
  yg_cur_size = MAX2(yg_cur_size, yg_min_size);

  og_min_size = align_size_up(og_min_size, og_align);
  og_max_size = align_size_up(og_max_size, og_align);
  size_t og_cur_size = align_size_up(flag_parser.old_gen_size(), og_align);
  og_cur_size = MAX2(og_cur_size, og_min_size);

  pg_min_size = align_size_up(pg_min_size, pg_align);
  pg_max_size = align_size_up(pg_max_size, pg_align);
  size_t pg_cur_size = pg_min_size;
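  // The perm gen is committed at its minimum size initially and grown on
  // demand.  Note that align_size_up(n, a) rounds n up to the next multiple
  // of the power-of-two alignment a, e.g. align_size_up(5*M, 4*M) == 8*M.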

  trace_gen_sizes("ps heap rnd",
                  pg_min_size, pg_max_size,
                  og_min_size, og_max_size,
                  yg_min_size, yg_max_size);

  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  // The main part of the heap (old gen + young gen) can often use a larger page
  // size than is needed or wanted for the perm gen.  Use the "compound
  // alignment" ReservedSpace ctor to avoid having to use the same page size for
  // all gens.

  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
                            og_align, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at the specified address - the requested memory
      // region is already taken, for example, by the 'java' launcher.
      // Try again to reserve the heap at a higher address.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
                                 og_align, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
                                   og_align, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }
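  // To summarize the compressed-oops path: reservation is attempted at up to
  // three preferred bases, in decreasing order of oop-decode efficiency:
  // unscaled, then zero based, then heap based narrow oops.  For the last
  // attempt preferred_heap_base() returns NULL, letting the OS choose.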

  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
                       heap_rs.base(), pg_max_size);
  os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                       og_max_size + yg_max_size, og_page_sz,
                       heap_rs.base() + pg_max_size,
                       heap_rs.size() - pg_max_size);
  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  CardTableExtension* const barrier_set = new CardTableExtension(_reserved, 3);
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
  if (_barrier_set == NULL) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for barrier set");
    return JNI_ENOMEM;
  }

  // Initial young gen size is 4 Mb
  //
  // XXX - what about flag_parser.young_gen_size()?
  const size_t init_young_size = align_size_up(4 * M, yg_align);
  yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);
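  // That is: start at roughly 4M (capped at yg_max_size), but never below
  // the flag-derived current size computed earlier.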

  // Split the reserved space into perm gen and the main heap (everything else).
  // The main heap uses a different alignment.
  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);
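  // first_part()/last_part() carve the single reservation at the pg_max_size
  // boundary; the main (old + young) part is re-aligned to og_align.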

  // Make up the generations.
  //
  // Calculate the maximum size that a generation can grow.  This
  // includes growth into the other generation.  Note that the
  // parameter _max_gen_size still records the maximum size of the
  // generation as the boundaries currently stand, and is used as such.
  double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;

  _gens = new AdjoiningGenerations(main_rs,
                                   og_cur_size,
                                   og_min_size,
                                   og_max_size,
                                   yg_cur_size,
                                   yg_min_size,
                                   yg_max_size,
                                   yg_align);

  _old_gen = _gens->old_gen();
  _young_gen = _gens->young_gen();

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
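  // The initial promotion estimate is bounded both by eden (roughly the most
  // that a single scavenge can promote) and by the current old gen capacity.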
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             intra_heap_alignment(),
                             max_gc_pause_sec,
                             max_gc_minor_pause_sec,
                             GCTimeRatio
                             );

  _perm_gen = new PSPermGen(perm_rs,
                            pg_align,
                            pg_cur_size,
                            pg_cur_size,
                            pg_max_size,
                            "perm", 2);

  assert(!UseAdaptiveGCBoundary ||
         (old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 3 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 3, _size_policy);
  _psh = this;

  // Set up the GCTaskManager
  _gc_task_manager = GCTaskManager::create(ParallelGCThreads);

  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
    return JNI_ENOMEM;
  }

  return JNI_OK;
}

void ParallelScavengeHeap::post_initialize() {
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseParallelOldGC) {
    PSParallelCompact::post_initialize();
  } else {
    PSMarkSweep::initialize();
  }
  PSPromotionManager::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  perm_gen()->update_counters();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
  return value;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}


size_t ParallelScavengeHeap::permanent_capacity() const {
  return perm_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::permanent_used() const {
  return perm_gen()->used_in_bytes();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  estimated -= perm_gen()->reserved().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

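// Note the distinction between the next two methods: is_in() tests the
// allocated portion of each generation, while is_in_reserved() tests the
// full reserved regions and so can return true for addresses that are
// reserved but not yet allocated.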
bool ParallelScavengeHeap::is_in(const void* p) const {
  if (young_gen()->is_in(p)) {
    return true;
  }

  if (old_gen()->is_in(p)) {
    return true;
  }

  if (perm_gen()->is_in(p)) {
    return true;
  }

  return false;
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  if (young_gen()->is_in_reserved(p)) {
    return true;
  }

  if (old_gen()->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen()->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// Static method
bool ParallelScavengeHeap::is_in_young(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
         "Must be ParallelScavengeHeap");

  PSYoungGen* young_gen = heap->young_gen();

  if (young_gen->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// Static method
bool ParallelScavengeHeap::is_in_old_or_perm(oop* p) {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
         "Must be ParallelScavengeHeap");

  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  if (old_gen->is_in_reserved(p)) {
    return true;
  }

  if (perm_gen->is_in_reserved(p)) {
    return true;
  }

  return false;
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection.  That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how you allocate memory without
// attempting garbage collection.  It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request.  This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions.  It should make every
// attempt to allocate the requested memory.

// Basic allocation policy.  Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously.  When that happens, only one VM operation will succeed,
// and the rest will not be executed.  For that reason, this method loops
// during failed allocation attempts.  If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock.  VM
    // operations also hold the heap lock during collections.  There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection.  When thread A gets the lock,
    // the collection count has already changed.  To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) If GC is locked out via GCLocker, young gen is full and
      //     the need for a GC already signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen.  For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out.  If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of individual objects, one at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed.  When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }
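    // The Heap_lock is released at the end of the scope above.  The VM
    // operation below re-checks the collection count in its prologue, so a
    // collection that happens in the window makes the operation a no-op
    // rather than a duplicate GC.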

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      *gc_overhead_limit_was_exceeded = size_policy()->gc_time_limit_exceeded();
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute?  If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
               "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        //   gc_time_limit_exceeded is set during a collection
        //   the collection fails to return enough space and an OOM is thrown
        //   the next GC is skipped because the gc_time_limit_exceeded
        //   flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever.  If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}

// Failed allocation policy.  Must be called from the VM thread, and
// only at a safepoint!  Note that this method has policy for allocation
// flow, and NOT collection policy.  So we do not check whether the gc
// time limit has been exceeded here; that is the responsibility of the
// heap-specific collection methods.  This method decides where to attempt
// allocations and when to attempt collections, but contains no
// collection-specific policy.
HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  size_t mark_sweep_invocation_count = total_invocations();

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure, scavenge and allocate in young gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  PSScavenge::invoke();
  HeapWord* result = young_gen()->allocate(size, is_tlab);

  // Second level allocation failure.
  //   Mark sweep and allocate in young generation.
  if (result == NULL) {
    // There is some chance the scavenge method decided to invoke mark_sweep.
    // Don't mark sweep twice if so.
    if (mark_sweep_invocation_count == total_invocations()) {
      invoke_full_gc(false);
      result = young_gen()->allocate(size, is_tlab);
    }
  }

  // Third level allocation failure.
  //   After mark sweep and young generation allocation failure,
  //   allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  // Fourth level allocation failure.  We're running out of memory.
  //   More complete mark sweep and allocate in young generation.
  if (result == NULL) {
    invoke_full_gc(true);
    result = young_gen()->allocate(size, is_tlab);
  }

  // Fifth level allocation failure.
  //   After more complete mark sweep, allocate in old generation.
  if (result == NULL && !is_tlab) {
    result = old_gen()->allocate(size, is_tlab);
  }

  return result;
}

//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock.  VM
    // operations also hold the heap lock during collections.  There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection.  When thread A gets the lock,
    // the collection count has already changed.  To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed.  When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_time_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      if (size_policy()->gc_time_limit_exceeded()) {
        size_policy()->set_gc_time_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate: "
            "return NULL because gc_time_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute?  If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
               "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_time_limit_exceeded
        // flag to avoid the following situation.
        //   gc_time_limit_exceeded is set during a collection
        //   the collection fails to return enough space and an OOM is thrown
        //   the next GC is skipped because the gc_time_limit_exceeded
        //   flag is set and another OOM is thrown
        if (op.result() == NULL) {
          size_policy()->set_gc_time_limit_exceeded(false);
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever.  If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=%d", loop_count, size);
    }
  } while (result == NULL);

  return result;
}

//
// This is the policy code for permanent allocations which have failed
// and require a collection.  Note that just as in failed_mem_allocate,
// we do not set collection policy, only where & when to allocate and
// collect.
HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  assert(!Universe::heap()->is_gc_active(), "not reentrant");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  assert(size > perm_gen()->free_in_words(), "Allocation should fail");

  // We assume (and assert!) that an allocation at this point will fail
  // unless we collect.

  // First level allocation failure.  Mark-sweep and allocate in perm gen.
  GCCauseSetter gccs(this, GCCause::_allocation_failure);
  invoke_full_gc(false);
  HeapWord* result = perm_gen()->allocate_permanent(size);

  // Second level allocation failure.  We're running out of memory.
  if (result == NULL) {
    invoke_full_gc(true);
    result = perm_gen()->allocate_permanent(size);
  }

  return result;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::unsafe_max_alloc() {
  return young_gen()->eden_space()->free_in_bytes();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

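// TLABs are always carved out of eden; the 'true' argument below marks the
// request as a TLAB allocation, so it is never redirected to the old gen
// (compare mem_allocate() above).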
HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
  return young_gen()->allocate(size, true);
}

void ParallelScavengeHeap::fill_all_tlabs(bool retire) {
  CollectedHeap::fill_all_tlabs(retire);
}

void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
  CollectedHeap::accumulate_statistics_all_tlabs();
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

// This method is used by System.gc() and JVMTI.
void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  unsigned int gc_count = 0;
  unsigned int full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock
    gc_count = Universe::heap()->total_collections();
    full_gc_count = Universe::heap()->total_full_collections();
  }

  VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
  VMThread::execute(&op);
}
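// The gc counts read under the Heap_lock above are passed to the VM
// operation; if another collection completes before the operation runs,
// the stale counts cause its prologue to fail and the now-redundant
// explicit GC is skipped.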

// This interface assumes that it's being called by the
// vm thread.  It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      invoke_full_gc(false);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere();
  }
}


void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
  perm_gen()->object_iterate(cl);
}

void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
  Unimplemented();
}

void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
  perm_gen()->object_iterate(cl);
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    if (Debugging) return NULL;  // called from find() in debug.cpp
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  } else if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return 0;
}

size_t ParallelScavengeHeap::block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}
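// Note: block_size() above assumes addr points at the start of a valid
// object (e.g. a value returned by block_start()); block_is_obj() holds
// exactly when block_start() maps an address to itself.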

jlong ParallelScavengeHeap::millis_since_last_gc() {
  return UseParallelOldGC ?
    PSParallelCompact::millis_since_last_gc() :
    PSMarkSweep::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

void ParallelScavengeHeap::print() const { print_on(tty); }

void ParallelScavengeHeap::print_on(outputStream* st) const {
  young_gen()->print_on(st);
  old_gen()->print_on(st);
  perm_gen()->print_on(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  PSScavenge::gc_task_manager()->threads_do(tc);
}

void ParallelScavengeHeap::print_gc_threads_on(outputStream* st) const {
  PSScavenge::gc_task_manager()->print_threads_on(st);
}

void ParallelScavengeHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    double time = PSScavenge::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time);
  }
  if (TraceGen1Time) {
    double time = PSMarkSweep::accumulated_time()->seconds();
    tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time);
  }
}


void ParallelScavengeHeap::verify(bool allow_dirty, bool silent) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    if (!silent) {
      gclog_or_tty->print("permanent ");
    }
    perm_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("tenured ");
    }
    old_gen()->verify(allow_dirty);

    if (!silent) {
      gclog_or_tty->print("eden ");
    }
    young_gen()->verify(allow_dirty);
  }
  if (!silent) {
    gclog_or_tty->print("ref_proc ");
  }
  ReferenceProcessor::verify();
}

void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

ParallelScavengeHeap* ParallelScavengeHeap::heap() {
  assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
  assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
  return _psh;
}

// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

#ifndef PRODUCT
void ParallelScavengeHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    young_gen()->record_spaces_top();
    old_gen()->record_spaces_top();
    perm_gen()->record_spaces_top();
  }
}

void ParallelScavengeHeap::gen_mangle_unused_area() {
  if (ZapUnusedHeapArea) {
    young_gen()->eden_space()->mangle_unused_area();
    young_gen()->to_space()->mangle_unused_area();
    young_gen()->from_space()->mangle_unused_area();
    old_gen()->object_space()->mangle_unused_area();
    perm_gen()->object_space()->mangle_unused_area();
  }
}
#endif