Thu, 21 Aug 2014 13:57:51 -0700
8046070: Class Data Sharing clean up and refactoring
Summary: Cleaned up CDS to be more configurable, maintainable and extensible
Reviewed-by: dholmes, coleenp, acorn, mchung
/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

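// The constructor records the policy and pre-allocates the SubTasksDone
// instance used to claim the parallel root-scanning tasks declared above;
// initialization fails hard if that allocation does not succeed.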
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_roots_tasks == NULL ||
      !_gen_process_roots_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
}

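// initialize() reserves one contiguous address range large enough for all
// generations, carves it up front-to-back (young first, old last), hooks up
// the remembered set and barrier set, and, under CMS, creates the concurrent
// collector. Returns JNI_ENOMEM if the reservation fails.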
jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t gen_alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(gen_alignment);
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs;

  size_t heap_alignment = collector_policy()->heap_alignment();

  heap_address = allocate(heap_alignment, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}


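// allocate() sums the maximum sizes of all generations (guarding against
// arithmetic overflow), counts the card-table covered regions they will
// need, and reserves the whole range in one call to Universe::reserve_heap().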
char* GenCollectedHeap::allocate(size_t alignment,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                              "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  assert(alignment % pageSize == 0, "Must be");

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % alignment == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                 SIZE_FORMAT, total_reserved, alignment));

  // Needed until the cardtable is fixed to have the right number
  // of covered regions.
  n_covered_regions += 2;

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;

  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  return heap_rs->base();
}


void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" of each generation at the given level and below.
void GenCollectedHeap::save_used_regions(int level) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}


#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWord's. Note that (for
// generational collectors) this means that those many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

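// attempt_allocation() tries each generation in turn, youngest first, and
// returns the first successful allocation; with first_only set, it gives up
// after the first generation that is willing to consider an allocation of
// this size at all.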
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}

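// do_collection() is the heart of the generational collection loop: it picks
// a starting level (searching downward for a generation whose full collection
// also covers the younger ones), collects each willing generation up to
// max_level, and then handles the bookkeeping that follows a collection
// (generation resizing, metaspace cleanup after a complete collection,
// memory-usage tracking, and the GC epilogues).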
void GenCollectedHeap::do_collection(bool full,
                                     bool clear_all_soft_refs,
                                     size_t size,
                                     bool is_tlab,
                                     int max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::used_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
    // so we can assume here that the next GC id is what we want.
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek());

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump(NULL);    // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        // FIXME: We should try to start the timing earlier to cover more of the GC pause
        // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
        // so we can assume here that the next GC id is what we want.
        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek());
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                              i,
                              _gens[i]->stat_record()->invocations,
                              size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          Universe::verify(" VerifyBeforeGC:");
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueuing) refs discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
            rp->setup_policy(do_clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          Universe::verify(" VerifyAfterGC:");
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print metaspace info for full GC with PrintGCDetails flag.
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
      // Resize the metaspace capacity after full collections
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(uint t) {
  SharedHeap::set_par_threads(t);
  _gen_process_roots_tasks->set_n_threads(t);
}

void GenCollectedHeap::
gen_process_roots(int level,
                  bool younger_gens_as_roots,
                  bool activate_scope,
                  SharedHeap::ScanningOption so,
                  OopsInGenClosure* not_older_gens,
                  OopsInGenClosure* weak_roots,
                  OopsInGenClosure* older_gens,
                  CLDClosure* cld_closure,
                  CLDClosure* weak_cld_closure,
                  CodeBlobClosure* code_closure) {

  // General roots.
  SharedHeap::process_roots(activate_scope, so,
                            not_older_gens, weak_roots,
                            cld_closure, weak_cld_closure,
                            code_closure);

  if (younger_gens_as_roots) {
    if (!_gen_process_roots_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_roots_tasks->all_tasks_completed();
}

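// Convenience overload: derives the code-blob closure from the kind of
// collection (young collections and adjust phases move objects, so code
// blobs need marking) and drops the weak closures when only strong roots
// are requested.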
void GenCollectedHeap::
gen_process_roots(int level,
                  bool younger_gens_as_roots,
                  bool activate_scope,
                  SharedHeap::ScanningOption so,
                  bool only_strong_roots,
                  OopsInGenClosure* not_older_gens,
                  OopsInGenClosure* older_gens,
                  CLDClosure* cld_closure) {

  const bool is_adjust_phase = !only_strong_roots && !younger_gens_as_roots;

  bool is_moving_collection = false;
  if (level == 0 || is_adjust_phase) {
    // young collections are always moving
    is_moving_collection = true;
  }

  MarkingCodeBlobClosure mark_code_closure(not_older_gens, is_moving_collection);
  CodeBlobClosure* code_closure = &mark_code_closure;

  gen_process_roots(level,
                    younger_gens_as_roots,
                    activate_scope, so,
                    not_older_gens, only_strong_roots ? NULL : not_older_gens,
                    older_gens,
                    cld_closure, only_strong_roots ? NULL : cld_closure,
                    code_closure);
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  SharedHeap::process_weak_roots(root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return true;
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else if (cause == GCCause::_wb_young_gc) {
    // minor collection for WhiteBox API
    collect(cause, 0);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, _n_gens - 1);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                 /* full */,
                clear_all_soft_refs  /* clear_all_soft_refs */,
                0                    /* size */,
                false                /* is_tlab */,
                local_max_level      /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                 /* full */,
                  clear_all_soft_refs  /* clear_all_soft_refs */,
                  0                    /* size */,
                  false                /* is_tlab */,
                  n_gens() - 1         /* max_level */);
  }
}

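// is_in_young() relies on the heap layout established in initialize(): the
// young generation is reserved below the old generation, so a simple address
// comparison against the old gen's reserved start suffices.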
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
  assert(result == _gens[0]->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p)));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
#ifndef ASSERT
  guarantee(VerifyBeforeGC      ||
            VerifyDuringGC      ||
            VerifyBeforeExit    ||
            VerifyDuringStartup ||
            PrintAssembly       ||
            tty->count() != 0   ||   // already printing
            VerifyAfterGC       ||
    VMError::fatal_error_in_progress(), "too expensive");

#endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  // Otherwise...
  return false;
}

#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}


HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_used();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size   /* size */,
                                               true   /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL. Removes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next = sorted;
    sorted = smallest;
  }
  list = sorted;
}

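// gather_scratch() lets every generation contribute scratch space for the
// requestor's copying work and hands the contributions back sorted by
// decreasing size, so the largest blocks are consumed first.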
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

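// generation_iterate() applies the closure to every generation, oldest first
// when old_to_young is set, youngest first otherwise.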
void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}

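// prepare_for_compaction() threads the compaction across both generations:
// the CompactPoint starts in the old generation and is carried over to the
// young generation, so young survivors are compacted into whatever room
// remains in the old gen before filling the young gen itself.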
void GenCollectedHeap::prepare_for_compaction() {
  guarantee(_n_gens == 2, "Wrong number of generations");
  Generation* old_gen = _gens[1];
  // Start by compacting into same gen.
  CompactPoint cp(old_gen);
  old_gen->prepare_for_compaction(&cp);
  Generation* young_gen = _gens[0];
  young_gen->prepare_for_compaction(&cp);
}

duke@435 | 1107 | GCStats* GenCollectedHeap::gc_stats(int level) const { |
duke@435 | 1108 | return _gens[level]->gc_stats(); |
duke@435 | 1109 | } |
duke@435 | 1110 | |
brutisso@3711 | 1111 | void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) { |
duke@435 | 1112 | for (int i = _n_gens-1; i >= 0; i--) { |
duke@435 | 1113 | Generation* g = _gens[i]; |
duke@435 | 1114 | if (!silent) { |
drchase@6680 | 1115 | gclog_or_tty->print("%s", g->name()); |
duke@435 | 1116 | gclog_or_tty->print(" "); |
duke@435 | 1117 | } |
brutisso@3711 | 1118 | g->verify(); |
duke@435 | 1119 | } |
duke@435 | 1120 | if (!silent) { |
duke@435 | 1121 | gclog_or_tty->print("remset "); |
duke@435 | 1122 | } |
duke@435 | 1123 | rem_set()->verify(); |
duke@435 | 1124 | } |
duke@435 | 1125 | |
duke@435 | 1126 | void GenCollectedHeap::print_on(outputStream* st) const { |
duke@435 | 1127 | for (int i = 0; i < _n_gens; i++) { |
duke@435 | 1128 | _gens[i]->print_on(st); |
duke@435 | 1129 | } |
coleenp@4037 | 1130 | MetaspaceAux::print_on(st); |
duke@435 | 1131 | } |
duke@435 | 1132 | |
duke@435 | 1133 | void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const { |
duke@435 | 1134 | if (workers() != NULL) { |
duke@435 | 1135 | workers()->threads_do(tc); |
duke@435 | 1136 | } |
jprovino@4542 | 1137 | #if INCLUDE_ALL_GCS |
duke@435 | 1138 | if (UseConcMarkSweepGC) { |
duke@435 | 1139 | ConcurrentMarkSweepThread::threads_do(tc); |
duke@435 | 1140 | } |
jprovino@4542 | 1141 | #endif // INCLUDE_ALL_GCS |
duke@435 | 1142 | } |
duke@435 | 1143 | |
duke@435 | 1144 | void GenCollectedHeap::print_gc_threads_on(outputStream* st) const { |
jprovino@4542 | 1145 | #if INCLUDE_ALL_GCS |
duke@435 | 1146 | if (UseParNewGC) { |
duke@435 | 1147 | workers()->print_worker_threads_on(st); |
duke@435 | 1148 | } |
duke@435 | 1149 | if (UseConcMarkSweepGC) { |
duke@435 | 1150 | ConcurrentMarkSweepThread::print_all_on(st); |
duke@435 | 1151 | } |
jprovino@4542 | 1152 | #endif // INCLUDE_ALL_GCS |
duke@435 | 1153 | } |
duke@435 | 1154 | |
stefank@4904 | 1155 | void GenCollectedHeap::print_on_error(outputStream* st) const { |
stefank@4904 | 1156 | this->CollectedHeap::print_on_error(st); |
stefank@4904 | 1157 | |
stefank@4904 | 1158 | #if INCLUDE_ALL_GCS |
stefank@4904 | 1159 | if (UseConcMarkSweepGC) { |
stefank@4904 | 1160 | st->cr(); |
stefank@4904 | 1161 | CMSCollector::print_on_error(st); |
stefank@4904 | 1162 | } |
stefank@4904 | 1163 | #endif // INCLUDE_ALL_GCS |
stefank@4904 | 1164 | } |
stefank@4904 | 1165 | |
duke@435 | 1166 | void GenCollectedHeap::print_tracing_info() const { |
duke@435 | 1167 | if (TraceGen0Time) { |
duke@435 | 1168 | get_gen(0)->print_summary_info(); |
duke@435 | 1169 | } |
duke@435 | 1170 | if (TraceGen1Time) { |
duke@435 | 1171 | get_gen(1)->print_summary_info(); |
duke@435 | 1172 | } |
duke@435 | 1173 | } |
duke@435 | 1174 | |
duke@435 | 1175 | void GenCollectedHeap::print_heap_change(size_t prev_used) const { |
duke@435 | 1176 | if (PrintGCDetails && Verbose) { |
duke@435 | 1177 | gclog_or_tty->print(" " SIZE_FORMAT |
duke@435 | 1178 | "->" SIZE_FORMAT |
duke@435 | 1179 | "(" SIZE_FORMAT ")", |
duke@435 | 1180 | prev_used, used(), capacity()); |
duke@435 | 1181 | } else { |
duke@435 | 1182 | gclog_or_tty->print(" " SIZE_FORMAT "K" |
duke@435 | 1183 | "->" SIZE_FORMAT "K" |
duke@435 | 1184 | "(" SIZE_FORMAT "K)", |
duke@435 | 1185 | prev_used / K, used() / K, capacity() / K); |
duke@435 | 1186 | } |
duke@435 | 1187 | } |
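
// Example (editorial): with both -XX:+PrintGCDetails and -XX:+Verbose the
// three figures are printed in raw bytes; otherwise in kilobytes, e.g.
// " 8192K->1024K(16384K)" for used-before -> used-after (current capacity).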
duke@435 | 1188 | |
duke@435 | 1189 | class GenGCPrologueClosure: public GenCollectedHeap::GenClosure { |
duke@435 | 1190 | private: |
duke@435 | 1191 | bool _full; |
duke@435 | 1192 | public: |
duke@435 | 1193 | void do_generation(Generation* gen) { |
duke@435 | 1194 | gen->gc_prologue(_full); |
duke@435 | 1195 | } |
duke@435 | 1196 | GenGCPrologueClosure(bool full) : _full(full) {}
duke@435 | 1197 | }; |
duke@435 | 1198 | |
duke@435 | 1199 | void GenCollectedHeap::gc_prologue(bool full) { |
duke@435 | 1200 | assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
duke@435 | 1201 | |
duke@435 | 1202 | always_do_update_barrier = false; |
duke@435 | 1203 | // Fill TLAB's and such |
duke@435 | 1204 | CollectedHeap::accumulate_statistics_all_tlabs(); |
duke@435 | 1205 | ensure_parsability(true); // retire TLABs |
duke@435 | 1206 | |
duke@435 | 1207 | // Walk generations |
duke@435 | 1208 | GenGCPrologueClosure blk(full); |
duke@435 | 1209 | generation_iterate(&blk, false); // not old-to-young. |
duke@435 | 1210 | }
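
// Editorial note: the prologue first retires all TLABs so the heap is
// parsable, then walks the generations young-to-old (old_to_young == false),
// letting each one do its per-collection setup.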
duke@435 | 1211 | |
duke@435 | 1212 | class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure { |
duke@435 | 1213 | private: |
duke@435 | 1214 | bool _full; |
duke@435 | 1215 | public: |
duke@435 | 1216 | void do_generation(Generation* gen) { |
duke@435 | 1217 | gen->gc_epilogue(_full); |
duke@435 | 1218 | } |
duke@435 | 1219 | GenGCEpilogueClosure(bool full) : _full(full) {}
duke@435 | 1220 | }; |
duke@435 | 1221 | |
duke@435 | 1222 | void GenCollectedHeap::gc_epilogue(bool full) { |
duke@435 | 1223 | #ifdef COMPILER2 |
duke@435 | 1224 | assert(DerivedPointerTable::is_empty(), "derived pointer present"); |
duke@435 | 1225 | size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr())); |
duke@435 | 1226 | guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps"); |
duke@435 | 1227 | #endif /* COMPILER2 */ |
duke@435 | 1228 | |
duke@435 | 1229 | resize_all_tlabs(); |
duke@435 | 1230 | |
duke@435 | 1231 | GenGCEpilogueClosure blk(full); |
duke@435 | 1232 | generation_iterate(&blk, false); // not old-to-young. |
duke@435 | 1233 | |
jcoomes@2996 | 1234 | if (!CleanChunkPoolAsync) { |
jcoomes@2996 | 1235 | Chunk::clean_chunk_pool(); |
jcoomes@2996 | 1236 | } |
jcoomes@2996 | 1237 | |
coleenp@4037 | 1238 | MetaspaceCounters::update_performance_counters(); |
ehelin@5531 | 1239 | CompressedClassSpaceCounters::update_performance_counters(); |
coleenp@4037 | 1240 | |
duke@435 | 1241 | always_do_update_barrier = UseConcMarkSweepGC; |
duke@435 | 1242 | }
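
// Editorial note: the epilogue mirrors the prologue: each generation gets its
// per-collection teardown, TLAB sizes are recomputed, and the update-barrier
// flag is re-armed only when CMS is in use, since only the concurrent
// collector needs store barriers outside of a GC pause.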
duke@435 | 1243 | |
jmasa@698 | 1244 | #ifndef PRODUCT |
jmasa@698 | 1245 | class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure { |
jmasa@698 | 1247 | public:
jmasa@698 | 1248 | void do_generation(Generation* gen) { |
jmasa@698 | 1249 | gen->record_spaces_top(); |
jmasa@698 | 1250 | } |
jmasa@698 | 1251 | }; |
jmasa@698 | 1252 | |
jmasa@698 | 1253 | void GenCollectedHeap::record_gen_tops_before_GC() { |
jmasa@698 | 1254 | if (ZapUnusedHeapArea) { |
jmasa@698 | 1255 | GenGCSaveTopsBeforeGCClosure blk; |
jmasa@698 | 1256 | generation_iterate(&blk, false); // not old-to-young. |
jmasa@698 | 1257 | } |
jmasa@698 | 1258 | } |
jmasa@698 | 1259 | #endif // not PRODUCT |
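
// Editorial note: recording each space's top before a GC lets debug builds
// mangle (zap) the region between the old and new tops afterwards, which
// helps catch reads from memory that a collection has freed; hence the
// ZapUnusedHeapArea guard.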
jmasa@698 | 1260 | |
duke@435 | 1261 | class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure { |
duke@435 | 1262 | public: |
duke@435 | 1263 | void do_generation(Generation* gen) { |
duke@435 | 1264 | gen->ensure_parsability(); |
duke@435 | 1265 | } |
duke@435 | 1266 | }; |
duke@435 | 1267 | |
duke@435 | 1268 | void GenCollectedHeap::ensure_parsability(bool retire_tlabs) { |
duke@435 | 1269 | CollectedHeap::ensure_parsability(retire_tlabs); |
duke@435 | 1270 | GenEnsureParsabilityClosure ep_cl; |
duke@435 | 1271 | generation_iterate(&ep_cl, false); |
duke@435 | 1272 | } |
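
// Editorial note: the base class retires TLABs (when requested); the closure
// then gives every generation a chance to make its own spaces safely
// iterable, e.g. by filling any unused gaps with dummy objects.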
duke@435 | 1273 | |
brutisso@5516 | 1274 | oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen, |
duke@435 | 1275 | oop obj, |
coleenp@548 | 1276 | size_t obj_size) { |
brutisso@5516 | 1277 | guarantee(old_gen->level() == 1, "We only get here with an old generation"); |
duke@435 | 1278 | assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
duke@435 | 1279 | HeapWord* result = NULL; |
duke@435 | 1280 | |
brutisso@5516 | 1281 | result = old_gen->expand_and_allocate(obj_size, false); |
duke@435 | 1282 | |
duke@435 | 1283 | if (result != NULL) { |
duke@435 | 1284 | Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size); |
duke@435 | 1285 | } |
duke@435 | 1286 | return oop(result); |
duke@435 | 1287 | } |
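
// Editorial note: this is the last-ditch path taken when a promotion during a
// young collection fails. The old generation is asked to expand and hand out
// a block big enough for the object; on success the object's words are copied
// over verbatim, otherwise NULL is returned (as an oop) and the caller must
// recover from the failed promotion.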
duke@435 | 1288 | |
duke@435 | 1289 | class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure { |
duke@435 | 1290 | jlong _time; // in ms |
duke@435 | 1291 | jlong _now; // in ms |
duke@435 | 1292 | |
duke@435 | 1293 | public: |
duke@435 | 1294 | GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { } |
duke@435 | 1295 | |
duke@435 | 1296 | jlong time() { return _time; } |
duke@435 | 1297 | |
duke@435 | 1298 | void do_generation(Generation* gen) { |
duke@435 | 1299 | _time = MIN2(_time, gen->time_of_last_gc(_now)); |
duke@435 | 1300 | } |
duke@435 | 1301 | }; |
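
// Editorial note: MIN2 accumulates the minimum across the generations, so
// after the walk _time holds the *oldest* last-GC timestamp, i.e. the time at
// which the least recently collected generation was last collected.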
duke@435 | 1302 | |
duke@435 | 1303 | jlong GenCollectedHeap::millis_since_last_gc() { |
johnc@3339 | 1304 | // We need a monotonically non-decreasing time in ms but
johnc@3339 | 1305 | // os::javaTimeMillis() does not guarantee monotonicity. |
johnc@3339 | 1306 | jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; |
duke@435 | 1307 | GenTimeOfLastGCClosure tolgc_cl(now); |
duke@435 | 1308 | // Iterate over the generations to find the oldest (minimum)
duke@435 | 1309 | // time at which any generation was last collected.
duke@435 | 1310 | generation_iterate(&tolgc_cl, false); |
johnc@3339 | 1311 | |
johnc@3339 | 1312 | // javaTimeNanos() is guaranteed to be monotonically non-decreasing |
johnc@3339 | 1313 | // provided the underlying platform provides such a time source |
johnc@3339 | 1314 | // (and it is bug free). So we still have to guard against getting |
johnc@3339 | 1315 | // back a time later than 'now'. |
duke@435 | 1316 | jlong retVal = now - tolgc_cl.time(); |
duke@435 | 1317 | if (retVal < 0) { |
drchase@6680 | 1318 | NOT_PRODUCT(warning("time warp: " INT64_FORMAT, (int64_t) retVal);)
duke@435 | 1319 | return 0; |
duke@435 | 1320 | } |
duke@435 | 1321 | return retVal; |
duke@435 | 1322 | } |
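
// Editorial sketch (illustrative): the guard above matters because 'now' and
// a generation's time_of_last_gc() come from separate javaTimeNanos() reads,
// so the reported last-GC time can land slightly ahead of 'now', e.g.
//
//   jlong now = 1000;          // ms, sampled first
//   jlong last_gc = 1002;      // ms, reported by a generation slightly later
//   jlong ret = now - last_gc; // -2: nonsensical, so it is clamped to 0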