Mon, 26 Jan 2009 12:47:21 -0800
6786503: Overflow list performance can be improved
Summary: Avoid overflow list walk in CMS & ParNew when it is unnecessary. Fix a couple of correctness issues, including a C-heap leak, in ParNew at the intersection of promotion failure, work queue overflow and object array chunking. Add stress testing option and related assertion checking.
Reviewed-by: jmasa
/*
 * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_genCollectedHeap.cpp.incl"

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};
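
// Illustrative aside (not part of the original file): each enumerator above
// names a strong-root scanning task that exactly one GC worker thread should
// execute per collection. A minimal sketch of the claiming pattern, assuming
// only the SubTasksDone calls already used in this file (is_task_claimed and
// all_tasks_completed); scan_younger_gens() is a hypothetical helper:
//
//   void worker_scan_strong_roots(SubTasksDone* tasks) {
//     if (!tasks->is_task_claimed(GCH_PS_younger_gens)) {
//       scan_younger_gens();          // done by the first worker to claim it
//     }
//     tasks->all_tasks_completed();   // every worker checks in when finished
//   }
//
// gen_process_strong_roots() below uses exactly this pattern.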

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
  _preloading_shared_classes = false;
}

jint GenCollectedHeap::initialize() {
  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();
  PermanentGenerationSpec *perm_gen_spec =
                                collector_policy()->permanent_generation();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }
  perm_gen_spec->align(alignment);

  // If we are dumping the heap, then allocate a wasted block of address
  // space in order to push the heap to a lower address. This extra
  // address range allows for other (or larger) libraries to be loaded
  // without them occupying the space required for the shared spaces.

  if (DumpSharedSpaces) {
    uintx reserved = 0;
    uintx block_size = 64*1024*1024;
    while (reserved < SharedDummyBlockSize) {
      char* dummy = os::reserve_memory(block_size);
      reserved += block_size;
    }
  }
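  // Note (added): the dummy blocks reserved above are, as far as we can tell,
  // intentionally never released; they only need to keep this address range
  // occupied while the shared archive is dumped, and the dumping VM exits
  // shortly afterwards.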

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (UseSharedSpaces) {
    if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
      if (heap_rs.is_reserved()) {
        heap_rs.release();
      }
      FileMapInfo* mapinfo = FileMapInfo::current_info();
      mapinfo->fail_continue("Unable to reserve shared region.");
      allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
               &heap_rs);
    }
  }

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
                                           - perm_gen_spec->misc_code_size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());
  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
                                               UseSharedSpaces, UseSharedSpaces);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());

  clear_incremental_collection_will_fail();
  clear_last_incremental_collection_failed();

#ifndef SERIALGC
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // SERIALGC

  return JNI_OK;
}


char* GenCollectedHeap::allocate(size_t alignment,
                                 PermanentGenerationSpec* perm_gen_spec,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                              "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % pageSize == 0, "Gen size");
  total_reserved += perm_gen_spec->max_size();
  assert(total_reserved % pageSize == 0, "Perm Gen size");

  if (total_reserved < perm_gen_spec->max_size()) {
    vm_exit_during_initialization(overflow_msg);
  }
  n_covered_regions += perm_gen_spec->n_covered_regions();

  // Add the size of the data area which shares the same reserved area
  // as the heap, but which is not actually part of the heap.
  size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();

  total_reserved += s;
  if (total_reserved < s) {
    vm_exit_during_initialization(overflow_msg);
  }
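
  // Aside (added): the checks above use the standard unsigned-overflow idiom.
  // After "total_reserved += x", the sum can be smaller than x only if the
  // addition wrapped around. For example, with a 32-bit size_t,
  // 0xFFFFF000 + 0x2000 wraps to 0x1000, and 0x1000 < 0x2000.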

  if (UseLargePages) {
    assert(total_reserved != 0, "total_reserved cannot be 0");
    total_reserved = round_to(total_reserved, os::large_page_size());
    if (total_reserved < os::large_page_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
  }

  // Calculate the address at which the heap must reside in order for
  // the shared data to be at the required address.

  char* heap_address;
  if (UseSharedSpaces) {

    // Calculate the address of the first word beyond the heap.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    int lr = CompactingPermGenGen::n_regions - 1;
    size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
    heap_address = mapinfo->region_base(lr) + capacity;

    // Calculate the address of the first word of the heap.
    heap_address -= total_reserved;
  } else {
    heap_address = NULL;  // any address will do.
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = ReservedHeapSpace(total_reserved, alignment,
                               UseLargePages, heap_address);

  return heap_address;
}


void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
    (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations level and lower,
// and, if perm is true, for perm gen.
void GenCollectedHeap::save_used_regions(int level, bool perm) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
  if (perm) {
    perm_gen()->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}
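
// Illustrative aside (not part of the original file): since both updaters
// above notify on FullGCCount_lock, a thread can block until at least one
// more full collection completes. A minimal sketch of such a waiter, assuming
// only the monitor API used above plus an accessor for
// _full_collections_completed (the accessor name here is hypothetical):
//
//   void wait_for_full_gc_completion(GenCollectedHeap* gch,
//                                    unsigned int count_before) {
//     MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
//     while (gch->full_collections_completed() <= count_before) {
//       ml.wait();   // woken by notify_all() in the updaters above
//     }
//   }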


#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that that many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool is_large_noref,
                                         bool is_tlab,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               is_tlab,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return (cause == GCCause::_java_lang_system_gc ||
          cause == GCCause::_gc_locker) &&
         UseConcMarkSweepGC && ExplicitGCInvokesConcurrent;
}

void GenCollectedHeap::do_collection(bool full,
                                     bool clear_all_soft_refs,
                                     size_t size,
                                     bool is_tlab,
                                     int max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const size_t perm_prev_used = perm_gen()->used();

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
    if (Verbose) {
      gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
    }
  }

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_str = "GC ";
    if (complete) {
      GCCause::Cause cause = gc_cause();
      if (cause == GCCause::_java_lang_system_gc) {
        gc_cause_str = "Full GC (System) ";
      } else {
        gc_cause_str = "Full GC ";
      }
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        // Timer for individual generations. Last argument is false: no CR
        TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                              i,
                              _gens[i]->stat_record()->invocations,
                              size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          gclog_or_tty->print(" VerifyBeforeGC:");
          Universe::verify(true);
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) ref discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->verify_no_references_recorded();
            rp->enable_discovery();
            rp->setup_policy(clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          gclog_or_tty->print(" VerifyAfterGC:");
          Universe::verify(false);
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print perm gen info for full GC with PrintGCDetails flag.
      if (complete) {
        print_perm_heap_change(perm_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Ask the permanent generation to adjust size for full collections
      perm()->compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
    tty->print_cr("Stopping after GC #%d", ExitAfterGCNum);
    vm_exit(-1);
  }
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(int t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_par_threads(t);
}

class AssertIsPermClosure: public OopClosure {
public:
  void do_oop(oop* p) {
    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
  }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertIsPermClosure assert_is_perm_closure;

void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool collecting_perm_gen,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* older_gens,
                         OopsInGenClosure* not_older_gens) {
  // General strong roots.
  SharedHeap::process_strong_roots(collecting_perm_gen, so,
                                   not_older_gens, older_gens);

  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              OopClosure* non_root_closure) {
  SharedHeap::process_weak_roots(root_closure, non_root_closure);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
  perm_gen()->oop_since_save_marks_iterate##nv_suffix(older);           \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
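
// Illustrative aside (not part of the original file): for a hypothetical
// (OopClosureType, nv_suffix) pair of (ExampleClosure, _nv), the macro above
// expands to:
//
//   void GenCollectedHeap::
//   oop_since_save_marks_iterate(int level,
//                                ExampleClosure* cur,
//                                ExampleClosure* older) {
//     _gens[level]->oop_since_save_marks_iterate_nv(cur);
//     for (int i = level+1; i < n_gens(); i++) {
//       _gens[i]->oop_since_save_marks_iterate_nv(older);
//     }
//     perm_gen()->oop_since_save_marks_iterate_nv(older);
//   }
//
// i.e. the generation at "level" is walked with "cur" and every older
// generation (plus the perm gen) with "older", using the non-virtual ("_nv")
// variant of the walker where the closure type supports it.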

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return perm_gen()->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

size_t GenCollectedHeap::unsafe_max_alloc() {
  return _gens[0]->unsafe_max_alloc_nogc();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#ifndef SERIALGC
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // SERIALGC
    ShouldNotReachHere();
#endif // SERIALGC
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump: {
      HandleMark hm;
      do_full_collection(false,        // don't clear all soft refs
                         n_gens() - 1);
      break;
    }
    default: // XXX FIX ME
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  if (_preloading_shared_classes) {
    warning("\nThe permanent generation is not large enough to preload "
            "requested classes.\nUse -XX:PermSize= to increase the initial "
            "size of the permanent generation.\n");
    vm_exit(2);
  }
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#ifndef SERIALGC
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
         _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // SERIALGC


void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail() &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                /* full */,
                clear_all_soft_refs /* clear_all_soft_refs */,
                0                   /* size */,
                false               /* is_tlab */,
                local_max_level     /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail()) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                /* full */,
                  clear_all_soft_refs /* clear_all_soft_refs */,
                  0                   /* size */,
                  false               /* is_tlab */,
                  n_gens() - 1        /* max_level */);
  }
}

// Returns "TRUE" iff "p" points into the allocated area of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
#ifndef ASSERT
  guarantee(VerifyBeforeGC   ||
            VerifyDuringGC   ||
            VerifyBeforeExit ||
            VerifyAfterGC, "too expensive");
#endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  if (_perm_gen->as_gen()->is_in(p)) return true;
  // Otherwise...
  return false;
}

// Returns "TRUE" iff "p" points into the allocated area of the heap.
bool GenCollectedHeap::is_in_youngest(void* p) {
  return _gens[0]->is_in(p);
}

void GenCollectedHeap::oop_iterate(OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(mr, cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
  perm_gen()->object_iterate(cl);
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
  perm_gen()->safe_object_iterate(cl);
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate_since_last_GC(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  Space* res = perm_gen()->space_containing(addr);
  if (res != NULL) return res;
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}


HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_start(addr);
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    assert(perm_gen()->is_in(addr),
           "addr should be in allocated part of perm gen");
    return perm_gen()->block_size(addr);
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  if (perm_gen()->is_in_reserved(addr)) {
    return perm_gen()->block_is_obj(addr);
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate(size  /* size */,
                                  false /* is_large_noref */,
                                  true  /* is_tlab */,
                                  &gc_overhead_limit_was_exceeded);
  return result;
}

// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      =  cur->next;
  }
  smallest = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next = sorted;
    sorted = smallest;
  }
  list = sorted;
}
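
// Aside (added): this is a selection sort over a singly linked list. Each
// pass unlinks the smallest remaining block and pushes it onto the front of
// "sorted", so the smallest block ends up last. For example, blocks of
// 5, 3 and 8 words are rebuilt as:
//
//   pass 1: remove 3  ->  sorted: 3
//   pass 2: remove 5  ->  sorted: 5 -> 3
//   pass 3: remove 8  ->  sorted: 8 -> 5 -> 3   (decreasing, as required)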

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

size_t GenCollectedHeap::large_typearray_limit() {
  return gen_policy()->large_typearray_limit();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
  perm_gen()->prepare_for_verify();
}


void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
  perm_gen()->space_iterate(cl, true);
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {  // skip perm gen
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
  perm_gen()->save_marks();
}

void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
    _gens[i]->compute_new_size();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}


void GenCollectedHeap::prepare_for_compaction() {
  Generation* scanning_gen = _gens[_n_gens-1];
  // Start by compacting into same gen.
  CompactPoint cp(scanning_gen, NULL, NULL);
  while (scanning_gen != NULL) {
    scanning_gen->prepare_for_compaction(&cp);
    scanning_gen = prev_gen(scanning_gen);
  }
}
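
// Aside (added): the loop above walks the generations from oldest to
// youngest, threading a single CompactPoint through all of them, so that
// (as we understand it) surviving objects are given forwarding addresses
// that slide them toward the oldest generation where space allows.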
duke@435 | 1156 | |
duke@435 | 1157 | GCStats* GenCollectedHeap::gc_stats(int level) const { |
duke@435 | 1158 | return _gens[level]->gc_stats(); |
duke@435 | 1159 | } |
duke@435 | 1160 | |
duke@435 | 1161 | void GenCollectedHeap::verify(bool allow_dirty, bool silent) { |
duke@435 | 1162 | if (!silent) { |
duke@435 | 1163 | gclog_or_tty->print("permgen "); |
duke@435 | 1164 | } |
duke@435 | 1165 | perm_gen()->verify(allow_dirty); |
duke@435 | 1166 | for (int i = _n_gens-1; i >= 0; i--) { |
duke@435 | 1167 | Generation* g = _gens[i]; |
duke@435 | 1168 | if (!silent) { |
duke@435 | 1169 | gclog_or_tty->print("%s ", g->name());
duke@435 | 1171 | } |
duke@435 | 1172 | g->verify(allow_dirty); |
duke@435 | 1173 | } |
duke@435 | 1174 | if (!silent) { |
duke@435 | 1175 | gclog_or_tty->print("remset "); |
duke@435 | 1176 | } |
duke@435 | 1177 | rem_set()->verify(); |
duke@435 | 1178 | if (!silent) { |
duke@435 | 1179 | gclog_or_tty->print("ref_proc "); |
duke@435 | 1180 | } |
duke@435 | 1181 | ReferenceProcessor::verify(); |
duke@435 | 1182 | } |
duke@435 | 1183 | |
duke@435 | 1184 | void GenCollectedHeap::print() const { print_on(tty); } |
duke@435 | 1185 | void GenCollectedHeap::print_on(outputStream* st) const { |
duke@435 | 1186 | for (int i = 0; i < _n_gens; i++) { |
duke@435 | 1187 | _gens[i]->print_on(st); |
duke@435 | 1188 | } |
duke@435 | 1189 | perm_gen()->print_on(st); |
duke@435 | 1190 | } |
duke@435 | 1191 | |
duke@435 | 1192 | void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const { |
duke@435 | 1193 | if (workers() != NULL) { |
duke@435 | 1194 | workers()->threads_do(tc); |
duke@435 | 1195 | } |
duke@435 | 1196 | #ifndef SERIALGC |
duke@435 | 1197 | if (UseConcMarkSweepGC) { |
duke@435 | 1198 | ConcurrentMarkSweepThread::threads_do(tc); |
duke@435 | 1199 | } |
duke@435 | 1200 | #endif // SERIALGC |
duke@435 | 1201 | } |
duke@435 | 1202 | |
duke@435 | 1203 | void GenCollectedHeap::print_gc_threads_on(outputStream* st) const { |
duke@435 | 1204 | #ifndef SERIALGC |
duke@435 | 1205 | if (UseParNewGC) { |
duke@435 | 1206 | workers()->print_worker_threads_on(st); |
duke@435 | 1207 | } |
duke@435 | 1208 | if (UseConcMarkSweepGC) { |
duke@435 | 1209 | ConcurrentMarkSweepThread::print_all_on(st); |
duke@435 | 1210 | } |
duke@435 | 1211 | #endif // SERIALGC |
duke@435 | 1212 | } |
duke@435 | 1213 | |
duke@435 | 1214 | void GenCollectedHeap::print_tracing_info() const { |
duke@435 | 1215 | if (TraceGen0Time) { |
duke@435 | 1216 | get_gen(0)->print_summary_info(); |
duke@435 | 1217 | } |
duke@435 | 1218 | if (TraceGen1Time) { |
duke@435 | 1219 | get_gen(1)->print_summary_info(); |
duke@435 | 1220 | } |
duke@435 | 1221 | } |
duke@435 | 1222 | |
duke@435 | 1223 | void GenCollectedHeap::print_heap_change(size_t prev_used) const { |
duke@435 | 1224 | if (PrintGCDetails && Verbose) { |
duke@435 | 1225 | gclog_or_tty->print(" " SIZE_FORMAT |
duke@435 | 1226 | "->" SIZE_FORMAT |
duke@435 | 1227 | "(" SIZE_FORMAT ")", |
duke@435 | 1228 | prev_used, used(), capacity()); |
duke@435 | 1229 | } else { |
duke@435 | 1230 | gclog_or_tty->print(" " SIZE_FORMAT "K" |
duke@435 | 1231 | "->" SIZE_FORMAT "K" |
duke@435 | 1232 | "(" SIZE_FORMAT "K)", |
duke@435 | 1233 | prev_used / K, used() / K, capacity() / K); |
duke@435 | 1234 | } |
duke@435 | 1235 | } |
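// With the default (non-Verbose) PrintGCDetails output this prints,
// for example, " 8192K->1024K(65536K)": used before the collection,
// used after it, and the current capacity.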
duke@435 | 1236 | |
duke@435 | 1237 | // Print perm gen info with the PrintGCDetails flag.
duke@435 | 1238 | void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const { |
duke@435 | 1239 | gclog_or_tty->print(", [%s :", perm_gen()->short_name()); |
duke@435 | 1240 | perm_gen()->print_heap_change(perm_prev_used); |
duke@435 | 1241 | gclog_or_tty->print("]"); |
duke@435 | 1242 | } |
duke@435 | 1243 | |
duke@435 | 1244 | class GenGCPrologueClosure: public GenCollectedHeap::GenClosure { |
duke@435 | 1245 | private: |
duke@435 | 1246 | bool _full; |
duke@435 | 1247 | public: |
duke@435 | 1248 | void do_generation(Generation* gen) { |
duke@435 | 1249 | gen->gc_prologue(_full); |
duke@435 | 1250 | } |
duke@435 | 1251 | GenGCPrologueClosure(bool full) : _full(full) {}
duke@435 | 1252 | }; |
duke@435 | 1253 | |
duke@435 | 1254 | void GenCollectedHeap::gc_prologue(bool full) { |
duke@435 | 1255 | assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); |
duke@435 | 1256 | |
duke@435 | 1257 | always_do_update_barrier = false; |
duke@435 | 1258 | // Fill TLAB's and such |
duke@435 | 1259 | CollectedHeap::accumulate_statistics_all_tlabs(); |
duke@435 | 1260 | ensure_parsability(true); // retire TLABs |
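// The statistics pass above must come before retirement: retiring a
// TLAB resets the per-thread allocation bookkeeping that
// accumulate_statistics_all_tlabs() reads.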
duke@435 | 1261 | |
duke@435 | 1262 | // Call allocation profiler |
duke@435 | 1263 | AllocationProfiler::iterate_since_last_gc(); |
duke@435 | 1264 | // Walk generations |
duke@435 | 1265 | GenGCPrologueClosure blk(full); |
duke@435 | 1266 | generation_iterate(&blk, false); // not old-to-young. |
duke@435 | 1267 | perm_gen()->gc_prologue(full); |
duke@435 | 1268 | }
duke@435 | 1269 | |
duke@435 | 1270 | class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure { |
duke@435 | 1271 | private: |
duke@435 | 1272 | bool _full; |
duke@435 | 1273 | public: |
duke@435 | 1274 | void do_generation(Generation* gen) { |
duke@435 | 1275 | gen->gc_epilogue(_full); |
duke@435 | 1276 | } |
duke@435 | 1277 | GenGCEpilogueClosure(bool full) : _full(full) {}
duke@435 | 1278 | }; |
duke@435 | 1279 | |
duke@435 | 1280 | void GenCollectedHeap::gc_epilogue(bool full) { |
duke@435 | 1281 | // Remember if a partial collection of the heap failed, and |
duke@435 | 1282 | // we did a complete collection. |
duke@435 | 1283 | if (full && incremental_collection_will_fail()) { |
duke@435 | 1284 | set_last_incremental_collection_failed(); |
duke@435 | 1285 | } else { |
duke@435 | 1286 | clear_last_incremental_collection_failed(); |
duke@435 | 1287 | } |
duke@435 | 1288 | // Clear the flag, if set; the generation gc_epilogues will set the |
duke@435 | 1289 | // flag again if the condition persists despite the collection. |
duke@435 | 1290 | clear_incremental_collection_will_fail(); |
duke@435 | 1291 | |
duke@435 | 1292 | #ifdef COMPILER2 |
duke@435 | 1293 | assert(DerivedPointerTable::is_empty(), "derived pointer present"); |
duke@435 | 1294 | size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr())); |
duke@435 | 1295 | guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps"); |
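// The guarantee above checks that the gap between the heap's end and
// the top of the address space exceeds FastAllocateSizeLimit, so that
// compiled inline allocation, which bumps 'top' before testing it
// against 'end', cannot wrap around the address space.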
duke@435 | 1296 | #endif /* COMPILER2 */ |
duke@435 | 1297 | |
duke@435 | 1298 | resize_all_tlabs(); |
duke@435 | 1299 | |
duke@435 | 1300 | GenGCEpilogueClosure blk(full); |
duke@435 | 1301 | generation_iterate(&blk, false); // not old-to-young. |
duke@435 | 1302 | perm_gen()->gc_epilogue(full); |
duke@435 | 1303 | |
duke@435 | 1304 | always_do_update_barrier = UseConcMarkSweepGC; |
duke@435 | 1305 | }
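// always_do_update_barrier was turned off in gc_prologue() and is
// restored here; it is left on only for CMS, which needs mutator
// reference updates between collections to be visible to its
// concurrent phases.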
duke@435 | 1306 | |
jmasa@698 | 1307 | #ifndef PRODUCT |
jmasa@698 | 1308 | class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure { |
jmasa@698 | 1309 | public:
jmasa@698 | 1311 | void do_generation(Generation* gen) { |
jmasa@698 | 1312 | gen->record_spaces_top(); |
jmasa@698 | 1313 | } |
jmasa@698 | 1314 | }; |
jmasa@698 | 1315 | |
jmasa@698 | 1316 | void GenCollectedHeap::record_gen_tops_before_GC() { |
jmasa@698 | 1317 | if (ZapUnusedHeapArea) { |
jmasa@698 | 1318 | GenGCSaveTopsBeforeGCClosure blk; |
jmasa@698 | 1319 | generation_iterate(&blk, false); // not old-to-young. |
jmasa@698 | 1320 | perm_gen()->record_spaces_top(); |
jmasa@698 | 1321 | } |
jmasa@698 | 1322 | } |
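// Recording each space's top before a collection supports the
// ZapUnusedHeapArea debugging mode: the space above the recorded top
// can then be mangled, and later checked, to catch stray writes into
// unused heap.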
jmasa@698 | 1323 | #endif // not PRODUCT |
jmasa@698 | 1324 | |
duke@435 | 1325 | class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure { |
duke@435 | 1326 | public: |
duke@435 | 1327 | void do_generation(Generation* gen) { |
duke@435 | 1328 | gen->ensure_parsability(); |
duke@435 | 1329 | } |
duke@435 | 1330 | }; |
duke@435 | 1331 | |
duke@435 | 1332 | void GenCollectedHeap::ensure_parsability(bool retire_tlabs) { |
duke@435 | 1333 | CollectedHeap::ensure_parsability(retire_tlabs); |
duke@435 | 1334 | GenEnsureParsabilityClosure ep_cl; |
duke@435 | 1335 | generation_iterate(&ep_cl, false); |
duke@435 | 1336 | perm_gen()->ensure_parsability(); |
duke@435 | 1337 | } |
duke@435 | 1338 | |
duke@435 | 1339 | oop GenCollectedHeap::handle_failed_promotion(Generation* gen, |
duke@435 | 1340 | oop obj, |
coleenp@548 | 1341 | size_t obj_size) { |
duke@435 | 1342 | assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); |
duke@435 | 1343 | HeapWord* result = NULL; |
duke@435 | 1344 | |
duke@435 | 1345 | // First give each higher generation a chance to allocate the promoted object. |
duke@435 | 1346 | Generation* allocator = next_gen(gen); |
duke@435 | 1347 | if (allocator != NULL) { |
duke@435 | 1348 | do { |
duke@435 | 1349 | result = allocator->allocate(obj_size, false); |
duke@435 | 1350 | } while (result == NULL && (allocator = next_gen(allocator)) != NULL); |
duke@435 | 1351 | } |
duke@435 | 1352 | |
duke@435 | 1353 | if (result == NULL) { |
duke@435 | 1354 | // Then give gen and higher generations a chance to expand and allocate the |
duke@435 | 1355 | // object. |
duke@435 | 1356 | do { |
duke@435 | 1357 | result = gen->expand_and_allocate(obj_size, false); |
duke@435 | 1358 | } while (result == NULL && (gen = next_gen(gen)) != NULL); |
duke@435 | 1359 | } |
duke@435 | 1360 | |
duke@435 | 1361 | if (result != NULL) { |
duke@435 | 1362 | Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size); |
duke@435 | 1363 | } |
duke@435 | 1364 | return oop(result); |
duke@435 | 1365 | } |
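// The strategy above: first try a plain allocation in each
// successively older generation; only if all of those fail, retry
// with expansion, starting at the originating generation. On success
// the object's body is word-copied to the new location; a NULL result
// tells the caller the promotion could not be satisfied anywhere.
//
// A minimal sketch of a call site (hypothetical, for illustration;
// the real callers live in the generations' promotion paths):
//
//   oop new_obj = gch->handle_failed_promotion(young_gen, old_obj,
//                                              old_obj->size());
//   if (new_obj == NULL) {
//     // Promotion failed everywhere: leave old_obj in place and let
//     // the promotion-failure machinery deal with it.
//   }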
duke@435 | 1366 | |
duke@435 | 1367 | class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure { |
duke@435 | 1368 | jlong _time; // in ms |
duke@435 | 1369 | jlong _now; // in ms |
duke@435 | 1370 | |
duke@435 | 1371 | public: |
duke@435 | 1372 | GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { } |
duke@435 | 1373 | |
duke@435 | 1374 | jlong time() { return _time; } |
duke@435 | 1375 | |
duke@435 | 1376 | void do_generation(Generation* gen) { |
duke@435 | 1377 | _time = MIN2(_time, gen->time_of_last_gc(_now)); |
duke@435 | 1378 | } |
duke@435 | 1379 | }; |
duke@435 | 1380 | |
duke@435 | 1381 | jlong GenCollectedHeap::millis_since_last_gc() { |
duke@435 | 1382 | jlong now = os::javaTimeMillis(); |
duke@435 | 1383 | GenTimeOfLastGCClosure tolgc_cl(now); |
duke@435 | 1384 | // iterate over generations getting the oldest |
duke@435 | 1385 | // time that a generation was collected |
duke@435 | 1386 | generation_iterate(&tolgc_cl, false); |
duke@435 | 1387 | tolgc_cl.do_generation(perm_gen()); |
duke@435 | 1388 | // XXX Since javaTimeMillis() does not guarantee
duke@435 | 1389 | // monotonically increasing return values (note, I
duke@435 | 1390 | // didn't say "strictly monotonic"), we need to
duke@435 | 1391 | // guard against getting back a time later than now.
duke@435 | 1392 | // This should be fixed by basing on something like
duke@435 | 1393 | // gethrtime(), which guarantees monotonicity. Note
duke@435 | 1394 | // that cond_wait() is susceptible to a similar
duke@435 | 1395 | // problem, because its interface is based on
duke@435 | 1396 | // absolute time in the form of the system time's
duke@435 | 1397 | // notion of UTC. See also 4506635 for yet another
duke@435 | 1398 | // problem of a similar nature. XXX
duke@435 | 1399 | jlong retVal = now - tolgc_cl.time(); |
duke@435 | 1400 | if (retVal < 0) { |
duke@435 | 1401 | NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
duke@435 | 1402 | return 0; |
duke@435 | 1403 | } |
duke@435 | 1404 | return retVal; |
duke@435 | 1405 | } |
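// Worked example: if the young generation was last collected at
// t=100ms, the old generation at t=50ms, and now=200ms, then
// tolgc_cl.time() is the minimum, 50, and the result is 150ms: the
// time since the least recently collected generation was collected.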