src/share/vm/memory/genCollectedHeap.cpp

author:      jcoomes
date:        Wed, 03 Mar 2010 14:48:26 -0800
changeset:   1746:2a1472c30599
parent:      1605:c4d722788ed6
child:       1822:0bfd3fb24150
permissions: -rw-r--r--

4396719: Mark Sweep stack overflow on deeply nested Object arrays
Summary: Use an explicit stack for object arrays and process them in chunks.
Reviewed-by: iveresov, apetrusenko

duke@435 1 /*
dcubed@1315 2 * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 # include "incls/_precompiled.incl"
duke@435 26 # include "incls/_genCollectedHeap.cpp.incl"
duke@435 27
duke@435 28 GenCollectedHeap* GenCollectedHeap::_gch;
duke@435 29 NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
duke@435 30
duke@435 31 // The set of potentially parallel tasks in strong root scanning.
duke@435 32 enum GCH_process_strong_roots_tasks {
duke@435 33 // We probably want to parallelize this internally, but for now...
duke@435 34 GCH_PS_younger_gens,
duke@435 35 // Leave this one last.
duke@435 36 GCH_PS_NumElements
duke@435 37 };
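
A minimal sketch of how this task set is used, assuming only the SubTasksDone
calls that appear elsewhere in this file (is_task_claimed and
all_tasks_completed); the worker function itself is hypothetical:

// Hypothetical worker: each parallel GC thread races to claim a task id;
// SubTasksDone guarantees exactly one winner per id.
static void gch_scan_roots_worker(SubTasksDone* tasks) {
  if (!tasks->is_task_claimed(GCH_PS_younger_gens)) {
    // Only the claiming thread scans the younger generations as roots.
  }
  // Every worker checks in; the last one to do so resets the task state.
  tasks->all_tasks_completed();
}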
duke@435 38
duke@435 39 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
duke@435 40 SharedHeap(policy),
duke@435 41 _gen_policy(policy),
duke@435 42 _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
duke@435 43 _full_collections_completed(0)
duke@435 44 {
duke@435 45 if (_gen_process_strong_tasks == NULL ||
duke@435 46 !_gen_process_strong_tasks->valid()) {
duke@435 47 vm_exit_during_initialization("Failed necessary allocation.");
duke@435 48 }
duke@435 49 assert(policy != NULL, "Sanity check");
duke@435 50 _preloading_shared_classes = false;
duke@435 51 }
duke@435 52
duke@435 53 jint GenCollectedHeap::initialize() {
ysr@1601 54 CollectedHeap::pre_initialize();
ysr@1601 55
duke@435 56 int i;
duke@435 57 _n_gens = gen_policy()->number_of_generations();
duke@435 58
duke@435 59 // While there are no constraints in the GC code that HeapWordSize
duke@435 60 // be any particular value, there are multiple other areas in the
duke@435 61 // system which believe this to be true (e.g. oop->object_size in some
duke@435 62 // cases incorrectly returns the size in wordSize units rather than
duke@435 63 // HeapWordSize).
duke@435 64 guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
duke@435 65
duke@435 66 // The heap must be at least as aligned as generations.
duke@435 67 size_t alignment = Generation::GenGrain;
duke@435 68
duke@435 69 _gen_specs = gen_policy()->generations();
duke@435 70 PermanentGenerationSpec *perm_gen_spec =
duke@435 71 collector_policy()->permanent_generation();
duke@435 72
duke@435 73 // Make sure the sizes are all aligned.
duke@435 74 for (i = 0; i < _n_gens; i++) {
duke@435 75 _gen_specs[i]->align(alignment);
duke@435 76 }
duke@435 77 perm_gen_spec->align(alignment);
duke@435 78
duke@435 79 // If we are dumping the heap, then allocate a wasted block of address
duke@435 80 // space in order to push the heap to a lower address. This extra
duke@435 81 // address range allows for other (or larger) libraries to be loaded
duke@435 82 // without them occupying the space required for the shared spaces.
duke@435 83
duke@435 84 if (DumpSharedSpaces) {
duke@435 85 uintx reserved = 0;
duke@435 86 uintx block_size = 64*1024*1024;
duke@435 87 while (reserved < SharedDummyBlockSize) {
duke@435 88 char* dummy = os::reserve_memory(block_size);
duke@435 89 reserved += block_size;
duke@435 90 }
duke@435 91 }
duke@435 92
duke@435 93 // Allocate space for the heap.
duke@435 94
duke@435 95 char* heap_address;
duke@435 96 size_t total_reserved = 0;
duke@435 97 int n_covered_regions = 0;
duke@435 98 ReservedSpace heap_rs(0);
duke@435 99
duke@435 100 heap_address = allocate(alignment, perm_gen_spec, &total_reserved,
duke@435 101 &n_covered_regions, &heap_rs);
duke@435 102
duke@435 103 if (UseSharedSpaces) {
duke@435 104 if (!heap_rs.is_reserved() || heap_address != heap_rs.base()) {
duke@435 105 if (heap_rs.is_reserved()) {
duke@435 106 heap_rs.release();
duke@435 107 }
duke@435 108 FileMapInfo* mapinfo = FileMapInfo::current_info();
duke@435 109 mapinfo->fail_continue("Unable to reserve shared region.");
duke@435 110 allocate(alignment, perm_gen_spec, &total_reserved, &n_covered_regions,
duke@435 111 &heap_rs);
duke@435 112 }
duke@435 113 }
duke@435 114
duke@435 115 if (!heap_rs.is_reserved()) {
duke@435 116 vm_shutdown_during_initialization(
duke@435 117 "Could not reserve enough space for object heap");
duke@435 118 return JNI_ENOMEM;
duke@435 119 }
duke@435 120
duke@435 121 _reserved = MemRegion((HeapWord*)heap_rs.base(),
duke@435 122 (HeapWord*)(heap_rs.base() + heap_rs.size()));
duke@435 123
duke@435 124 // It is important to do this in a way such that concurrent readers can't
duke@435 125 // temporarily think something is in the heap. (Seen this happen in asserts.)
duke@435 126 _reserved.set_word_size(0);
duke@435 127 _reserved.set_start((HeapWord*)heap_rs.base());
duke@435 128 size_t actual_heap_size = heap_rs.size() - perm_gen_spec->misc_data_size()
duke@435 129 - perm_gen_spec->misc_code_size();
duke@435 130 _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));
duke@435 131
duke@435 132 _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
duke@435 133 set_barrier_set(rem_set()->bs());
ysr@1601 134
duke@435 135 _gch = this;
duke@435 136
duke@435 137 for (i = 0; i < _n_gens; i++) {
duke@435 138 ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(),
duke@435 139 UseSharedSpaces, UseSharedSpaces);
duke@435 140 _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
duke@435 141 heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
duke@435 142 }
duke@435 143 _perm_gen = perm_gen_spec->init(heap_rs, PermSize, rem_set());
duke@435 144
duke@435 145 clear_incremental_collection_will_fail();
duke@435 146 clear_last_incremental_collection_failed();
duke@435 147
duke@435 148 #ifndef SERIALGC
duke@435 149 // If we are running CMS, create the collector responsible
duke@435 150 // for collecting the CMS generations.
duke@435 151 if (collector_policy()->is_concurrent_mark_sweep_policy()) {
duke@435 152 bool success = create_cms_collector();
duke@435 153 if (!success) return JNI_ENOMEM;
duke@435 154 }
duke@435 155 #endif // SERIALGC
duke@435 156
duke@435 157 return JNI_OK;
duke@435 158 }
duke@435 159
duke@435 160
duke@435 161 char* GenCollectedHeap::allocate(size_t alignment,
duke@435 162 PermanentGenerationSpec* perm_gen_spec,
duke@435 163 size_t* _total_reserved,
duke@435 164 int* _n_covered_regions,
duke@435 165 ReservedSpace* heap_rs){
duke@435 166 const char overflow_msg[] = "The size of the object heap + VM data exceeds "
duke@435 167 "the maximum representable size";
duke@435 168
duke@435 169 // Now figure out the total size.
duke@435 170 size_t total_reserved = 0;
duke@435 171 int n_covered_regions = 0;
duke@435 172 const size_t pageSize = UseLargePages ?
duke@435 173 os::large_page_size() : os::vm_page_size();
duke@435 174
duke@435 175 for (int i = 0; i < _n_gens; i++) {
duke@435 176 total_reserved += _gen_specs[i]->max_size();
duke@435 177 if (total_reserved < _gen_specs[i]->max_size()) {
duke@435 178 vm_exit_during_initialization(overflow_msg);
duke@435 179 }
duke@435 180 n_covered_regions += _gen_specs[i]->n_covered_regions();
duke@435 181 }
duke@435 182 assert(total_reserved % pageSize == 0, "Gen size");
duke@435 183 total_reserved += perm_gen_spec->max_size();
duke@435 184 assert(total_reserved % pageSize == 0, "Perm Gen size");
duke@435 185
duke@435 186 if (total_reserved < perm_gen_spec->max_size()) {
duke@435 187 vm_exit_during_initialization(overflow_msg);
duke@435 188 }
duke@435 189 n_covered_regions += perm_gen_spec->n_covered_regions();
duke@435 190
duke@435 191 // Add the size of the data area which shares the same reserved area
duke@435 192 // as the heap, but which is not actually part of the heap.
duke@435 193 size_t s = perm_gen_spec->misc_data_size() + perm_gen_spec->misc_code_size();
duke@435 194
duke@435 195 total_reserved += s;
duke@435 196 if (total_reserved < s) {
duke@435 197 vm_exit_during_initialization(overflow_msg);
duke@435 198 }
duke@435 199
duke@435 200 if (UseLargePages) {
duke@435 201 assert(total_reserved != 0, "total_reserved cannot be 0");
duke@435 202 total_reserved = round_to(total_reserved, os::large_page_size());
duke@435 203 if (total_reserved < os::large_page_size()) {
duke@435 204 vm_exit_during_initialization(overflow_msg);
duke@435 205 }
duke@435 206 }
duke@435 207
duke@435 208 // Calculate the address at which the heap must reside in order for
duke@435 209 // the shared data to be at the required address.
duke@435 210
duke@435 211 char* heap_address;
duke@435 212 if (UseSharedSpaces) {
duke@435 213
duke@435 214 // Calculate the address of the first word beyond the heap.
duke@435 215 FileMapInfo* mapinfo = FileMapInfo::current_info();
duke@435 216 int lr = CompactingPermGenGen::n_regions - 1;
duke@435 217 size_t capacity = align_size_up(mapinfo->space_capacity(lr), alignment);
duke@435 218 heap_address = mapinfo->region_base(lr) + capacity;
duke@435 219
duke@435 220 // Calculate the address of the first word of the heap.
duke@435 221 heap_address -= total_reserved;
duke@435 222 } else {
duke@435 223 heap_address = NULL; // any address will do.
kvn@1077 224 if (UseCompressedOops) {
kvn@1077 225 heap_address = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
kvn@1077 226 *_total_reserved = total_reserved;
kvn@1077 227 *_n_covered_regions = n_covered_regions;
kvn@1077 228 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
kvn@1077 229 UseLargePages, heap_address);
kvn@1077 230
kvn@1077 231 if (heap_address != NULL && !heap_rs->is_reserved()) {
kvn@1077 232 // Failed to reserve at specified address - the requested memory
kvn@1077 233 // region is already taken, for example, by the 'java' launcher.
kvn@1077 234 // Try again to reserve the heap at a higher address.
kvn@1077 235 heap_address = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
kvn@1077 236 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
kvn@1077 237 UseLargePages, heap_address);
kvn@1077 238
kvn@1077 239 if (heap_address != NULL && !heap_rs->is_reserved()) {
kvn@1077 240 // Failed to reserve at specified address again - give up.
kvn@1077 241 heap_address = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
kvn@1077 242 assert(heap_address == NULL, "");
kvn@1077 243 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
kvn@1077 244 UseLargePages, heap_address);
kvn@1077 245 }
kvn@1077 246 }
kvn@1077 247 return heap_address;
kvn@1077 248 }
duke@435 249 }
duke@435 250
duke@435 251 *_total_reserved = total_reserved;
duke@435 252 *_n_covered_regions = n_covered_regions;
coleenp@672 253 *heap_rs = ReservedHeapSpace(total_reserved, alignment,
coleenp@672 254 UseLargePages, heap_address);
duke@435 255
duke@435 256 return heap_address;
duke@435 257 }
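
The repeated "add, then compare against the addend" test above is the standard
unsigned overflow check: for size_t arithmetic, a wrapped sum is always smaller
than either operand. A self-contained illustration (not HotSpot code):

#include <cstddef>
#include <cstdio>

// Returns false if "*sum += term" wrapped around.
static bool add_checked(size_t* sum, size_t term) {
  *sum += term;
  return *sum >= term;
}

int main() {
  size_t total = 0;
  if (!add_checked(&total, (size_t)-1) || !add_checked(&total, 2)) {
    std::printf("The size of the object heap + VM data exceeds "
                "the maximum representable size\n");
  }
  return 0;
}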
duke@435 258
duke@435 259
duke@435 260 void GenCollectedHeap::post_initialize() {
duke@435 261 SharedHeap::post_initialize();
duke@435 262 TwoGenerationCollectorPolicy *policy =
duke@435 263 (TwoGenerationCollectorPolicy *)collector_policy();
duke@435 264 guarantee(policy->is_two_generation_policy(), "Illegal policy type");
duke@435 265 DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
duke@435 266 assert(def_new_gen->kind() == Generation::DefNew ||
duke@435 267 def_new_gen->kind() == Generation::ParNew ||
duke@435 268 def_new_gen->kind() == Generation::ASParNew,
duke@435 269 "Wrong generation kind");
duke@435 270
duke@435 271 Generation* old_gen = get_gen(1);
duke@435 272 assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
duke@435 273 old_gen->kind() == Generation::ASConcurrentMarkSweep ||
duke@435 274 old_gen->kind() == Generation::MarkSweepCompact,
duke@435 275 "Wrong generation kind");
duke@435 276
duke@435 277 policy->initialize_size_policy(def_new_gen->eden()->capacity(),
duke@435 278 old_gen->capacity(),
duke@435 279 def_new_gen->from()->capacity());
duke@435 280 policy->initialize_gc_policy_counters();
duke@435 281 }
duke@435 282
duke@435 283 void GenCollectedHeap::ref_processing_init() {
duke@435 284 SharedHeap::ref_processing_init();
duke@435 285 for (int i = 0; i < _n_gens; i++) {
duke@435 286 _gens[i]->ref_processor_init();
duke@435 287 }
duke@435 288 }
duke@435 289
duke@435 290 size_t GenCollectedHeap::capacity() const {
duke@435 291 size_t res = 0;
duke@435 292 for (int i = 0; i < _n_gens; i++) {
duke@435 293 res += _gens[i]->capacity();
duke@435 294 }
duke@435 295 return res;
duke@435 296 }
duke@435 297
duke@435 298 size_t GenCollectedHeap::used() const {
duke@435 299 size_t res = 0;
duke@435 300 for (int i = 0; i < _n_gens; i++) {
duke@435 301 res += _gens[i]->used();
duke@435 302 }
duke@435 303 return res;
duke@435 304 }
duke@435 305
duke@435 306 // Save the "used_region" for each generation at "level" and below,
duke@435 307 // and, if perm is true, for the perm gen.
duke@435 308 void GenCollectedHeap::save_used_regions(int level, bool perm) {
duke@435 309 assert(level < _n_gens, "Illegal level parameter");
duke@435 310 for (int i = level; i >= 0; i--) {
duke@435 311 _gens[i]->save_used_region();
duke@435 312 }
duke@435 313 if (perm) {
duke@435 314 perm_gen()->save_used_region();
duke@435 315 }
duke@435 316 }
duke@435 317
duke@435 318 size_t GenCollectedHeap::max_capacity() const {
duke@435 319 size_t res = 0;
duke@435 320 for (int i = 0; i < _n_gens; i++) {
duke@435 321 res += _gens[i]->max_capacity();
duke@435 322 }
duke@435 323 return res;
duke@435 324 }
duke@435 325
duke@435 326 // Update the _full_collections_completed counter
duke@435 327 // at the end of a stop-world full GC.
duke@435 328 unsigned int GenCollectedHeap::update_full_collections_completed() {
duke@435 329 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
duke@435 330 assert(_full_collections_completed <= _total_full_collections,
duke@435 331 "Can't complete more collections than were started");
duke@435 332 _full_collections_completed = _total_full_collections;
duke@435 333 ml.notify_all();
duke@435 334 return _full_collections_completed;
duke@435 335 }
duke@435 336
duke@435 337 // Update the _full_collections_completed counter, as appropriate,
duke@435 338 // at the end of a concurrent GC cycle. Note the conditional update
duke@435 339 // below to allow this method to be called by a concurrent collector
duke@435 340 // without synchronizing in any manner with the VM thread (which
duke@435 341 // may already have initiated a STW full collection "concurrently").
duke@435 342 unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
duke@435 343 MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
duke@435 344 assert((_full_collections_completed <= _total_full_collections) &&
duke@435 345 (count <= _total_full_collections),
duke@435 346 "Can't complete more collections than were started");
duke@435 347 if (count > _full_collections_completed) {
duke@435 348 _full_collections_completed = count;
duke@435 349 ml.notify_all();
duke@435 350 }
duke@435 351 return _full_collections_completed;
duke@435 352 }
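
Both updaters above implement a monotonically increasing counter guarded by a
monitor, so a concurrent collector can publish completion without racing the
VM thread. A portable C++ analogue of the same pattern (an illustration with
hypothetical names, not HotSpot's Monitor API):

#include <condition_variable>
#include <mutex>

class CompletedCounter {
  std::mutex              _lock;
  std::condition_variable _cv;
  unsigned int            _completed = 0;
public:
  // Conditional update: a late-arriving concurrent collector can never
  // move the counter backwards past a STW full GC that already finished.
  unsigned int update(unsigned int count) {
    std::lock_guard<std::mutex> guard(_lock);
    if (count > _completed) {
      _completed = count;
      _cv.notify_all();   // wake anyone waiting on FullGCCount-style events
    }
    return _completed;
  }
  void wait_until(unsigned int count) {
    std::unique_lock<std::mutex> guard(_lock);
    _cv.wait(guard, [&] { return _completed >= count; });
  }
};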
duke@435 353
duke@435 354
duke@435 355 #ifndef PRODUCT
duke@435 356 // Override of memory state checking method in CollectedHeap:
duke@435 357 // Some collectors (CMS for example) can't have badHeapWordVal written
duke@435 358 // in the first two words of an object. (For instance, in the case of
duke@435 359 // CMS these words hold state used to synchronize between certain
duke@435 360 // (concurrent) GC steps and direct allocating mutators.)
duke@435 361 // The skip_header_HeapWords() method below allows us to skip
duke@435 362 // over the requisite number of HeapWords. Note that (for
duke@435 363 // generational collectors) this means that this many words are
duke@435 364 // skipped in each object, irrespective of the generation in which
duke@435 365 // that object lives. The resultant loss of precision seems to be
duke@435 366 // harmless and the pain of avoiding that imprecision appears somewhat
duke@435 367 // higher than we are prepared to pay for such rudimentary debugging
duke@435 368 // support.
duke@435 369 void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
duke@435 370 size_t size) {
duke@435 371 if (CheckMemoryInitialization && ZapUnusedHeapArea) {
duke@435 372 // We are asked to check a size in HeapWords,
duke@435 373 // but the memory is mangled in juint words.
duke@435 374 juint* start = (juint*) (addr + skip_header_HeapWords());
duke@435 375 juint* end = (juint*) (addr + size);
duke@435 376 for (juint* slot = start; slot < end; slot += 1) {
duke@435 377 assert(*slot == badHeapWordVal,
duke@435 378 "Found non badHeapWordValue in pre-allocation check");
duke@435 379 }
duke@435 380 }
duke@435 381 }
duke@435 382 #endif
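
The check above relies on unused heap memory having been "zapped" with
badHeapWordVal while it was free. A standalone sketch of that zap-and-verify
debugging idiom, with made-up names (kBadWordVal is an assumption, not
HotSpot's constant):

#include <cassert>
#include <cstddef>
#include <cstdint>

static const uint32_t kBadWordVal = 0xBAADBABEu;

// Mangle a free range so later checks can detect stray writes.
static void zap_range(uint32_t* start, uint32_t* end) {
  for (uint32_t* p = start; p < end; ++p) *p = kBadWordVal;
}

// Pre-allocation check: skip the header words some collectors mutate.
static void check_zapped(const uint32_t* start, const uint32_t* end,
                         size_t skip_words) {
  for (const uint32_t* p = start + skip_words; p < end; ++p) {
    assert(*p == kBadWordVal && "found non-zapped word before allocation");
  }
}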
duke@435 383
duke@435 384 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
duke@435 385 bool is_tlab,
duke@435 386 bool first_only) {
duke@435 387 HeapWord* res;
duke@435 388 for (int i = 0; i < _n_gens; i++) {
duke@435 389 if (_gens[i]->should_allocate(size, is_tlab)) {
duke@435 390 res = _gens[i]->allocate(size, is_tlab);
duke@435 391 if (res != NULL) return res;
duke@435 392 else if (first_only) break;
duke@435 393 }
duke@435 394 }
duke@435 395 // Otherwise...
duke@435 396 return NULL;
duke@435 397 }
duke@435 398
duke@435 399 HeapWord* GenCollectedHeap::mem_allocate(size_t size,
duke@435 400 bool is_large_noref,
duke@435 401 bool is_tlab,
duke@435 402 bool* gc_overhead_limit_was_exceeded) {
duke@435 403 return collector_policy()->mem_allocate_work(size,
duke@435 404 is_tlab,
duke@435 405 gc_overhead_limit_was_exceeded);
duke@435 406 }
duke@435 407
duke@435 408 bool GenCollectedHeap::must_clear_all_soft_refs() {
duke@435 409 return _gc_cause == GCCause::_last_ditch_collection;
duke@435 410 }
duke@435 411
duke@435 412 bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
duke@435 413 return (cause == GCCause::_java_lang_system_gc ||
duke@435 414 cause == GCCause::_gc_locker) &&
duke@435 415 UseConcMarkSweepGC && ExplicitGCInvokesConcurrent;
duke@435 416 }
duke@435 417
duke@435 418 void GenCollectedHeap::do_collection(bool full,
duke@435 419 bool clear_all_soft_refs,
duke@435 420 size_t size,
duke@435 421 bool is_tlab,
duke@435 422 int max_level) {
duke@435 423 bool prepared_for_verification = false;
duke@435 424 ResourceMark rm;
duke@435 425 DEBUG_ONLY(Thread* my_thread = Thread::current();)
duke@435 426
duke@435 427 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
duke@435 428 assert(my_thread->is_VM_thread() ||
duke@435 429 my_thread->is_ConcurrentGC_thread(),
duke@435 430 "incorrect thread type capability");
duke@435 431 assert(Heap_lock->is_locked(), "the requesting thread should have the Heap_lock");
duke@435 432 guarantee(!is_gc_active(), "collection is not reentrant");
duke@435 433 assert(max_level < n_gens(), "sanity check");
duke@435 434
duke@435 435 if (GC_locker::check_active_before_gc()) {
duke@435 436 return; // GC is disabled (e.g. JNI GetXXXCritical operation)
duke@435 437 }
duke@435 438
duke@435 439 const size_t perm_prev_used = perm_gen()->used();
duke@435 440
duke@435 441 if (PrintHeapAtGC) {
duke@435 442 Universe::print_heap_before_gc();
duke@435 443 if (Verbose) {
duke@435 444 gclog_or_tty->print_cr("GC Cause: %s", GCCause::to_string(gc_cause()));
duke@435 445 }
duke@435 446 }
duke@435 447
duke@435 448 {
duke@435 449 FlagSetting fl(_is_gc_active, true);
duke@435 450
duke@435 451 bool complete = full && (max_level == (n_gens()-1));
duke@435 452 const char* gc_cause_str = "GC ";
duke@435 453 if (complete) {
duke@435 454 GCCause::Cause cause = gc_cause();
duke@435 455 if (cause == GCCause::_java_lang_system_gc) {
duke@435 456 gc_cause_str = "Full GC (System) ";
duke@435 457 } else {
duke@435 458 gc_cause_str = "Full GC ";
duke@435 459 }
duke@435 460 }
duke@435 461 gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
duke@435 462 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
duke@435 463 TraceTime t(gc_cause_str, PrintGCDetails, false, gclog_or_tty);
duke@435 464
duke@435 465 gc_prologue(complete);
duke@435 466 increment_total_collections(complete);
duke@435 467
duke@435 468 size_t gch_prev_used = used();
duke@435 469
duke@435 470 int starting_level = 0;
duke@435 471 if (full) {
duke@435 472 // Search for the oldest generation which will collect all younger
duke@435 473 // generations, and start the collection loop there.
duke@435 474 for (int i = max_level; i >= 0; i--) {
duke@435 475 if (_gens[i]->full_collects_younger_generations()) {
duke@435 476 starting_level = i;
duke@435 477 break;
duke@435 478 }
duke@435 479 }
duke@435 480 }
duke@435 481
duke@435 482 bool must_restore_marks_for_biased_locking = false;
duke@435 483
duke@435 484 int max_level_collected = starting_level;
duke@435 485 for (int i = starting_level; i <= max_level; i++) {
duke@435 486 if (_gens[i]->should_collect(full, size, is_tlab)) {
dcubed@1315 487 if (i == n_gens() - 1) { // a major collection is to happen
dcubed@1315 488 if (!complete) {
dcubed@1315 489 // The full_collections increment was missed above.
dcubed@1315 490 increment_total_full_collections();
dcubed@1315 491 }
ysr@1050 492 pre_full_gc_dump(); // do any pre full gc dumps
dcubed@1315 493 }
duke@435 494 // Timer for individual generations. Last argument is false: no CR
duke@435 495 TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
duke@435 496 TraceCollectorStats tcs(_gens[i]->counters());
duke@435 497 TraceMemoryManagerStats tmms(_gens[i]->kind());
duke@435 498
duke@435 499 size_t prev_used = _gens[i]->used();
duke@435 500 _gens[i]->stat_record()->invocations++;
duke@435 501 _gens[i]->stat_record()->accumulated_time.start();
duke@435 502
jmasa@698 503 // Must be done anew before each collection because
jmasa@698 504 // a previous collection will do mangling and will
jmasa@698 505 // change the top of some spaces.
jmasa@698 506 record_gen_tops_before_GC();
jmasa@698 507
duke@435 508 if (PrintGC && Verbose) {
duke@435 509 gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
duke@435 510 i,
duke@435 511 _gens[i]->stat_record()->invocations,
duke@435 512 size*HeapWordSize);
duke@435 513 }
duke@435 514
duke@435 515 if (VerifyBeforeGC && i >= VerifyGCLevel &&
duke@435 516 total_collections() >= VerifyGCStartAt) {
duke@435 517 HandleMark hm; // Discard invalid handles created during verification
duke@435 518 if (!prepared_for_verification) {
duke@435 519 prepare_for_verify();
duke@435 520 prepared_for_verification = true;
duke@435 521 }
duke@435 522 gclog_or_tty->print(" VerifyBeforeGC:");
duke@435 523 Universe::verify(true);
duke@435 524 }
duke@435 525 COMPILER2_PRESENT(DerivedPointerTable::clear());
duke@435 526
duke@435 527 if (!must_restore_marks_for_biased_locking &&
duke@435 528 _gens[i]->performs_in_place_marking()) {
duke@435 529 // We perform this mark word preservation work lazily
duke@435 530 // because it's only at this point that we know whether we
duke@435 531 // absolutely have to do it; we want to avoid doing it for
duke@435 532 // scavenge-only collections where it's unnecessary
duke@435 533 must_restore_marks_for_biased_locking = true;
duke@435 534 BiasedLocking::preserve_marks();
duke@435 535 }
duke@435 536
duke@435 537 // Do collection work
duke@435 538 {
duke@435 539 // Note on ref discovery: For what appear to be historical reasons,
duke@435 540 // GCH enables and disables (by enqueueing) refs discovery.
duke@435 541 // In the future this should be moved into the generation's
duke@435 542 // collect method so that ref discovery and enqueueing concerns
duke@435 543 // are local to a generation. The collect method could return
duke@435 544 // an appropriate indication in the case that notification on
duke@435 545 // the ref lock was needed. This will make the treatment of
duke@435 546 // weak refs more uniform (and indeed remove such concerns
duke@435 547 // from GCH). XXX
duke@435 548
duke@435 549 HandleMark hm; // Discard invalid handles created during gc
duke@435 550 save_marks(); // save marks for all gens
duke@435 551 // We want to discover references, but not process them yet.
duke@435 552 // This mode is disabled in process_discovered_references if the
duke@435 553 // generation does some collection work, or in
duke@435 554 // enqueue_discovered_references if the generation returns
duke@435 555 // without doing any work.
duke@435 556 ReferenceProcessor* rp = _gens[i]->ref_processor();
duke@435 557 // If the discovery of ("weak") refs in this generation is
duke@435 558 // atomic wrt other collectors in this configuration, we
duke@435 559 // are guaranteed to have empty discovered ref lists.
duke@435 560 if (rp->discovery_is_atomic()) {
duke@435 561 rp->verify_no_references_recorded();
duke@435 562 rp->enable_discovery();
ysr@892 563 rp->setup_policy(clear_all_soft_refs);
duke@435 564 } else {
ysr@888 565 // collect() below will enable discovery as appropriate
duke@435 566 }
duke@435 567 _gens[i]->collect(full, clear_all_soft_refs, size, is_tlab);
duke@435 568 if (!rp->enqueuing_is_done()) {
duke@435 569 rp->enqueue_discovered_references();
duke@435 570 } else {
duke@435 571 rp->set_enqueuing_is_done(false);
duke@435 572 }
duke@435 573 rp->verify_no_references_recorded();
duke@435 574 }
duke@435 575 max_level_collected = i;
duke@435 576
duke@435 577 // Determine if allocation request was met.
duke@435 578 if (size > 0) {
duke@435 579 if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
duke@435 580 if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
duke@435 581 size = 0;
duke@435 582 }
duke@435 583 }
duke@435 584 }
duke@435 585
duke@435 586 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
duke@435 587
duke@435 588 _gens[i]->stat_record()->accumulated_time.stop();
duke@435 589
duke@435 590 update_gc_stats(i, full);
duke@435 591
duke@435 592 if (VerifyAfterGC && i >= VerifyGCLevel &&
duke@435 593 total_collections() >= VerifyGCStartAt) {
duke@435 594 HandleMark hm; // Discard invalid handles created during verification
duke@435 595 gclog_or_tty->print(" VerifyAfterGC:");
duke@435 596 Universe::verify(false);
duke@435 597 }
duke@435 598
duke@435 599 if (PrintGCDetails) {
duke@435 600 gclog_or_tty->print(":");
duke@435 601 _gens[i]->print_heap_change(prev_used);
duke@435 602 }
duke@435 603 }
duke@435 604 }
duke@435 605
duke@435 606 // Update "complete" boolean wrt what actually transpired --
duke@435 607 // for instance, a promotion failure could have led to
duke@435 608 // a whole heap collection.
duke@435 609 complete = complete || (max_level_collected == n_gens() - 1);
duke@435 610
ysr@1050 611 if (complete) { // We did a "major" collection
ysr@1050 612 post_full_gc_dump(); // do any post full gc dumps
ysr@1050 613 }
ysr@1050 614
duke@435 615 if (PrintGCDetails) {
duke@435 616 print_heap_change(gch_prev_used);
duke@435 617
duke@435 618 // Print perm gen info for full GC with PrintGCDetails flag.
duke@435 619 if (complete) {
duke@435 620 print_perm_heap_change(perm_prev_used);
duke@435 621 }
duke@435 622 }
duke@435 623
duke@435 624 for (int j = max_level_collected; j >= 0; j -= 1) {
duke@435 625 // Adjust generation sizes.
duke@435 626 _gens[j]->compute_new_size();
duke@435 627 }
duke@435 628
duke@435 629 if (complete) {
duke@435 630 // Ask the permanent generation to adjust size for full collections
duke@435 631 perm()->compute_new_size();
duke@435 632 update_full_collections_completed();
duke@435 633 }
duke@435 634
duke@435 635 // Track memory usage and detect low memory after GC finishes
duke@435 636 MemoryService::track_memory_usage();
duke@435 637
duke@435 638 gc_epilogue(complete);
duke@435 639
duke@435 640 if (must_restore_marks_for_biased_locking) {
duke@435 641 BiasedLocking::restore_marks();
duke@435 642 }
duke@435 643 }
duke@435 644
duke@435 645 AdaptiveSizePolicy* sp = gen_policy()->size_policy();
duke@435 646 AdaptiveSizePolicyOutput(sp, total_collections());
duke@435 647
duke@435 648 if (PrintHeapAtGC) {
duke@435 649 Universe::print_heap_after_gc();
duke@435 650 }
duke@435 651
jmasa@981 652 #ifdef TRACESPINNING
jmasa@981 653 ParallelTaskTerminator::print_termination_counts();
jmasa@981 654 #endif
jmasa@981 655
duke@435 656 if (ExitAfterGCNum > 0 && total_collections() == ExitAfterGCNum) {
duke@435 657 tty->print_cr("Stopping after GC #" UINTX_FORMAT, ExitAfterGCNum);
duke@435 658 vm_exit(-1);
duke@435 659 }
duke@435 660 }
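
The core of do_collection is the level-selection logic: a full GC starts at
the oldest generation that also collects everything younger, then collects
from there up through max_level. A compact standalone model of just that
selection (hypothetical types, not HotSpot code):

#include <cstdio>

struct Gen { bool full_collects_younger; };

static int starting_level_for(const Gen* gens, int max_level, bool full) {
  int start = 0;
  if (full) {
    for (int i = max_level; i >= 0; i--) {
      if (gens[i].full_collects_younger) { start = i; break; }
    }
  }
  return start;
}

int main() {
  Gen gens[2] = { { false }, { true } };  // e.g. DefNew + mark-compact old gen
  std::printf("full GC starts at level %d\n",
              starting_level_for(gens, 1, true));  // prints 1
  return 0;
}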
duke@435 661
duke@435 662 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
duke@435 663 return collector_policy()->satisfy_failed_allocation(size, is_tlab);
duke@435 664 }
duke@435 665
duke@435 666 void GenCollectedHeap::set_par_threads(int t) {
duke@435 667 SharedHeap::set_par_threads(t);
duke@435 668 _gen_process_strong_tasks->set_par_threads(t);
duke@435 669 }
duke@435 670
duke@435 671 class AssertIsPermClosure: public OopClosure {
duke@435 672 public:
duke@435 673 void do_oop(oop* p) {
duke@435 674 assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
duke@435 675 }
coleenp@548 676 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
duke@435 677 };
duke@435 678 static AssertIsPermClosure assert_is_perm_closure;
duke@435 679
duke@435 680 void GenCollectedHeap::
duke@435 681 gen_process_strong_roots(int level,
duke@435 682 bool younger_gens_as_roots,
jrose@1424 683 bool activate_scope,
duke@435 684 bool collecting_perm_gen,
duke@435 685 SharedHeap::ScanningOption so,
jrose@1424 686 OopsInGenClosure* not_older_gens,
jrose@1424 687 bool do_code_roots,
jrose@1424 688 OopsInGenClosure* older_gens) {
duke@435 689 // General strong roots.
jrose@1424 690
jrose@1424 691 if (!do_code_roots) {
jrose@1424 692 SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
jrose@1424 693 not_older_gens, NULL, older_gens);
jrose@1424 694 } else {
jrose@1424 695 bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
jrose@1424 696 CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
jrose@1424 697 SharedHeap::process_strong_roots(activate_scope, collecting_perm_gen, so,
jrose@1424 698 not_older_gens, &code_roots, older_gens);
jrose@1424 699 }
duke@435 700
duke@435 701 if (younger_gens_as_roots) {
duke@435 702 if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
duke@435 703 for (int i = 0; i < level; i++) {
duke@435 704 not_older_gens->set_generation(_gens[i]);
duke@435 705 _gens[i]->oop_iterate(not_older_gens);
duke@435 706 }
duke@435 707 not_older_gens->reset_generation();
duke@435 708 }
duke@435 709 }
duke@435 710 // When collection is parallel, all threads get to cooperate to do
duke@435 711 // older-gen scanning.
duke@435 712 for (int i = level+1; i < _n_gens; i++) {
duke@435 713 older_gens->set_generation(_gens[i]);
duke@435 714 rem_set()->younger_refs_iterate(_gens[i], older_gens);
duke@435 715 older_gens->reset_generation();
duke@435 716 }
duke@435 717
duke@435 718 _gen_process_strong_tasks->all_tasks_completed();
duke@435 719 }
duke@435 720
duke@435 721 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
jrose@1424 722 CodeBlobClosure* code_roots,
duke@435 723 OopClosure* non_root_closure) {
jrose@1424 724 SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
duke@435 725 // "Local" "weak" refs
duke@435 726 for (int i = 0; i < _n_gens; i++) {
duke@435 727 _gens[i]->ref_processor()->weak_oops_do(root_closure);
duke@435 728 }
duke@435 729 }
duke@435 730
duke@435 731 #define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
duke@435 732 void GenCollectedHeap:: \
duke@435 733 oop_since_save_marks_iterate(int level, \
duke@435 734 OopClosureType* cur, \
duke@435 735 OopClosureType* older) { \
duke@435 736 _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \
duke@435 737 for (int i = level+1; i < n_gens(); i++) { \
duke@435 738 _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \
duke@435 739 } \
duke@435 740 perm_gen()->oop_since_save_marks_iterate##nv_suffix(older); \
duke@435 741 }
duke@435 742
duke@435 743 ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)
duke@435 744
duke@435 745 #undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
duke@435 746
duke@435 747 bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
duke@435 748 for (int i = level; i < _n_gens; i++) {
duke@435 749 if (!_gens[i]->no_allocs_since_save_marks()) return false;
duke@435 750 }
duke@435 751 return perm_gen()->no_allocs_since_save_marks();
duke@435 752 }
duke@435 753
duke@435 754 bool GenCollectedHeap::supports_inline_contig_alloc() const {
duke@435 755 return _gens[0]->supports_inline_contig_alloc();
duke@435 756 }
duke@435 757
duke@435 758 HeapWord** GenCollectedHeap::top_addr() const {
duke@435 759 return _gens[0]->top_addr();
duke@435 760 }
duke@435 761
duke@435 762 HeapWord** GenCollectedHeap::end_addr() const {
duke@435 763 return _gens[0]->end_addr();
duke@435 764 }
duke@435 765
duke@435 766 size_t GenCollectedHeap::unsafe_max_alloc() {
duke@435 767 return _gens[0]->unsafe_max_alloc_nogc();
duke@435 768 }
duke@435 769
duke@435 770 // public collection interfaces
duke@435 771
duke@435 772 void GenCollectedHeap::collect(GCCause::Cause cause) {
duke@435 773 if (should_do_concurrent_full_gc(cause)) {
duke@435 774 #ifndef SERIALGC
duke@435 775 // mostly concurrent full collection
duke@435 776 collect_mostly_concurrent(cause);
duke@435 777 #else // SERIALGC
duke@435 778 ShouldNotReachHere();
duke@435 779 #endif // SERIALGC
duke@435 780 } else {
duke@435 781 #ifdef ASSERT
duke@435 782 if (cause == GCCause::_scavenge_alot) {
duke@435 783 // minor collection only
duke@435 784 collect(cause, 0);
duke@435 785 } else {
duke@435 786 // Stop-the-world full collection
duke@435 787 collect(cause, n_gens() - 1);
duke@435 788 }
duke@435 789 #else
duke@435 790 // Stop-the-world full collection
duke@435 791 collect(cause, n_gens() - 1);
duke@435 792 #endif
duke@435 793 }
duke@435 794 }
duke@435 795
duke@435 796 void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
duke@435 797 // The caller doesn't have the Heap_lock
duke@435 798 assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
duke@435 799 MutexLocker ml(Heap_lock);
duke@435 800 collect_locked(cause, max_level);
duke@435 801 }
duke@435 802
duke@435 803 // This interface assumes that it's being called by the
duke@435 804 // vm thread. It collects the heap assuming that the
duke@435 805 // heap lock is already held and that we are executing in
duke@435 806 // the context of the vm thread.
duke@435 807 void GenCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
duke@435 808 assert(Thread::current()->is_VM_thread(), "Precondition#1");
duke@435 809 assert(Heap_lock->is_locked(), "Precondition#2");
duke@435 810 GCCauseSetter gcs(this, cause);
duke@435 811 switch (cause) {
duke@435 812 case GCCause::_heap_inspection:
duke@435 813 case GCCause::_heap_dump: {
duke@435 814 HandleMark hm;
duke@435 815 do_full_collection(false, // don't clear all soft refs
duke@435 816 n_gens() - 1);
duke@435 817 break;
duke@435 818 }
duke@435 819 default: // XXX FIX ME
duke@435 820 ShouldNotReachHere(); // Unexpected use of this function
duke@435 821 }
duke@435 822 }
duke@435 823
duke@435 824 void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
duke@435 825 // The caller has the Heap_lock
duke@435 826 assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
duke@435 827 collect_locked(cause, n_gens() - 1);
duke@435 828 }
duke@435 829
duke@435 830 // this is the private collection interface
duke@435 831 // The Heap_lock is expected to be held on entry.
duke@435 832
duke@435 833 void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
duke@435 834 if (_preloading_shared_classes) {
duke@435 835 warning("\nThe permanent generation is not large enough to preload "
duke@435 836 "requested classes.\nUse -XX:PermSize= to increase the initial "
duke@435 837 "size of the permanent generation.\n");
duke@435 838 vm_exit(2);
duke@435 839 }
duke@435 840 // Read the GC count while holding the Heap_lock
duke@435 841 unsigned int gc_count_before = total_collections();
duke@435 842 unsigned int full_gc_count_before = total_full_collections();
duke@435 843 {
duke@435 844 MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
duke@435 845 VM_GenCollectFull op(gc_count_before, full_gc_count_before,
duke@435 846 cause, max_level);
duke@435 847 VMThread::execute(&op);
duke@435 848 }
duke@435 849 }
duke@435 850
duke@435 851 #ifndef SERIALGC
duke@435 852 bool GenCollectedHeap::create_cms_collector() {
duke@435 853
duke@435 854 assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
duke@435 855 (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)) &&
duke@435 856 _perm_gen->as_gen()->kind() == Generation::ConcurrentMarkSweep,
duke@435 857 "Unexpected generation kinds");
duke@435 858 // Skip two header words in the block content verification
duke@435 859 NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
duke@435 860 CMSCollector* collector = new CMSCollector(
duke@435 861 (ConcurrentMarkSweepGeneration*)_gens[1],
duke@435 862 (ConcurrentMarkSweepGeneration*)_perm_gen->as_gen(),
duke@435 863 _rem_set->as_CardTableRS(),
duke@435 864 (ConcurrentMarkSweepPolicy*) collector_policy());
duke@435 865
duke@435 866 if (collector == NULL || !collector->completed_initialization()) {
duke@435 867 if (collector) {
duke@435 868 delete collector; // Be nice in embedded situation
duke@435 869 }
duke@435 870 vm_shutdown_during_initialization("Could not create CMS collector");
duke@435 871 return false;
duke@435 872 }
duke@435 873 return true; // success
duke@435 874 }
duke@435 875
duke@435 876 void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
duke@435 877 assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
duke@435 878
duke@435 879 MutexLocker ml(Heap_lock);
duke@435 880 // Read the GC counts while holding the Heap_lock
duke@435 881 unsigned int full_gc_count_before = total_full_collections();
duke@435 882 unsigned int gc_count_before = total_collections();
duke@435 883 {
duke@435 884 MutexUnlocker mu(Heap_lock);
duke@435 885 VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
duke@435 886 VMThread::execute(&op);
duke@435 887 }
duke@435 888 }
duke@435 889 #endif // SERIALGC
duke@435 890
duke@435 891
duke@435 892 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
duke@435 893 int max_level) {
duke@435 894 int local_max_level;
duke@435 895 if (!incremental_collection_will_fail() &&
duke@435 896 gc_cause() == GCCause::_gc_locker) {
duke@435 897 local_max_level = 0;
duke@435 898 } else {
duke@435 899 local_max_level = max_level;
duke@435 900 }
duke@435 901
duke@435 902 do_collection(true /* full */,
duke@435 903 clear_all_soft_refs /* clear_all_soft_refs */,
duke@435 904 0 /* size */,
duke@435 905 false /* is_tlab */,
duke@435 906 local_max_level /* max_level */);
duke@435 907 // Hack XXX FIX ME !!!
duke@435 908 // A scavenge may not have been attempted, or may have
duke@435 909 // been attempted and failed, because the old gen was too full
duke@435 910 if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
duke@435 911 incremental_collection_will_fail()) {
duke@435 912 if (PrintGCDetails) {
duke@435 913 gclog_or_tty->print_cr("GC locker: Trying a full collection "
duke@435 914 "because scavenge failed");
duke@435 915 }
duke@435 916 // This time allow the old gen to be collected as well
duke@435 917 do_collection(true /* full */,
duke@435 918 clear_all_soft_refs /* clear_all_soft_refs */,
duke@435 919 0 /* size */,
duke@435 920 false /* is_tlab */,
duke@435 921 n_gens() - 1 /* max_level */);
duke@435 922 }
duke@435 923 }
duke@435 924
duke@435 925 // Returns "TRUE" iff "p" points into the allocated area of the heap.
duke@435 926 bool GenCollectedHeap::is_in(const void* p) const {
duke@435 927 #ifndef ASSERT
duke@435 928 guarantee(VerifyBeforeGC ||
duke@435 929 VerifyDuringGC ||
duke@435 930 VerifyBeforeExit ||
jrose@1590 931 PrintAssembly ||
jrose@1590 932 tty->count() != 0 || // already printing
duke@435 933 VerifyAfterGC, "too expensive");
duke@435 934 #endif
duke@435 935 // This might be sped up with a cache of the last generation that
duke@435 936 // answered yes.
duke@435 937 for (int i = 0; i < _n_gens; i++) {
duke@435 938 if (_gens[i]->is_in(p)) return true;
duke@435 939 }
duke@435 940 if (_perm_gen->as_gen()->is_in(p)) return true;
duke@435 941 // Otherwise...
duke@435 942 return false;
duke@435 943 }
duke@435 944
duke@435 945 // Returns "TRUE" iff "p" points into the allocated area of the youngest generation.
duke@435 946 bool GenCollectedHeap::is_in_youngest(void* p) {
duke@435 947 return _gens[0]->is_in(p);
duke@435 948 }
duke@435 949
duke@435 950 void GenCollectedHeap::oop_iterate(OopClosure* cl) {
duke@435 951 for (int i = 0; i < _n_gens; i++) {
duke@435 952 _gens[i]->oop_iterate(cl);
duke@435 953 }
duke@435 954 }
duke@435 955
duke@435 956 void GenCollectedHeap::oop_iterate(MemRegion mr, OopClosure* cl) {
duke@435 957 for (int i = 0; i < _n_gens; i++) {
duke@435 958 _gens[i]->oop_iterate(mr, cl);
duke@435 959 }
duke@435 960 }
duke@435 961
duke@435 962 void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
duke@435 963 for (int i = 0; i < _n_gens; i++) {
duke@435 964 _gens[i]->object_iterate(cl);
duke@435 965 }
duke@435 966 perm_gen()->object_iterate(cl);
duke@435 967 }
duke@435 968
jmasa@952 969 void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
jmasa@952 970 for (int i = 0; i < _n_gens; i++) {
jmasa@952 971 _gens[i]->safe_object_iterate(cl);
jmasa@952 972 }
jmasa@952 973 perm_gen()->safe_object_iterate(cl);
jmasa@952 974 }
jmasa@952 975
duke@435 976 void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
duke@435 977 for (int i = 0; i < _n_gens; i++) {
duke@435 978 _gens[i]->object_iterate_since_last_GC(cl);
duke@435 979 }
duke@435 980 }
duke@435 981
duke@435 982 Space* GenCollectedHeap::space_containing(const void* addr) const {
duke@435 983 for (int i = 0; i < _n_gens; i++) {
duke@435 984 Space* res = _gens[i]->space_containing(addr);
duke@435 985 if (res != NULL) return res;
duke@435 986 }
duke@435 987 Space* res = perm_gen()->space_containing(addr);
duke@435 988 if (res != NULL) return res;
duke@435 989 // Otherwise...
duke@435 990 assert(false, "Could not find containing space");
duke@435 991 return NULL;
duke@435 992 }
duke@435 993
duke@435 994
duke@435 995 HeapWord* GenCollectedHeap::block_start(const void* addr) const {
duke@435 996 assert(is_in_reserved(addr), "block_start of address outside of heap");
duke@435 997 for (int i = 0; i < _n_gens; i++) {
duke@435 998 if (_gens[i]->is_in_reserved(addr)) {
duke@435 999 assert(_gens[i]->is_in(addr),
duke@435 1000 "addr should be in allocated part of generation");
duke@435 1001 return _gens[i]->block_start(addr);
duke@435 1002 }
duke@435 1003 }
duke@435 1004 if (perm_gen()->is_in_reserved(addr)) {
duke@435 1005 assert(perm_gen()->is_in(addr),
duke@435 1006 "addr should be in allocated part of perm gen");
duke@435 1007 return perm_gen()->block_start(addr);
duke@435 1008 }
duke@435 1009 assert(false, "Some generation should contain the address");
duke@435 1010 return NULL;
duke@435 1011 }
duke@435 1012
duke@435 1013 size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
duke@435 1014 assert(is_in_reserved(addr), "block_size of address outside of heap");
duke@435 1015 for (int i = 0; i < _n_gens; i++) {
duke@435 1016 if (_gens[i]->is_in_reserved(addr)) {
duke@435 1017 assert(_gens[i]->is_in(addr),
duke@435 1018 "addr should be in allocated part of generation");
duke@435 1019 return _gens[i]->block_size(addr);
duke@435 1020 }
duke@435 1021 }
duke@435 1022 if (perm_gen()->is_in_reserved(addr)) {
duke@435 1023 assert(perm_gen()->is_in(addr),
duke@435 1024 "addr should be in allocated part of perm gen");
duke@435 1025 return perm_gen()->block_size(addr);
duke@435 1026 }
duke@435 1027 assert(false, "Some generation should contain the address");
duke@435 1028 return 0;
duke@435 1029 }
duke@435 1030
duke@435 1031 bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
duke@435 1032 assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
duke@435 1033 assert(block_start(addr) == addr, "addr must be a block start");
duke@435 1034 for (int i = 0; i < _n_gens; i++) {
duke@435 1035 if (_gens[i]->is_in_reserved(addr)) {
duke@435 1036 return _gens[i]->block_is_obj(addr);
duke@435 1037 }
duke@435 1038 }
duke@435 1039 if (perm_gen()->is_in_reserved(addr)) {
duke@435 1040 return perm_gen()->block_is_obj(addr);
duke@435 1041 }
duke@435 1042 assert(false, "Some generation should contain the address");
duke@435 1043 return false;
duke@435 1044 }
duke@435 1045
duke@435 1046 bool GenCollectedHeap::supports_tlab_allocation() const {
duke@435 1047 for (int i = 0; i < _n_gens; i += 1) {
duke@435 1048 if (_gens[i]->supports_tlab_allocation()) {
duke@435 1049 return true;
duke@435 1050 }
duke@435 1051 }
duke@435 1052 return false;
duke@435 1053 }
duke@435 1054
duke@435 1055 size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
duke@435 1056 size_t result = 0;
duke@435 1057 for (int i = 0; i < _n_gens; i += 1) {
duke@435 1058 if (_gens[i]->supports_tlab_allocation()) {
duke@435 1059 result += _gens[i]->tlab_capacity();
duke@435 1060 }
duke@435 1061 }
duke@435 1062 return result;
duke@435 1063 }
duke@435 1064
duke@435 1065 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
duke@435 1066 size_t result = 0;
duke@435 1067 for (int i = 0; i < _n_gens; i += 1) {
duke@435 1068 if (_gens[i]->supports_tlab_allocation()) {
duke@435 1069 result += _gens[i]->unsafe_max_tlab_alloc();
duke@435 1070 }
duke@435 1071 }
duke@435 1072 return result;
duke@435 1073 }
duke@435 1074
duke@435 1075 HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
duke@435 1076 bool gc_overhead_limit_was_exceeded;
duke@435 1077 HeapWord* result = mem_allocate(size /* size */,
duke@435 1078 false /* is_large_noref */,
duke@435 1079 true /* is_tlab */,
duke@435 1080 &gc_overhead_limit_was_exceeded);
duke@435 1081 return result;
duke@435 1082 }
duke@435 1083
duke@435 1084 // Requires "*prev_ptr" to be non-NULL. Removes and returns a block of minimal size
duke@435 1085 // from the list headed by "*prev_ptr".
duke@435 1086 static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
duke@435 1087 bool first = true;
duke@435 1088 size_t min_size = 0; // "first" makes this conceptually infinite.
duke@435 1089 ScratchBlock **smallest_ptr, *smallest;
duke@435 1090 ScratchBlock *cur = *prev_ptr;
duke@435 1091 while (cur) {
duke@435 1092 assert(*prev_ptr == cur, "just checking");
duke@435 1093 if (first || cur->num_words < min_size) {
duke@435 1094 smallest_ptr = prev_ptr;
duke@435 1095 smallest = cur;
duke@435 1096 min_size = smallest->num_words;
duke@435 1097 first = false;
duke@435 1098 }
duke@435 1099 prev_ptr = &cur->next;
duke@435 1100 cur = cur->next;
duke@435 1101 }
duke@435 1102 smallest = *smallest_ptr;
duke@435 1103 *smallest_ptr = smallest->next;
duke@435 1104 return smallest;
duke@435 1105 }
duke@435 1106
duke@435 1107 // Sort the scratch block list headed by "list" into decreasing size order,
duke@435 1108 // and set "list" to the result.
duke@435 1109 static void sort_scratch_list(ScratchBlock*& list) {
duke@435 1110 ScratchBlock* sorted = NULL;
duke@435 1111 ScratchBlock* unsorted = list;
duke@435 1112 while (unsorted) {
duke@435 1113 ScratchBlock *smallest = removeSmallestScratch(&unsorted);
duke@435 1114 smallest->next = sorted;
duke@435 1115 sorted = smallest;
duke@435 1116 }
duke@435 1117 list = sorted;
duke@435 1118 }
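
removeSmallestScratch plus the loop above form a selection sort on a singly
linked list: each pass unlinks the smallest remaining block and pushes it on
the front of the sorted list, so the result ends up in decreasing size order.
A self-contained version of the same logic (hypothetical Block type):

#include <cassert>
#include <cstddef>

struct Block { size_t num_words; Block* next; };

static Block* sort_decreasing(Block* unsorted) {
  Block* sorted = nullptr;
  while (unsorted != nullptr) {
    // Find the pointer that links to the smallest remaining block.
    Block** smallest_ptr = &unsorted;
    for (Block** p = &unsorted; *p != nullptr; p = &(*p)->next) {
      if ((*p)->num_words < (*smallest_ptr)->num_words) smallest_ptr = p;
    }
    Block* smallest = *smallest_ptr;
    *smallest_ptr = smallest->next;   // unlink the smallest block
    smallest->next = sorted;          // push it on the sorted list's front
    sorted = smallest;
  }
  return sorted;
}

int main() {
  Block c = { 7, nullptr }, b = { 42, &c }, a = { 13, &b };
  Block* sorted = sort_decreasing(&a);  // 13 -> 42 -> 7 becomes 42 -> 13 -> 7
  assert(sorted->num_words == 42);
  assert(sorted->next->num_words == 13);
  assert(sorted->next->next->num_words == 7);
  return 0;
}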
duke@435 1119
duke@435 1120 ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
duke@435 1121 size_t max_alloc_words) {
duke@435 1122 ScratchBlock* res = NULL;
duke@435 1123 for (int i = 0; i < _n_gens; i++) {
duke@435 1124 _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
duke@435 1125 }
duke@435 1126 sort_scratch_list(res);
duke@435 1127 return res;
duke@435 1128 }
duke@435 1129
jmasa@698 1130 void GenCollectedHeap::release_scratch() {
jmasa@698 1131 for (int i = 0; i < _n_gens; i++) {
jmasa@698 1132 _gens[i]->reset_scratch();
jmasa@698 1133 }
jmasa@698 1134 }
jmasa@698 1135
duke@435 1136 size_t GenCollectedHeap::large_typearray_limit() {
duke@435 1137 return gen_policy()->large_typearray_limit();
duke@435 1138 }
duke@435 1139
duke@435 1140 class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
duke@435 1141 void do_generation(Generation* gen) {
duke@435 1142 gen->prepare_for_verify();
duke@435 1143 }
duke@435 1144 };
duke@435 1145
duke@435 1146 void GenCollectedHeap::prepare_for_verify() {
duke@435 1147 ensure_parsability(false); // no need to retire TLABs
duke@435 1148 GenPrepareForVerifyClosure blk;
duke@435 1149 generation_iterate(&blk, false);
duke@435 1150 perm_gen()->prepare_for_verify();
duke@435 1151 }
duke@435 1152
duke@435 1153
duke@435 1154 void GenCollectedHeap::generation_iterate(GenClosure* cl,
duke@435 1155 bool old_to_young) {
duke@435 1156 if (old_to_young) {
duke@435 1157 for (int i = _n_gens-1; i >= 0; i--) {
duke@435 1158 cl->do_generation(_gens[i]);
duke@435 1159 }
duke@435 1160 } else {
duke@435 1161 for (int i = 0; i < _n_gens; i++) {
duke@435 1162 cl->do_generation(_gens[i]);
duke@435 1163 }
duke@435 1164 }
duke@435 1165 }
duke@435 1166
duke@435 1167 void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
duke@435 1168 for (int i = 0; i < _n_gens; i++) {
duke@435 1169 _gens[i]->space_iterate(cl, true);
duke@435 1170 }
duke@435 1171 perm_gen()->space_iterate(cl, true);
duke@435 1172 }
duke@435 1173
duke@435 1174 bool GenCollectedHeap::is_maximal_no_gc() const {
duke@435 1175 for (int i = 0; i < _n_gens; i++) { // skip perm gen
duke@435 1176 if (!_gens[i]->is_maximal_no_gc()) {
duke@435 1177 return false;
duke@435 1178 }
duke@435 1179 }
duke@435 1180 return true;
duke@435 1181 }
duke@435 1182
duke@435 1183 void GenCollectedHeap::save_marks() {
duke@435 1184 for (int i = 0; i < _n_gens; i++) {
duke@435 1185 _gens[i]->save_marks();
duke@435 1186 }
duke@435 1187 perm_gen()->save_marks();
duke@435 1188 }
duke@435 1189
duke@435 1190 void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
duke@435 1191 for (int i = 0; i <= collectedGen; i++) {
duke@435 1192 _gens[i]->compute_new_size();
duke@435 1193 }
duke@435 1194 }
duke@435 1195
duke@435 1196 GenCollectedHeap* GenCollectedHeap::heap() {
duke@435 1197 assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
duke@435 1198 assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
duke@435 1199 return _gch;
duke@435 1200 }
duke@435 1201
duke@435 1202
duke@435 1203 void GenCollectedHeap::prepare_for_compaction() {
duke@435 1204 Generation* scanning_gen = _gens[_n_gens-1];
duke@435 1205 // Start by compacting into same gen.
duke@435 1206 CompactPoint cp(scanning_gen, NULL, NULL);
duke@435 1207 while (scanning_gen != NULL) {
duke@435 1208 scanning_gen->prepare_for_compaction(&cp);
duke@435 1209 scanning_gen = prev_gen(scanning_gen);
duke@435 1210 }
duke@435 1211 }
duke@435 1212
duke@435 1213 GCStats* GenCollectedHeap::gc_stats(int level) const {
duke@435 1214 return _gens[level]->gc_stats();
duke@435 1215 }
duke@435 1216
ysr@1280 1217 void GenCollectedHeap::verify(bool allow_dirty, bool silent, bool option /* ignored */) {
duke@435 1218 if (!silent) {
duke@435 1219 gclog_or_tty->print("permgen ");
duke@435 1220 }
duke@435 1221 perm_gen()->verify(allow_dirty);
duke@435 1222 for (int i = _n_gens-1; i >= 0; i--) {
duke@435 1223 Generation* g = _gens[i];
duke@435 1224 if (!silent) {
duke@435 1225 gclog_or_tty->print(g->name());
duke@435 1226 gclog_or_tty->print(" ");
duke@435 1227 }
duke@435 1228 g->verify(allow_dirty);
duke@435 1229 }
duke@435 1230 if (!silent) {
duke@435 1231 gclog_or_tty->print("remset ");
duke@435 1232 }
duke@435 1233 rem_set()->verify();
duke@435 1234 if (!silent) {
duke@435 1235 gclog_or_tty->print("ref_proc ");
duke@435 1236 }
duke@435 1237 ReferenceProcessor::verify();
duke@435 1238 }
duke@435 1239
duke@435 1240 void GenCollectedHeap::print() const { print_on(tty); }
duke@435 1241 void GenCollectedHeap::print_on(outputStream* st) const {
duke@435 1242 for (int i = 0; i < _n_gens; i++) {
duke@435 1243 _gens[i]->print_on(st);
duke@435 1244 }
duke@435 1245 perm_gen()->print_on(st);
duke@435 1246 }
duke@435 1247
duke@435 1248 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
duke@435 1249 if (workers() != NULL) {
duke@435 1250 workers()->threads_do(tc);
duke@435 1251 }
duke@435 1252 #ifndef SERIALGC
duke@435 1253 if (UseConcMarkSweepGC) {
duke@435 1254 ConcurrentMarkSweepThread::threads_do(tc);
duke@435 1255 }
duke@435 1256 #endif // SERIALGC
duke@435 1257 }
duke@435 1258
duke@435 1259 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
duke@435 1260 #ifndef SERIALGC
duke@435 1261 if (UseParNewGC) {
duke@435 1262 workers()->print_worker_threads_on(st);
duke@435 1263 }
duke@435 1264 if (UseConcMarkSweepGC) {
duke@435 1265 ConcurrentMarkSweepThread::print_all_on(st);
duke@435 1266 }
duke@435 1267 #endif // SERIALGC
duke@435 1268 }
duke@435 1269
duke@435 1270 void GenCollectedHeap::print_tracing_info() const {
duke@435 1271 if (TraceGen0Time) {
duke@435 1272 get_gen(0)->print_summary_info();
duke@435 1273 }
duke@435 1274 if (TraceGen1Time) {
duke@435 1275 get_gen(1)->print_summary_info();
duke@435 1276 }
duke@435 1277 }
duke@435 1278
duke@435 1279 void GenCollectedHeap::print_heap_change(size_t prev_used) const {
duke@435 1280 if (PrintGCDetails && Verbose) {
duke@435 1281 gclog_or_tty->print(" " SIZE_FORMAT
duke@435 1282 "->" SIZE_FORMAT
duke@435 1283 "(" SIZE_FORMAT ")",
duke@435 1284 prev_used, used(), capacity());
duke@435 1285 } else {
duke@435 1286 gclog_or_tty->print(" " SIZE_FORMAT "K"
duke@435 1287 "->" SIZE_FORMAT "K"
duke@435 1288 "(" SIZE_FORMAT "K)",
duke@435 1289 prev_used / K, used() / K, capacity() / K);
duke@435 1290 }
duke@435 1291 }
duke@435 1292
duke@435 1293 // Print perm gen info with the PrintGCDetails flag.
duke@435 1294 void GenCollectedHeap::print_perm_heap_change(size_t perm_prev_used) const {
duke@435 1295 gclog_or_tty->print(", [%s :", perm_gen()->short_name());
duke@435 1296 perm_gen()->print_heap_change(perm_prev_used);
duke@435 1297 gclog_or_tty->print("]");
duke@435 1298 }
duke@435 1299
duke@435 1300 class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
duke@435 1301 private:
duke@435 1302 bool _full;
duke@435 1303 public:
duke@435 1304 void do_generation(Generation* gen) {
duke@435 1305 gen->gc_prologue(_full);
duke@435 1306 }
duke@435 1307 GenGCPrologueClosure(bool full) : _full(full) {};
duke@435 1308 };
duke@435 1309
duke@435 1310 void GenCollectedHeap::gc_prologue(bool full) {
duke@435 1311 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
duke@435 1312
duke@435 1313 always_do_update_barrier = false;
duke@435 1314 // Fill TLABs and such
duke@435 1315 CollectedHeap::accumulate_statistics_all_tlabs();
duke@435 1316 ensure_parsability(true); // retire TLABs
duke@435 1317
duke@435 1318 // Call allocation profiler
duke@435 1319 AllocationProfiler::iterate_since_last_gc();
duke@435 1320 // Walk generations
duke@435 1321 GenGCPrologueClosure blk(full);
duke@435 1322 generation_iterate(&blk, false); // not old-to-young.
duke@435 1323 perm_gen()->gc_prologue(full);
duke@435 1324 };
duke@435 1325
duke@435 1326 class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
duke@435 1327 private:
duke@435 1328 bool _full;
duke@435 1329 public:
duke@435 1330 void do_generation(Generation* gen) {
duke@435 1331 gen->gc_epilogue(_full);
duke@435 1332 }
duke@435 1333 GenGCEpilogueClosure(bool full) : _full(full) {};
duke@435 1334 };
duke@435 1335
duke@435 1336 void GenCollectedHeap::gc_epilogue(bool full) {
duke@435 1337 // If we did a complete collection, remember whether an incremental
duke@435 1338 // (partial) collection of the heap would have failed.
duke@435 1339 if (full && incremental_collection_will_fail()) {
duke@435 1340 set_last_incremental_collection_failed();
duke@435 1341 } else {
duke@435 1342 clear_last_incremental_collection_failed();
duke@435 1343 }
duke@435 1344 // Clear the flag, if set; the generation gc_epilogues will set the
duke@435 1345 // flag again if the condition persists despite the collection.
duke@435 1346 clear_incremental_collection_will_fail();
duke@435 1347
duke@435 1348 #ifdef COMPILER2
duke@435 1349 assert(DerivedPointerTable::is_empty(), "derived pointer present");
duke@435 1350 size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
duke@435 1351 guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
duke@435 1352 #endif /* COMPILER2 */
duke@435 1353
duke@435 1354 resize_all_tlabs();
duke@435 1355
duke@435 1356 GenGCEpilogueClosure blk(full);
duke@435 1357 generation_iterate(&blk, false); // not old-to-young.
duke@435 1358 perm_gen()->gc_epilogue(full);
duke@435 1359
duke@435 1360 always_do_update_barrier = UseConcMarkSweepGC;
duke@435 1361 };
duke@435 1362
jmasa@698 1363 #ifndef PRODUCT
jmasa@698 1364 class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
jmasa@698 1365 private:
jmasa@698 1366 public:
jmasa@698 1367 void do_generation(Generation* gen) {
jmasa@698 1368 gen->record_spaces_top();
jmasa@698 1369 }
jmasa@698 1370 };
jmasa@698 1371
jmasa@698 1372 void GenCollectedHeap::record_gen_tops_before_GC() {
jmasa@698 1373 if (ZapUnusedHeapArea) {
jmasa@698 1374 GenGCSaveTopsBeforeGCClosure blk;
jmasa@698 1375 generation_iterate(&blk, false); // not old-to-young.
jmasa@698 1376 perm_gen()->record_spaces_top();
jmasa@698 1377 }
jmasa@698 1378 }
jmasa@698 1379 #endif // not PRODUCT
jmasa@698 1380
duke@435 1381 class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
duke@435 1382 public:
duke@435 1383 void do_generation(Generation* gen) {
duke@435 1384 gen->ensure_parsability();
duke@435 1385 }
duke@435 1386 };
duke@435 1387
duke@435 1388 void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
duke@435 1389 CollectedHeap::ensure_parsability(retire_tlabs);
duke@435 1390 GenEnsureParsabilityClosure ep_cl;
duke@435 1391 generation_iterate(&ep_cl, false);
duke@435 1392 perm_gen()->ensure_parsability();
duke@435 1393 }
duke@435 1394
duke@435 1395 oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
duke@435 1396 oop obj,
coleenp@548 1397 size_t obj_size) {
duke@435 1398 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
duke@435 1399 HeapWord* result = NULL;
duke@435 1400
duke@435 1401 // First give each higher generation a chance to allocate the promoted object.
duke@435 1402 Generation* allocator = next_gen(gen);
duke@435 1403 if (allocator != NULL) {
duke@435 1404 do {
duke@435 1405 result = allocator->allocate(obj_size, false);
duke@435 1406 } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
duke@435 1407 }
duke@435 1408
duke@435 1409 if (result == NULL) {
duke@435 1410 // Then give gen and higher generations a chance to expand and allocate the
duke@435 1411 // object.
duke@435 1412 do {
duke@435 1413 result = gen->expand_and_allocate(obj_size, false);
duke@435 1414 } while (result == NULL && (gen = next_gen(gen)) != NULL);
duke@435 1415 }
duke@435 1416
duke@435 1417 if (result != NULL) {
duke@435 1418 Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
duke@435 1419 }
duke@435 1420 return oop(result);
duke@435 1421 }
duke@435 1422
duke@435 1423 class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
duke@435 1424 jlong _time; // in ms
duke@435 1425 jlong _now; // in ms
duke@435 1426
duke@435 1427 public:
duke@435 1428 GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }
duke@435 1429
duke@435 1430 jlong time() { return _time; }
duke@435 1431
duke@435 1432 void do_generation(Generation* gen) {
duke@435 1433 _time = MIN2(_time, gen->time_of_last_gc(_now));
duke@435 1434 }
duke@435 1435 };
duke@435 1436
duke@435 1437 jlong GenCollectedHeap::millis_since_last_gc() {
duke@435 1438 jlong now = os::javaTimeMillis();
duke@435 1439 GenTimeOfLastGCClosure tolgc_cl(now);
duke@435 1440 // iterate over generations getting the oldest
duke@435 1441 // time that a generation was collected
duke@435 1442 generation_iterate(&tolgc_cl, false);
duke@435 1443 tolgc_cl.do_generation(perm_gen());
duke@435 1444 // XXX Despite the assert above, since javaTimeMillis()
duke@435 1445 // does not guarantee monotonically increasing return
duke@435 1446 // values (note, I didn't say "strictly monotonic"),
duke@435 1447 // we need to guard against getting back a time
duke@435 1448 // later than now. This should be fixed by basing
duke@435 1449 // it on something like gethrtime(), which guarantees
duke@435 1450 // monotonicity. Note that cond_wait() is susceptible
duke@435 1451 // to a similar problem, because its interface is
duke@435 1452 // based on absolute time in the form of the
duke@435 1453 // system time's notion of UTC. See also 4506635
duke@435 1454 // for yet another problem of a similar nature. XXX
duke@435 1455 jlong retVal = now - tolgc_cl.time();
duke@435 1456 if (retVal < 0) {
duke@435 1457 NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
duke@435 1458 return 0;
duke@435 1459 }
duke@435 1460 return retVal;
duke@435 1461 }
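
The clamp above works around a non-monotonic wall clock, as the XXX comment
explains. A sketch of the fix it suggests (illustrative only, not HotSpot
code): compute the delta from a monotonic clock so it can never be negative.

#include <chrono>

static std::chrono::steady_clock::time_point last_gc_end;

// steady_clock is monotonic, so if last_gc_end was recorded with the same
// clock, the delta is always >= 0 and no "time warp" clamp is needed.
static long long millis_since_last_gc_monotonic() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(steady_clock::now() - last_gc_end)
             .count();
}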
