src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

changeset 1580:e018e6884bd8
parent    1520:0e2d7ae2bc67
child     1625:4788266644c1
comparison 1546:44f61c24ddab -> 1580:e018e6884bd8
251 (double)(tr * MinHeapFreeRatio) / 100.0) 251 (double)(tr * MinHeapFreeRatio) / 100.0)
252 / 100.0; 252 / 100.0;
253 } 253 }
254 } 254 }
255 255
256
257 void ConcurrentMarkSweepGeneration::ref_processor_init() { 256 void ConcurrentMarkSweepGeneration::ref_processor_init() {
258 assert(collector() != NULL, "no collector"); 257 assert(collector() != NULL, "no collector");
259 collector()->ref_processor_init(); 258 collector()->ref_processor_init();
260 } 259 }
261 260
339 _allow_duty_cycle_reduction = false; 338 _allow_duty_cycle_reduction = false;
340 _valid_bits = 0; 339 _valid_bits = 0;
341 _icms_duty_cycle = CMSIncrementalDutyCycle; 340 _icms_duty_cycle = CMSIncrementalDutyCycle;
342 } 341 }
343 342
343 double CMSStats::cms_free_adjustment_factor(size_t free) const {
344 // TBD: CR 6909490
345 return 1.0;
346 }
347
348 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
349 }
350
344 // If promotion failure handling is on use 351 // If promotion failure handling is on use
345 // the padded average size of the promotion for each 352 // the padded average size of the promotion for each
346 // young generation collection. 353 // young generation collection.
347 double CMSStats::time_until_cms_gen_full() const { 354 double CMSStats::time_until_cms_gen_full() const {
348 size_t cms_free = _cms_gen->cmsSpace()->free(); 355 size_t cms_free = _cms_gen->cmsSpace()->free();
359 // a safety factor. 366 // a safety factor.
360 cms_free -= expected_promotion; 367 cms_free -= expected_promotion;
361 368
362 // Adjust by the safety factor. 369 // Adjust by the safety factor.
363 double cms_free_dbl = (double)cms_free; 370 double cms_free_dbl = (double)cms_free;
364 cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0; 371 double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
372 // Apply a further correction factor which tries to adjust
373 // for recent occurrence of concurrent mode failures.
374 cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
375 cms_free_dbl = cms_free_dbl * cms_adjustment;
365 376
366 if (PrintGCDetails && Verbose) { 377 if (PrintGCDetails && Verbose) {
367 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free " 378 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
368 SIZE_FORMAT " expected_promotion " SIZE_FORMAT, 379 SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
369 cms_free, expected_promotion); 380 cms_free, expected_promotion);
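To make the new free-space adjustment in time_until_cms_gen_full() concrete, here is a minimal standalone sketch of the arithmetic in the hunk above. The stub adjustment factor of 1.0 mirrors the code shown (CR 6909490 is still TBD); the input sizes and the final rate division are illustrative assumptions, not the actual HotSpot code.

#include <cstdio>
#include <cstddef>

// Assumed default, in percent, standing in for the real CMSIncrementalSafetyFactor flag.
static const double CMSIncrementalSafetyFactor = 10.0;

// Mirrors the stub introduced above: no correction for concurrent mode failures yet.
static double cms_free_adjustment_factor(size_t /*free*/) { return 1.0; }

int main() {
  size_t cms_free           = 100 * 1024 * 1024;  // free space in the CMS gen (made up)
  size_t expected_promotion =  10 * 1024 * 1024;  // padded average promotion per young GC (made up)

  cms_free -= expected_promotion;                 // reserve room for the next promotion

  // Scale down by the safety factor, then by the (currently neutral) adjustment factor.
  double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
  cms_adjustment        = cms_adjustment * cms_free_adjustment_factor(cms_free);
  double cms_free_dbl   = (double)cms_free * cms_adjustment;

  // Illustrative only: divide by an assumed consumption rate to get a time estimate.
  const double cms_consumption_rate = 5.0 * 1024 * 1024;  // bytes/second, made up
  printf("time until cms gen full ~ %.1f s\n", cms_free_dbl / cms_consumption_rate);
  return 0;
}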
393 // in the query so as to avoid concurrent mode failures 404 // in the query so as to avoid concurrent mode failures
394 // due to starting the collection just a wee bit too 405 // due to starting the collection just a wee bit too
395 // late. 406 // late.
396 double work = cms_duration() + gc0_period(); 407 double work = cms_duration() + gc0_period();
397 double deadline = time_until_cms_gen_full(); 408 double deadline = time_until_cms_gen_full();
409 // If a concurrent mode failure occurred recently, we want to be
410 // more conservative and halve our expected time_until_cms_gen_full()
398 if (work > deadline) { 411 if (work > deadline) {
399 if (Verbose && PrintGCDetails) { 412 if (Verbose && PrintGCDetails) {
400 gclog_or_tty->print( 413 gclog_or_tty->print(
401 " CMSCollector: collect because of anticipated promotion " 414 " CMSCollector: collect because of anticipated promotion "
402 "before full %3.7f + %3.7f > %3.7f ", cms_duration(), 415 "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
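The start heuristic above compares two estimates; the fragment below restates that comparison in isolation. The helper parameters are placeholders standing in for cms_duration(), gc0_period() and time_until_cms_gen_full(), and the example values are made up; only the shape of the test is taken from the hunk above.

#include <cstdio>

// Minimal sketch of the "collect because of anticipated promotion" trigger.
static bool should_start_cms_cycle(double cms_duration_s,    // expected CMS cycle length
                                   double gc0_period_s,      // expected time to next young GC
                                   double time_until_full_s) // estimate of time until the gen fills
{
  // Budget a whole young-GC period on top of the cycle so we err on the early side.
  double work     = cms_duration_s + gc0_period_s;
  double deadline = time_until_full_s;
  return work > deadline;
}

int main() {
  printf("start cycle? %s\n", should_start_cms_cycle(8.0, 2.5, 9.0) ? "yes" : "no");
  return 0;
}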
554 _completed_initialization(false), 567 _completed_initialization(false),
555 _collector_policy(cp), 568 _collector_policy(cp),
556 _should_unload_classes(false), 569 _should_unload_classes(false),
557 _concurrent_cycles_since_last_unload(0), 570 _concurrent_cycles_since_last_unload(0),
558 _roots_scanning_options(0), 571 _roots_scanning_options(0),
559 _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding) 572 _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
573 _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
560 { 574 {
561 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) { 575 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
562 ExplicitGCInvokesConcurrent = true; 576 ExplicitGCInvokesConcurrent = true;
563 } 577 }
564 // Now expand the span and allocate the collection support structures 578 // Now expand the span and allocate the collection support structures
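The two new estimators above are weighted averages padded by a multiple of the observed deviation, which is what the CMS_SweepWeight and CMS_SweepPadding arguments parameterize. A rough, self-contained sketch of that idea (a toy, not the actual AdaptivePaddedAverage implementation; the weight and padding values below are made up):

#include <cstdio>

// Toy padded average: 'weight' percent of the history is kept on each sample,
// and padded_average() over-estimates by 'padding' deviations.
struct ToyPaddedAverage {
  double avg, dev;
  unsigned weight, padding;  // roles in the spirit of CMS_SweepWeight / CMS_SweepPadding

  ToyPaddedAverage(unsigned w, unsigned p) : avg(0.0), dev(0.0), weight(w), padding(p) {}

  void sample(double s) {
    avg = (weight * avg + (100 - weight) * s) / 100.0;
    double d = (s > avg) ? (s - avg) : (avg - s);
    dev = (weight * dev + (100 - weight) * d) / 100.0;
  }
  double padded_average() const { return avg + padding * dev; }
};

int main() {
  ToyPaddedAverage est(75, 1);              // example weight/padding, values made up
  double samples[] = {0.8, 1.2, 0.9, 2.0};  // seconds between (or spent in) sweeps, made up
  for (double s : samples) est.sample(s);
  printf("padded average = %.3f s\n", est.padded_average());
  return 0;
}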
771 } 785 }
772 786
773 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;) 787 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
774 _gc_counters = new CollectorCounters("CMS", 1); 788 _gc_counters = new CollectorCounters("CMS", 1);
775 _completed_initialization = true; 789 _completed_initialization = true;
776 _sweep_timer.start(); // start of time 790 _inter_sweep_timer.start(); // start of time
777 } 791 }
778 792
779 const char* ConcurrentMarkSweepGeneration::name() const { 793 const char* ConcurrentMarkSweepGeneration::name() const {
780 return "concurrent mark-sweep generation"; 794 return "concurrent mark-sweep generation";
781 } 795 }
896 " adj_max_promo_bytes: " SIZE_FORMAT, 910 " adj_max_promo_bytes: " SIZE_FORMAT,
897 max_contiguous_available(), (size_t)adjusted_max_promo_bytes); 911 max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
898 } 912 }
899 } 913 }
900 return result; 914 return result;
915 }
916
917 // At a promotion failure dump information on block layout in heap
918 // (cms old generation).
919 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
920 if (CMSDumpAtPromotionFailure) {
921 cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
922 }
901 } 923 }
902 924
903 CompactibleSpace* 925 CompactibleSpace*
904 ConcurrentMarkSweepGeneration::first_compaction_space() const { 926 ConcurrentMarkSweepGeneration::first_compaction_space() const {
905 return _cmsSpace; 927 return _cmsSpace;
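Note on the new dump above: it is guarded by the CMSDumpAtPromotionFailure flag used in promotion_failure_occurred(), so enabling it amounts to running with -XX:+CMSDumpAtPromotionFailure (plus -XX:+UnlockDiagnosticVMOptions first if the flag is declared diagnostic in this build); the block layout is written to the GC log stream by dump_at_safepoint_with_locks().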
1366 1388
1367 void 1389 void
1368 ConcurrentMarkSweepGeneration:: 1390 ConcurrentMarkSweepGeneration::
1369 par_promote_alloc_done(int thread_num) { 1391 par_promote_alloc_done(int thread_num) {
1370 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num]; 1392 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1371 ps->lab.retire(); 1393 ps->lab.retire(thread_num);
1372 #if CFLS_LAB_REFILL_STATS
1373 if (thread_num == 0) {
1374 _cmsSpace->print_par_alloc_stats();
1375 }
1376 #endif
1377 } 1394 }
1378 1395
1379 void 1396 void
1380 ConcurrentMarkSweepGeneration:: 1397 ConcurrentMarkSweepGeneration::
1381 par_oop_since_save_marks_iterate_done(int thread_num) { 1398 par_oop_since_save_marks_iterate_done(int thread_num) {
1972 _modUnionTable.clear_all(); 1989 _modUnionTable.clear_all();
1973 1990
1974 // We must adjust the allocation statistics being maintained 1991 // We must adjust the allocation statistics being maintained
1975 // in the free list space. We do so by reading and clearing 1992 // in the free list space. We do so by reading and clearing
1976 // the sweep timer and updating the block flux rate estimates below. 1993 // the sweep timer and updating the block flux rate estimates below.
1977 assert(_sweep_timer.is_active(), "We should never see the timer inactive"); 1994 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1978 _sweep_timer.stop(); 1995 if (_inter_sweep_timer.is_active()) {
1979 // Note that we do not use this sample to update the _sweep_estimate. 1996 _inter_sweep_timer.stop();
1980 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()), 1997 // Note that we do not use this sample to update the _inter_sweep_estimate.
1981 _sweep_estimate.padded_average()); 1998 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1999 _inter_sweep_estimate.padded_average(),
2000 _intra_sweep_estimate.padded_average());
2001 }
1982 2002
1983 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(), 2003 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1984 ref_processor(), clear_all_soft_refs); 2004 ref_processor(), clear_all_soft_refs);
1985 #ifdef ASSERT 2005 #ifdef ASSERT
1986 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); 2006 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
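The comment above about updating the "block flux rate estimates" refers to the per-size-class census in the free list space: the inter-sweep timer tells the census over how long the recorded demand accumulated, so raw block counts can be turned into a rate, and the intra-sweep estimate now lets the target also cover the time spent sweeping. The toy sketch below shows that idea only; the field names and the scaling rule are assumptions, not the CompactibleFreeListSpace code.

#include <cstdio>
#include <cstddef>

// Toy per-size-class demand/census record, in the spirit of the "block flux rate" comment.
struct ToySizeClassStats {
  size_t demand_since_last_sweep;  // blocks of this size handed out since the last sweep
  double demand_rate;              // blocks per second

  // Analogue of the begin-sweep census: turn raw demand into a rate using the
  // inter-sweep interval, then size the free-list target to also cover the sweep.
  size_t begin_sweep(double inter_sweep_seconds, double intra_sweep_seconds) {
    if (inter_sweep_seconds > 0.0) {
      demand_rate = (double)demand_since_last_sweep / inter_sweep_seconds;
    }
    size_t desired = (size_t)(demand_rate * (inter_sweep_seconds + intra_sweep_seconds));
    demand_since_last_sweep = 0;   // start accumulating for the next epoch
    return desired;                // would drive coalescing/splitting decisions
  }
};

int main() {
  ToySizeClassStats sc = {12000, 0.0};  // 12000 blocks demanded since the last sweep (made up)
  size_t desired = sc.begin_sweep(4.0 /* s between sweeps */, 1.5 /* s spent sweeping */);
  printf("desired free blocks of this size: %zu\n", desired);
  return 0;
}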
2013 if (_survivor_plab_array != NULL) { 2033 if (_survivor_plab_array != NULL) {
2014 reset_survivor_plab_arrays(); 2034 reset_survivor_plab_arrays();
2015 } 2035 }
2016 2036
2017 // Adjust the per-size allocation stats for the next epoch. 2037 // Adjust the per-size allocation stats for the next epoch.
2018 _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */); 2038 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2019 // Restart the "sweep timer" for next epoch. 2039 // Restart the "inter sweep timer" for the next epoch.
2020 _sweep_timer.reset(); 2040 _inter_sweep_timer.reset();
2021 _sweep_timer.start(); 2041 _inter_sweep_timer.start();
2022 2042
2023 // Sample collection pause time and reset for collection interval. 2043 // Sample collection pause time and reset for collection interval.
2024 if (UseAdaptiveSizePolicy) { 2044 if (UseAdaptiveSizePolicy) {
2025 size_policy()->msc_collection_end(gch->gc_cause()); 2045 size_policy()->msc_collection_end(gch->gc_cause());
2026 } 2046 }
2674 collector()->gc_epilogue(full); 2694 collector()->gc_epilogue(full);
2675 2695
2676 // Also reset promotion tracking in par gc thread states. 2696 // Also reset promotion tracking in par gc thread states.
2677 if (ParallelGCThreads > 0) { 2697 if (ParallelGCThreads > 0) {
2678 for (uint i = 0; i < ParallelGCThreads; i++) { 2698 for (uint i = 0; i < ParallelGCThreads; i++) {
2679 _par_gc_thread_states[i]->promo.stopTrackingPromotions(); 2699 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2680 } 2700 }
2681 } 2701 }
2682 } 2702 }
2683 2703
2684 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) { 2704 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2769 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {} 2789 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2770 2790
2771 bool do_bit(size_t offset) { 2791 bool do_bit(size_t offset) {
2772 HeapWord* addr = _marks->offsetToHeapWord(offset); 2792 HeapWord* addr = _marks->offsetToHeapWord(offset);
2773 if (!_marks->isMarked(addr)) { 2793 if (!_marks->isMarked(addr)) {
2774 oop(addr)->print(); 2794 oop(addr)->print_on(gclog_or_tty);
2775 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr); 2795 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2776 _failed = true; 2796 _failed = true;
2777 } 2797 }
2778 return true; 2798 return true;
2779 } 2799 }
2818 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) 2838 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2819 2839
2820 // Clear any marks from a previous round 2840 // Clear any marks from a previous round
2821 verification_mark_bm()->clear_all(); 2841 verification_mark_bm()->clear_all();
2822 assert(verification_mark_stack()->isEmpty(), "markStack should be empty"); 2842 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2823 assert(overflow_list_is_empty(), "overflow list should be empty"); 2843 verify_work_stacks_empty();
2824 2844
2825 GenCollectedHeap* gch = GenCollectedHeap::heap(); 2845 GenCollectedHeap* gch = GenCollectedHeap::heap();
2826 gch->ensure_parsability(false); // fill TLABs, but no need to retire them 2846 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2827 // Update the saved marks which may affect the root scans. 2847 // Update the saved marks which may affect the root scans.
2828 gch->save_marks(); 2848 gch->save_marks();
2891 // errors by printing corresponding objects. 2911 // errors by printing corresponding objects.
2892 VerifyMarkedClosure vcl(markBitMap()); 2912 VerifyMarkedClosure vcl(markBitMap());
2893 verification_mark_bm()->iterate(&vcl); 2913 verification_mark_bm()->iterate(&vcl);
2894 if (vcl.failed()) { 2914 if (vcl.failed()) {
2895 gclog_or_tty->print("Verification failed"); 2915 gclog_or_tty->print("Verification failed");
2896 Universe::heap()->print(); 2916 Universe::heap()->print_on(gclog_or_tty);
2897 fatal(" ... aborting"); 2917 fatal("CMS: failed marking verification after remark");
2898 } 2918 }
2899 } 2919 }
2900 2920
2901 void CMSCollector::verify_after_remark_work_2() { 2921 void CMSCollector::verify_after_remark_work_2() {
2902 ResourceMark rm; 2922 ResourceMark rm;
3312 MemRegion mr(_cmsSpace->bottom(), new_word_size); 3332 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3313 _bts->resize(new_word_size); // resize the block offset shared array 3333 _bts->resize(new_word_size); // resize the block offset shared array
3314 Universe::heap()->barrier_set()->resize_covered_region(mr); 3334 Universe::heap()->barrier_set()->resize_covered_region(mr);
3315 // Hmmmm... why doesn't CFLS::set_end verify locking? 3335 // Hmmmm... why doesn't CFLS::set_end verify locking?
3316 // This is quite ugly; FIX ME XXX 3336 // This is quite ugly; FIX ME XXX
3317 _cmsSpace->assert_locked(); 3337 _cmsSpace->assert_locked(freelistLock());
3318 _cmsSpace->set_end((HeapWord*)_virtual_space.high()); 3338 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3319 3339
3320 // update the space and generation capacity counters 3340 // update the space and generation capacity counters
3321 if (UsePerfData) { 3341 if (UsePerfData) {
3322 _space_counters->update_capacity(); 3342 _space_counters->update_capacity();
5866 void CMSCollector::sweep(bool asynch) { 5886 void CMSCollector::sweep(bool asynch) {
5867 assert(_collectorState == Sweeping, "just checking"); 5887 assert(_collectorState == Sweeping, "just checking");
5868 check_correct_thread_executing(); 5888 check_correct_thread_executing();
5869 verify_work_stacks_empty(); 5889 verify_work_stacks_empty();
5870 verify_overflow_empty(); 5890 verify_overflow_empty();
5871 incrementSweepCount(); 5891 increment_sweep_count();
5872 _sweep_timer.stop(); 5892 _inter_sweep_timer.stop();
5873 _sweep_estimate.sample(_sweep_timer.seconds()); 5893 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5874 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free()); 5894 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
5875 5895
5876 // PermGen verification support: If perm gen sweeping is disabled in 5896 // PermGen verification support: If perm gen sweeping is disabled in
5877 // this cycle, we preserve the perm gen object "deadness" information 5897 // this cycle, we preserve the perm gen object "deadness" information
5878 // in the perm_gen_verify_bit_map. In order to do that we traverse 5898 // in the perm_gen_verify_bit_map. In order to do that we traverse
5891 // the requisite locks/tokens. 5911 // the requisite locks/tokens.
5892 _permGen->cmsSpace()->blk_iterate(&mdo); 5912 _permGen->cmsSpace()->blk_iterate(&mdo);
5893 } 5913 }
5894 } 5914 }
5895 5915
5916 assert(!_intra_sweep_timer.is_active(), "Should not be active");
5917 _intra_sweep_timer.reset();
5918 _intra_sweep_timer.start();
5896 if (asynch) { 5919 if (asynch) {
5897 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); 5920 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5898 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails); 5921 CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5899 // First sweep the old gen then the perm gen 5922 // First sweep the old gen then the perm gen
5900 { 5923 {
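Taken together, this hunk and the next replace the single _sweep_timer with a pair of timers that partition the CMS epoch: _inter_sweep_timer measures mutator time between sweeps, while _intra_sweep_timer measures the sweep itself. The schematic below is an interpretation of the code shown in these hunks, not additional HotSpot source:

// One CMS epoch, as seen by the two timers (schematic):
//
//   end of previous sweep          sweep() entered                 sweep finished
//        |                                |                              |
//        |<------ _inter_sweep_timer ---->|<----- _intra_sweep_timer --->|
//        |  mutators run; free-list       |  sweeping runs; duration     |
//        |  demand accumulates; duration  |  sampled into                |
//        |  sampled into                  |  _intra_sweep_estimate       |
//        |  _inter_sweep_estimate         |                              |
//
// In sweep():   _inter_sweep_timer.stop();  _inter_sweep_estimate.sample(...);
//               _intra_sweep_timer.reset(); _intra_sweep_timer.start();
//               ... sweepWork(...) ...
//               _intra_sweep_timer.stop();  _intra_sweep_estimate.sample(...);
//               _inter_sweep_timer.reset(); _inter_sweep_timer.start();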
5935 _collectorState = Resizing; 5958 _collectorState = Resizing;
5936 } 5959 }
5937 verify_work_stacks_empty(); 5960 verify_work_stacks_empty();
5938 verify_overflow_empty(); 5961 verify_overflow_empty();
5939 5962
5940 _sweep_timer.reset(); 5963 _intra_sweep_timer.stop();
5941 _sweep_timer.start(); 5964 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5965
5966 _inter_sweep_timer.reset();
5967 _inter_sweep_timer.start();
5942 5968
5943 update_time_of_last_gc(os::javaTimeMillis()); 5969 update_time_of_last_gc(os::javaTimeMillis());
5944 5970
5945 // NOTE on abstract state transitions: 5971 // NOTE on abstract state transitions:
5946 // Mutators allocate-live and/or mark the mod-union table dirty 5972 // Mutators allocate-live and/or mark the mod-union table dirty
5979 } 6005 }
5980 6006
5981 // FIX ME!!! Looks like this belongs in CFLSpace, with 6007 // FIX ME!!! Looks like this belongs in CFLSpace, with
5982 // CMSGen merely delegating to it. 6008 // CMSGen merely delegating to it.
5983 void ConcurrentMarkSweepGeneration::setNearLargestChunk() { 6009 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5984 double nearLargestPercent = 0.999; 6010 double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5985 HeapWord* minAddr = _cmsSpace->bottom(); 6011 HeapWord* minAddr = _cmsSpace->bottom();
5986 HeapWord* largestAddr = 6012 HeapWord* largestAddr =
5987 (HeapWord*) _cmsSpace->dictionary()->findLargestDict(); 6013 (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
5988 if (largestAddr == 0) { 6014 if (largestAddr == NULL) {
5989 // The dictionary appears to be empty. In this case 6015 // The dictionary appears to be empty. In this case
5990 // try to coalesce at the end of the heap. 6016 // try to coalesce at the end of the heap.
5991 largestAddr = _cmsSpace->end(); 6017 largestAddr = _cmsSpace->end();
5992 } 6018 }
5993 size_t largestOffset = pointer_delta(largestAddr, minAddr); 6019 size_t largestOffset = pointer_delta(largestAddr, minAddr);
5994 size_t nearLargestOffset = 6020 size_t nearLargestOffset =
5995 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize; 6021 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6022 if (PrintFLSStatistics != 0) {
6023 gclog_or_tty->print_cr(
6024 "CMS: Large Block: " PTR_FORMAT ";"
6025 " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6026 largestAddr,
6027 _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6028 }
5996 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset); 6029 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5997 } 6030 }
5998 6031
5999 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) { 6032 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6000 return addr >= _cmsSpace->nearLargestChunk(); 6033 return addr >= _cmsSpace->nearLargestChunk();
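As a concrete illustration of the nearLargestChunk computation above (only the formula comes from the code; the sizes, the minimum chunk size, and the 0.999 proximity value standing in for FLSLargestBlockCoalesceProximity are made-up examples):

#include <cstdio>
#include <cstddef>

int main() {
  // HeapWord* arithmetic is modelled with plain word offsets from bottom().
  const double nearLargestPercent = 0.999;    // stand-in for FLSLargestBlockCoalesceProximity
  const size_t MinChunkSizeWords  = 4;        // illustrative minimum chunk size, in words
  const size_t largestOffset      = 1000000;  // offset of the largest free chunk, in words

  // Aim just short of the largest chunk so the sweep can keep coalescing toward it.
  size_t nearLargestOffset =
      (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSizeWords;

  printf("largestOffset = %zu words, nearLargestChunk at offset %zu words\n",
         largestOffset, nearLargestOffset);
  return 0;
}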
6070 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()), 6103 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6071 "Should possess CMS token to sweep"); 6104 "Should possess CMS token to sweep");
6072 assert_lock_strong(gen->freelistLock()); 6105 assert_lock_strong(gen->freelistLock());
6073 assert_lock_strong(bitMapLock()); 6106 assert_lock_strong(bitMapLock());
6074 6107
6075 assert(!_sweep_timer.is_active(), "Was switched off in an outer context"); 6108 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6076 gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()), 6109 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
6077 _sweep_estimate.padded_average()); 6110 gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6111 _inter_sweep_estimate.padded_average(),
6112 _intra_sweep_estimate.padded_average());
6078 gen->setNearLargestChunk(); 6113 gen->setNearLargestChunk();
6079 6114
6080 { 6115 {
6081 SweepClosure sweepClosure(this, gen, &_markBitMap, 6116 SweepClosure sweepClosure(this, gen, &_markBitMap,
6082 CMSYield && asynch); 6117 CMSYield && asynch);
6085 // co-terminal free run. This is done in the SweepClosure 6120 // co-terminal free run. This is done in the SweepClosure
6086 // destructor; so, do not remove this scope, else the 6121 // destructor; so, do not remove this scope, else the
6087 // end-of-sweep-census below will be off by a little bit. 6122 // end-of-sweep-census below will be off by a little bit.
6088 } 6123 }
6089 gen->cmsSpace()->sweep_completed(); 6124 gen->cmsSpace()->sweep_completed();
6090 gen->cmsSpace()->endSweepFLCensus(sweepCount()); 6125 gen->cmsSpace()->endSweepFLCensus(sweep_count());
6091 if (should_unload_classes()) { // unloaded classes this cycle, 6126 if (should_unload_classes()) { // unloaded classes this cycle,
6092 _concurrent_cycles_since_last_unload = 0; // ... reset count 6127 _concurrent_cycles_since_last_unload = 0; // ... reset count
6093 } else { // did not unload classes, 6128 } else { // did not unload classes,
6094 _concurrent_cycles_since_last_unload++; // ... increment count 6129 _concurrent_cycles_since_last_unload++; // ... increment count
6095 } 6130 }
