Fri, 16 Jul 2010 10:09:15 -0700
Merge
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Fri Jul 16 10:09:15 2010 -0700
@@ -1007,9 +1007,9 @@
     __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, (*NOLp));
     __ delayed()->cmp(to_from, byte_count);
     if (NOLp == NULL)
-      __ brx(Assembler::greaterEqual, false, Assembler::pt, no_overlap_target);
+      __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, no_overlap_target);
     else
-      __ brx(Assembler::greaterEqual, false, Assembler::pt, (*NOLp));
+      __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, (*NOLp));
     __ delayed()->nop();
   }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Fri Jul 16 10:09:15 2010 -0700
@@ -234,6 +234,11 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   if (_gc_cause != GCCause::_gc_locker &&
       gch->total_full_collections_completed() <= _full_gc_count_before) {
+    // Maybe we should change the condition to test _gc_cause ==
+    // GCCause::_java_lang_system_gc, instead of
+    // _gc_cause != GCCause::_gc_locker.
+    assert(_gc_cause == GCCause::_java_lang_system_gc,
+           "the only way to get here is if this was a System.gc()-induced GC");
    assert(ExplicitGCInvokesConcurrent, "Error");
    // Now, wait for witnessing concurrent gc cycle to complete,
    // but do so in native mode, because we want to lock the
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Fri Jul 16 10:09:15 2010 -0700
@@ -266,6 +266,12 @@
       _cm->clearNextBitmap();
       _sts.leave();
     }
+
+    // Update the number of full collections that have been
+    // completed. This will also notify the FullGCCount_lock in case a
+    // Java thread is waiting for a full GC to happen (e.g., it
+    // called System.gc() with +ExplicitGCInvokesConcurrent).
+    g1->increment_full_collections_completed(true /* outer */);
   }
   assert(_should_terminate, "just checking");
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Jul 16 10:09:15 2010 -0700
@@ -809,7 +809,8 @@
   }
 };
 
-void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
+void G1CollectedHeap::do_collection(bool explicit_gc,
+                                    bool clear_all_soft_refs,
                                     size_t word_size) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
@@ -821,10 +822,6 @@
     Universe::print_heap_before_gc();
   }
 
-  if (full && DisableExplicitGC) {
-    return;
-  }
-
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");
 
@@ -837,9 +834,11 @@
     IsGCActiveMark x;
 
     // Timing
+    bool system_gc = (gc_cause() == GCCause::_java_lang_system_gc);
+    assert(!system_gc || explicit_gc, "invariant");
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t(full ? "Full GC (System.gc())" : "Full GC",
+    TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
                 PrintGC, true, gclog_or_tty);
 
     TraceMemoryManagerStats tms(true /* fullGC */);
@@ -944,7 +943,7 @@
     heap_region_iterate(&rs_clear);
 
     // Resize the heap if necessary.
-    resize_if_necessary_after_full_collection(full ? 0 : word_size);
+    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
 
     if (_cg1r->use_cache()) {
       _cg1r->clear_and_record_card_counts();
@@ -1009,13 +1008,18 @@
            "young list should be empty at this point");
   }
 
+  // Update the number of full collections that have been completed.
+  increment_full_collections_completed(false /* outer */);
+
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
 }
 
 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
-  do_collection(true, clear_all_soft_refs, 0);
+  do_collection(true, /* explicit_gc */
+                clear_all_soft_refs,
+                0 /* word_size */);
 }
 
 // This code is mostly copied from TenuredGeneration.
@@ -1331,6 +1335,7 @@
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
   _surviving_young_words(NULL),
+  _full_collections_completed(0),
   _in_cset_fast_test(NULL),
   _in_cset_fast_test_base(NULL),
   _dirty_cards_region_list(NULL) {
@@ -1689,6 +1694,51 @@
   return car->free();
 }
 
+bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
+  return
+    ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
+     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
+}
+
+void G1CollectedHeap::increment_full_collections_completed(bool outer) {
+  MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+
+  // We have already incremented _total_full_collections at the start
+  // of the GC, so total_full_collections() represents how many full
+  // collections have been started.
+  unsigned int full_collections_started = total_full_collections();
+
+  // Given that this method is called at the end of a Full GC or of a
+  // concurrent cycle, and those can be nested (i.e., a Full GC can
+  // interrupt a concurrent cycle), the number of full collections
+  // completed should be either one (in the case where there was no
+  // nesting) or two (when a Full GC interrupted a concurrent cycle)
+  // behind the number of full collections started.
+
+  // This is the case for the inner caller, i.e. a Full GC.
+  assert(outer ||
+         (full_collections_started == _full_collections_completed + 1) ||
+         (full_collections_started == _full_collections_completed + 2),
+         err_msg("for inner caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  // This is the case for the outer caller, i.e. the concurrent cycle.
+  assert(!outer ||
+         (full_collections_started == _full_collections_completed + 1),
+         err_msg("for outer caller: full_collections_started = %u "
+                 "is inconsistent with _full_collections_completed = %u",
+                 full_collections_started, _full_collections_completed));
+
+  _full_collections_completed += 1;
+
+  // This notify_all() will ensure that a thread that called
+  // System.gc() (with ExplicitGCInvokesConcurrent set or not) and is
+  // waiting for a full GC to finish will be woken up. It is waiting
+  // in VM_G1IncCollectionPause::doit_epilogue().
+  FullGCCount_lock->notify_all();
+}
+
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");
@@ -1709,25 +1759,41 @@
   // The caller doesn't have the Heap_lock
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
-  int gc_count_before;
+  unsigned int gc_count_before;
+  unsigned int full_gc_count_before;
   {
     MutexLocker ml(Heap_lock);
     // Read the GC count while holding the Heap_lock
     gc_count_before = SharedHeap::heap()->total_collections();
+    full_gc_count_before = SharedHeap::heap()->total_full_collections();
 
     // Don't want to do a GC until cleanup is completed.
     wait_for_cleanup_complete();
-  } // We give up heap lock; VMThread::execute gets it back below
-  switch (cause) {
-    case GCCause::_scavenge_alot: {
-      // Do an incremental pause, which might sometimes be abandoned.
-      VM_G1IncCollectionPause op(gc_count_before, cause);
+
+    // We give up heap lock; VMThread::execute gets it back below
+  }
+
+  if (should_do_concurrent_full_gc(cause)) {
+    // Schedule an initial-mark evacuation pause that will start a
+    // concurrent cycle.
+    VM_G1IncCollectionPause op(gc_count_before,
+                               true, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               cause);
+    VMThread::execute(&op);
+  } else {
+    if (cause == GCCause::_gc_locker
+        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+
+      // Schedule a standard evacuation pause.
+      VM_G1IncCollectionPause op(gc_count_before,
+                                 false, /* should_initiate_conc_mark */
+                                 g1_policy()->max_pause_time_ms(),
+                                 cause);
      VMThread::execute(&op);
-      break;
-    }
-    default: {
-      // In all other cases, we currently do a full gc.
-      VM_G1CollectFull op(gc_count_before, cause);
+    } else {
+      // Schedule a Full GC.
+      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
      VMThread::execute(&op);
    }
  }
@@ -1989,6 +2055,11 @@
 
 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
                                                   HeapRegionClosure *cl) {
+  if (r == NULL) {
+    // The CSet is empty so there's nothing to do.
+    return;
+  }
+
   assert(r->in_collection_set(),
          "Start region must be a member of the collection set.");
   HeapRegion* cur = r;
@@ -2481,11 +2552,13 @@
 }
 
 void G1CollectedHeap::do_collection_pause() {
+  assert(Heap_lock->owned_by_self(), "we assume we're holding the Heap_lock");
+
   // Read the GC count while holding the Heap_lock
   // we need to do this _before_ wait_for_cleanup_complete(), to
   // ensure that we do not give up the heap lock and potentially
   // pick up the wrong count
-  int gc_count_before = SharedHeap::heap()->total_collections();
+  unsigned int gc_count_before = SharedHeap::heap()->total_collections();
 
   // Don't want to do a GC pause while cleanup is being completed!
   wait_for_cleanup_complete();
@@ -2493,7 +2566,10 @@
   g1_policy()->record_stop_world_start();
   {
     MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
-    VM_G1IncCollectionPause op(gc_count_before);
+    VM_G1IncCollectionPause op(gc_count_before,
+                               false, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               GCCause::_g1_inc_collection_pause);
    VMThread::execute(&op);
  }
 }
@@ -2612,7 +2688,7 @@
 };
 
 void
-G1CollectedHeap::do_collection_pause_at_safepoint() {
+G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   if (GC_locker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
@@ -2637,8 +2713,12 @@
       else
         strcat(verbose_str, "(partial)");
     }
-    if (g1_policy()->during_initial_mark_pause())
+    if (g1_policy()->during_initial_mark_pause()) {
       strcat(verbose_str, " (initial-mark)");
+      // We are about to start a marking cycle, so we increment the
+      // full collection counter.
+      increment_total_full_collections();
+    }
 
     // if PrintGCDetails is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
@@ -2661,7 +2741,6 @@
            "young list should be well formed");
   }
 
-  bool abandoned = false;
   { // Call to jvmpi::post_class_unload_events must occur outside of active GC
     IsGCActiveMark x;
 
@@ -2743,7 +2822,7 @@
 
     // Now choose the CS. We may abandon a pause if we find no
     // region that will fit in the MMU pause.
-    bool abandoned = g1_policy()->choose_collection_set();
+    bool abandoned = g1_policy()->choose_collection_set(target_pause_time_ms);
 
     // Nothing to do if we were unable to choose a collection set.
    if (!abandoned) {
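Note: the asserts in increment_full_collections_completed() encode the invariant that the started counter stays one ahead of _full_collections_completed in the unnested case and two ahead while a Full GC has interrupted a concurrent cycle. A minimal standalone trace of that invariant (illustrative only, not HotSpot code; the local counters are hypothetical stand-ins for total_full_collections() and _full_collections_completed):

#include <cassert>

// Illustrative trace (not HotSpot code) of the started/completed
// counters checked in increment_full_collections_completed().
// 'started' is bumped when a Full GC or an initial-mark pause begins;
// 'completed' trails it by one or two depending on nesting.
int main() {
  unsigned int started = 0, completed = 0;

  // Unnested case: a concurrent cycle runs to completion.
  started += 1;                      // initial-mark pause starts the cycle
  assert(started == completed + 1);  // outer caller's view at cycle end
  completed += 1;                    // cycle ends

  // Nested case: a Full GC interrupts a concurrent cycle.
  started += 1;                      // concurrent cycle begins
  started += 1;                      // Full GC begins half-way through
  assert(started == completed + 2);  // inner caller (the Full GC)
  completed += 1;                    // Full GC ends
  assert(started == completed + 1);  // outer caller (the cycle)
  completed += 1;                    // cycle notices the Full GC and ends
  assert(started == completed);
  return 0;
}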
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Jul 16 10:09:15 2010 -0700
@@ -277,6 +277,18 @@
   void update_surviving_young_words(size_t* surv_young_words);
   void cleanup_surviving_young_words();
 
+  // Decides whether an explicit GC should start a concurrent cycle
+  // instead of doing a STW GC. Currently, a concurrent cycle is
+  // explicitly started if:
+  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
+  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
+  bool should_do_concurrent_full_gc(GCCause::Cause cause);
+
+  // Keeps track of how many "full collections" (i.e., Full GCs or
+  // concurrent cycles) we have completed. The number of them we have
+  // started is maintained in _total_full_collections in CollectedHeap.
+  volatile unsigned int _full_collections_completed;
+
 protected:
 
   // Returns "true" iff none of the gc alloc regions have any allocations
@@ -356,13 +368,14 @@
   // GC pause.
   void retire_alloc_region(HeapRegion* alloc_region, bool par);
 
-  // Helper function for two callbacks below.
-  // "full", if true, indicates that the GC is for a System.gc() request,
-  // and should collect the entire heap. If "clear_all_soft_refs" is true,
-  // all soft references are cleared during the GC. If "full" is false,
-  // "word_size" describes the allocation that the GC should
-  // attempt (at least) to satisfy.
-  void do_collection(bool full, bool clear_all_soft_refs,
+  // - if explicit_gc is true, the GC is for a System.gc() or a heap
+  //   inspection request and should collect the entire heap
+  // - if clear_all_soft_refs is true, all soft references are cleared
+  //   during the GC
+  // - if explicit_gc is false, word_size describes the allocation that
+  //   the GC should attempt (at least) to satisfy
+  void do_collection(bool explicit_gc,
+                     bool clear_all_soft_refs,
                      size_t word_size);
 
   // Callback from VM_G1CollectFull operation.
@@ -431,6 +444,26 @@
            _in_cset_fast_test_length * sizeof(bool));
   }
 
+  // This is called at the end of either a concurrent cycle or a Full
+  // GC to update the number of full collections completed. Those two
+  // can happen in a nested fashion, i.e., we start a concurrent
+  // cycle, a Full GC happens half-way through it which ends first,
+  // and then the cycle notices that a Full GC happened and ends
+  // too. The outer parameter is a boolean to help us do a bit tighter
+  // consistency checking in the method. If outer is false, the caller
+  // is the inner caller in the nesting (i.e., the Full GC). If outer
+  // is true, the caller is the outer caller in this nesting (i.e.,
+  // the concurrent cycle). Further nesting is not currently
+  // supported. The end of this call also notifies the
+  // FullGCCount_lock in case a Java thread is waiting for a full GC
+  // to happen (e.g., it called System.gc() with
+  // +ExplicitGCInvokesConcurrent).
+  void increment_full_collections_completed(bool outer);
+
+  unsigned int full_collections_completed() {
+    return _full_collections_completed;
+  }
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -444,7 +477,7 @@
 
   // The guts of the incremental collection pause, executed by the vm
   // thread.
-  virtual void do_collection_pause_at_safepoint();
+  virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
 
   // Actually do the work of evacuating the collection set.
   virtual void evacuate_collection_set();
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Jul 16 10:09:15 2010 -0700
@@ -154,7 +154,6 @@
   _known_garbage_bytes(0),
 
   _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _target_pause_time_ms(-1.0),
 
   _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
 
@@ -1635,8 +1634,6 @@
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
   // </NEW PREDICTION>
-
-  _target_pause_time_ms = -1.0;
 }
 
 // <NEW PREDICTION>
@@ -2366,7 +2363,6 @@
     if (reached_target_length) {
       assert( young_list_length > 0 && _g1->young_list()->length() > 0,
               "invariant" );
-      _target_pause_time_ms = max_pause_time_ms;
       return true;
     }
   } else {
@@ -2398,6 +2394,17 @@
 }
 #endif
 
+bool
+G1CollectorPolicy::force_initial_mark_if_outside_cycle() {
+  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
+  if (!during_cycle) {
+    set_initiate_conc_mark_if_possible();
+    return true;
+  } else {
+    return false;
+  }
+}
+
 void
 G1CollectorPolicy::decide_on_conc_mark_initiation() {
   // We are about to decide on whether this pause will be an
@@ -2864,7 +2871,8 @@
 #endif // !PRODUCT
 
 bool
-G1CollectorPolicy_BestRegionsFirst::choose_collection_set() {
+G1CollectorPolicy_BestRegionsFirst::choose_collection_set(
+                                                  double target_pause_time_ms) {
   // Set this here - in case we're not doing young collections.
   double non_young_start_time_sec = os::elapsedTime();
 
@@ -2877,26 +2885,19 @@
 
   start_recording_regions();
 
-  guarantee(_target_pause_time_ms > -1.0
-            NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
-            "_target_pause_time_ms should have been set!");
-#ifndef PRODUCT
-  if (_target_pause_time_ms <= -1.0) {
-    assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
-    _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
-  }
-#endif
-  assert(_collection_set == NULL, "Precondition");
+  guarantee(target_pause_time_ms > 0.0,
+            err_msg("target_pause_time_ms = %1.6lf should be positive",
+                    target_pause_time_ms));
+  guarantee(_collection_set == NULL, "Precondition");
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
   double predicted_pause_time_ms = base_time_ms;
 
-  double target_time_ms = _target_pause_time_ms;
-  double time_remaining_ms = target_time_ms - base_time_ms;
+  double time_remaining_ms = target_pause_time_ms - base_time_ms;
 
   // the 10% and 50% values are arbitrary...
-  if (time_remaining_ms < 0.10*target_time_ms) {
-    time_remaining_ms = 0.50 * target_time_ms;
+  if (time_remaining_ms < 0.10 * target_pause_time_ms) {
+    time_remaining_ms = 0.50 * target_pause_time_ms;
    _within_target = false;
  } else {
    _within_target = true;
@@ -3059,7 +3060,18 @@
   _recorded_non_young_cset_choice_time_ms =
     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
 
-  return abandon_collection;
+  // Here we are supposed to return whether the pause should be
+  // abandoned or not (i.e., whether the collection set is empty or
+  // not). However, this introduces a subtle issue: when a pause is
+  // initiated explicitly with System.gc() and
+  // +ExplicitGCInvokesConcurrent (see Comment #2 in CR 6944166), it is
+  // supposed to start a marking cycle but would be abandoned. So, by
+  // returning false here we are telling the caller never to consider
+  // a pause to be abandoned. We'll actually remove all the code
+  // associated with abandoned pauses as part of CR 6963209, but we are
+  // just disabling them this way for the moment to avoid increasing
+  // further the amount of changes for CR 6944166.
+  return false;
 }
 
 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
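Note: with _target_pause_time_ms gone, the budget for region selection is derived entirely from the target_pause_time_ms argument. A minimal sketch of the heuristic above (illustrative only, not HotSpot code; the function name is made up, and the 10%/50% constants are the arbitrary values the comment refers to):

// Sketch (not HotSpot code) of the pause-time budget computed at the
// top of choose_collection_set(). If the predicted fixed cost already
// consumes 90% or more of the target, fall back to half the target
// instead of letting the budget go to zero or negative.
double region_selection_budget_ms(double target_pause_time_ms,
                                  double base_time_ms) {
  double time_remaining_ms = target_pause_time_ms - base_time_ms;
  if (time_remaining_ms < 0.10 * target_pause_time_ms) {
    time_remaining_ms = 0.50 * target_pause_time_ms;  // not "within target"
  }
  return time_remaining_ms;
}
// e.g., target = 200ms, base =  30ms -> budget = 170ms (within target)
//       target = 200ms, base = 190ms -> budget = 100ms (fallback)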
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Fri Jul 16 10:09:15 2010 -0700
@@ -199,8 +199,6 @@
   size_t _young_cset_length;
   bool _last_young_gc_full;
 
-  double _target_pause_time_ms;
-
   unsigned _full_young_pause_num;
   unsigned _partial_young_pause_num;
 
@@ -526,6 +524,10 @@
     return _mmu_tracker;
   }
 
+  double max_pause_time_ms() {
+    return _mmu_tracker->max_gc_time() * 1000.0;
+  }
+
   double predict_init_time_ms() {
     return get_new_prediction(_concurrent_mark_init_times_ms);
   }
@@ -1008,7 +1010,7 @@
   // Choose a new collection set. Marks the chosen regions as being
   // "in_collection_set", and links them together. The head and number of
   // the collection set are available via access methods.
-  virtual bool choose_collection_set() = 0;
+  virtual bool choose_collection_set(double target_pause_time_ms) = 0;
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
@@ -1077,6 +1079,12 @@
   void set_during_initial_mark_pause() { _during_initial_mark_pause = true; }
   void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; }
 
+  // This sets the initiate_conc_mark_if_possible() flag to start a
+  // new cycle, as long as we are not already in one. It's best if it
+  // is called during a safepoint when the test whether a cycle is in
+  // progress or not is stable.
+  bool force_initial_mark_if_outside_cycle();
+
   // This is called at the very beginning of an evacuation pause (it
   // has to be the first thing that the pause does). If
   // initiate_conc_mark_if_possible() is true, and the concurrent
@@ -1259,7 +1267,7 @@
   // If the estimated is less then desirable, resize if possible.
   void expand_if_possible(size_t numRegions);
 
-  virtual bool choose_collection_set();
+  virtual bool choose_collection_set(double target_pause_time_ms);
   virtual void record_collection_pause_start(double start_time_sec,
                                              size_t start_used);
   virtual void record_concurrent_mark_cleanup_end(size_t freed_bytes,
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Fri Jul 16 10:09:15 2010 -0700
@@ -42,8 +42,65 @@
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert(!_should_initiate_conc_mark ||
+         ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
+          (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
+         "only a GC locker or a System.gc() induced GC should start a cycle");
+
   GCCauseSetter x(g1h, _gc_cause);
-  g1h->do_collection_pause_at_safepoint();
+  if (_should_initiate_conc_mark) {
+    // It's safer to read full_collections_completed() here, given
+    // that no one else will be updating it concurrently. Since we'll
+    // only need it if we're initiating a marking cycle, no point in
+    // setting it earlier.
+    _full_collections_completed_before = g1h->full_collections_completed();
+
+    // At this point we are supposed to start a concurrent cycle. We
+    // will do so if one is not already in progress.
+    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
+  }
+  g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
 }
 
+void VM_G1IncCollectionPause::doit_epilogue() {
+  VM_GC_Operation::doit_epilogue();
+
+  // If the pause was initiated by a System.gc() and
+  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
+  // that just started (or maybe one that was already in progress) to
+  // finish.
+  if (_gc_cause == GCCause::_java_lang_system_gc &&
+      _should_initiate_conc_mark) {
+    assert(ExplicitGCInvokesConcurrent,
+           "the only way to be here is if ExplicitGCInvokesConcurrent is set");
+
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    // In the doit() method we saved g1h->full_collections_completed()
+    // in the _full_collections_completed_before field. We have to
+    // wait until we observe that g1h->full_collections_completed()
+    // has increased by at least one. This can happen if a) we started
+    // a cycle and it completes, b) a cycle already in progress
+    // completes, or c) a Full GC happens.
+
+    // If the condition has already been reached, there's no point in
+    // actually taking the lock and doing the wait.
+    if (g1h->full_collections_completed() <=
+        _full_collections_completed_before) {
+      // The following is largely copied from CMS
+
+      Thread* thr = Thread::current();
+      assert(thr->is_Java_thread(), "invariant");
+      JavaThread* jt = (JavaThread*)thr;
+      ThreadToNativeFromVM native(jt);
+
+      MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+      while (g1h->full_collections_completed() <=
+             _full_collections_completed_before) {
+        FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
+      }
+    }
+  }
+}
+
 void VM_CGC_Operation::doit() {
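Note: together with increment_full_collections_completed() in g1CollectedHeap.cpp, this epilogue forms a count-and-wait handshake on FullGCCount_lock: the waiter samples the completed count before the GC, and any completion (the cycle it started, one already in progress, or a Full GC) advances the count and wakes it. A standalone sketch of the pattern (not HotSpot code), with std::mutex/std::condition_variable standing in for HotSpot's Monitor and all names illustrative:

#include <condition_variable>
#include <mutex>

// Sketch (not HotSpot code) of the handshake between the GC side,
// which bumps a completion counter and notifies, and a Java thread,
// which waits for the counter to move past a previously sampled value.
class FullGCTracker {
  std::mutex              _lock;   // plays the role of FullGCCount_lock
  std::condition_variable _cond;
  unsigned int            _completed = 0;

public:
  // GC side: called at the end of a Full GC or a concurrent cycle.
  void increment_completed() {
    std::lock_guard<std::mutex> guard(_lock);
    _completed += 1;
    _cond.notify_all();  // wake every waiter; each re-checks below
  }

  unsigned int completed() {
    std::lock_guard<std::mutex> guard(_lock);
    return _completed;
  }

  // Waiter side: block until the counter exceeds the sampled value.
  // The predicate loop guards against spurious wakeups and against
  // notifications for collections we are not waiting on.
  void wait_until_completed_exceeds(unsigned int count_before) {
    std::unique_lock<std::mutex> guard(_lock);
    _cond.wait(guard, [&] { return _completed > count_before; });
  }
};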
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp Fri Jul 16 10:09:15 2010 -0700
@@ -31,13 +31,12 @@
 //   - VM_G1PopRegionCollectionPause
 
 class VM_G1CollectFull: public VM_GC_Operation {
- private:
  public:
-  VM_G1CollectFull(int gc_count_before,
-                   GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before)
-  {
-    _gc_cause = gc_cause;
+  VM_G1CollectFull(unsigned int gc_count_before,
+                   unsigned int full_gc_count_before,
+                   GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before, full_gc_count_before) {
+    _gc_cause = cause;
   }
   ~VM_G1CollectFull() {}
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
@@ -67,12 +66,28 @@
 };
 
 class VM_G1IncCollectionPause: public VM_GC_Operation {
- public:
-  VM_G1IncCollectionPause(int gc_count_before,
-                          GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
-    VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
+private:
+  bool _should_initiate_conc_mark;
+  double _target_pause_time_ms;
+  unsigned int _full_collections_completed_before;
+public:
+  VM_G1IncCollectionPause(unsigned int gc_count_before,
+                          bool should_initiate_conc_mark,
+                          double target_pause_time_ms,
+                          GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before),
+      _full_collections_completed_before(0),
+      _should_initiate_conc_mark(should_initiate_conc_mark),
+      _target_pause_time_ms(target_pause_time_ms) {
+    guarantee(target_pause_time_ms > 0.0,
+              err_msg("target_pause_time_ms = %1.6lf should be positive",
+                      target_pause_time_ms));
+
+    _gc_cause = cause;
+  }
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
+  virtual void doit_epilogue();
   virtual const char* name() const {
     return "garbage-first incremental collection pause";
   }
--- a/src/share/vm/gc_implementation/includeDB_gc_g1 Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/gc_implementation/includeDB_gc_g1 Fri Jul 16 10:09:15 2010 -0700
@@ -367,4 +367,6 @@
 
 vm_operations_g1.cpp                    vm_operations_g1.hpp
 vm_operations_g1.cpp                    g1CollectedHeap.inline.hpp
+vm_operations_g1.cpp                    g1CollectorPolicy.hpp
+vm_operations_g1.cpp                    interfaceSupport.hpp
 vm_operations_g1.cpp                    isGCActiveMark.hpp
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp Fri Jul 16 10:09:15 2010 -0700
@@ -86,9 +86,7 @@
 
     _gc_locked = false;
 
-    if (full) {
-      _full_gc_count_before = full_gc_count_before;
-    }
+    _full_gc_count_before = full_gc_count_before;
    // In ParallelScavengeHeap::mem_allocate() collections can be
    // executed within a loop and _all_soft_refs_clear can be set
    // true after they have been cleared by a collection and another
--- a/src/share/vm/gc_interface/gcCause.cpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/gc_interface/gcCause.cpp Fri Jul 16 10:09:15 2010 -0700
@@ -78,6 +78,9 @@
     case _old_generation_too_full_to_scavenge:
       return "Old Generation Too Full To Scavenge";
 
+    case _g1_inc_collection_pause:
+      return "G1 Evacuation Pause";
+
     case _last_ditch_collection:
       return "Last ditch collection";
 
--- a/src/share/vm/runtime/mutexLocker.cpp Mon Jul 12 12:53:52 2010 -0700
+++ b/src/share/vm/runtime/mutexLocker.cpp Fri Jul 16 10:09:15 2010 -0700
@@ -159,6 +159,8 @@
   def(STS_init_lock              , Mutex,   leaf,        true );
   if (UseConcMarkSweepGC) {
     def(iCMS_lock                , Monitor, special,     true ); // CMS incremental mode start/stop notification
+  }
+  if (UseConcMarkSweepGC || UseG1GC) {
     def(FullGCCount_lock         , Monitor, leaf,        true ); // in support of ExplicitGCInvokesConcurrent
   }
   if (UseG1GC) {