1.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Nov 22 04:47:10 2011 -0500 1.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Aug 09 10:16:01 2011 -0700 1.3 @@ -1024,7 +1024,7 @@ 1.4 double total = 0.0; 1.5 LineBuffer buf(level); 1.6 buf.append("[%s (ms):", str); 1.7 - for (uint i = 0; i < ParallelGCThreads; ++i) { 1.8 + for (uint i = 0; i < no_of_gc_threads(); ++i) { 1.9 double val = data[i]; 1.10 if (val < min) 1.11 min = val; 1.12 @@ -1034,7 +1034,7 @@ 1.13 buf.append(" %3.1lf", val); 1.14 } 1.15 buf.append_and_print_cr(""); 1.16 - double avg = total / (double) ParallelGCThreads; 1.17 + double avg = total / (double) no_of_gc_threads(); 1.18 buf.append_and_print_cr(" Avg: %5.1lf, Min: %5.1lf, Max: %5.1lf, Diff: %5.1lf]", 1.19 avg, min, max, max - min); 1.20 } 1.21 @@ -1046,7 +1046,7 @@ 1.22 double total = 0.0; 1.23 LineBuffer buf(level); 1.24 buf.append("[%s :", str); 1.25 - for (uint i = 0; i < ParallelGCThreads; ++i) { 1.26 + for (uint i = 0; i < no_of_gc_threads(); ++i) { 1.27 double val = data[i]; 1.28 if (val < min) 1.29 min = val; 1.30 @@ -1056,7 +1056,7 @@ 1.31 buf.append(" %d", (int) val); 1.32 } 1.33 buf.append_and_print_cr(""); 1.34 - double avg = total / (double) ParallelGCThreads; 1.35 + double avg = total / (double) no_of_gc_threads(); 1.36 buf.append_and_print_cr(" Sum: %d, Avg: %d, Min: %d, Max: %d, Diff: %d]", 1.37 (int)total, (int)avg, (int)min, (int)max, (int)max - (int)min); 1.38 } 1.39 @@ -1076,10 +1076,10 @@ 1.40 double G1CollectorPolicy::avg_value(double* data) { 1.41 if (G1CollectedHeap::use_parallel_gc_threads()) { 1.42 double ret = 0.0; 1.43 - for (uint i = 0; i < ParallelGCThreads; ++i) { 1.44 + for (uint i = 0; i < no_of_gc_threads(); ++i) { 1.45 ret += data[i]; 1.46 } 1.47 - return ret / (double) ParallelGCThreads; 1.48 + return ret / (double) no_of_gc_threads(); 1.49 } else { 1.50 return data[0]; 1.51 } 1.52 @@ -1088,7 +1088,7 @@ 1.53 double G1CollectorPolicy::max_value(double* data) { 1.54 if 
(G1CollectedHeap::use_parallel_gc_threads()) { 1.55 double ret = data[0]; 1.56 - for (uint i = 1; i < ParallelGCThreads; ++i) { 1.57 + for (uint i = 1; i < no_of_gc_threads(); ++i) { 1.58 if (data[i] > ret) { 1.59 ret = data[i]; 1.60 } 1.61 @@ -1102,7 +1102,7 @@ 1.62 double G1CollectorPolicy::sum_of_values(double* data) { 1.63 if (G1CollectedHeap::use_parallel_gc_threads()) { 1.64 double sum = 0.0; 1.65 - for (uint i = 0; i < ParallelGCThreads; i++) { 1.66 + for (uint i = 0; i < no_of_gc_threads(); i++) { 1.67 sum += data[i]; 1.68 } 1.69 return sum; 1.70 @@ -1115,7 +1115,7 @@ 1.71 double ret = data1[0] + data2[0]; 1.72 1.73 if (G1CollectedHeap::use_parallel_gc_threads()) { 1.74 - for (uint i = 1; i < ParallelGCThreads; ++i) { 1.75 + for (uint i = 1; i < no_of_gc_threads(); ++i) { 1.76 double data = data1[i] + data2[i]; 1.77 if (data > ret) { 1.78 ret = data; 1.79 @@ -1128,7 +1128,7 @@ 1.80 // Anything below that is considered to be zero 1.81 #define MIN_TIMER_GRANULARITY 0.0000001 1.82 1.83 -void G1CollectorPolicy::record_collection_pause_end() { 1.84 +void G1CollectorPolicy::record_collection_pause_end(int no_of_gc_threads) { 1.85 double end_time_sec = os::elapsedTime(); 1.86 double elapsed_ms = _last_pause_time_ms; 1.87 bool parallel = G1CollectedHeap::use_parallel_gc_threads(); 1.88 @@ -1140,6 +1140,7 @@ 1.89 assert(cur_used_bytes == _g1->recalculate_used(), "It should!"); 1.90 bool last_pause_included_initial_mark = false; 1.91 bool update_stats = !_g1->evacuation_failed(); 1.92 + set_no_of_gc_threads(no_of_gc_threads); 1.93 1.94 #ifndef PRODUCT 1.95 if (G1YoungSurvRateVerbose) { 1.96 @@ -2304,6 +2305,7 @@ 1.97 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i); 1.98 // Back to zero for the claim value. 
1.99 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i, 1.100 + _g1->workers()->active_workers(), 1.101 HeapRegion::InitialClaimValue); 1.102 jint regions_added = parKnownGarbageCl.marked_regions_added(); 1.103 _hrSorted->incNumMarkedHeapRegions(regions_added); 1.104 @@ -2315,7 +2317,7 @@ 1.105 }; 1.106 1.107 void 1.108 -G1CollectorPolicy::record_concurrent_mark_cleanup_end() { 1.109 +G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) { 1.110 double start_sec; 1.111 if (G1PrintParCleanupStats) { 1.112 start_sec = os::elapsedTime(); 1.113 @@ -2331,10 +2333,27 @@ 1.114 1.115 if (G1CollectedHeap::use_parallel_gc_threads()) { 1.116 const size_t OverpartitionFactor = 4; 1.117 - const size_t MinWorkUnit = 8; 1.118 - const size_t WorkUnit = 1.119 - MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor), 1.120 - MinWorkUnit); 1.121 + size_t WorkUnit; 1.122 + // The use of MinChunkSize = 8 in the original code 1.123 + // causes some assertion failures when the total number of 1.124 + // regions is less than 8. The code here tries to fix that. 1.125 + // Should the original code also be fixed? 1.126 + if (no_of_gc_threads > 0) { 1.127 + const size_t MinWorkUnit = 1.128 + MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U); 1.129 + WorkUnit = 1.130 + MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor), 1.131 + MinWorkUnit); 1.132 + } else { 1.133 + assert(no_of_gc_threads > 0, 1.134 + "The active gc workers should be greater than 0"); 1.135 + // In a product build do something reasonable to avoid a crash. 1.136 + const size_t MinWorkUnit = 1.137 + MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U); 1.138 + WorkUnit = 1.139 + MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor), 1.140 + MinWorkUnit); 1.141 + } 1.142 _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(), 1.143 WorkUnit); 1.144 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,