--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Sep 16 13:45:55 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Sep 20 14:38:38 2010 -0700
@@ -72,7 +72,10 @@
 // </NEW PREDICTION>
 
 G1CollectorPolicy::G1CollectorPolicy() :
-  _parallel_gc_threads((ParallelGCThreads > 0) ? ParallelGCThreads : 1),
+  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
+                       ? ParallelGCThreads : 1),
+
+
   _n_pauses(0),
   _recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -1073,7 +1076,7 @@
 }
 
 double G1CollectorPolicy::avg_value (double* data) {
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     double ret = 0.0;
     for (uint i = 0; i < ParallelGCThreads; ++i)
       ret += data[i];
@@ -1084,7 +1087,7 @@
 }
 
 double G1CollectorPolicy::max_value (double* data) {
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     double ret = data[0];
     for (uint i = 1; i < ParallelGCThreads; ++i)
       if (data[i] > ret)
@@ -1096,7 +1099,7 @@
 }
 
 double G1CollectorPolicy::sum_of_values (double* data) {
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     double sum = 0.0;
     for (uint i = 0; i < ParallelGCThreads; i++)
       sum += data[i];
@@ -1110,7 +1113,7 @@
                                       double* data2) {
   double ret = data1[0] + data2[0];
 
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     for (uint i = 1; i < ParallelGCThreads; ++i) {
       double data = data1[i] + data2[i];
       if (data > ret)
@@ -1126,7 +1129,7 @@
 void G1CollectorPolicy::record_collection_pause_end() {
   double end_time_sec = os::elapsedTime();
   double elapsed_ms = _last_pause_time_ms;
-  bool parallel = ParallelGCThreads > 0;
+  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
   double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0;
   size_t rs_size =
     _cur_collection_pause_used_regions_at_start - collection_set_size();
@@ -1941,7 +1944,7 @@
     // Further, we're now always doing parallel collection. But I'm still
     // leaving this here as a placeholder for a more precise assertion later.
     // (DLD, 10/05.)
-    assert((true || ParallelGCThreads > 0) ||
+    assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
            _g1->evacuation_failed() ||
            recent_survival_rate <= 1.0, "Or bad frac");
     return recent_survival_rate;
@@ -1961,7 +1964,7 @@
     // Further, we're now always doing parallel collection. But I'm still
     // leaving this here as a placeholder for a more precise assertion later.
     // (DLD, 10/05.)
-    assert((true || ParallelGCThreads > 0) ||
+    assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
            last_survival_rate <= 1.0, "Or bad frac");
     return last_survival_rate;
   } else {
@@ -2121,7 +2124,7 @@
 }
 
 void G1CollectorPolicy::print_summary(PauseSummary* summary) const {
-  bool parallel = ParallelGCThreads > 0;
+  bool parallel = G1CollectedHeap::use_parallel_gc_threads();
   MainBodySummary* body_summary = summary->main_body_summary();
   if (summary->get_total_seq()->num() > 0) {
     print_summary_sd(0, "Evacuation Pauses", summary->get_total_seq());
@@ -2559,7 +2562,7 @@
     gclog_or_tty->print_cr(" clear marked regions + work1: %8.3f ms.",
                            (clear_marked_end - start)*1000.0);
   }
-  if (ParallelGCThreads > 0) {
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
     const size_t OverpartitionFactor = 4;
     const size_t MinWorkUnit = 8;
     const size_t WorkUnit =