Mon, 06 May 2013 09:16:14 +0200
8013791: G1: G1CollectorPolicy::initialize_flags() may set min_alignment > max_alignment
Summary: Make sure max alignment is at least as large as min alignment
Reviewed-by: johnc, jmasa, tschatzl
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

// Different defaults for different numbers of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _stop_world_start(0.0),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);

  int index = MIN2(_parallel_gc_threads - 1, 7);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be the pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
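
  // For example (illustrative values): if MaxGCPauseMillis is left at its
  // default, it is set to 200 above and GCPauseIntervalMillis then defaults
  // to 201, so max_gc_time = 0.2 and time_slice = 0.201. This gives the MMU
  // tracker the most permissive schedule that still keeps the invariant
  // pause time target < pause interval.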

  uintx confidence_perc = G1ConfidencePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (confidence_perc > 100) {
    confidence_perc = 100;
    warning("G1ConfidencePercent is set to a value that is too large, "
            "it's been updated to %u", confidence_perc);
  }
  _sigma = (double) confidence_perc / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
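
  // For example (illustrative): G1 defaults GCTimeRatio to 9 in
  // set_g1_gc_flags(), which works out to 100 * (1 / (1 + 9)) = 10,
  // i.e., a target GC overhead of at most 10% of total execution time.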

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
  set_max_alignment(MAX2(card_table_alignment, min_alignment()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}
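
// For example (illustrative): with -XX:G1HeapRegionSize=8m, min_alignment
// becomes 8M while the card table's alignment constraint may well be
// smaller, so without the MAX2 above max_alignment could end up below
// min_alignment, which is exactly the inconsistency this change guards
// against.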

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
  assert(G1NewSizePercent <= G1MaxNewSizePercent, "Min larger than max");
  assert(G1NewSizePercent > 0 && G1NewSizePercent < 100, "Min out of bounds");
  assert(G1MaxNewSizePercent > 0 && G1MaxNewSizePercent < 100, "Max out of bounds");

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
      _sizer_kind = SizerMaxAndNewSize;
      _adaptive_size = _min_desired_young_length != _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}

uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
  return MAX2(1U, default_value);
}

uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
  return MAX2(1U, default_value);
}

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  assert(new_number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxNewSizeOnly:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
      _max_desired_young_length = _min_desired_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}
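
// Informally, a candidate young length "fits" only if 1) the regions are
// actually available, 2) the predicted copy and young-other times stay
// within the pause target, and 3) the remaining free space can absorb the
// predicted copied bytes with a 2 * sigma safety margin.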

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                       uint base_min_length) {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  uint absolute_min_length = base_min_length + 1;
  uint desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  uint desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      uint base_min_length,
                                                      uint desired_min_length,
                                                      uint desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}
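
// For example (illustrative): if min_young_length = 5 fits and
// max_young_length = 40 does not, the loop first probes 5 + 17 = 22 and
// then keeps halving the bracket, converging on the largest eden length
// that still fits within the pause target.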

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}
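
// For example (illustrative): if the sampled lengths reach 1000 against a
// previous prediction of 900, the target is recomputed with a padded
// prediction of 1000 * 1100 / 1000 = 1100, so small further growth does not
// immediately force yet another recalculation.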

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _trace_gen1_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->clear();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start, so there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_gen0_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  record_heap_size_info_at_start();

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _trace_gen0_time_data.record_yield_time(yield_ms);
  }
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young()) {
      ergo_verbose5(ErgoConcCycles,
        "request concurrent cycle initiation",
        ergo_format_reason("occupancy higher than threshold")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
        "do not request concurrent cycle initiation",
        ergo_format_reason("still doing mixed collections")
        ergo_format_byte("occupancy")
        ergo_format_byte("allocation request")
        ergo_format_byte_perc("threshold")
        ergo_format_str("source"),
        cur_used_bytes,
        alloc_byte_size,
        marking_initiating_used_threshold,
        (double) InitiatingHeapOccupancyPercent,
        source);
    }
  }

  return false;
}
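
// For example (illustrative): with a 1 GB committed heap and the default
// InitiatingHeapOccupancyPercent of 45, a cycle is requested once non-young
// occupancy plus the pending allocation exceeds ~460 MB, and only while we
// are still doing young GCs.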

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
  double end_time_sec = os::elapsedTime();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
  size_t rs_size =
            _cur_collection_pause_used_regions_at_start - cset_region_length();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->print_cr("");
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    set_initiate_conc_mark_if_possible();
  }

  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                          end_time_sec, false);

  size_t freed_bytes =
    _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;

  double survival_fraction =
    (double)surviving_bytes/
    (double)_collection_set_bytes_used_before;

  if (update_stats) {
    _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
    // this is where we update the allocation rate of the application
    double app_time_ms =
      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the eden region number allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place we can safely ignore them here.
    uint regions_allocated = eden_cset_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
#ifndef PRODUCT
      // Dump info to allow post-facto debugging
      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
      gclog_or_tty->print_cr("-------------------------------------------");
      gclog_or_tty->print_cr("Recent GC Times (ms):");
      _recent_gc_times_ms->dump();
      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
      _recent_prev_end_times_for_all_gcs_sec->dump();
      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
      // In debug mode, terminate the JVM if the user wants to debug at this point.
      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
#endif // !PRODUCT
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }
  }
  bool new_in_marking_window = _in_marking_window;
  bool new_in_marking_window_im = false;
  if (during_initial_mark_pause()) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (_last_young_gc) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.

    if (!last_pause_included_initial_mark) {
      if (next_gc_should_be_mixed("start mixed GCs",
                                  "do not start mixed GCs")) {
        set_gcs_are_young(false);
      }
    } else {
      ergo_verbose0(ErgoMixedGCs,
                    "do not start mixed GCs",
                    ergo_format_reason("concurrent cycle is about to start"));
    }
    _last_young_gc = false;
  }

  if (!_last_gc_was_young) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.

    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      set_gcs_are_young(true);
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }

    size_t cards_scanned = _g1->cards_scanned();

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
      if (_last_gc_was_young) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (_last_gc_was_young) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t copied_bytes = surviving_bytes;
    double cost_per_byte_ms = 0.0;
    if (copied_bytes > 0) {
      cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
      if (_in_marking_window) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    double all_other_time_ms = pause_time_ms -
      (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
      + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());

    double young_other_time_ms = 0.0;
    if (young_cset_region_length() > 0) {
      young_other_time_ms =
        phase_times()->young_cset_choice_time_ms() +
        phase_times()->young_free_cset_time_ms();
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
                                          (double) young_cset_region_length());
    }
    double non_young_other_time_ms = 0.0;
    if (old_cset_region_length() > 0) {
      non_young_other_time_ms =
        phase_times()->non_young_cset_choice_time_ms() +
        phase_times()->non_young_free_cset_time_ms();

      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
                                            (double) old_cset_region_length());
    }

    double constant_other_time_ms = all_other_time_ms -
      (young_other_time_ms + non_young_other_time_ms);
    _constant_other_time_ms_seq->add(constant_other_time_ms);

    double survival_ratio = 0.0;
    if (_collection_set_bytes_used_before > 0) {
      survival_ratio = (double) _bytes_copied_during_gc /
                                 (double) _collection_set_bytes_used_before;
    }

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  _in_marking_window = new_in_marking_window;
  _in_marking_window_im = new_in_marking_window_im;
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
                               phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);

  _collectionSetChooser->verify();
}

#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes)                                  \
  byte_size_in_proper_unit((double)(bytes)),                    \
  proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::record_heap_size_info_at_start() {
  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

  _cur_collection_pause_used_at_start_bytes = _g1->used();
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();

  size_t eden_capacity_before_gc =
       (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_bytes_before_gc;

  _prev_eden_capacity = eden_capacity_before_gc;
}

void G1CollectorPolicy::print_heap_transition() {
  _g1->print_size_transition(gclog_or_tty,
    _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
}

void G1CollectorPolicy::print_detailed_heap_transition() {
  YoungList* young_list = _g1->young_list();
  size_t eden_bytes = young_list->eden_used_bytes();
  size_t survivor_bytes = young_list->survivor_used_bytes();
  size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  size_t used = _g1->used();
  size_t capacity = _g1->capacity();
  size_t eden_capacity =
    (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;

  gclog_or_tty->print_cr(
    "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
    "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
    "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
    EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
    EXT_SIZE_PARAMS(_eden_bytes_before_gc),
    EXT_SIZE_PARAMS(_prev_eden_capacity),
    EXT_SIZE_PARAMS(eden_bytes),
    EXT_SIZE_PARAMS(eden_capacity),
    EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
    EXT_SIZE_PARAMS(survivor_bytes),
    EXT_SIZE_PARAMS(used_before_gc),
    EXT_SIZE_PARAMS(_capacity_before_gc),
    EXT_SIZE_PARAMS(used),
    EXT_SIZE_PARAMS(capacity));
}

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    const int k_gy = 3, k_gr = 6;
    const double inc_k = 1.1, dec_k = 0.9;

    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}
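
// For example (illustrative): if the last update-RS time overshot goal_ms,
// the green zone shrinks by ~10% (dec_k) and, since yellow and red track it
// at 3x and 6x (k_gy, k_gr), the whole refinement schedule backs off,
// leaving more card processing to the mutators and the pause.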

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                size_t scanned_cards) {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
  if (gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  size_t bytes_to_copy;
  if (hr->is_marked())
    bytes_to_copy = hr->max_live_bytes();
  else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  }
  return bytes_to_copy;
}

double
G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                  bool for_young_gc) {
  size_t rs_length = hr->rem_set()->occupied();
  size_t card_num;

  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  if (for_young_gc) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void
G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
                                            uint survivor_cset_region_length) {
  _eden_cset_region_length     = eden_cset_region_length;
  _survivor_cset_region_length = survivor_cset_region_length;
  _old_cset_region_length      = 0;
}

void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

size_t G1CollectorPolicy::expansion_amount() {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double threshold = _gc_overhead_perc;
  if (recent_gc_overhead > threshold) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
    // expansion (unless that's all that's left.)
    const size_t min_expand_bytes = 1*M;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    ergo_verbose5(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("recent GC overhead higher than "
                                     "threshold after GC")
                  ergo_format_perc("recent GC overhead")
                  ergo_format_perc("threshold")
                  ergo_format_byte("uncommitted")
                  ergo_format_byte_perc("calculated expansion amount"),
                  recent_gc_overhead, threshold,
                  uncommitted_bytes,
                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);

    return expand_bytes;
  } else {
    return 0;
  }
}
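
// For example (illustrative): with 4 GB committed out of 10 GB reserved and
// G1ExpandByPercentOfAvailable at its default of 20, the candidate expansion
// is 20% of the 6 GB still uncommitted, i.e. ~1.2 GB, then clamped to the
// [1 MB, uncommitted] range by the MIN2/MAX2 chain above.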

void G1CollectorPolicy::print_tracing_info() const {
  _trace_gen0_time_data.print();
  _trace_gen1_time_data.print();
}

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

uint G1CollectorPolicy::max_regions(int purpose) {
  switch (purpose) {
    case GCAllocForSurvived:
      return _max_survivor_regions;
    case GCAllocForTenured:
      return REGIONS_UNLIMITED;
    default:
      ShouldNotReachHere();
      return REGIONS_UNLIMITED;
  };
}

void G1CollectorPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
void G1CollectorPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
                 (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
        HeapRegion::GrainWords * _max_survivor_regions);
}
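
// For example (illustrative): with a young target of 50 regions and the
// default SurvivorRatio of 8, ceil(50 / 8) reserves 7 survivor regions, and
// the tenuring threshold is then recomputed against that survivor space.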
1409 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
1410 GCCause::Cause gc_cause) {
1411 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1412 if (!during_cycle) {
1413 ergo_verbose1(ErgoConcCycles,
1414 "request concurrent cycle initiation",
1415 ergo_format_reason("requested by GC cause")
1416 ergo_format_str("GC cause"),
1417 GCCause::to_string(gc_cause));
1418 set_initiate_conc_mark_if_possible();
1419 return true;
1420 } else {
1421 ergo_verbose1(ErgoConcCycles,
1422 "do not request concurrent cycle initiation",
1423 ergo_format_reason("concurrent cycle already in progress")
1424 ergo_format_str("GC cause"),
1425 GCCause::to_string(gc_cause));
1426 return false;
1427 }
1428 }
1430 void
1431 G1CollectorPolicy::decide_on_conc_mark_initiation() {
1432 // We are about to decide on whether this pause will be an
1433 // initial-mark pause.
1435 // First, during_initial_mark_pause() should not be already set. We
1436 // will set it here if we have to. However, it should be cleared by
1437 // the end of the pause (it's only set for the duration of an
1438 // initial-mark pause).
1439 assert(!during_initial_mark_pause(), "pre-condition");
1441 if (initiate_conc_mark_if_possible()) {
1442 // We had noticed on a previous pause that the heap occupancy has
1443 // gone over the initiating threshold and we should start a
1444 // concurrent marking cycle. So we might initiate one.
1446 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1447 if (!during_cycle) {
1448 // The concurrent marking thread is not "during a cycle", i.e.,
1449 // it has completed the last one. So we can go ahead and
1450 // initiate a new cycle.
1452 set_during_initial_mark_pause();
1453 // We do not allow mixed GCs during marking.
1454 if (!gcs_are_young()) {
1455 set_gcs_are_young(true);
1456 ergo_verbose0(ErgoMixedGCs,
1457 "end mixed GCs",
1458 ergo_format_reason("concurrent cycle is about to start"));
1459 }
1461 // And we can now clear initiate_conc_mark_if_possible() as
1462 // we've already acted on it.
1463 clear_initiate_conc_mark_if_possible();
1465 ergo_verbose0(ErgoConcCycles,
1466 "initiate concurrent cycle",
1467 ergo_format_reason("concurrent cycle initiation requested"));
1468 } else {
1469 // The concurrent marking thread is still finishing up the
1470 // previous cycle. If we start one right now the two cycles will
1471 // overlap. In particular, the concurrent marking thread might
1472 // be in the process of clearing the next marking bitmap (which
1473 // we will use for the next cycle if we start one). Starting a
1474 // cycle now will be bad given that parts of the marking
1475 // information might get cleared by the marking thread. And we
1476 // cannot wait for the marking thread to finish the cycle as it
1477 // periodically yields while clearing the next marking bitmap
1478 // and, if it's in a yield point, it's waiting for us to
1479 // finish. So, at this point we will not start a cycle and we'll
1480 // let the concurrent marking thread complete the last one.
1481 ergo_verbose0(ErgoConcCycles,
1482 "do not initiate concurrent cycle",
1483 ergo_format_reason("concurrent cycle already in progress"));
1484 }
1485 }
1486 }
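// A condensed sketch of the handshake above, using plain booleans in place
// of the real policy and marking state (names are illustrative). The
// request flag is only consumed when the marking thread is idle; otherwise
// it stays set so that a later pause can act on it.
struct ExampleConcMarkDecision {
  bool _initiate_requested;   // set when occupancy crossed the threshold
  bool _during_initial_mark;  // set only for the duration of one pause

  void decide(bool marker_idle) {
    if (_initiate_requested && marker_idle) {
      _during_initial_mark = true;  // this pause becomes an initial-mark pause
      _initiate_requested = false;  // the request has been acted upon
    }
    // If the marker is still finishing the previous cycle we keep the
    // request pending rather than letting two cycles overlap.
  }
};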
1488 class KnownGarbageClosure: public HeapRegionClosure {
1489 G1CollectedHeap* _g1h;
1490 CollectionSetChooser* _hrSorted;
1492 public:
1493 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
1494 _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
1496 bool doHeapRegion(HeapRegion* r) {
1497 // We only include humongous regions in collection
1498 // sets when concurrent mark shows that their contained object is
1499 // unreachable.
1501 // Do we have any marking information for this region?
1502 if (r->is_marked()) {
1503 // We will skip any region that's currently used as an old GC
1504 // alloc region (we should not consider those for collection
1505 // before we fill them up).
1506 if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1507 _hrSorted->add_region(r);
1508 }
1509 }
1510 return false;
1511 }
1512 };
1514 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1515 G1CollectedHeap* _g1h;
1516 CSetChooserParUpdater _cset_updater;
1518 public:
1519 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1520 uint chunk_size) :
1521 _g1h(G1CollectedHeap::heap()),
1522 _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1524 bool doHeapRegion(HeapRegion* r) {
1525 // Do we have any marking information for this region?
1526 if (r->is_marked()) {
1527 // We will skip any region that's currently used as an old GC
1528 // alloc region (we should not consider those for collection
1529 // before we fill them up).
1530 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1531 _cset_updater.add_region(r);
1532 }
1533 }
1534 return false;
1535 }
1536 };
1538 class ParKnownGarbageTask: public AbstractGangTask {
1539 CollectionSetChooser* _hrSorted;
1540 uint _chunk_size;
1541 G1CollectedHeap* _g1;
1542 public:
1543 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
1544 AbstractGangTask("ParKnownGarbageTask"),
1545 _hrSorted(hrSorted), _chunk_size(chunk_size),
1546 _g1(G1CollectedHeap::heap()) { }
1548 void work(uint worker_id) {
1549 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1551 // Back to zero for the claim value.
1552 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
1553 _g1->workers()->active_workers(),
1554 HeapRegion::InitialClaimValue);
1555 }
1556 };
1558 void
1559 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
1560 _collectionSetChooser->clear();
1562 uint region_num = _g1->n_regions();
1563 if (G1CollectedHeap::use_parallel_gc_threads()) {
1564 const uint OverpartitionFactor = 4;
1565 uint WorkUnit;
1566 // The use of MinChunkSize = 8 in the original code
1567 // causes some assertion failures when the total number of
1568 // regions is less than 8. The code here tries to fix that.
1569 // Should the original code also be fixed?
1570 if (no_of_gc_threads > 0) {
1571 const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
1572 WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
1573 MinWorkUnit);
1574 } else {
1575 assert(no_of_gc_threads > 0,
1576 "The active gc workers should be greater than 0");
1577 // In a product build do something reasonable to avoid a crash.
1578 const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
1579 WorkUnit =
1580 MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
1581 MinWorkUnit);
1582 }
1583 _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
1584 WorkUnit);
1585 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, WorkUnit);
1587 _g1->workers()->run_task(&parKnownGarbageTask);
1589 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
1590 "sanity check");
1591 } else {
1592 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
1593 _g1->heap_region_iterate(&knownGarbagecl);
1594 }
1596 _collectionSetChooser->sort_regions();
1598 double end_sec = os::elapsedTime();
1599 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1600 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1601 _cur_mark_stop_world_time_ms += elapsed_time_ms;
1602 _prev_collection_pause_end_ms += elapsed_time_ms;
1603 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
1604 }
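// A standalone sketch of the work-unit sizing above (illustrative name).
// Note that region_num / n_threads is always at least
// region_num / (n_threads * OverpartitionFactor), so the outer MAX2
// resolves to the minimum and each worker ends up with roughly one chunk
// of the region array.
static uint example_work_unit(uint region_num, uint n_threads) {
  const uint OverpartitionFactor = 4;
  const uint min_work_unit = MAX2(region_num / n_threads, 1U);
  return MAX2(region_num / (n_threads * OverpartitionFactor), min_work_unit);
}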
1606 // Add the heap region at the head of the non-incremental collection set
1607 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1608 assert(_inc_cset_build_state == Active, "Precondition");
1609 assert(!hr->is_young(), "non-incremental add of young region");
1611 assert(!hr->in_collection_set(), "should not already be in the CSet");
1612 hr->set_in_collection_set(true);
1613 hr->set_next_in_collection_set(_collection_set);
1614 _collection_set = hr;
1615 _collection_set_bytes_used_before += hr->used();
1616 _g1->register_region_with_in_cset_fast_test(hr);
1617 size_t rs_length = hr->rem_set()->occupied();
1618 _recorded_rs_lengths += rs_length;
1619 _old_cset_region_length += 1;
1620 }
1622 // Initialize the per-collection-set information
1623 void G1CollectorPolicy::start_incremental_cset_building() {
1624 assert(_inc_cset_build_state == Inactive, "Precondition");
1626 _inc_cset_head = NULL;
1627 _inc_cset_tail = NULL;
1628 _inc_cset_bytes_used_before = 0;
1630 _inc_cset_max_finger = 0;
1631 _inc_cset_recorded_rs_lengths = 0;
1632 _inc_cset_recorded_rs_lengths_diffs = 0;
1633 _inc_cset_predicted_elapsed_time_ms = 0.0;
1634 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1635 _inc_cset_build_state = Active;
1636 }
1638 void G1CollectorPolicy::finalize_incremental_cset_building() {
1639 assert(_inc_cset_build_state == Active, "Precondition");
1640 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1642 // The two "main" fields, _inc_cset_recorded_rs_lengths and
1643 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
1644 // that adds a new region to the CSet. Further updates by the
1645 // concurrent refinement thread that samples the young RSet lengths
1646 // are accumulated in the *_diffs fields. Here we add the diffs to
1647 // the "main" fields.
1649 if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
1650 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
1651 } else {
1652 // This is defensive. The diff should in theory always be positive
1653 // as RSets can only grow between GCs. However, given that we
1654 // sample their size concurrently with other threads updating them
1655 // it's possible that we might get the wrong size back, which
1656 // could make the calculations somewhat inaccurate.
1657 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
1658 if (_inc_cset_recorded_rs_lengths >= diffs) {
1659 _inc_cset_recorded_rs_lengths -= diffs;
1660 } else {
1661 _inc_cset_recorded_rs_lengths = 0;
1662 }
1663 }
1664 _inc_cset_predicted_elapsed_time_ms +=
1665 _inc_cset_predicted_elapsed_time_ms_diffs;
1667 _inc_cset_recorded_rs_lengths_diffs = 0;
1668 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1669 }
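// A standalone sketch of the diff-accumulation scheme above (plain fields,
// illustrative names). A concurrent sampler adds deltas to a side field;
// the fold at the next safepoint applies them to the main field, clamping
// at zero in case concurrent sampling under-reported an RSet size.
struct ExampleRSetLengths {
  size_t  _recorded;  // updated only when regions are added to the CSet
  ssize_t _diffs;     // accumulated by the concurrent sampling thread

  void sample(size_t old_len, size_t new_len) {  // concurrent, no atomics
    _diffs += (ssize_t) new_len - (ssize_t) old_len;
  }

  void fold_at_safepoint() {                     // single-threaded
    if (_diffs >= 0) {
      _recorded += (size_t) _diffs;
    } else {
      size_t dec = (size_t) -_diffs;
      _recorded = (_recorded >= dec) ? (_recorded - dec) : 0;
    }
    _diffs = 0;
  }
};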
1671 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
1672 // This routine is used when:
1673 // * adding survivor regions to the incremental cset at the end of an
1674 // evacuation pause,
1675 // * adding the current allocation region to the incremental cset
1676 // when it is retired, and
1677 // * updating existing policy information for a region in the
1678 // incremental cset via young list RSet sampling.
1679 // Therefore this routine may be called at a safepoint by the
1680 // VM thread, or in-between safepoints by mutator threads (when
1681 // retiring the current allocation region) or a concurrent
1682 // refine thread (RSet sampling).
1684 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1685 size_t used_bytes = hr->used();
1686 _inc_cset_recorded_rs_lengths += rs_length;
1687 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
1688 _inc_cset_bytes_used_before += used_bytes;
1690 // Cache the values we have added to the aggregated information
1691 // in the heap region in case we have to remove this region from
1692 // the incremental collection set, or it is updated by the
1693 // RSet sampling code.
1694 hr->set_recorded_rs_length(rs_length);
1695 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
1696 }
1698 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
1699 size_t new_rs_length) {
1700 // Update the CSet information that is dependent on the new RS length
1701 assert(hr->is_young(), "Precondition");
1702 assert(!SafepointSynchronize::is_at_safepoint(),
1703 "should not be at a safepoint");
1705 // We could have updated _inc_cset_recorded_rs_lengths and
1706 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
1707 // that atomically, as this code is executed by a concurrent
1708 // refinement thread, potentially concurrently with a mutator thread
1709 // allocating a new region and also updating the same fields. To
1710 // avoid the atomic operations we accumulate these updates on two
1711 // separate fields (*_diffs) and we'll just add them to the "main"
1712 // fields at the start of a GC.
1714 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
1715 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
1716 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
1718 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
1719 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1720 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
1721 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
1723 hr->set_recorded_rs_length(new_rs_length);
1724 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1725 }
1727 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1728 assert(hr->is_young(), "invariant");
1729 assert(hr->young_index_in_cset() > -1, "should have already been set");
1730 assert(_inc_cset_build_state == Active, "Precondition");
1732 // We need to clear and then set the region's cached collection set
1733 // information here (before the region gets added to the collection
1734 // set). An individual heap region's cached values are calculated,
1735 // aggregated with the policy collection set info, and cached in the
1736 // heap region here (initially) and (subsequently) updated by the
1737 // Young List sampling code.
1739 size_t rs_length = hr->rem_set()->occupied();
1740 add_to_incremental_cset_info(hr, rs_length);
1742 HeapWord* hr_end = hr->end();
1743 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
1745 assert(!hr->in_collection_set(), "invariant");
1746 hr->set_in_collection_set(true);
1747 assert(hr->next_in_collection_set() == NULL, "invariant");
1749 _g1->register_region_with_in_cset_fast_test(hr);
1750 }
1752 // Add the region at the RHS of the incremental cset
1753 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
1754 // We should only ever be appending survivors at the end of a pause
1755 assert(hr->is_survivor(), "Logic");
1757 // Do the 'common' stuff
1758 add_region_to_incremental_cset_common(hr);
1760 // Now add the region at the right hand side
1761 if (_inc_cset_tail == NULL) {
1762 assert(_inc_cset_head == NULL, "invariant");
1763 _inc_cset_head = hr;
1764 } else {
1765 _inc_cset_tail->set_next_in_collection_set(hr);
1766 }
1767 _inc_cset_tail = hr;
1768 }
1770 // Add the region to the LHS of the incremental cset
1771 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
1772 // Survivors should be added to the RHS at the end of a pause
1773 assert(!hr->is_survivor(), "Logic");
1775 // Do the 'common' stuff
1776 add_region_to_incremental_cset_common(hr);
1778 // Add the region at the left hand side
1779 hr->set_next_in_collection_set(_inc_cset_head);
1780 if (_inc_cset_head == NULL) {
1781 assert(_inc_cset_tail == NULL, "Invariant");
1782 _inc_cset_tail = hr;
1783 }
1784 _inc_cset_head = hr;
1785 }
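// The incremental CSet built above is a singly linked list threaded
// through the regions themselves. A minimal standalone sketch with an
// illustrative node type: eden regions retired by mutators are pushed on
// the left, survivors are appended on the right at the end of a pause,
// giving [Newly Young Regions ++ Survivors] overall.
struct ExampleNode {
  ExampleNode* _next;
};

struct ExampleIncCSet {
  ExampleNode* _head;
  ExampleNode* _tail;

  void add_lhs(ExampleNode* n) {  // retired eden region
    n->_next = _head;
    if (_head == NULL) {
      _tail = n;
    }
    _head = n;
  }

  void add_rhs(ExampleNode* n) {  // survivor appended at end of pause
    n->_next = NULL;
    if (_tail == NULL) {
      _head = n;
    } else {
      _tail->_next = n;
    }
    _tail = n;
  }
};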
1787 #ifndef PRODUCT
1788 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
1789 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
1791 st->print_cr("\nCollection_set:");
1792 HeapRegion* csr = list_head;
1793 while (csr != NULL) {
1794 HeapRegion* next = csr->next_in_collection_set();
1795 assert(csr->in_collection_set(), "bad CS");
1796     st->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
1797 HR_FORMAT_PARAMS(csr),
1798 csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
1799 csr->age_in_surv_rate_group_cond());
1800 csr = next;
1801 }
1802 }
1803 #endif // !PRODUCT
1805 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) {
1806 // Returns the given amount of reclaimable bytes (that represents
1807 // the amount of reclaimable space still to be collected) as a
1808 // percentage of the current heap capacity.
1809 size_t capacity_bytes = _g1->capacity();
1810 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1811 }
1813 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1814 const char* false_action_str) {
1815 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1816 if (cset_chooser->is_empty()) {
1817 ergo_verbose0(ErgoMixedGCs,
1818 false_action_str,
1819 ergo_format_reason("candidate old regions not available"));
1820 return false;
1821 }
1823 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1824 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1825 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1826 double threshold = (double) G1HeapWastePercent;
1827 if (reclaimable_perc <= threshold) {
1828 ergo_verbose4(ErgoMixedGCs,
1829 false_action_str,
1830 ergo_format_reason("reclaimable percentage not over threshold")
1831 ergo_format_region("candidate old regions")
1832 ergo_format_byte_perc("reclaimable")
1833 ergo_format_perc("threshold"),
1834 cset_chooser->remaining_regions(),
1835 reclaimable_bytes,
1836 reclaimable_perc, threshold);
1837 return false;
1838 }
1840 ergo_verbose4(ErgoMixedGCs,
1841 true_action_str,
1842 ergo_format_reason("candidate old regions available")
1843 ergo_format_region("candidate old regions")
1844 ergo_format_byte_perc("reclaimable")
1845 ergo_format_perc("threshold"),
1846 cset_chooser->remaining_regions(),
1847 reclaimable_bytes,
1848 reclaimable_perc, threshold);
1849 return true;
1850 }
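// A compact sketch of the waste-threshold test above (illustrative name).
// Mixed GCs continue only while the space still reclaimable from the
// candidate old regions exceeds G1HeapWastePercent of the committed heap.
static bool example_should_continue_mixed(size_t reclaimable_bytes,
                                          size_t capacity_bytes,
                                          uintx waste_percent) {
  double reclaimable_perc =
    (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
  // e.g. 512 MB reclaimable in an 8 GB heap is 6.25% of capacity
  return reclaimable_perc > (double) waste_percent;
}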
1852 uint G1CollectorPolicy::calc_min_old_cset_length() {
1853 // The min old CSet region bound is based on the maximum desired
1854 // number of mixed GCs after a cycle. I.e., even if some old regions
1855 // look expensive, we should add them to the CSet anyway to make
1856 // sure we go through the available old regions in no more than the
1857 // maximum desired number of mixed GCs.
1858 //
1859 // The calculation is based on the number of marked regions we added
1860 // to the CSet chooser in the first place, not how many remain, so
1861 // that the result is the same during all mixed GCs that follow a cycle.
1863 const size_t region_num = (size_t) _collectionSetChooser->length();
1864 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1865 size_t result = region_num / gc_num;
1866 // emulate ceiling
1867 if (result * gc_num < region_num) {
1868 result += 1;
1869 }
1870 return (uint) result;
1871 }
1873 uint G1CollectorPolicy::calc_max_old_cset_length() {
1874 // The max old CSet region bound is based on the threshold expressed
1875 // as a percentage of the heap size. I.e., it should bound the
1876 // number of old regions added to the CSet irrespective of how many
1877 // of them are available.
1879 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1880 const size_t region_num = g1h->n_regions();
1881 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
1882 size_t result = region_num * perc / 100;
1883 // emulate ceiling
1884 if (100 * result < region_num * perc) {
1885 result += 1;
1886 }
1887 return (uint) result;
1888 }
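// Both calculations above emulate ceil() in integer arithmetic. A
// standalone sketch of the idiom (illustrative name): take floor(a / b)
// and bump it by one whenever the division truncated.
static size_t example_ceil_div(size_t a, size_t b) {
  size_t result = a / b;
  if (result * b < a) {  // same as (a + b - 1) / b, but cannot overflow
    result += 1;
  }
  return result;
}
// e.g. 10 candidate regions spread over a target of 4 mixed GCs:
// 10 / 4 = 2 and 2 * 4 < 10, so the per-GC minimum becomes 3.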
1891 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
1892 double young_start_time_sec = os::elapsedTime();
1894 YoungList* young_list = _g1->young_list();
1895 finalize_incremental_cset_building();
1897 guarantee(target_pause_time_ms > 0.0,
1898 err_msg("target_pause_time_ms = %1.6lf should be positive",
1899 target_pause_time_ms));
1900 guarantee(_collection_set == NULL, "Precondition");
1902 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1903 double predicted_pause_time_ms = base_time_ms;
1904 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
1906 ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1907 "start choosing CSet",
1908 ergo_format_size("_pending_cards")
1909 ergo_format_ms("predicted base time")
1910 ergo_format_ms("remaining time")
1911 ergo_format_ms("target pause time"),
1912 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
1914   _last_gc_was_young = gcs_are_young();
1916 if (_last_gc_was_young) {
1917 _trace_gen0_time_data.increment_young_collection_count();
1918 } else {
1919 _trace_gen0_time_data.increment_mixed_collection_count();
1920 }
1922   // The young list is laid out with the survivor regions from the
1923   // previous pause appended to the RHS of the young list, i.e.
1924 // [Newly Young Regions ++ Survivors from last pause].
1926 uint survivor_region_length = young_list->survivor_length();
1927 uint eden_region_length = young_list->length() - survivor_region_length;
1928 init_cset_region_lengths(eden_region_length, survivor_region_length);
1930 HeapRegion* hr = young_list->first_survivor_region();
1931 while (hr != NULL) {
1932 assert(hr->is_survivor(), "badly formed young list");
1933 hr->set_young();
1934 hr = hr->get_next_young_region();
1935 }
1937 // Clear the fields that point to the survivor list - they are all young now.
1938 young_list->clear_survivors();
1940 _collection_set = _inc_cset_head;
1941 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
1942 time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
1943 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
1945 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
1946 "add young regions to CSet",
1947 ergo_format_region("eden")
1948 ergo_format_region("survivors")
1949 ergo_format_ms("predicted young region time"),
1950 eden_region_length, survivor_region_length,
1951 _inc_cset_predicted_elapsed_time_ms);
1953 // The number of recorded young regions is the incremental
1954 // collection set's current size
1955 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
1957 double young_end_time_sec = os::elapsedTime();
1958 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
1960 // Set the start of the non-young choice time.
1961 double non_young_start_time_sec = young_end_time_sec;
1963 if (!gcs_are_young()) {
1964 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1965 cset_chooser->verify();
1966 const uint min_old_cset_length = calc_min_old_cset_length();
1967 const uint max_old_cset_length = calc_max_old_cset_length();
1969 uint expensive_region_num = 0;
1970 bool check_time_remaining = adaptive_young_list_length();
1972 HeapRegion* hr = cset_chooser->peek();
1973 while (hr != NULL) {
1974 if (old_cset_region_length() >= max_old_cset_length) {
1975 // Added maximum number of old regions to the CSet.
1976 ergo_verbose2(ErgoCSetConstruction,
1977 "finish adding old regions to CSet",
1978 ergo_format_reason("old CSet region num reached max")
1979 ergo_format_region("old")
1980 ergo_format_region("max"),
1981 old_cset_region_length(), max_old_cset_length);
1982 break;
1983 }
1986 // Stop adding regions if the remaining reclaimable space is
1987 // not above G1HeapWastePercent.
1988 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1989 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1990 double threshold = (double) G1HeapWastePercent;
1991 if (reclaimable_perc <= threshold) {
1992 // We've added enough old regions that the amount of uncollected
1993 // reclaimable space is at or below the waste threshold. Stop
1994 // adding old regions to the CSet.
1995 ergo_verbose5(ErgoCSetConstruction,
1996 "finish adding old regions to CSet",
1997 ergo_format_reason("reclaimable percentage not over threshold")
1998 ergo_format_region("old")
1999 ergo_format_region("max")
2000 ergo_format_byte_perc("reclaimable")
2001 ergo_format_perc("threshold"),
2002 old_cset_region_length(),
2003 max_old_cset_length,
2004 reclaimable_bytes,
2005 reclaimable_perc, threshold);
2006 break;
2007 }
2009 double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
2010 if (check_time_remaining) {
2011 if (predicted_time_ms > time_remaining_ms) {
2012 // Too expensive for the current CSet.
2014 if (old_cset_region_length() >= min_old_cset_length) {
2015 // We have added the minimum number of old regions to the CSet,
2016 // we are done with this CSet.
2017 ergo_verbose4(ErgoCSetConstruction,
2018 "finish adding old regions to CSet",
2019 ergo_format_reason("predicted time is too high")
2020 ergo_format_ms("predicted time")
2021 ergo_format_ms("remaining time")
2022 ergo_format_region("old")
2023 ergo_format_region("min"),
2024 predicted_time_ms, time_remaining_ms,
2025 old_cset_region_length(), min_old_cset_length);
2026 break;
2027 }
2029 // We'll add it anyway given that we haven't reached the
2030 // minimum number of old regions.
2031 expensive_region_num += 1;
2032 }
2033 } else {
2034 if (old_cset_region_length() >= min_old_cset_length) {
2035 // In the non-auto-tuning case, we'll finish adding regions
2036 // to the CSet if we reach the minimum.
2037 ergo_verbose2(ErgoCSetConstruction,
2038 "finish adding old regions to CSet",
2039 ergo_format_reason("old CSet region num reached min")
2040 ergo_format_region("old")
2041 ergo_format_region("min"),
2042 old_cset_region_length(), min_old_cset_length);
2043 break;
2044 }
2045 }
2047 // We will add this region to the CSet.
2048 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
2049 predicted_pause_time_ms += predicted_time_ms;
2050 cset_chooser->remove_and_move_to_next(hr);
2051 _g1->old_set_remove(hr);
2052 add_old_region_to_cset(hr);
2054 hr = cset_chooser->peek();
2055 }
2056 if (hr == NULL) {
2057 ergo_verbose0(ErgoCSetConstruction,
2058 "finish adding old regions to CSet",
2059 ergo_format_reason("candidate old regions not available"));
2060 }
2062 if (expensive_region_num > 0) {
2063 // We print the information once here at the end, predicated on
2064 // whether we added any apparently expensive regions or not, to
2065 // avoid generating output per region.
2066 ergo_verbose4(ErgoCSetConstruction,
2067 "added expensive regions to CSet",
2068 ergo_format_reason("old CSet region num not reached min")
2069 ergo_format_region("old")
2070 ergo_format_region("expensive")
2071 ergo_format_region("min")
2072 ergo_format_ms("remaining time"),
2073 old_cset_region_length(),
2074 expensive_region_num,
2075 min_old_cset_length,
2076 time_remaining_ms);
2077 }
2079 cset_chooser->verify();
2080 }
2082 stop_incremental_cset_building();
2084 ergo_verbose5(ErgoCSetConstruction,
2085 "finish choosing CSet",
2086 ergo_format_region("eden")
2087 ergo_format_region("survivors")
2088 ergo_format_region("old")
2089 ergo_format_ms("predicted pause time")
2090 ergo_format_ms("target pause time"),
2091 eden_region_length, survivor_region_length,
2092 old_cset_region_length(),
2093 predicted_pause_time_ms, target_pause_time_ms);
2095 double non_young_end_time_sec = os::elapsedTime();
2096 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2097 }
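// A condensed sketch of the old-region selection loop above (illustrative
// types; the real loop also honors the reclaimable-space threshold, the
// non-adaptive mode, and ergo tracing). Candidates arrive sorted by GC
// efficiency and are taken greedily while their predicted time fits the
// remaining pause budget, always taking at least min_len regions and
// never more than max_len.
static uint example_select_old_regions(const double* predicted_ms,
                                       uint candidates,
                                       uint min_len,
                                       uint max_len,
                                       double budget_ms) {
  uint taken = 0;
  for (uint i = 0; i < candidates && taken < max_len; i++) {
    if (predicted_ms[i] > budget_ms && taken >= min_len) {
      break;  // too expensive, and the minimum has already been met
    }
    budget_ms = MAX2(budget_ms - predicted_ms[i], 0.0);
    taken += 1;
  }
  return taken;
}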
2099 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
2100   if (TraceGen0Time) {
2101 _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2102 }
2103 }
2105 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
2106   if (TraceGen0Time) {
2107 _all_yield_times_ms.add(yield_time_ms);
2108 }
2109 }
2111 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2112   if (TraceGen0Time) {
2113 _total.add(pause_time_ms);
2114 _other.add(pause_time_ms - phase_times->accounted_time_ms());
2115 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2116 _parallel.add(phase_times->cur_collection_par_time_ms());
2117 _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
2118 _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
2119 _update_rs.add(phase_times->average_last_update_rs_time());
2120 _scan_rs.add(phase_times->average_last_scan_rs_time());
2121 _obj_copy.add(phase_times->average_last_obj_copy_time());
2122 _termination.add(phase_times->average_last_termination_time());
2124 double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
2125 phase_times->average_last_satb_filtering_times_ms() +
2126 phase_times->average_last_update_rs_time() +
2127 phase_times->average_last_scan_rs_time() +
2128       phase_times->average_last_obj_copy_time() +
2129       phase_times->average_last_termination_time();
2131 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
2132 _parallel_other.add(parallel_other_time);
2133 _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2134 }
2135 }
2137 void TraceGen0TimeData::increment_young_collection_count() {
2138   if (TraceGen0Time) {
2139 ++_young_pause_num;
2140 }
2141 }
2143 void TraceGen0TimeData::increment_mixed_collection_count() {
2144   if (TraceGen0Time) {
2145 ++_mixed_pause_num;
2146 }
2147 }
2149 void TraceGen0TimeData::print_summary(const char* str,
2150 const NumberSeq* seq) const {
2151 double sum = seq->sum();
2152 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2153 str, sum / 1000.0, seq->avg());
2154 }
2156 void TraceGen0TimeData::print_summary_sd(const char* str,
2157 const NumberSeq* seq) const {
2158 print_summary(str, seq);
2159   gclog_or_tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2160 "(num", seq->num(), seq->sd(), seq->maximum());
2161 }
2163 void TraceGen0TimeData::print() const {
2164 if (!TraceGen0Time) {
2165 return;
2166 }
2168 gclog_or_tty->print_cr("ALL PAUSES");
2169 print_summary_sd(" Total", &_total);
2170 gclog_or_tty->print_cr("");
2171 gclog_or_tty->print_cr("");
2172 gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);
2173 gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);
2174 gclog_or_tty->print_cr("");
2176 gclog_or_tty->print_cr("EVACUATION PAUSES");
2178 if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2179 gclog_or_tty->print_cr("none");
2180 } else {
2181 print_summary_sd(" Evacuation Pauses", &_total);
2182 print_summary(" Root Region Scan Wait", &_root_region_scan_wait);
2183 print_summary(" Parallel Time", &_parallel);
2184 print_summary(" Ext Root Scanning", &_ext_root_scan);
2185 print_summary(" SATB Filtering", &_satb_filtering);
2186 print_summary(" Update RS", &_update_rs);
2187 print_summary(" Scan RS", &_scan_rs);
2188 print_summary(" Object Copy", &_obj_copy);
2189 print_summary(" Termination", &_termination);
2190 print_summary(" Parallel Other", &_parallel_other);
2191 print_summary(" Clear CT", &_clear_ct);
2192 print_summary(" Other", &_other);
2193 }
2194 gclog_or_tty->print_cr("");
2196 gclog_or_tty->print_cr("MISC");
2197 print_summary_sd(" Stop World", &_all_stop_world_times_ms);
2198 print_summary_sd(" Yields", &_all_yield_times_ms);
2199 }
2201 void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
2202 if (TraceGen1Time) {
2203 _all_full_gc_times.add(full_gc_time_ms);
2204 }
2205 }
2207 void TraceGen1TimeData::print() const {
2208 if (!TraceGen1Time) {
2209 return;
2210 }
2212 if (_all_full_gc_times.num() > 0) {
2213 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2214 _all_full_gc_times.num(),
2215 _all_full_gc_times.sum() / 1000.0);
2216     gclog_or_tty->print_cr(" (avg = %8.2f ms).", _all_full_gc_times.avg());
2217 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
2218 _all_full_gc_times.sd(),
2219 _all_full_gc_times.maximum());
2220 }
2221 }