Wed, 10 Apr 2013 10:57:34 -0700
8010780: G1: Eden occupancy/capacity output wrong after a full GC
Summary: Move the calculation and recording of eden capacity to the start of a GC and print a detailed heap transition for full GCs.
Reviewed-by: tschatzl, jmasa
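With this change a full GC prints the same detailed heap transition as young/mixed pauses, using the eden capacity recorded at the start of the GC. An illustrative example of the line produced by print_detailed_heap_transition() (values invented, not from an actual run):

   [Eden: 512.0M(512.0M)->0.0B(512.0M) Survivors: 0.0B->0.0B Heap: 1024.0M(2048.0M)->324.0M(2048.0M)]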
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

// Different defaults for different numbers of GC threads.
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results.

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _stop_world_start(0.0),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);

  int index = MIN2(_parallel_gc_threads - 1, 7);

  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be the pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
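  // The MMU tracker enforces the pause target as a minimum mutator
  // utilization: within any time_slice-second window we should spend at
  // most max_gc_time seconds paused in GC.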
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);

  uintx confidence_perc = G1ConfidencePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (confidence_perc > 100) {
    confidence_perc = 100;
    warning("G1ConfidencePercent is set to a value that is too large, "
            "it's been updated to %u", confidence_perc);
  }
  _sigma = (double) confidence_perc / 100.0;

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
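  // For example, with the GCTimeRatio of 9 that set_g1_gc_flags() installs
  // by default, this yields a GC overhead target of
  // 100 * (1 / (1 + 9)) = 10 percent.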

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
  assert(G1NewSizePercent <= G1MaxNewSizePercent, "Min larger than max");
  assert(G1NewSizePercent > 0 && G1NewSizePercent < 100, "Min out of bounds");
  assert(G1MaxNewSizePercent > 0 && G1MaxNewSizePercent < 100, "Max out of bounds");

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
      _sizer_kind = SizerMaxAndNewSize;
      _adaptive_size = _min_desired_young_length == _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}

uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1NewSizePercent) / 100;
  return MAX2(1U, default_value);
}

uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1MaxNewSizePercent) / 100;
  return MAX2(1U, default_value);
}

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  assert(new_number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxNewSizeOnly:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
      _max_desired_young_length = _min_desired_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info.
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);
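  // e.g., with -XX:G1ReservePercent=10 (so _reserve_factor == 0.10) and a
  // 1000-region heap, 100 regions are kept in reserve.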

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                     uint base_min_length) {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
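      // alloc_rate_ms is in eden regions per millisecond (see
      // record_collection_pause_end()), so the product below is the number
      // of eden regions we expect to have allocated by the time the next
      // pause is allowed to happen.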
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  uint absolute_min_length = base_min_length + 1;
  uint desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  uint desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      uint base_min_length,
                                                      uint desired_min_length,
                                                      uint desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _trace_gen1_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->clear();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. So, no point in calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_gen0_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  record_heap_size_info_at_start();

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                           mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _trace_gen0_time_data.record_yield_time(yield_ms);
  }
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
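
  // Note that only non-young occupancy is compared against the threshold:
  // young regions are expected to be evacuated by the next pause, so they
  // do not count toward the occupancy that should trigger marking.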
  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young()) {
      ergo_verbose5(ErgoConcCycles,
                    "request concurrent cycle initiation",
                    ergo_format_reason("occupancy higher than threshold")
                    ergo_format_byte("occupancy")
                    ergo_format_byte("allocation request")
                    ergo_format_byte_perc("threshold")
                    ergo_format_str("source"),
                    cur_used_bytes,
                    alloc_byte_size,
                    marking_initiating_used_threshold,
                    (double) InitiatingHeapOccupancyPercent,
                    source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
                    "do not request concurrent cycle initiation",
                    ergo_format_reason("still doing mixed collections")
                    ergo_format_byte("occupancy")
                    ergo_format_byte("allocation request")
                    ergo_format_byte_perc("threshold")
                    ergo_format_str("source"),
                    cur_used_bytes,
                    alloc_byte_size,
                    marking_initiating_used_threshold,
                    (double) InitiatingHeapOccupancyPercent,
                    source);
    }
  }

  return false;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
  double end_time_sec = os::elapsedTime();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
  size_t rs_size =
            _cur_collection_pause_used_regions_at_start - cset_region_length();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->print_cr("");
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    set_initiate_conc_mark_if_possible();
  }

  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
                          end_time_sec, false);

  size_t freed_bytes =
    _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;

  double survival_fraction =
    (double)surviving_bytes/
    (double)_collection_set_bytes_used_before;

  if (update_stats) {
    _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
    // this is where we update the allocation rate of the application
    double app_time_ms =
      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the eden region number allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects that are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place, we can safely ignore them here.
    uint regions_allocated = eden_cset_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
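    // Ratio of recent GC pause time to total elapsed time over the same
    // window; nominally in [0.0, 1.0] and clipped back into that range
    // below if floating-point drift takes it out.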
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
#ifndef PRODUCT
      // Dump info to allow post-facto debugging
      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
      gclog_or_tty->print_cr("-------------------------------------------");
      gclog_or_tty->print_cr("Recent GC Times (ms):");
      _recent_gc_times_ms->dump();
      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
      _recent_prev_end_times_for_all_gcs_sec->dump();
      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
      // In debug mode, terminate the JVM if the user wants to debug at this point.
      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
#endif // !PRODUCT
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }
  }
  bool new_in_marking_window = _in_marking_window;
  bool new_in_marking_window_im = false;
  if (during_initial_mark_pause()) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (_last_young_gc) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.

    if (!last_pause_included_initial_mark) {
      if (next_gc_should_be_mixed("start mixed GCs",
                                  "do not start mixed GCs")) {
        set_gcs_are_young(false);
      }
    } else {
      ergo_verbose0(ErgoMixedGCs,
                    "do not start mixed GCs",
                    ergo_format_reason("concurrent cycle is about to start"));
    }
    _last_young_gc = false;
  }

  if (!_last_gc_was_young) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.

    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      set_gcs_are_young(true);
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }

    size_t cards_scanned = _g1->cards_scanned();

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
      if (_last_gc_was_young) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (_last_gc_was_young) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t copied_bytes = surviving_bytes;
    double cost_per_byte_ms = 0.0;
    if (copied_bytes > 0) {
      cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
      if (_in_marking_window) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    double all_other_time_ms = pause_time_ms -
      (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
      + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());

    double young_other_time_ms = 0.0;
    if (young_cset_region_length() > 0) {
      young_other_time_ms =
        phase_times()->young_cset_choice_time_ms() +
        phase_times()->young_free_cset_time_ms();
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
                                          (double) young_cset_region_length());
    }
    double non_young_other_time_ms = 0.0;
    if (old_cset_region_length() > 0) {
      non_young_other_time_ms =
        phase_times()->non_young_cset_choice_time_ms() +
        phase_times()->non_young_free_cset_time_ms();

      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
                                            (double) old_cset_region_length());
    }

    double constant_other_time_ms = all_other_time_ms -
      (young_other_time_ms + non_young_other_time_ms);
    _constant_other_time_ms_seq->add(constant_other_time_ms);

    double survival_ratio = 0.0;
    if (_collection_set_bytes_used_before > 0) {
      survival_ratio = (double) _bytes_copied_during_gc /
                                   (double) _collection_set_bytes_used_before;
    }

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  _in_marking_window = new_in_marking_window;
  _in_marking_window_im = new_in_marking_window_im;
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
                               phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);

  _collectionSetChooser->verify();
}

#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes)                                  \
  byte_size_in_proper_unit((double)(bytes)),                    \
  proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::record_heap_size_info_at_start() {
  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

  _cur_collection_pause_used_at_start_bytes = _g1->used();
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
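
  // The eden capacity is the young target length minus the space already
  // taken up by the survivor regions. Recording it here, at the start of
  // the GC, is what makes the eden occupancy/capacity output correct after
  // a full GC as well (see the summary above).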
  size_t eden_capacity_before_gc =
     (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_bytes_before_gc;

  _prev_eden_capacity = eden_capacity_before_gc;
}

void G1CollectorPolicy::print_heap_transition() {
  _g1->print_size_transition(gclog_or_tty,
    _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
}

void G1CollectorPolicy::print_detailed_heap_transition() {
  YoungList* young_list = _g1->young_list();
  size_t eden_bytes = young_list->eden_used_bytes();
  size_t survivor_bytes = young_list->survivor_used_bytes();
  size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  size_t used = _g1->used();
  size_t capacity = _g1->capacity();
  size_t eden_capacity =
    (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;

  gclog_or_tty->print_cr(
    "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
    "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
    "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
    EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
    EXT_SIZE_PARAMS(_eden_bytes_before_gc),
    EXT_SIZE_PARAMS(_prev_eden_capacity),
    EXT_SIZE_PARAMS(eden_bytes),
    EXT_SIZE_PARAMS(eden_capacity),
    EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
    EXT_SIZE_PARAMS(survivor_bytes),
    EXT_SIZE_PARAMS(used_before_gc),
    EXT_SIZE_PARAMS(_capacity_before_gc),
    EXT_SIZE_PARAMS(used),
    EXT_SIZE_PARAMS(capacity));
}

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    const int k_gy = 3, k_gr = 6;
    const double inc_k = 1.1, dec_k = 0.9;
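
    // The yellow and red zones are kept at fixed multiples (3x and 6x) of
    // the green zone, which itself grows by ~10% or shrinks by ~10% per
    // adjustment depending on whether the update goal was met (see below).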
    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k); // Can become 0, that's OK. That would mean mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                size_t scanned_cards) {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
  if (gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  size_t bytes_to_copy;
  if (hr->is_marked()) {
    bytes_to_copy = hr->max_live_bytes();
  } else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  }
  return bytes_to_copy;
}

double
G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                  bool for_young_gc) {
  size_t rs_length = hr->rem_set()->occupied();
  size_t card_num;

  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  if (for_young_gc) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void
G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
                                            uint survivor_cset_region_length) {
  _eden_cset_region_length = eden_cset_region_length;
  _survivor_cset_region_length = survivor_cset_region_length;
  _old_cset_region_length = 0;
}

void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

size_t G1CollectorPolicy::expansion_amount() {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double threshold = _gc_overhead_perc;
  if (recent_gc_overhead > threshold) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
    // expansion (unless that's all that's left.)
    const size_t min_expand_bytes = 1*M;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);

    ergo_verbose5(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("recent GC overhead higher than "
                                     "threshold after GC")
                  ergo_format_perc("recent GC overhead")
                  ergo_format_perc("threshold")
                  ergo_format_byte("uncommitted")
                  ergo_format_byte_perc("calculated expansion amount"),
                  recent_gc_overhead, threshold,
                  uncommitted_bytes,
                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);

    return expand_bytes;
  } else {
    return 0;
  }
}

void G1CollectorPolicy::print_tracing_info() const {
  _trace_gen0_time_data.print();
  _trace_gen1_time_data.print();
}

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

#ifndef PRODUCT
// for debugging, bit of a hack...
static char*
region_num_to_mbs(int length) {
  static char buffer[64];
  double bytes = (double) (length * HeapRegion::GrainBytes);
  double mbs = bytes / (double) (1024 * 1024);
  sprintf(buffer, "%7.2lfMB", mbs);
  return buffer;
}
#endif // PRODUCT

uint G1CollectorPolicy::max_regions(int purpose) {
  switch (purpose) {
    case GCAllocForSurvived:
      return _max_survivor_regions;
    case GCAllocForTenured:
      return REGIONS_UNLIMITED;
    default:
      ShouldNotReachHere();
      return REGIONS_UNLIMITED;
  }
}

void G1CollectorPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
void G1CollectorPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
                 (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
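
  // HeapRegion::GrainWords * _max_survivor_regions is the desired survivor
  // capacity in words; the age table picks the highest tenuring threshold
  // whose expected survivors fit within that capacity.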
  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
        HeapRegion::GrainWords * _max_survivor_regions);
}

bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
                                                      GCCause::Cause gc_cause) {
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    ergo_verbose1(ErgoConcCycles,
                  "request concurrent cycle initiation",
                  ergo_format_reason("requested by GC cause")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    set_initiate_conc_mark_if_possible();
    return true;
  } else {
    ergo_verbose1(ErgoConcCycles,
                  "do not request concurrent cycle initiation",
                  ergo_format_reason("concurrent cycle already in progress")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    return false;
  }
}
1441 void
1442 G1CollectorPolicy::decide_on_conc_mark_initiation() {
1443 // We are about to decide on whether this pause will be an
1444 // initial-mark pause.
1446 // First, during_initial_mark_pause() should not be already set. We
1447 // will set it here if we have to. However, it should be cleared by
1448 // the end of the pause (it's only set for the duration of an
1449 // initial-mark pause).
1450 assert(!during_initial_mark_pause(), "pre-condition");
1452 if (initiate_conc_mark_if_possible()) {
1453 // We had noticed on a previous pause that the heap occupancy has
1454 // gone over the initiating threshold and we should start a
1455 // concurrent marking cycle. So we might initiate one.
1457 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1458 if (!during_cycle) {
1459 // The concurrent marking thread is not "during a cycle", i.e.,
1460 // it has completed the last one. So we can go ahead and
1461 // initiate a new cycle.
1463 set_during_initial_mark_pause();
1464 // We do not allow mixed GCs during marking.
1465 if (!gcs_are_young()) {
1466 set_gcs_are_young(true);
1467 ergo_verbose0(ErgoMixedGCs,
1468 "end mixed GCs",
1469 ergo_format_reason("concurrent cycle is about to start"));
1470 }
1472 // And we can now clear initiate_conc_mark_if_possible() as
1473 // we've already acted on it.
1474 clear_initiate_conc_mark_if_possible();
1476 ergo_verbose0(ErgoConcCycles,
1477 "initiate concurrent cycle",
1478 ergo_format_reason("concurrent cycle initiation requested"));
1479 } else {
1480 // The concurrent marking thread is still finishing up the
1481 // previous cycle. If we started one right now the two cycles
1482 // would overlap. In particular, the concurrent marking thread might
1483 // be in the process of clearing the next marking bitmap (which
1484 // we will use for the next cycle if we start one). Starting a
1485 // cycle now would be bad given that parts of the marking
1486 // information might get cleared by the marking thread. And we
1487 // cannot wait for the marking thread to finish the cycle as it
1488 // periodically yields while clearing the next marking bitmap
1489 // and, if it's in a yield point, it's waiting for us to
1490 // finish. So, at this point we will not start a cycle and we'll
1491 // let the concurrent marking thread complete the last one.
1492 ergo_verbose0(ErgoConcCycles,
1493 "do not initiate concurrent cycle",
1494 ergo_format_reason("concurrent cycle already in progress"));
1495 }
1496 }
1497 }
1499 class KnownGarbageClosure: public HeapRegionClosure {
1500 G1CollectedHeap* _g1h;
1501 CollectionSetChooser* _hrSorted;
1503 public:
1504 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
1505 _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
1507 bool doHeapRegion(HeapRegion* r) {
1508 // We only include humongous regions in collection
1509 // sets when concurrent mark shows that their contained object is
1510 // unreachable.
1512 // Do we have any marking information for this region?
1513 if (r->is_marked()) {
1514 // We will skip any region that's currently used as an old GC
1515 // alloc region (we should not consider those for collection
1516 // before we fill them up).
1517 if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1518 _hrSorted->add_region(r);
1519 }
1520 }
1521 return false;
1522 }
1523 };
1525 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1526 G1CollectedHeap* _g1h;
1527 CSetChooserParUpdater _cset_updater;
1529 public:
1530 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1531 uint chunk_size) :
1532 _g1h(G1CollectedHeap::heap()),
1533 _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1535 bool doHeapRegion(HeapRegion* r) {
1536 // Do we have any marking information for this region?
1537 if (r->is_marked()) {
1538 // We will skip any region that's currently used as an old GC
1539 // alloc region (we should not consider those for collection
1540 // before we fill them up).
1541 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1542 _cset_updater.add_region(r);
1543 }
1544 }
1545 return false;
1546 }
1547 };
1549 class ParKnownGarbageTask: public AbstractGangTask {
1550 CollectionSetChooser* _hrSorted;
1551 uint _chunk_size;
1552 G1CollectedHeap* _g1;
1553 public:
1554 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
1555 AbstractGangTask("ParKnownGarbageTask"),
1556 _hrSorted(hrSorted), _chunk_size(chunk_size),
1557 _g1(G1CollectedHeap::heap()) { }
1559 void work(uint worker_id) {
1560 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1562 // Back to zero for the claim value.
1563 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
1564 _g1->workers()->active_workers(),
1565 HeapRegion::InitialClaimValue);
1566 }
1567 };
1569 void
1570 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
1571 _collectionSetChooser->clear();
1573 uint region_num = _g1->n_regions();
1574 if (G1CollectedHeap::use_parallel_gc_threads()) {
1575 const uint OverpartitionFactor = 4;
1576 uint WorkUnit;
1577 // The use of MinChunkSize = 8 in the original code
1578 // causes some assertion failures when the total number of
1579 // regions is less than 8. The code here tries to fix that.
1580 // Should the original code also be fixed?
1581 if (no_of_gc_threads > 0) {
1582 const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
1583 WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
1584 MinWorkUnit);
1585 } else {
1586 assert(no_of_gc_threads > 0,
1587 "The active gc workers should be greater than 0");
1588 // In a product build do something reasonable to avoid a crash.
1589 const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
1590 WorkUnit =
1591 MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
1592 MinWorkUnit);
1593 }
1594 _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
1595 WorkUnit);
1596 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
1597 (int) WorkUnit);
1598 _g1->workers()->run_task(&parKnownGarbageTask);
1600 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
1601 "sanity check");
1602 } else {
1603 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
1604 _g1->heap_region_iterate(&knownGarbagecl);
1605 }
1607 _collectionSetChooser->sort_regions();
1609 double end_sec = os::elapsedTime();
1610 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1611 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1612 _cur_mark_stop_world_time_ms += elapsed_time_ms;
1613 _prev_collection_pause_end_ms += elapsed_time_ms;
1614 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
1615 }
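// A worked sketch of the chunk sizing above (hypothetical helper, same
// constants): with region_num = 1000 and 8 GC threads,
// MinWorkUnit = MAX2(1000 / 8, 1) = 125 and
// WorkUnit = MAX2(1000 / (8 * 4), 125) = MAX2(31, 125) = 125,
// i.e. the per-thread minimum dominates the overpartitioned chunk size.
static uint sketch_work_unit(uint region_num, uint n_threads) {
  const uint OverpartitionFactor = 4;
  const uint MinWorkUnit = MAX2(region_num / n_threads, 1U);
  return MAX2(region_num / (n_threads * OverpartitionFactor), MinWorkUnit);
}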
1617 // Add the heap region at the head of the non-incremental collection set
1618 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1619 assert(_inc_cset_build_state == Active, "Precondition");
1620 assert(!hr->is_young(), "non-incremental add of young region");
1622 assert(!hr->in_collection_set(), "should not already be in the CSet");
1623 hr->set_in_collection_set(true);
1624 hr->set_next_in_collection_set(_collection_set);
1625 _collection_set = hr;
1626 _collection_set_bytes_used_before += hr->used();
1627 _g1->register_region_with_in_cset_fast_test(hr);
1628 size_t rs_length = hr->rem_set()->occupied();
1629 _recorded_rs_lengths += rs_length;
1630 _old_cset_region_length += 1;
1631 }
1633 // Initialize the per-collection-set information
1634 void G1CollectorPolicy::start_incremental_cset_building() {
1635 assert(_inc_cset_build_state == Inactive, "Precondition");
1637 _inc_cset_head = NULL;
1638 _inc_cset_tail = NULL;
1639 _inc_cset_bytes_used_before = 0;
1641 _inc_cset_max_finger = 0;
1642 _inc_cset_recorded_rs_lengths = 0;
1643 _inc_cset_recorded_rs_lengths_diffs = 0;
1644 _inc_cset_predicted_elapsed_time_ms = 0.0;
1645 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1646 _inc_cset_build_state = Active;
1647 }
1649 void G1CollectorPolicy::finalize_incremental_cset_building() {
1650 assert(_inc_cset_build_state == Active, "Precondition");
1651 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1653 // The two "main" fields, _inc_cset_recorded_rs_lengths and
1654 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
1655 // that adds a new region to the CSet. Further updates by the
1656 // concurrent refinement thread that samples the young RSet lengths
1657 // are accumulated in the *_diffs fields. Here we add the diffs to
1658 // the "main" fields.
1660 if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
1661 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
1662 } else {
1663 // This is defensive. In theory the diff should always be positive
1664 // as RSets can only grow between GCs. However, given that we
1665 // sample their size concurrently with other threads updating them
1666 // it's possible that we might get the wrong size back, which
1667 // could make the calculations somewhat inaccurate.
1668 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
1669 if (_inc_cset_recorded_rs_lengths >= diffs) {
1670 _inc_cset_recorded_rs_lengths -= diffs;
1671 } else {
1672 _inc_cset_recorded_rs_lengths = 0;
1673 }
1674 }
1675 _inc_cset_predicted_elapsed_time_ms +=
1676 _inc_cset_predicted_elapsed_time_ms_diffs;
1678 _inc_cset_recorded_rs_lengths_diffs = 0;
1679 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1680 }
1682 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
1683 // This routine is used when:
1684 // * adding survivor regions to the incremental cset at the end of an
1685 // evacuation pause,
1686 // * adding the current allocation region to the incremental cset
1687 // when it is retired, and
1688 // * updating existing policy information for a region in the
1689 // incremental cset via young list RSet sampling.
1690 // Therefore this routine may be called at a safepoint by the
1691 // VM thread, or in-between safepoints by mutator threads (when
1692 // retiring the current allocation region) or a concurrent
1693 // refine thread (RSet sampling).
1695 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1696 size_t used_bytes = hr->used();
1697 _inc_cset_recorded_rs_lengths += rs_length;
1698 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
1699 _inc_cset_bytes_used_before += used_bytes;
1701 // Cache the values we have added to the aggregated information
1702 // in the heap region, in case we have to remove this region from
1703 // the incremental collection set, or it is updated by the
1704 // RSet sampling code.
1705 hr->set_recorded_rs_length(rs_length);
1706 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
1707 }
1709 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
1710 size_t new_rs_length) {
1711 // Update the CSet information that is dependent on the new RS length
1712 assert(hr->is_young(), "Precondition");
1713 assert(!SafepointSynchronize::is_at_safepoint(),
1714 "should not be at a safepoint");
1716 // We could have updated _inc_cset_recorded_rs_lengths and
1717 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
1718 // that atomically, as this code is executed by a concurrent
1719 // refinement thread, potentially concurrently with a mutator thread
1720 // allocating a new region and also updating the same fields. To
1721 // avoid the atomic operations we accumulate these updates on two
1722 // separate fields (*_diffs) and we'll just add them to the "main"
1723 // fields at the start of a GC.
1725 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
1726 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
1727 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
1729 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
1730 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1731 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
1732 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
1734 hr->set_recorded_rs_length(new_rs_length);
1735 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1736 }
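// A minimal sketch of the accumulate-then-fold pattern described above
// (hypothetical names): concurrent updates land in a separate diff field
// and only the safepoint code folds them into the main counter, so
// neither side needs atomic read-modify-write operations.
struct SketchSampledCounter {
  size_t  _main;   // read and folded only at safepoints
  ssize_t _diffs;  // accumulated by the concurrent sampling thread

  void concurrent_update(ssize_t delta) { _diffs += delta; }

  void fold_at_safepoint() {
    ssize_t folded = (ssize_t) _main + _diffs;
    _main = folded > 0 ? (size_t) folded : 0;  // clamp defensively, as above
    _diffs = 0;
  }
};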
1738 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1739 assert(hr->is_young(), "invariant");
1740 assert(hr->young_index_in_cset() > -1, "should have already been set");
1741 assert(_inc_cset_build_state == Active, "Precondition");
1743 // We need to clear and set the cached recorded/predicted collection set
1744 // information in the heap region here (before the region gets added
1745 // to the collection set). An individual heap region's cached values
1746 // are calculated, aggregated with the policy collection set info,
1747 // and cached in the heap region here (initially) and (subsequently)
1748 // by the Young List sampling code.
1750 size_t rs_length = hr->rem_set()->occupied();
1751 add_to_incremental_cset_info(hr, rs_length);
1753 HeapWord* hr_end = hr->end();
1754 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
1756 assert(!hr->in_collection_set(), "invariant");
1757 hr->set_in_collection_set(true);
1758 assert(hr->next_in_collection_set() == NULL, "invariant");
1760 _g1->register_region_with_in_cset_fast_test(hr);
1761 }
1763 // Add the region at the RHS of the incremental cset
1764 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
1765 // We should only ever be appending survivors at the end of a pause
1766 assert(hr->is_survivor(), "Logic");
1768 // Do the 'common' stuff
1769 add_region_to_incremental_cset_common(hr);
1771 // Now add the region at the right hand side
1772 if (_inc_cset_tail == NULL) {
1773 assert(_inc_cset_head == NULL, "invariant");
1774 _inc_cset_head = hr;
1775 } else {
1776 _inc_cset_tail->set_next_in_collection_set(hr);
1777 }
1778 _inc_cset_tail = hr;
1779 }
1781 // Add the region to the LHS of the incremental cset
1782 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
1783 // Survivors should be added to the RHS at the end of a pause
1784 assert(!hr->is_survivor(), "Logic");
1786 // Do the 'common' stuff
1787 add_region_to_incremental_cset_common(hr);
1789 // Add the region at the left hand side
1790 hr->set_next_in_collection_set(_inc_cset_head);
1791 if (_inc_cset_head == NULL) {
1792 assert(_inc_cset_tail == NULL, "Invariant");
1793 _inc_cset_tail = hr;
1794 }
1795 _inc_cset_head = hr;
1796 }
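// A small sketch of the resulting list shape (hypothetical types): eden
// regions are prepended on the LHS as they are retired while survivors
// are appended on the RHS at the end of a pause, so the incremental CSet
// reads [ newest eden .. oldest eden | survivors from the last pause ].
struct SketchRegion {
  SketchRegion* _next;
};
struct SketchIncCSet {
  SketchRegion* _head;
  SketchRegion* _tail;

  void add_lhs(SketchRegion* r) {   // eden: prepend at the head
    r->_next = _head;
    if (_head == NULL) { _tail = r; }
    _head = r;
  }
  void add_rhs(SketchRegion* r) {   // survivor: append at the tail
    r->_next = NULL;
    if (_tail == NULL) { _head = r; } else { _tail->_next = r; }
    _tail = r;
  }
};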
1798 #ifndef PRODUCT
1799 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
1800 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
1802 st->print_cr("\nCollection_set:");
1803 HeapRegion* csr = list_head;
1804 while (csr != NULL) {
1805 HeapRegion* next = csr->next_in_collection_set();
1806 assert(csr->in_collection_set(), "bad CS");
1807 st->print_cr(" "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
1808 HR_FORMAT_PARAMS(csr),
1809 csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
1810 csr->age_in_surv_rate_group_cond());
1811 csr = next;
1812 }
1813 }
1814 #endif // !PRODUCT
1816 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) {
1817 // Returns the given amount of reclaimable bytes (that represents
1818 // the amount of reclaimable space still to be collected) as a
1819 // percentage of the current heap capacity.
1820 size_t capacity_bytes = _g1->capacity();
1821 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1822 }
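// A worked instance of the percentage above (hypothetical figures): with
// a 10 GB heap and 512 MB still reclaimable,
// 536870912 * 100.0 / 10737418240 = 5.0%, which next_gc_should_be_mixed()
// then compares against G1HeapWastePercent.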
1824 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1825 const char* false_action_str) {
1826 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1827 if (cset_chooser->is_empty()) {
1828 ergo_verbose0(ErgoMixedGCs,
1829 false_action_str,
1830 ergo_format_reason("candidate old regions not available"));
1831 return false;
1832 }
1834 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1835 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1836 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1837 double threshold = (double) G1HeapWastePercent;
1838 if (reclaimable_perc <= threshold) {
1839 ergo_verbose4(ErgoMixedGCs,
1840 false_action_str,
1841 ergo_format_reason("reclaimable percentage not over threshold")
1842 ergo_format_region("candidate old regions")
1843 ergo_format_byte_perc("reclaimable")
1844 ergo_format_perc("threshold"),
1845 cset_chooser->remaining_regions(),
1846 reclaimable_bytes,
1847 reclaimable_perc, threshold);
1848 return false;
1849 }
1851 ergo_verbose4(ErgoMixedGCs,
1852 true_action_str,
1853 ergo_format_reason("candidate old regions available")
1854 ergo_format_region("candidate old regions")
1855 ergo_format_byte_perc("reclaimable")
1856 ergo_format_perc("threshold"),
1857 cset_chooser->remaining_regions(),
1858 reclaimable_bytes,
1859 reclaimable_perc, threshold);
1860 return true;
1861 }
1863 uint G1CollectorPolicy::calc_min_old_cset_length() {
1864 // The min old CSet region bound is based on the maximum desired
1865 // number of mixed GCs after a cycle. I.e., even if some old regions
1866 // look expensive, we should add them to the CSet anyway to make
1867 // sure we go through the available old regions in no more than the
1868 // maximum desired number of mixed GCs.
1869 //
1870 // The calculation is based on the number of marked regions we added
1871 // to the CSet chooser in the first place, not how many remain, so
1872 // that the result is the same during all mixed GCs that follow a cycle.
1874 const size_t region_num = (size_t) _collectionSetChooser->length();
1875 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1876 size_t result = region_num / gc_num;
1877 // emulate ceiling
1878 if (result * gc_num < region_num) {
1879 result += 1;
1880 }
1881 return (uint) result;
1882 }
1884 uint G1CollectorPolicy::calc_max_old_cset_length() {
1885 // The max old CSet region bound is based on the threshold expressed
1886 // as a percentage of the heap size. I.e., it should bound the
1887 // number of old regions added to the CSet irrespective of how many
1888 // of them are available.
1890 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1891 const size_t region_num = g1h->n_regions();
1892 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
1893 size_t result = region_num * perc / 100;
1894 // emulate ceiling
1895 if (100 * result < region_num * perc) {
1896 result += 1;
1897 }
1898 return (uint) result;
1899 }
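// Both bounds above emulate ceil() in integer arithmetic (hypothetical
// helper): e.g. 100 candidate regions with G1MixedGCCountTarget = 8 give
// 100 / 8 = 12 with a remainder, so the per-GC minimum rounds up to 13.
static size_t sketch_div_ceil(size_t num, size_t den) {
  size_t result = num / den;
  if (result * den < num) {  // a remainder was truncated: round up
    result += 1;
  }
  return result;
}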
1902 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
1903 double young_start_time_sec = os::elapsedTime();
1905 YoungList* young_list = _g1->young_list();
1906 finalize_incremental_cset_building();
1908 guarantee(target_pause_time_ms > 0.0,
1909 err_msg("target_pause_time_ms = %1.6lf should be positive",
1910 target_pause_time_ms));
1911 guarantee(_collection_set == NULL, "Precondition");
1913 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1914 double predicted_pause_time_ms = base_time_ms;
1915 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
1917 ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1918 "start choosing CSet",
1919 ergo_format_size("_pending_cards")
1920 ergo_format_ms("predicted base time")
1921 ergo_format_ms("remaining time")
1922 ergo_format_ms("target pause time"),
1923 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
1925 _last_gc_was_young = gcs_are_young();
1927 if (_last_gc_was_young) {
1928 _trace_gen0_time_data.increment_young_collection_count();
1929 } else {
1930 _trace_gen0_time_data.increment_mixed_collection_count();
1931 }
1933 // The young list is laid out so that the survivor regions from the
1934 // previous pause are appended to the RHS of the young list, i.e.
1935 // [Newly Young Regions ++ Survivors from last pause].
1937 uint survivor_region_length = young_list->survivor_length();
1938 uint eden_region_length = young_list->length() - survivor_region_length;
1939 init_cset_region_lengths(eden_region_length, survivor_region_length);
1941 HeapRegion* hr = young_list->first_survivor_region();
1942 while (hr != NULL) {
1943 assert(hr->is_survivor(), "badly formed young list");
1944 hr->set_young();
1945 hr = hr->get_next_young_region();
1946 }
1948 // Clear the fields that point to the survivor list - they are all young now.
1949 young_list->clear_survivors();
1951 _collection_set = _inc_cset_head;
1952 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
1953 time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
1954 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
1956 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
1957 "add young regions to CSet",
1958 ergo_format_region("eden")
1959 ergo_format_region("survivors")
1960 ergo_format_ms("predicted young region time"),
1961 eden_region_length, survivor_region_length,
1962 _inc_cset_predicted_elapsed_time_ms);
1964 // The number of recorded young regions is the incremental
1965 // collection set's current size
1966 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
1968 double young_end_time_sec = os::elapsedTime();
1969 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
1971 // Set the start of the non-young choice time.
1972 double non_young_start_time_sec = young_end_time_sec;
1974 if (!gcs_are_young()) {
1975 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1976 cset_chooser->verify();
1977 const uint min_old_cset_length = calc_min_old_cset_length();
1978 const uint max_old_cset_length = calc_max_old_cset_length();
1980 uint expensive_region_num = 0;
1981 bool check_time_remaining = adaptive_young_list_length();
1983 HeapRegion* hr = cset_chooser->peek();
1984 while (hr != NULL) {
1985 if (old_cset_region_length() >= max_old_cset_length) {
1986 // Added maximum number of old regions to the CSet.
1987 ergo_verbose2(ErgoCSetConstruction,
1988 "finish adding old regions to CSet",
1989 ergo_format_reason("old CSet region num reached max")
1990 ergo_format_region("old")
1991 ergo_format_region("max"),
1992 old_cset_region_length(), max_old_cset_length);
1993 break;
1994 }
1997 // Stop adding regions if the remaining reclaimable space is
1998 // not above G1HeapWastePercent.
1999 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
2000 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2001 double threshold = (double) G1HeapWastePercent;
2002 if (reclaimable_perc <= threshold) {
2003 // We've added enough old regions that the amount of uncollected
2004 // reclaimable space is at or below the waste threshold. Stop
2005 // adding old regions to the CSet.
2006 ergo_verbose5(ErgoCSetConstruction,
2007 "finish adding old regions to CSet",
2008 ergo_format_reason("reclaimable percentage not over threshold")
2009 ergo_format_region("old")
2010 ergo_format_region("max")
2011 ergo_format_byte_perc("reclaimable")
2012 ergo_format_perc("threshold"),
2013 old_cset_region_length(),
2014 max_old_cset_length,
2015 reclaimable_bytes,
2016 reclaimable_perc, threshold);
2017 break;
2018 }
2020 double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
2021 if (check_time_remaining) {
2022 if (predicted_time_ms > time_remaining_ms) {
2023 // Too expensive for the current CSet.
2025 if (old_cset_region_length() >= min_old_cset_length) {
2026 // We have added the minimum number of old regions to the CSet,
2027 // we are done with this CSet.
2028 ergo_verbose4(ErgoCSetConstruction,
2029 "finish adding old regions to CSet",
2030 ergo_format_reason("predicted time is too high")
2031 ergo_format_ms("predicted time")
2032 ergo_format_ms("remaining time")
2033 ergo_format_region("old")
2034 ergo_format_region("min"),
2035 predicted_time_ms, time_remaining_ms,
2036 old_cset_region_length(), min_old_cset_length);
2037 break;
2038 }
2040 // We'll add it anyway given that we haven't reached the
2041 // minimum number of old regions.
2042 expensive_region_num += 1;
2043 }
2044 } else {
2045 if (old_cset_region_length() >= min_old_cset_length) {
2046 // In the non-auto-tuning case, we'll finish adding regions
2047 // to the CSet if we reach the minimum.
2048 ergo_verbose2(ErgoCSetConstruction,
2049 "finish adding old regions to CSet",
2050 ergo_format_reason("old CSet region num reached min")
2051 ergo_format_region("old")
2052 ergo_format_region("min"),
2053 old_cset_region_length(), min_old_cset_length);
2054 break;
2055 }
2056 }
2058 // We will add this region to the CSet.
2059 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
2060 predicted_pause_time_ms += predicted_time_ms;
2061 cset_chooser->remove_and_move_to_next(hr);
2062 _g1->old_set_remove(hr);
2063 add_old_region_to_cset(hr);
2065 hr = cset_chooser->peek();
2066 }
2067 if (hr == NULL) {
2068 ergo_verbose0(ErgoCSetConstruction,
2069 "finish adding old regions to CSet",
2070 ergo_format_reason("candidate old regions not available"));
2071 }
2073 if (expensive_region_num > 0) {
2074 // We print the information once here at the end, predicated on
2075 // whether we added any apparently expensive regions or not, to
2076 // avoid generating output per region.
2077 ergo_verbose4(ErgoCSetConstruction,
2078 "added expensive regions to CSet",
2079 ergo_format_reason("old CSet region num not reached min")
2080 ergo_format_region("old")
2081 ergo_format_region("expensive")
2082 ergo_format_region("min")
2083 ergo_format_ms("remaining time"),
2084 old_cset_region_length(),
2085 expensive_region_num,
2086 min_old_cset_length,
2087 time_remaining_ms);
2088 }
2090 cset_chooser->verify();
2091 }
2093 stop_incremental_cset_building();
2095 ergo_verbose5(ErgoCSetConstruction,
2096 "finish choosing CSet",
2097 ergo_format_region("eden")
2098 ergo_format_region("survivors")
2099 ergo_format_region("old")
2100 ergo_format_ms("predicted pause time")
2101 ergo_format_ms("target pause time"),
2102 eden_region_length, survivor_region_length,
2103 old_cset_region_length(),
2104 predicted_pause_time_ms, target_pause_time_ms);
2106 double non_young_end_time_sec = os::elapsedTime();
2107 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2108 }
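// A condensed sketch of the old-region budgeting loop above, over an
// array of per-region predicted times (hypothetical inputs; the waste
// threshold check and the non-adaptive case are omitted): stop at the max
// length or once the pause-time budget is spent, but keep taking regions
// until the min length is reached even if they individually look too
// expensive.
static uint sketch_select_old_regions(const double* predicted_ms,
                                      uint candidates,
                                      uint min_len, uint max_len,
                                      double time_remaining_ms) {
  uint taken = 0;
  for (uint i = 0; i < candidates && taken < max_len; i += 1) {
    if (predicted_ms[i] > time_remaining_ms && taken >= min_len) {
      break;  // budget exhausted and the minimum quota is already met
    }
    time_remaining_ms = MAX2(time_remaining_ms - predicted_ms[i], 0.0);
    taken += 1;
  }
  return taken;
}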
2110 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
2111 if (TraceGen0Time) {
2112 _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2113 }
2114 }
2116 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
2117 if (TraceGen0Time) {
2118 _all_yield_times_ms.add(yield_time_ms);
2119 }
2120 }
2122 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2123 if (TraceGen0Time) {
2124 _total.add(pause_time_ms);
2125 _other.add(pause_time_ms - phase_times->accounted_time_ms());
2126 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2127 _parallel.add(phase_times->cur_collection_par_time_ms());
2128 _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
2129 _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
2130 _update_rs.add(phase_times->average_last_update_rs_time());
2131 _scan_rs.add(phase_times->average_last_scan_rs_time());
2132 _obj_copy.add(phase_times->average_last_obj_copy_time());
2133 _termination.add(phase_times->average_last_termination_time());
2135 double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
2136 phase_times->average_last_satb_filtering_times_ms() +
2137 phase_times->average_last_update_rs_time() +
2138 phase_times->average_last_scan_rs_time() +
2139 phase_times->average_last_obj_copy_time() +
2140 phase_times->average_last_termination_time();
2142 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
2143 _parallel_other.add(parallel_other_time);
2144 _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2145 }
2146 }
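// The "parallel other" derivation above in one line (hypothetical
// helper): it is whatever part of the measured parallel time the
// individually tracked phases do not account for.
static double sketch_parallel_other_ms(double par_total_ms,
                                       const double* phase_ms, int n) {
  double known_ms = 0.0;
  for (int i = 0; i < n; i += 1) { known_ms += phase_ms[i]; }
  return par_total_ms - known_ms;
}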
2148 void TraceGen0TimeData::increment_young_collection_count() {
2149 if (TraceGen0Time) {
2150 ++_young_pause_num;
2151 }
2152 }
2154 void TraceGen0TimeData::increment_mixed_collection_count() {
2155 if (TraceGen0Time) {
2156 ++_mixed_pause_num;
2157 }
2158 }
2160 void TraceGen0TimeData::print_summary(const char* str,
2161 const NumberSeq* seq) const {
2162 double sum = seq->sum();
2163 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2164 str, sum / 1000.0, seq->avg());
2165 }
2167 void TraceGen0TimeData::print_summary_sd(const char* str,
2168 const NumberSeq* seq) const {
2169 print_summary(str, seq);
2170 gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2171 "(num", seq->num(), seq->sd(), seq->maximum());
2172 }
2174 void TraceGen0TimeData::print() const {
2175 if (!TraceGen0Time) {
2176 return;
2177 }
2179 gclog_or_tty->print_cr("ALL PAUSES");
2180 print_summary_sd(" Total", &_total);
2181 gclog_or_tty->print_cr("");
2182 gclog_or_tty->print_cr("");
2183 gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);
2184 gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);
2185 gclog_or_tty->print_cr("");
2187 gclog_or_tty->print_cr("EVACUATION PAUSES");
2189 if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2190 gclog_or_tty->print_cr("none");
2191 } else {
2192 print_summary_sd(" Evacuation Pauses", &_total);
2193 print_summary(" Root Region Scan Wait", &_root_region_scan_wait);
2194 print_summary(" Parallel Time", &_parallel);
2195 print_summary(" Ext Root Scanning", &_ext_root_scan);
2196 print_summary(" SATB Filtering", &_satb_filtering);
2197 print_summary(" Update RS", &_update_rs);
2198 print_summary(" Scan RS", &_scan_rs);
2199 print_summary(" Object Copy", &_obj_copy);
2200 print_summary(" Termination", &_termination);
2201 print_summary(" Parallel Other", &_parallel_other);
2202 print_summary(" Clear CT", &_clear_ct);
2203 print_summary(" Other", &_other);
2204 }
2205 gclog_or_tty->print_cr("");
2207 gclog_or_tty->print_cr("MISC");
2208 print_summary_sd(" Stop World", &_all_stop_world_times_ms);
2209 print_summary_sd(" Yields", &_all_yield_times_ms);
2210 }
2212 void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
2213 if (TraceGen1Time) {
2214 _all_full_gc_times.add(full_gc_time_ms);
2215 }
2216 }
2218 void TraceGen1TimeData::print() const {
2219 if (!TraceGen1Time) {
2220 return;
2221 }
2223 if (_all_full_gc_times.num() > 0) {
2224 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2225 _all_full_gc_times.num(),
2226 _all_full_gc_times.sum() / 1000.0);
2227 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
2228 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
2229 _all_full_gc_times.sd(),
2230 _all_full_gc_times.maximum());
2231 }
2232 }