Tue, 25 Sep 2012 14:58:12 +0200
7200470: KeepAliveClosure not needed in CodeCache::do_unloading
Summary: Removed the unused keep_alive parameter
Reviewed-by: stefank, dholmes, kamg, coleenp

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/shared/gcPolicyCounters.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/debug.hpp"

// Different defaults for different number of GC threads
// They were chosen by running GCOld and SPECjbb on debris with different
// numbers of GC threads and choosing them based on the results

// all the same
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};

// all the same
static double young_cards_per_entry_ratio_defaults[] = {
  1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
};

static double cost_per_entry_ms_defaults[] = {
  0.015, 0.01, 0.01, 0.008, 0.008, 0.0055, 0.0055, 0.005
};

static double cost_per_byte_ms_defaults[] = {
  0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
};

// these should be pretty consistent
static double constant_other_time_ms_defaults[] = {
  5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
};

static double young_other_cost_per_region_ms_defaults[] = {
  0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
};

static double non_young_other_cost_per_region_ms_defaults[] = {
  1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
};

G1CollectorPolicy::G1CollectorPolicy() :
  _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                        ? ParallelGCThreads : 1),

  _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _stop_world_start(0.0),

  _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _prev_collection_pause_end_ms(0.0),
  _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
  _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  _non_young_other_cost_per_region_ms_seq(
                                         new TruncatedSeq(TruncatedSeqLength)),

  _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
  _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),

  _pause_time_target_ms((double) MaxGCPauseMillis),

  _gcs_are_young(true),

  _during_marking(false),
  _in_marking_window(false),
  _in_marking_window_im(false),

  _recent_prev_end_times_for_all_gcs_sec(
                                new TruncatedSeq(NumPrevPausesForHeuristics)),

  _recent_avg_pause_time_ratio(0.0),

  _initiate_conc_mark_if_possible(false),
  _during_initial_mark_pause(false),
  _last_young_gc(false),
  _last_gc_was_young(false),

  _eden_bytes_before_gc(0),
  _survivor_bytes_before_gc(0),
  _capacity_before_gc(0),

  _eden_cset_region_length(0),
  _survivor_cset_region_length(0),
  _old_cset_region_length(0),

  _collection_set(NULL),
  _collection_set_bytes_used_before(0),

  // Incremental CSet attributes
  _inc_cset_build_state(Inactive),
  _inc_cset_head(NULL),
  _inc_cset_tail(NULL),
  _inc_cset_bytes_used_before(0),
  _inc_cset_max_finger(NULL),
  _inc_cset_recorded_rs_lengths(0),
  _inc_cset_recorded_rs_lengths_diffs(0),
  _inc_cset_predicted_elapsed_time_ms(0.0),
  _inc_cset_predicted_elapsed_time_ms_diffs(0.0),

#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif // _MSC_VER

  _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                 G1YoungSurvRateNumRegionsSummary)),
  _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
                                              G1YoungSurvRateNumRegionsSummary)),
  // add here any more surv rate groups
  _recorded_survivor_regions(0),
  _recorded_survivor_head(NULL),
  _recorded_survivor_tail(NULL),
  _survivors_age_table(true),

  _gc_overhead_perc(0.0) {

  // Set up the region size and associated fields. Given that the
  // policy is created before the heap, we have to set this up here,
  // so it's done as soon as possible.
  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
  HeapRegionRemSet::setup_remset_size();

  G1ErgoVerbose::initialize();
  if (PrintAdaptiveSizePolicy) {
    // Currently, we only use a single switch for all the heuristics.
    G1ErgoVerbose::set_enabled(true);
    // Given that we don't currently have a verboseness level
    // parameter, we'll hardcode this to high. This can be easily
    // changed in the future.
    G1ErgoVerbose::set_level(ErgoHigh);
  } else {
    G1ErgoVerbose::set_enabled(false);
  }

  // Verify PLAB sizes
  const size_t region_size = HeapRegion::GrainWords;
  if (YoungPLABSize > region_size || OldPLABSize > region_size) {
    char buffer[128];
    jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
                 OldPLABSize > region_size ? "Old" : "Young", region_size);
    vm_exit_during_initialization(buffer);
  }

  _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
  _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;

  _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);

  int index = MIN2(_parallel_gc_threads - 1, 7);
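  // Note: the per-thread-count default arrays above each have 8 entries, so
  // the index is clamped to [0, 7]; configurations with more than 8 parallel
  // GC threads simply reuse the last (8-thread) entry.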
  _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
  _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
  _young_cards_per_entry_ratio_seq->add(
                                  young_cards_per_entry_ratio_defaults[index]);
  _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
  _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
  _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
  _young_other_cost_per_region_ms_seq->add(
                               young_other_cost_per_region_ms_defaults[index]);
  _non_young_other_cost_per_region_ms_seq->add(
                           non_young_other_cost_per_region_ms_defaults[index]);

  // Below, we might need to calculate the pause time target based on
  // the pause interval. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be the pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.
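  //
  // For example, leaving both flags at their defaults results in
  // MaxGCPauseMillis = 200 and GCPauseIntervalMillis = 200 + 1 = 201
  // (see the code below).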

  // First make sure that, if either parameter is set, its value is
  // reasonable.
  if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (MaxGCPauseMillis < 1) {
      vm_exit_during_initialization("MaxGCPauseMillis should be "
                                    "greater than 0");
    }
  }
  if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    if (GCPauseIntervalMillis < 1) {
      vm_exit_during_initialization("GCPauseIntervalMillis should be "
                                    "greater than 0");
    }
  }

  // Then, if the pause time target parameter was not set, set it to
  // the default value.
  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
      // The default pause time target in G1 is 200ms
      FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
    } else {
      // We do not allow the pause interval to be set without the
      // pause time target
      vm_exit_during_initialization("GCPauseIntervalMillis cannot be set "
                                    "without setting MaxGCPauseMillis");
    }
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // Finally, make sure that the two parameters are consistent.
  if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
    char buffer[256];
    jio_snprintf(buffer, 256,
                 "MaxGCPauseMillis (%u) should be less than "
                 "GCPauseIntervalMillis (%u)",
                 MaxGCPauseMillis, GCPauseIntervalMillis);
    vm_exit_during_initialization(buffer);
  }

  double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
  double time_slice = (double) GCPauseIntervalMillis / 1000.0;
  _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
  _sigma = (double) G1ConfidencePercent / 100.0;
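  // Sketch of the confidence math (assuming the default G1ConfidencePercent
  // of 50): _sigma becomes 0.5, and callers such as predict_will_fit() pad
  // their predictions by multiples of it, e.g. 2.0 * sigma() = 1.0.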

  // start conservatively (around 50ms is about right)
  _concurrent_mark_remark_times_ms->add(0.05);
  _concurrent_mark_cleanup_times_ms->add(0.20);
  _tenuring_threshold = MaxTenuringThreshold;
  // _max_survivor_regions will be calculated by
  // update_young_list_target_length() during initialization.
  _max_survivor_regions = 0;

  assert(GCTimeRatio > 0,
         "we should have set it to a default value in set_g1_gc_flags() "
         "if a user set it to 0");
  _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
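  // Worked example: with a GCTimeRatio of 9 (the value set_g1_gc_flags()
  // picks for G1 by default) this is 100 * (1 / (1 + 9)) = 10, i.e. a
  // 10% GC overhead target.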

  uintx reserve_perc = G1ReservePercent;
  // Put an artificial ceiling on this so that it's not set to a silly value.
  if (reserve_perc > 50) {
    reserve_perc = 50;
    warning("G1ReservePercent is set to a value that is too large, "
            "it's been updated to %u", reserve_perc);
  }
  _reserve_factor = (double) reserve_perc / 100.0;
  // This will be set when the heap is expanded
  // for the first time during initialization.
  _reserve_regions = 0;

  initialize_all();
  _collectionSetChooser = new CollectionSetChooser();
  _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags
}

void G1CollectorPolicy::initialize_flags() {
  set_min_alignment(HeapRegion::GrainBytes);
  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
  if (SurvivorRatio < 1) {
    vm_exit_during_initialization("Invalid survivor ratio specified");
  }
  CollectorPolicy::initialize_flags();
}

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) {
  assert(G1DefaultMinNewGenPercent <= G1DefaultMaxNewGenPercent, "Min larger than max");
  assert(G1DefaultMinNewGenPercent > 0 && G1DefaultMinNewGenPercent < 100, "Min out of bounds");
  assert(G1DefaultMaxNewGenPercent > 0 && G1DefaultMaxNewGenPercent < 100, "Max out of bounds");

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _adaptive_size = false;
      return;
    }
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
      _sizer_kind = SizerMaxAndNewSize;
      _adaptive_size = _min_desired_young_length == _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                  1U);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}

uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
  return MAX2(1U, default_value);
}

uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
  uint default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
  return MAX2(1U, default_value);
}
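
// Worked example (assuming the shipped defaults G1DefaultMinNewGenPercent = 20
// and G1DefaultMaxNewGenPercent = 80): a 1000-region heap yields a default
// young gen range of 200 to 800 regions; the MAX2(1U, ...) above keeps tiny
// heaps from rounding down to an empty young gen.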

void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
  assert(new_number_of_heap_regions > 0, "Heap must be initialized");

  switch (_sizer_kind) {
    case SizerDefaults:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      break;
    case SizerNewSizeOnly:
      _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions);
      _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxNewSizeOnly:
      _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions);
      _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length);
      break;
    case SizerMaxAndNewSize:
      // Do nothing. Values set on the command line, don't update them at runtime.
      break;
    case SizerNewRatio:
      _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1);
      _max_desired_young_length = _min_desired_young_length;
      break;
    default:
      ShouldNotReachHere();
  }

  assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values");
}

void G1CollectorPolicy::init() {
  // Set aside an initial future to_space.
  _g1 = G1CollectedHeap::heap();

  assert(Heap_lock->owned_by_self(), "Locking discipline.");

  initialize_gc_policy_counters();

  if (adaptive_young_list_length()) {
    _young_list_fixed_length = 0;
  } else {
    _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
  }
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();
  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;

  // We may immediately start allocating regions and placing them on the
  // collection set list. Initialize the per-collection set info.
  start_incremental_cset_building();
}

// Create the jstat counters for the policy.
void G1CollectorPolicy::initialize_gc_policy_counters() {
  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
}

bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                         double base_time_ms,
                                         uint base_free_regions,
                                         double target_pause_time_ms) {
  if (young_length >= base_free_regions) {
    // end condition 1: not enough space for the young regions
    return false;
  }

  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
  size_t bytes_to_copy =
               (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
  double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
  double young_other_time_ms = predict_young_other_time_ms(young_length);
  double pause_time_ms = base_time_ms + copy_time_ms + young_other_time_ms;
  if (pause_time_ms > target_pause_time_ms) {
    // end condition 2: prediction is over the target pause time
    return false;
  }

  size_t free_bytes =
                 (base_free_regions - young_length) * HeapRegion::GrainBytes;
  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
    // end condition 3: out-of-space (conservatively!)
    return false;
  }

  // success!
  return true;
}

void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
  // re-calculate the necessary reserve
  double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
  // We use ceiling so that if reserve_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _reserve_regions = (uint) ceil(reserve_regions_d);

  _young_gen_sizer->heap_size_changed(new_number_of_regions);
}

uint G1CollectorPolicy::calculate_young_list_desired_min_length(
                                                       uint base_min_length) {
  uint desired_min_length = 0;
  if (adaptive_young_list_length()) {
    if (_alloc_rate_ms_seq->num() > 3) {
      double now_sec = os::elapsedTime();
      double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
      double alloc_rate_ms = predict_alloc_rate_ms();
      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
    } else {
      // otherwise we don't have enough info to make the prediction
    }
  }
  desired_min_length += base_min_length;
  // make sure we don't go below any user-defined minimum bound
  return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
}

uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
  // Here, we might want to also take into account any additional
  // constraints (i.e., user-defined minimum bound). Currently, we
  // effectively don't set this bound.
  return _young_gen_sizer->max_desired_young_length();
}

void G1CollectorPolicy::update_young_list_target_length(size_t rs_lengths) {
  if (rs_lengths == (size_t) -1) {
    // if it's set to the default value (-1), we should predict it;
    // otherwise, use the given value.
    rs_lengths = (size_t) get_new_prediction(_rs_lengths_seq);
  }

  // Calculate the absolute and desired min bounds.

  // This is how many young regions we already have (currently: the survivors).
  uint base_min_length = recorded_survivor_regions();
  // This is the absolute minimum young length, which ensures that we
  // can allocate one eden region in the worst-case.
  uint absolute_min_length = base_min_length + 1;
  uint desired_min_length =
                     calculate_young_list_desired_min_length(base_min_length);
  if (desired_min_length < absolute_min_length) {
    desired_min_length = absolute_min_length;
  }

  // Calculate the absolute and desired max bounds.

  // We will try our best not to "eat" into the reserve.
  uint absolute_max_length = 0;
  if (_free_regions_at_end_of_collection > _reserve_regions) {
    absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
  }
  uint desired_max_length = calculate_young_list_desired_max_length();
  if (desired_max_length > absolute_max_length) {
    desired_max_length = absolute_max_length;
  }

  uint young_list_target_length = 0;
  if (adaptive_young_list_length()) {
    if (gcs_are_young()) {
      young_list_target_length =
                        calculate_young_list_target_length(rs_lengths,
                                                           base_min_length,
                                                           desired_min_length,
                                                           desired_max_length);
      _rs_lengths_prediction = rs_lengths;
    } else {
      // Don't calculate anything and let the code below bound it to
      // the desired_min_length, i.e., do the next GC as soon as
      // possible to maximize how many old regions we can add to it.
    }
  } else {
    // The user asked for a fixed young gen so we'll fix the young gen
    // whether the next GC is young or mixed.
    young_list_target_length = _young_list_fixed_length;
  }

  // Make sure we don't go over the desired max length, nor under the
  // desired min length. In case they clash, desired_min_length wins
  // which is why that test is second.
  if (young_list_target_length > desired_max_length) {
    young_list_target_length = desired_max_length;
  }
  if (young_list_target_length < desired_min_length) {
    young_list_target_length = desired_min_length;
  }

  assert(young_list_target_length > recorded_survivor_regions(),
         "we should be able to allocate at least one eden region");
  assert(young_list_target_length >= absolute_min_length, "post-condition");
  _young_list_target_length = young_list_target_length;

  update_max_gc_locker_expansion();
}

uint
G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
                                                      uint base_min_length,
                                                      uint desired_min_length,
                                                      uint desired_max_length) {
  assert(adaptive_young_list_length(), "pre-condition");
  assert(gcs_are_young(), "only call this for young GCs");

  // In case some edge-condition makes the desired max length too small...
  if (desired_max_length <= desired_min_length) {
    return desired_min_length;
  }

  // We'll adjust min_young_length and max_young_length not to include
  // the already allocated young regions (i.e., so they reflect the
  // min and max eden regions we'll allocate). The base_min_length
  // will be reflected in the predictions by the
  // survivor_regions_evac_time prediction.
  assert(desired_min_length > base_min_length, "invariant");
  uint min_young_length = desired_min_length - base_min_length;
  assert(desired_max_length > base_min_length, "invariant");
  uint max_young_length = desired_max_length - base_min_length;

  double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
  double survivor_regions_evac_time = predict_survivor_regions_evac_time();
  size_t pending_cards = (size_t) get_new_prediction(_pending_cards_seq);
  size_t adj_rs_lengths = rs_lengths + predict_rs_length_diff();
  size_t scanned_cards = predict_young_card_num(adj_rs_lengths);
  double base_time_ms =
    predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
    survivor_regions_evac_time;
  uint available_free_regions = _free_regions_at_end_of_collection;
  uint base_free_regions = 0;
  if (available_free_regions > _reserve_regions) {
    base_free_regions = available_free_regions - _reserve_regions;
  }

  // Here, we will make sure that the shortest young length that
  // makes sense fits within the target pause time.

  if (predict_will_fit(min_young_length, base_time_ms,
                       base_free_regions, target_pause_time_ms)) {
    // The shortest young length will fit into the target pause time;
    // we'll now check whether the absolute maximum number of young
    // regions will fit in the target pause time. If not, we'll do
    // a binary search between min_young_length and max_young_length.
    if (predict_will_fit(max_young_length, base_time_ms,
                         base_free_regions, target_pause_time_ms)) {
      // The maximum young length will fit into the target pause time.
      // We are done so set min young length to the maximum length (as
      // the result is assumed to be returned in min_young_length).
      min_young_length = max_young_length;
    } else {
      // The maximum possible number of young regions will not fit within
      // the target pause time so we'll search for the optimal
      // length. The loop invariants are:
      //
      // min_young_length < max_young_length
      // min_young_length is known to fit into the target pause time
      // max_young_length is known not to fit into the target pause time
      //
      // Going into the loop we know the above hold as we've just
      // checked them. Every time around the loop we check whether
      // the middle value between min_young_length and
      // max_young_length fits into the target pause time. If it
      // does, it becomes the new min. If it doesn't, it becomes
      // the new max. This way we maintain the loop invariants.
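      //
      // For example, with min_young_length = 8 and max_young_length = 24
      // the first probe is 8 + (24 - 8) / 2 = 16; if 16 fits, the search
      // window narrows to [16, 24), otherwise to [8, 16).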

      assert(min_young_length < max_young_length, "invariant");
      uint diff = (max_young_length - min_young_length) / 2;
      while (diff > 0) {
        uint young_length = min_young_length + diff;
        if (predict_will_fit(young_length, base_time_ms,
                             base_free_regions, target_pause_time_ms)) {
          min_young_length = young_length;
        } else {
          max_young_length = young_length;
        }
        assert(min_young_length < max_young_length, "invariant");
        diff = (max_young_length - min_young_length) / 2;
      }
      // The result is min_young_length which, according to the
      // loop invariants, should fit within the target pause time.

      // These are the post-conditions of the binary search above:
      assert(min_young_length < max_young_length,
             "otherwise we should have discovered that max_young_length "
             "fits into the pause target and not done the binary search");
      assert(predict_will_fit(min_young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should "
             "fit into the pause target");
      assert(!predict_will_fit(min_young_length + 1, base_time_ms,
                               base_free_regions, target_pause_time_ms),
             "min_young_length, the result of the binary search, should be "
             "optimal, so no larger length should fit into the pause target");
    }
  } else {
    // Even the minimum length doesn't fit into the pause time
    // target, return it as the result nevertheless.
  }
  return base_min_length + min_young_length;
}

double G1CollectorPolicy::predict_survivor_regions_evac_time() {
  double survivor_regions_evac_time = 0.0;
  for (HeapRegion * r = _recorded_survivor_head;
       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
       r = r->get_next_young_region()) {
    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, gcs_are_young());
  }
  return survivor_regions_evac_time;
}

void G1CollectorPolicy::revise_young_list_target_length_if_necessary() {
  guarantee( adaptive_young_list_length(), "should not call this otherwise" );

  size_t rs_lengths = _g1->young_list()->sampled_rs_lengths();
  if (rs_lengths > _rs_lengths_prediction) {
    // add 10% to avoid having to recalculate often
    size_t rs_lengths_prediction = rs_lengths * 1100 / 1000;
    update_young_list_target_length(rs_lengths_prediction);
  }
}

HeapWord* G1CollectorPolicy::mem_allocate_work(size_t size,
                                               bool is_tlab,
                                               bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

// This method controls how a collector handles one or more
// of its generations being fully allocated.
HeapWord* G1CollectorPolicy::satisfy_failed_allocation(size_t size,
                                                       bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

#ifndef PRODUCT
bool G1CollectorPolicy::verify_young_ages() {
  HeapRegion* head = _g1->young_list()->first_region();
  return
    verify_young_ages(head, _short_lived_surv_rate_group);
  // also call verify_young_ages on any additional surv rate groups
}

bool
G1CollectorPolicy::verify_young_ages(HeapRegion* head,
                                     SurvRateGroup *surv_rate_group) {
  guarantee( surv_rate_group != NULL, "pre-condition" );

  const char* name = surv_rate_group->name();
  bool ret = true;
  int prev_age = -1;

  for (HeapRegion* curr = head;
       curr != NULL;
       curr = curr->get_next_young_region()) {
    SurvRateGroup* group = curr->surv_rate_group();
    if (group == NULL && !curr->is_survivor()) {
      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
      ret = false;
    }

    if (surv_rate_group == group) {
      int age = curr->age_in_surv_rate_group();

      if (age < 0) {
        gclog_or_tty->print_cr("## %s: encountered negative age", name);
        ret = false;
      }

      if (age <= prev_age) {
        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
                               "(%d, %d)", name, age, prev_age);
        ret = false;
      }
      prev_age = age;
    }
  }

  return ret;
}
#endif // PRODUCT

void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}

void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _trace_gen1_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  // Reset survivors SurvRateGroup.
  _survivor_surv_rate_group->reset();
  update_young_list_target_length();
  _collectionSetChooser->clear();
}

void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}

void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
                                                      size_t start_used) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. So, there is no point in calculating
  // this every time we calculate / recalculate the target young length.
  update_survivors_policy();

  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_gen0_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _cur_collection_pause_used_at_start_bytes = start_used;
  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
  _pending_cards = _g1->pending_card_num();

  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  YoungList* young_list = _g1->young_list();
  _eden_bytes_before_gc = young_list->eden_used_bytes();
  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
  _capacity_before_gc = _g1->capacity();

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}

void G1CollectorPolicy::record_concurrent_mark_init_end(double
                                                   mark_init_elapsed_time_ms) {
  _during_marking = true;
  assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
  clear_during_initial_mark_pause();
  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
}

void G1CollectorPolicy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
  _during_marking = false;
}

void G1CollectorPolicy::record_concurrent_mark_remark_end() {
  double end_time_sec = os::elapsedTime();
  double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec) * 1000.0;
  _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
  _cur_mark_stop_world_time_ms += elapsed_time_ms;
  _prev_collection_pause_end_ms += elapsed_time_ms;

  _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}

void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}

void G1CollectorPolicy::record_concurrent_pause() {
  if (_stop_world_start > 0.0) {
    double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
    _trace_gen0_time_data.record_yield_time(yield_ms);
  }
}

bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
    return false;
  }

  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
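
  // Worked example (assuming the default InitiatingHeapOccupancyPercent of
  // 45): on a 10 GB heap the threshold is (10 GB / 100) * 45 = 4.5 GB, so a
  // cycle is requested once non-young occupancy plus the pending allocation
  // exceeds that.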

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (gcs_are_young()) {
      ergo_verbose5(ErgoConcCycles,
                    "request concurrent cycle initiation",
                    ergo_format_reason("occupancy higher than threshold")
                    ergo_format_byte("occupancy")
                    ergo_format_byte("allocation request")
                    ergo_format_byte_perc("threshold")
                    ergo_format_str("source"),
                    cur_used_bytes,
                    alloc_byte_size,
                    marking_initiating_used_threshold,
                    (double) InitiatingHeapOccupancyPercent,
                    source);
      return true;
    } else {
      ergo_verbose5(ErgoConcCycles,
                    "do not request concurrent cycle initiation",
                    ergo_format_reason("still doing mixed collections")
                    ergo_format_byte("occupancy")
                    ergo_format_byte("allocation request")
                    ergo_format_byte_perc("threshold")
                    ergo_format_str("source"),
                    cur_used_bytes,
                    alloc_byte_size,
                    marking_initiating_used_threshold,
                    (double) InitiatingHeapOccupancyPercent,
                    source);
    }
  }

  return false;
}

// Anything below that is considered to be zero
#define MIN_TIMER_GRANULARITY 0.0000001

void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
  double end_time_sec = os::elapsedTime();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
  size_t rs_size =
            _cur_collection_pause_used_regions_at_start - cset_region_length();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  bool update_stats = !_g1->evacuation_failed();

#ifndef PRODUCT
  if (G1YoungSurvRateVerbose) {
    gclog_or_tty->print_cr("");
    _short_lived_surv_rate_group->print();
    // do that for any other surv rate groups too
  }
#endif // PRODUCT

  last_pause_included_initial_mark = during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    set_initiate_conc_mark_if_possible();
  }

  _mmu_tracker->add_pause(end_time_sec - pause_time_ms / 1000.0,
                          end_time_sec, false);

  size_t freed_bytes =
    _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
  size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;

  double survival_fraction =
    (double) surviving_bytes /
    (double) _collection_set_bytes_used_before;

  if (update_stats) {
    _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
    // this is where we update the allocation rate of the application
    double app_time_ms =
      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }
    // We maintain the invariant that all objects allocated by mutator
    // threads will be allocated out of eden regions. So, we can use
    // the number of eden regions allocated since the previous GC to
    // calculate the application's allocation rate. The only exception
    // to that is humongous objects, which are allocated separately. But
    // given that humongous object allocations do not really affect
    // either the pause's duration or when the next pause will take
    // place, we can safely ignore them here.
    uint regions_allocated = eden_cset_region_length();
    double alloc_rate_ms = (double) regions_allocated / app_time_ms;
    _alloc_rate_ms_seq->add(alloc_rate_ms);

    double interval_ms =
      (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
    update_recent_gc_times(end_time_sec, pause_time_ms);
    _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms;
    if (recent_avg_pause_time_ratio() < 0.0 ||
        (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
#ifndef PRODUCT
      // Dump info to allow post-facto debugging
      gclog_or_tty->print_cr("recent_avg_pause_time_ratio() out of bounds");
      gclog_or_tty->print_cr("-------------------------------------------");
      gclog_or_tty->print_cr("Recent GC Times (ms):");
      _recent_gc_times_ms->dump();
      gclog_or_tty->print_cr("(End Time=%3.3f) Recent GC End Times (s):", end_time_sec);
      _recent_prev_end_times_for_all_gcs_sec->dump();
      gclog_or_tty->print_cr("GC = %3.3f, Interval = %3.3f, Ratio = %3.3f",
                             _recent_gc_times_ms->sum(), interval_ms, recent_avg_pause_time_ratio());
      // In debug mode, terminate the JVM if the user wants to debug at this point.
      assert(!G1FailOnFPError, "Debugging data for CR 6898948 has been dumped above");
#endif // !PRODUCT
      // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
      // CR 6902692 by redoing the manner in which the ratio is incrementally computed.
      if (_recent_avg_pause_time_ratio < 0.0) {
        _recent_avg_pause_time_ratio = 0.0;
      } else {
        assert(_recent_avg_pause_time_ratio - 1.0 > 0.0, "Ctl-point invariant");
        _recent_avg_pause_time_ratio = 1.0;
      }
    }
  }
  bool new_in_marking_window = _in_marking_window;
  bool new_in_marking_window_im = false;
  if (during_initial_mark_pause()) {
    new_in_marking_window = true;
    new_in_marking_window_im = true;
  }

  if (_last_young_gc) {
    // This is supposed to be the "last young GC" before we start
    // doing mixed GCs. Here we decide whether to start mixed GCs or not.

    if (!last_pause_included_initial_mark) {
      if (next_gc_should_be_mixed("start mixed GCs",
                                  "do not start mixed GCs")) {
        set_gcs_are_young(false);
      }
    } else {
      ergo_verbose0(ErgoMixedGCs,
                    "do not start mixed GCs",
                    ergo_format_reason("concurrent cycle is about to start"));
    }
    _last_young_gc = false;
  }

  if (!_last_gc_was_young) {
    // This is a mixed GC. Here we decide whether to continue doing
    // mixed GCs or not.

    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      set_gcs_are_young(true);
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // do that for any other surv rate groups

  if (update_stats) {
    double cost_per_card_ms = 0.0;
    if (_pending_cards > 0) {
      cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
      _cost_per_card_ms_seq->add(cost_per_card_ms);
    }

    size_t cards_scanned = _g1->cards_scanned();

    double cost_per_entry_ms = 0.0;
    if (cards_scanned > 10) {
      cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
      if (_last_gc_was_young) {
        _cost_per_entry_ms_seq->add(cost_per_entry_ms);
      } else {
        _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
      }
    }

    if (_max_rs_lengths > 0) {
      double cards_per_entry_ratio =
        (double) cards_scanned / (double) _max_rs_lengths;
      if (_last_gc_was_young) {
        _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      } else {
        _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
      }
    }

    // This is defensive. For a while _max_rs_lengths could get
    // smaller than _recorded_rs_lengths which was causing
    // rs_length_diff to get very large and mess up the RSet length
    // predictions. The reason was unsafe concurrent updates to the
    // _inc_cset_recorded_rs_lengths field which the code below guards
    // against (see CR 7118202). This bug has now been fixed (see CR
    // 7119027). However, I'm still worried that
    // _inc_cset_recorded_rs_lengths might still end up somewhat
    // inaccurate. The concurrent refinement thread calculates an
    // RSet's length concurrently with other CR threads updating it
    // which might cause it to calculate the length incorrectly (if,
    // say, it's in mid-coarsening). So I'll leave in the defensive
    // conditional below just in case.
    size_t rs_length_diff = 0;
    if (_max_rs_lengths > _recorded_rs_lengths) {
      rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
    }
    _rs_length_diff_seq->add((double) rs_length_diff);

    size_t copied_bytes = surviving_bytes;
    double cost_per_byte_ms = 0.0;
    if (copied_bytes > 0) {
      cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
      if (_in_marking_window) {
        _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
      } else {
        _cost_per_byte_ms_seq->add(cost_per_byte_ms);
      }
    }

    double all_other_time_ms = pause_time_ms -
      (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
      + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());

    double young_other_time_ms = 0.0;
    if (young_cset_region_length() > 0) {
      young_other_time_ms =
        phase_times()->young_cset_choice_time_ms() +
        phase_times()->young_free_cset_time_ms();
      _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
                                          (double) young_cset_region_length());
    }
    double non_young_other_time_ms = 0.0;
    if (old_cset_region_length() > 0) {
      non_young_other_time_ms =
        phase_times()->non_young_cset_choice_time_ms() +
        phase_times()->non_young_free_cset_time_ms();

      _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
                                            (double) old_cset_region_length());
    }

    double constant_other_time_ms = all_other_time_ms -
      (young_other_time_ms + non_young_other_time_ms);
    _constant_other_time_ms_seq->add(constant_other_time_ms);

    double survival_ratio = 0.0;
    if (_collection_set_bytes_used_before > 0) {
      survival_ratio = (double) _bytes_copied_during_gc /
                       (double) _collection_set_bytes_used_before;
    }

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  _in_marking_window = new_in_marking_window;
  _in_marking_window_im = new_in_marking_window_im;
  _free_regions_at_end_of_collection = _g1->free_regions();
  update_young_list_target_length();

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
  adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
                               phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);

  _collectionSetChooser->verify();
}

#define EXT_SIZE_FORMAT "%.1f%s"
#define EXT_SIZE_PARAMS(bytes)            \
  byte_size_in_proper_unit((double)(bytes)), \
  proper_unit_for_byte_size((bytes))

void G1CollectorPolicy::print_heap_transition() {
  _g1->print_size_transition(gclog_or_tty,
    _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
}

void G1CollectorPolicy::print_detailed_heap_transition() {
  YoungList* young_list = _g1->young_list();
  size_t eden_bytes = young_list->eden_used_bytes();
  size_t survivor_bytes = young_list->survivor_used_bytes();
  size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
  size_t used = _g1->used();
  size_t capacity = _g1->capacity();
  size_t eden_capacity =
    (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;

  gclog_or_tty->print_cr(
    "   [Eden: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT") "
    "Survivors: "EXT_SIZE_FORMAT"->"EXT_SIZE_FORMAT" "
    "Heap: "EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")->"
    EXT_SIZE_FORMAT"("EXT_SIZE_FORMAT")]",
    EXT_SIZE_PARAMS(_eden_bytes_before_gc),
    EXT_SIZE_PARAMS(_prev_eden_capacity),
    EXT_SIZE_PARAMS(eden_bytes),
    EXT_SIZE_PARAMS(eden_capacity),
    EXT_SIZE_PARAMS(_survivor_bytes_before_gc),
    EXT_SIZE_PARAMS(survivor_bytes),
    EXT_SIZE_PARAMS(used_before_gc),
    EXT_SIZE_PARAMS(_capacity_before_gc),
    EXT_SIZE_PARAMS(used),
    EXT_SIZE_PARAMS(capacity));

  _prev_eden_capacity = eden_capacity;
}

void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                     double update_rs_processed_buffers,
                                                     double goal_ms) {
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();

  if (G1UseAdaptiveConcRefinement) {
    const int k_gy = 3, k_gr = 6;
    const double inc_k = 1.1, dec_k = 0.9;

    int g = cg1r->green_zone();
    if (update_rs_time > goal_ms) {
      g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean mutator-only processing.
    } else {
      if (update_rs_time < goal_ms && update_rs_processed_buffers > g) {
        g = (int)MAX2(g * inc_k, g + 1.0);
      }
    }
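    // For example, a green zone of 10 shrinks to (int)(10 * 0.9) = 9 when
    // the update goal is overshot, and grows to MAX2(10 * 1.1, 11.0) = 11
    // when there is slack; the yellow and red zones below are then pinned
    // at 3x and 6x the green zone respectively.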
    // Change the refinement threads params
    cg1r->set_green_zone(g);
    cg1r->set_yellow_zone(g * k_gy);
    cg1r->set_red_zone(g * k_gr);
    cg1r->reinitialize_threads();

    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
    int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                    cg1r->yellow_zone());
    // Change the barrier params
    dcqs.set_process_completed_threshold(processing_threshold);
    dcqs.set_max_completed_queue(cg1r->red_zone());
  }

  int curr_queue_size = dcqs.completed_buffers_num();
  if (curr_queue_size >= cg1r->yellow_zone()) {
    dcqs.set_completed_queue_padding(curr_queue_size);
  } else {
    dcqs.set_completed_queue_padding(0);
  }
  dcqs.notify_if_necessary();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
                                                size_t scanned_cards) {
  return
    predict_rs_update_time_ms(pending_cards) +
    predict_rs_scan_time_ms(scanned_cards) +
    predict_constant_other_time_ms();
}

double
G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) {
  size_t rs_length = predict_rs_length_diff();
  size_t card_num;
  if (gcs_are_young()) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  return predict_base_elapsed_time_ms(pending_cards, card_num);
}

size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
  size_t bytes_to_copy;
  if (hr->is_marked())
    bytes_to_copy = hr->max_live_bytes();
  else {
    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
    int age = hr->age_in_surv_rate_group();
    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
  }
  return bytes_to_copy;
}

double
G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
                                                  bool for_young_gc) {
  size_t rs_length = hr->rem_set()->occupied();
  size_t card_num;

  // Predicting the number of cards is based on which type of GC
  // we're predicting for.
  if (for_young_gc) {
    card_num = predict_young_card_num(rs_length);
  } else {
    card_num = predict_non_young_card_num(rs_length);
  }
  size_t bytes_to_copy = predict_bytes_to_copy(hr);

  double region_elapsed_time_ms =
    predict_rs_scan_time_ms(card_num) +
    predict_object_copy_time_ms(bytes_to_copy);

  // The prediction of the "other" time for this region is based
  // upon the region type and NOT the GC type.
  if (hr->is_young()) {
    region_elapsed_time_ms += predict_young_other_time_ms(1);
  } else {
    region_elapsed_time_ms += predict_non_young_other_time_ms(1);
  }
  return region_elapsed_time_ms;
}

void
G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
                                            uint survivor_cset_region_length) {
  _eden_cset_region_length     = eden_cset_region_length;
  _survivor_cset_region_length = survivor_cset_region_length;
  _old_cset_region_length      = 0;
}

void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
  _recorded_rs_lengths = rs_lengths;
}

void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                               double elapsed_ms) {
  _recent_gc_times_ms->add(elapsed_ms);
  _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
  _prev_collection_pause_end_ms = end_time_sec * 1000.0;
}

size_t G1CollectorPolicy::expansion_amount() {
  double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
  double threshold = _gc_overhead_perc;
  if (recent_gc_overhead > threshold) {
    // We will double the existing space, or take
    // G1ExpandByPercentOfAvailable % of the available expansion
    // space, whichever is smaller, bounded below by a minimum
    // expansion (unless that's all that's left.)
    const size_t min_expand_bytes = 1*M;
    size_t reserved_bytes = _g1->max_capacity();
    size_t committed_bytes = _g1->capacity();
    size_t uncommitted_bytes = reserved_bytes - committed_bytes;
    size_t expand_bytes;
    size_t expand_bytes_via_pct =
      uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
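
    // Worked example (assuming G1ExpandByPercentOfAvailable at its default
    // of 20): with 1 GB committed out of a 4 GB reservation, the 3 GB of
    // uncommitted space gives 3 GB * 20% = ~614 MB, which survives the
    // MIN2/MAX2 clamps above, so the heap would grow by roughly that much.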

    ergo_verbose5(ErgoHeapSizing,
                  "attempt heap expansion",
                  ergo_format_reason("recent GC overhead higher than "
                                     "threshold after GC")
                  ergo_format_perc("recent GC overhead")
                  ergo_format_perc("threshold")
                  ergo_format_byte("uncommitted")
                  ergo_format_byte_perc("calculated expansion amount"),
                  recent_gc_overhead, threshold,
                  uncommitted_bytes,
                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);

    return expand_bytes;
  } else {
    return 0;
  }
}

void G1CollectorPolicy::print_tracing_info() const {
  _trace_gen0_time_data.print();
  _trace_gen1_time_data.print();
}

void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}

#ifndef PRODUCT
// for debugging, bit of a hack...
static char*
region_num_to_mbs(int length) {
  static char buffer[64];
  double bytes = (double) (length * HeapRegion::GrainBytes);
  double mbs = bytes / (double) (1024 * 1024);
  sprintf(buffer, "%7.2lfMB", mbs);
  return buffer;
}
#endif // PRODUCT

uint G1CollectorPolicy::max_regions(int purpose) {
  switch (purpose) {
    case GCAllocForSurvived:
      return _max_survivor_regions;
    case GCAllocForTenured:
      return REGIONS_UNLIMITED;
    default:
      ShouldNotReachHere();
      return REGIONS_UNLIMITED;
  };
}

void G1CollectorPolicy::update_max_gc_locker_expansion() {
  uint expansion_region_num = 0;
  if (GCLockerEdenExpansionPercent > 0) {
    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
    double expansion_region_num_d = perc * (double) _young_list_target_length;
    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
    // less than 1.0) we'll get 1.
    expansion_region_num = (uint) ceil(expansion_region_num_d);
  } else {
    assert(expansion_region_num == 0, "sanity");
  }
  _young_list_max_length = _young_list_target_length + expansion_region_num;
  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
}

// Calculates survivor space parameters.
void G1CollectorPolicy::update_survivors_policy() {
  double max_survivor_regions_d =
                 (double) _young_list_target_length / (double) SurvivorRatio;
  // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
  // smaller than 1.0) we'll get 1.
  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
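
  // For example, a young list target of 50 regions with the default
  // SurvivorRatio of 8 reserves ceil(50 / 8) = 7 survivor regions.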

  _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
                               HeapRegion::GrainWords * _max_survivor_regions);
}

bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
                                                  GCCause::Cause gc_cause) {
  bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
  if (!during_cycle) {
    ergo_verbose1(ErgoConcCycles,
                  "request concurrent cycle initiation",
                  ergo_format_reason("requested by GC cause")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    set_initiate_conc_mark_if_possible();
    return true;
  } else {
    ergo_verbose1(ErgoConcCycles,
                  "do not request concurrent cycle initiation",
                  ergo_format_reason("concurrent cycle already in progress")
                  ergo_format_str("GC cause"),
                  GCCause::to_string(gc_cause));
    return false;
  }
}

void
G1CollectorPolicy::decide_on_conc_mark_initiation() {
  // We are about to decide on whether this pause will be an
  // initial-mark pause.

  // First, during_initial_mark_pause() should not be already set. We
  // will set it here if we have to. However, it should be cleared by
  // the end of the pause (it's only set for the duration of an
  // initial-mark pause).
  assert(!during_initial_mark_pause(), "pre-condition");

  if (initiate_conc_mark_if_possible()) {
    // We had noticed on a previous pause that the heap occupancy has
    // gone over the initiating threshold and we should start a
    // concurrent marking cycle. So we might initiate one.

    bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
    if (!during_cycle) {
      // The concurrent marking thread is not "during a cycle", i.e.,
      // it has completed the last one. So we can go ahead and
      // initiate a new cycle.

      set_during_initial_mark_pause();
      // We do not allow mixed GCs during marking.
      if (!gcs_are_young()) {
        set_gcs_are_young(true);
        ergo_verbose0(ErgoMixedGCs,
                      "end mixed GCs",
                      ergo_format_reason("concurrent cycle is about to start"));
      }

      // And we can now clear initiate_conc_mark_if_possible() as
      // we've already acted on it.
      clear_initiate_conc_mark_if_possible();

      ergo_verbose0(ErgoConcCycles,
                    "initiate concurrent cycle",
                    ergo_format_reason("concurrent cycle initiation requested"));
    } else {
      // The concurrent marking thread is still finishing up the
      // previous cycle. If we started one right now the two cycles
      // would overlap. In particular, the concurrent marking thread
      // might be in the process of clearing the next marking bitmap
      // (which we will use for the next cycle if we start one). Starting
      // a cycle now would be bad given that parts of the marking
      // information might get cleared by the marking thread. And we
      // cannot wait for the marking thread to finish the cycle as it
      // periodically yields while clearing the next marking bitmap
      // and, if it's in a yield point, it's waiting for us to
      // finish. So, at this point we will not start a cycle and we'll
      // let the concurrent marking thread complete the last one.
1477 ergo_verbose0(ErgoConcCycles,
1478 "do not initiate concurrent cycle",
1479 ergo_format_reason("concurrent cycle already in progress"));
1480 }
1481 }
1482 }
1484 class KnownGarbageClosure: public HeapRegionClosure {
1485 G1CollectedHeap* _g1h;
1486 CollectionSetChooser* _hrSorted;
1488 public:
1489 KnownGarbageClosure(CollectionSetChooser* hrSorted) :
1490 _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
1492 bool doHeapRegion(HeapRegion* r) {
1493 // We only include humongous regions in collection
1494 // sets when concurrent mark shows that their contained object is
1495 // unreachable.
1497 // Do we have any marking information for this region?
1498 if (r->is_marked()) {
1499 // We will skip any region that's currently used as an old GC
1500 // alloc region (we should not consider those for collection
1501 // before we fill them up).
1502 if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1503 _hrSorted->add_region(r);
1504 }
1505 }
1506 return false;
1507 }
1508 };
1510 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1511 G1CollectedHeap* _g1h;
1512 CSetChooserParUpdater _cset_updater;
1514 public:
1515 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1516 uint chunk_size) :
1517 _g1h(G1CollectedHeap::heap()),
1518 _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1520 bool doHeapRegion(HeapRegion* r) {
1521 // Do we have any marking information for this region?
1522 if (r->is_marked()) {
1523 // We will skip any region that's currently used as an old GC
1524 // alloc region (we should not consider those for collection
1525 // before we fill them up).
1526 if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
1527 _cset_updater.add_region(r);
1528 }
1529 }
1530 return false;
1531 }
1532 };
1534 class ParKnownGarbageTask: public AbstractGangTask {
1535 CollectionSetChooser* _hrSorted;
1536 uint _chunk_size;
1537 G1CollectedHeap* _g1;
1538 public:
1539 ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
1540 AbstractGangTask("ParKnownGarbageTask"),
1541 _hrSorted(hrSorted), _chunk_size(chunk_size),
1542 _g1(G1CollectedHeap::heap()) { }
1544 void work(uint worker_id) {
1545 ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);
1547 // Back to zero for the claim value.
1548 _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
1549 _g1->workers()->active_workers(),
1550 HeapRegion::InitialClaimValue);
1551 }
1552 };
1554 void
1555 G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
1556 _collectionSetChooser->clear();
1558 uint region_num = _g1->n_regions();
1559 if (G1CollectedHeap::use_parallel_gc_threads()) {
1560 const uint OverpartitionFactor = 4;
1561 uint WorkUnit;
1562 // The use of MinChunkSize = 8 in the original code
1563 // causes some assertion failures when the total number of
1564 // regions is less than 8. The code here tries to fix that.
1565 // Should the original code also be fixed?
1566 if (no_of_gc_threads > 0) {
1567 const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
1568 WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
1569 MinWorkUnit);
1570 } else {
1571 assert(no_of_gc_threads > 0,
1572 "The active gc workers should be greater than 0");
1573 // In a product build do something reasonable to avoid a crash.
1574 const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
1575 WorkUnit =
1576 MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
1577 MinWorkUnit);
1578 }
1579 _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(),
1580 WorkUnit);
1581 ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
1582 (int) WorkUnit);
1583 _g1->workers()->run_task(&parKnownGarbageTask);
1585 assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
1586 "sanity check");
1587 } else {
1588 KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
1589 _g1->heap_region_iterate(&knownGarbagecl);
1590 }
1592 _collectionSetChooser->sort_regions();
1594 double end_sec = os::elapsedTime();
1595 double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1596 _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
1597 _cur_mark_stop_world_time_ms += elapsed_time_ms;
1598 _prev_collection_pause_end_ms += elapsed_time_ms;
1599 _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_sec, true);
1600 }
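// A worked example of the chunking above for a small heap, assuming a
// hypothetical region_num of 5 and no_of_gc_threads of 8:
//   MinWorkUnit = MAX2(5 / 8, 1U)       = 1
//   WorkUnit    = MAX2(5 / (8 * 4), 1U) = 1
// Clamping the chunk size to at least one region is what avoids the
// assertion failures mentioned above when the heap has fewer regions
// than the old MinChunkSize of 8.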
1602 // Add the heap region at the head of the non-incremental collection set
1603 void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
1604 assert(_inc_cset_build_state == Active, "Precondition");
1605 assert(!hr->is_young(), "non-incremental add of young region");
1607 assert(!hr->in_collection_set(), "should not already be in the CSet");
1608 hr->set_in_collection_set(true);
1609 hr->set_next_in_collection_set(_collection_set);
1610 _collection_set = hr;
1611 _collection_set_bytes_used_before += hr->used();
1612 _g1->register_region_with_in_cset_fast_test(hr);
1613 size_t rs_length = hr->rem_set()->occupied();
1614 _recorded_rs_lengths += rs_length;
1615 _old_cset_region_length += 1;
1616 }
1618 // Initialize the per-collection-set information
1619 void G1CollectorPolicy::start_incremental_cset_building() {
1620 assert(_inc_cset_build_state == Inactive, "Precondition");
1622 _inc_cset_head = NULL;
1623 _inc_cset_tail = NULL;
1624 _inc_cset_bytes_used_before = 0;
1626 _inc_cset_max_finger = 0;
1627 _inc_cset_recorded_rs_lengths = 0;
1628 _inc_cset_recorded_rs_lengths_diffs = 0;
1629 _inc_cset_predicted_elapsed_time_ms = 0.0;
1630 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1631 _inc_cset_build_state = Active;
1632 }
1634 void G1CollectorPolicy::finalize_incremental_cset_building() {
1635 assert(_inc_cset_build_state == Active, "Precondition");
1636 assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1638 // The two "main" fields, _inc_cset_recorded_rs_lengths and
1639 // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
1640 // that adds a new region to the CSet. Further updates by the
1641 // concurrent refinement thread that samples the young RSet lengths
1642 // are accumulated in the *_diffs fields. Here we add the diffs to
1643 // the "main" fields.
1645 if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
1646 _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;
1647 } else {
1648 // This is defensive. The diff should in theory always be positive
1649 // as RSets can only grow between GCs. However, given that we
1650 // sample their size concurrently with other threads updating them,
1651 // it's possible that we might get the wrong size back, which
1652 // could make the calculations somewhat inaccurate.
1653 size_t diffs = (size_t) (-_inc_cset_recorded_rs_lengths_diffs);
1654 if (_inc_cset_recorded_rs_lengths >= diffs) {
1655 _inc_cset_recorded_rs_lengths -= diffs;
1656 } else {
1657 _inc_cset_recorded_rs_lengths = 0;
1658 }
1659 }
1660 _inc_cset_predicted_elapsed_time_ms +=
1661 _inc_cset_predicted_elapsed_time_ms_diffs;
1663 _inc_cset_recorded_rs_lengths_diffs = 0;
1664 _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1665 }
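// A worked example of the folding above, with hypothetical numbers:
// if the "main" field holds 1000 recorded RSet entries and concurrent
// sampling accumulated a diff of +150, the result is 1150. If sampling
// instead produced a bogus diff of -1200, the defensive branch clamps
// the result to 0 rather than letting the unsigned subtraction wrap.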
1667 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
1668 // This routine is used when:
1669 // * adding survivor regions to the incremental cset at the end of an
1670 // evacuation pause,
1671 // * adding the current allocation region to the incremental cset
1672 // when it is retired, and
1673 // * updating existing policy information for a region in the
1674 // incremental cset via young list RSet sampling.
1675 // Therefore this routine may be called at a safepoint by the
1676 // VM thread, or in-between safepoints by mutator threads (when
1677 // retiring the current allocation region) or a concurrent
1678 // refine thread (RSet sampling).
1680 double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1681 size_t used_bytes = hr->used();
1682 _inc_cset_recorded_rs_lengths += rs_length;
1683 _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
1684 _inc_cset_bytes_used_before += used_bytes;
1686 // Cache the values we have added to the aggregated information
1687 // in the heap region in case we have to remove this region from
1688 // the incremental collection set, or it is updated by the
1689 // rset sampling code
1690 hr->set_recorded_rs_length(rs_length);
1691 hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
1692 }
1694 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr,
1695 size_t new_rs_length) {
1696 // Update the CSet information that is dependent on the new RS length
1697 assert(hr->is_young(), "Precondition");
1698 assert(!SafepointSynchronize::is_at_safepoint(),
1699 "should not be at a safepoint");
1701 // We could have updated _inc_cset_recorded_rs_lengths and
1702 // _inc_cset_predicted_elapsed_time_ms directly but we'd need to do
1703 // that atomically, as this code is executed by a concurrent
1704 // refinement thread, potentially concurrently with a mutator thread
1705 // allocating a new region and also updating the same fields. To
1706 // avoid the atomic operations we accumulate these updates on two
1707 // separate fields (*_diffs) and we'll just add them to the "main"
1708 // fields at the start of a GC.
1710 ssize_t old_rs_length = (ssize_t) hr->recorded_rs_length();
1711 ssize_t rs_lengths_diff = (ssize_t) new_rs_length - old_rs_length;
1712 _inc_cset_recorded_rs_lengths_diffs += rs_lengths_diff;
1714 double old_elapsed_time_ms = hr->predicted_elapsed_time_ms();
1715 double new_region_elapsed_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1716 double elapsed_ms_diff = new_region_elapsed_time_ms - old_elapsed_time_ms;
1717 _inc_cset_predicted_elapsed_time_ms_diffs += elapsed_ms_diff;
1719 hr->set_recorded_rs_length(new_rs_length);
1720 hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1721 }
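// A worked example of the diff bookkeeping above, with hypothetical
// numbers: a young region was added with a recorded RSet length of 40
// and a later sample sees 55, so rs_lengths_diff = 55 - 40 = +15 is
// accumulated in _inc_cset_recorded_rs_lengths_diffs and the region's
// cached value becomes 55. The +15 is only folded into the "main"
// field by finalize_incremental_cset_building() at the next GC, which
// is why no atomic update is needed here.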
1723 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1724 assert(hr->is_young(), "invariant");
1725 assert(hr->young_index_in_cset() > -1, "should have already been set");
1726 assert(_inc_cset_build_state == Active, "Precondition");
1728 // We need to clear and set the cached recorded/predicted collection
1729 // set information in the heap region here (before the region gets
1730 // added to the collection set). An individual heap region's cached
1731 // values are calculated, aggregated with the policy collection set
1732 // info, and cached in the heap region, initially here and then
1733 // subsequently by the Young List RSet sampling code.
1735 size_t rs_length = hr->rem_set()->occupied();
1736 add_to_incremental_cset_info(hr, rs_length);
1738 HeapWord* hr_end = hr->end();
1739 _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
1741 assert(!hr->in_collection_set(), "invariant");
1742 hr->set_in_collection_set(true);
1743 assert(hr->next_in_collection_set() == NULL, "invariant");
1745 _g1->register_region_with_in_cset_fast_test(hr);
1746 }
1748 // Add the region at the RHS of the incremental cset
1749 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
1750 // We should only ever be appending survivors at the end of a pause
1751 assert(hr->is_survivor(), "Logic");
1753 // Do the 'common' stuff
1754 add_region_to_incremental_cset_common(hr);
1756 // Now add the region at the right hand side
1757 if (_inc_cset_tail == NULL) {
1758 assert(_inc_cset_head == NULL, "invariant");
1759 _inc_cset_head = hr;
1760 } else {
1761 _inc_cset_tail->set_next_in_collection_set(hr);
1762 }
1763 _inc_cset_tail = hr;
1764 }
1766 // Add the region to the LHS of the incremental cset
1767 void G1CollectorPolicy::add_region_to_incremental_cset_lhs(HeapRegion* hr) {
1768 // Survivors should be added to the RHS at the end of a pause
1769 assert(!hr->is_survivor(), "Logic");
1771 // Do the 'common' stuff
1772 add_region_to_incremental_cset_common(hr);
1774 // Add the region at the left hand side
1775 hr->set_next_in_collection_set(_inc_cset_head);
1776 if (_inc_cset_head == NULL) {
1777 assert(_inc_cset_tail == NULL, "Invariant");
1778 _inc_cset_tail = hr;
1779 }
1780 _inc_cset_head = hr;
1781 }
1783 #ifndef PRODUCT
1784 void G1CollectorPolicy::print_collection_set(HeapRegion* list_head, outputStream* st) {
1785 assert(list_head == inc_cset_head() || list_head == collection_set(), "must be");
1787 st->print_cr("\nCollection_set:");
1788 HeapRegion* csr = list_head;
1789 while (csr != NULL) {
1790 HeapRegion* next = csr->next_in_collection_set();
1791 assert(csr->in_collection_set(), "bad CS");
1792 st->print_cr(" "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
1793 HR_FORMAT_PARAMS(csr),
1794 csr->prev_top_at_mark_start(), csr->next_top_at_mark_start(),
1795 csr->age_in_surv_rate_group_cond());
1796 csr = next;
1797 }
1798 }
1799 #endif // !PRODUCT
1801 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1802 const char* false_action_str) {
1803 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1804 if (cset_chooser->is_empty()) {
1805 ergo_verbose0(ErgoMixedGCs,
1806 false_action_str,
1807 ergo_format_reason("candidate old regions not available"));
1808 return false;
1809 }
1810 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1811 size_t capacity_bytes = _g1->capacity();
1812 double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1813 double threshold = (double) G1HeapWastePercent;
1814 if (perc < threshold) {
1815 ergo_verbose4(ErgoMixedGCs,
1816 false_action_str,
1817 ergo_format_reason("reclaimable percentage lower than threshold")
1818 ergo_format_region("candidate old regions")
1819 ergo_format_byte_perc("reclaimable")
1820 ergo_format_perc("threshold"),
1821 cset_chooser->remaining_regions(),
1822 reclaimable_bytes, perc, threshold);
1823 return false;
1824 }
1826 ergo_verbose4(ErgoMixedGCs,
1827 true_action_str,
1828 ergo_format_reason("candidate old regions available")
1829 ergo_format_region("candidate old regions")
1830 ergo_format_byte_perc("reclaimable")
1831 ergo_format_perc("threshold"),
1832 cset_chooser->remaining_regions(),
1833 reclaimable_bytes, perc, threshold);
1834 return true;
1835 }
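// A worked example of the check above, with hypothetical numbers: on a
// 1024 MB heap, 40 MB of reclaimable space in the candidate old regions
// is about 3.9%; if G1HeapWastePercent were 10, that is below the
// threshold and the method returns false (the next GC stays young-only).
// With 150 MB reclaimable (~14.6%) it returns true and the next GC
// will be mixed.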
1837 void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
1838 double young_start_time_sec = os::elapsedTime();
1840 YoungList* young_list = _g1->young_list();
1841 finalize_incremental_cset_building();
1843 guarantee(target_pause_time_ms > 0.0,
1844 err_msg("target_pause_time_ms = %1.6lf should be positive",
1845 target_pause_time_ms));
1846 guarantee(_collection_set == NULL, "Precondition");
1848 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1849 double predicted_pause_time_ms = base_time_ms;
1850 double time_remaining_ms = target_pause_time_ms - base_time_ms;
1852 ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1853 "start choosing CSet",
1854 ergo_format_size("_pending_cards")
1855 ergo_format_ms("predicted base time")
1856 ergo_format_ms("remaining time")
1857 ergo_format_ms("target pause time"),
1858 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
1860 _last_gc_was_young = gcs_are_young();
1862 if (_last_gc_was_young) {
1863 _trace_gen0_time_data.increment_young_collection_count();
1864 } else {
1865 _trace_gen0_time_data.increment_mixed_collection_count();
1866 }
1868 // The young list is laid out so that the survivor regions from the
1869 // previous pause are appended to the RHS of the young list, i.e.
1870 // [Newly Young Regions ++ Survivors from last pause].
1872 uint survivor_region_length = young_list->survivor_length();
1873 uint eden_region_length = young_list->length() - survivor_region_length;
1874 init_cset_region_lengths(eden_region_length, survivor_region_length);
1876 HeapRegion* hr = young_list->first_survivor_region();
1877 while (hr != NULL) {
1878 assert(hr->is_survivor(), "badly formed young list");
1879 hr->set_young();
1880 hr = hr->get_next_young_region();
1881 }
1883 // Clear the fields that point to the survivor list - they are all young now.
1884 young_list->clear_survivors();
1886 _collection_set = _inc_cset_head;
1887 _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
1888 time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
1889 predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
1891 ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
1892 "add young regions to CSet",
1893 ergo_format_region("eden")
1894 ergo_format_region("survivors")
1895 ergo_format_ms("predicted young region time"),
1896 eden_region_length, survivor_region_length,
1897 _inc_cset_predicted_elapsed_time_ms);
1899 // The number of recorded young regions is the incremental
1900 // collection set's current size
1901 set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
1903 double young_end_time_sec = os::elapsedTime();
1904 phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
1906 // Set the start of the non-young choice time.
1907 double non_young_start_time_sec = young_end_time_sec;
1909 if (!gcs_are_young()) {
1910 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1911 cset_chooser->verify();
1912 const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length();
1913 const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length();
1915 uint expensive_region_num = 0;
1916 bool check_time_remaining = adaptive_young_list_length();
1918 HeapRegion* hr = cset_chooser->peek();
1919 while (hr != NULL) {
1920 if (old_cset_region_length() >= max_old_cset_length) {
1921 // Added maximum number of old regions to the CSet.
1922 ergo_verbose2(ErgoCSetConstruction,
1923 "finish adding old regions to CSet",
1924 ergo_format_reason("old CSet region num reached max")
1925 ergo_format_region("old")
1926 ergo_format_region("max"),
1927 old_cset_region_length(), max_old_cset_length);
1928 break;
1929 }
1931 double predicted_time_ms = predict_region_elapsed_time_ms(hr, gcs_are_young());
1932 if (check_time_remaining) {
1933 if (predicted_time_ms > time_remaining_ms) {
1934 // Too expensive for the current CSet.
1936 if (old_cset_region_length() >= min_old_cset_length) {
1937 // We have added the minimum number of old regions to the CSet,
1938 // we are done with this CSet.
1939 ergo_verbose4(ErgoCSetConstruction,
1940 "finish adding old regions to CSet",
1941 ergo_format_reason("predicted time is too high")
1942 ergo_format_ms("predicted time")
1943 ergo_format_ms("remaining time")
1944 ergo_format_region("old")
1945 ergo_format_region("min"),
1946 predicted_time_ms, time_remaining_ms,
1947 old_cset_region_length(), min_old_cset_length);
1948 break;
1949 }
1951 // We'll add it anyway given that we haven't reached the
1952 // minimum number of old regions.
1953 expensive_region_num += 1;
1954 }
1955 } else {
1956 if (old_cset_region_length() >= min_old_cset_length) {
1957 // In the non-auto-tuning case, we'll finish adding regions
1958 // to the CSet if we reach the minimum.
1959 ergo_verbose2(ErgoCSetConstruction,
1960 "finish adding old regions to CSet",
1961 ergo_format_reason("old CSet region num reached min")
1962 ergo_format_region("old")
1963 ergo_format_region("min"),
1964 old_cset_region_length(), min_old_cset_length);
1965 break;
1966 }
1967 }
1969 // We will add this region to the CSet.
1970 time_remaining_ms -= predicted_time_ms;
1971 predicted_pause_time_ms += predicted_time_ms;
1972 cset_chooser->remove_and_move_to_next(hr);
1973 _g1->old_set_remove(hr);
1974 add_old_region_to_cset(hr);
1976 hr = cset_chooser->peek();
1977 }
1978 if (hr == NULL) {
1979 ergo_verbose0(ErgoCSetConstruction,
1980 "finish adding old regions to CSet",
1981 ergo_format_reason("candidate old regions not available"));
1982 }
1984 if (expensive_region_num > 0) {
1985 // We print the information once here at the end, predicated on
1986 // whether we added any apparently expensive regions or not, to
1987 // avoid generating output per region.
1988 ergo_verbose4(ErgoCSetConstruction,
1989 "added expensive regions to CSet",
1990 ergo_format_reason("old CSet region num not reached min")
1991 ergo_format_region("old")
1992 ergo_format_region("expensive")
1993 ergo_format_region("min")
1994 ergo_format_ms("remaining time"),
1995 old_cset_region_length(),
1996 expensive_region_num,
1997 min_old_cset_length,
1998 time_remaining_ms);
1999 }
2001 cset_chooser->verify();
2002 }
2004 stop_incremental_cset_building();
2006 ergo_verbose5(ErgoCSetConstruction,
2007 "finish choosing CSet",
2008 ergo_format_region("eden")
2009 ergo_format_region("survivors")
2010 ergo_format_region("old")
2011 ergo_format_ms("predicted pause time")
2012 ergo_format_ms("target pause time"),
2013 eden_region_length, survivor_region_length,
2014 old_cset_region_length(),
2015 predicted_pause_time_ms, target_pause_time_ms);
2017 double non_young_end_time_sec = os::elapsedTime();
2018 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2019 }
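// A worked example of the time budgeting above, with hypothetical
// numbers: for a 200 ms pause target and a predicted base time of
// 50 ms, time_remaining_ms starts at 150 ms; if the young regions are
// predicted at 120 ms, 30 ms is left for old regions. With candidate
// old regions predicted at roughly 5 ms each, about six of them fit in
// the budget, subject to the min/max old CSet length bounds obtained
// from the CollectionSetChooser.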
2021 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
2022 if (TraceGen0Time) {
2023 _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2024 }
2025 }
2027 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
2028 if (TraceGen0Time) {
2029 _all_yield_times_ms.add(yield_time_ms);
2030 }
2031 }
2033 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2034 if (TraceGen0Time) {
2035 _total.add(pause_time_ms);
2036 _other.add(pause_time_ms - phase_times->accounted_time_ms());
2037 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2038 _parallel.add(phase_times->cur_collection_par_time_ms());
2039 _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
2040 _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
2041 _update_rs.add(phase_times->average_last_update_rs_time());
2042 _scan_rs.add(phase_times->average_last_scan_rs_time());
2043 _obj_copy.add(phase_times->average_last_obj_copy_time());
2044 _termination.add(phase_times->average_last_termination_time());
2046 double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
2047 phase_times->average_last_satb_filtering_times_ms() +
2048 phase_times->average_last_update_rs_time() +
2049 phase_times->average_last_scan_rs_time() +
2050 phase_times->average_last_obj_copy_time() +
2051 phase_times->average_last_termination_time();
2053 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
2054 _parallel_other.add(parallel_other_time);
2055 _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2056 }
2057 }
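// The "parallel other" time recorded above is the slack between the
// measured parallel phase and the sum of its known sub-phases. For
// example, with a hypothetical 80 ms parallel time and sub-phases
// summing to 74 ms, 6 ms would be attributed to _parallel_other.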
2059 void TraceGen0TimeData::increment_young_collection_count() {
2060 if (TraceGen0Time) {
2061 ++_young_pause_num;
2062 }
2063 }
2065 void TraceGen0TimeData::increment_mixed_collection_count() {
2066 if (TraceGen0Time) {
2067 ++_mixed_pause_num;
2068 }
2069 }
2071 void TraceGen0TimeData::print_summary(const char* str,
2072 const NumberSeq* seq) const {
2073 double sum = seq->sum();
2074 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2075 str, sum / 1000.0, seq->avg());
2076 }
2078 void TraceGen0TimeData::print_summary_sd(const char* str,
2079 const NumberSeq* seq) const {
2080 print_summary(str, seq);
2081 gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2082 "(num", seq->num(), seq->sd(), seq->maximum());
2083 }
2085 void TraceGen0TimeData::print() const {
2086 if (!TraceGen0Time) {
2087 return;
2088 }
2090 gclog_or_tty->print_cr("ALL PAUSES");
2091 print_summary_sd(" Total", &_total);
2092 gclog_or_tty->print_cr("");
2093 gclog_or_tty->print_cr("");
2094 gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);
2095 gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);
2096 gclog_or_tty->print_cr("");
2098 gclog_or_tty->print_cr("EVACUATION PAUSES");
2100 if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2101 gclog_or_tty->print_cr("none");
2102 } else {
2103 print_summary_sd(" Evacuation Pauses", &_total);
2104 print_summary(" Root Region Scan Wait", &_root_region_scan_wait);
2105 print_summary(" Parallel Time", &_parallel);
2106 print_summary(" Ext Root Scanning", &_ext_root_scan);
2107 print_summary(" SATB Filtering", &_satb_filtering);
2108 print_summary(" Update RS", &_update_rs);
2109 print_summary(" Scan RS", &_scan_rs);
2110 print_summary(" Object Copy", &_obj_copy);
2111 print_summary(" Termination", &_termination);
2112 print_summary(" Parallel Other", &_parallel_other);
2113 print_summary(" Clear CT", &_clear_ct);
2114 print_summary(" Other", &_other);
2115 }
2116 gclog_or_tty->print_cr("");
2118 gclog_or_tty->print_cr("MISC");
2119 print_summary_sd(" Stop World", &_all_stop_world_times_ms);
2120 print_summary_sd(" Yields", &_all_yield_times_ms);
2121 }
2123 void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
2124 if (TraceGen1Time) {
2125 _all_full_gc_times.add(full_gc_time_ms);
2126 }
2127 }
2129 void TraceGen1TimeData::print() const {
2130 if (!TraceGen1Time) {
2131 return;
2132 }
2134 if (_all_full_gc_times.num() > 0) {
2135 gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2136 _all_full_gc_times.num(),
2137 _all_full_gc_times.sum() / 1000.0);
2138 gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
2139 gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
2140 _all_full_gc_times.sd(),
2141 _all_full_gc_times.maximum());
2142 }
2143 }